diff --git a/f32-branch/.buildinfo b/f32-branch/.buildinfo
new file mode 100644
index 00000000..d39c0cf9
--- /dev/null
+++ b/f32-branch/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: c4915faa43645ef53e2c72f589eb92b8
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/f32-branch/.doctrees/composer-cli.doctree b/f32-branch/.doctrees/composer-cli.doctree
new file mode 100644
index 00000000..a2ae3156
Binary files /dev/null and b/f32-branch/.doctrees/composer-cli.doctree differ
diff --git a/f32-branch/.doctrees/composer.cli.doctree b/f32-branch/.doctrees/composer.cli.doctree
new file mode 100644
index 00000000..11c62862
Binary files /dev/null and b/f32-branch/.doctrees/composer.cli.doctree differ
diff --git a/f32-branch/.doctrees/composer.doctree b/f32-branch/.doctrees/composer.doctree
new file mode 100644
index 00000000..43b5c8c7
Binary files /dev/null and b/f32-branch/.doctrees/composer.doctree differ
diff --git a/f32-branch/.doctrees/environment.pickle b/f32-branch/.doctrees/environment.pickle
new file mode 100644
index 00000000..79b47a11
Binary files /dev/null and b/f32-branch/.doctrees/environment.pickle differ
diff --git a/f32-branch/.doctrees/index.doctree b/f32-branch/.doctrees/index.doctree
new file mode 100644
index 00000000..f4b945c9
Binary files /dev/null and b/f32-branch/.doctrees/index.doctree differ
diff --git a/f32-branch/.doctrees/intro.doctree b/f32-branch/.doctrees/intro.doctree
new file mode 100644
index 00000000..ec1909d3
Binary files /dev/null and b/f32-branch/.doctrees/intro.doctree differ
diff --git a/f32-branch/.doctrees/lifted.doctree b/f32-branch/.doctrees/lifted.doctree
new file mode 100644
index 00000000..d50b7740
Binary files /dev/null and b/f32-branch/.doctrees/lifted.doctree differ
diff --git a/f32-branch/.doctrees/livemedia-creator.doctree b/f32-branch/.doctrees/livemedia-creator.doctree
new file mode 100644
index 00000000..a6a5e2fc
Binary files /dev/null and b/f32-branch/.doctrees/livemedia-creator.doctree differ
diff --git a/f32-branch/.doctrees/lorax-composer.doctree b/f32-branch/.doctrees/lorax-composer.doctree
new file mode 100644
index 00000000..308d27ce
Binary files /dev/null and b/f32-branch/.doctrees/lorax-composer.doctree differ
diff --git a/f32-branch/.doctrees/lorax.doctree b/f32-branch/.doctrees/lorax.doctree
new file mode 100644
index 00000000..13f54d4d
Binary files /dev/null and b/f32-branch/.doctrees/lorax.doctree differ
diff --git a/f32-branch/.doctrees/mkksiso.doctree b/f32-branch/.doctrees/mkksiso.doctree
new file mode 100644
index 00000000..1e6abd11
Binary files /dev/null and b/f32-branch/.doctrees/mkksiso.doctree differ
diff --git a/f32-branch/.doctrees/modules.doctree b/f32-branch/.doctrees/modules.doctree
new file mode 100644
index 00000000..79e48e0e
Binary files /dev/null and b/f32-branch/.doctrees/modules.doctree differ
diff --git a/f32-branch/.doctrees/product-images.doctree b/f32-branch/.doctrees/product-images.doctree
new file mode 100644
index 00000000..2025b6c8
Binary files /dev/null and b/f32-branch/.doctrees/product-images.doctree differ
diff --git a/f32-branch/.doctrees/pylorax.api.doctree b/f32-branch/.doctrees/pylorax.api.doctree
new file mode 100644
index 00000000..1d999b39
Binary files /dev/null and b/f32-branch/.doctrees/pylorax.api.doctree differ
diff --git a/f32-branch/.doctrees/pylorax.doctree b/f32-branch/.doctrees/pylorax.doctree
new file mode 100644
index 00000000..49aac96c
Binary files /dev/null and b/f32-branch/.doctrees/pylorax.doctree differ
diff --git a/f32-branch/README b/f32-branch/README
new file mode 100644
index 00000000..615e8738
--- /dev/null
+++ b/f32-branch/README
@@ -0,0 +1,5 @@
+To build the docs for this branch run:
+make test-in-docker
+make docs-in-docker
+
+If you already have a welder/lorax-composer:latest docker image you can skip running 'test-in-docker'.
diff --git a/f32-branch/_modules/composer/cli.html b/f32-branch/_modules/composer/cli.html
new file mode 100644
index 00000000..3fdc764b
--- /dev/null
+++ b/f32-branch/_modules/composer/cli.html
@@ -0,0 +1,259 @@
+composer.cli — Lorax 32.12 documentation

Source code for composer.cli

+#
+# composer-cli
+#
+# Copyright (C) 2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("composer-cli")
+
+from composer.cli.blueprints import blueprints_cmd
+from composer.cli.modules import modules_cmd
+from composer.cli.projects import projects_cmd
+from composer.cli.compose import compose_cmd
+from composer.cli.sources import sources_cmd
+from composer.cli.status import status_cmd
+from composer.cli.upload import upload_cmd
+from composer.cli.providers import providers_cmd
+
+command_map = {
+    "blueprints": blueprints_cmd,
+    "modules":    modules_cmd,
+    "projects":   projects_cmd,
+    "compose":    compose_cmd,
+    "sources":    sources_cmd,
+    "status":     status_cmd,
+    "upload":     upload_cmd,
+    "providers":  providers_cmd
+    }
+
+
+
[docs]def main(opts):
+    """ Main program execution
+
+    :param opts: Cmdline arguments
+    :type opts: argparse.Namespace
+    """
+
+    # Making sure opts.args is not empty (thus, has a command and subcommand)
+    # is already handled in src/bin/composer-cli.
+    if opts.args[0] not in command_map:
+        log.error("Unknown command %s", opts.args[0])
+        return 1
+    else:
+        try:
+            return command_map[opts.args[0]](opts)
+        except Exception as e:
+            log.error(str(e))
+            return 1
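As a rough sketch of how this dispatch is driven (the Namespace fields mirror the options defined by composer_cli_parser in composer.cli.cmdline; the values here are hypothetical and assume a running API socket):

    from argparse import Namespace
    from composer.cli import main

    # "blueprints" is looked up in command_map; blueprints_cmd then handles
    # the remaining arguments. An unknown command logs an error and returns 1.
    opts = Namespace(args=["blueprints", "list"], socket="/run/weldr/api.socket",
                     api_version="1", json=False, testmode=0, logfile=None, showver=False)
    rc = main(opts)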
\ No newline at end of file
diff --git a/f32-branch/_modules/composer/cli/blueprints.html b/f32-branch/_modules/composer/cli/blueprints.html
new file mode 100644
index 00000000..390a0422
--- /dev/null
+++ b/f32-branch/_modules/composer/cli/blueprints.html
@@ -0,0 +1,779 @@
+composer.cli.blueprints — Lorax 32.12 documentation

Source code for composer.cli.blueprints

+#
+# Copyright (C) 2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("composer-cli")
+
+import os
+
+from composer import http_client as client
+from composer.cli.help import blueprints_help
+from composer.cli.utilities import argify, frozen_toml_filename, toml_filename, handle_api_result
+from composer.cli.utilities import packageNEVRA
+
+
[docs]def blueprints_cmd(opts): + """Process blueprints commands + + :param opts: Cmdline arguments + :type opts: argparse.Namespace + :returns: Value to return from sys.exit() + :rtype: int + + This dispatches the blueprints commands to a function + """ + cmd_map = { + "list": blueprints_list, + "show": blueprints_show, + "changes": blueprints_changes, + "diff": blueprints_diff, + "save": blueprints_save, + "delete": blueprints_delete, + "depsolve": blueprints_depsolve, + "push": blueprints_push, + "freeze": blueprints_freeze, + "tag": blueprints_tag, + "undo": blueprints_undo, + "workspace": blueprints_workspace + } + if opts.args[1] == "help" or opts.args[1] == "--help": + print(blueprints_help) + return 0 + elif opts.args[1] not in cmd_map: + log.error("Unknown blueprints command: %s", opts.args[1]) + return 1 + + return cmd_map[opts.args[1]](opts.socket, opts.api_version, opts.args[2:], opts.json)
+ +
[docs]def blueprints_list(socket_path, api_version, args, show_json=False): + """Output the list of available blueprints + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + blueprints list + """ + api_route = client.api_url(api_version, "/blueprints/list") + result = client.get_url_json_unlimited(socket_path, api_route) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + # "list" should output a plain list of identifiers, one per line. + print("\n".join(result["blueprints"])) + + return rc
+ +
[docs]def blueprints_show(socket_path, api_version, args, show_json=False): + """Show the blueprints, in TOML format + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + blueprints show <blueprint,...> Display the blueprint in TOML format. + + Multiple blueprints will be separated by \n\n + """ + for blueprint in argify(args): + api_route = client.api_url(api_version, "/blueprints/info/%s?format=toml" % blueprint) + print(client.get_url_raw(socket_path, api_route) + "\n\n") + + return 0
+ +
[docs]def blueprints_changes(socket_path, api_version, args, show_json=False): + """Display the changes for each of the blueprints + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + blueprints changes <blueprint,...> Display the changes for each blueprint. + """ + def changes_total_fn(data): + """Return the maximum number of possible changes""" + + # Each blueprint can have a different total, return the largest one + return max([c["total"] for c in data["blueprints"]]) + + api_route = client.api_url(api_version, "/blueprints/changes/%s" % (",".join(argify(args)))) + result = client.get_url_json_unlimited(socket_path, api_route, total_fn=changes_total_fn) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + for blueprint in result["blueprints"]: + print(blueprint["name"]) + for change in blueprint["changes"]: + prettyCommitDetails(change) + + return rc
+ +
[docs]def prettyCommitDetails(change, indent=4):
+    """Print the blueprint's change in a nice way
+
+    :param change: The individual blueprint change dict
+    :type change: dict
+    :param indent: Number of spaces to indent
+    :type indent: int
+    """
+    def revision():
+        if change["revision"]:
+            return " revision %d" % change["revision"]
+        else:
+            return ""
+
+    print(" " * indent + change["timestamp"] + " " + change["commit"] + revision())
+    print(" " * indent + change["message"] + "\n")
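For illustration, a change dict with the fields used above (values invented) prints an indented two-line entry:

    change = {"timestamp": "2018-09-10T01:01:22Z",
              "commit": "eabf9c2cb4ab4dba",
              "revision": 1,
              "message": "Initial commit"}
    prettyCommitDetails(change)
    #     2018-09-10T01:01:22Z eabf9c2cb4ab4dba revision 1
    #     Initial commit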
+ +
[docs]def blueprints_diff(socket_path, api_version, args, show_json=False): + """Display the differences between 2 versions of a blueprint + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + blueprints diff <blueprint-name> Display the differences between 2 versions of a blueprint. + <from-commit> Commit hash or NEWEST + <to-commit> Commit hash, NEWEST, or WORKSPACE + """ + if len(args) == 0: + log.error("blueprints diff is missing the blueprint name, from commit, and to commit") + return 1 + elif len(args) == 1: + log.error("blueprints diff is missing the from commit, and the to commit") + return 1 + elif len(args) == 2: + log.error("blueprints diff is missing the to commit") + return 1 + + api_route = client.api_url(api_version, "/blueprints/diff/%s/%s/%s" % (args[0], args[1], args[2])) + result = client.get_url_json(socket_path, api_route) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + for diff in result["diff"]: + print(pretty_diff_entry(diff)) + + return rc
+ +
[docs]def pretty_dict(d): + """Return the dict as a human readable single line + + :param d: key/values + :type d: dict + :returns: String of the dict's keys and values + :rtype: str + + key="str", key="str1,str2", ... + """ + result = [] + for k in d: + if type(d[k]) == type(""): + result.append('%s="%s"' % (k, d[k])) + elif type(d[k]) == type([]) and type(d[k][0]) == type(""): + result.append('%s="%s"' % (k, ", ".join(d[k]))) + elif type(d[k]) == type([]) and type(d[k][0]) == type({}): + result.append('%s="%s"' % (k, pretty_dict(d[k]))) + return " ".join(result)
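A quick example of the flattening above (invented values): plain strings become key="value" pairs and lists of strings are joined with commas:

    pretty_dict({"name": "admin", "groups": ["wheel", "audio"]})
    # -> 'name="admin" groups="wheel, audio"'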
+ +
[docs]def dict_names(lst):
+    """Return a comma-separated list of the name/user field from each dict
+
+    :param lst: List of dicts to summarize
+    :type lst: list of dict
+    :returns: Comma-separated string of each dict's "user" or "name" field
+    :rtype: str
+
+    eg. "root, norm"
+    """
+    if "user" in lst[0]:
+        field_name = "user"
+    elif "name" in lst[0]:
+        field_name = "name"
+    else:
+        # Use the first field in sorted key order
+        field_name = sorted(lst[0].keys())[0]
+
+    return ", ".join(d[field_name] for d in lst)
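For example, with a list of invented group entries keyed by "name":

    dict_names([{"name": "root"}, {"name": "norm"}])
    # -> 'root, norm'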
+ +
[docs]def pretty_diff_entry(diff):
+    """Generate a nice diff entry string.
+
+    :param diff: Difference entry dict
+    :type diff: dict
+    :returns: Nice string
+    :rtype: str
+    """
+    if diff["old"] and diff["new"]:
+        change = "Changed"
+    elif diff["new"] and not diff["old"]:
+        change = "Added"
+    elif diff["old"] and not diff["new"]:
+        change = "Removed"
+    else:
+        change = "Unknown"
+
+    if diff["old"]:
+        name = list(diff["old"].keys())[0]
+    elif diff["new"]:
+        name = list(diff["new"].keys())[0]
+    else:
+        name = "Unknown"
+
+    def details(diff):
+        if change == "Changed":
+            if type(diff["old"][name]) == type(""):
+                if name == "Description" or " " in diff["old"][name]:
+                    return '"%s" -> "%s"' % (diff["old"][name], diff["new"][name])
+                else:
+                    return "%s -> %s" % (diff["old"][name], diff["new"][name])
+            elif name in ["Module", "Package"]:
+                return "%s %s -> %s" % (diff["old"][name]["name"], diff["old"][name]["version"],
+                                        diff["new"][name]["version"])
+            elif type(diff["old"][name]) == type([]):
+                if type(diff["old"][name][0]) == type(""):
+                    return "%s -> %s" % (" ".join(diff["old"][name]), " ".join(diff["new"][name]))
+                elif type(diff["old"][name][0]) == type({}):
+                    # Lists of dicts are too long to display in detail, just show their names
+                    return "%s -> %s" % (dict_names(diff["old"][name]), dict_names(diff["new"][name]))
+            elif type(diff["old"][name]) == type({}):
+                return "%s -> %s" % (pretty_dict(diff["old"][name]), pretty_dict(diff["new"][name]))
+            else:
+                return "Unknown"
+        elif change == "Added":
+            if name in ["Module", "Package"]:
+                return "%s %s" % (diff["new"][name]["name"], diff["new"][name]["version"])
+            elif name in ["Group"]:
+                return diff["new"][name]["name"]
+            elif type(diff["new"][name]) == type(""):
+                return diff["new"][name]
+            elif type(diff["new"][name]) == type([]):
+                if type(diff["new"][name][0]) == type(""):
+                    return " ".join(diff["new"][name])
+                elif type(diff["new"][name][0]) == type({}):
+                    # Lists of dicts are too long to display in detail, just show their names
+                    return dict_names(diff["new"][name])
+            elif type(diff["new"][name]) == type({}):
+                return pretty_dict(diff["new"][name])
+            else:
+                return "unknown/todo: %s" % type(diff["new"][name])
+        elif change == "Removed":
+            if name in ["Module", "Package"]:
+                return "%s %s" % (diff["old"][name]["name"], diff["old"][name]["version"])
+            elif name in ["Group"]:
+                return diff["old"][name]["name"]
+            elif type(diff["old"][name]) == type(""):
+                return diff["old"][name]
+            elif type(diff["old"][name]) == type([]):
+                if type(diff["old"][name][0]) == type(""):
+                    return " ".join(diff["old"][name])
+                elif type(diff["old"][name][0]) == type({}):
+                    # Lists of dicts are too long to display in detail, just show their names
+                    return dict_names(diff["old"][name])
+            elif type(diff["old"][name]) == type({}):
+                return pretty_dict(diff["old"][name])
+            else:
+                return "unknown/todo: %s" % type(diff["old"][name])
+
+    return change + " " + name + " " + details(diff)
+ +
[docs]def blueprints_save(socket_path, api_version, args, show_json=False): + """Save the blueprint to a TOML file + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + blueprints save <blueprint,...> Save the blueprint to a file, <blueprint-name>.toml + """ + for blueprint in argify(args): + api_route = client.api_url(api_version, "/blueprints/info/%s?format=toml" % blueprint) + blueprint_toml = client.get_url_raw(socket_path, api_route) + open(toml_filename(blueprint), "w").write(blueprint_toml) + + return 0
+ +
[docs]def blueprints_delete(socket_path, api_version, args, show_json=False): + """Delete a blueprint from the server + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + delete <blueprint> Delete a blueprint from the server + """ + api_route = client.api_url(api_version, "/blueprints/delete/%s" % args[0]) + result = client.delete_url_json(socket_path, api_route) + + return handle_api_result(result, show_json)[0]
+ +
[docs]def blueprints_depsolve(socket_path, api_version, args, show_json=False): + """Display the packages needed to install the blueprint + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + blueprints depsolve <blueprint,...> Display the packages needed to install the blueprint. + """ + api_route = client.api_url(api_version, "/blueprints/depsolve/%s" % (",".join(argify(args)))) + result = client.get_url_json(socket_path, api_route) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + for blueprint in result["blueprints"]: + if blueprint["blueprint"].get("version", ""): + print("blueprint: %s v%s" % (blueprint["blueprint"]["name"], blueprint["blueprint"]["version"])) + else: + print("blueprint: %s" % (blueprint["blueprint"]["name"])) + for dep in blueprint["dependencies"]: + print(" " + packageNEVRA(dep)) + + return rc
+ +
[docs]def blueprints_push(socket_path, api_version, args, show_json=False): + """Push a blueprint TOML file to the server, updating the blueprint + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + push <blueprint> Push a blueprint TOML file to the server. + """ + api_route = client.api_url(api_version, "/blueprints/new") + rval = 0 + for blueprint in argify(args): + if not os.path.exists(blueprint): + log.error("Missing blueprint file: %s", blueprint) + continue + blueprint_toml = open(blueprint, "r").read() + + result = client.post_url_toml(socket_path, api_route, blueprint_toml) + if handle_api_result(result, show_json)[0]: + rval = 1 + + return rval
+ +
[docs]def blueprints_freeze(socket_path, api_version, args, show_json=False): + """Handle the blueprints freeze commands + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + blueprints freeze <blueprint,...> Display the frozen blueprint's modules and packages. + blueprints freeze show <blueprint,...> Display the frozen blueprint in TOML format. + blueprints freeze save <blueprint,...> Save the frozen blueprint to a file, <blueprint-name>.frozen.toml. + """ + if args[0] == "show": + return blueprints_freeze_show(socket_path, api_version, args[1:], show_json) + elif args[0] == "save": + return blueprints_freeze_save(socket_path, api_version, args[1:], show_json) + + if len(args) == 0: + log.error("freeze is missing the blueprint name") + return 1 + + api_route = client.api_url(api_version, "/blueprints/freeze/%s" % (",".join(argify(args)))) + result = client.get_url_json(socket_path, api_route) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + for entry in result["blueprints"]: + blueprint = entry["blueprint"] + if blueprint.get("version", ""): + print("blueprint: %s v%s" % (blueprint["name"], blueprint["version"])) + else: + print("blueprint: %s" % (blueprint["name"])) + + for m in blueprint["modules"]: + print(" %s-%s" % (m["name"], m["version"])) + + for p in blueprint["packages"]: + print(" %s-%s" % (p["name"], p["version"])) + + return rc
+ +
[docs]def blueprints_freeze_show(socket_path, api_version, args, show_json=False): + """Show the frozen blueprint in TOML format + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + blueprints freeze show <blueprint,...> Display the frozen blueprint in TOML format. + """ + if len(args) == 0: + log.error("freeze show is missing the blueprint name") + return 1 + + for blueprint in argify(args): + api_route = client.api_url(api_version, "/blueprints/freeze/%s?format=toml" % blueprint) + print(client.get_url_raw(socket_path, api_route)) + + return 0
+ +
[docs]def blueprints_freeze_save(socket_path, api_version, args, show_json=False): + """Save the frozen blueprint to a TOML file + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + blueprints freeze save <blueprint,...> Save the frozen blueprint to a file, <blueprint-name>.frozen.toml. + """ + if len(args) == 0: + log.error("freeze save is missing the blueprint name") + return 1 + + for blueprint in argify(args): + api_route = client.api_url(api_version, "/blueprints/freeze/%s?format=toml" % blueprint) + blueprint_toml = client.get_url_raw(socket_path, api_route) + open(frozen_toml_filename(blueprint), "w").write(blueprint_toml) + + return 0
+ +
[docs]def blueprints_tag(socket_path, api_version, args, show_json=False): + """Tag the most recent blueprint commit as a release + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + blueprints tag <blueprint> Tag the most recent blueprint commit as a release. + """ + api_route = client.api_url(api_version, "/blueprints/tag/%s" % args[0]) + result = client.post_url(socket_path, api_route, "") + + return handle_api_result(result, show_json)[0]
+ +
[docs]def blueprints_undo(socket_path, api_version, args, show_json=False): + """Undo changes to a blueprint + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + blueprints undo <blueprint> <commit> Undo changes to a blueprint by reverting to the selected commit. + """ + if len(args) == 0: + log.error("undo is missing the blueprint name and commit hash") + return 1 + elif len(args) == 1: + log.error("undo is missing commit hash") + return 1 + + api_route = client.api_url(api_version, "/blueprints/undo/%s/%s" % (args[0], args[1])) + result = client.post_url(socket_path, api_route, "") + + return handle_api_result(result, show_json)[0]
+ +
[docs]def blueprints_workspace(socket_path, api_version, args, show_json=False): + """Push the blueprint TOML to the temporary workspace storage + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + blueprints workspace <blueprint> Push the blueprint TOML to the temporary workspace storage. + """ + api_route = client.api_url(api_version, "/blueprints/workspace") + rval = 0 + for blueprint in argify(args): + if not os.path.exists(blueprint): + log.error("Missing blueprint file: %s", blueprint) + continue + blueprint_toml = open(blueprint, "r").read() + + result = client.post_url_toml(socket_path, api_route, blueprint_toml) + if handle_api_result(result, show_json)[0]: + rval = 1 + + return rval
+
\ No newline at end of file
diff --git a/f32-branch/_modules/composer/cli/cmdline.html b/f32-branch/_modules/composer/cli/cmdline.html
new file mode 100644
index 00000000..8d36728b
--- /dev/null
+++ b/f32-branch/_modules/composer/cli/cmdline.html
@@ -0,0 +1,251 @@
+composer.cli.cmdline — Lorax 32.12 documentation

Source code for composer.cli.cmdline

+#
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import os
+import sys
+import argparse
+
+from composer import vernum
+from composer.cli.help import epilog
+
+VERSION = "{0}-{1}".format(os.path.basename(sys.argv[0]), vernum)
+
+
[docs]def composer_cli_parser():
+    """ Return the ArgumentParser for composer-cli"""
+
+    parser = argparse.ArgumentParser(description="Lorax Composer commandline tool",
+                                     epilog=epilog,
+                                     formatter_class=argparse.RawDescriptionHelpFormatter,
+                                     fromfile_prefix_chars="@")
+
+    parser.add_argument("-j", "--json", action="store_true", default=False,
+                        help="Output the raw JSON response instead of the normal output.")
+    parser.add_argument("-s", "--socket", default="/run/weldr/api.socket", metavar="SOCKET",
+                        help="Path to the socket file to listen on")
+    parser.add_argument("--log", dest="logfile", default=None, metavar="LOG",
+                        help="Path to logfile (./composer-cli.log)")
+    parser.add_argument("-a", "--api", dest="api_version", default="1", metavar="APIVER",
+                        help="API Version to use")
+    parser.add_argument("--test", dest="testmode", default=0, type=int, metavar="TESTMODE",
+                        help="Pass test mode to compose. 1=Mock compose with fail. 2=Mock compose with finished.")
+    parser.add_argument("-V", action="store_true", dest="showver",
+                        help="show program's version number and exit")
+
+    # Commands are implemented by parsing the remaining arguments outside of argparse
+    parser.add_argument('args', nargs=argparse.REMAINDER)
+
+    return parser
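A short usage sketch: the global flags are parsed by argparse, and everything after them lands in opts.args via argparse.REMAINDER for the command dispatch in composer.cli.main:

    parser = composer_cli_parser()
    opts = parser.parse_args(["--json", "blueprints", "list"])
    # opts.json == True
    # opts.args == ["blueprints", "list"]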
\ No newline at end of file
diff --git a/f32-branch/_modules/composer/cli/compose.html b/f32-branch/_modules/composer/cli/compose.html
new file mode 100644
index 00000000..bf5618a9
--- /dev/null
+++ b/f32-branch/_modules/composer/cli/compose.html
@@ -0,0 +1,889 @@
+composer.cli.compose — Lorax 32.12 documentation

Source code for composer.cli.compose

+#
+# Copyright (C) 2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("composer-cli")
+
+from datetime import datetime
+import sys
+import json
+import toml
+
+from composer import http_client as client
+from composer.cli.help import compose_help
+from composer.cli.utilities import argify, handle_api_result, packageNEVRA
+
+
[docs]def compose_cmd(opts): + """Process compose commands + + :param opts: Cmdline arguments + :type opts: argparse.Namespace + :returns: Value to return from sys.exit() + :rtype: int + + This dispatches the compose commands to a function + + compose_cmd expects api to be passed. eg. + + {"version": 1, "backend": "lorax-composer"} + + """ + result = client.get_url_json(opts.socket, "/api/status") + # Get the api version and fall back to 0 if it fails. + api_version = result.get("api", "0") + backend = result.get("backend", "unknown") + api = {"version": api_version, "backend": backend} + + cmd_map = { + "list": compose_list, + "status": compose_status, + "types": compose_types, + "start": compose_start, + "log": compose_log, + "cancel": compose_cancel, + "delete": compose_delete, + "info": compose_info, + "metadata": compose_metadata, + "results": compose_results, + "logs": compose_logs, + "image": compose_image, + "start-ostree": compose_ostree, + } + if opts.args[1] == "help" or opts.args[1] == "--help": + print(compose_help) + return 0 + elif opts.args[1] not in cmd_map: + log.error("Unknown compose command: %s", opts.args[1]) + return 1 + + return cmd_map[opts.args[1]](opts.socket, opts.api_version, opts.args[2:], opts.json, opts.testmode, api=api)
+ +
[docs]def get_size(args):
+    """Return the optional size argument, and the remaining args
+
+    :param args: List of remaining arguments from the cmdline
+    :type args: list of str
+    :returns: (args, size)
+    :rtype: tuple
+
+    - check the size argument for an int
+    - check the other args for --size in the wrong place
+    - no --size returns 0 for the size
+    - multiply by 1024**2 to make it easier on users to specify large sizes
+    """
+    if len(args) == 0:
+        return (args, 0)
+
+    if args[0] != "--size" and "--size" in args[1:]:
+        raise RuntimeError("--size must be first argument after the command")
+    if args[0] != "--size":
+        return (args, 0)
+
+    if len(args) < 2:
+        raise RuntimeError("--size is missing the value, in MiB")
+
+    # Let this raise an error for non-digit input
+    size = int(args[1])
+    return (args[2:], size * 1024**2)
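For example (hypothetical argument list), a leading --size is consumed and scaled from MiB to bytes while the remaining arguments pass through untouched:

    args, size = get_size(["--size", "2048", "http-server", "qcow2"])
    # args == ["http-server", "qcow2"]
    # size == 2048 * 1024**2 (2 GiB, in bytes)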
+ +
[docs]def compose_list(socket_path, api_version, args, show_json=False, testmode=0, api=None): + """Return a simple list of compose identifiers""" + + states = ("running", "waiting", "finished", "failed") + + which = set() + + if any(a not in states for a in args): + # TODO: error about unknown state + return 1 + elif not args: + which.update(states) + else: + which.update(args) + + results = [] + + if "running" in which or "waiting" in which: + api_route = client.api_url(api_version, "/compose/queue") + r = client.get_url_json(socket_path, api_route) + if "running" in which: + results += r["run"] + if "waiting" in which: + results += r["new"] + + if "finished" in which: + api_route = client.api_url(api_version, "/compose/finished") + r = client.get_url_json(socket_path, api_route) + results += r["finished"] + + if "failed" in which: + api_route = client.api_url(api_version, "/compose/failed") + r = client.get_url_json(socket_path, api_route) + results += r["failed"] + + if results: + if show_json: + print(json.dumps(results, indent=4)) + else: + list_fmt = "{id} {queue_status} {blueprint} {version} {compose_type}" + print("\n".join(list_fmt.format(**c) for c in results)) + + return 0
+ +
[docs]def compose_status(socket_path, api_version, args, show_json=False, testmode=0, api=None): + """Return the status of all known composes + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + This doesn't map directly to an API command, it combines the results from queue, finished, + and failed so raw JSON output is not available. + """ + def get_status(compose): + return {"id": compose["id"], + "blueprint": compose["blueprint"], + "version": compose["version"], + "compose_type": compose["compose_type"], + "image_size": compose["image_size"], + "status": compose["queue_status"], + "created": compose.get("job_created"), + "started": compose.get("job_started"), + "finished": compose.get("job_finished")} + + # Sort the status in a specific order + def sort_status(a): + order = ["RUNNING", "WAITING", "FINISHED", "FAILED"] + return (order.index(a["status"]), a["blueprint"], a["version"], a["compose_type"]) + + status = [] + + # Get the composes currently in the queue + api_route = client.api_url(api_version, "/compose/queue") + result = client.get_url_json(socket_path, api_route) + status.extend(list(map(get_status, result["run"] + result["new"]))) + + # Get the list of finished composes + api_route = client.api_url(api_version, "/compose/finished") + result = client.get_url_json(socket_path, api_route) + status.extend(list(map(get_status, result["finished"]))) + + # Get the list of failed composes + api_route = client.api_url(api_version, "/compose/failed") + result = client.get_url_json(socket_path, api_route) + status.extend(list(map(get_status, result["failed"]))) + + # Sort them by status (running, waiting, finished, failed) and then by name and version. + status.sort(key=sort_status) + + if show_json: + print(json.dumps(status, indent=4)) + return 0 + + # Print them as UUID blueprint STATUS + for c in status: + if c["image_size"] > 0: + image_size = str(c["image_size"]) + else: + image_size = "" + + dt = datetime.fromtimestamp(c.get("finished") or c.get("started") or c.get("created")) + + print("%s %-8s %s %-15s %s %-16s %s" % (c["id"], c["status"], dt.strftime("%c"), c["blueprint"], + c["version"], c["compose_type"], image_size))
+ + +
[docs]def compose_types(socket_path, api_version, args, show_json=False, testmode=0, api=None): + """Return information about the supported compose types + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + Add additional details to types that are known to composer-cli. Raw JSON output does not + include this extra information. + """ + api_route = client.api_url(api_version, "/compose/types") + result = client.get_url_json(socket_path, api_route) + if show_json: + print(json.dumps(result, indent=4)) + return 0 + + # output a plain list of identifiers, one per line + print("\n".join(t["name"] for t in result["types"] if t["enabled"]))
+ +
[docs]def compose_start(socket_path, api_version, args, show_json=False, testmode=0, api=None): + """Start a new compose using the selected blueprint and type + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: Set to 1 to simulate a failed compose, set to 2 to simulate a finished one. + :type testmode: int + :param api: Details about the API server, "version" and "backend" + :type api: dict + + compose start [--size XXX] <blueprint-name> <compose-type> [<image-name> <provider> <profile> | <image-name> <profile.toml>] + """ + if api == None: + log.error("Missing api version/backend") + return 1 + + # Get the optional size before checking other parameters + try: + args, size = get_size(args) + except (RuntimeError, ValueError) as e: + log.error(str(e)) + return 1 + + if len(args) == 0: + log.error("start is missing the blueprint name and output type") + return 1 + if len(args) == 1: + log.error("start is missing the output type") + return 1 + if len(args) == 3: + log.error("start is missing the provider and profile details") + return 1 + + config = { + "blueprint_name": args[0], + "compose_type": args[1], + "branch": "master" + } + if size > 0: + if api["backend"] == "lorax-composer": + log.warning("lorax-composer does not support --size, it will be ignored.") + else: + config["size"] = size + + if len(args) == 4: + config["upload"] = {"image_name": args[2]} + # profile TOML file (maybe) + try: + config["upload"].update(toml.load(args[3])) + except toml.TomlDecodeError as e: + log.error(str(e)) + return 1 + elif len(args) == 5: + config["upload"] = { + "image_name": args[2], + "provider": args[3], + "profile": args[4] + } + + if testmode: + test_url = "?test=%d" % testmode + else: + test_url = "" + api_route = client.api_url(api_version, "/compose" + test_url) + result = client.post_url_json(socket_path, api_route, json.dumps(config)) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + print("Compose %s added to the queue" % result["build_id"]) + + if "upload_id" in result and result["upload_id"]: + print ("Upload %s added to the upload queue" % result["upload_id"]) + + return rc
+ +
[docs]def compose_ostree(socket_path, api_version, args, show_json=False, testmode=0, api=None):
+    """Start a new ostree compose using the selected blueprint and type
+
+    :param socket_path: Path to the Unix socket to use for API communication
+    :type socket_path: str
+    :param api_version: Version of the API to talk to. eg. "0"
+    :type api_version: str
+    :param args: List of remaining arguments from the cmdline
+    :type args: list of str
+    :param show_json: Set to True to show the JSON output instead of the human readable output
+    :type show_json: bool
+    :param testmode: Set to 1 to simulate a failed compose, set to 2 to simulate a finished one.
+    :type testmode: int
+    :param api: Details about the API server, "version" and "backend"
+    :type api: dict
+
+    compose start-ostree [--size XXX] <blueprint-name> <compose-type> <ostree-ref> <ostree-parent> [<image-name> <provider> <profile> | <image-name> <profile.toml>]
+    """
+    if api is None:
+        log.error("Missing api version/backend")
+        return 1
+
+    if api["backend"] == "lorax-composer":
+        log.warning("lorax-composer does not support start-ostree.")
+        return 1
+
+    # Get the optional size before checking other parameters
+    try:
+        args, size = get_size(args)
+    except (RuntimeError, ValueError) as e:
+        log.error(str(e))
+        return 1
+
+    if len(args) == 0:
+        log.error("start-ostree is missing the blueprint name, output type, and ostree details")
+        return 1
+    if len(args) == 1:
+        log.error("start-ostree is missing the output type")
+        return 1
+    if len(args) == 2:
+        log.error("start-ostree is missing the ostree reference")
+        return 1
+    if len(args) == 3:
+        log.error("start-ostree is missing the ostree parent")
+        return 1
+    if len(args) == 5:
+        log.error("start-ostree is missing the provider and profile details")
+        return 1
+
+    config = {
+        "blueprint_name": args[0],
+        "compose_type": args[1],
+        "branch": "master",
+        "ostree": {"ref": args[2], "parent": args[3]},
+    }
+    if size > 0:
+        config["size"] = size
+
+    if len(args) == 6:
+        config["upload"] = {"image_name": args[4]}
+        # profile TOML file (maybe)
+        try:
+            config["upload"].update(toml.load(args[5]))
+        except toml.TomlDecodeError as e:
+            log.error(str(e))
+            return 1
+    elif len(args) == 7:
+        config["upload"] = {
+            "image_name": args[4],
+            "provider": args[5],
+            "profile": args[6]
+        }
+
+    if testmode:
+        test_url = "?test=%d" % testmode
+    else:
+        test_url = ""
+    api_route = client.api_url(api_version, "/compose" + test_url)
+    result = client.post_url_json(socket_path, api_route, json.dumps(config))
+    (rc, exit_now) = handle_api_result(result, show_json)
+    if exit_now:
+        return rc
+
+    print("Compose %s added to the queue" % result["build_id"])
+
+    if "upload_id" in result and result["upload_id"]:
+        print("Upload %s added to the upload queue" % result["upload_id"])
+
+    return rc
+ +
[docs]def compose_log(socket_path, api_version, args, show_json=False, testmode=0, api=None): + """Show the last part of the compose log + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + compose log <uuid> [<size>kB] + + This will display the last 1kB of the compose's log file. Can be used to follow progress + during the build. + """ + if len(args) == 0: + log.error("log is missing the compose build id") + return 1 + if len(args) == 2: + try: + log_size = int(args[1]) + except ValueError: + log.error("Log size must be an integer.") + return 1 + else: + log_size = 1024 + + api_route = client.api_url(api_version, "/compose/log/%s?size=%d" % (args[0], log_size)) + try: + result = client.get_url_raw(socket_path, api_route) + except RuntimeError as e: + print(str(e)) + return 1 + + print(result) + return 0
+ +
[docs]def compose_cancel(socket_path, api_version, args, show_json=False, testmode=0, api=None): + """Cancel a running compose + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + compose cancel <uuid> + + This will cancel a running compose. It does nothing if the compose has finished. + """ + if len(args) == 0: + log.error("cancel is missing the compose build id") + return 1 + + api_route = client.api_url(api_version, "/compose/cancel/%s" % args[0]) + result = client.delete_url_json(socket_path, api_route) + return handle_api_result(result, show_json)[0]
+ +
[docs]def compose_delete(socket_path, api_version, args, show_json=False, testmode=0, api=None): + """Delete a finished compose's results + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + compose delete <uuid,...> + + Delete the listed compose results. It will only delete results for composes that have finished + or failed, not a running compose. + """ + if len(args) == 0: + log.error("delete is missing the compose build id") + return 1 + + api_route = client.api_url(api_version, "/compose/delete/%s" % (",".join(argify(args)))) + result = client.delete_url_json(socket_path, api_route) + return handle_api_result(result, show_json)[0]
+ +
[docs]def compose_info(socket_path, api_version, args, show_json=False, testmode=0, api=None): + """Return detailed information about the compose + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + compose info <uuid> + + This returns information about the compose, including the blueprint and the dependencies. + """ + if len(args) == 0: + log.error("info is missing the compose build id") + return 1 + + api_route = client.api_url(api_version, "/compose/info/%s" % args[0]) + result = client.get_url_json(socket_path, api_route) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + if result["image_size"] > 0: + image_size = str(result["image_size"]) + else: + image_size = "" + + + print("%s %-8s %-15s %s %-16s %s" % (result["id"], + result["queue_status"], + result["blueprint"]["name"], + result["blueprint"]["version"], + result["compose_type"], + image_size)) + print("Packages:") + for p in result["blueprint"]["packages"]: + print(" %s-%s" % (p["name"], p["version"])) + + print("Modules:") + for m in result["blueprint"]["modules"]: + print(" %s-%s" % (m["name"], m["version"])) + + print("Dependencies:") + for d in result["deps"]["packages"]: + print(" " + packageNEVRA(d)) + + return rc
+ +
[docs]def compose_metadata(socket_path, api_version, args, show_json=False, testmode=0, api=None): + """Download a tar file of the compose's metadata + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + compose metadata <uuid> + + Saves the metadata as uuid-metadata.tar + """ + if len(args) == 0: + log.error("metadata is missing the compose build id") + return 1 + + api_route = client.api_url(api_version, "/compose/metadata/%s" % args[0]) + try: + rc = client.download_file(socket_path, api_route) + except RuntimeError as e: + print(str(e)) + rc = 1 + + return rc
+ +
[docs]def compose_results(socket_path, api_version, args, show_json=False, testmode=0, api=None): + """Download a tar file of the compose's results + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + compose results <uuid> + + The results includes the metadata, output image, and logs. + It is saved as uuid.tar + """ + if len(args) == 0: + log.error("results is missing the compose build id") + return 1 + + api_route = client.api_url(api_version, "/compose/results/%s" % args[0]) + try: + rc = client.download_file(socket_path, api_route, sys.stdout.isatty()) + except RuntimeError as e: + print(str(e)) + rc = 1 + + return rc
+ +
[docs]def compose_logs(socket_path, api_version, args, show_json=False, testmode=0, api=None): + """Download a tar of the compose's logs + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + compose logs <uuid> + + Saves the logs as uuid-logs.tar + """ + if len(args) == 0: + log.error("logs is missing the compose build id") + return 1 + + api_route = client.api_url(api_version, "/compose/logs/%s" % args[0]) + try: + rc = client.download_file(socket_path, api_route, sys.stdout.isatty()) + except RuntimeError as e: + print(str(e)) + rc = 1 + + return rc
+ +
[docs]def compose_image(socket_path, api_version, args, show_json=False, testmode=0, api=None): + """Download the compose's output image + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + compose image <uuid> + + This downloads only the result image, saving it as the image name, which depends on the type + of compose that was selected. + """ + if len(args) == 0: + log.error("logs is missing the compose build id") + return 1 + + api_route = client.api_url(api_version, "/compose/image/%s" % args[0]) + try: + rc = client.download_file(socket_path, api_route, sys.stdout.isatty()) + except RuntimeError as e: + print(str(e)) + rc = 1 + + return rc
\ No newline at end of file
diff --git a/f32-branch/_modules/composer/cli/modules.html b/f32-branch/_modules/composer/cli/modules.html
new file mode 100644
index 00000000..f3e33a48
--- /dev/null
+++ b/f32-branch/_modules/composer/cli/modules.html
@@ -0,0 +1,249 @@
+composer.cli.modules — Lorax 32.12 documentation

Source code for composer.cli.modules

+#
+# Copyright (C) 2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("composer-cli")
+
+from composer import http_client as client
+from composer.cli.help import modules_help
+from composer.cli.utilities import handle_api_result
+
+
[docs]def modules_cmd(opts): + """Process modules commands + + :param opts: Cmdline arguments + :type opts: argparse.Namespace + :returns: Value to return from sys.exit() + :rtype: int + """ + if opts.args[1] == "help" or opts.args[1] == "--help": + print(modules_help) + return 0 + elif opts.args[1] != "list": + log.error("Unknown modules command: %s", opts.args[1]) + return 1 + + api_route = client.api_url(opts.api_version, "/modules/list") + result = client.get_url_json_unlimited(opts.socket, api_route) + (rc, exit_now) = handle_api_result(result, opts.json) + if exit_now: + return rc + + # "list" should output a plain list of identifiers, one per line. + print("\n".join(r["name"] for r in result["modules"])) + + return rc
\ No newline at end of file
diff --git a/f32-branch/_modules/composer/cli/projects.html b/f32-branch/_modules/composer/cli/projects.html
new file mode 100644
index 00000000..7c57f1e8
--- /dev/null
+++ b/f32-branch/_modules/composer/cli/projects.html
@@ -0,0 +1,311 @@
+composer.cli.projects — Lorax 32.12 documentation

Source code for composer.cli.projects

+#
+# Copyright (C) 2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("composer-cli")
+
+import textwrap
+
+from composer import http_client as client
+from composer.cli.help import projects_help
+from composer.cli.utilities import handle_api_result
+
+
[docs]def projects_cmd(opts): + """Process projects commands + + :param opts: Cmdline arguments + :type opts: argparse.Namespace + :returns: Value to return from sys.exit() + :rtype: int + """ + cmd_map = { + "list": projects_list, + "info": projects_info, + } + if opts.args[1] == "help" or opts.args[1] == "--help": + print(projects_help) + return 0 + elif opts.args[1] not in cmd_map: + log.error("Unknown projects command: %s", opts.args[1]) + return 1 + + return cmd_map[opts.args[1]](opts.socket, opts.api_version, opts.args[2:], opts.json)
+ +
[docs]def projects_list(socket_path, api_version, args, show_json=False): + """Output the list of available projects + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + projects list + """ + api_route = client.api_url(api_version, "/projects/list") + result = client.get_url_json_unlimited(socket_path, api_route) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + for proj in result["projects"]: + for k in [field for field in ("name", "summary", "homepage", "description") if proj[field]]: + print("%s: %s" % (k.title(), textwrap.fill(proj[k], subsequent_indent=" " * (len(k)+2)))) + print("\n\n") + + return rc
+ +
[docs]def projects_info(socket_path, api_version, args, show_json=False): + """Output info on a list of projects + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + projects info <project,...> + """ + if len(args) == 0: + log.error("projects info is missing the packages") + return 1 + + api_route = client.api_url(api_version, "/projects/info/%s" % ",".join(args)) + result = client.get_url_json(socket_path, api_route) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + for proj in result["projects"]: + for k in [field for field in ("name", "summary", "homepage", "description") if proj[field]]: + print("%s: %s" % (k.title(), textwrap.fill(proj[k], subsequent_indent=" " * (len(k)+2)))) + print("Builds: ") + for build in proj["builds"]: + print(" %s%s-%s.%s at %s for %s" % ("" if not build["epoch"] else str(build["epoch"]) + ":", + build["source"]["version"], + build["release"], + build["arch"], + build["build_time"], + build["changelog"])) + print("") + return rc
\ No newline at end of file
diff --git a/f32-branch/_modules/composer/cli/providers.html b/f32-branch/_modules/composer/cli/providers.html
new file mode 100644
index 00000000..c7edc494
--- /dev/null
+++ b/f32-branch/_modules/composer/cli/providers.html
@@ -0,0 +1,523 @@

Source code for composer.cli.providers

+#
+# Copyright (C) 2019  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("composer-cli")
+
+import json
+import toml
+import os
+
+from composer import http_client as client
+from composer.cli.help import providers_help
+from composer.cli.utilities import handle_api_result, toml_filename
+
+
[docs]def providers_cmd(opts): + """Process providers commands + + :param opts: Cmdline arguments + :type opts: argparse.Namespace + :returns: Value to return from sys.exit() + :rtype: int + + This dispatches the providers commands to a function + """ + cmd_map = { + "list": providers_list, + "info": providers_info, + "show": providers_show, + "push": providers_push, + "save": providers_save, + "delete": providers_delete, + "template": providers_template + } + if opts.args[1] == "help" or opts.args[1] == "--help": + print(providers_help) + return 0 + elif opts.args[1] not in cmd_map: + log.error("Unknown providers command: %s", opts.args[1]) + return 1 + + return cmd_map[opts.args[1]](opts.socket, opts.api_version, opts.args[2:], opts.json, opts.testmode)
+ +
[docs]def providers_list(socket_path, api_version, args, show_json=False, testmode=0): + """Return the list of providers + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + providers list + """ + api_route = client.api_url(api_version, "/upload/providers") + r = client.get_url_json(socket_path, api_route) + results = r["providers"] + if not results: + return 0 + + if show_json: + print(json.dumps(results, indent=4)) + else: + if len(args) == 1: + if args[0] not in results: + log.error("%s is not a valid provider", args[0]) + return 1 + print("\n".join(sorted(results[args[0]]["profiles"].keys()))) + else: + print("\n".join(sorted(results.keys()))) + + return 0
+ +
[docs]def providers_info(socket_path, api_version, args, show_json=False, testmode=0): + """Show information about each provider + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + providers info <PROVIDER> + """ + if len(args) == 0: + log.error("info is missing the provider name") + return 1 + + api_route = client.api_url(api_version, "/upload/providers") + r = client.get_url_json(socket_path, api_route) + results = r["providers"] + if not results: + return 0 + + if show_json: + print(json.dumps(results, indent=4)) + else: + if args[0] not in results: + log.error("%s is not a valid provider", args[0]) + return 1 + p = results[args[0]] + print("%s supports these image types: %s" % (p["display"], ", ".join(p["supported_types"]))) + print("Settings:") + for k in p["settings-info"]: + f = p["settings-info"][k] + print(" %-20s: %s is a %s" % (k, f["display"], f["type"])) + + return 0
+ +
[docs]def providers_show(socket_path, api_version, args, show_json=False, testmode=0): + """Return details about a provider + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + providers show <provider> <profile> + """ + if len(args) == 0: + log.error("show is missing the provider name") + return 1 + if len(args) == 1: + log.error("show is missing the profile name") + return 1 + + api_route = client.api_url(api_version, "/upload/providers") + r = client.get_url_json(socket_path, api_route) + results = r["providers"] + if not results: + return 0 + + if show_json: + print(json.dumps(results, indent=4)) + else: + if args[0] not in results: + log.error("%s is not a valid provider", args[0]) + return 1 + if args[1] not in results[args[0]]["profiles"]: + log.error("%s is not a valid %s profile", args[1], args[0]) + return 1 + + # Print the details for this profile + # fields are different for each provider, so we just print out the key:values + for k in results[args[0]]["profiles"][args[1]]: + print("%s: %s" % (k, results[args[0]]["profiles"][args[1]][k])) + return 0
+ +
[docs]def providers_push(socket_path, api_version, args, show_json=False, testmode=0): + """Add a new provider profile or overwrite an existing one + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + providers push <profile.toml> + + """ + if len(args) == 0: + log.error("push is missing the profile TOML file") + return 1 + if not os.path.exists(args[0]): + log.error("Missing profile TOML file: %s", args[0]) + return 1 + + api_route = client.api_url(api_version, "/upload/providers/save") + profile = toml.load(args[0]) + result = client.post_url_json(socket_path, api_route, json.dumps(profile)) + return handle_api_result(result, show_json)[0]
+ +
[docs]def providers_save(socket_path, api_version, args, show_json=False, testmode=0): + """Save a provider's profile to a TOML file + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + providers save <provider> <profile> + + """ + if len(args) == 0: + log.error("save is missing the provider name") + return 1 + if len(args) == 1: + log.error("save is missing the profile name") + return 1 + + api_route = client.api_url(api_version, "/upload/providers") + r = client.get_url_json(socket_path, api_route) + results = r["providers"] + if not results: + return 0 + + if show_json: + print(json.dumps(results, indent=4)) + else: + if args[0] not in results: + log.error("%s is not a valid provider", args[0]) + return 1 + if args[1] not in results[args[0]]["profiles"]: + log.error("%s is not a valid %s profile", args[1], args[0]) + return 1 + + profile = { + "provider": args[0], + "profile": args[1], + "settings": results[args[0]]["profiles"][args[1]] + } + # Use a context manager so the file is closed promptly + with open(toml_filename(args[1]), "w") as f: + f.write(toml.dumps(profile)) + + return 0
+ +
[docs]def providers_delete(socket_path, api_version, args, show_json=False, testmode=0): + """Delete a profile from a provider + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + providers delete <provider> <profile> + + """ + if len(args) == 0: + log.error("delete is missing the provider name") + return 1 + if len(args) == 1: + log.error("delete is missing the profile name") + return 1 + + api_route = client.api_url(api_version, "/upload/providers/delete/%s/%s" % (args[0], args[1])) + result = client.delete_url_json(socket_path, api_route) + return handle_api_result(result, show_json)[0]
+ +
[docs]def providers_template(socket_path, api_version, args, show_json=False, testmode=0): + """Return a TOML template for setting the provider's fields + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + providers template <provider> + """ + if len(args) == 0: + log.error("template is missing the provider name") + return 1 + + api_route = client.api_url(api_version, "/upload/providers") + r = client.get_url_json(socket_path, api_route) + results = r["providers"] + if not results: + return 0 + + if show_json: + print(json.dumps(results, indent=4)) + return 0 + + if args[0] not in results: + log.error("%s is not a valid provider", args[0]) + return 1 + + template = {"provider": args[0]} + settings = results[args[0]]["settings-info"] + template["settings"] = dict([(k, settings[k]["display"]) for k in settings]) + print(toml.dumps(template)) + + return 0
\ No newline at end of file
diff --git a/f32-branch/_modules/composer/cli/sources.html b/f32-branch/_modules/composer/cli/sources.html
new file mode 100644
index 00000000..6d1065a9
--- /dev/null
+++ b/f32-branch/_modules/composer/cli/sources.html
@@ -0,0 +1,353 @@

Source code for composer.cli.sources

+#
+# Copyright (C) 2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("composer-cli")
+
+import os
+
+from composer import http_client as client
+from composer.cli.help import sources_help
+from composer.cli.utilities import argify, handle_api_result
+
+
[docs]def sources_cmd(opts): + """Process sources commands + + :param opts: Cmdline arguments + :type opts: argparse.Namespace + :returns: Value to return from sys.exit() + :rtype: int + """ + cmd_map = { + "list": sources_list, + "info": sources_info, + "add": sources_add, + "change": sources_add, + "delete": sources_delete, + } + if opts.args[1] == "help" or opts.args[1] == "--help": + print(sources_help) + return 0 + elif opts.args[1] not in cmd_map: + log.error("Unknown sources command: %s", opts.args[1]) + return 1 + + return cmd_map[opts.args[1]](opts.socket, opts.api_version, opts.args[2:], opts.json)
+ +
[docs]def sources_list(socket_path, api_version, args, show_json=False): + """Output the list of available sources + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + sources list + """ + api_route = client.api_url(api_version, "/projects/source/list") + result = client.get_url_json(socket_path, api_route) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + # "list" should output a plain list of identifiers, one per line. + print("\n".join(result["sources"])) + return rc
+ +
[docs]def sources_info(socket_path, api_version, args, show_json=False): + """Output info on a list of sources + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + sources info <source-name> + """ + if len(args) == 0: + log.error("sources info is missing the name of the source") + return 1 + + if show_json: + api_route = client.api_url(api_version, "/projects/source/info/%s" % ",".join(args)) + result = client.get_url_json(socket_path, api_route) + rc = handle_api_result(result, show_json)[0] + else: + api_route = client.api_url(api_version, "/projects/source/info/%s?format=toml" % ",".join(args)) + try: + result = client.get_url_raw(socket_path, api_route) + print(result) + rc = 0 + except RuntimeError as e: + print(str(e)) + rc = 1 + + return rc
+ +
[docs]def sources_add(socket_path, api_version, args, show_json=False): + """Add or change a source + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + sources add <source.toml> + """ + api_route = client.api_url(api_version, "/projects/source/new") + rval = 0 + for source in argify(args): + if not os.path.exists(source): + log.error("Missing source file: %s", source) + continue + # Use a context manager so the file is closed promptly + with open(source, "r") as f: + source_toml = f.read() + + result = client.post_url_toml(socket_path, api_route, source_toml) + if handle_api_result(result, show_json)[0]: + rval = 1 + return rval
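As a sketch of the input sources_add expects, here is a hypothetical source definition; the field names follow the /projects/source/new route used above, but the repository name and URL are invented:

    source_toml = '''
    name = "custom-repo-1"
    type = "yum-baseurl"
    url = "https://repo.example.com/custom/$basearch/"
    check_ssl = true
    check_gpg = false
    '''
    # sources_add reads each file and POSTs it verbatim via post_url_toml()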
+ +
[docs]def sources_delete(socket_path, api_version, args, show_json=False): + """Delete a source + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + + sources delete <source-name> + """ + api_route = client.api_url(api_version, "/projects/source/delete/%s" % args[0]) + result = client.delete_url_json(socket_path, api_route) + + return handle_api_result(result, show_json)[0]
\ No newline at end of file
diff --git a/f32-branch/_modules/composer/cli/status.html b/f32-branch/_modules/composer/cli/status.html
new file mode 100644
index 00000000..f7c57edb
--- /dev/null
+++ b/f32-branch/_modules/composer/cli/status.html
@@ -0,0 +1,257 @@

Source code for composer.cli.status

+#
+# Copyright (C) 2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("composer-cli")
+
+from composer import http_client as client
+from composer.cli.help import status_help
+from composer.cli.utilities import handle_api_result
+
+
[docs]def status_cmd(opts): + """Process status commands + + :param opts: Cmdline arguments + :type opts: argparse.Namespace + :returns: Value to return from sys.exit() + :rtype: int + """ + if opts.args[1] == "help" or opts.args[1] == "--help": + print(status_help) + return 0 + elif opts.args[1] != "show": + log.error("Unknown status command: %s", opts.args[1]) + return 1 + + result = client.get_url_json(opts.socket, "/api/status") + (rc, exit_now) = handle_api_result(result, opts.json) + if exit_now: + return rc + + print("API server status:") + print(" Database version: " + result["db_version"]) + print(" Database supported: %s" % result["db_supported"]) + print(" Schema version: " + result["schema_version"]) + print(" API version: " + result["api"]) + print(" Backend: " + result["backend"]) + print(" Build: " + result["build"]) + + if result["msgs"]: + print("Error messages:") + print("\n".join([" " + r for r in result["msgs"]])) + + return rc
\ No newline at end of file
diff --git a/f32-branch/_modules/composer/cli/upload.html b/f32-branch/_modules/composer/cli/upload.html
new file mode 100644
index 00000000..93088b68
--- /dev/null
+++ b/f32-branch/_modules/composer/cli/upload.html
@@ -0,0 +1,478 @@

Source code for composer.cli.upload

+#
+# Copyright (C) 2019  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("composer-cli")
+
+import json
+import toml
+import os
+
+from composer import http_client as client
+from composer.cli.help import upload_help
+from composer.cli.utilities import handle_api_result
+
+
[docs]def upload_cmd(opts): + """Process upload commands + + :param opts: Cmdline arguments + :type opts: argparse.Namespace + :returns: Value to return from sys.exit() + :rtype: int + + This dispatches the upload commands to a function + """ + cmd_map = { + "list": upload_list, + "info": upload_info, + "start": upload_start, + "log": upload_log, + "cancel": upload_cancel, + "delete": upload_delete, + "reset": upload_reset, + } + if opts.args[1] == "help" or opts.args[1] == "--help": + print(upload_help) + return 0 + elif opts.args[1] not in cmd_map: + log.error("Unknown upload command: %s", opts.args[1]) + return 1 + + return cmd_map[opts.args[1]](opts.socket, opts.api_version, opts.args[2:], opts.json, opts.testmode)
+ +
[docs]def upload_list(socket_path, api_version, args, show_json=False, testmode=0): + """Return the composes and their associated upload uuids and status + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + upload list + """ + api_route = client.api_url(api_version, "/compose/finished") + r = client.get_url_json(socket_path, api_route) + results = r["finished"] + if not results: + return 0 + + if show_json: + print(json.dumps(results, indent=4)) + else: + compose_fmt = "{id} {queue_status} {blueprint} {version} {compose_type}" + upload_fmt = ' {uuid} "{image_name}" {provider_name} {status}' + for c in results: + print(compose_fmt.format(**c)) + print("\n".join(upload_fmt.format(**u) for u in c["uploads"])) + print() + + return 0
+ +
[docs]def upload_info(socket_path, api_version, args, show_json=False, testmode=0): + """Return detailed information about the upload + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + upload info <uuid> + + This returns information about the upload, including uuid, name, status, service, and image. + """ + if len(args) == 0: + log.error("info is missing the upload uuid") + return 1 + + api_route = client.api_url(api_version, "/upload/info/%s" % args[0]) + result = client.get_url_json(socket_path, api_route) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + image_path = result["upload"]["image_path"] + print("%s %-8s %-15s %-8s %s" % (result["upload"]["uuid"], + result["upload"]["status"], + result["upload"]["image_name"], + result["upload"]["provider_name"], + os.path.basename(image_path) if image_path else "UNFINISHED")) + + return rc
+ +
[docs]def upload_start(socket_path, api_version, args, show_json=False, testmode=0): + """Start an upload of a build uuid image + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + upload start <build-uuid> <image-name> [<provider> <profile> | <profile.toml>] + """ + if len(args) == 0: + log.error("start is missing the compose build id") + return 1 + if len(args) == 1: + log.error("start is missing the image name") + return 1 + if len(args) == 2: + log.error("start is missing the provider and profile details") + return 1 + + body = {"image_name": args[1]} + if len(args) == 3: + try: + body.update(toml.load(args[2])) + except toml.TomlDecodeError as e: + log.error(str(e)) + return 1 + elif len(args) == 4: + body["provider"] = args[2] + body["profile"] = args[3] + else: + log.error("start has incorrect number of arguments") + return 1 + + api_route = client.api_url(api_version, "/compose/uploads/schedule/%s" % args[0]) + result = client.post_url_json(socket_path, api_route, json.dumps(body)) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + print("Upload %s added to the queue" % result["upload_id"]) + return rc
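The two accepted argument forms, with illustrative values (the UUID, image name, provider, and profile names are assumptions):

    # Provider and profile given by name (4 args):
    #   upload start <build-uuid> my-image.ami aws production
    # Or a profile TOML file (3 args), e.g. one written by "providers save",
    # which supplies "provider" and "profile" itself:
    #   upload start <build-uuid> my-image.ami aws-production.toml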
+ +
[docs]def upload_log(socket_path, api_version, args, show_json=False, testmode=0): + """Return the upload log + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + upload log <build-uuid> + """ + if len(args) == 0: + log.error("log is missing the upload uuid") + return 1 + + api_route = client.api_url(api_version, "/upload/log/%s" % args[0]) + result = client.get_url_json(socket_path, api_route) + (rc, exit_now) = handle_api_result(result, show_json) + if exit_now: + return rc + + print("Upload log for %s:\n" % result["upload_id"]) + print(result["log"]) + + return 0
+ +
[docs]def upload_cancel(socket_path, api_version, args, show_json=False, testmode=0): + """Cancel the queued or running upload + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + upload cancel <build-uuid> + """ + if len(args) == 0: + log.error("cancel is missing the upload uuid") + return 1 + + api_route = client.api_url(api_version, "/upload/cancel/%s" % args[0]) + result = client.delete_url_json(socket_path, api_route) + return handle_api_result(result, show_json)[0]
+ +
[docs]def upload_delete(socket_path, api_version, args, show_json=False, testmode=0): + """Delete an upload and remove it from the build + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + upload delete <build-uuid> + """ + if len(args) == 0: + log.error("delete is missing the upload uuid") + return 1 + + api_route = client.api_url(api_version, "/upload/delete/%s" % args[0]) + result = client.delete_url_json(socket_path, api_route) + return handle_api_result(result, show_json)[0]
+ +
[docs]def upload_reset(socket_path, api_version, args, show_json=False, testmode=0): + """Reset the upload and execute it again + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param api_version: Version of the API to talk to. eg. "0" + :type api_version: str + :param args: List of remaining arguments from the cmdline + :type args: list of str + :param show_json: Set to True to show the JSON output instead of the human readable output + :type show_json: bool + :param testmode: unused in this function + :type testmode: int + + upload reset <build-uuid> + """ + if len(args) == 0: + log.error("reset is missing the upload uuid") + return 1 + + api_route = client.api_url(api_version, "/upload/reset/%s" % args[0]) + result = client.post_url_json(socket_path, api_route, json.dumps({})) + return handle_api_result(result, show_json)[0]
\ No newline at end of file
diff --git a/f32-branch/_modules/composer/cli/utilities.html b/f32-branch/_modules/composer/cli/utilities.html
new file mode 100644
index 00000000..8ff39ae8
--- /dev/null
+++ b/f32-branch/_modules/composer/cli/utilities.html
@@ -0,0 +1,296 @@

Source code for composer.cli.utilities

+#
+# Copyright (C) 2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("composer-cli")
+
+import json
+
+
[docs]def argify(args): + """Take a list of human args and return a list with each item + + :param args: list of strings with possible commas and spaces + :type args: list of str + :returns: List of all the items + :rtype: list of str + + Examples: + + ["one,two", "three", ",four", ",five,"] returns ["one", "two", "three", "four", "five"] + """ + return [i for i in [arg for entry in args for arg in entry.split(",")] if i]
+ +
[docs]def toml_filename(blueprint_name): + """Convert a blueprint name into a filename.toml + + :param blueprint_name: The blueprint's name + :type blueprint_name: str + :returns: The blueprint name with ' ' converted to - and .toml appended + :rtype: str + """ + return blueprint_name.replace(" ", "-") + ".toml"
+ +
[docs]def frozen_toml_filename(blueprint_name): + """Convert a blueprint name into a filename.toml + + :param blueprint_name: The blueprint's name + :type blueprint_name: str + :returns: The blueprint name with ' ' converted to - and .toml appended + :rtype: str + """ + return blueprint_name.replace(" ", "-") + ".frozen.toml"
+ +
[docs]def handle_api_result(result, show_json=False): + """Log any errors, return the correct value + + :param result: JSON result from the http query + :type result: dict + :rtype: tuple + :returns: (rc, should_exit_now) + + Return the correct rc for the program (0 or 1), and whether or + not to continue processing the results. + """ + if show_json: + print(json.dumps(result, indent=4)) + else: + for err in result.get("errors", []): + log.error(err["msg"]) + + # What's the rc? If status is present, use that + # If not, use length of errors + if "status" in result: + rc = int(not result["status"]) + else: + rc = int(len(result.get("errors", [])) > 0) + + # Caller should return if showing json, or status was present and False + exit_now = show_json or ("status" in result and rc) + return (rc, exit_now)
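To make the return tuple concrete, a few illustrative calls; the response dicts are hand-written, not captured from a real server:

    ok  = {"status": True}
    bad = {"status": False, "errors": [{"msg": "compose failed"}]}

    handle_api_result(ok)                  # rc == 0, exit_now falsy: keep processing
    handle_api_result(bad)                 # rc == 1, exit_now truthy: error logged, return
    handle_api_result(ok, show_json=True)  # prints the JSON; rc == 0, exit_now True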
+ +
[docs]def packageNEVRA(pkg): + """Return the package info as a NEVRA + + :param pkg: The package details + :type pkg: dict + :returns: name-[epoch:]version-release-arch + :rtype: str + """ + if pkg["epoch"]: + return "%s-%s:%s-%s.%s" % (pkg["name"], pkg["epoch"], pkg["version"], pkg["release"], pkg["arch"]) + else: + return "%s-%s-%s.%s" % (pkg["name"], pkg["version"], pkg["release"], pkg["arch"])
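For example, with an illustrative package dict:

    pkg = {"name": "tmux", "epoch": 0, "version": "3.1", "release": "2.fc32", "arch": "x86_64"}
    packageNEVRA(pkg)   # "tmux-3.1-2.fc32.x86_64"

    pkg["epoch"] = 1
    packageNEVRA(pkg)   # "tmux-1:3.1-2.fc32.x86_64"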
\ No newline at end of file
diff --git a/f32-branch/_modules/composer/http_client.html b/f32-branch/_modules/composer/http_client.html
new file mode 100644
index 00000000..4edd84ea
--- /dev/null
+++ b/f32-branch/_modules/composer/http_client.html
@@ -0,0 +1,459 @@

Source code for composer.http_client

+#
+# Copyright (C) 2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("composer-cli")
+
+import os
+import sys
+import json
+from urllib.parse import urlparse, urlunparse
+
+from composer.unix_socket import UnixHTTPConnectionPool
+
+
[docs]def api_url(api_version, url): + """Return the versioned path to the API route + + :param api_version: The version of the API to talk to. eg. "0" + :type api_version: str + :param url: The API route to talk to + :type url: str + :returns: The full url to use for the route and API version + :rtype: str + """ + return os.path.normpath("/api/v%s/%s" % (api_version, url))
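For example:

    api_url("0", "/projects/list")   # "/api/v0/projects/list"
    api_url("0", "compose/queue")    # "/api/v0/compose/queue" (normpath cleans the join)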
+ +
[docs]def append_query(url, query): + """Add a query argument to a URL + + The query should be of the form "param1=what&param2=ever", i.e., no + leading '?'. The new query data will be appended to any existing + query string. + + :param url: The original URL + :type url: str + :param query: The query to append + :type query: str + :returns: The new URL with the query argument included + :rtype: str + """ + + url_parts = urlparse(url) + if url_parts.query: + new_query = url_parts.query + "&" + query + else: + new_query = query + return urlunparse([url_parts[0], url_parts[1], url_parts[2], + url_parts[3], new_query, url_parts[5]])
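For example:

    append_query("/api/v0/modules/list", "limit=0")
    # "/api/v0/modules/list?limit=0"

    append_query("/api/v0/modules/list?offset=50", "limit=20")
    # "/api/v0/modules/list?offset=50&limit=20"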
+ +
[docs]def get_url_raw(socket_path, url): + """Return the raw results of a GET request + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param url: URL to request + :type url: str + :returns: The raw response from the server + :rtype: str + """ + http = UnixHTTPConnectionPool(socket_path) + r = http.request("GET", url) + if r.status == 400: + err = json.loads(r.data.decode("utf-8")) + if "status" in err and err["status"] is False: + msgs = [e["msg"] for e in err["errors"]] + raise RuntimeError(", ".join(msgs)) + + return r.data.decode('utf-8')
+ +
[docs]def get_url_json(socket_path, url): + """Return the JSON results of a GET request + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param url: URL to request + :type url: str + :returns: The json response from the server + :rtype: dict + """ + http = UnixHTTPConnectionPool(socket_path) + r = http.request("GET", url) + return json.loads(r.data.decode('utf-8'))
+ +
[docs]def get_url_json_unlimited(socket_path, url, total_fn=None): + """Return the JSON results of a GET request + + For URLs that use offset/limit arguments, this command will + fetch all results for the given request. + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param url: URL to request + :type url: str + :returns: The json response from the server + :rtype: dict + """ + def default_total_fn(data): + """Return the total number of available results""" + return data["total"] + + http = UnixHTTPConnectionPool(socket_path) + + # Start with limit=0 to just get the number of objects + total_url = append_query(url, "limit=0") + r_total = http.request("GET", total_url) + json_total = json.loads(r_total.data.decode('utf-8')) + + # Where to get the total from + if not total_fn: + total_fn = default_total_fn + + # Add the "total" returned by limit=0 as the new limit + unlimited_url = append_query(url, "limit=%d" % total_fn(json_total)) + r_unlimited = http.request("GET", unlimited_url) + return json.loads(r_unlimited.data.decode('utf-8'))
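A sketch of typical use, mirroring modules_cmd above; the socket path is the usual lorax-composer default and is assumed here:

    # Two requests under the hood: limit=0 first, to learn the total,
    # then limit=<total> to fetch every result in one response.
    result = get_url_json_unlimited("/run/weldr/api.socket", "/api/v0/modules/list")
    names = [m["name"] for m in result["modules"]]
    # Pass total_fn for routes that report the count somewhere other
    # than data["total"].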
+ +
[docs]def delete_url_json(socket_path, url): + """Send a DELETE request to the url and return JSON response + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param url: URL to send DELETE to + :type url: str + :returns: The json response from the server + :rtype: dict + """ + http = UnixHTTPConnectionPool(socket_path) + r = http.request("DELETE", url) + return json.loads(r.data.decode("utf-8"))
+ +
[docs]def post_url(socket_path, url, body): + """POST raw data to the URL + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param url: URL to send POST to + :type url: str + :param body: The data for the body of the POST + :type body: str + :returns: The json response from the server + :rtype: dict + """ + http = UnixHTTPConnectionPool(socket_path) + r = http.request("POST", url, + body=body.encode("utf-8")) + return json.loads(r.data.decode("utf-8"))
+ +
[docs]def post_url_toml(socket_path, url, body): + """POST a TOML string to the URL + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param url: URL to send POST to + :type url: str + :param body: The data for the body of the POST + :type body: str + :returns: The json response from the server + :rtype: dict + """ + http = UnixHTTPConnectionPool(socket_path) + r = http.request("POST", url, + body=body.encode("utf-8"), + headers={"Content-Type": "text/x-toml"}) + return json.loads(r.data.decode("utf-8"))
+ +
[docs]def post_url_json(socket_path, url, body): + """POST some JSON data to the URL + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param url: URL to send POST to + :type url: str + :param body: The data for the body of the POST + :type body: str + :returns: The json response from the server + :rtype: dict + """ + http = UnixHTTPConnectionPool(socket_path) + r = http.request("POST", url, + body=body.encode("utf-8"), + headers={"Content-Type": "application/json"}) + return json.loads(r.data.decode("utf-8"))
+ +
[docs]def get_filename(headers): + """Get the filename from the response header + + :param response: The urllib3 response object + :type response: Response + :raises: RuntimeError if it cannot find a filename in the header + :returns: Filename from content-disposition header + :rtype: str + """ + log.debug("Headers = %s", headers) + if "content-disposition" not in headers: + raise RuntimeError("No Content-Disposition header; cannot get filename") + + try: + k, _, v = headers["content-disposition"].split(";")[1].strip().partition("=") + if k != "filename": + raise RuntimeError("No filename= found in content-disposition header") + except RuntimeError: + raise + except Exception as e: + raise RuntimeError("Error parsing filename from content-disposition header: %s" % str(e)) + + return os.path.basename(v)
+ +
[docs]def download_file(socket_path, url, progress=True): + """Download a file, saving it to the CWD with the included filename + + :param socket_path: Path to the Unix socket to use for API communication + :type socket_path: str + :param url: URL to send POST to + :type url: str + """ + http = UnixHTTPConnectionPool(socket_path) + r = http.request("GET", url, preload_content=False) + if r.status == 400: + err = json.loads(r.data.decode("utf-8")) + if not err["status"]: + msgs = [e["msg"] for e in err["errors"]] + raise RuntimeError(", ".join(msgs)) + + filename = get_filename(r.headers) + if os.path.exists(filename): + msg = "%s exists, skipping download" % filename + log.error(msg) + raise RuntimeError(msg) + + with open(filename, "wb") as f: + while True: + data = r.read(10 * 1024**2) + if not data: + break + f.write(data) + + if progress: + data_written = f.tell() + if data_written > 5 * 1024**2: + sys.stdout.write("%s: %0.2f MB \r" % (filename, data_written / 1024**2)) + else: + sys.stdout.write("%s: %0.2f kB\r" % (filename, data_written / 1024)) + sys.stdout.flush() + + print("") + r.release_conn() + + return 0
\ No newline at end of file
diff --git a/f32-branch/_modules/composer/unix_socket.html b/f32-branch/_modules/composer/unix_socket.html
new file mode 100644
index 00000000..6124ea5b
--- /dev/null
+++ b/f32-branch/_modules/composer/unix_socket.html
@@ -0,0 +1,262 @@

Source code for composer.unix_socket

+#
+# Copyright (C) 2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import http.client
+import socket
+import urllib3
+
+
+# These 2 classes were adapted and simplified for use with just urllib3.
+# Originally from https://github.com/msabramo/requests-unixsocket/blob/master/requests_unixsocket/adapters.py
+
+# The following was adapted from some code from docker-py
+# https://github.com/docker/docker-py/blob/master/docker/transport/unixconn.py
+
[docs]class UnixHTTPConnection(http.client.HTTPConnection, object): + + def __init__(self, socket_path, timeout=60*5): + """Create an HTTP connection to a unix domain socket + + :param socket_path: The path to the Unix domain socket + :param timeout: Number of seconds to timeout the connection + """ + super(UnixHTTPConnection, self).__init__('localhost', timeout=timeout) + self.socket_path = socket_path + self.sock = None + + def __del__(self): # base class does not have d'tor + if self.sock: + self.sock.close() + +
[docs] def connect(self): + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.settimeout(self.timeout) + sock.connect(self.socket_path) + self.sock = sock
+ +
[docs]class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool): + + def __init__(self, socket_path, timeout=60*5): + """Create a connection pool using a Unix domain socket + + :param socket_path: The path to the Unix domain socket + :param timeout: Number of seconds to timeout the connection + + NOTE: retries are disabled for these connections, they are never useful + """ + super(UnixHTTPConnectionPool, self).__init__('localhost', timeout=timeout, retries=False) + self.socket_path = socket_path + + def _new_conn(self): + return UnixHTTPConnection(self.socket_path, self.timeout)
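A minimal usage sketch; the socket path is assumed to be the lorax-composer default:

    pool = UnixHTTPConnectionPool("/run/weldr/api.socket")
    r = pool.request("GET", "/api/status")   # HTTP over the Unix socket
    print(r.status, r.data.decode("utf-8"))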
\ No newline at end of file
diff --git a/f32-branch/_modules/index.html b/f32-branch/_modules/index.html
new file mode 100644
index 00000000..66c2e0d1
--- /dev/null
+++ b/f32-branch/_modules/index.html
@@ -0,0 +1,250 @@
Overview: module code — Lorax 32.12 documentation
[page body: auto-generated index of links to the module source pages]
\ No newline at end of file
diff --git a/f32-branch/_modules/lifted/config.html b/f32-branch/_modules/lifted/config.html
new file mode 100644
index 00000000..5a1a748f
--- /dev/null
+++ b/f32-branch/_modules/lifted/config.html
@@ -0,0 +1,234 @@

Source code for lifted.config

+#
+# Copyright (C) 2019  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+from pylorax.sysutils import joinpaths
+
+
[docs]def configure(conf): + """Add lifted settings to the configuration + + :param conf: configuration object + :type conf: ComposerConfig + :returns: None + + This uses the composer.share_dir and composer.lib_dir as the base + directories for the settings. + """ + share_dir = conf.get("composer", "share_dir") + lib_dir = conf.get("composer", "lib_dir") + + conf.add_section("upload") + conf.set("upload", "providers_dir", joinpaths(share_dir, "/lifted/providers/")) + conf.set("upload", "queue_dir", joinpaths(lib_dir, "/upload/queue/")) + conf.set("upload", "settings_dir", joinpaths(lib_dir, "/upload/settings/"))
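As a concrete sketch, with common lorax-composer defaults for share_dir and lib_dir (assumed values; joinpaths may leave a doubled slash in practice):

    # share_dir = /usr/share/lorax, lib_dir = /var/lib/lorax/composer
    #
    # [upload]
    # providers_dir = /usr/share/lorax/lifted/providers/
    # queue_dir     = /var/lib/lorax/composer/upload/queue/
    # settings_dir  = /var/lib/lorax/composer/upload/settings/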
\ No newline at end of file
diff --git a/f32-branch/_modules/lifted/providers.html b/f32-branch/_modules/lifted/providers.html
new file mode 100644
index 00000000..afd04f10
--- /dev/null
+++ b/f32-branch/_modules/lifted/providers.html
@@ -0,0 +1,444 @@

Source code for lifted.providers

+#
+# Copyright (C) 2019 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+from glob import glob
+import os
+import re
+import stat
+
+import pylorax.api.toml as toml
+
+
+def _get_profile_path(ucfg, provider_name, profile, exists=True):
+    """Helper to return the directory and path for a provider's profile file
+
+    :param ucfg: upload config
+    :type ucfg: object
+    :param provider_name: the name of the cloud provider, e.g. "azure"
+    :type provider_name: str
+    :param profile: the name of the profile to save
+    :type profile: str != ""
+    :returns: Full path of the profile .toml file
+    :rtype: str
+    :raises: ValueError when passed invalid settings or an invalid profile name
+    :raises: RuntimeError when the provider or profile couldn't be found
+    """
+    # Make sure no path elements are present
+    profile = os.path.basename(profile)
+    provider_name = os.path.basename(provider_name)
+    if not profile:
+        raise ValueError("Profile name cannot be empty!")
+    if not provider_name:
+        raise ValueError("Provider name cannot be empty!")
+
+    directory = os.path.join(ucfg["settings_dir"], provider_name)
+    # create the settings directory if it doesn't exist
+    os.makedirs(directory, exist_ok=True)
+
+    path = os.path.join(directory, f"{profile}.toml")
+    if exists and not os.path.isfile(path):
+        raise RuntimeError(f'Couldn\'t find profile "{profile}"!')
+
+    return os.path.abspath(path)
+
+
[docs]def resolve_provider(ucfg, provider_name): + """Get information about the specified provider as defined in that + provider's `provider.toml`, including the provider's display name and expected + settings. + + At a minimum, each setting has a display name (that likely differs from its + snake_case name) and a type. Currently, there are two types of settings: + string and boolean. String settings can optionally have a "placeholder" + value for use on the front end and a "regex" for making sure that a value + follows an expected pattern. + + :param ucfg: upload config + :type ucfg: object + :param provider_name: the name of the provider to look for + :type provider_name: str + :raises: RuntimeError when the provider couldn't be found + :returns: the provider + :rtype: dict + """ + # Make sure no path elements are present + provider_name = os.path.basename(provider_name) + path = os.path.join(ucfg["providers_dir"], provider_name, "provider.toml") + try: + with open(path) as provider_file: + provider = toml.load(provider_file) + except OSError as error: + raise RuntimeError(f'Couldn\'t find provider "{provider_name}"!') from error + + return provider
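A hypothetical provider.toml shaped after the fields described above; the provider name, settings, and regex are invented for illustration (toml.loads here is the pylorax.api.toml wrapper imported above):

    example_provider = toml.loads('''
    display = "Example Cloud"
    supported_types = ["ami"]

    [settings-info.access_key]
    display = "Access key"
    type = "string"
    placeholder = "AKIA..."
    regex = "^[A-Z0-9]{16,}$"

    [settings-info.public]
    display = "Make the image public"
    type = "boolean"
    ''')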
+ + +
[docs]def load_profiles(ucfg, provider_name): + """Return all settings profiles associated with a provider + + :param ucfg: upload config + :type ucfg: object + :param provider_name: name a provider to find profiles for + :type provider_name: str + :returns: a dict of settings dicts, keyed by profile name + :rtype: dict + """ + # Make sure no path elements are present + provider_name = os.path.basename(provider_name) + + def load_path(path): + with open(path) as file: + return toml.load(file) + + def get_name(path): + return os.path.splitext(os.path.basename(path))[0] + + paths = glob(os.path.join(ucfg["settings_dir"], provider_name, "*")) + return {get_name(path): load_path(path) for path in paths}
+ + +
[docs]def resolve_playbook_path(ucfg, provider_name): + """Given a provider's name, return the path to its playbook + + :param ucfg: upload config + :type ucfg: object + :param provider_name: the name of the provider to find the playbook for + :type provider_name: str + :raises: RuntimeError when the provider couldn't be found + :returns: the path to the playbook + :rtype: str + """ + # Make sure no path elements are present + provider_name = os.path.basename(provider_name) + + path = os.path.join(ucfg["providers_dir"], provider_name, "playbook.yaml") + if not os.path.isfile(path): + raise RuntimeError(f'Couldn\'t find playbook for "{provider_name}"!') + return path
+ + +
[docs]def list_providers(ucfg): + """List the names of the available upload providers + + :param ucfg: upload config + :type ucfg: object + :returns: a list of all available provider_names + :rtype: list of str + """ + paths = glob(os.path.join(ucfg["providers_dir"], "*")) + return sorted(os.path.basename(path) for path in paths)
+ + +
[docs]def validate_settings(ucfg, provider_name, settings, image_name=None): + """Raise a ValueError if any settings are invalid + + :param ucfg: upload config + :type ucfg: object + :param provider_name: the name of the provider to validate the settings against + :type provider_name: str + :param settings: the settings to validate + :type settings: dict + :param image_name: optionally check whether an image_name is valid + :type image_name: str + :raises: ValueError when the passed settings are invalid + :raises: RuntimeError when provider_name can't be found + """ + if image_name == "": + raise ValueError("Image name cannot be empty!") + type_map = {"string": str, "boolean": bool} + settings_info = resolve_provider(ucfg, provider_name)["settings-info"] + for key, value in settings.items(): + if key not in settings_info: + raise ValueError(f'Received unexpected setting: "{key}"!') + setting_type = settings_info[key]["type"] + correct_type = type_map[setting_type] + if not isinstance(value, correct_type): + raise ValueError( + f'Expected a {correct_type} for "{key}", received a {type(value)}!' + ) + if setting_type == "string" and "regex" in settings_info[key]: + if not re.match(settings_info[key]["regex"], value): + raise ValueError(f'Value "{value}" is invalid for setting "{key}"!')
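Against the hypothetical "example" provider sketched above, and assuming ucfg is a configured upload config, validation behaves like this:

    settings = {"access_key": "AKIA0EXAMPLEKEY0", "public": False}
    validate_settings(ucfg, "example", settings)                 # passes silently
    validate_settings(ucfg, "example", {"bogus": 1})             # ValueError: unexpected setting
    validate_settings(ucfg, "example", settings, image_name="")  # ValueError: empty image name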
+ + +
[docs]def save_settings(ucfg, provider_name, profile, settings): + """Save (and overwrite) settings for a given provider + + :param ucfg: upload config + :type ucfg: object + :param provider_name: the name of the cloud provider, e.g. "azure" + :type provider_name: str + :param profile: the name of the profile to save + :type profile: str != "" + :param settings: settings to save for that provider + :type settings: dict + :raises: ValueError when passed invalid settings or an invalid profile name + """ + path = _get_profile_path(ucfg, provider_name, profile, exists=False) + validate_settings(ucfg, provider_name, settings, image_name=None) + + # touch the TOML file if it doesn't exist + if not os.path.isfile(path): + open(path, "a").close() + + # make sure settings files aren't readable by others, as they will contain + # sensitive credentials + current = stat.S_IMODE(os.lstat(path).st_mode) + os.chmod(path, current & ~stat.S_IROTH) + + with open(path, "w") as settings_file: + toml.dump(settings, settings_file)
+ +
[docs]def load_settings(ucfg, provider_name, profile): + """Load settings for a provider's profile + + :param ucfg: upload config + :type ucfg: object + :param provider_name: the name of the cloud provider, e.g. "azure" + :type provider_name: str + :param profile: the name of the profile to load + :type profile: str != "" + :returns: The profile settings for the selected provider + :rtype: dict + :raises: ValueError when passed an invalid profile name, or when the loaded settings are invalid + :raises: RuntimeError when the provider or profile couldn't be found + + This also calls validate_settings on the loaded settings, potentially + raising an error if the saved settings are invalid. + """ + path = _get_profile_path(ucfg, provider_name, profile) + + with open(path) as file: + settings = toml.load(file) + validate_settings(ucfg, provider_name, settings) + return settings
+ +
[docs]def delete_profile(ucfg, provider_name, profile): + """Delete a provider's profile settings file + + :param ucfg: upload config + :type ucfg: object + :param provider_name: the name of the cloud provider, e.g. "azure" + :type provider_name: str + :param profile: the name of the profile to delete + :type profile: str != "" + :raises: ValueError when passed an invalid profile name + :raises: RuntimeError when the provider or profile couldn't be found + """ + path = _get_profile_path(ucfg, provider_name, profile) + + if os.path.exists(path): + os.unlink(path)
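+# A minimal round trip through the profile helpers above (values illustrative):
+#
+#     save_settings(ucfg, "azure", "default", {"secret": "s3cret"})
+#     load_settings(ucfg, "azure", "default")    # -> {"secret": "s3cret"}
+#     delete_profile(ucfg, "azure", "default")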
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/lifted/queue.html b/f32-branch/_modules/lifted/queue.html new file mode 100644 index 00000000..3ef32a37 --- /dev/null +++ b/f32-branch/_modules/lifted/queue.html @@ -0,0 +1,468 @@ + + + + + + + + + + + lifted.queue — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for lifted.queue

+#
+# Copyright (C) 2019 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+from functools import partial
+from glob import glob
+import logging
+import multiprocessing
+
+# We use a multiprocessing Pool for uploads so that we can cancel them with a
+# simple SIGINT, which should bubble down to subprocesses.
+from multiprocessing import Pool
+
+# multiprocessing.dummy is to threads as multiprocessing is to processes.
+# Since daemonic processes can't have children, we use a thread to monitor the
+# upload pool.
+from multiprocessing.dummy import Process
+
+from operator import attrgetter
+import os
+import stat
+import time
+
+import pylorax.api.toml as toml
+
+from lifted.upload import Upload
+from lifted.providers import resolve_playbook_path, validate_settings
+
+# the maximum number of simultaneous uploads
+SIMULTANEOUS_UPLOADS = 1
+
+log = logging.getLogger("lifted")
+multiprocessing.log_to_stderr().setLevel(logging.INFO)
+
+
+def _get_queue_path(ucfg):
+    path = ucfg["queue_dir"]
+
+    # create the upload_queue directory if it doesn't exist
+    os.makedirs(path, exist_ok=True)
+
+    return path
+
+
+def _get_upload_path(ucfg, uuid, write=False):
+    # Make sure no path elements are present
+    uuid = os.path.basename(uuid)
+
+    path = os.path.join(_get_queue_path(ucfg), f"{uuid}.toml")
+    if write and not os.path.exists(path):
+        open(path, "a").close()
+    if os.path.exists(path):
+        # make sure uploads aren't readable by others, as they will contain
+        # sensitive credentials
+        current = stat.S_IMODE(os.lstat(path).st_mode)
+        os.chmod(path, current & ~stat.S_IROTH)
+    return path
+
+
+def _list_upload_uuids(ucfg):
+    paths = glob(os.path.join(_get_queue_path(ucfg), "*"))
+    return [os.path.splitext(os.path.basename(path))[0] for path in paths]
+
+
+def _write_upload(ucfg, upload):
+    with open(_get_upload_path(ucfg, upload.uuid, write=True), "w") as upload_file:
+        toml.dump(upload.serializable(), upload_file)
+
+
+def _write_callback(ucfg):
+    return partial(_write_upload, ucfg)
+
+
+
[docs]def get_upload(ucfg, uuid, ignore_missing=False, ignore_corrupt=False): + """Get an Upload object by UUID + + :param ucfg: upload config + :type ucfg: object + :param uuid: UUID of the upload to get + :type uuid: str + :param ignore_missing: if True, don't raise a RuntimeError when the specified upload is missing, instead just return None + :type ignore_missing: bool + :param ignore_corrupt: if True, don't raise a RuntimeError when the specified upload could not be deserialized, instead just return None + :type ignore_corrupt: bool + :returns: the upload object or None + :rtype: Upload or None + :raises: RuntimeError + """ + try: + with open(_get_upload_path(ucfg, uuid), "r") as upload_file: + return Upload(**toml.load(upload_file)) + except FileNotFoundError as error: + if not ignore_missing: + raise RuntimeError(f"Could not find upload {uuid}!") from error + except toml.TomlError as error: + if not ignore_corrupt: + raise RuntimeError(f"Could not parse upload {uuid}!") from error
+ + +
[docs]def get_uploads(ucfg, uuids): + """Gets a list of Upload objects from a list of upload UUIDs, ignoring + missing or corrupt uploads + + :param ucfg: upload config + :type ucfg: object + :param uuids: list of upload UUIDs to get + :type uuids: list of str + :returns: a list of the uploads that were successfully deserialized + :rtype: list of Upload + """ + uploads = ( + get_upload(ucfg, uuid, ignore_missing=True, ignore_corrupt=True) + for uuid in uuids + ) + return list(filter(None, uploads))
+ + +
[docs]def get_all_uploads(ucfg): + """Get a list of all stored Upload objects + + :param ucfg: upload config + :type ucfg: object + :returns: a list of all stored upload objects + :rtype: list of Upload + """ + return get_uploads(ucfg, _list_upload_uuids(ucfg))
+ + +
[docs]def create_upload(ucfg, provider_name, image_name, settings): + """Creates a new upload + + :param ucfg: upload config + :type ucfg: object + :param provider_name: the name of the cloud provider to upload to, e.g. "azure" + :type provider_name: str + :param image_name: what to name the image in the cloud + :type image_name: str + :param settings: settings to pass to the upload, specific to the cloud provider + :type settings: dict + :returns: the created upload object + :rtype: Upload + """ + validate_settings(ucfg, provider_name, settings, image_name) + return Upload( + provider_name=provider_name, + playbook_path=resolve_playbook_path(ucfg, provider_name), + image_name=image_name, + settings=settings, + status_callback=_write_callback(ucfg), + )
+ + +
[docs]def ready_upload(ucfg, uuid, image_path): + """Pass an image_path to an upload and mark it ready to execute + + :param ucfg: upload config + :type ucfg: object + :param uuid: the UUID of the upload to mark ready + :type uuid: str + :param image_path: the path of the image to pass to the upload + :type image_path: str + """ + get_upload(ucfg, uuid).ready(image_path, _write_callback(ucfg))
+ + +
[docs]def reset_upload(ucfg, uuid, new_image_name=None, new_settings=None): + """Reset an upload so it can be attempted again + + :param ucfg: upload config + :type ucfg: object + :param uuid: the UUID of the upload to reset + :type uuid: str + :param new_image_name: optionally update the upload's image_name + :type new_image_name: str + :param new_settings: optionally update the upload's settings + :type new_settings: dict + """ + upload = get_upload(ucfg, uuid) + validate_settings( + ucfg, + upload.provider_name, + new_settings or upload.settings, + new_image_name or upload.image_name, + ) + if new_image_name: + upload.image_name = new_image_name + if new_settings: + upload.settings = new_settings + upload.reset(_write_callback(ucfg))
+ + +
[docs]def cancel_upload(ucfg, uuid): + """Cancel an upload + + :param ucfg: upload config + :type ucfg: object + :param uuid: the UUID of the upload to cancel + :type uuid: str + """ + get_upload(ucfg, uuid).cancel(_write_callback(ucfg))
+ + +
[docs]def delete_upload(ucfg, uuid): + """Delete an upload + + :param ucfg: upload config + :type ucfg: object + :param uuid: the UUID of the upload to delete + :type uuid: str + """ + upload = get_upload(ucfg, uuid) + if upload and upload.is_cancellable(): + upload.cancel() + os.remove(_get_upload_path(ucfg, uuid))
+ + +
[docs]def start_upload_monitor(ucfg): + """Start a thread that manages the upload queue + + :param ucfg: upload config + :type ucfg: object + """ + process = Process(target=_monitor, args=(ucfg,)) + process.daemon = True + process.start()
+ + +def _monitor(ucfg): + log.info("Started upload monitor.") + for upload in get_all_uploads(ucfg): + # Set abandoned uploads to FAILED + if upload.status == "RUNNING": + upload.set_status("FAILED", _write_callback(ucfg)) + pool = Pool(processes=SIMULTANEOUS_UPLOADS) + pool_uuids = set() + + def remover(uuid): + return lambda _: pool_uuids.remove(uuid) + + while True: + # Every second, scoop up READY uploads from the filesystem and throw + # them in the pool + all_uploads = get_all_uploads(ucfg) + for upload in sorted(all_uploads, key=attrgetter("creation_time")): + ready = upload.status == "READY" + if ready and upload.uuid not in pool_uuids: + log.info("Starting upload %s...", upload.uuid) + pool_uuids.add(upload.uuid) + callback = remover(upload.uuid) + pool.apply_async( + upload.execute, + (_write_callback(ucfg),), + callback=callback, + error_callback=callback, + ) + time.sleep(1) +
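+# A minimal sketch of the queue lifecycle built from the functions above;
+# the ucfg keys are the ones these functions read, the values are illustrative:
+#
+#     ucfg = {"queue_dir": "/var/lib/lorax/upload/queue",
+#             "providers_dir": "...", "settings_dir": "..."}
+#     start_upload_monitor(ucfg)
+#     upload = create_upload(ucfg, "azure", "my-image", settings)
+#     ready_upload(ucfg, upload.uuid, "/path/to/disk.qcow2")
+#
+# The monitor thread then picks READY uploads off the filesystem and runs
+# them in the pool, at most SIMULTANEOUS_UPLOADS at a time.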
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/lifted/upload.html b/f32-branch/_modules/lifted/upload.html new file mode 100644 index 00000000..7adb05e2 --- /dev/null +++ b/f32-branch/_modules/lifted/upload.html @@ -0,0 +1,411 @@ + + + + + + + + + + + lifted.upload — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for lifted.upload

+#
+# Copyright (C) 2019 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+from datetime import datetime
+import logging
+from multiprocessing import current_process
+import os
+import signal
+from uuid import uuid4
+
+from ansible_runner.interface import run as ansible_run
+from ansible_runner.exceptions import AnsibleRunnerException
+
+log = logging.getLogger("lifted")
+
+
+
[docs]class Upload: + """Represents an upload of an image to a cloud provider. Instances of this + class are serialized as TOML and stored in the upload queue directory, + which is /var/lib/lorax/upload/queue/ by default""" + + def __init__( + self, + uuid=None, + provider_name=None, + playbook_path=None, + image_name=None, + settings=None, + creation_time=None, + upload_log=None, + upload_pid=None, + image_path=None, + status_callback=None, + status=None, + ): + self.uuid = uuid or str(uuid4()) + self.provider_name = provider_name + self.playbook_path = playbook_path + self.image_name = image_name + self.settings = settings + self.creation_time = creation_time or datetime.now().timestamp() + self.upload_log = upload_log or "" + self.upload_pid = upload_pid + self.image_path = image_path + if status: + self.status = status + else: + self.set_status("WAITING", status_callback) + + def _log(self, message, callback=None): + """Logs something to the upload log with an optional callback + + :param message: the object to log + :type message: object + :param callback: a function of the form callback(self) + :type callback: function + """ + if message: + messages = str(message).splitlines() + + # Log multi-line messages as individual log lines + for m in messages: + log.info(m) + self.upload_log += f"{message}\n" + if callback: + callback(self) + +
[docs] def serializable(self): + """Returns a representation of the object as a dict for serialization + + :returns: the object's __dict__ + :rtype: dict + """ + return self.__dict__
+ +
[docs] def summary(self): + """Return a dict with useful information about the upload + + :returns: upload information + :rtype: dict + """ + + return { + "uuid": self.uuid, + "status": self.status, + "provider_name": self.provider_name, + "image_name": self.image_name, + "image_path": self.image_path, + "creation_time": self.creation_time, + "settings": self.settings, + }
+ +
[docs] def set_status(self, status, status_callback=None): + """Sets the status of the upload with an optional callback + + :param status: the new status + :type status: str + :param status_callback: a function of the form callback(self) + :type status_callback: function + """ + self._log("Setting status to %s" % status) + self.status = status + if status_callback: + status_callback(self)
+ +
[docs] def ready(self, image_path, status_callback): + """Provide an image_path and mark the upload as ready to execute + + :param image_path: path of the image to upload + :type image_path: str + :param status_callback: a function of the form callback(self) + :type status_callback: function + """ + self._log("Setting image_path to %s" % image_path) + self.image_path = image_path + if self.status == "WAITING": + self.set_status("READY", status_callback)
+ +
[docs] def reset(self, status_callback): + """Reset the upload so it can be attempted again + + :param status_callback: a function of the form callback(self) + :type status_callback: function + """ + if self.is_cancellable(): + raise RuntimeError(f"Can't reset, status is {self.status}!") + if not self.image_path: + raise RuntimeError("Can't reset, no image supplied yet!") + # self.error = None + self._log("Resetting state") + self.set_status("READY", status_callback)
+ +
[docs] def is_cancellable(self): + """Is the upload in a cancellable state? + + :returns: whether the upload is cancellable + :rtype: bool + """ + return self.status in ("WAITING", "READY", "RUNNING")
+ +
[docs] def cancel(self, status_callback=None): + """Cancel the upload. Sends a SIGINT to self.upload_pid. + + :param status_callback: a function of the form callback(self) + :type status_callback: function + """ + if not self.is_cancellable(): + raise RuntimeError(f"Can't cancel, status is already {self.status}!") + if self.upload_pid: + os.kill(self.upload_pid, signal.SIGINT) + self.set_status("CANCELLED", status_callback)
+ +
[docs] def execute(self, status_callback=None): + """Execute the upload. Meant to be called from a dedicated process so + that the upload can be cancelled by sending a SIGINT to + self.upload_pid. + + :param status_callback: a function of the form callback(self) + :type status_callback: function + """ + if self.status != "READY": + raise RuntimeError("This upload is not ready!") + + try: + self.upload_pid = current_process().pid + self.set_status("RUNNING", status_callback) + self._log("Executing playbook.yml") + + # NOTE: event_handler doesn't seem to be called for playbook errors + logger = lambda e: self._log(e["stdout"], status_callback) + + runner = ansible_run( + playbook=self.playbook_path, + extravars={ + **self.settings, + "image_name": self.image_name, + "image_path": self.image_path, + }, + event_handler=logger, + verbosity=2, + ) + + # Try logging events and stats -- but they may not exist, so catch the error + try: + for e in runner.events: + self._log("%s" % dir(e), status_callback) + + self._log("%s" % runner.stats, status_callback) + except AnsibleRunnerException: + self._log("%s" % runner.stdout.read(), status_callback) + + if runner.status == "successful": + self.set_status("FINISHED", status_callback) + else: + self.set_status("FAILED", status_callback) + except Exception: + import traceback + log.error(traceback.format_exc(limit=2))
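+# Status transitions implemented by the methods above (informal summary):
+#
+#     WAITING --ready()--> READY --execute()--> RUNNING --> FINISHED or FAILED
+#     WAITING / READY / RUNNING --cancel()--> CANCELLED
+#     FINISHED / FAILED / CANCELLED --reset()--> READY (an image_path is required)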
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax.html b/f32-branch/_modules/pylorax.html new file mode 100644 index 00000000..b226901d --- /dev/null +++ b/f32-branch/_modules/pylorax.html @@ -0,0 +1,653 @@ + + + + + + + + + + + pylorax — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax

+#
+# __init__.py
+#
+# Copyright (C) 2010-2015  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#                     David Cantrell <dcantrell@redhat.com>
+#                     Will Woods <wwoods@redhat.com>
+
+# set up logging
+import logging
+logger = logging.getLogger("pylorax")
+logger.addHandler(logging.NullHandler())
+
+program_log = logging.getLogger("program")
+
+import sys
+import os
+import configparser
+import tempfile
+import locale
+from subprocess import CalledProcessError
+import selinux
+from glob import glob
+
+from pylorax.base import BaseLoraxClass, DataHolder
+import pylorax.output as output
+
+import dnf
+
+from pylorax.sysutils import joinpaths, remove, linktree
+
+from pylorax.treebuilder import RuntimeBuilder, TreeBuilder
+from pylorax.buildstamp import BuildStamp
+from pylorax.treeinfo import TreeInfo
+from pylorax.discinfo import DiscInfo
+from pylorax.executils import runcmd, runcmd_output
+
+
+# get lorax version
+try:
+    import pylorax.version
+except ImportError:
+    vernum = "devel"
+else:
+    vernum = pylorax.version.num
+
+DRACUT_DEFAULT = ["--xz", "--install", "/.buildstamp", "--no-early-microcode", "--add", "fips"]
+
+# Used for DNF conf.module_platform_id
+DEFAULT_PLATFORM_ID = "platform:f32"
+
+
[docs]class ArchData(DataHolder): + lib64_arches = ("x86_64", "ppc64le", "s390x", "ia64", "aarch64") + bcj_arch = dict(i386="x86", x86_64="x86", + ppc64le="powerpc", + arm="arm", armhfp="arm") + + def __init__(self, buildarch): + super(ArchData, self).__init__() + self.buildarch = buildarch + self.basearch = dnf.rpm.basearch(buildarch) + self.libdir = "lib64" if self.basearch in self.lib64_arches else "lib" + self.bcj = self.bcj_arch.get(self.basearch)
+ +
[docs]class Lorax(BaseLoraxClass): + + def __init__(self): + BaseLoraxClass.__init__(self) + self._configured = False + self.product = None + self.workdir = None + self.arch = None + self.conf = None + self.inroot = None + self.debug = False + self.outputdir = None + self._templatedir = None + + # set locale to C + locale.setlocale(locale.LC_ALL, 'C') + +
[docs] def configure(self, conf_file="/etc/lorax/lorax.conf"): + self.conf = configparser.SafeConfigParser() + + # set defaults + self.conf.add_section("lorax") + self.conf.set("lorax", "debug", "1") + self.conf.set("lorax", "sharedir", "/usr/share/lorax") + self.conf.set("lorax", "logdir", "/var/log/lorax") + + self.conf.add_section("output") + self.conf.set("output", "colors", "1") + self.conf.set("output", "encoding", "utf-8") + self.conf.set("output", "ignorelist", "/usr/share/lorax/ignorelist") + + self.conf.add_section("templates") + self.conf.set("templates", "ramdisk", "ramdisk.ltmpl") + + self.conf.add_section("compression") + self.conf.set("compression", "type", "xz") + self.conf.set("compression", "args", "") + self.conf.set("compression", "bcj", "on") + + # read the config file + if os.path.isfile(conf_file): + self.conf.read(conf_file) + + # set up the output + self.debug = self.conf.getboolean("lorax", "debug") + output_level = output.DEBUG if self.debug else output.INFO + + if sys.stdout.isatty(): + colors = self.conf.getboolean("output", "colors") + else: + colors = False + encoding = self.conf.get("output", "encoding") + + self.output.basic_config(output_level=output_level, + colors=colors, encoding=encoding) + + ignorelist = self.conf.get("output", "ignorelist") + if os.path.isfile(ignorelist): + with open(ignorelist, "r") as fobj: + for line in fobj: + line = line.strip() + if line and not line.startswith("#"): + self.output.ignore(line) + + # cron does not have sbin in PATH, + # so we have to add it ourselves + os.environ["PATH"] = "{0}:/sbin:/usr/sbin".format(os.environ["PATH"]) + + # remove some environmental variables that can cause problems with package scripts + env_remove = ('DISPLAY', 'DBUS_SESSION_BUS_ADDRESS') + list(os.environ.pop(k) for k in env_remove if k in os.environ) + + self._configured = True
+ + @property + def templatedir(self): + """Find the template directory. + + Pick the first directory under sharedir/templates.d/ if it exists. + Otherwise use the sharedir + """ + if not self._templatedir: + self._templatedir = find_templates(self.conf.get("lorax", "sharedir")) + logger.info("Using templatedir %s", self._templatedir) + return self._templatedir + +
[docs] def init_stream_logging(self): + sh = logging.StreamHandler() + sh.setLevel(logging.INFO) + logger.addHandler(sh)
+ +
[docs] def init_file_logging(self, logdir, logname="pylorax.log"): + fh = logging.FileHandler(filename=joinpaths(logdir, logname), mode="w") + fh.setLevel(logging.DEBUG) + logger.addHandler(fh)
+ +
[docs] def run(self, dbo, product, version, release, variant="", bugurl="", + isfinal=False, workdir=None, outputdir=None, buildarch=None, volid=None, + domacboot=True, doupgrade=True, remove_temp=False, + installpkgs=None, excludepkgs=None, + size=2, + add_templates=None, + add_template_vars=None, + add_arch_templates=None, + add_arch_template_vars=None, + verify=True, + user_dracut_args=None, + squashfs_only=False, + skip_branding=False): + + assert self._configured + + installpkgs = installpkgs or [] + excludepkgs = excludepkgs or [] + + if domacboot: + try: + runcmd(["rpm", "-q", "hfsplus-tools"]) + except CalledProcessError: + logger.critical("you need to install hfsplus-tools to create mac images") + sys.exit(1) + + # set up work directory + self.workdir = workdir or tempfile.mkdtemp(prefix="pylorax.work.") + if not os.path.isdir(self.workdir): + os.makedirs(self.workdir) + + # set up log directory + logdir = self.conf.get("lorax", "logdir") + if not os.path.isdir(logdir): + os.makedirs(logdir) + + self.init_stream_logging() + self.init_file_logging(logdir) + + logger.debug("version is %s", vernum) + log_selinux_state() + + logger.debug("using work directory %s", self.workdir) + logger.debug("using log directory %s", logdir) + + # set up output directory + self.outputdir = outputdir or tempfile.mkdtemp(prefix="pylorax.out.") + if not os.path.isdir(self.outputdir): + os.makedirs(self.outputdir) + logger.debug("using output directory %s", self.outputdir) + + # do we have root privileges? + logger.info("checking for root privileges") + if not os.geteuid() == 0: + logger.critical("no root privileges") + sys.exit(1) + + # do we have a proper dnf base object? + logger.info("checking dnf base object") + if not isinstance(dbo, dnf.Base): + logger.critical("no dnf base object") + sys.exit(1) + self.inroot = dbo.conf.installroot + logger.debug("using install root: %s", self.inroot) + + if not buildarch: + buildarch = get_buildarch(dbo) + + logger.info("setting up build architecture") + self.arch = ArchData(buildarch) + for attr in ('buildarch', 'basearch', 'libdir'): + logger.debug("self.arch.%s = %s", attr, getattr(self.arch,attr)) + + logger.info("setting up build parameters") + self.product = DataHolder(name=product, version=version, release=release, + variant=variant, bugurl=bugurl, isfinal=isfinal) + logger.debug("product data: %s", self.product) + + # NOTE: if you change isolabel, you need to change pungi to match, or + # the pungi images won't boot. 
+ isolabel = volid or "%s-%s-%s" % (self.product.name, self.product.version, self.arch.basearch) + + if len(isolabel) > 32: + logger.fatal("the volume id cannot be longer than 32 characters") + sys.exit(1) + + # NOTE: rb.root = dbo.conf.installroot (== self.inroot) + rb = RuntimeBuilder(product=self.product, arch=self.arch, + dbo=dbo, templatedir=self.templatedir, + installpkgs=installpkgs, + excludepkgs=excludepkgs, + add_templates=add_templates, + add_template_vars=add_template_vars, + skip_branding=skip_branding) + + logger.info("installing runtime packages") + rb.install() + + # write .buildstamp + buildstamp = BuildStamp(self.product.name, self.product.version, + self.product.bugurl, self.product.isfinal, + self.arch.buildarch, self.product.variant) + + buildstamp.write(joinpaths(self.inroot, ".buildstamp")) + + if self.debug: + rb.writepkglists(joinpaths(logdir, "pkglists")) + rb.writepkgsizes(joinpaths(logdir, "original-pkgsizes.txt")) + + logger.info("doing post-install configuration") + rb.postinstall() + + # write .discinfo + discinfo = DiscInfo(self.product.release, self.arch.basearch) + discinfo.write(joinpaths(self.outputdir, ".discinfo")) + + logger.info("backing up installroot") + installroot = joinpaths(self.workdir, "installroot") + linktree(self.inroot, installroot) + + logger.info("generating kernel module metadata") + rb.generate_module_data() + + logger.info("cleaning unneeded files") + rb.cleanup() + + if verify: + logger.info("verifying the installroot") + if not rb.verify(): + sys.exit(1) + else: + logger.info("Skipping verify") + + if self.debug: + rb.writepkgsizes(joinpaths(logdir, "final-pkgsizes.txt")) + + logger.info("creating the runtime image") + runtime = "images/install.img" + compression = self.conf.get("compression", "type") + compressargs = self.conf.get("compression", "args").split() # pylint: disable=no-member + if self.conf.getboolean("compression", "bcj"): + if self.arch.bcj: + compressargs += ["-Xbcj", self.arch.bcj] + else: + logger.info("no BCJ filter for arch %s", self.arch.basearch) + if squashfs_only: + # Create a squashfs-only runtime, without an inner ext4 rootfs.img + rb.create_squashfs_runtime(joinpaths(installroot,runtime), + compression=compression, compressargs=compressargs, + size=size) + else: + # Create an ext4 rootfs.img and compress it with squashfs + rb.create_ext4_runtime(joinpaths(installroot,runtime), + compression=compression, compressargs=compressargs, + size=size) + rb.finished() + + logger.info("preparing to build output tree and boot images") + treebuilder = TreeBuilder(product=self.product, arch=self.arch, + inroot=installroot, outroot=self.outputdir, + runtime=runtime, isolabel=isolabel, + domacboot=domacboot, doupgrade=doupgrade, + templatedir=self.templatedir, + add_templates=add_arch_templates, + add_template_vars=add_arch_template_vars, + workdir=self.workdir) + + logger.info("rebuilding initramfs images") + if not user_dracut_args: + dracut_args = DRACUT_DEFAULT + else: + dracut_args = [] + for arg in user_dracut_args: + dracut_args += arg.split(" ", 1) + + anaconda_args = dracut_args + ["--add", "anaconda pollcdrom qemu qemu-net"] + + logger.info("dracut args = %s", dracut_args) + logger.info("anaconda args = %s", anaconda_args) + treebuilder.rebuild_initrds(add_args=anaconda_args) + + logger.info("populating output tree and building boot images") + treebuilder.build() + + # write .treeinfo file and we're done + treeinfo = TreeInfo(self.product.name, self.product.version, + self.product.variant, self.arch.basearch) + for section, data in treebuilder.treeinfo_data.items(): + treeinfo.add_section(section, data) + treeinfo.write(joinpaths(self.outputdir, ".treeinfo")) + + # cleanup + if remove_temp: + remove(self.workdir)
+ + +
[docs]def get_buildarch(dbo): + # get architecture of the available anaconda package + buildarch = None + q = dbo.sack.query() + a = q.available() + for anaconda in a.filter(name="anaconda-core"): + if anaconda.arch != "src": + buildarch = anaconda.arch + break + if not buildarch: + logger.critical("no anaconda-core package in the repository") + sys.exit(1) + + return buildarch
+ + +
[docs]def setup_logging(logfile, theLogger): + """ + Setup the various logs + + :param logfile: filename to write the log to + :type logfile: string + :param theLogger: top-level logger + :type theLogger: logging.Logger + """ + if not os.path.isdir(os.path.abspath(os.path.dirname(logfile))): + os.makedirs(os.path.abspath(os.path.dirname(logfile))) + + # Setup logging to console and to logfile + logger.setLevel(logging.DEBUG) + theLogger.setLevel(logging.DEBUG) + + sh = logging.StreamHandler() + sh.setLevel(logging.INFO) + fmt = logging.Formatter("%(asctime)s: %(message)s") + sh.setFormatter(fmt) + logger.addHandler(sh) + theLogger.addHandler(sh) + + fh = logging.FileHandler(filename=logfile, mode="w") + fh.setLevel(logging.DEBUG) + fmt = logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s") + fh.setFormatter(fmt) + logger.addHandler(fh) + theLogger.addHandler(fh) + + # External program output log + program_log.setLevel(logging.DEBUG) + f = os.path.abspath(os.path.dirname(logfile))+"/program.log" + fh = logging.FileHandler(filename=f, mode="w") + fh.setLevel(logging.DEBUG) + fmt = logging.Formatter("%(asctime)s %(levelname)s: %(message)s") + fh.setFormatter(fmt) + program_log.addHandler(fh)
+ + +
[docs]def find_templates(templatedir="/usr/share/lorax"): + """ Find the templates to use. + + :param str templatedir: Top directory to search for templates + :returns: Path to templates + :rtype: str + + If there is a templates.d directory under templatedir the + lowest numbered directory entry is returned. + + eg. /usr/share/lorax/templates.d/99-generic/ + """ + if os.path.isdir(joinpaths(templatedir, "templates.d")): + try: + templatedir = sorted(glob(joinpaths(templatedir, "templates.d", "*")))[0] + except IndexError: + pass + return templatedir
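+# Example: with both /usr/share/lorax/templates.d/80-custom/ and
+# /usr/share/lorax/templates.d/99-generic/ present (names illustrative),
+# find_templates("/usr/share/lorax") returns the lowest sorted entry,
+# /usr/share/lorax/templates.d/80-custom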
+ +
[docs]def log_selinux_state(): + """Log the current state of selinux""" + if selinux.is_selinux_enabled(): + if selinux.security_getenforce(): + logger.info("selinux is enabled and in Enforcing mode") + else: + logger.info("selinux is enabled and in Permissive mode") + else: + logger.info("selinux is Disabled")
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/bisect.html b/f32-branch/_modules/pylorax/api/bisect.html new file mode 100644 index 00000000..806781f3 --- /dev/null +++ b/f32-branch/_modules/pylorax/api/bisect.html @@ -0,0 +1,250 @@ + + + + + + + + + + + pylorax.api.bisect — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.api.bisect

+#
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
[docs]def insort_left(a, x, key=None, lo=0, hi=None): + """Insert item x in list a, and keep it sorted assuming a is sorted. + + :param a: sorted list + :type a: list + :param x: item to insert into the list + :type x: object + :param key: Function to use to compare items in the list + :type key: function + :returns: index where the item was inserted + :rtype: int + + If x is already in a, insert it to the left of the leftmost x. + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + + This is a modified version of bisect.insort_left that can use a + function for the compare, and returns the index position where it + was inserted. + """ + if key is None: + key = lambda i: i + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + while lo < hi: + mid = (lo+hi)//2 + if key(a[mid]) < key(x): lo = mid+1 + else: hi = mid + a.insert(lo, x) + return lo
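+# Usage sketch: keep a list sorted by a field and learn where the new item
+# landed (the data here is made up):
+#
+#     builds = [{"id": 1}, {"id": 3}]
+#     idx = insort_left(builds, {"id": 2}, key=lambda b: b["id"])
+#     # builds == [{"id": 1}, {"id": 2}, {"id": 3}], idx == 1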
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/checkparams.html b/f32-branch/_modules/pylorax/api/checkparams.html new file mode 100644 index 00000000..e6b5aea4 --- /dev/null +++ b/f32-branch/_modules/pylorax/api/checkparams.html @@ -0,0 +1,245 @@ + + + + + + + + + + + pylorax.api.checkparams — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.api.checkparams

+#
+# Copyright (C) 2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import logging
+log = logging.getLogger("lorax-composer")
+
+from flask import jsonify
+from functools import update_wrapper
+
+# A decorator for checking the parameters provided to the API route implementing
+# functions.  The tuples parameter is a list of tuples.  Each tuple is the string
+# name of a parameter ("blueprint_name", not blueprint_name), the value it's set
+# to by flask if the caller did not provide it, and a message to be returned to
+# the user.
+#
+# If the parameter is set to its default, the error message is returned.  Otherwise,
+# the decorated function is called and its return value is returned.
+
[docs]def checkparams(tuples): + def decorator(f): + def wrapped_function(*args, **kwargs): + for tup in tuples: + if kwargs[tup[0]] == tup[1]: + log.error("(%s) %s", f.__name__, tup[2]) + return jsonify(status=False, errors=[tup[2]]), 400 + + return f(*args, **kwargs) + + return update_wrapper(wrapped_function, f) + + return decorator
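+# Usage sketch (the route and messages are illustrative, not taken from the
+# API server): reject a request when flask filled a parameter with its default:
+#
+#     @api.route("/api/v0/blueprints/info", defaults={"blueprint_names": ""})
+#     @api.route("/api/v0/blueprints/info/<blueprint_names>")
+#     @checkparams([("blueprint_names", "", "no blueprint names given")])
+#     def v0_blueprints_info(blueprint_names):
+#         ...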
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/cmdline.html b/f32-branch/_modules/pylorax/api/cmdline.html new file mode 100644 index 00000000..e06ac911 --- /dev/null +++ b/f32-branch/_modules/pylorax/api/cmdline.html @@ -0,0 +1,264 @@ + + + + + + + + + + + pylorax.api.cmdline — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.api.cmdline

+#
+# cmdline.py
+#
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import os
+import sys
+import argparse
+
+from pylorax import vernum
+
+DEFAULT_USER  = "root"
+DEFAULT_GROUP = "weldr"
+
+version = "{0}-{1}".format(os.path.basename(sys.argv[0]), vernum)
+
+
[docs]def lorax_composer_parser(): + """ Return the ArgumentParser for lorax-composer""" + + parser = argparse.ArgumentParser(description="Lorax Composer API Server", + fromfile_prefix_chars="@") + + parser.add_argument("--socket", default="/run/weldr/api.socket", metavar="SOCKET", + help="Path to the socket file to listen on") + parser.add_argument("--user", default=DEFAULT_USER, metavar="USER", + help="User to use for reduced permissions") + parser.add_argument("--group", default=DEFAULT_GROUP, metavar="GROUP", + help="Group to set ownership of the socket to") + parser.add_argument("--log", dest="logfile", default="/var/log/lorax-composer/composer.log", metavar="LOG", + help="Path to logfile (/var/log/lorax-composer/composer.log)") + parser.add_argument("--mockfiles", default="/var/tmp/bdcs-mockfiles/", metavar="MOCKFILES", + help="Path to JSON files used for /api/mock/ paths (/var/tmp/bdcs-mockfiles/)") + parser.add_argument("--sharedir", type=os.path.abspath, metavar="SHAREDIR", + help="Directory containing all the templates. Overrides config file sharedir") + parser.add_argument("-V", action="store_true", dest="showver", + help="show program's version number and exit") + parser.add_argument("-c", "--config", default="/etc/lorax/composer.conf", metavar="CONFIG", + help="Path to lorax-composer configuration file.") + parser.add_argument("--releasever", default=None, metavar="STRING", + help="Release version to use for $releasever in dnf repository urls") + parser.add_argument("--tmp", default="/var/tmp", + help="Top level temporary directory") + parser.add_argument("--proxy", default=None, metavar="PROXY", + help="Set proxy for DNF, overrides configuration file setting.") + parser.add_argument("--no-system-repos", action="store_true", default=False, + help="Do not copy over system repos from /etc/yum.repos.d/ at startup") + parser.add_argument("BLUEPRINTS", metavar="BLUEPRINTS", + help="Path to the blueprints") + + return parser
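+# Usage sketch:
+#
+#     parser = lorax_composer_parser()
+#     opts = parser.parse_args(["--releasever", "32", "/path/to/blueprints"])
+#     # opts.releasever == "32", opts.BLUEPRINTS == "/path/to/blueprints"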
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/compose.html b/f32-branch/_modules/pylorax/api/compose.html new file mode 100644 index 00000000..d0d148d1 --- /dev/null +++ b/f32-branch/_modules/pylorax/api/compose.html @@ -0,0 +1,1468 @@ + + + + + + + + + + + pylorax.api.compose — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.api.compose

+# Copyright (C) 2018-2019 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+""" Setup for composing an image
+
+Adding New Output Types
+-----------------------
+
+The new output type must add a kickstart template to ./share/composer/ where the
+name of the kickstart (without the trailing .ks) matches the entry in compose_args.
+
+The kickstart should not have any url or repo entries, these will be added at build
+time. The %packages section should be the last thing, and while it can contain mandatory
+packages required by the output type, it should not have the trailing %end because the
+package NEVRAs will be appended to it at build time.
+
+compose_args should have a name matching the kickstart, and it should set the novirt_install
+parameters needed to generate the desired output. Other types should be set to False.
+
+"""
+import logging
+log = logging.getLogger("lorax-composer")
+
+import os
+from glob import glob
+from io import StringIO
+from math import ceil
+import shutil
+from uuid import uuid4
+
+# Use pykickstart to calculate disk image size
+from pykickstart.parser import KickstartParser
+from pykickstart.version import makeVersion
+
+from pylorax import ArchData, find_templates, get_buildarch
+from pylorax.api.gitrpm import create_gitrpm_repo
+from pylorax.api.projects import projects_depsolve, projects_depsolve_with_size, dep_nevra
+from pylorax.api.projects import ProjectsError
+from pylorax.api.recipes import read_recipe_and_id
+from pylorax.api.timestamp import TS_CREATED, write_timestamp
+import pylorax.api.toml as toml
+from pylorax.base import DataHolder
+from pylorax.imgutils import default_image_name
+from pylorax.ltmpl import LiveTemplateRunner
+from pylorax.sysutils import joinpaths, flatconfig
+
+
+
[docs]def test_templates(dbo, share_dir): + """ Try depsolving each of the templates and report any errors + + :param dbo: dnf base object + :type dbo: dnf.Base + :param share_dir: Directory containing the composer kickstart templates + :type share_dir: str + :returns: List of template types and errors + :rtype: List of errors + + Return a list of templates and errors encountered or an empty list + """ + template_errors = [] + for compose_type, enabled in compose_types(share_dir): + if not enabled: + continue + + # Read the kickstart template for this type + ks_template_path = joinpaths(share_dir, "composer", compose_type) + ".ks" + ks_template = open(ks_template_path, "r").read() + + # Parse the template's %packages section and check that it can be depsolved + ks_version = makeVersion() + ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) + ks.readKickstartFromString(ks_template+"\n%end\n") + pkgs = [(name, "*") for name in ks.handler.packages.packageList] + grps = [grp.name for grp in ks.handler.packages.groupList] + try: + projects_depsolve(dbo, pkgs, grps) + except ProjectsError as e: + template_errors.append("Error depsolving %s: %s" % (compose_type, str(e))) + + return template_errors
+ + +
[docs]def repo_to_ks(r, url="url"): + """ Return a kickstart line with the correct args. + :param r: DNF repository information + :type r: dnf.Repo + :param url: "url" or "baseurl" to use for the baseurl parameter + :type url: str + :returns: kickstart command arguments for url/repo command + :rtype: str + + Set url to "baseurl" if it is a repo, leave it as "url" for the installation url. + """ + cmd = "" + # url uses --url not --baseurl + if r.baseurl: + cmd += '--%s="%s" ' % (url, r.baseurl[0]) + elif r.metalink: + cmd += '--metalink="%s" ' % r.metalink + elif r.mirrorlist: + cmd += '--mirrorlist="%s" ' % r.mirrorlist + else: + raise RuntimeError("Repo has no baseurl, metalink, or mirrorlist") + + if r.proxy: + cmd += '--proxy="%s" ' % r.proxy + + if not r.sslverify: + cmd += '--noverifyssl' + + if r.sslcacert: + cmd += ' --sslcacert="%s"' % r.sslcacert + if r.sslclientcert: + cmd += ' --sslclientcert="%s"' % r.sslclientcert + if r.sslclientkey: + cmd += ' --sslclientkey="%s"' % r.sslclientkey + + return cmd
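+# Example (URL illustrative): a dnf repo with baseurl=["https://example.com/repo/"],
+# no proxy, and sslverify=True produces:
+#
+#     repo_to_ks(r, "baseurl")    # -> '--baseurl="https://example.com/repo/" '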
+ + +
[docs]def bootloader_append(line, kernel_append): + """ Insert the kernel_append string into the --append argument + + :param line: The bootloader ... line + :type line: str + :param kernel_append: The arguments to append to the --append section + :type kernel_append: str + + Using pykickstart to process the line is the best way to make sure it + is parsed correctly, and re-assembled for inclusion into the final kickstart + """ + ks_version = makeVersion() + ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) + ks.readKickstartFromString(line) + + if ks.handler.bootloader.appendLine: + ks.handler.bootloader.appendLine += " %s" % kernel_append + else: + ks.handler.bootloader.appendLine = kernel_append + + # Converting back to a string includes a comment, return just the bootloader line + return str(ks.handler.bootloader).splitlines()[-1]
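+# Example: appending a kernel argument to an existing bootloader line; the
+# exact argument order of the re-assembled line is up to pykickstart:
+#
+#     bootloader_append('bootloader --location=mbr --append="nosmt"', "console=ttyS0")
+#     # -> a bootloader line whose --append value is "nosmt console=ttyS0"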
+ + +
[docs]def get_kernel_append(recipe): + """Return the customizations.kernel append value + + :param recipe: + :type recipe: Recipe object + :returns: append value or empty string + :rtype: str + """ + if "customizations" not in recipe or \ + "kernel" not in recipe["customizations"] or \ + "append" not in recipe["customizations"]["kernel"]: + return "" + return recipe["customizations"]["kernel"]["append"]
+ + +
[docs]def timezone_cmd(line, settings): + """ Update the timezone line with the settings + + :param line: The timezone ... line + :type line: str + :param settings: A dict with timezone and/or ntpservers list + :type settings: dict + + Using pykickstart to process the line is the best way to make sure it + is parsed correctly, and re-assembled for inclusion into the final kickstart + """ + ks_version = makeVersion() + ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) + ks.readKickstartFromString(line) + + if "timezone" in settings: + ks.handler.timezone.timezone = settings["timezone"] + if "ntpservers" in settings: + ks.handler.timezone.ntpservers = settings["ntpservers"] + + # Converting back to a string includes a comment, return just the timezone line + return str(ks.handler.timezone).splitlines()[-1]
+ + +
[docs]def get_timezone_settings(recipe): + """Return the customizations.timezone dict + + :param recipe: + :type recipe: Recipe object + :returns: the timezone settings dict, or an empty dict + :rtype: dict + """ + if "customizations" not in recipe or \ + "timezone" not in recipe["customizations"]: + return {} + return recipe["customizations"]["timezone"]
+ + +
[docs]def lang_cmd(line, languages): + """ Update the lang line with the languages + + :param line: The lang ... line + :type line: str + :param languages: The list of languages + :type languages: list + + Using pykickstart to process the line is the best way to make sure it + is parsed correctly, and re-assembled for inclusion into the final kickstart + """ + ks_version = makeVersion() + ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) + ks.readKickstartFromString(line) + + if languages: + ks.handler.lang.lang = languages[0] + + if len(languages) > 1: + ks.handler.lang.addsupport = languages[1:] + + # Converting back to a string includes a comment, return just the lang line + return str(ks.handler.lang).splitlines()[-1]
+ + +
[docs]def get_languages(recipe): + """Return the customizations.locale.languages list + + :param recipe: The recipe + :type recipe: Recipe object + :returns: list of language strings + :rtype: list + """ + if "customizations" not in recipe or \ + "locale" not in recipe["customizations"] or \ + "languages" not in recipe["customizations"]["locale"]: + return [] + return recipe["customizations"]["locale"]["languages"]
+ + +
[docs]def keyboard_cmd(line, layout): + """ Update the keyboard line with the layout + + :param line: The keyboard ... line + :type line: str + :param layout: The keyboard layout + :type layout: str + + Using pykickstart to process the line is the best way to make sure it + is parsed correctly, and re-assembled for inclusion into the final kickstart + """ + ks_version = makeVersion() + ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) + ks.readKickstartFromString(line) + + if layout: + ks.handler.keyboard.keyboard = layout + ks.handler.keyboard.vc_keymap = "" + ks.handler.keyboard.x_layouts = [] + + # Converting back to a string includes a comment, return just the keyboard line + return str(ks.handler.keyboard).splitlines()[-1]
+ + +
[docs]def get_keyboard_layout(recipe): + """Return the customizations.locale.keyboard layout + + :param recipe: The recipe + :type recipe: Recipe object + :returns: The keyboard layout string, or an empty list when none is set + :rtype: str or list + """ + if "customizations" not in recipe or \ + "locale" not in recipe["customizations"] or \ + "keyboard" not in recipe["customizations"]["locale"]: + return [] + return recipe["customizations"]["locale"]["keyboard"]
+ + +
[docs]def firewall_cmd(line, settings): + """ Update the firewall line with the new ports and services + + :param line: The firewall ... line + :type line: str + :param settings: A dict with the list of services and ports to enable and disable + :type settings: dict + + Using pykickstart to process the line is the best way to make sure it + is parsed correctly, and re-assembled for inclusion into the final kickstart + """ + ks_version = makeVersion() + ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) + ks.readKickstartFromString(line) + + # Do not override firewall --disabled + if ks.handler.firewall.enabled != False and settings: + ks.handler.firewall.ports = sorted(set(settings["ports"] + ks.handler.firewall.ports)) + ks.handler.firewall.services = sorted(set(settings["enabled"] + ks.handler.firewall.services)) + ks.handler.firewall.remove_services = sorted(set(settings["disabled"] + ks.handler.firewall.remove_services)) + + # Converting back to a string includes a comment, return just the firewall line + return str(ks.handler.firewall).splitlines()[-1]
+ + +
[docs]def get_firewall_settings(recipe): + """Return the customizations.firewall settings + + :param recipe: The recipe + :type recipe: Recipe object + :returns: A dict of settings + :rtype: dict + """ + settings = {"ports": [], "enabled": [], "disabled": []} + + if "customizations" not in recipe or \ + "firewall" not in recipe["customizations"]: + return settings + + settings["ports"] = recipe["customizations"]["firewall"].get("ports", []) + + if "services" in recipe["customizations"]["firewall"]: + settings["enabled"] = recipe["customizations"]["firewall"]["services"].get("enabled", []) + settings["disabled"] = recipe["customizations"]["firewall"]["services"].get("disabled", []) + return settings
+ + +
[docs]def services_cmd(line, settings): + """ Update the services line with additional services to enable/disable + + :param line: The services ... line + :type line: str + :param settings: A dict with the list of services to enable and disable + :type settings: dict + + Using pykickstart to process the line is the best way to make sure it + is parsed correctly, and re-assembled for inclusion into the final kickstart + """ + # Empty services and no additional settings, return an empty string + if not line and not settings["enabled"] and not settings["disabled"]: + return "" + + ks_version = makeVersion() + ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) + + # Allow passing in a 'default' so that the enable/disable may be applied to it, without + # parsing it and emitting a kickstart error message + if line != "services": + ks.readKickstartFromString(line) + + # Add to any existing services, removing any duplicates + ks.handler.services.enabled = sorted(set(settings["enabled"] + ks.handler.services.enabled)) + ks.handler.services.disabled = sorted(set(settings["disabled"] + ks.handler.services.disabled)) + + # Converting back to a string includes a comment, return just the services line + return str(ks.handler.services).splitlines()[-1]
+ + +
[docs]def get_services(recipe): + """Return the customizations.services settings + + :param recipe: The recipe + :type recipe: Recipe object + :returns: A dict of settings + :rtype: dict + """ + settings = {"enabled": [], "disabled": []} + + if "customizations" not in recipe or \ + "services" not in recipe["customizations"]: + return settings + + settings["enabled"] = sorted(recipe["customizations"]["services"].get("enabled", [])) + settings["disabled"] = sorted(recipe["customizations"]["services"].get("disabled", [])) + return settings
+ + +
[docs]def get_default_services(recipe): + """Get the default string for services, based on recipe + + :param recipe: The recipe + :type recipe: Recipe object + :returns: string with "services" or "" + :rtype: str + + When no services have been selected we don't need to add anything to the kickstart + so return an empty string. Otherwise return "services" which will be updated with + the settings. + """ + services = get_services(recipe) + + if services["enabled"] or services["disabled"]: + return "services" + else: + return ""
+ + +
[docs]def customize_ks_template(ks_template, recipe): + """ Customize the kickstart template and return it + + :param ks_template: The kickstart template + :type ks_template: str + :param recipe: + :type recipe: Recipe object + + Apply customizations to existing template commands, or add defaults for ones that are + missing and required. + + Apply customizations.kernel.append to the bootloader argument in the template. + Add bootloader line if it is missing. + + Add default timezone if needed. It does NOT replace an existing timezone entry + """ + # Commands to be modified [NEW-COMMAND-FUNC, NEW-VALUE, DEFAULT, REPLACE] + # The function is called with a kickstart command string and the value to replace + # The value is specific to the command, and is understood by the function + # The default is a complete kickstart command string, suitable for writing to the template + # If REPLACE is False it will not change an existing entry only add a missing one + commands = {"bootloader": [bootloader_append, + get_kernel_append(recipe), + 'bootloader --location=none', True], + "timezone": [timezone_cmd, + get_timezone_settings(recipe), + 'timezone UTC', False], + "lang": [lang_cmd, + get_languages(recipe), + 'lang en_US.UTF-8', True], + "keyboard": [keyboard_cmd, + get_keyboard_layout(recipe), + 'keyboard --xlayouts us --vckeymap us', True], + "firewall": [firewall_cmd, + get_firewall_settings(recipe), + 'firewall --enabled', True], + "services": [services_cmd, + get_services(recipe), + get_default_services(recipe), True] + } + found = {} + + output = StringIO() + for line in ks_template.splitlines(): + for cmd in commands: + (new_command, value, default, replace) = commands[cmd] + if line.startswith(cmd): + found[cmd] = True + if value and replace: + log.debug("Replacing %s with %s", cmd, value) + print(new_command(line, value), file=output) + else: + log.debug("Skipping %s", cmd) + print(line, file=output) + break + else: + # No matches, write the line as-is + print(line, file=output) + + # Write out defaults for the ones not found + # These must go FIRST because the template still needs to have the packages added + defaults = StringIO() + for cmd in commands: + if cmd in found: + continue + (new_command, value, default, _) = commands[cmd] + if value and default: + log.debug("Setting %s to use %s", cmd, value) + print(new_command(default, value), file=defaults) + elif default: + log.debug("Setting %s to %s", cmd, default) + print(default, file=defaults) + + return defaults.getvalue() + output.getvalue()
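+# Sketch of the blueprint TOML sections the get_*() helpers above read; the
+# values are illustrative:
+#
+#     [customizations.kernel]
+#     append = "nosmt=force"
+#
+#     [customizations.timezone]
+#     timezone = "US/Eastern"
+#     ntpservers = ["0.pool.ntp.org"]
+#
+#     [customizations.locale]
+#     languages = ["en_CA.utf8"]
+#     keyboard = "de (dvorak)"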
+ + +
[docs]def write_ks_root(f, user): + """ Write kickstart root password and sshkey entry + + :param f: kickstart file object + :type f: open file object + :param user: A blueprint user dictionary + :type user: dict + :returns: True if it wrote a rootpw command to the kickstart + :rtype: bool + + If the entry contains an ssh key, use sshkey to write it + If it contains a password, use rootpw to set it + + root cannot be used with the user command. So only key and password are supported + for root. + """ + wrote_rootpw = False + + # ssh key uses the sshkey kickstart command + if "key" in user: + f.write('sshkey --user %s "%s"\n' % (user["name"], user["key"])) + + if "password" in user: + if any(user["password"].startswith(prefix) for prefix in ["$2b$", "$6$", "$5$"]): + log.debug("Detected pre-crypted password") + f.write('rootpw --iscrypted "%s"\n' % user["password"]) + wrote_rootpw = True + else: + log.debug("Detected plaintext password") + f.write('rootpw --plaintext "%s"\n' % user["password"]) + wrote_rootpw = True + + return wrote_rootpw
+ +
[docs]def write_ks_user(f, user): + """ Write kickstart user and sshkey entry + + :param f: kickstart file object + :type f: open file object + :param user: A blueprint user dictionary + :type user: dict + + If the entry contains an ssh key, use sshkey to write it + All of the user fields are optional except name. Write out a kickstart user entry + with whatever options are relevant. + """ + # ssh key uses the sshkey kickstart command + if "key" in user: + f.write('sshkey --user %s "%s"\n' % (user["name"], user["key"])) + + # Write out the user kickstart command, much of it is optional + f.write("user --name %s" % user["name"]) + if "home" in user: + f.write(" --homedir %s" % user["home"]) + + if "password" in user: + if any(user["password"].startswith(prefix) for prefix in ["$2b$", "$6$", "$5$"]): + log.debug("Detected pre-crypted password") + f.write(" --iscrypted") + else: + log.debug("Detected plaintext password") + f.write(" --plaintext") + + f.write(" --password \"%s\"" % user["password"]) + + if "shell" in user: + f.write(" --shell %s" % user["shell"]) + + if "uid" in user: + f.write(" --uid %d" % int(user["uid"])) + + if "gid" in user: + f.write(" --gid %d" % int(user["gid"])) + + if "description" in user: + f.write(" --gecos \"%s\"" % user["description"]) + + if "groups" in user: + f.write(" --groups %s" % ",".join(user["groups"])) + + f.write("\n")
+ + +
[docs]def write_ks_group(f, group): + """ Write kickstart group entry + + :param f: kickstart file object + :type f: open file object + :param group: A blueprint group dictionary + :type group: dict + + gid is optional + """ + if "name" not in group: + raise RuntimeError("group entry requires a name") + + f.write("group --name %s" % group["name"]) + if "gid" in group: + f.write(" --gid %d" % int(group["gid"])) + + f.write("\n")
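A hedged usage sketch, assuming this module is importable as `pylorax.api.compose`: writing a blueprint user and group into an in-memory kickstart fragment.

```python
import io
from pylorax.api.compose import write_ks_user, write_ks_group

ks = io.StringIO()
write_ks_user(ks, {"name": "admin", "key": "ssh-rsa AAAA... admin@host",
                   "shell": "/bin/bash", "groups": ["wheel"]})
write_ks_group(ks, {"name": "backup", "gid": 1200})
print(ks.getvalue())
# sshkey --user admin "ssh-rsa AAAA... admin@host"
# user --name admin --shell /bin/bash --groups wheel
# group --name backup --gid 1200
```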
+ + +
[docs]def add_customizations(f, recipe): + """ Add customizations to the kickstart file + + :param f: kickstart file object + :type f: open file object + :param recipe: The recipe with the customizations to apply + :type recipe: Recipe object + :returns: None + :raises: RuntimeError if a user or group entry is missing the name field + """ + if "customizations" not in recipe: + f.write('rootpw --lock\n') + return + customizations = recipe["customizations"] + + # allow customizations to be incorrectly specified as [[customizations]] instead of [customizations] + if isinstance(customizations, list): + customizations = customizations[0] + + if "hostname" in customizations: + f.write("network --hostname=%s\n" % customizations["hostname"]) + + # TODO - remove this, should use user section to define this + if "sshkey" in customizations: + # This is a list of entries + for sshkey in customizations["sshkey"]: + if "user" not in sshkey or "key" not in sshkey: + log.error("%s is incorrect, skipping", sshkey) + continue + f.write('sshkey --user %s "%s"\n' % (sshkey["user"], sshkey["key"])) + + # Creating a user also creates a group. Make a list of the names for later + user_groups = [] + # kickstart requires a rootpw line + wrote_rootpw = False + if "user" in customizations: + # only name is required, everything else is optional + for user in customizations["user"]: + if "name" not in user: + raise RuntimeError("user entry requires a name") + + # root is special, cannot use normal user command for it + if user["name"] == "root": + wrote_rootpw = write_ks_root(f, user) + continue + + write_ks_user(f, user) + user_groups.append(user["name"]) + + if "group" in customizations: + for group in customizations["group"]: + if group["name"] not in user_groups: + write_ks_group(f, group) + else: + log.warning("Skipping group %s, already created by user", group["name"]) + + # Lock the root account if no root user password has been specified + if not wrote_rootpw: + f.write('rootpw --lock\n')
+ + +
[docs]def get_extra_pkgs(dbo, share_dir, compose_type): + """Return extra packages needed for the output type + + :param dbo: dnf base object + :type dbo: dnf.Base + :param share_dir: Path to the top level share directory + :type share_dir: str + :param compose_type: The type of output to create from the recipe + :type compose_type: str + :returns: List of package names (name only, not NEVRA) + :rtype: list + + Currently this is only needed by live-iso; it reads ./live/live-install.tmpl and + processes only the installpkg lines, which list the packages needed to complete creation of the + iso using templates such as x86.tmpl + + Keep in mind that the live-install.tmpl is shared between livemedia-creator and lorax-composer, + even though the results are applied differently. + """ + if compose_type != "live-iso": + return [] + + # get the arch information to pass to the runner + arch = ArchData(get_buildarch(dbo)) + defaults = DataHolder(basearch=arch.basearch) + templatedir = joinpaths(find_templates(share_dir), "live") + runner = LiveTemplateRunner(dbo, templatedir=templatedir, defaults=defaults) + runner.run("live-install.tmpl") + log.debug("extra pkgs = %s", runner.pkgs) + + return runner.pkgnames
+ + +
[docs]def start_build(cfg, dnflock, gitlock, branch, recipe_name, compose_type, test_mode=0): + """ Start the build + + :param cfg: Configuration object + :type cfg: ComposerConfig + :param dnflock: Lock and dnf.Base for depsolving + :type dnflock: DNFLock + :param gitlock: Lock and git repo for reading the recipe + :type gitlock: GitLock + :param branch: The git branch holding the recipe + :type branch: str + :param recipe_name: The name of the recipe to build + :type recipe_name: str + :param compose_type: The type of output to create from the recipe + :type compose_type: str + :param test_mode: Set to 1 to start a compose that will fail, 2 to start one that will finish without running anaconda + :type test_mode: int + :returns: Unique ID for the build that can be used to track its status + :rtype: str + """ + share_dir = cfg.get("composer", "share_dir") + lib_dir = cfg.get("composer", "lib_dir") + + # Make sure compose_type is valid, only allow enabled types + type_enabled = dict(compose_types(share_dir)).get(compose_type) + if type_enabled is None: + raise RuntimeError("Invalid compose type (%s), must be one of %s" % (compose_type, [t for t, e in compose_types(share_dir)])) + if not type_enabled: + raise RuntimeError("Compose type '%s' is disabled on this architecture" % compose_type) + + # Some image types (live-iso) need extra packages for composer to execute the output template + with dnflock.lock: + extra_pkgs = get_extra_pkgs(dnflock.dbo, share_dir, compose_type) + log.debug("Extra packages needed for %s: %s", compose_type, extra_pkgs) + + with gitlock.lock: + (commit_id, recipe) = read_recipe_and_id(gitlock.repo, branch, recipe_name) + + # Combine modules and packages and depsolve the list + module_nver = recipe.module_nver + package_nver = recipe.package_nver + package_nver.extend([(name, '*') for name in extra_pkgs]) + + projects = sorted(set(module_nver+package_nver), key=lambda p: p[0].lower()) + deps = [] + log.info("depsolving %s", recipe["name"]) + try: + # This can possibly update repodata and reset the dnf.Base object. + with dnflock.lock_check: + (installed_size, deps) = projects_depsolve_with_size(dnflock.dbo, projects, recipe.group_names, with_core=False) + except ProjectsError as e: + log.error("start_build depsolve: %s", str(e)) + raise RuntimeError("Problem depsolving %s: %s" % (recipe["name"], str(e))) + + # Read the kickstart template for this type + ks_template_path = joinpaths(share_dir, "composer", compose_type) + ".ks" + ks_template = open(ks_template_path, "r").read() + + # How much space will the packages in the default template take? + ks_version = makeVersion() + ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) + ks.readKickstartFromString(ks_template+"\n%end\n") + pkgs = [(name, "*") for name in ks.handler.packages.packageList] + grps = [grp.name for grp in ks.handler.packages.groupList] + try: + with dnflock.lock: + (template_size, _) = projects_depsolve_with_size(dnflock.dbo, pkgs, grps, with_core=not ks.handler.packages.nocore) + except ProjectsError as e: + log.error("start_build depsolve: %s", str(e)) + raise RuntimeError("Problem depsolving %s: %s" % (recipe["name"], str(e))) + log.debug("installed_size = %d, template_size=%d", installed_size, template_size) + + # Minimum LMC disk size is 1GiB, and anaconda bumps the estimated size up by 10% (which doesn't always work). 
+ installed_size = int((installed_size+template_size)) * 1.2 + log.debug("/ partition size = %d", installed_size) + + # Create the results directory + build_id = str(uuid4()) + results_dir = joinpaths(lib_dir, "results", build_id) + os.makedirs(results_dir) + + # Write the recipe commit hash + commit_path = joinpaths(results_dir, "COMMIT") + with open(commit_path, "w") as f: + f.write(commit_id) + + # Write the original recipe + recipe_path = joinpaths(results_dir, "blueprint.toml") + with open(recipe_path, "w") as f: + f.write(recipe.toml()) + + # Write the frozen recipe + frozen_recipe = recipe.freeze(deps) + recipe_path = joinpaths(results_dir, "frozen.toml") + with open(recipe_path, "w") as f: + f.write(frozen_recipe.toml()) + + # Write out the dependencies to the results dir + deps_path = joinpaths(results_dir, "deps.toml") + with open(deps_path, "w") as f: + f.write(toml.dumps({"packages":deps})) + + # Save a copy of the original kickstart + shutil.copy(ks_template_path, results_dir) + + with dnflock.lock: + repos = list(dnflock.dbo.repos.iter_enabled()) + if not repos: + raise RuntimeError("No enabled repos, canceling build.") + + # Create the git rpms, if any, and return the path to the repo under results_dir + gitrpm_repo = create_gitrpm_repo(results_dir, recipe) + + # Create the final kickstart with repos and package list + ks_path = joinpaths(results_dir, "final-kickstart.ks") + with open(ks_path, "w") as f: + ks_url = repo_to_ks(repos[0], "url") + log.debug("url = %s", ks_url) + f.write('url %s\n' % ks_url) + for idx, r in enumerate(repos[1:]): + ks_repo = repo_to_ks(r, "baseurl") + log.debug("repo composer-%s = %s", idx, ks_repo) + f.write('repo --name="composer-%s" %s\n' % (idx, ks_repo)) + + if gitrpm_repo: + log.debug("repo gitrpms = %s", gitrpm_repo) + f.write('repo --name="gitrpms" --baseurl="file://%s"\n' % gitrpm_repo) + + # Setup the disk for booting + # TODO Add GPT and UEFI boot support + f.write('clearpart --all --initlabel\n') + + # Write the root partition and its size in MiB (rounded up) + f.write('part / --size=%d\n' % ceil(installed_size / 1024**2)) + + # Some customizations modify the template before writing it + f.write(customize_ks_template(ks_template, recipe)) + + for d in deps: + f.write(dep_nevra(d)+"\n") + + # Include the rpms from the gitrpm repo directory + if gitrpm_repo: + for rpm in glob(os.path.join(gitrpm_repo, "*.rpm")): + f.write(os.path.basename(rpm)[:-4]+"\n") + + f.write("%end\n") + + # Other customizations can be appended to the kickstart + add_customizations(f, recipe) + + # Setup the config to pass to novirt_install + log_dir = joinpaths(results_dir, "logs/") + cfg_args = compose_args(compose_type) + + # Get the title, project, and release version from the host + if not os.path.exists("/etc/os-release"): + log.error("/etc/os-release is missing, cannot determine product or release version") + os_release = flatconfig("/etc/os-release") + + log.debug("os_release = %s", dict(os_release.items())) + + cfg_args["title"] = os_release.get("PRETTY_NAME", "") + cfg_args["project"] = os_release.get("NAME", "") + cfg_args["releasever"] = os_release.get("VERSION_ID", "") + cfg_args["volid"] = "" + cfg_args["extra_boot_args"] = get_kernel_append(recipe) + + if "compression" not in cfg_args: + cfg_args["compression"] = "xz" + + if "compress_args" not in cfg_args: + cfg_args["compress_args"] = [] + + cfg_args.update({ + "ks": [ks_path], + "logfile": log_dir, + "timeout": 60, # 60 minute timeout + }) + with open(joinpaths(results_dir, "config.toml"), "w") 
as f: + f.write(toml.dumps(cfg_args)) + + # Set the initial status + open(joinpaths(results_dir, "STATUS"), "w").write("WAITING") + + # Set the test mode, if requested + if test_mode > 0: + open(joinpaths(results_dir, "TEST"), "w").write("%s" % test_mode) + + write_timestamp(results_dir, TS_CREATED) + log.info("Adding %s (%s %s) to compose queue", build_id, recipe["name"], compose_type) + os.symlink(results_dir, joinpaths(lib_dir, "queue/new/", build_id)) + + return build_id
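The `/` partition size written to the final kickstart is the depsolved recipe size plus the template's package size, padded by 20% and converted to MiB (rounded up). A small worked example of that arithmetic, with illustrative byte counts:

```python
from math import ceil

installed_size = 3_500_000_000   # bytes, from depsolving the recipe
template_size = 800_000_000      # bytes, from depsolving the template's packages

padded = int(installed_size + template_size) * 1.2
print('part / --size=%d' % ceil(padded / 1024**2))   # part / --size=4921
```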
+ +# Supported output types +
[docs]def compose_types(share_dir): + r""" Returns a list of tuples of the supported output types, and their state + + The output types come from the kickstart names in /usr/share/lorax/composer/\*ks + + If they are disabled on the current arch their state is False. If enabled, it is True. + eg. [("alibaba", False), ("ext4-filesystem", True), ...] + """ + # These are compose types that are not supported on an architecture. eg. hyper-v on s390 + # If it is not listed, it is allowed + disable_map = { + "arm": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], + "armhfp": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], + "aarch64": ["alibaba", "google", "hyper-v", "vhd", "vmdk"], + "ppc": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], + "ppc64": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], + "ppc64le": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], + "s390": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], + "s390x": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], + } + + all_types = sorted([os.path.basename(ks)[:-3] for ks in glob(joinpaths(share_dir, "composer/*.ks"))]) + arch_disabled = disable_map.get(os.uname().machine, []) + + return [(t, t not in arch_disabled) for t in all_types]
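A hedged usage sketch, assuming the standard share directory path: filter `compose_types()` down to the types enabled on the current architecture.

```python
from pylorax.api.compose import compose_types

enabled = [t for t, ok in compose_types("/usr/share/lorax") if ok]
print(enabled)   # e.g. ['ext4-filesystem', 'live-iso', 'qcow2', ...] on x86_64
```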
+ +
[docs]def compose_args(compose_type): + """ Returns the settings to pass to novirt_install for the compose type + + :param compose_type: The type of compose to create, from `compose_types()` + :type compose_type: str + + This will return a dict of options that match the ArgumentParser options for livemedia-creator. + These are the ones that define the type of output, its filename, etc. + Other options will be filled in by `make_compose()` + """ + _MAP = {"tar": {"make_iso": False, + "make_disk": False, + "make_fsimage": False, + "make_appliance": False, + "make_ami": False, + "make_tar": True, + "make_tar_disk": False, + "make_pxe_live": False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 0, + "image_type": False, # False instead of None because of TOML + "qemu_args": [], + "image_name": default_image_name("xz", "root.tar"), + "tar_disk_name": None, + "image_only": True, + "app_name": None, + "app_template": None, + "app_file": None, + "squashfs_only": False, + }, + "liveimg-tar": {"make_iso": False, + "make_disk": False, + "make_fsimage": False, + "make_appliance": False, + "make_ami": False, + "make_tar": True, + "make_tar_disk": False, + "make_pxe_live": False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 0, + "image_type": False, # False instead of None because of TOML + "qemu_args": [], + "image_name": default_image_name("xz", "root.tar"), + "tar_disk_name": None, + "image_only": True, + "app_name": None, + "app_template": None, + "app_file": None, + "squashfs_only": False, + }, + "live-iso": {"make_iso": True, + "make_disk": False, + "make_fsimage": False, + "make_appliance": False, + "make_ami": False, + "make_tar": False, + "make_tar_disk": False, + "make_pxe_live": False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 0, + "image_type": False, # False instead of None because of TOML + "qemu_args": [], + "image_name": "live.iso", + "tar_disk_name": None, + "fs_label": "Anaconda", # Live booting may expect this to be 'Anaconda' + "image_only": False, + "app_name": None, + "app_template": None, + "app_file": None, + "iso_only": True, + "iso_name": "live.iso", + "squashfs_only": False, + }, + "partitioned-disk": {"make_iso": False, + "make_disk": True, + "make_fsimage": False, + "make_appliance": False, + "make_ami": False, + "make_tar": False, + "make_tar_disk": False, + "make_pxe_live": False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 0, + "image_type": False, # False instead of None because of TOML + "qemu_args": [], + "image_name": "disk.img", + "tar_disk_name": None, + "fs_label": "", + "image_only": True, + "app_name": None, + "app_template": None, + "app_file": None, + "squashfs_only": False, + }, + "qcow2": {"make_iso": False, + "make_disk": True, + "make_fsimage": False, + "make_appliance": False, + "make_ami": False, + "make_tar": False, + "make_tar_disk": False, + "make_pxe_live": False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 0, 
+ "image_type": "qcow2", + "qemu_args": [], + "image_name": "disk.qcow2", + "tar_disk_name": None, + "fs_label": "", + "image_only": True, + "app_name": None, + "app_template": None, + "app_file": None, + "squashfs_only": False, + }, + "ext4-filesystem": {"make_iso": False, + "make_disk": False, + "make_fsimage": True, + "make_appliance": False, + "make_ami": False, + "make_tar": False, + "make_tar_disk": False, + "make_pxe_live": False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 0, + "image_type": False, # False instead of None because of TOML + "qemu_args": [], + "image_name": "filesystem.img", + "tar_disk_name": None, + "fs_label": "", + "image_only": True, + "app_name": None, + "app_template": None, + "app_file": None, + "squashfs_only": False, + }, + "ami": {"make_iso": False, + "make_disk": True, + "make_fsimage": False, + "make_appliance": False, + "make_ami": False, + "make_tar": False, + "make_tar_disk": False, + "make_pxe_live": False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 0, + "image_type": False, + "qemu_args": [], + "image_name": "disk.ami", + "tar_disk_name": None, + "fs_label": "", + "image_only": True, + "app_name": None, + "app_template": None, + "app_file": None, + "squashfs_only": False, + }, + "vhd": {"make_iso": False, + "make_disk": True, + "make_fsimage": False, + "make_appliance": False, + "make_ami": False, + "make_tar": False, + "make_tar_disk": False, + "make_pxe_live": False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 0, + "image_type": "vpc", + "qemu_args": ["-o", "subformat=fixed,force_size"], + "image_name": "disk.vhd", + "tar_disk_name": None, + "fs_label": "", + "image_only": True, + "app_name": None, + "app_template": None, + "app_file": None, + "squashfs_only": False, + }, + "vmdk": {"make_iso": False, + "make_disk": True, + "make_fsimage": False, + "make_appliance": False, + "make_ami": False, + "make_tar": False, + "make_tar_disk": False, + "make_pxe_live": False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 0, + "image_type": "vmdk", + "qemu_args": [], + "image_name": "disk.vmdk", + "tar_disk_name": None, + "fs_label": "", + "image_only": True, + "app_name": None, + "app_template": None, + "app_file": None, + "squashfs_only": False, + }, + "openstack": {"make_iso": False, + "make_disk": True, + "make_fsimage": False, + "make_appliance": False, + "make_ami": False, + "make_tar": False, + "make_tar_disk": False, + "make_pxe_live": False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 0, + "image_type": "qcow2", + "qemu_args": [], + "image_name": "disk.qcow2", + "tar_disk_name": None, + "fs_label": "", + "image_only": True, + "app_name": None, + "app_template": None, + "app_file": None, + "squashfs_only": False, + }, + "google": {"make_iso": False, + "make_disk": True, + "make_fsimage": False, + "make_appliance": False, + "make_ami": False, + "make_tar": False, + "make_tar_disk": True, + "make_pxe_live": 
False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 1024, + "image_type": False, # False instead of None because of TOML + "qemu_args": [], + "image_name": "disk.tar.gz", + "tar_disk_name": "disk.raw", + "compression": "gzip", + "compress_args": ["-9"], + "fs_label": "", + "image_only": True, + "app_name": None, + "app_template": None, + "app_file": None, + "squashfs_only": False, + }, + "hyper-v": {"make_iso": False, + "make_disk": True, + "make_fsimage": False, + "make_appliance": False, + "make_ami": False, + "make_tar": False, + "make_tar_disk": False, + "make_pxe_live": False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 0, + "image_type": "vhdx", + "qemu_args": [], + "image_name": "disk.vhdx", + "tar_disk_name": None, + "fs_label": "", + "image_only": True, + "app_name": None, + "app_template": None, + "app_file": None, + "squashfs_only": False, + }, + "alibaba": {"make_iso": False, + "make_disk": True, + "make_fsimage": False, + "make_appliance": False, + "make_ami": False, + "make_tar": False, + "make_tar_disk": False, + "make_pxe_live": False, + "make_ostree_live": False, + "make_oci": False, + "make_vagrant": False, + "ostree": False, + "live_rootfs_keep_size": False, + "live_rootfs_size": 0, + "image_size_align": 0, + "image_type": "qcow2", + "qemu_args": [], + "image_name": "disk.qcow2", + "tar_disk_name": None, + "fs_label": "", + "image_only": True, + "app_name": None, + "app_template": None, + "app_file": None, + "squashfs_only": False, + }, + } + return _MAP[compose_type]
+ +
[docs]def move_compose_results(cfg, results_dir): + """Move the final image to the results_dir and cleanup the unneeded compose files + + :param cfg: Build configuration + :type cfg: DataHolder + :param results_dir: Directory to put the results into + :type results_dir: str + """ + if cfg["make_tar"]: + shutil.move(joinpaths(cfg["result_dir"], cfg["image_name"]), results_dir) + elif cfg["make_iso"]: + # Output from live iso is always a boot.iso under images/, move and rename it + shutil.move(joinpaths(cfg["result_dir"], cfg["iso_name"]), joinpaths(results_dir, cfg["image_name"])) + elif cfg["make_disk"] or cfg["make_fsimage"]: + shutil.move(joinpaths(cfg["result_dir"], cfg["image_name"]), joinpaths(results_dir, cfg["image_name"])) + + + # Cleanup the compose directory, but only if it looks like a compose directory + if os.path.basename(cfg["result_dir"]) == "compose": + shutil.rmtree(cfg["result_dir"]) + else: + log.error("Incorrect compose directory, not cleaning up")
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/config.html b/f32-branch/_modules/pylorax/api/config.html new file mode 100644 index 00000000..4964426a --- /dev/null +++ b/f32-branch/_modules/pylorax/api/config.html @@ -0,0 +1,341 @@ + + + + + + + + + + + pylorax.api.config — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.api.config

+#
+# Copyright (C) 2017  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import configparser
+import grp
+import os
+import pwd
+
+from pylorax.sysutils import joinpaths
+
+
[docs]class ComposerConfig(configparser.ConfigParser): +
[docs] def get_default(self, section, option, default): + try: + return self.get(section, option) + except configparser.Error: + return default
+ + +
[docs]def configure(conf_file="/etc/lorax/composer.conf", root_dir="/", test_config=False): + """lorax-composer configuration + + :param conf_file: Path to the config file overriding the default settings + :type conf_file: str + :param root_dir: Directory to prepend to paths, defaults to / + :type root_dir: str + :param test_config: Set to True to skip reading conf_file + :type test_config: bool + :returns: Configuration + :rtype: ComposerConfig + """ + conf = ComposerConfig() + + # set defaults + conf.add_section("composer") + conf.set("composer", "share_dir", os.path.realpath(joinpaths(root_dir, "/usr/share/lorax/"))) + conf.set("composer", "lib_dir", os.path.realpath(joinpaths(root_dir, "/var/lib/lorax/composer/"))) + conf.set("composer", "repo_dir", os.path.realpath(joinpaths(root_dir, "/var/lib/lorax/composer/repos.d/"))) + conf.set("composer", "dnf_conf", os.path.realpath(joinpaths(root_dir, "/var/tmp/composer/dnf.conf"))) + conf.set("composer", "dnf_root", os.path.realpath(joinpaths(root_dir, "/var/tmp/composer/dnf/root/"))) + conf.set("composer", "cache_dir", os.path.realpath(joinpaths(root_dir, "/var/tmp/composer/cache/"))) + conf.set("composer", "tmp", os.path.realpath(joinpaths(root_dir, "/var/tmp/"))) + + conf.add_section("users") + conf.set("users", "root", "1") + + # Enable all available repo files by default + conf.add_section("repos") + conf.set("repos", "use_system_repos", "1") + conf.set("repos", "enabled", "*") + + conf.add_section("dnf") + + if not test_config: + # read the config file + if os.path.isfile(conf_file): + conf.read(conf_file) + + return conf
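A hedged usage sketch: build the default configuration without reading /etc/lorax/composer.conf, then read values with `get_default()` as a fallback-aware getter.

```python
from pylorax.api.config import configure

conf = configure(test_config=True)             # skip reading the config file
print(conf.get("composer", "share_dir"))       # /usr/share/lorax
print(conf.get_default("dnf", "proxy", None))  # None unless a proxy was configured
```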
+ +
[docs]def make_owned_dir(p_dir, uid, gid): + """Make a directory and its parents, setting owner and group + + :param p_dir: path to directory to create + :type p_dir: string + :param uid: uid of owner + :type uid: int + :param gid: gid of owner + :type gid: int + :returns: list of errors + :rtype: list of str + + Check to make sure it does not have o+rw permissions and that it is owned by uid:gid + """ + errors = [] + if not os.path.isdir(p_dir): + # Make sure no o+rw permissions are set + orig_umask = os.umask(0o006) + os.makedirs(p_dir, 0o771) + os.chown(p_dir, uid, gid) + os.umask(orig_umask) + else: + p_stat = os.stat(p_dir) + if p_stat.st_mode & 0o006 != 0: + errors.append("Incorrect permissions on %s, no o+rw permissions are allowed." % p_dir) + + if p_stat.st_gid != gid or p_stat.st_uid != 0: + gr_name = grp.getgrgid(gid).gr_name + u_name = pwd.getpwuid(uid).pw_name + errors.append("%s should be owned by %s:%s" % (p_dir, u_name, gr_name)) + + return errors
+ +
[docs]def make_dnf_dirs(conf, uid, gid): + """Make any missing dnf directories owned by user:group + + :param conf: The configuration to use + :type conf: ComposerConfig + :param uid: uid of owner + :type uid: int + :param gid: gid of owner + :type gid: int + :returns: list of errors + :rtype: list of str + """ + errors = [] + for p in ["dnf_conf", "repo_dir", "cache_dir", "dnf_root"]: + p_dir = os.path.abspath(conf.get("composer", p)) + if p == "dnf_conf": + p_dir = os.path.dirname(p_dir) + errors.extend(make_owned_dir(p_dir, uid, gid)) + + return errors
+ +
[docs]def make_queue_dirs(conf, gid): + """Make any missing queue directories + + :param conf: The configuration to use + :type conf: ComposerConfig + :param gid: Group ID that has access to the queue directories + :type gid: int + :returns: list of errors + :rtype: list of str + """ + errors = [] + lib_dir = conf.get("composer", "lib_dir") + for p in ["queue/run", "queue/new", "results"]: + p_dir = joinpaths(lib_dir, p) + errors.extend(make_owned_dir(p_dir, 0, gid)) + return errors
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/crossdomain.html b/f32-branch/_modules/pylorax/api/crossdomain.html new file mode 100644 index 00000000..d8f653fe --- /dev/null +++ b/f32-branch/_modules/pylorax/api/crossdomain.html @@ -0,0 +1,264 @@ + + + + + + + + + + + pylorax.api.crossdomain — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.api.crossdomain

+#
+# Copyright (C) 2017  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+# crossdomain decorator from - http://flask.pocoo.org/snippets/56/
+from datetime import timedelta
+from flask import make_response, request, current_app
+from functools import update_wrapper
+
+
+
[docs]def crossdomain(origin, methods=None, headers=None, + max_age=21600, attach_to_all=True, + automatic_options=True): + if methods is not None: + methods = ', '.join(sorted(x.upper() for x in methods)) + if headers is not None and not isinstance(headers, str): + headers = ', '.join(x.upper() for x in headers) + if not isinstance(origin, list): + origin = [origin] + if isinstance(max_age, timedelta): + max_age = int(max_age.total_seconds()) + + def get_methods(): + if methods is not None: + return methods + + options_resp = current_app.make_default_options_response() + return options_resp.headers['allow'] + + def decorator(f): + def wrapped_function(*args, **kwargs): + if automatic_options and request.method == 'OPTIONS': + resp = current_app.make_default_options_response() + else: + resp = make_response(f(*args, **kwargs)) + if not attach_to_all and request.method != 'OPTIONS': + return resp + + h = resp.headers + + h.extend([("Access-Control-Allow-Origin", orig) for orig in origin]) + h['Access-Control-Allow-Methods'] = get_methods() + h['Access-Control-Max-Age'] = str(max_age) + if headers is not None: + h['Access-Control-Allow-Headers'] = headers + return resp + + f.provide_automatic_options = False + f.required_methods = ['OPTIONS'] + return update_wrapper(wrapped_function, f) + return decorator
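A hedged usage sketch of the decorator on a Flask route, assuming this module is importable as `pylorax.api.crossdomain`; the app and route names are illustrative.

```python
from flask import Flask, jsonify
from pylorax.api.crossdomain import crossdomain

app = Flask(__name__)

@app.route("/api/status")
@crossdomain(origin="*")
def api_status():
    # Responses gain Access-Control-Allow-Origin/-Methods/-Max-Age headers
    return jsonify(status="OK")
```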
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/dnfbase.html b/f32-branch/_modules/pylorax/api/dnfbase.html new file mode 100644 index 00000000..dda95ebe --- /dev/null +++ b/f32-branch/_modules/pylorax/api/dnfbase.html @@ -0,0 +1,387 @@ + + + + + + + + + + + pylorax.api.dnfbase — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.api.dnfbase

+#
+# Copyright (C) 2017-2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# pylint: disable=bad-preconf-access
+
+import logging
+log = logging.getLogger("lorax-composer")
+
+import dnf
+import dnf.logging
+from glob import glob
+import os
+import shutil
+from threading import Lock
+import time
+
+from pylorax import DEFAULT_PLATFORM_ID
+from pylorax.sysutils import flatconfig
+
+
[docs]class DNFLock(object): + """Hold the dnf.Base object and a Lock to control access to it. + + self.dbo is a property that returns the dnf.Base object, but it *may* change + from one call to the next if the upstream repositories have changed. + """ + def __init__(self, conf, expire_secs=6*60*60): + self._conf = conf + self._lock = Lock() + self.dbo = get_base_object(self._conf) + self._expire_secs = expire_secs + self._expire_time = time.time() + self._expire_secs + + @property + def lock(self): + """Check for repo updates (using expiration time) and return the lock + + If the repository has been updated, tear down the old dnf.Base and + create a new one. This is the only way to force dnf to use the new + metadata. + """ + if time.time() > self._expire_time: + return self.lock_check + return self._lock + + @property + def lock_check(self): + """Force a check for repo updates and return the lock + + Use this method sparingly, it removes the repodata and downloads a new copy every time. + """ + self._expire_time = time.time() + self._expire_secs + self.dbo.update_cache() + return self._lock
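A hedged usage sketch: sharing one dnf.Base between request threads. Always dereference `.dbo` while holding the lock, since the underlying object may be rebuilt when the metadata expiration timer fires.

```python
from pylorax.api.config import configure
from pylorax.api.dnfbase import DNFLock

dnflock = DNFLock(configure(test_config=True))
with dnflock.lock:
    enabled = list(dnflock.dbo.repos.iter_enabled())
    print("%d enabled repos" % len(enabled))
```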
+ +
[docs]def get_base_object(conf): + """Get the DNF object with settings from the config file + + :param conf: configuration object + :type conf: ComposerConfig + :returns: A DNF Base object + :rtype: dnf.Base + """ + cachedir = os.path.abspath(conf.get("composer", "cache_dir")) + dnfconf = os.path.abspath(conf.get("composer", "dnf_conf")) + dnfroot = os.path.abspath(conf.get("composer", "dnf_root")) + repodir = os.path.abspath(conf.get("composer", "repo_dir")) + + # Setup the config for the DNF Base object + dbo = dnf.Base() + dbc = dbo.conf +# TODO - Handle this +# dbc.logdir = logdir + dbc.installroot = dnfroot + if not os.path.isdir(dnfroot): + os.makedirs(dnfroot) + if not os.path.isdir(repodir): + os.makedirs(repodir) + + dbc.cachedir = cachedir + dbc.reposdir = [repodir] + dbc.install_weak_deps = False + dbc.prepend_installroot('persistdir') + # tsflags is an 'AppendOption' in libdnf: assigning to it appends to the + # existing list of values instead of replacing it, so this adds 'nodocs' + # to whatever is already set. + dbc.tsflags = ['nodocs'] + + if conf.get_default("dnf", "proxy", None): + dbc.proxy = conf.get("dnf", "proxy") + + if conf.has_option("dnf", "sslverify") and not conf.getboolean("dnf", "sslverify"): + dbc.sslverify = False + + # If the system repos are enabled read the dnf vars from /etc/dnf/vars/ + if not conf.has_option("repos", "use_system_repos") or conf.getboolean("repos", "use_system_repos"): + dbc.substitutions.update_from_etc("/") + log.info("dnf vars: %s", dbc.substitutions) + + _releasever = conf.get_default("composer", "releasever", None) + if not _releasever: + # Use the releasever of the host system + _releasever = dnf.rpm.detect_releasever("/") + log.info("releasever = %s", _releasever) + dbc.releasever = _releasever + + # DNF 3.2 needs to have module_platform_id set, otherwise depsolve won't work correctly + if not os.path.exists("/etc/os-release"): + log.warning("/etc/os-release is missing, cannot determine platform id, falling back to %s", DEFAULT_PLATFORM_ID) + platform_id = DEFAULT_PLATFORM_ID + else: + os_release = flatconfig("/etc/os-release") + platform_id = os_release.get("PLATFORM_ID", DEFAULT_PLATFORM_ID) + log.info("Using %s for module_platform_id", platform_id) + dbc.module_platform_id = platform_id + + # Make sure metadata is always current + dbc.metadata_expire = 0 + dbc.metadata_expire_filter = "never" + + # write the dnf configuration file + with open(dnfconf, "w") as f: + f.write(dbc.dump()) + + # dnf needs the repos all in one directory, composer uses repodir for this + # if system repos are supposed to be used, copy them into repodir, overwriting any previous copies + if not conf.has_option("repos", "use_system_repos") or conf.getboolean("repos", "use_system_repos"): + for repo_file in glob("/etc/yum.repos.d/*.repo"): + shutil.copy2(repo_file, repodir) + dbo.read_all_repos() + + # Remove any duplicate repo entries. These can cause problems with Anaconda, which will fail + # with space problems. 
+ repos = sorted(list(r.id for r in dbo.repos.iter_enabled())) + seen = {"baseurl": [], "mirrorlist": [], "metalink": []} + for source_name in repos: + remove = False + repo = dbo.repos.get(source_name, None) + if repo is None: + log.warning("repo %s vanished while removing duplicates", source_name) + continue + if repo.baseurl: + if repo.baseurl[0] in seen["baseurl"]: + log.info("Removing duplicate repo: %s baseurl=%s", source_name, repo.baseurl[0]) + remove = True + else: + seen["baseurl"].append(repo.baseurl[0]) + elif repo.mirrorlist: + if repo.mirrorlist in seen["mirrorlist"]: + log.info("Removing duplicate repo: %s mirrorlist=%s", source_name, repo.mirrorlist) + remove = True + else: + seen["mirrorlist"].append(repo.mirrorlist) + elif repo.metalink: + if repo.metalink in seen["metalink"]: + log.info("Removing duplicate repo: %s metalink=%s", source_name, repo.metalink) + remove = True + else: + seen["metalink"].append(repo.metalink) + + if remove: + del dbo.repos[source_name] + + # Update the metadata from the enabled repos to speed up later operations + log.info("Updating repository metadata") + try: + dbo.fill_sack(load_system_repo=False) + dbo.read_comps() + dbo.update_cache() + except dnf.exceptions.Error as e: + log.error("Failed to update metadata: %s", str(e)) + raise RuntimeError("Fetching metadata failed: %s" % str(e)) + + return dbo
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/flask_blueprint.html b/f32-branch/_modules/pylorax/api/flask_blueprint.html new file mode 100644 index 00000000..e8391df0 --- /dev/null +++ b/f32-branch/_modules/pylorax/api/flask_blueprint.html @@ -0,0 +1,255 @@ + + + + + + + + + + + pylorax.api.flask_blueprint — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.api.flask_blueprint

+#
+# Copyright (C) 2019 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+""" Flask Blueprints that support skipping routes
+
+When using Blueprints for API versioning you will usually want to fall back
+to the previous version's rules for routes that have no new behavior. To do
+this we add a 'skip_rule' list to the Blueprint's options dictionary. It lists
+all of the routes that you do not want to register.
+
+For example:
+    from pylorax.api.v0 import v0
+    from pylorax.api.v1 import v1
+
+    server.register_blueprint(v0, url_prefix="/api/v0/")
+    server.register_blueprint(v0, url_prefix="/api/v1/", skip_rules=["/blueprints/list"]
+    server.register_blueprint(v1, url_prefix="/api/v1/")
+
+This will register all of v0's routes under `/api/v0`, and all but `/blueprints/list` under `/api/v1`,
+and then register v1's version of `/blueprints/list` under `/api/v1`.
+
+"""
+from flask import Blueprint
+from flask.blueprints import BlueprintSetupState
+
+
[docs]class BlueprintSetupStateSkip(BlueprintSetupState): + def __init__(self, blueprint, app, options, first_registration, skip_rules): + self._skip_rules = skip_rules + super(BlueprintSetupStateSkip, self).__init__(blueprint, app, options, first_registration) + +
[docs] def add_url_rule(self, rule, endpoint=None, view_func=None, **options): + if rule not in self._skip_rules: + super(BlueprintSetupStateSkip, self).add_url_rule(rule, endpoint, view_func, **options)
+ +
[docs]class BlueprintSkip(Blueprint): + def __init__(self, *args, **kwargs): + super(BlueprintSkip, self).__init__(*args, **kwargs) + +
[docs] def make_setup_state(self, app, options, first_registration=False): + skip_rules = options.pop("skip_rules", []) + return BlueprintSetupStateSkip(self, app, options, first_registration, skip_rules)
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/gitrpm.html b/f32-branch/_modules/pylorax/api/gitrpm.html new file mode 100644 index 00000000..a0dfb21c --- /dev/null +++ b/f32-branch/_modules/pylorax/api/gitrpm.html @@ -0,0 +1,423 @@ + + + + + + + + + + + pylorax.api.gitrpm — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.api.gitrpm

+# Copyright (C) 2019 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+""" Clone a git repository and package it as an rpm
+
+This module contains functions for cloning a git repo, creating a tar archive of
+the selected commit, branch, or tag, and packaging the files into an rpm that will
+be installed by anaconda when creating the image.
+"""
+import logging
+log = logging.getLogger("lorax-composer")
+
+import os
+from rpmfluff import SimpleRpmBuild
+import shutil
+import subprocess
+import tempfile
+import time
+
+from pylorax.sysutils import joinpaths
+
+
[docs]def get_repo_description(gitRepo): + """ Return a description including the git repo and reference + + :param gitRepo: A dict with the repository details + :type gitRepo: dict + :returns: A string with the git repo url and reference + :rtype: str + """ + return "Created from %s, reference '%s', on %s" % (gitRepo["repo"], gitRepo["ref"], time.ctime())
+ +
[docs]class GitArchiveTarball: + """Create a git archive of the selected git repo and reference""" + def __init__(self, gitRepo): + self._gitRepo = gitRepo + self.sourceName = self._gitRepo["rpmname"]+".tar.xz" + +
[docs] def write_file(self, sourcesDir): + """ Create the tar archive + + :param sourcesDir: Path to use for creating the archive + :type sourcesDir: str + + This clones the git repository and creates a git archive from the specified reference. + The result is in RPMNAME.tar.xz under the sourcesDir + """ + # Clone the repository into a temporary location + cmd = ["git", "clone", self._gitRepo["repo"], joinpaths(sourcesDir, "gitrepo")] + log.debug(cmd) + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + log.error("Failed to clone %s: %s", self._gitRepo["repo"], e.output) + raise RuntimeError("Failed to clone %s" % self._gitRepo["repo"]) + + oldcwd = os.getcwd() + try: + os.chdir(joinpaths(sourcesDir, "gitrepo")) + + # Configure archive to create a .tar.xz + cmd = ["git", "config", "tar.tar.xz.command", "xz -c"] + log.debug(cmd) + subprocess.check_call(cmd) + + cmd = ["git", "archive", "--prefix", self._gitRepo["rpmname"] + "/", "-o", joinpaths(sourcesDir, self.sourceName), self._gitRepo["ref"]] + log.debug(cmd) + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + log.error("Failed to archive %s: %s", self._gitRepo["repo"], e.output) + raise RuntimeError('Failed to archive %s from ref "%s"' % (self._gitRepo["repo"], + self._gitRepo["ref"])) + finally: + # Cleanup even if there was an error + os.chdir(oldcwd) + shutil.rmtree(joinpaths(sourcesDir, "gitrepo"))
+ +
[docs]class GitRpmBuild(SimpleRpmBuild): + """Build an rpm containing files from a git repository""" + def __init__(self, *args, **kwargs): + self._base_dir = None + super().__init__(*args, **kwargs) + +
[docs] def check(self): + raise NotImplementedError
+ +
[docs] def get_base_dir(self): + """Place all the files under a temporary directory + rpmbuild/ + """ + if not self._base_dir: + self._base_dir = tempfile.mkdtemp(prefix="lorax-git-rpm.") + return joinpaths(self._base_dir, "rpmbuild")
+ +
[docs] def cleanup_tmpdir(self): + """Remove the temporary directory and all of its contents + """ + if not self._base_dir or len(self._base_dir) < 5: + raise RuntimeError("Invalid base_dir: %s" % self._base_dir) + + shutil.rmtree(self._base_dir)
+ +
[docs] def clean(self): + """Remove the base directory from inside the tmpdir""" + if len(self.get_base_dir()) < 5: + raise RuntimeError("Invalid base_dir: %s" % self.get_base_dir()) + shutil.rmtree(self.get_base_dir(), ignore_errors=True)
+ +
[docs] def add_git_tarball(self, gitRepo): + """Add a tar archive of a git repository to the rpm + + :param gitRepo: A dict with the repository details + :type gitRepo: dict + + This populates the rpm with the URL of the git repository, the summary + describing the repo, the description of the repository and reference used, + and sets up the rpm to install the archive contents into the destination + path. + """ + self.addUrl(gitRepo["repo"]) + self.add_summary(gitRepo["summary"]) + self.add_description(get_repo_description(gitRepo)) + self.addLicense("Unknown") + sourceIndex = self.add_source(GitArchiveTarball(gitRepo)) + self.section_build += "tar -xvf %s\n" % self.sources[sourceIndex].sourceName + dest = os.path.normpath(gitRepo["destination"]) + # Prevent double slash root + if dest == "/": + dest = "" + self.create_parent_dirs(dest) + self.section_install += "cp -r %s/. $RPM_BUILD_ROOT/%s\n" % (gitRepo["rpmname"], dest) + sub = self.get_subpackage(None) + if not dest: + # / is special, we don't want to include / itself, just what's under it + sub.section_files += "/*\n" + else: + sub.section_files += "%s/\n" % dest
+ +
[docs]def make_git_rpm(gitRepo, dest): + """ Create an rpm from the specified git repo + + :param gitRepo: A dict with the repository details + :type gitRepo: dict + :param dest: Path to move the built rpm to + :type dest: str + :returns: The file name of the built rpm + :rtype: str + + This will clone the git repository, create an archive of the selected reference, + and build an rpm that will install the files from the repository under the destination + directory. The gitRepo dict should have the following fields:: + + rpmname: "server-config" + rpmversion: "1.0" + rpmrelease: "1" + summary: "Setup files for server deployment" + repo: "PATH OF GIT REPO TO CLONE" + ref: "v1.0" + destination: "/opt/server/" + + * rpmname: Name of the rpm to create, also used as the prefix name in the tar archive + * rpmversion: Version of the rpm, eg. "1.0.0" + * rpmrelease: Release of the rpm, eg. "1" + * summary: Summary string for the rpm + * repo: URL of the git repo to clone and create the archive from + * ref: Git reference to check out. eg. origin/branch-name, git tag, or git commit hash + * destination: Path to install the / of the git repo at when installing the rpm + """ + gitRpm = GitRpmBuild(gitRepo["rpmname"], gitRepo["rpmversion"], gitRepo["rpmrelease"], ["noarch"]) + try: + gitRpm.add_git_tarball(gitRepo) + gitRpm.do_make() + rpmfile = gitRpm.get_built_rpm("noarch") + shutil.move(rpmfile, dest) + except Exception as e: + log.error("Creating git repo rpm: %s", e) + raise RuntimeError("Creating git repo rpm: %s" % e) + finally: + gitRpm.cleanup_tmpdir() + + return os.path.basename(rpmfile)
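A hedged usage sketch; the repository URL and metadata are placeholders, and `pylorax.api.gitrpm` is assumed importable. `make_git_rpm()` returns the file name of the rpm it moved into `dest`.

```python
import os
from pylorax.api.gitrpm import make_git_rpm

os.makedirs("/tmp/gitrpms", exist_ok=True)
gitRepo = {"rpmname": "server-config",
           "rpmversion": "1.0",
           "rpmrelease": "1",
           "summary": "Setup files for server deployment",
           "repo": "https://example.com/server-config.git",   # placeholder URL
           "ref": "v1.0",
           "destination": "/opt/server/"}
print(make_git_rpm(gitRepo, "/tmp/gitrpms"))   # e.g. server-config-1.0-1.noarch.rpm
```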
+ +# Create the git rpms, if any, and return the path to the repo under results_dir +
[docs]def create_gitrpm_repo(results_dir, recipe): + """Create a dnf repository with the rpms from the recipe + + :param results_dir: Path to create the repository under + :type results_dir: str + :param recipe: The recipe to get the repos.git entries from + :type recipe: Recipe + :returns: Path to the dnf repository or "" + :rtype: str + + This function creates a dnf repository directory at results_dir+"repo/", + creates rpms for all of the repos.git entries in the recipe, runs createrepo_c + on the dnf repository so that Anaconda can use it, and returns the path to the + repository to the caller. + """ + if "repos" not in recipe or "git" not in recipe["repos"]: + return "" + + gitrepo = joinpaths(results_dir, "repo/") + if not os.path.exists(gitrepo): + os.makedirs(gitrepo) + for r in recipe["repos"]["git"]: + make_git_rpm(r, gitrepo) + cmd = ["createrepo_c", gitrepo] + log.debug(cmd) + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + log.error("Failed to create repo at %s: %s", gitrepo, e.output) + raise RuntimeError("Failed to create repo at %s" % gitrepo) + + return gitrepo
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/projects.html b/f32-branch/_modules/pylorax/api/projects.html new file mode 100644 index 00000000..f5a9647a --- /dev/null +++ b/f32-branch/_modules/pylorax/api/projects.html @@ -0,0 +1,898 @@ + + + + + + + + + + + pylorax.api.projects — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.api.projects

+#
+# Copyright (C) 2017  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("lorax-composer")
+
+from configparser import ConfigParser
+import dnf
+from glob import glob
+import os
+import time
+
+from pylorax.api.bisect import insort_left
+from pylorax.sysutils import joinpaths
+
+TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
+
+
+
[docs]class ProjectsError(Exception): + pass
+ + +
[docs]def api_time(t): + """Convert time since epoch to a string + + :param t: Seconds since epoch + :type t: int + :returns: Time string + :rtype: str + """ + return time.strftime(TIME_FORMAT, time.localtime(t))
+ + +
[docs]def api_changelog(changelog): + """Convert the changelog to a string + + :param changelog: A list of time, author, string tuples. + :type changelog: list of tuples + :returns: The most recent changelog text or "" + :rtype: str + + This returns only the most recent changelog entry. + """ + try: + entry = changelog[0][2] + except IndexError: + entry = "" + return entry
+ + +
[docs]def pkg_to_project(pkg): + """Extract the details from a hawkey.Package object + + :param pkg: hawkey.Package object with package details + :type pkg: hawkey.Package + :returns: A dict with the name, summary, description, and url. + :rtype: dict + + upstream_vcs is hard-coded to UPSTREAM_VCS + """ + return {"name": pkg.name, + "summary": pkg.summary, + "description": pkg.description, + "homepage": pkg.url, + "upstream_vcs": "UPSTREAM_VCS"}
+ + +
[docs]def pkg_to_build(pkg): + """Extract the build details from a hawkey.Package object + + :param pkg: hawkey.Package object with package details + :type pkg: hawkey.Package + :returns: A dict with the build details, epoch, release, arch, build_time, changelog, ... + :rtype: dict + + metadata entries are hard-coded to {} + + Note that this only returns the build dict, it does not include the name, description, etc. + """ + return {"epoch": pkg.epoch, + "release": pkg.release, + "arch": pkg.arch, + "build_time": api_time(pkg.buildtime), + "changelog": "CHANGELOG_NEEDED", # XXX Not in hawkey.Package + "build_config_ref": "BUILD_CONFIG_REF", + "build_env_ref": "BUILD_ENV_REF", + "metadata": {}, + "source": {"license": pkg.license, + "version": pkg.version, + "source_ref": "SOURCE_REF", + "metadata": {}}}
+ + +
[docs]def pkg_to_project_info(pkg): + """Extract the details from a hawkey.Package object + + :param pkg: hawkey.Package object with package details + :type pkg: hawkey.Package + :returns: A dict with the project details, as well as epoch, release, arch, build_time, changelog, ... + :rtype: dict + + metadata entries are hard-coded to {} + """ + return {"name": pkg.name, + "summary": pkg.summary, + "description": pkg.description, + "homepage": pkg.url, + "upstream_vcs": "UPSTREAM_VCS", + "builds": [pkg_to_build(pkg)]}
+ + +
[docs]def pkg_to_dep(pkg): + """Extract the info from a hawkey.Package object + + :param pkg: A hawkey.Package object + :type pkg: hawkey.Package + :returns: A dict with name, epoch, version, release, arch + :rtype: dict + """ + return {"name": pkg.name, + "epoch": pkg.epoch, + "version": pkg.version, + "release": pkg.release, + "arch": pkg.arch}
+ + +
[docs]def proj_to_module(proj): + """Extract the name from a project_info dict + + :param proj: dict with package details + :type proj: dict + :returns: A dict with name, and group_type + :rtype: dict + + group_type is hard-coded to "rpm" + """ + return {"name": proj["name"], + "group_type": "rpm"}
+ + +
[docs]def dep_evra(dep): + """Return the epoch:version-release.arch for the dep + + :param dep: dependency dict + :type dep: dict + :returns: epoch:version-release.arch + :rtype: str + """ + if dep["epoch"] == 0: + return dep["version"]+"-"+dep["release"]+"."+dep["arch"] + else: + return str(dep["epoch"])+":"+dep["version"]+"-"+dep["release"]+"."+dep["arch"]
+ +
[docs]def dep_nevra(dep): + """Return the name-epoch:version-release.arch""" + return dep["name"]+"-"+dep_evra(dep)
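A tiny worked example of the epoch handling in `dep_evra()` / `dep_nevra()`, using an illustrative package dict and assuming `pylorax.api.projects` is importable.

```python
from pylorax.api.projects import dep_nevra

dep = {"name": "httpd", "epoch": 0, "version": "2.4.43",
       "release": "1.fc32", "arch": "x86_64"}
print(dep_nevra(dep))   # httpd-2.4.43-1.fc32.x86_64 (epoch 0 is omitted)

dep["epoch"] = 1
print(dep_nevra(dep))   # httpd-1:2.4.43-1.fc32.x86_64
```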
+ + +
[docs]def projects_list(dbo): + """Return a list of projects + + :param dbo: dnf base object + :type dbo: dnf.Base + :returns: List of project info dicts with name, summary, description, homepage, upstream_vcs + :rtype: list of dicts + """ + return projects_info(dbo, None)
+ + +
[docs]def projects_info(dbo, project_names): + """Return details about specific projects + + :param dbo: dnf base object + :type dbo: dnf.Base + :param project_names: List of names of projects to get info about + :type project_names: list of str + :returns: List of project info dicts with pkg_to_project as well as epoch, version, release, etc. + :rtype: list of dicts + + If project_names is None it will return the full list of available packages + """ + if project_names: + pkgs = dbo.sack.query().available().filter(name__glob=project_names) + else: + pkgs = dbo.sack.query().available() + + # iterate over pkgs + # - if pkg.name isn't in the results yet, add pkg_to_project_info in sorted position + # - if pkg.name is already in results, get its builds. If the build for pkg is different + # in any way (version, arch, etc.) add it to the entry's builds list. If it is the same, + # skip it. + results = [] + results_names = {} + for p in pkgs: + if p.name.lower() not in results_names: + idx = insort_left(results, pkg_to_project_info(p), key=lambda p: p["name"].lower()) + results_names[p.name.lower()] = idx + else: + build = pkg_to_build(p) + if build not in results[results_names[p.name.lower()]]["builds"]: + results[results_names[p.name.lower()]]["builds"].append(build) + + return results
+ +def _depsolve(dbo, projects, groups): + """Add projects to a new transaction + + :param dbo: dnf base object + :type dbo: dnf.Base + :param projects: The projects and version globs to find the dependencies for + :type projects: List of tuples + :param groups: The groups to include in dependency solving + :type groups: List of str + :returns: None + :rtype: None + :raises: ProjectsError if there was a problem installing something + """ + # This resets the transaction and updates the cache. + # It is important that the cache always be synchronized because Anaconda will grab its own copy + # and if that is different the NEVRAs will not match and the build will fail. + dbo.reset(goal=True) + install_errors = [] + for name in groups: + try: + dbo.group_install(name, ["mandatory", "default"]) + except dnf.exceptions.MarkingError as e: + install_errors.append(("Group %s" % (name), str(e))) + + for name, version in projects: + # Find the best package matching the name + version glob + # dnf can return multiple packages if it is in more than 1 repository + query = dbo.sack.query().filterm(provides__glob=name) + if version: + query.filterm(version__glob=version) + + query.filterm(latest=1) + if not query: + install_errors.append(("%s-%s" % (name, version), "No match")) + continue + sltr = dnf.selector.Selector(dbo.sack).set(pkg=query) + + # NOTE: dnf says in near future there will be a "goal" attribute of Base class + # so yes, we're using a 'private' attribute here on purpose and with permission. + dbo._goal.install(select=sltr, optional=False) + + if install_errors: + raise ProjectsError("The following package(s) had problems: %s" % ",".join(["%s (%s)" % (pattern, err) for pattern, err in install_errors])) + +
[docs]def projects_depsolve(dbo, projects, groups): + """Return the dependencies for a list of projects + + :param dbo: dnf base object + :type dbo: dnf.Base + :param projects: The projects to find the dependencies for + :type projects: List of Strings + :param groups: The groups to include in dependency solving + :type groups: List of str + :returns: NEVRA's of the project and its dependencies + :rtype: list of dicts + :raises: ProjectsError if there was a problem installing something + """ + _depsolve(dbo, projects, groups) + + try: + dbo.resolve() + except dnf.exceptions.DepsolveError as e: + raise ProjectsError("There was a problem depsolving %s: %s" % (projects, str(e))) + + if len(dbo.transaction) == 0: + return [] + + return sorted(map(pkg_to_dep, dbo.transaction.install_set), key=lambda p: p["name"].lower())
+ + +
[docs]def estimate_size(packages, block_size=6144): + """Estimate the installed size of a package list + + :param packages: The packages to be installed + :type packages: list of hawkey.Package objects + :param block_size: The block size to use for rounding up file sizes. + :type block_size: int + :returns: The estimated size of installed packages + :rtype: int + + Estimating actual requirements is difficult without the actual file sizes, which + dnf doesn't provide access to. So use the file count and block size to estimate + a minimum size for each package. + """ + installed_size = 0 + for p in packages: + installed_size += len(p.files) * block_size + installed_size += p.installsize + return installed_size
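A worked example of the estimate (numbers invented): a package that ships 10 files with an installsize of 50000 bytes is estimated at 10 * 6144 + 50000 = 111440 bytes with the default block size.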
+ + +
[docs]def projects_depsolve_with_size(dbo, projects, groups, with_core=True): + """Return the dependencies and installed size for a list of projects + + :param dbo: dnf base object + :type dbo: dnf.Base + :param projects: The projects to find the dependencies for + :type projects: List of Strings + :param groups: The groups to include in dependency solving + :type groups: List of str + :param with_core: Include the 'core' group in the depsolve (default True) + :type with_core: bool + :returns: installed size and a list of NEVRA's of the project and its dependencies + :rtype: tuple of (int, list of dicts) + :raises: ProjectsError if there was a problem installing something + """ + _depsolve(dbo, projects, groups) + + if with_core: + dbo.group_install("core", ['mandatory', 'default', 'optional']) + + try: + dbo.resolve() + except dnf.exceptions.DepsolveError as e: + raise ProjectsError("There was a problem depsolving %s: %s" % (projects, str(e))) + + if len(dbo.transaction) == 0: + return (0, []) + + installed_size = estimate_size(dbo.transaction.install_set) + deps = sorted(map(pkg_to_dep, dbo.transaction.install_set), key=lambda p: p["name"].lower()) + return (installed_size, deps)
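A usage sketch, continuing the hypothetical `dbo` from above:

    size, deps = projects_depsolve_with_size(dbo, [("bash", "*")], [], with_core=False)
    print("estimated size: %d bytes" % size)
    if deps:
        print("first dependency:", dep_nevra(deps[0]))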
+ + +
[docs]def modules_list(dbo, module_names): + """Return a list of modules + + :param dbo: dnf base object + :type dbo: dnf.Base + :param module_names: Names of the modules to list, or None for all + :type module_names: list of str + :returns: List of module information + :rtype: list of dicts + + Modules don't exist in RHEL7 so this only returns projects + and sets the type to "rpm" + + """ + # TODO - Figure out what to do with this for Fedora 'modules' + return list(map(proj_to_module, projects_info(dbo, module_names)))
+ +
[docs]def modules_info(dbo, module_names): + """Return details about a module, including dependencies + + :param dbo: dnf base object + :type dbo: dnf.Base + :param module_names: Names of the modules to get info about + :type module_names: list of str + :returns: List of dicts with module details and dependencies. + :rtype: list of dicts + """ + modules = projects_info(dbo, module_names) + + # Add the dependency info to each one + for module in modules: + module["dependencies"] = projects_depsolve(dbo, [(module["name"], "*.*")], []) + + return modules
+ +
[docs]def dnf_repo_to_file_repo(repo): + """Return a string representation of a DNF Repo object suitable for writing to a .repo file + + :param repo: DNF Repository + :type repo: dnf.repo.Repo + :returns: A string + :rtype: str + + The DNF Repo.dump() function does not produce a string that can be used as a dnf .repo file, + it outputs baseurl and gpgkey as python lists which DNF cannot read. So do this manually with + only the attributes we care about. + """ + repo_str = "[%s]\nname = %s\n" % (repo.id, repo.name) + if repo.metalink: + repo_str += "metalink = %s\n" % repo.metalink + elif repo.mirrorlist: + repo_str += "mirrorlist = %s\n" % repo.mirrorlist + elif repo.baseurl: + repo_str += "baseurl = %s\n" % repo.baseurl[0] + else: + raise RuntimeError("Repo has no baseurl, metalink, or mirrorlist") + + # proxy is optional + if repo.proxy: + repo_str += "proxy = %s\n" % repo.proxy + + repo_str += "sslverify = %s\n" % repo.sslverify + repo_str += "gpgcheck = %s\n" % repo.gpgcheck + if repo.gpgkey: + repo_str += "gpgkey = %s\n" % ",".join(repo.gpgkey) + + if repo.skip_if_unavailable: + repo_str += "skip_if_unavailable=1\n" + + return repo_str
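For a typical metalink repository the returned string looks roughly like this (values illustrative):

    [fedora]
    name = Fedora 32 - x86_64
    metalink = https://mirrors.fedoraproject.org/metalink?repo=fedora-32&arch=x86_64
    sslverify = True
    gpgcheck = True
    gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-32-x86_64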
+ +
[docs]def repo_to_source(repo, system_source, api=1): + """Return a Weldr Source dict created from the DNF Repository + + :param repo: DNF Repository + :type repo: dnf.repo.Repo + :param system_source: True if this source is an immutable system source + :type system_source: bool + :param api: Select which api version of the dict to return (default 1) + :type api: int + :returns: A dict with Weldr Source fields filled in + :rtype: dict + + Example:: + + { + "check_gpg": true, + "check_ssl": true, + "gpgkey_urls": [ + "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-28-x86_64" + ], + "id": "fedora", + "name": "Fedora $releasever - $basearch", + "proxy": "http://proxy.brianlane.com:8123", + "system": true, + "type": "yum-metalink", + "url": "https://mirrors.fedoraproject.org/metalink?repo=fedora-28&arch=x86_64" + } + + The ``name`` field has changed in v1 of the API. + In v0 of the API ``name`` is the repo.id, in v1 it is the repo.name and a new field, + ``id``, has been added for the repo.id + + """ + if api==0: + source = {"name": repo.id, "system": system_source} + else: + source = {"id": repo.id, "name": repo.name, "system": system_source} + if repo.baseurl: + source["url"] = repo.baseurl[0] + source["type"] = "yum-baseurl" + elif repo.metalink: + source["url"] = repo.metalink + source["type"] = "yum-metalink" + elif repo.mirrorlist: + source["url"] = repo.mirrorlist + source["type"] = "yum-mirrorlist" + else: + raise RuntimeError("Repo has no baseurl, metalink, or mirrorlist") + + # proxy is optional + if repo.proxy: + source["proxy"] = repo.proxy + + if not repo.sslverify: + source["check_ssl"] = False + else: + source["check_ssl"] = True + + if not repo.gpgcheck: + source["check_gpg"] = False + else: + source["check_gpg"] = True + + if repo.gpgkey: + source["gpgkey_urls"] = list(repo.gpgkey) + + return source
+ +
[docs]def source_to_repodict(source): + """Return a tuple suitable for use with dnf.add_new_repo + + :param source: A Weldr source dict + :type source: dict + :returns: A tuple of dnf.Repo attributes + :rtype: (str, list, dict) + + Return a tuple with (id, baseurl|(), kwargs) that can be used + with dnf.repos.add_new_repo + """ + kwargs = {} + if "id" in source: + # This is an API v1 source definition + repoid = source["id"] + if "name" in source: + kwargs["name"] = source["name"] + else: + repoid = source["name"] + + # This will allow errors to be raised so we can catch them + # without this they are logged, but the repo is silently disabled + kwargs["skip_if_unavailable"] = False + + if source["type"] == "yum-baseurl": + baseurl = [source["url"]] + elif source["type"] == "yum-metalink": + kwargs["metalink"] = source["url"] + baseurl = () + elif source["type"] == "yum-mirrorlist": + kwargs["mirrorlist"] = source["url"] + baseurl = () + + if "proxy" in source: + kwargs["proxy"] = source["proxy"] + + if source["check_ssl"]: + kwargs["sslverify"] = True + else: + kwargs["sslverify"] = False + + if source["check_gpg"]: + kwargs["gpgcheck"] = True + else: + kwargs["gpgcheck"] = False + + if "gpgkey_urls" in source: + kwargs["gpgkey"] = tuple(source["gpgkey_urls"]) + + return (repoid, baseurl, kwargs)
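A sketch of wiring this into dnf, using an API v1 source with invented values (`dbo` is a configured dnf.Base as in the earlier sketch); this mirrors the call made by new_repo_source() below:

    source = {"id": "custom-example", "name": "Custom repo", "type": "yum-baseurl",
              "url": "https://repo.example.com/fedora/", "check_ssl": True, "check_gpg": False}
    repoid, baseurl, kwargs = source_to_repodict(source)
    r = dbo.repos.add_new_repo(repoid, dbo.conf, baseurl, **kwargs)
    r.enable()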
+ + +
[docs]def source_to_repo(source, dnf_conf): + """Return a dnf Repo object created from a source dict + + :param source: A Weldr source dict + :type source: dict + :param dnf_conf: The dnf Config object + :type dnf_conf: dnf.conf + :returns: A dnf Repo object + :rtype: dnf.Repo + + Example:: + + { + "check_gpg": True, + "check_ssl": True, + "gpgkey_urls": [ + "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-28-x86_64" + ], + "id": "fedora", + "name": "Fedora $releasever - $basearch", + "proxy": "http://proxy.brianlane.com:8123", + "system": True, + "type": "yum-metalink", + "url": "https://mirrors.fedoraproject.org/metalink?repo=fedora-28&arch=x86_64" + } + + If the ``id`` field is included it is used for the repo id, otherwise ``name`` is used. + v0 of the API only used ``name``, v1 added the distinction between ``id`` and ``name``. + """ + repoid, baseurl, kwargs = source_to_repodict(source) + repo = dnf.repo.Repo(repoid, dnf_conf) + if baseurl: + repo.baseurl = baseurl + + # Apply the rest of the kwargs to the Repo object + for k, v in kwargs.items(): + setattr(repo, k, v) + + repo.enable() + + return repo
+ +
[docs]def get_source_ids(source_path): + """Return a list of the source ids in a file + + :param source_path: Full path and filename of the source (yum repo) file + :type source_path: str + :returns: A list of source id strings + :rtype: list of str + """ + if not os.path.exists(source_path): + return [] + + cfg = ConfigParser() + cfg.read(source_path) + return cfg.sections()
+ +
[docs]def get_repo_sources(source_glob): + """Return a list of sources from a directory of yum repositories + + :param source_glob: A glob to use to match the source files, including full path + :type source_glob: str + :returns: A list of the source ids in all of the matching files + :rtype: list of str + """ + sources = [] + for f in glob(source_glob): + sources.extend(get_source_ids(f)) + return sources
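For example, to list every source id under a repo directory (the path here is assumed; it matches the usual lorax-composer layout):

    sources = get_repo_sources("/var/lib/lorax/composer/repos.d/*.repo")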
+ +
[docs]def delete_repo_source(source_glob, source_id): + """Delete a source from a repo file + + :param source_glob: A glob of the repo sources to search + :type source_glob: str + :param source_id: The repo id to delete + :type source_id: str + :returns: None + :raises: ProjectsError if there was a problem + + A repo file may have multiple sources in it, delete only the selected source. + If it is the last one in the file, delete the file. + + WARNING: This will delete ANY source, the caller needs to ensure that a system + source_id isn't passed to it. + """ + found = False + for f in glob(source_glob): + try: + cfg = ConfigParser() + cfg.read(f) + if source_id in cfg.sections(): + found = True + cfg.remove_section(source_id) + # If there are other sections, rewrite the file without the deleted one + if len(cfg.sections()) > 0: + with open(f, "w") as cfg_file: + cfg.write(cfg_file) + else: + # No sections left, just delete the file + os.unlink(f) + except Exception as e: + raise ProjectsError("Problem deleting repo source %s: %s" % (source_id, str(e))) + if not found: + raise ProjectsError("source %s not found" % source_id)
+ +
[docs]def new_repo_source(dbo, repoid, source, repo_dir): + """Add a new repo source from a Weldr source dict + + :param dbo: dnf base object + :type dbo: dnf.Base + :param repoid: The repo id (API v0 uses the name, v1 uses the id) + :type repoid: str + :param source: A Weldr source dict + :type source: dict + :param repo_dir: Directory to write the new .repo file to + :type repo_dir: str + :returns: None + :raises: ... + + Make sure access to the dbo has been locked before calling this. + The `repoid` parameter will be the 'name' field for API v0, and the 'id' field for API v1 + + DNF variables will be substituted at load time, and on restart. + """ + try: + # Remove it from the RepoDict (NOTE that this isn't explicitly supported by the DNF API) + # If this repo already exists, delete it and replace it with the new one + repos = list(r.id for r in dbo.repos.iter_enabled()) + if repoid in repos: + del dbo.repos[repoid] + + # Add the repo and substitute any dnf variables + _, baseurl, kwargs = source_to_repodict(source) + log.debug("repoid=%s, baseurl=%s, kwargs=%s", repoid, baseurl, kwargs) + r = dbo.repos.add_new_repo(repoid, dbo.conf, baseurl, **kwargs) + r.enable() + + log.info("Updating repository metadata after adding %s", repoid) + dbo.fill_sack(load_system_repo=False) + dbo.read_comps() + + # Remove any previous sources with this id, ignore it if it isn't found + try: + delete_repo_source(joinpaths(repo_dir, "*.repo"), repoid) + except ProjectsError: + pass + + # Make sure the source id can't contain a path traversal by taking the basename + source_path = joinpaths(repo_dir, os.path.basename("%s.repo" % repoid)) + # Write the un-substituted version of the repo to disk + with open(source_path, "w") as f: + repo = source_to_repo(source, dbo.conf) + f.write(dnf_repo_to_file_repo(repo)) + except Exception as e: + log.error("(new_repo_source) adding %s failed: %s", repoid, str(e)) + + # Cleanup the mess, if loading it failed we don't want to leave it in memory + repos = list(r.id for r in dbo.repos.iter_enabled()) + if repoid in repos: + del dbo.repos[repoid] + + log.info("Updating repository metadata after adding %s failed", repoid) + dbo.fill_sack(load_system_repo=False) + dbo.read_comps() + + raise
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/queue.html b/f32-branch/_modules/pylorax/api/queue.html new file mode 100644 index 00000000..f72ac4b1 --- /dev/null +++ b/f32-branch/_modules/pylorax/api/queue.html @@ -0,0 +1,1064 @@ + + + + + + + + + + + pylorax.api.queue — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.api.queue

+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+""" Functions to monitor compose queue and run anaconda"""
+import logging
+log = logging.getLogger("pylorax")
+program_log = logging.getLogger("program")
+dnf_log = logging.getLogger("dnf")
+
+import os
+import grp
+from glob import glob
+import multiprocessing as mp
+import pwd
+import shutil
+import subprocess
+from subprocess import Popen, PIPE
+import time
+
+from pylorax import find_templates
+from pylorax.api.compose import move_compose_results
+from pylorax.api.recipes import recipe_from_file
+from pylorax.api.timestamp import TS_CREATED, TS_STARTED, TS_FINISHED, write_timestamp, timestamp_dict
+import pylorax.api.toml as toml
+from pylorax.base import DataHolder
+from pylorax.creator import run_creator
+from pylorax.sysutils import joinpaths, read_tail
+
+from lifted.queue import create_upload, get_uploads, ready_upload, delete_upload
+
+
[docs]def check_queues(cfg): + """Check to make sure the new and run queue symlinks are correct + + :param cfg: Configuration settings + :type cfg: DataHolder + + Also check all of the existing results and make sure any with WAITING + set in STATUS have a symlink in queue/new/ + """ + # Remove broken symlinks from the new and run queues + queue_symlinks = glob(joinpaths(cfg.composer_dir, "queue/new/*")) + \ + glob(joinpaths(cfg.composer_dir, "queue/run/*")) + for link in queue_symlinks: + if not os.path.isdir(os.path.realpath(link)): + log.info("Removing broken symlink %s", link) + os.unlink(link) + + # Write FAILED to the STATUS of any run queue symlinks and remove them + for link in glob(joinpaths(cfg.composer_dir, "queue/run/*")): + log.info("Setting build %s to FAILED, and removing symlink from queue/run/", os.path.basename(link)) + open(joinpaths(link, "STATUS"), "w").write("FAILED\n") + os.unlink(link) + + # Check results STATUS messages + # - If STATUS is missing, set it to FAILED + # - RUNNING should be changed to FAILED + # - WAITING should have a symlink in the new queue + for link in glob(joinpaths(cfg.composer_dir, "results/*")): + if not os.path.exists(joinpaths(link, "STATUS")): + open(joinpaths(link, "STATUS"), "w").write("FAILED\n") + continue + + status = open(joinpaths(link, "STATUS")).read().strip() + if status == "RUNNING": + log.info("Setting build %s to FAILED", os.path.basename(link)) + open(joinpaths(link, "STATUS"), "w").write("FAILED\n") + elif status == "WAITING": + if not os.path.islink(joinpaths(cfg.composer_dir, "queue/new/", os.path.basename(link))): + log.info("Creating missing symlink to new build %s", os.path.basename(link)) + os.symlink(link, joinpaths(cfg.composer_dir, "queue/new/", os.path.basename(link)))
+ +
[docs]def start_queue_monitor(cfg, uid, gid): + """Start the queue monitor as a mp process + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uid: User ID that owns the queue + :type uid: int + :param gid: Group ID that owns the queue + :type gid: int + :returns: None + """ + lib_dir = cfg.get("composer", "lib_dir") + share_dir = cfg.get("composer", "share_dir") + tmp = cfg.get("composer", "tmp") + monitor_cfg = DataHolder(cfg=cfg, composer_dir=lib_dir, share_dir=share_dir, uid=uid, gid=gid, tmp=tmp) + p = mp.Process(target=monitor, args=(monitor_cfg,)) + p.daemon = True + p.start()
+ +
[docs]def monitor(cfg): + """Monitor the queue for new compose requests + + :param cfg: Configuration settings + :type cfg: DataHolder + :returns: Does not return + + The queue has 2 subdirectories, new and run. When a compose is ready to be run + a symlink to the uniquely named results directory should be placed in ./queue/new/ + + When it is ready to be run (the queue is checked every 5 seconds, and after each + compose finishes) the symlink will be moved into ./queue/run/ and a STATUS file + will be created in the results directory. + + STATUS can contain one of: WAITING, RUNNING, FINISHED, FAILED + + If the system is restarted while a compose is running, check_queues() will remove + the stale symlink from ./queue/run/ and mark the interrupted build as FAILED. + """ + def queue_sort(uuid): + """Sort the queue entries by their mtime, not their names""" + return os.stat(joinpaths(cfg.composer_dir, "queue/new", uuid)).st_mtime + + check_queues(cfg) + while True: + uuids = sorted(os.listdir(joinpaths(cfg.composer_dir, "queue/new")), key=queue_sort) + + # Pick the oldest and move it into ./run/ + if not uuids: + # No composes left to process, sleep for a bit + time.sleep(5) + else: + src = joinpaths(cfg.composer_dir, "queue/new", uuids[0]) + dst = joinpaths(cfg.composer_dir, "queue/run", uuids[0]) + try: + os.rename(src, dst) + except OSError: + # The symlink may vanish if uuid_cancel() has been called + continue + + # The anaconda logs are also copied into ./anaconda/ in this directory + os.makedirs(joinpaths(dst, "logs"), exist_ok=True) + + def open_handler(loggers, file_name): + handler = logging.FileHandler(joinpaths(dst, "logs", file_name)) + handler.setLevel(logging.DEBUG) + handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s: %(message)s")) + for logger in loggers: + logger.addHandler(handler) + return (handler, loggers) + + loggers = (((log, program_log, dnf_log), "combined.log"), + ((log,), "composer.log"), + ((program_log,), "program.log"), + ((dnf_log,), "dnf.log")) + handlers = [open_handler(loggers, file_name) for loggers, file_name in loggers] + + log.info("Starting new compose: %s", dst) + open(joinpaths(dst, "STATUS"), "w").write("RUNNING\n") + + try: + make_compose(cfg, os.path.realpath(dst)) + log.info("Finished building %s, results are in %s", dst, os.path.realpath(dst)) + open(joinpaths(dst, "STATUS"), "w").write("FINISHED\n") + write_timestamp(dst, TS_FINISHED) + + upload_cfg = cfg.cfg["upload"] + for upload in get_uploads(upload_cfg, uuid_get_uploads(cfg.cfg, uuids[0])): + log.info("Readying upload %s", upload.uuid) + uuid_ready_upload(cfg.cfg, uuids[0], upload.uuid) + except Exception: + import traceback + log.error("traceback: %s", traceback.format_exc()) + +# TODO - Write the error message to an ERROR-LOG file to include with the status +# log.error("Error running compose: %s", e) + open(joinpaths(dst, "STATUS"), "w").write("FAILED\n") + write_timestamp(dst, TS_FINISHED) + finally: + for handler, loggers in handlers: + for logger in loggers: + logger.removeHandler(handler) + handler.close() + + os.unlink(dst)
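The submitting side of this protocol is normally the API server, but a minimal sketch of queueing a compose by hand looks like this (paths and UUID invented; a real submission also writes the metadata and TS_CREATED timestamp):

    import os

    results = "/var/lib/lorax/composer/results/8c8435ef-d6bd-4805-ade6-2b9e91263e14"
    open(os.path.join(results, "STATUS"), "w").write("WAITING\n")
    os.symlink(results, "/var/lib/lorax/composer/queue/new/8c8435ef-d6bd-4805-ade6-2b9e91263e14")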
+ +
[docs]def make_compose(cfg, results_dir): + """Run anaconda with the final-kickstart.ks from results_dir + + :param cfg: Configuration settings + :type cfg: DataHolder + :param results_dir: The directory containing the metadata and results for the build + :type results_dir: str + :returns: Nothing + :raises: May raise various exceptions + + This takes the final-kickstart.ks, and the settings in config.toml and runs Anaconda + in no-virt mode (directly on the host operating system). Exceptions should be caught + at the higher level. + + If there is a failure, the build artifacts will be cleaned up, and any logs will be + moved into logs/anaconda/ and their ownership will be set to the user from the cfg + object. + """ + + # Check on the ks's presence + ks_path = joinpaths(results_dir, "final-kickstart.ks") + if not os.path.exists(ks_path): + raise RuntimeError("Missing kickstart file at %s" % ks_path) + + # Load the compose configuration + cfg_path = joinpaths(results_dir, "config.toml") + if not os.path.exists(cfg_path): + raise RuntimeError("Missing config.toml for %s" % results_dir) + cfg_dict = toml.loads(open(cfg_path, "r").read()) + + # The keys in cfg_dict correspond to the arguments setup in livemedia-creator + # keys that define what to build should be setup in compose_args, and keys with + # defaults should be setup here. + + # Make sure that image_name contains no path components + cfg_dict["image_name"] = os.path.basename(cfg_dict["image_name"]) + + # Only support novirt installation, set some other defaults + cfg_dict["no_virt"] = True + cfg_dict["disk_image"] = None + cfg_dict["fs_image"] = None + cfg_dict["keep_image"] = False + cfg_dict["domacboot"] = False + cfg_dict["anaconda_args"] = "" + cfg_dict["proxy"] = "" + cfg_dict["armplatform"] = "" + cfg_dict["squashfs_args"] = None + + cfg_dict["lorax_templates"] = find_templates(cfg.share_dir) + cfg_dict["tmp"] = cfg.tmp + # Use default args for dracut + cfg_dict["dracut_conf"] = None + cfg_dict["dracut_args"] = None + + # TODO How to support other arches? + cfg_dict["arch"] = None + + # Compose things in a temporary directory inside the results directory + cfg_dict["result_dir"] = joinpaths(results_dir, "compose") + os.makedirs(cfg_dict["result_dir"]) + + install_cfg = DataHolder(**cfg_dict) + + # Some kludges for the 99-copy-logs %post, failure in it will crash the build + for f in ["/tmp/NOSAVE_INPUT_KS", "/tmp/NOSAVE_LOGS"]: + open(f, "w") + + # Placing a CANCEL file in the results directory will make execWithRedirect send anaconda a SIGTERM + def cancel_build(): + return os.path.exists(joinpaths(results_dir, "CANCEL")) + + log.debug("cfg = %s", install_cfg) + try: + test_path = joinpaths(results_dir, "TEST") + write_timestamp(results_dir, TS_STARTED) + if os.path.exists(test_path): + # Pretend to run the compose + time.sleep(5) + try: + test_mode = int(open(test_path, "r").read()) + except Exception: + test_mode = 1 + if test_mode == 1: + raise RuntimeError("TESTING FAILED compose") + else: + open(joinpaths(results_dir, install_cfg.image_name), "w").write("TEST IMAGE") + else: + run_creator(install_cfg, cancel_func=cancel_build) + + # Extract the results of the compose into results_dir and cleanup the compose directory + move_compose_results(install_cfg, results_dir) + finally: + # Make sure any remaining temporary directories are removed (eg. if there was an exception) + for d in glob(joinpaths(cfg.tmp, "lmc-*")): + if os.path.isdir(d): + shutil.rmtree(d) + elif os.path.isfile(d): + os.unlink(d) + + # Make sure that everything under the results directory is owned by the user + user = pwd.getpwuid(cfg.uid).pw_name + group = grp.getgrgid(cfg.gid).gr_name + log.debug("Install finished, chowning results to %s:%s", user, group) + subprocess.call(["chown", "-R", "%s:%s" % (user, group), results_dir])
+ +
[docs]def get_compose_type(results_dir): + """Return the type of composition. + + :param results_dir: The directory containing the metadata and results for the build + :type results_dir: str + :returns: The type of compose (eg. 'tar') + :rtype: str + :raises: RuntimeError if no kickstart template can be found. + """ + # Should only be 2 kickstarts, the final-kickstart.ks and the template + t = [os.path.basename(ks)[:-3] for ks in glob(joinpaths(results_dir, "*.ks")) + if "final-kickstart" not in ks] + if len(t) != 1: + raise RuntimeError("Cannot find ks template for build %s" % os.path.basename(results_dir)) + return t[0]
+ +
[docs]def compose_detail(cfg, results_dir, api=1): + """Return details about the build. + + :param cfg: Configuration settings (required for api=1) + :type cfg: ComposerConfig + :param results_dir: The directory containing the metadata and results for the build + :type results_dir: str + :param api: Select which api version of the dict to return (default 1) + :type api: int + :returns: A dictionary with details about the compose + :rtype: dict + :raises: IOError if it cannot read the directory, STATUS, or blueprint file. + + The following details are included in the dict: + + * id - The uuid of the composition + * queue_status - The final status of the composition (FINISHED or FAILED) + * compose_type - The type of output generated (tar, iso, etc.) + * blueprint - Blueprint name + * version - Blueprint version + * image_size - Size of the image, if finished. 0 otherwise. + * uploads - For API v1 details about uploading the image are included + + Various timestamps are also included in the dict. These are all Unix UTC timestamps. + It is possible for these timestamps to not always exist, in which case they will be + None in Python (or null in JSON). The following timestamps are included: + + * job_created - When the user submitted the compose + * job_started - Anaconda started running + * job_finished - Job entered FINISHED or FAILED state + """ + build_id = os.path.basename(os.path.abspath(results_dir)) + status = open(joinpaths(results_dir, "STATUS")).read().strip() + blueprint = recipe_from_file(joinpaths(results_dir, "blueprint.toml")) + + compose_type = get_compose_type(results_dir) + + image_path = get_image_name(results_dir)[1] + if status == "FINISHED" and os.path.exists(image_path): + image_size = os.stat(image_path).st_size + else: + image_size = 0 + + times = timestamp_dict(results_dir) + + detail = {"id": build_id, + "queue_status": status, + "job_created": times.get(TS_CREATED), + "job_started": times.get(TS_STARTED), + "job_finished": times.get(TS_FINISHED), + "compose_type": compose_type, + "blueprint": blueprint["name"], + "version": blueprint["version"], + "image_size": image_size, + } + + if api == 1: + # Get uploads for this build_id + upload_uuids = uuid_get_uploads(cfg, build_id) + summaries = [upload.summary() for upload in get_uploads(cfg["upload"], upload_uuids)] + detail["uploads"] = summaries + return detail
+ +
[docs]def queue_status(cfg, api=1): + """Return details about what is in the queue. + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param api: Select which api version of the dict to return (default 1) + :type api: int + :returns: A list of the new composes, and a list of the running composes + :rtype: dict + + This returns a dict with 2 lists. "new" is the list of uuids that are waiting to be built, + and "run" has the uuids that are being built (currently limited to 1 at a time). + """ + queue_dir = joinpaths(cfg.get("composer", "lib_dir"), "queue") + new_queue = [os.path.realpath(p) for p in glob(joinpaths(queue_dir, "new/*"))] + run_queue = [os.path.realpath(p) for p in glob(joinpaths(queue_dir, "run/*"))] + + new_details = [] + for n in new_queue: + try: + d = compose_detail(cfg, n, api) + except IOError: + continue + new_details.append(d) + + run_details = [] + for r in run_queue: + try: + d = compose_detail(cfg, r, api) + except IOError: + continue + run_details.append(d) + + return { + "new": new_details, + "run": run_details + }
+ +
[docs]def uuid_status(cfg, uuid, api=1): + """Return the details of a specific UUID compose + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uuid: The UUID of the build + :type uuid: str + :param api: Select which api version of the dict to return (default 1) + :type api: int + :returns: Details about the build + :rtype: dict or None + + Returns the same dict as `compose_detail()` + """ + uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid) + try: + return compose_detail(cfg, uuid_dir, api) + except IOError: + return None
+ +
[docs]def build_status(cfg, status_filter=None, api=1): + """Return the details of finished or failed builds + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param status_filter: What builds to return. None == all, "FINISHED", or "FAILED" + :type status_filter: str + :param api: Select which api version of the dict to return (default 1) + :type api: int + :returns: A list of the build details (from compose_detail) + :rtype: list of dicts + + This returns a list of build details for each of the matching builds on the + system. It does not return the status of builds that have not been finished. + Use queue_status() for those. + """ + if status_filter: + status_filter = [status_filter] + else: + status_filter = ["FINISHED", "FAILED"] + + results = [] + result_dir = joinpaths(cfg.get("composer", "lib_dir"), "results") + for build in glob(result_dir + "/*"): + log.debug("Checking status of build %s", build) + + try: + status = open(joinpaths(build, "STATUS"), "r").read().strip() + if status in status_filter: + results.append(compose_detail(cfg, build, api)) + except IOError: + pass + return results
+ +def _upload_list_path(cfg, uuid): + """Return the path to the UPLOADS file + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uuid: The UUID of the build + :type uuid: str + :returns: Path to the UPLOADS file listing the build's associated uploads + :rtype: str + :raises: RuntimeError if the uuid is not found + """ + results_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid) + if not os.path.isdir(results_dir): + raise RuntimeError(f'"{uuid}" is not a valid build uuid!') + return joinpaths(results_dir, "UPLOADS") + +
[docs]def uuid_schedule_upload(cfg, uuid, provider_name, image_name, settings): + """Schedule an upload of an image + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uuid: The UUID of the build + :type uuid: str + :param provider_name: The name of the cloud provider, e.g. "azure" + :type provider_name: str + :param image_name: Path of the image to upload + :type image_name: str + :param settings: Settings to use for the selected provider + :type settings: dict + :returns: uuid of the upload + :rtype: str + :raises: RuntimeError if the uuid is not a valid build uuid + """ + status = uuid_status(cfg, uuid) + if status is None: + raise RuntimeError(f'"{uuid}" is not a valid build uuid!') + + upload = create_upload(cfg["upload"], provider_name, image_name, settings) + uuid_add_upload(cfg, uuid, upload.uuid) + return upload.uuid
+ +
[docs]def uuid_get_uploads(cfg, uuid): + """Return the list of uploads for a build uuid + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uuid: The UUID of the build + :type uuid: str + :returns: The upload UUIDs associated with the build UUID + :rtype: frozenset + """ + try: + with open(_upload_list_path(cfg, uuid)) as uploads_file: + return frozenset(uploads_file.read().split()) + except FileNotFoundError: + return frozenset()
+ +
[docs]def uuid_add_upload(cfg, uuid, upload_uuid): + """Add an upload UUID to a build + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uuid: The UUID of the build + :type uuid: str + :param upload_uuid: The UUID of the upload + :type upload_uuid: str + :returns: None + :rtype: None + """ + if upload_uuid not in uuid_get_uploads(cfg, uuid): + with open(_upload_list_path(cfg, uuid), "a") as uploads_file: + print(upload_uuid, file=uploads_file) + status = uuid_status(cfg, uuid) + if status and status["queue_status"] == "FINISHED": + uuid_ready_upload(cfg, uuid, upload_uuid)
+ +
[docs]def uuid_remove_upload(cfg, upload_uuid): + """Remove an upload UUID from the build + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param upload_uuid: The UUID of the upload + :type upload_uuid: str + :returns: None + :rtype: None + :raises: RuntimeError if the upload_uuid is not found + """ + for build_uuid in (os.path.basename(b) for b in glob(joinpaths(cfg.get("composer", "lib_dir"), "results/*"))): + uploads = uuid_get_uploads(cfg, build_uuid) + if upload_uuid not in uploads: + continue + + uploads = uploads - frozenset((upload_uuid,)) + with open(_upload_list_path(cfg, build_uuid), "w") as uploads_file: + for upload in uploads: + print(upload, file=uploads_file) + return + + raise RuntimeError(f"{upload_uuid} is not a valid upload id!")
+ +
[docs]def uuid_ready_upload(cfg, uuid, upload_uuid): + """Set an upload to READY if the build is in FINISHED state + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uuid: The UUID of the build + :type uuid: str + :param upload_uuid: The UUID of the upload + :type upload_uuid: str + :returns: None + :rtype: None + :raises: RuntimeError if the build uuid is invalid or not in FINISHED state. + """ + status = uuid_status(cfg, uuid) + if not status: + raise RuntimeError(f"{uuid} is not a valid build id!") + if status["queue_status"] != "FINISHED": + raise RuntimeError(f"Build {uuid} is not finished!") + _, image_path = uuid_image(cfg, uuid) + ready_upload(cfg["upload"], upload_uuid, image_path)
+ +
[docs]def uuid_cancel(cfg, uuid): + """Cancel a build and delete its results + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uuid: The UUID of the build + :type uuid: str + :returns: True if it was canceled and deleted + :rtype: bool + + Only call this if the build status is WAITING or RUNNING + """ + cancel_path = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid, "CANCEL") + if os.path.exists(cancel_path): + log.info("Cancel has already been requested for %s", uuid) + return False + + # This status can change (and probably will) while it is in the middle of doing this: + # It can move from WAITING -> RUNNING or it can move from RUNNING -> FINISHED|FAILED + + # If it is in WAITING remove the symlink and then check to make sure it didn't show up + # in the run queue + queue_dir = joinpaths(cfg.get("composer", "lib_dir"), "queue") + uuid_new = joinpaths(queue_dir, "new", uuid) + if os.path.exists(uuid_new): + try: + os.unlink(uuid_new) + except OSError: + # The symlink may vanish if the queue monitor started the build + pass + uuid_run = joinpaths(queue_dir, "run", uuid) + if not os.path.exists(uuid_run): + # Make sure the build is still in the waiting state + status = uuid_status(cfg, uuid) + if status is None or status["queue_status"] == "WAITING": + # Successfully removed it before the build started + return uuid_delete(cfg, uuid) + + # At this point the build has probably started. Write to the CANCEL file. + open(cancel_path, "w").write("\n") + + # Wait for status to move to FAILED or FINISHED + started = time.time() + while True: + status = uuid_status(cfg, uuid) + if status is None or status["queue_status"] == "FAILED": + break + elif status is not None and status["queue_status"] == "FINISHED": + # The build finished successfully, no point in deleting it now + return False + + # Is this taking too long? Exit anyway and try to cleanup. + if time.time() > started + (10 * 60): + log.error("Failed to cancel the build of %s", uuid) + break + + time.sleep(5) + + # Remove the partial results + return uuid_delete(cfg, uuid)
+ +
[docs]def uuid_delete(cfg, uuid): + """Delete all of the results from a compose + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uuid: The UUID of the build + :type uuid: str + :returns: True if it was deleted + :rtype: bool + :raises: This will raise an error if the delete failed + """ + uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid) + if not uuid_dir or len(uuid_dir) < 10: + raise RuntimeError("Directory length is too short: %s" % uuid_dir) + + for upload in get_uploads(cfg["upload"], uuid_get_uploads(cfg, uuid)): + delete_upload(cfg["upload"], upload.uuid) + + shutil.rmtree(uuid_dir) + return True
+ +
[docs]def uuid_info(cfg, uuid, api=1): + """Return information about the composition + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uuid: The UUID of the build + :type uuid: str + :param api: Select which api version of the dict to return (default 1) + :type api: int + :returns: dictionary of information about the composition or None + :rtype: dict + :raises: RuntimeError if there was a problem + + This will return a dict with the following fields populated: + + * id - The uuid of the composition + * config - containing the configuration settings used to run Anaconda + * blueprint - The depsolved blueprint used to generate the kickstart + * commit - The (local) git commit hash for the blueprint used + * deps - The NEVRA of all of the dependencies used in the composition + * compose_type - The type of output generated (tar, iso, etc.) + * queue_status - The final status of the composition (FINISHED or FAILED) + """ + uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid) + if not os.path.exists(uuid_dir): + return None + + # Load the compose configuration + cfg_path = joinpaths(uuid_dir, "config.toml") + if not os.path.exists(cfg_path): + raise RuntimeError("Missing config.toml for %s" % uuid) + cfg_dict = toml.loads(open(cfg_path, "r").read()) + + frozen_path = joinpaths(uuid_dir, "frozen.toml") + if not os.path.exists(frozen_path): + raise RuntimeError("Missing frozen.toml for %s" % uuid) + frozen_dict = toml.loads(open(frozen_path, "r").read()) + + deps_path = joinpaths(uuid_dir, "deps.toml") + if not os.path.exists(deps_path): + raise RuntimeError("Missing deps.toml for %s" % uuid) + deps_dict = toml.loads(open(deps_path, "r").read()) + + details = compose_detail(cfg, uuid_dir, api) + + commit_path = joinpaths(uuid_dir, "COMMIT") + if not os.path.exists(commit_path): + raise RuntimeError("Missing commit hash for %s" % uuid) + commit_id = open(commit_path, "r").read().strip() + + info = {"id": uuid, + "config": cfg_dict, + "blueprint": frozen_dict, + "commit": commit_id, + "deps": deps_dict, + "compose_type": details["compose_type"], + "queue_status": details["queue_status"], + "image_size": details["image_size"], + } + if api == 1: + upload_uuids = uuid_get_uploads(cfg, uuid) + summaries = [upload.summary() for upload in get_uploads(cfg["upload"], upload_uuids)] + info["uploads"] = summaries + return info
+ +
[docs]def uuid_tar(cfg, uuid, metadata=False, image=False, logs=False): + """Return a tar of the build data + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uuid: The UUID of the build + :type uuid: str + :param metadata: Set to true to include all the metadata needed to reproduce the build + :type metadata: bool + :param image: Set to true to include the output image + :type image: bool + :param logs: Set to true to include the logs from the build + :type logs: bool + :returns: A stream of bytes from tar + :rtype: A generator + :raises: RuntimeError if there was a problem (eg. missing config file) + + This streams the selected data to the caller as an uncompressed tar + by returning the Popen stdout from the tar process. + """ + uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid) + if not os.path.exists(uuid_dir): + raise RuntimeError("%s is not a valid build_id" % uuid) + + # Load the compose configuration + cfg_path = joinpaths(uuid_dir, "config.toml") + if not os.path.exists(cfg_path): + raise RuntimeError("Missing config.toml for %s" % uuid) + cfg_dict = toml.loads(open(cfg_path, "r").read()) + image_name = cfg_dict["image_name"] + + def include_file(f): + if f.endswith("/logs"): + return logs + if f.endswith(image_name): + return image + return metadata + filenames = [os.path.basename(f) for f in glob(joinpaths(uuid_dir, "*")) if include_file(f)] + + tar = Popen(["tar", "-C", uuid_dir, "-cf-"] + filenames, stdout=PIPE) + return tar.stdout
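A usage sketch, streaming the metadata tar to a local file (`cfg` and `build_id` assumed from context):

    stream = uuid_tar(cfg, build_id, metadata=True)
    with open("/tmp/metadata.tar", "wb") as f:
        while True:
            chunk = stream.read(65536)
            if not chunk:
                break
            f.write(chunk)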
+ +
[docs]def uuid_image(cfg, uuid): + """Return the filename and full path of the build's image file + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uuid: The UUID of the build + :type uuid: str + :returns: The image filename and full path + :rtype: tuple of strings + :raises: RuntimeError if there was a problem (eg. invalid uuid, missing config file) + """ + uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid) + return get_image_name(uuid_dir)
+ +
[docs]def get_image_name(uuid_dir): + """Return the filename and full path of the build's image file + + :param uuid_dir: The directory containing the metadata and results for the build + :type uuid_dir: str + :returns: The image filename and full path + :rtype: tuple of strings + :raises: RuntimeError if there was a problem (eg. invalid uuid, missing config file) + """ + uuid = os.path.basename(os.path.abspath(uuid_dir)) + if not os.path.exists(uuid_dir): + raise RuntimeError("%s is not a valid build_id" % uuid) + + # Load the compose configuration + cfg_path = joinpaths(uuid_dir, "config.toml") + if not os.path.exists(cfg_path): + raise RuntimeError("Missing config.toml for %s" % uuid) + cfg_dict = toml.loads(open(cfg_path, "r").read()) + image_name = cfg_dict["image_name"] + + return (image_name, joinpaths(uuid_dir, image_name))
+ +
[docs]def uuid_log(cfg, uuid, size=1024): + """Return `size` KiB from the end of the most currently relevant log for a + given compose + + :param cfg: Configuration settings + :type cfg: ComposerConfig + :param uuid: The UUID of the build + :type uuid: str + :param size: Number of KiB to read. Default is 1024 + :type size: int + :returns: Up to `size` KiB from the end of the log + :rtype: str + :raises: RuntimeError if there was a problem (eg. no log file available) + + This function will return the end of either the anaconda log, the packaging + log, or the combined composer logs, depending on the progress of the + compose. It tries to return lines from the end of the log; it will attempt + to start on a line boundary, and it may return less than `size` KiB. + """ + uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid) + if not os.path.exists(uuid_dir): + raise RuntimeError("%s is not a valid build_id" % uuid) + + # While a build is running the logs will be in /tmp/anaconda.log and when it + # has finished they will be in the results directory + status = uuid_status(cfg, uuid) + if status is None: + raise RuntimeError("Status is missing for %s" % uuid) + + def get_log_path(): + # Try to return the most relevant log at any given time during the + # compose. If the compose is not running, return the composer log. + anaconda_log = "/tmp/anaconda.log" + packaging_log = "/tmp/packaging.log" + combined_log = joinpaths(uuid_dir, "logs", "combined.log") + if status["queue_status"] != "RUNNING" or not os.path.isfile(anaconda_log): + return combined_log + if not os.path.isfile(packaging_log): + return anaconda_log + try: + anaconda_mtime = os.stat(anaconda_log).st_mtime + packaging_mtime = os.stat(packaging_log).st_mtime + # If the packaging log exists and its last message is at least 15 + # seconds newer than the anaconda log, return the packaging log. + if packaging_mtime > anaconda_mtime + 15: + return packaging_log + return anaconda_log + except OSError: + # Return the combined log if anaconda_log or packaging_log disappear + return combined_log + try: + tail = read_tail(get_log_path(), size) + except OSError as e: + raise RuntimeError("No log available.") from e + return tail
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/recipes.html b/f32-branch/_modules/pylorax/api/recipes.html new file mode 100644 index 00000000..266d2158 --- /dev/null +++ b/f32-branch/_modules/pylorax/api/recipes.html @@ -0,0 +1,1477 @@ + + + + + + + + + + + pylorax.api.recipes — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.api.recipes

+#
+# Copyright (C) 2017-2019  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import gi
+gi.require_version("Ggit", "1.0")
+from gi.repository import Ggit as Git
+from gi.repository import Gio
+from gi.repository import GLib
+
+import os
+import semantic_version as semver
+
+from pylorax.api.projects import dep_evra
+from pylorax.base import DataHolder
+from pylorax.sysutils import joinpaths
+import pylorax.api.toml as toml
+
+
+
[docs]class CommitTimeValError(Exception): + pass
+ +
[docs]class RecipeFileError(Exception): + pass
+ +
[docs]class RecipeError(Exception): + pass
+ + +
[docs]class Recipe(dict): + """A Recipe of packages and modules + + This is a subclass of dict that enforces the constructor arguments + and adds a .filename property to return the recipe's filename, + and a .toml() function to return the recipe as a TOML string. + """ + def __init__(self, name, description, version, modules, packages, groups, customizations=None, gitrepos=None): + # Check that version is empty or semver compatible + if version: + semver.Version(version) + + # Make sure modules, packages, and groups are listed by their case-insensitive names + if modules is not None: + modules = sorted(modules, key=lambda m: m["name"].lower()) + if packages is not None: + packages = sorted(packages, key=lambda p: p["name"].lower()) + if groups is not None: + groups = sorted(groups, key=lambda g: g["name"].lower()) + + # Only support [[repos.git]] for now + if gitrepos is not None: + repos = {"git": sorted(gitrepos, key=lambda g: g["repo"].lower())} + else: + repos = None + dict.__init__(self, name=name, + description=description, + version=version, + modules=modules, + packages=packages, + groups=groups, + customizations=customizations, + repos=repos) + + # We don't want customizations=None to show up in the TOML so remove it + if customizations is None: + del self["customizations"] + + # Don't include empty repos or repos.git + if repos is None or not repos["git"]: + del self["repos"] + + @property + def package_names(self): + """Return the names of the packages""" + return [p["name"] for p in self["packages"] or []] + + @property + def package_nver(self): + """Return the names and version globs of the packages""" + return [(p["name"], p["version"]) for p in self["packages"] or []] + + @property + def module_names(self): + """Return the names of the modules""" + return [m["name"] for m in self["modules"] or []] + + @property + def module_nver(self): + """Return the names and version globs of the modules""" + return [(m["name"], m["version"]) for m in self["modules"] or []] + + @property + def group_names(self): + """Return the names of the groups. Groups do not have versions.""" + return map(lambda g: g["name"], self["groups"] or []) + + @property + def filename(self): + """Return the Recipe's filename + + Replaces spaces in the name with '-' and appends .toml + """ + return recipe_filename(self.get("name")) + 
[docs] def toml(self): + """Return the Recipe in TOML format""" + return toml.dumps(self)
+ +
[docs] def bump_version(self, old_version=None): + """semver recipe version number bump + + :param old_version: An optional old version number + :type old_version: str + :returns: The new version number or None + :rtype: str + :raises: ValueError + + If neither has a version, 0.0.1 is returned + If there is no old version the new version is checked and returned + If there is no new version, but there is an old one, bump its patch level + If the old and new versions are the same, bump the patch level + If they are different, check and return the new version + """ + new_version = self.get("version") + if not new_version and not old_version: + self["version"] = "0.0.1" + + elif new_version and not old_version: + semver.Version(new_version) + self["version"] = new_version + + elif not new_version or new_version == old_version: + new_version = str(semver.Version(old_version).next_patch()) + self["version"] = new_version + + else: + semver.Version(new_version) + self["version"] = new_version + + # Return the new version + return str(semver.Version(self["version"]))
+ +
[docs] def freeze(self, deps): + """ Return a new Recipe with full module and package NEVRA + + :param deps: A list of dependency NEVRA to use to fill in the modules and packages + :type deps: list of dict + :returns: A new Recipe object + :rtype: Recipe + """ + module_names = self.module_names + package_names = self.package_names + group_names = self.group_names + + new_modules = [] + new_packages = [] + new_groups = [] + for dep in deps: + if dep["name"] in package_names: + new_packages.append(RecipePackage(dep["name"], dep_evra(dep))) + elif dep["name"] in module_names: + new_modules.append(RecipeModule(dep["name"], dep_evra(dep))) + elif dep["name"] in group_names: + new_groups.append(RecipeGroup(dep["name"])) + if "customizations" in self: + customizations = self["customizations"] + else: + customizations = None + if "repos" in self and "git" in self["repos"]: + gitrepos = self["repos"]["git"] + else: + gitrepos = None + + return Recipe(self["name"], self["description"], self["version"], + new_modules, new_packages, new_groups, customizations, gitrepos)
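A short sketch of building a Recipe by hand and using the helpers above (names and versions invented):

    r = Recipe("http-server", "An http server", "0.0.1",
               modules=[{"name": "httpd", "version": "2.4.*"}],
               packages=[{"name": "tmux", "version": "*"}],
               groups=[])
    print(r.toml())           # TOML string representation
    r.bump_version("0.0.1")   # same as the old version, so the patch level bumps to "0.0.2"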
+ +
[docs]class RecipeModule(dict): + def __init__(self, name, version): + dict.__init__(self, name=name, version=version)
+ +
[docs]class RecipePackage(RecipeModule): + pass
+ +
[docs]class RecipeGroup(dict): + def __init__(self, name): + dict.__init__(self, name=name)
+ +
[docs]def NewRecipeGit(toml_dict): + """Create a RecipeGit object from fields in a TOML dict + + :param rpmname: Name of the rpm to create, also used as the prefix name in the tar archive + :type rpmname: str + :param rpmversion: Version of the rpm, eg. "1.0.0" + :type rpmversion: str + :param rpmrelease: Release of the rpm, eg. "1" + :type rpmrelease: str + :param summary: Summary string for the rpm + :type summary: str + :param repo: URL of the git repo to clone and create the archive from + :type repo: str + :param ref: Git reference to check out. eg. origin/branch-name, git tag, or git commit hash + :type ref: str + :param destination: Path to install the / of the git repo at when installing the rpm + :type destination: str + :returns: A populated RecipeGit object + :rtype: RecipeGit + + The TOML should look like this:: + + [[repos.git]] + rpmname="server-config" + rpmversion="1.0" + rpmrelease="1" + summary="Setup files for server deployment" + repo="PATH OF GIT REPO TO CLONE" + ref="v1.0" + destination="/opt/server/" + + Note that the repo path supports anything that git supports, file://, https://, http:// + + Currently there is no support for authentication + """ + return RecipeGit(toml_dict.get("rpmname"), + toml_dict.get("rpmversion"), + toml_dict.get("rpmrelease"), + toml_dict.get("summary", ""), + toml_dict.get("repo"), + toml_dict.get("ref"), + toml_dict.get("destination"))
+ +
[docs]class RecipeGit(dict): + def __init__(self, rpmname, rpmversion, rpmrelease, summary, repo, ref, destination): + dict.__init__(self, rpmname=rpmname, rpmversion=rpmversion, rpmrelease=rpmrelease, + summary=summary, repo=repo, ref=ref, destination=destination)
+ +
[docs]def recipe_from_file(recipe_path): + """Return a recipe file as a Recipe object + + :param recipe_path: Path to the recipe file + :type recipe_path: str + :returns: A Recipe object + :rtype: Recipe + """ + with open(recipe_path, 'rb') as f: + return recipe_from_toml(f.read())
+ +
[docs]def recipe_from_toml(recipe_str): + """Create a Recipe object from a toml string. + + :param recipe_str: The Recipe TOML string + :type recipe_str: str + :returns: A Recipe object + :rtype: Recipe + :raises: TomlError + """ + recipe_dict = toml.loads(recipe_str) + return recipe_from_dict(recipe_dict)
+ +
[docs]def check_required_list(lst, fields): + """Check a list of dicts for required fields + + :param lst: A list of dicts with fields + :type lst: list of dict + :param fields: A list of field name strings + :type fields: list of str + :returns: A list of error strings + :rtype: list of str + """ + errors = [] + for i, m in enumerate(lst): + m_errs = [] + errors.extend(check_list_case(fields, m.keys(), prefix="%d " % (i+1))) + for f in fields: + if f not in m: + m_errs.append("'%s'" % f) + if m_errs: + errors.append("%d is missing %s" % (i+1, ", ".join(m_errs))) + return errors
+ +
[docs]def check_list_case(expected_keys, recipe_keys, prefix=""): + """Check the case of the recipe keys + + :param expected_keys: A list of expected key strings + :type expected_keys: list of str + :param recipe_keys: A list of the recipe's key strings + :type recipe_keys: list of str + :param prefix: A string to prepend to the error messages + :type prefix: str + :returns: list of errors + :rtype: list of str + """ + errors = [] + for k in recipe_keys: + if k in expected_keys: + continue + if k.lower() in expected_keys: + errors.append(prefix + "%s should be %s" % (k, k.lower())) + return errors
+ +
[docs]def check_recipe_dict(recipe_dict): + """Check a dict before using it to create a new Recipe + + :param recipe_dict: A plain dict of the recipe + :type recipe_dict: dict + :returns: A list of error strings, empty if the dict is ok + :rtype: list of str + + This checks a dict to make sure required fields are present and that + optional fields, when included, are of the correct format. + + It collects all of the errors into a list of strings that the caller + can join into a single RecipeError message to present to users. + """ + errors = [] + + # Check for wrong case of top level keys + top_keys = ["name", "description", "version", "modules", "packages", "groups", "repos", "customizations"] + errors.extend(check_list_case(top_keys, recipe_dict.keys())) + + if "name" not in recipe_dict: + errors.append("Missing 'name'") + if "description" not in recipe_dict: + errors.append("Missing 'description'") + if "version" in recipe_dict: + try: + semver.Version(recipe_dict["version"]) + except ValueError: + errors.append("Invalid 'version', must use Semantic Versioning") + + # Examine all the modules + if recipe_dict.get("modules"): + module_errors = check_required_list(recipe_dict["modules"], ["name", "version"]) + if module_errors: + errors.append("'modules' errors:\n%s" % "\n".join(module_errors)) + + # Examine all the packages + if recipe_dict.get("packages"): + package_errors = check_required_list(recipe_dict["packages"], ["name", "version"]) + if package_errors: + errors.append("'packages' errors:\n%s" % "\n".join(package_errors)) + + if recipe_dict.get("groups"): + groups_errors = check_required_list(recipe_dict["groups"], ["name"]) + if groups_errors: + errors.append("'groups' errors:\n%s" % "\n".join(groups_errors)) + + if recipe_dict.get("repos") and recipe_dict.get("repos").get("git"): + repos_errors = check_required_list(recipe_dict.get("repos").get("git"), + ["rpmname", "rpmversion", "rpmrelease", "summary", "repo", "ref", "destination"]) + if repos_errors: + errors.append("'repos.git' errors:\n%s" % "\n".join(repos_errors)) + + # No customizations to check, exit now + c = recipe_dict.get("customizations") + if not c: + return errors + + # Make sure to catch empty sections by testing for keywords, not just looking at .get() result. 
+ if "kernel" in c: + errors.extend(check_list_case(["append"], c["kernel"].keys(), prefix="kernel ")) + if "append" not in c.get("kernel", []): + errors.append("'customizations.kernel': missing append field.") + + if "sshkey" in c: + sshkey_errors = check_required_list(c.get("sshkey"), ["user", "key"]) + if sshkey_errors: + errors.append("'customizations.sshkey' errors:\n%s" % "\n".join(sshkey_errors)) + + if "user" in c: + user_errors = check_required_list(c.get("user"), ["name"]) + if user_errors: + errors.append("'customizations.user' errors:\n%s" % "\n".join(user_errors)) + + if "group" in c: + group_errors = check_required_list(c.get("group"), ["name"]) + if group_errors: + errors.append("'customizations.group' errors:\n%s" % "\n".join(group_errors)) + + if "timezone" in c: + errors.extend(check_list_case(["timezone", "ntpservers"], c["timezone"].keys(), prefix="timezone ")) + if not c.get("timezone"): + errors.append("'customizations.timezone': missing timezone or ntpservers fields.") + + if "locale" in c: + errors.extend(check_list_case(["languages", "keyboard"], c["locale"].keys(), prefix="locale ")) + if not c.get("locale"): + errors.append("'customizations.locale': missing languages or keyboard fields.") + + if "firewall" in c: + errors.extend(check_list_case(["ports"], c["firewall"].keys(), prefix="firewall ")) + if not c.get("firewall"): + errors.append("'customizations.firewall': missing ports field or services section.") + + if "services" in c.get("firewall", []): + errors.extend(check_list_case(["enabled", "disabled"], c["firewall"]["services"].keys(), prefix="firewall.services ")) + if not c.get("firewall").get("services"): + errors.append("'customizations.firewall.services': missing enabled or disabled fields.") + + if "services" in c: + errors.extend(check_list_case(["enabled", "disabled"], c["services"].keys(), prefix="services ")) + if not c.get("services"): + errors.append("'customizations.services': missing enabled or disabled fields.") + + return errors
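A sketch of the error list this returns, using an intentionally broken dict::

    from pylorax.api.recipes import check_recipe_dict

    errors = check_recipe_dict({"name": "broken", "version": "not-semver"})
    # -> ["Missing 'description'", "Invalid 'version', must use Semantic Versioning"]
    print("\n".join(errors))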
+ +
[docs]def recipe_from_dict(recipe_dict): + """Create a Recipe object from a plain dict. + + :param recipe_dict: A plain dict of the recipe + :type recipe_dict: dict + :returns: A Recipe object + :rtype: Recipe + :raises: RecipeError + """ + errors = check_recipe_dict(recipe_dict) + if errors: + msg = "\n".join(errors) + raise RecipeError(msg) + + # Make RecipeModule objects from the toml + # The TOML may not have modules or packages in it. Set them to empty lists in this case + try: + if recipe_dict.get("modules"): + modules = [RecipeModule(m.get("name"), m.get("version")) for m in recipe_dict["modules"]] + else: + modules = [] + if recipe_dict.get("packages"): + packages = [RecipePackage(p.get("name"), p.get("version")) for p in recipe_dict["packages"]] + else: + packages = [] + if recipe_dict.get("groups"): + groups = [RecipeGroup(g.get("name")) for g in recipe_dict["groups"]] + else: + groups = [] + if recipe_dict.get("repos") and recipe_dict.get("repos").get("git"): + gitrepos = [NewRecipeGit(r) for r in recipe_dict["repos"]["git"]] + else: + gitrepos = [] + name = recipe_dict["name"] + description = recipe_dict["description"] + version = recipe_dict.get("version", None) + customizations = recipe_dict.get("customizations", None) + + # [customizations] was incorrectly documented at first, so we have to support using it + # as [[customizations]] by grabbing the first element. + if isinstance(customizations, list): + customizations = customizations[0] + + except KeyError as e: + raise RecipeError("There was a problem parsing the recipe: %s" % str(e)) + + return Recipe(name, description, version, modules, packages, groups, customizations, gitrepos)
+ +
[docs]def gfile(path): + """Convert a string path to GFile for use with Git""" + return Gio.file_new_for_path(path)
+ +
[docs]def recipe_filename(name): + """Return the toml filename for a recipe + + Replaces spaces with '-' and appends '.toml' + """ + # XXX Raise an error if this is empty? + return name.replace(" ", "-") + ".toml"
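For example::

    >>> recipe_filename("http server")
    'http-server.toml'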
+ +
[docs]def head_commit(repo, branch): + """Get the branch's HEAD Commit Object + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :returns: Branch's head commit + :rtype: Git.Commit + :raises: Can raise errors from Ggit + """ + branch_obj = repo.lookup_branch(branch, Git.BranchType.LOCAL) + commit_id = branch_obj.get_target() + return repo.lookup(commit_id, Git.Commit)
+ +
[docs]def prepare_commit(repo, branch, builder): + """Prepare for a commit + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param builder: instance of TreeBuilder + :type builder: TreeBuilder + :returns: (Tree, Sig, Ref) + :rtype: tuple + :raises: Can raise errors from Ggit + """ + tree_id = builder.write() + tree = repo.lookup(tree_id, Git.Tree) + sig = Git.Signature.new_now("bdcs-api-server", "user-email") + ref = "refs/heads/%s" % branch + return (tree, sig, ref)
+ +
[docs]def open_or_create_repo(path): + """Open an existing repo, or create a new one + + :param path: path to recipe directory + :type path: string + :returns: A repository object + :rtype: Git.Repository + :raises: Can raise errors from Ggit + + A bare git repo will be created in the git directory of the specified path. + If a repo already exists it will be opened and returned instead of + creating a new one. + """ + Git.init() + git_path = joinpaths(path, "git") + if os.path.exists(joinpaths(git_path, "HEAD")): + return Git.Repository.open(gfile(git_path)) + + repo = Git.Repository.init_repository(gfile(git_path), True) + + # Make an initial empty commit + sig = Git.Signature.new_now("bdcs-api-server", "user-email") + tree_id = repo.get_index().write_tree() + tree = repo.lookup(tree_id, Git.Tree) + repo.create_commit("HEAD", sig, sig, "UTF-8", "Initial Recipe repository commit", tree, []) + return repo
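A sketch of creating (or reopening) a recipe store; the path here is only an example::

    from pylorax.api.recipes import open_or_create_repo, list_branch_files

    repo = open_or_create_repo("/tmp/recipe-store")
    # The bare repository now lives in /tmp/recipe-store/git
    print(list_branch_files(repo, "master"))    # [] for a brand new store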
+ +
[docs]def write_commit(repo, branch, filename, message, content): + """Make a new commit to a repository's branch + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param filename: full path of the file to add + :type filename: str + :param message: The commit message + :type message: str + :param content: The data to write + :type content: str + :returns: OId of the new commit + :rtype: Git.OId + :raises: Can raise errors from Ggit + """ + try: + parent_commit = head_commit(repo, branch) + except GLib.GError: + # Branch doesn't exist, make a new one based on master + master_head = head_commit(repo, "master") + repo.create_branch(branch, master_head, 0) + parent_commit = head_commit(repo, branch) + + blob_id = repo.create_blob_from_buffer(content.encode("UTF-8")) + + # Use treebuilder to make a new entry for this filename and blob + parent_tree = parent_commit.get_tree() + builder = repo.create_tree_builder_from_tree(parent_tree) + builder.insert(filename, blob_id, Git.FileMode.BLOB) + (tree, sig, ref) = prepare_commit(repo, branch, builder) + return repo.create_commit(ref, sig, sig, "UTF-8", message, tree, [parent_commit])
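Putting the low-level commit helpers together; the path, filename, and content are illustrative::

    from pylorax.api.recipes import open_or_create_repo, write_commit, read_commit

    repo = open_or_create_repo("/tmp/recipe-store")
    write_commit(repo, "master", "example.toml", "Add example file", 'name = "example"\n')
    commit_id, raw = read_commit(repo, "master", "example.toml")
    print(commit_id)    # hash of the new commit
    print(raw)          # raw blob contents (bytes)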
+ +
[docs]def read_commit_spec(repo, spec): + """Return the raw content of the blob specified by the spec + + :param repo: Open repository + :type repo: Git.Repository + :param spec: Git revparse spec + :type spec: str + :returns: Raw contents of the blob + :rtype: bytes + :raises: Can raise errors from Ggit + + eg. To read the README file from master the spec is "master:README" + """ + commit_id = repo.revparse(spec).get_id() + blob = repo.lookup(commit_id, Git.Blob) + return blob.get_raw_content()
+ +
[docs]def read_commit(repo, branch, filename, commit=None): + """Return the contents of a file on a specific branch or commit. + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param filename: filename to read + :type filename: str + :param commit: Optional commit hash + :type commit: str + :returns: The commit id, and the contents of the commit + :rtype: tuple(str, str) + :raises: Can raise errors from Ggit + + If no commit is passed, the most recent commit for filename on the selected + branch is used; otherwise commit:filename is read. + """ + if not commit: + # Find the most recent commit for filename on the selected branch + commits = list_commits(repo, branch, filename, 1) + if not commits: + raise RecipeError("No commits for %s on the %s branch." % (filename, branch)) + commit = commits[0].commit + return (commit, read_commit_spec(repo, "%s:%s" % (commit, filename)))
+ +
[docs]def read_recipe_commit(repo, branch, recipe_name, commit=None): + """Read a recipe commit from git and return a Recipe object + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param recipe_name: Recipe name to read + :type recipe_name: str + :param commit: Optional commit hash + :type commit: str + :returns: A Recipe object + :rtype: Recipe + :raises: Can raise errors from Ggit + + If no commit is passed, the most recent commit for the recipe on the selected + branch is used; otherwise the recipe is read from commit:filename. + """ + if not repo_file_exists(repo, branch, recipe_filename(recipe_name)): + raise RecipeFileError("Unknown blueprint") + + (_, recipe_toml) = read_commit(repo, branch, recipe_filename(recipe_name), commit) + return recipe_from_toml(recipe_toml)
+ +
[docs]def read_recipe_and_id(repo, branch, recipe_name, commit=None): + """Read a recipe commit and its id from git + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param recipe_name: Recipe name to read + :type recipe_name: str + :param commit: Optional commit hash + :type commit: str + :returns: The commit id, and a Recipe object + :rtype: tuple(str, Recipe) + :raises: Can raise errors from Ggit + + If no commit is passed, the most recent commit for the recipe on the selected + branch is used; otherwise the recipe is read from commit:filename. + """ + (commit_id, recipe_toml) = read_commit(repo, branch, recipe_filename(recipe_name), commit) + return (commit_id, recipe_from_toml(recipe_toml))
+ +
[docs]def list_branch_files(repo, branch): + """Return a sorted list of the files on the branch HEAD + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :returns: A sorted list of the filenames + :rtype: list(str) + :raises: Can raise errors from Ggit + """ + commit = head_commit(repo, branch).get_id().to_string() + return list_commit_files(repo, commit)
+ +
[docs]def list_commit_files(repo, commit): + """Return a sorted list of the files on a commit + + :param repo: Open repository + :type repo: Git.Repository + :param commit: The commit hash to list + :type commit: str + :returns: A sorted list of the filenames + :rtype: list(str) + :raises: Can raise errors from Ggit + """ + commit_id = Git.OId.new_from_string(commit) + commit_obj = repo.lookup(commit_id, Git.Commit) + tree = commit_obj.get_tree() + return sorted([tree.get(i).get_name() for i in range(0, tree.size())])
+ +
[docs]def delete_recipe(repo, branch, recipe_name): + """Delete a recipe from a branch. + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param recipe_name: Recipe name to delete + :type recipe_name: str + :returns: OId of the new commit + :rtype: Git.OId + :raises: Can raise errors from Ggit + """ + return delete_file(repo, branch, recipe_filename(recipe_name))
+ +
[docs]def delete_file(repo, branch, filename): + """Delete a file from a branch. + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param filename: filename to delete + :type filename: str + :returns: OId of the new commit + :rtype: Git.OId + :raises: Can raise errors from Ggit + """ + parent_commit = head_commit(repo, branch) + parent_tree = parent_commit.get_tree() + builder = repo.create_tree_builder_from_tree(parent_tree) + builder.remove(filename) + (tree, sig, ref) = prepare_commit(repo, branch, builder) + message = "Recipe %s deleted" % filename + return repo.create_commit(ref, sig, sig, "UTF-8", message, tree, [parent_commit])
+ +
[docs]def revert_recipe(repo, branch, recipe_name, commit): + """Revert the contents of a recipe to that of a previous commit + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param recipe_name: Recipe name to revert + :type recipe_name: str + :param commit: Commit hash + :type commit: str + :returns: OId of the new commit + :rtype: Git.OId + :raises: Can raise errors from Ggit + """ + return revert_file(repo, branch, recipe_filename(recipe_name), commit)
+ +
[docs]def revert_file(repo, branch, filename, commit): + """Revert the contents of a file to that of a previous commit + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param filename: filename to revert + :type filename: str + :param commit: Commit hash + :type commit: str + :returns: OId of the new commit + :rtype: Git.OId + :raises: Can raise errors from Ggit + """ + commit_id = Git.OId.new_from_string(commit) + commit_obj = repo.lookup(commit_id, Git.Commit) + revert_tree = commit_obj.get_tree() + entry = revert_tree.get_by_name(filename) + blob_id = entry.get_id() + parent_commit = head_commit(repo, branch) + + # Use treebuilder to modify the tree + parent_tree = parent_commit.get_tree() + builder = repo.create_tree_builder_from_tree(parent_tree) + builder.insert(filename, blob_id, Git.FileMode.BLOB) + (tree, sig, ref) = prepare_commit(repo, branch, builder) + commit_hash = commit_id.to_string() + message = "%s reverted to commit %s" % (filename, commit_hash) + return repo.create_commit(ref, sig, sig, "UTF-8", message, tree, [parent_commit])
+ +
[docs]def commit_recipe(repo, branch, recipe): + """Commit a recipe to a branch + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param recipe: Recipe to commit + :type recipe: Recipe + :returns: OId of the new commit + :rtype: Git.OId + :raises: Can raise errors from Ggit + """ + try: + old_recipe = read_recipe_commit(repo, branch, recipe["name"]) + old_version = old_recipe["version"] + except Exception: + old_version = None + + recipe.bump_version(old_version) + recipe_toml = recipe.toml() + message = "Recipe %s, version %s saved." % (recipe["name"], recipe["version"]) + return write_commit(repo, branch, recipe.filename, message, recipe_toml)
+ +
[docs]def commit_recipe_file(repo, branch, filename): + """Commit a recipe file to a branch + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param filename: Path to the recipe file to commit + :type filename: str + :returns: OId of the new commit + :rtype: Git.OId + :raises: Can raise errors from Ggit or RecipeFileError + """ + try: + recipe = recipe_from_file(filename) + except IOError: + raise RecipeFileError + + return commit_recipe(repo, branch, recipe)
+ +
[docs]def commit_recipe_directory(repo, branch, directory): + r"""Commit all \*.toml files from a directory, if they aren't already in git. + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param directory: The directory of \*.toml recipes to commit + :type directory: str + :returns: None + :raises: Can raise errors from Ggit or RecipeFileError + + Files with Toml or RecipeFileErrors will be skipped, and the remainder will + be tried. + """ + dir_files = set([e for e in os.listdir(directory) if e.endswith(".toml")]) + branch_files = set(list_branch_files(repo, branch)) + new_files = dir_files.difference(branch_files) + + for f in new_files: + # Skip files with errors, but try the others + try: + commit_recipe_file(repo, branch, joinpaths(directory, f)) + except (RecipeError, RecipeFileError, toml.TomlError): + pass
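A sketch of importing any new \*.toml files from a directory; both paths are illustrative::

    from pylorax.api.recipes import open_or_create_repo, commit_recipe_directory

    repo = open_or_create_repo("/tmp/recipe-store")
    # /tmp/blueprints/ is assumed to contain *.toml blueprint files
    commit_recipe_directory(repo, "master", "/tmp/blueprints/")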
+ +
[docs]def tag_recipe_commit(repo, branch, recipe_name): + """Tag a file's most recent commit + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param recipe_name: Recipe name to tag + :type recipe_name: str + :returns: Tag id or None if it failed. + :rtype: Git.OId + :raises: Can raise errors from Ggit + + Uses tag_file_commit() + """ + if not repo_file_exists(repo, branch, recipe_filename(recipe_name)): + raise RecipeFileError("Unknown blueprint") + + return tag_file_commit(repo, branch, recipe_filename(recipe_name))
+ +
[docs]def tag_file_commit(repo, branch, filename): + """Tag a file's most recent commit + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param filename: Filename to tag + :type filename: str + :returns: Tag id or None if it failed. + :rtype: Git.OId + :raises: Can raise errors from Ggit + + This uses git tags, of the form `refs/tags/<branch>/<filename>/r<revision>` + Only the most recent recipe commit can be tagged, to prevent out of order tagging. + Revisions start at 1 and increment for each new commit that is tagged. + If the file has no commits, None is returned. + """ + file_commits = list_commits(repo, branch, filename) + if not file_commits: + return None + + # Find the most recently tagged version (there may not be one) and add 1 to it. + for details in file_commits: + if details.revision is not None: + new_revision = details.revision + 1 + break + else: + new_revision = 1 + + name = "%s/%s/r%d" % (branch, filename, new_revision) + sig = Git.Signature.new_now("bdcs-api-server", "user-email") + commit_id = Git.OId.new_from_string(file_commits[0].commit) + commit = repo.lookup(commit_id, Git.Commit) + return repo.create_tag(name, commit, sig, name, Git.CreateFlags.NONE)
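A sketch of the tagging flow, assuming example.toml already has at least one commit on master::

    from pylorax.api.recipes import open_or_create_repo, tag_file_commit

    repo = open_or_create_repo("/tmp/recipe-store")
    tag_file_commit(repo, "master", "example.toml")
    # The first call creates the tag "master/example.toml/r1";
    # tagging again after a new commit creates ".../r2", and so on.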
+ +
[docs]def find_commit_tag(repo, branch, filename, commit_id): + """Find the tag that matches the commit_id + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param filename: filename to find the tag for + :type filename: str + :param commit_id: The commit id to check + :type commit_id: Git.OId + :returns: The tag or None if there isn't one + :rtype: str or None + + There should be only 1 tag pointing to a commit, but there may not + be a tag at all. + + The tag will look like: 'refs/tags/<branch>/<filename>/r<revision>' + """ + pattern = "%s/%s/r*" % (branch, filename) + tags = [t for t in repo.list_tags_match(pattern) if is_commit_tag(repo, commit_id, t)] + if len(tags) != 1: + return None + else: + return tags[0]
+ +
[docs]def is_commit_tag(repo, commit_id, tag): + """Check to see if a tag points to a specific commit. + + :param repo: Open repository + :type repo: Git.Repository + :param commit_id: The commit id to check + :type commit_id: Git.OId + :param tag: The tag to check + :type tag: str + :returns: True if the tag points to the commit, False otherwise + :rtype: bool + """ + ref = repo.lookup_reference("refs/tags/" + tag) + tag_id = ref.get_target() + tag = repo.lookup(tag_id, Git.Tag) + target_id = tag.get_target_id() + return commit_id.compare(target_id) == 0
+ +
[docs]def get_revision_from_tag(tag): + """Return the revision number from a tag + + :param tag: The tag to extract the revision from + :type tag: str + :returns: The integer revision or None + :rtype: int or None + + The revision is the part after the r in 'branch/filename/rXXX' + """ + if tag is None: + return None + try: + return int(tag.rsplit('r', 2)[-1]) + except (ValueError, IndexError): + return None
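For example::

    >>> get_revision_from_tag("master/example.toml/r3")
    3
    >>> get_revision_from_tag(None) is None
    True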
+ +
[docs]class CommitDetails(DataHolder): + def __init__(self, commit, timestamp, message, revision=None): + DataHolder.__init__(self, + commit = commit, + timestamp = timestamp, + message = message, + revision = revision)
+ +
[docs]def list_commits(repo, branch, filename, limit=0): + """List the commit history of a file on a branch. + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param filename: filename to list the commits of + :type filename: str + :param limit: Number of commits to return (0=all) + :type limit: int + :returns: A list of commit details + :rtype: list(CommitDetails) + :raises: Can raise errors from Ggit + """ + revwalk = Git.RevisionWalker.new(repo) + branch_ref = "refs/heads/%s" % branch + revwalk.push_ref(branch_ref) + + commits = [] + while True: + commit_id = revwalk.next() + if not commit_id: + break + commit = repo.lookup(commit_id, Git.Commit) + + parents = commit.get_parents() + # No parents? Must be the first commit. + if parents.get_size() == 0: + continue + + tree = commit.get_tree() + # Is the filename in this tree? If not, move on. + if not tree.get_by_name(filename): + continue + + # Is filename different in all of the parent commits? + parent_commits = list(map(parents.get, range(0, parents.get_size()))) + is_diff = all([is_parent_diff(repo, filename, tree, pc) for pc in parent_commits]) + # No changes from parents, skip it. + if not is_diff: + continue + + tag = find_commit_tag(repo, branch, filename, commit.get_id()) + try: + commits.append(get_commit_details(commit, get_revision_from_tag(tag))) + if limit and len(commits) >= limit: + break + except CommitTimeValError: + # Skip any commits that have trouble converting the time + # TODO - log details about this failure + pass + + # These will be in reverse time sort order thanks to revwalk + return commits
+ +
[docs]def get_commit_details(commit, revision=None): + """Return the details about a specific commit. + + :param commit: The commit to get details from + :type commit: Git.Commit + :param revision: Optional commit revision + :type revision: int + :returns: Details about the commit + :rtype: CommitDetails + :raises: CommitTimeValError or Ggit exceptions + + """ + message = commit.get_message() + commit_str = commit.get_id().to_string() + sig = commit.get_committer() + + datetime = sig.get_time() + # XXX What do we do with timezone? + _timezone = sig.get_time_zone() + time_str = datetime.format_iso8601() + if not time_str: + raise CommitTimeValError + + return CommitDetails(commit_str, time_str, message, revision)
+ +
[docs]def is_parent_diff(repo, filename, tree, parent): + """Check to see if the file changed between a commit and one of its parents + + :param repo: Open repository + :type repo: Git.Repository + :param filename: filename to check + :type filename: str + :param tree: The commit's tree + :type tree: Git.Tree + :param parent: The commit's parent commit + :type parent: Git.Commit + :returns: True if filename in the commit is different from the parent + :rtype: bool + """ + diff_opts = Git.DiffOptions.new() + diff_opts.set_pathspec([filename]) + diff = Git.Diff.new_tree_to_tree(repo, parent.get_tree(), tree, diff_opts) + return diff.get_num_deltas() > 0
+ +
[docs]def find_field_value(field, value, lst): + """Find a field matching value in the list of dicts. + + :param field: field to search for + :type field: str + :param value: value to match in the field + :type value: str + :param lst: List of dicts with field + :type lst: list of dict + :returns: First dict with matching field:value, or None + :rtype: dict or None + + Used to return a specific entry from a list that looks like this: + + [{"name": "one", "attr": "green"}, ...] + + find_field_value("name", "one", lst) will return the matching dict. + """ + for d in lst: + if d.get(field) and d.get(field) == value: + return d + return None
+ +
[docs]def find_name(name, lst): + """Find the dict matching the name in a list and return it. + + :param name: Name to search for + :type name: str + :param lst: List of dicts with a "name" field + :type lst: list of dict + :returns: First dict with matching name, or None + :rtype: dict or None + + This is just a wrapper for find_field_value with field set to "name" + """ + return find_field_value("name", name, lst)
+ +
[docs]def find_recipe_obj(path, recipe, default=None): + """Find a recipe object + + :param path: A list of dict field names + :type path: list of str + :param recipe: The recipe to search + :type recipe: Recipe + :param default: The value to return if it is not found + :type default: Any + + Return the object found by applying the path to the dicts in the recipe, or + return the default if it doesn't exist. + + eg. {"customizations": {"hostname": "foo", "users": [...]}} + + find_recipe_obj(["customizations", "hostname"], recipe, "") + """ + o = recipe + try: + for p in path: + if not o.get(p): + return default + o = o.get(p) + except AttributeError: + return default + + return o
+ +
[docs]def diff_lists(title, field, old_items, new_items): + """Return the differences between two lists of dicts. + + :param title: Title of the entry + :type title: str + :param field: Field to use as the key for comparisons + :type field: str + :param old_items: List of item dicts containing the `field` key + :type old_items: list(dict) + :param new_items: List of item dicts containing the `field` key + :type new_items: list(dict) + :returns: List of diff dicts with old/new entries + :rtype: list(dict) + """ + diffs = [] + old_fields = set(m[field] for m in old_items) + new_fields = set(m[field] for m in new_items) + + added_items = new_fields.difference(old_fields) + added_items = sorted(added_items, key=lambda n: n.lower()) + + removed_items = old_fields.difference(new_fields) + removed_items = sorted(removed_items, key=lambda n: n.lower()) + + same_items = old_fields.intersection(new_fields) + same_items = sorted(same_items, key=lambda n: n.lower()) + + for v in added_items: + diffs.append({"old":None, + "new":{title:find_field_value(field, v, new_items)}}) + + for v in removed_items: + diffs.append({"old":{title:find_field_value(field, v, old_items)}, + "new":None}) + + for v in same_items: + old_item = find_field_value(field, v, old_items) + new_item = find_field_value(field, v, new_items) + if old_item != new_item: + diffs.append({"old":{title:old_item}, + "new":{title:new_item}}) + + return diffs
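For example, diffing two illustrative package lists::

    from pylorax.api.recipes import diff_lists

    old = [{"name": "tmux", "version": "2.2"}]
    new = [{"name": "tmux", "version": "3.1"}, {"name": "vim", "version": "*"}]
    diff_lists("Package", "name", old, new)
    # -> [{'old': None, 'new': {'Package': {'name': 'vim', 'version': '*'}}},
    #     {'old': {'Package': {'name': 'tmux', 'version': '2.2'}},
    #      'new': {'Package': {'name': 'tmux', 'version': '3.1'}}}]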
+ +
[docs]def customizations_diff(old_recipe, new_recipe): + """Diff the customizations sections from two versions of a recipe + """ + diffs = [] + old_keys = set(old_recipe.get("customizations", {}).keys()) + new_keys = set(new_recipe.get("customizations", {}).keys()) + + added_keys = new_keys.difference(old_keys) + added_keys = sorted(added_keys, key=lambda n: n.lower()) + + removed_keys = old_keys.difference(new_keys) + removed_keys = sorted(removed_keys, key=lambda n: n.lower()) + + same_keys = old_keys.intersection(new_keys) + same_keys = sorted(same_keys, key=lambda n: n.lower()) + + for v in added_keys: + diffs.append({"old": None, + "new": {"Customizations."+v: new_recipe["customizations"][v]}}) + + for v in removed_keys: + diffs.append({"old": {"Customizations."+v: old_recipe["customizations"][v]}, + "new": None}) + + for v in same_keys: + if new_recipe["customizations"][v] == old_recipe["customizations"][v]: + continue + + if isinstance(new_recipe["customizations"][v], list): + # Lists of dicts need to use diff_lists + # sshkey uses 'user', user and group use 'name' + if "user" in new_recipe["customizations"][v][0]: + field_name = "user" + elif "name" in new_recipe["customizations"][v][0]: + field_name = "name" + else: + raise RuntimeError("%s list has unrecognized key, not 'name' or 'user'" % ("customizations."+v)) + + diffs.extend(diff_lists("Customizations."+v, field_name, old_recipe["customizations"][v], new_recipe["customizations"][v])) + else: + diffs.append({"old": {"Customizations."+v: old_recipe["customizations"][v]}, + "new": {"Customizations."+v: new_recipe["customizations"][v]}}) + + return diffs
+ + +
[docs]def recipe_diff(old_recipe, new_recipe): + """Diff two versions of a recipe + + :param old_recipe: The old version of the recipe + :type old_recipe: Recipe + :param new_recipe: The new version of the recipe + :type new_recipe: Recipe + :returns: A list of diff dict entries with old/new + :rtype: list(dict) + """ + + diffs = [] + # These cannot be added or removed, just different + for element in ["name", "description", "version"]: + if old_recipe[element] != new_recipe[element]: + diffs.append({"old":{element.title():old_recipe[element]}, + "new":{element.title():new_recipe[element]}}) + + # These lists always exist + diffs.extend(diff_lists("Module", "name", old_recipe["modules"], new_recipe["modules"])) + diffs.extend(diff_lists("Package", "name", old_recipe["packages"], new_recipe["packages"])) + diffs.extend(diff_lists("Group", "name", old_recipe["groups"], new_recipe["groups"])) + + # The customizations section can contain a number of different types + diffs.extend(customizations_diff(old_recipe, new_recipe)) + + # repos contains keys that are lists (eg. [[repos.git]]) + diffs.extend(diff_lists("Repos.git", "rpmname", + find_recipe_obj(["repos", "git"], old_recipe, []), + find_recipe_obj(["repos", "git"], new_recipe, []))) + + return diffs
+ +
[docs]def repo_file_exists(repo, branch, filename): + """Return True if the filename exists on the branch + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param filename: Filename to check + :type filename: str + :returns: True if the filename exists on the HEAD of the branch, False otherwise. + :rtype: bool + """ + tree = head_commit(repo, branch).get_tree() + return tree.get_by_name(filename) is not None
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/server.html b/f32-branch/_modules/pylorax/api/server.html new file mode 100644 index 00000000..34cb9aea --- /dev/null +++ b/f32-branch/_modules/pylorax/api/server.html @@ -0,0 +1,304 @@ + + + + + + + + + + + pylorax.api.server — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pylorax.api.server

+#
+# Copyright (C) 2017-2019 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("lorax-composer")
+
+from collections import namedtuple
+from flask import Flask, jsonify, redirect, send_from_directory
+from glob import glob
+import os
+import werkzeug
+
+from pylorax import vernum
+from pylorax.api.errors import HTTP_ERROR
+from pylorax.api.v0 import v0_api
+from pylorax.api.v1 import v1_api
+from pylorax.sysutils import joinpaths
+
+GitLock = namedtuple("GitLock", ["repo", "lock", "dir"])
+
+server = Flask(__name__)
+
+__all__ = ["server", "GitLock"]
+
+@server.route('/')
+def server_root():
+    return redirect("/api/docs/")
+
+@server.route("/api/docs/")
+@server.route("/api/docs/<path:path>")
+def api_docs(path=None):
+    # Find the html docs
+    # This assumes it is running from the source tree
+    docs_path = os.path.abspath(joinpaths(os.path.dirname(__file__), "../../../docs/html"))
+    if not os.path.isdir(docs_path):
+        # Fall back to the html docs installed with the package
+        docs_path = glob("/usr/share/doc/lorax-*/html/")[0]
+
+    if not path:
+        path="index.html"
+    return send_from_directory(docs_path, path)
+
+@server.route("/api/status")
+def api_status():
+    """
+    `/api/status`
+    ^^^^^^^^^^^^^^^^
+    Return the status of the API Server::
+
+          { "api": "1",
+            "build": "devel",
+            "db_supported": true,
+            "db_version": "0",
+            "schema_version": "0",
+            "backend": "lorax-composer",
+            "msgs": []}
+
+    The 'msgs' field can be a list of strings describing startup problems or status that
+    should be displayed to the user. eg. if the compose templates are not depsolving properly
+    the errors will be in 'msgs'.
+    """
+    return jsonify(backend="lorax-composer",
+                   build=vernum,
+                   api="1",
+                   db_version="0",
+                   schema_version="0",
+                   db_supported=True,
+                   msgs=server.config["TEMPLATE_ERRORS"])
+
+@server.errorhandler(werkzeug.exceptions.HTTPException)
+def bad_request(error):
+    return jsonify(status=False, errors=[{ "id": HTTP_ERROR, "code": error.code, "msg": error.name }]), error.code
+
+# Register the v0 API on /api/v0/
+server.register_blueprint(v0_api, url_prefix="/api/v0/")
+
+# Register the v1 API on /api/v1/
+# Use v0 routes by default
+skip_rules = [
+    "/compose",
+    "/compose/queue",
+    "/compose/finished",
+    "/compose/failed",
+    "/compose/status/<uuids>",
+    "/compose/info/<uuid>",
+    "/projects/source/info/<source_names>",
+    "/projects/source/new",
+]
+server.register_blueprint(v0_api, url_prefix="/api/v1/", skip_rules=skip_rules)
+server.register_blueprint(v1_api, url_prefix="/api/v1/")
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/timestamp.html b/f32-branch/_modules/pylorax/api/timestamp.html new file mode 100644 index 00000000..3c321d9e --- /dev/null +++ b/f32-branch/_modules/pylorax/api/timestamp.html @@ -0,0 +1,252 @@ + + + + + + + + + + + pylorax.api.timestamp — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pylorax.api.timestamp

+#
+# Copyright (C) 2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import time
+
+from pylorax.sysutils import joinpaths
+import pylorax.api.toml as toml
+
+TS_CREATED  = "created"
+TS_STARTED  = "started"
+TS_FINISHED = "finished"
+
+
[docs]def write_timestamp(destdir, ty): + path = joinpaths(destdir, "times.toml") + + try: + with open(path, "r") as f: + contents = toml.loads(f.read()) + except IOError: + contents = toml.loads("") + + if ty == TS_CREATED: + contents[TS_CREATED] = time.time() + elif ty == TS_STARTED: + contents[TS_STARTED] = time.time() + elif ty == TS_FINISHED: + contents[TS_FINISHED] = time.time() + + with open(path, "w") as f: + f.write(toml.dumps(contents))
+ +
[docs]def timestamp_dict(destdir): + path = joinpaths(destdir, "times.toml") + + try: + with open(path, "r") as f: + return toml.loads(f.read()) + except IOError: + return toml.loads("")
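A sketch of recording and reading back a compose's timestamps; the results directory is illustrative and must already exist::

    from pylorax.api.timestamp import write_timestamp, timestamp_dict, TS_CREATED

    write_timestamp("/tmp/compose-results", TS_CREATED)
    times = timestamp_dict("/tmp/compose-results")
    print(times.get(TS_CREATED))    # a time.time() float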
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/toml.html b/f32-branch/_modules/pylorax/api/toml.html new file mode 100644 index 00000000..c39ba1db --- /dev/null +++ b/f32-branch/_modules/pylorax/api/toml.html @@ -0,0 +1,243 @@ + + + + + + + + + + + pylorax.api.toml — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pylorax.api.toml

+#
+# Copyright (C) 2019  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import toml
+
+
[docs]class TomlError(toml.TomlDecodeError): + pass
+ +
[docs]def loads(s): + if isinstance(s, bytes): + s = s.decode('utf-8') + try: + return toml.loads(s) + except toml.TomlDecodeError as e: + raise TomlError(e.msg, e.doc, e.pos)
+ +
[docs]def dumps(o): + # strip the result, because `toml.dumps` adds a lot of newlines + return toml.dumps(o, encoder=toml.TomlEncoder(dict)).strip()
+ +
[docs]def load(file): + try: + return toml.load(file) + except toml.TomlDecodeError as e: + raise TomlError(e.msg, e.doc, e.pos)
+ +
[docs]def dump(o, file): + return toml.dump(o, file)
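A round-trip sketch using these wrappers::

    import pylorax.api.toml as toml

    data = toml.loads('name = "example"')
    assert toml.loads(toml.dumps(data)) == data

    try:
        toml.loads("this is not valid toml")
    except toml.TomlError as e:
        print("parse failed:", e)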
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/utils.html b/f32-branch/_modules/pylorax/api/utils.html new file mode 100644 index 00000000..006df2b1 --- /dev/null +++ b/f32-branch/_modules/pylorax/api/utils.html @@ -0,0 +1,250 @@ + + + + + + + + + + + pylorax.api.utils — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pylorax.api.utils

+#
+# Copyright (C) 2019  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+""" API utility functions
+"""
+from pylorax.api.recipes import RecipeError, RecipeFileError, read_recipe_commit
+
+
[docs]def take_limits(iterable, offset, limit): + """ Apply offset and limit to an iterable object + + :param iterable: The object to limit + :type iterable: iter + :param offset: The number of items to skip + :type offset: int + :param limit: The total number of items to return + :type limit: int + :returns: A subset of the iterable + """ + return iterable[offset:][:limit]
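For example::

    >>> take_limits(list(range(10)), 2, 3)
    [2, 3, 4]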
+ +
[docs]def blueprint_exists(api, branch, blueprint_name): + """Return True if the blueprint exists + + :param api: flask object + :type api: Flask + :param branch: Branch name + :type branch: str + :param blueprint_name: The name of the blueprint to check + :type blueprint_name: str + :returns: True if the blueprint exists on the branch, False otherwise + :rtype: bool + """ + try: + with api.config["GITLOCK"].lock: + read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) + + return True + except (RecipeError, RecipeFileError): + return False
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/v0.html b/f32-branch/_modules/pylorax/api/v0.html new file mode 100644 index 00000000..a1579042 --- /dev/null +++ b/f32-branch/_modules/pylorax/api/v0.html @@ -0,0 +1,2187 @@ + + + + + + + + + + + pylorax.api.v0 — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pylorax.api.v0

+#
+# Copyright (C) 2017-2019  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+""" Setup v0 of the API server
+
+v0_api() must be called to setup the API routes for Flask
+
+Status Responses
+----------------
+
+Some requests only return a status/error response.
+
+  The response will be a status response with `status` set to true, or an
+  error response with it set to false and an error message included.
+
+  Example response::
+
+      {
+        "status": true
+      }
+
+  Error response::
+
+      {
+        "errors": ["ggit-error: Failed to remove entry. File isn't in the tree - jboss.toml (-1)"],
+        "status": false
+      }
+
+API Routes
+----------
+
+All of the blueprints routes support the optional `branch` argument. If it is not
+used then the API will use the `master` branch for blueprints. If you want to create
+a new branch use the `new` or `workspace` routes with ?branch=<branch-name> to
+store the new blueprint on the new branch.
+"""
+
+import logging
+log = logging.getLogger("lorax-composer")
+
+import os
+from flask import jsonify, request, Response, send_file
+from flask import current_app as api
+
+from pylorax.sysutils import joinpaths
+from pylorax.api.checkparams import checkparams
+from pylorax.api.compose import start_build, compose_types
+from pylorax.api.errors import *                               # pylint: disable=wildcard-import,unused-wildcard-import
+from pylorax.api.flask_blueprint import BlueprintSkip
+from pylorax.api.projects import projects_list, projects_info, projects_depsolve
+from pylorax.api.projects import modules_list, modules_info, ProjectsError, repo_to_source
+from pylorax.api.projects import get_repo_sources, delete_repo_source, new_repo_source
+from pylorax.api.queue import queue_status, build_status, uuid_delete, uuid_status, uuid_info
+from pylorax.api.queue import uuid_tar, uuid_image, uuid_cancel, uuid_log
+from pylorax.api.recipes import list_branch_files, read_recipe_commit, recipe_filename, list_commits
+from pylorax.api.recipes import recipe_from_dict, recipe_from_toml, commit_recipe, delete_recipe, revert_recipe
+from pylorax.api.recipes import tag_recipe_commit, recipe_diff, RecipeFileError
+from pylorax.api.regexes import VALID_API_STRING, VALID_BLUEPRINT_NAME
+import pylorax.api.toml as toml
+from pylorax.api.utils import take_limits, blueprint_exists
+from pylorax.api.workspace import workspace_read, workspace_write, workspace_delete
+
+# The API functions don't actually get called by any code here
+# pylint: disable=unused-variable
+
+# Create the v0 routes Blueprint with skip_routes support
+v0_api = BlueprintSkip("v0_routes", __name__)
+
+
[docs]@v0_api.route("/blueprints/list") +def v0_blueprints_list(): + """List the available blueprints on a branch. + + **/api/v0/blueprints/list** + + List the available blueprints:: + + { "limit": 20, + "offset": 0, + "blueprints": [ + "atlas", + "development", + "glusterfs", + "http-server", + "jboss", + "kubernetes" ], + "total": 6 } + """ + branch = request.args.get("branch", "master") + if VALID_API_STRING.match(branch) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 + + try: + limit = int(request.args.get("limit", "20")) + offset = int(request.args.get("offset", "0")) + except ValueError as e: + return jsonify(status=False, errors=[{"id": BAD_LIMIT_OR_OFFSET, "msg": str(e)}]), 400 + + with api.config["GITLOCK"].lock: + blueprints = [f[:-5] for f in list_branch_files(api.config["GITLOCK"].repo, branch)] + limited_blueprints = take_limits(blueprints, offset, limit) + return jsonify(blueprints=limited_blueprints, limit=limit, offset=offset, total=len(blueprints))
+ +
[docs]@v0_api.route("/blueprints/info", defaults={'blueprint_names': ""}) +@v0_api.route("/blueprints/info/<blueprint_names>") +@checkparams([("blueprint_names", "", "no blueprint names given")]) +def v0_blueprints_info(blueprint_names): + """Return the contents of the blueprint, or a list of blueprints + + **/api/v0/blueprints/info/<blueprint_names>[?format=<json|toml>]** + + Return the JSON representation of the blueprint. This includes 3 top level + objects: `changes`, which lists whether or not the workspace is different from + the most recent commit; `blueprints`, which lists the JSON representation of the + blueprint; and `errors`, which lists any errors, like non-existent blueprints. + + By default the response is JSON, but if `?format=toml` is included in the URL's + arguments it will return the response as the blueprint's raw TOML content, + *unless* there is an error, in which case a 400 and a standard error + response (see `Status Responses`_) are returned. + + If there is an error when JSON is requested the successful blueprints and the + errors will both be returned. + + Example of JSON response:: + + { + "changes": [ + { + "changed": false, + "name": "glusterfs" + } + ], + "errors": [], + "blueprints": [ + { + "description": "An example GlusterFS server with samba", + "modules": [ + { + "name": "glusterfs", + "version": "3.7.*" + }, + { + "name": "glusterfs-cli", + "version": "3.7.*" + } + ], + "name": "glusterfs", + "packages": [ + { + "name": "2ping", + "version": "3.2.1" + }, + { + "name": "samba", + "version": "4.2.*" + } + ], + "version": "0.0.6" + } + ] + } + + Error example:: + + { + "changes": [], + "errors": ["ggit-error: the path 'missing.toml' does not exist in the given tree (-3)"], + "blueprints": [] + } + """ + if any(VALID_BLUEPRINT_NAME.match(blueprint_name) is None for blueprint_name in blueprint_names.split(',')): + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + branch = request.args.get("branch", "master") + if VALID_API_STRING.match(branch) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 + + out_fmt = request.args.get("format", "json") + if VALID_API_STRING.match(out_fmt) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in format argument"}]), 400 + + blueprints = [] + changes = [] + errors = [] + for blueprint_name in [n.strip() for n in blueprint_names.split(",")]: + exceptions = [] + # Get the workspace version (if it exists) + try: + with api.config["GITLOCK"].lock: + ws_blueprint = workspace_read(api.config["GITLOCK"].repo, branch, blueprint_name) + except Exception as e: + ws_blueprint = None + exceptions.append(str(e)) + log.error("(v0_blueprints_info) %s", str(e)) + + # Get the git version (if it exists) + try: + with api.config["GITLOCK"].lock: + git_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) + except RecipeFileError as e: + # Adding an exception would be redundant, skip it + git_blueprint = None + log.error("(v0_blueprints_info) %s", str(e)) + except Exception as e: + git_blueprint = None + exceptions.append(str(e)) + log.error("(v0_blueprints_info) %s", str(e)) + + if not ws_blueprint and not git_blueprint: + # Neither blueprint, return an error + errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "%s: %s" % (blueprint_name, ", ".join(exceptions))}) + elif ws_blueprint and not git_blueprint: + # No git blueprint, return the workspace blueprint
+ changes.append({"name":blueprint_name, "changed":True}) + blueprints.append(ws_blueprint) + elif not ws_blueprint and git_blueprint: + # No workspace blueprint, no change, return the git blueprint + changes.append({"name":blueprint_name, "changed":False}) + blueprints.append(git_blueprint) + else: + # Both exist, maybe changed, return the workspace blueprint + changes.append({"name":blueprint_name, "changed":ws_blueprint != git_blueprint}) + blueprints.append(ws_blueprint) + + # Sort all the results by case-insensitive blueprint name + changes = sorted(changes, key=lambda c: c["name"].lower()) + blueprints = sorted(blueprints, key=lambda r: r["name"].lower()) + + if out_fmt == "toml": + if errors: + # If there are errors they need to be reported, use JSON and 400 for this + return jsonify(status=False, errors=errors), 400 + else: + # With TOML output we just want to dump the raw blueprint, skipping the rest. + return "\n\n".join([r.toml() for r in blueprints]) + else: + return jsonify(changes=changes, blueprints=blueprints, errors=errors)
+ +
[docs]@v0_api.route("/blueprints/changes", defaults={'blueprint_names': ""}) +@v0_api.route("/blueprints/changes/<blueprint_names>") +@checkparams([("blueprint_names", "", "no blueprint names given")]) +def v0_blueprints_changes(blueprint_names): + """Return the changes to a blueprint or list of blueprints + + **/api/v0/blueprints/changes/<blueprint_names>[?offset=0&limit=20]** + + Return the commits to a blueprint. By default it returns the first 20 commits, this + can be changed by passing `offset` and/or `limit`. The response will include the + commit hash, summary, timestamp, and optionally the revision number. The commit + hash can be passed to `/api/v0/blueprints/diff/` to retrieve the exact changes. + + Example:: + + { + "errors": [], + "limit": 20, + "offset": 0, + "blueprints": [ + { + "changes": [ + { + "commit": "e083921a7ed1cf2eec91ad12b9ad1e70ef3470be", + "message": "blueprint glusterfs, version 0.0.6 saved.", + "revision": null, + "timestamp": "2017-11-23T00:18:13Z" + }, + { + "commit": "cee5f4c20fc33ea4d54bfecf56f4ad41ad15f4f3", + "message": "blueprint glusterfs, version 0.0.5 saved.", + "revision": null, + "timestamp": "2017-11-11T01:00:28Z" + }, + { + "commit": "29b492f26ed35d80800b536623bafc51e2f0eff2", + "message": "blueprint glusterfs, version 0.0.4 saved.", + "revision": null, + "timestamp": "2017-11-11T00:28:30Z" + }, + { + "commit": "03374adbf080fe34f5c6c29f2e49cc2b86958bf2", + "message": "blueprint glusterfs, version 0.0.3 saved.", + "revision": null, + "timestamp": "2017-11-10T23:15:52Z" + }, + { + "commit": "0e08ecbb708675bfabc82952599a1712a843779d", + "message": "blueprint glusterfs, version 0.0.2 saved.", + "revision": null, + "timestamp": "2017-11-10T23:14:56Z" + }, + { + "commit": "3e11eb87a63d289662cba4b1804a0947a6843379", + "message": "blueprint glusterfs, version 0.0.1 saved.", + "revision": null, + "timestamp": "2017-11-08T00:02:47Z" + } + ], + "name": "glusterfs", + "total": 6 + } + ] + } + """ + if any(VALID_BLUEPRINT_NAME.match(blueprint_name) is None for blueprint_name in blueprint_names.split(',')): + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + branch = request.args.get("branch", "master") + if VALID_API_STRING.match(branch) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 + + try: + limit = int(request.args.get("limit", "20")) + offset = int(request.args.get("offset", "0")) + except ValueError as e: + return jsonify(status=False, errors=[{"id": BAD_LIMIT_OR_OFFSET, "msg": str(e)}]), 400 + + blueprints = [] + errors = [] + for blueprint_name in [n.strip() for n in blueprint_names.split(",")]: + filename = recipe_filename(blueprint_name) + try: + with api.config["GITLOCK"].lock: + commits = list_commits(api.config["GITLOCK"].repo, branch, filename) + except Exception as e: + errors.append({"id": BLUEPRINTS_ERROR, "msg": "%s: %s" % (blueprint_name, str(e))}) + log.error("(v0_blueprints_changes) %s", str(e)) + else: + if commits: + limited_commits = take_limits(commits, offset, limit) + blueprints.append({"name":blueprint_name, "changes":limited_commits, "total":len(commits)}) + else: + # no commits means there is no blueprint in the branch + errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "%s" % blueprint_name}) + + blueprints = sorted(blueprints, key=lambda r: r["name"].lower()) + + return jsonify(blueprints=blueprints, errors=errors, offset=offset, limit=limit)
+ +
[docs]@v0_api.route("/blueprints/new", methods=["POST"]) +def v0_blueprints_new(): + """Commit a new blueprint + + **POST /api/v0/blueprints/new** + + Create a new blueprint, or update an existing blueprint. This supports both JSON and TOML + for the blueprint format. The blueprint should be in the body of the request with the + `Content-Type` header set to either `application/json` or `text/x-toml`. + + The response will be a status response with `status` set to true, or an + error response with it set to false and an error message included. + """ + branch = request.args.get("branch", "master") + if VALID_API_STRING.match(branch) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 + + try: + if request.headers['Content-Type'] == "text/x-toml": + blueprint = recipe_from_toml(request.data) + else: + blueprint = recipe_from_dict(request.get_json(cache=False)) + + if VALID_BLUEPRINT_NAME.match(blueprint["name"]) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + with api.config["GITLOCK"].lock: + commit_recipe(api.config["GITLOCK"].repo, branch, blueprint) + + # Read the blueprint with new version and write it to the workspace + blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint["name"]) + workspace_write(api.config["GITLOCK"].repo, branch, blueprint) + except Exception as e: + log.error("(v0_blueprints_new) %s", str(e)) + return jsonify(status=False, errors=[{"id": BLUEPRINTS_ERROR, "msg": str(e)}]), 400 + else: + return jsonify(status=True)
+ +
[docs]@v0_api.route("/blueprints/delete", defaults={'blueprint_name': ""}, methods=["DELETE"]) +@v0_api.route("/blueprints/delete/<blueprint_name>", methods=["DELETE"]) +@checkparams([("blueprint_name", "", "no blueprint name given")]) +def v0_blueprints_delete(blueprint_name): + """Delete a blueprint from git + + **DELETE /api/v0/blueprints/delete/<blueprint_name>** + + Delete a blueprint. The blueprint is deleted from the branch, and will no longer + be listed by the `list` route. A blueprint can be undeleted using the `undo` route + to revert to a previous commit. This will also delete the workspace copy of the + blueprint. + + The response will be a status response with `status` set to true, or an + error response with it set to false and an error message included. + """ + if VALID_BLUEPRINT_NAME.match(blueprint_name) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + branch = request.args.get("branch", "master") + if VALID_API_STRING.match(branch) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 + + try: + with api.config["GITLOCK"].lock: + workspace_delete(api.config["GITLOCK"].repo, branch, blueprint_name) + delete_recipe(api.config["GITLOCK"].repo, branch, blueprint_name) + except Exception as e: + log.error("(v0_blueprints_delete) %s", str(e)) + return jsonify(status=False, errors=[{"id": BLUEPRINTS_ERROR, "msg": str(e)}]), 400 + else: + return jsonify(status=True)
+ +
[docs]@v0_api.route("/blueprints/workspace", methods=["POST"]) +def v0_blueprints_workspace(): + """Write a blueprint to the workspace + + **POST /api/v0/blueprints/workspace** + + Write a blueprint to the temporary workspace. This works exactly the same as `new` except + that it does not create a commit. JSON and TOML bodies are supported. + + The workspace is meant to be used as a temporary blueprint storage for clients. + It will be read by the `info` and `diff` routes if it is different from the + most recent commit. + + The response will be a status response with `status` set to true, or an + error response with it set to false and an error message included. + """ + branch = request.args.get("branch", "master") + if VALID_API_STRING.match(branch) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 + + try: + if request.headers['Content-Type'] == "text/x-toml": + blueprint = recipe_from_toml(request.data) + else: + blueprint = recipe_from_dict(request.get_json(cache=False)) + + if VALID_BLUEPRINT_NAME.match(blueprint["name"]) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + with api.config["GITLOCK"].lock: + workspace_write(api.config["GITLOCK"].repo, branch, blueprint) + except Exception as e: + log.error("(v0_blueprints_workspace) %s", str(e)) + return jsonify(status=False, errors=[{"id": BLUEPRINTS_ERROR, "msg": str(e)}]), 400 + else: + return jsonify(status=True)
+ +
[docs]@v0_api.route("/blueprints/workspace", defaults={'blueprint_name': ""}, methods=["DELETE"]) +@v0_api.route("/blueprints/workspace/<blueprint_name>", methods=["DELETE"]) +@checkparams([("blueprint_name", "", "no blueprint name given")]) +def v0_blueprints_delete_workspace(blueprint_name): + """Delete a blueprint from the workspace + + **DELETE /api/v0/blueprints/workspace/<blueprint_name>** + + Remove the temporary workspace copy of a blueprint. The `info` route will now + return the most recent commit of the blueprint. Any changes that were in the + workspace will be lost. + + The response will be a status response with `status` set to true, or an + error response with it set to false and an error message included. + """ + if VALID_BLUEPRINT_NAME.match(blueprint_name) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + branch = request.args.get("branch", "master") + if VALID_API_STRING.match(branch) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 + + try: + with api.config["GITLOCK"].lock: + workspace_delete(api.config["GITLOCK"].repo, branch, blueprint_name) + except Exception as e: + log.error("(v0_blueprints_delete_workspace) %s", str(e)) + return jsonify(status=False, errors=[{"id": BLUEPRINTS_ERROR, "msg": str(e)}]), 400 + else: + return jsonify(status=True)
+ +
[docs]@v0_api.route("/blueprints/undo", defaults={'blueprint_name': "", 'commit': ""}, methods=["POST"]) +@v0_api.route("/blueprints/undo/<blueprint_name>", defaults={'commit': ""}, methods=["POST"]) +@v0_api.route("/blueprints/undo/<blueprint_name>/<commit>", methods=["POST"]) +@checkparams([("blueprint_name", "", "no blueprint name given"), + ("commit", "", "no commit ID given")]) +def v0_blueprints_undo(blueprint_name, commit): + """Undo changes to a blueprint by reverting to a previous commit. + + **POST /api/v0/blueprints/undo/<blueprint_name>/<commit>** + + This will revert the blueprint to a previous commit. The commit hash from the `changes` + route can be used in this request. + + The response will be a status response with `status` set to true, or an + error response with it set to false and an error message included. + """ + if VALID_BLUEPRINT_NAME.match(blueprint_name) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + branch = request.args.get("branch", "master") + if VALID_API_STRING.match(branch) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 + + try: + with api.config["GITLOCK"].lock: + revert_recipe(api.config["GITLOCK"].repo, branch, blueprint_name, commit) + + # Read the new recipe and write it to the workspace + blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) + workspace_write(api.config["GITLOCK"].repo, branch, blueprint) + except Exception as e: + log.error("(v0_blueprints_undo) %s", str(e)) + return jsonify(status=False, errors=[{"id": UNKNOWN_COMMIT, "msg": str(e)}]), 400 + else: + return jsonify(status=True)
+ +
[docs]@v0_api.route("/blueprints/tag", defaults={'blueprint_name': ""}, methods=["POST"]) +@v0_api.route("/blueprints/tag/<blueprint_name>", methods=["POST"]) +@checkparams([("blueprint_name", "", "no blueprint name given")]) +def v0_blueprints_tag(blueprint_name): + """Tag a blueprint's latest blueprint commit as a 'revision' + + **POST /api/v0/blueprints/tag/<blueprint_name>** + + Tag a blueprint as a new release. This uses git tags with a special format. + `refs/tags/<branch>/<filename>/r<revision>`. Only the most recent blueprint commit + can be tagged. Revisions start at 1 and increment for each new tag + (per-blueprint). If the commit has already been tagged it will return false. + + The response will be a status response with `status` set to true, or an + error response with it set to false and an error message included. + """ + if VALID_BLUEPRINT_NAME.match(blueprint_name) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + branch = request.args.get("branch", "master") + if VALID_API_STRING.match(branch) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 + + try: + with api.config["GITLOCK"].lock: + tag_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) + except RecipeFileError as e: + log.error("(v0_blueprints_tag) %s", str(e)) + return jsonify(status=False, errors=[{"id": UNKNOWN_BLUEPRINT, "msg": str(e)}]), 400 + except Exception as e: + log.error("(v0_blueprints_tag) %s", str(e)) + return jsonify(status=False, errors=[{"id": BLUEPRINTS_ERROR, "msg": str(e)}]), 400 + else: + return jsonify(status=True)
+ +
[docs]@v0_api.route("/blueprints/diff", defaults={'blueprint_name': "", 'from_commit': "", 'to_commit': ""}) +@v0_api.route("/blueprints/diff/<blueprint_name>", defaults={'from_commit': "", 'to_commit': ""}) +@v0_api.route("/blueprints/diff/<blueprint_name>/<from_commit>", defaults={'to_commit': ""}) +@v0_api.route("/blueprints/diff/<blueprint_name>/<from_commit>/<to_commit>") +@checkparams([("blueprint_name", "", "no blueprint name given"), + ("from_commit", "", "no from commit ID given"), + ("to_commit", "", "no to commit ID given")]) +def v0_blueprints_diff(blueprint_name, from_commit, to_commit): + """Return the differences between two commits of a blueprint + + **/api/v0/blueprints/diff/<blueprint_name>/<from_commit>/<to_commit>** + + Return the differences between two commits, or the workspace. The commit hash + from the `changes` response can be used here, or several special strings: + + - NEWEST will select the newest git commit. This works for `from_commit` or `to_commit` + - WORKSPACE will select the workspace copy. This can only be used in `to_commit` + + eg. `/api/v0/blueprints/diff/glusterfs/NEWEST/WORKSPACE` will return the differences + between the most recent git commit and the contents of the workspace. + + Each entry in the response's diff object contains the old blueprint value and the new one. + If old is null and new is set, then it was added. + If new is null and old is set, then it was removed. + If both are set, then it was changed. + + The old/new entries will have the name of the blueprint field that was changed. This + can be one of: Name, Description, Version, Module, or Package. + The contents for these will be the old/new values for them. + + In the example below the version was changed and the ping package was added. + + Example:: + + { + "diff": [ + { + "new": { + "Version": "0.0.6" + }, + "old": { + "Version": "0.0.5" + } + }, + { + "new": { + "Package": { + "name": "ping", + "version": "3.2.1" + } + }, + "old": null + } + ] + } + """ + for s in [blueprint_name, from_commit, to_commit]: + if VALID_API_STRING.match(s) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + branch = request.args.get("branch", "master") + if VALID_API_STRING.match(branch) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 + + if not blueprint_exists(api, branch, blueprint_name): + return jsonify(status=False, errors=[{"id": UNKNOWN_BLUEPRINT, "msg": "Unknown blueprint name: %s" % blueprint_name}]) + + try: + if from_commit == "NEWEST": + with api.config["GITLOCK"].lock: + old_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) + else: + with api.config["GITLOCK"].lock: + old_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name, from_commit) + except Exception as e: + log.error("(v0_blueprints_diff) %s", str(e)) + return jsonify(status=False, errors=[{"id": UNKNOWN_COMMIT, "msg": str(e)}]), 400 + + try: + if to_commit == "WORKSPACE": + with api.config["GITLOCK"].lock: + new_blueprint = workspace_read(api.config["GITLOCK"].repo, branch, blueprint_name) + # If there is no workspace, use the newest commit instead + if not new_blueprint: + with api.config["GITLOCK"].lock: + new_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) + elif to_commit == "NEWEST": + with api.config["GITLOCK"].lock: + new_blueprint = 
read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) + else: + with api.config["GITLOCK"].lock: + new_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name, to_commit) + except Exception as e: + log.error("(v0_blueprints_diff) %s", str(e)) + return jsonify(status=False, errors=[{"id": UNKNOWN_COMMIT, "msg": str(e)}]), 400 + + diff = recipe_diff(old_blueprint, new_blueprint) + return jsonify(diff=diff)
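A sketch of consuming the diff output (same socket and ``requests-unixsocket`` assumptions as the earlier sketch; the blueprint name is hypothetical)::

    import requests_unixsocket

    session = requests_unixsocket.Session()
    base = "http+unix://%2Frun%2Fweldr%2Fapi.socket"

    # Compare the newest commit with the workspace copy
    r = session.get(base + "/api/v0/blueprints/diff/example-http-server/NEWEST/WORKSPACE")
    for entry in r.json()["diff"]:
        if entry["old"] is None:
            print("added:  ", entry["new"])
        elif entry["new"] is None:
            print("removed:", entry["old"])
        else:
            print("changed:", entry["old"], "->", entry["new"])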
+ +
[docs]@v0_api.route("/blueprints/freeze", defaults={'blueprint_names': ""}) +@v0_api.route("/blueprints/freeze/<blueprint_names>") +@checkparams([("blueprint_names", "", "no blueprint names given")]) +def v0_blueprints_freeze(blueprint_names): + """Return the blueprint with the exact modules and packages selected by depsolve + + **/api/v0/blueprints/freeze/<blueprint_names>** + + Return a JSON representation of the blueprint with the package and module versions set + to the exact versions chosen by depsolving the blueprint. + + Example:: + + { + "errors": [], + "blueprints": [ + { + "blueprint": { + "description": "An example GlusterFS server with samba", + "modules": [ + { + "name": "glusterfs", + "version": "3.8.4-18.4.el7.x86_64" + }, + { + "name": "glusterfs-cli", + "version": "3.8.4-18.4.el7.x86_64" + } + ], + "name": "glusterfs", + "packages": [ + { + "name": "ping", + "version": "2:3.2.1-2.el7.noarch" + }, + { + "name": "samba", + "version": "4.6.2-8.el7.x86_64" + } + ], + "version": "0.0.6" + } + } + ] + } + """ + if any(VALID_BLUEPRINT_NAME.match(blueprint_name) is None for blueprint_name in blueprint_names.split(',')): + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + branch = request.args.get("branch", "master") + if VALID_API_STRING.match(branch) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 + + out_fmt = request.args.get("format", "json") + if VALID_API_STRING.match(out_fmt) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in format argument"}]), 400 + + blueprints = [] + errors = [] + for blueprint_name in [n.strip() for n in sorted(blueprint_names.split(","), key=lambda n: n.lower())]: + # get the blueprint + # Get the workspace version (if it exists) + blueprint = None + try: + with api.config["GITLOCK"].lock: + blueprint = workspace_read(api.config["GITLOCK"].repo, branch, blueprint_name) + except Exception: + pass + + if not blueprint: + # No workspace version, get the git version (if it exists) + try: + with api.config["GITLOCK"].lock: + blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) + except RecipeFileError as e: + # adding an error here would be redundant, skip it + log.error("(v0_blueprints_freeze) %s", str(e)) + except Exception as e: + errors.append({"id": BLUEPRINTS_ERROR, "msg": "%s: %s" % (blueprint_name, str(e))}) + log.error("(v0_blueprints_freeze) %s", str(e)) + + # No blueprint found, skip it. + if not blueprint: + errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "%s: blueprint_not_found" % blueprint_name}) + continue + + # Combine modules and packages and depsolve the list + # TODO include the version/glob in the depsolving + module_nver = blueprint.module_nver + package_nver = blueprint.package_nver + projects = sorted(set(module_nver+package_nver), key=lambda p: p[0].lower()) + deps = [] + try: + with api.config["DNFLOCK"].lock: + deps = projects_depsolve(api.config["DNFLOCK"].dbo, projects, blueprint.group_names) + except ProjectsError as e: + errors.append({"id": BLUEPRINTS_ERROR, "msg": "%s: %s" % (blueprint_name, str(e))}) + log.error("(v0_blueprints_freeze) %s", str(e)) + + blueprints.append({"blueprint": blueprint.freeze(deps)}) + + if out_fmt == "toml": + # With TOML output we just want to dump the raw blueprint, skipping the rest. 
+ return "\n\n".join([e["blueprint"].toml() for e in blueprints]) + else: + return jsonify(blueprints=blueprints, errors=errors)
+ +
[docs]@v0_api.route("/blueprints/depsolve", defaults={'blueprint_names': ""}) +@v0_api.route("/blueprints/depsolve/<blueprint_names>") +@checkparams([("blueprint_names", "", "no blueprint names given")]) +def v0_blueprints_depsolve(blueprint_names): + """Return the dependencies for a blueprint + + **/api/v0/blueprints/depsolve/<blueprint_names>** + + Depsolve the blueprint using yum, return the blueprint used, and the NEVRAs of the packages + chosen to satisfy the blueprint's requirements. The response will include a list of results, + with the full dependency list in `dependencies`, the NEVRAs for the blueprint's direct modules + and packages in `modules`, and any error will be in `errors`. + + Example:: + + { + "errors": [], + "blueprints": [ + { + "dependencies": [ + { + "arch": "noarch", + "epoch": "0", + "name": "2ping", + "release": "2.el7", + "version": "3.2.1" + }, + { + "arch": "x86_64", + "epoch": "0", + "name": "acl", + "release": "12.el7", + "version": "2.2.51" + }, + { + "arch": "x86_64", + "epoch": "0", + "name": "audit-libs", + "release": "3.el7", + "version": "2.7.6" + }, + { + "arch": "x86_64", + "epoch": "0", + "name": "avahi-libs", + "release": "17.el7", + "version": "0.6.31" + }, + ... + ], + "modules": [ + { + "arch": "noarch", + "epoch": "0", + "name": "2ping", + "release": "2.el7", + "version": "3.2.1" + }, + { + "arch": "x86_64", + "epoch": "0", + "name": "glusterfs", + "release": "18.4.el7", + "version": "3.8.4" + }, + ... + ], + "blueprint": { + "description": "An example GlusterFS server with samba", + "modules": [ + { + "name": "glusterfs", + "version": "3.7.*" + }, + ... + } + } + ] + } + """ + if any(VALID_BLUEPRINT_NAME.match(blueprint_name) is None for blueprint_name in blueprint_names.split(',')): + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + branch = request.args.get("branch", "master") + if VALID_API_STRING.match(branch) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 + + blueprints = [] + errors = [] + for blueprint_name in [n.strip() for n in sorted(blueprint_names.split(","), key=lambda n: n.lower())]: + # get the blueprint + # Get the workspace version (if it exists) + blueprint = None + try: + with api.config["GITLOCK"].lock: + blueprint = workspace_read(api.config["GITLOCK"].repo, branch, blueprint_name) + except Exception: + pass + + if not blueprint: + # No workspace version, get the git version (if it exists) + try: + with api.config["GITLOCK"].lock: + blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) + except RecipeFileError as e: + # adding an error here would be redundant, skip it + log.error("(v0_blueprints_depsolve) %s", str(e)) + except Exception as e: + errors.append({"id": BLUEPRINTS_ERROR, "msg": "%s: %s" % (blueprint_name, str(e))}) + log.error("(v0_blueprints_depsolve) %s", str(e)) + + # No blueprint found, skip it. 
+ if not blueprint: + errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "%s: blueprint not found" % blueprint_name}) + continue + + # Combine modules and packages and depsolve the list + # TODO include the version/glob in the depsolving + module_nver = blueprint.module_nver + package_nver = blueprint.package_nver + projects = sorted(set(module_nver+package_nver), key=lambda p: p[0].lower()) + deps = [] + try: + with api.config["DNFLOCK"].lock: + deps = projects_depsolve(api.config["DNFLOCK"].dbo, projects, blueprint.group_names) + except ProjectsError as e: + errors.append({"id": BLUEPRINTS_ERROR, "msg": "%s: %s" % (blueprint_name, str(e))}) + log.error("(v0_blueprints_depsolve) %s", str(e)) + + # Get the NEVRA's of the modules and projects, add as "modules" + modules = [] + for dep in deps: + if dep["name"] in projects: + modules.append(dep) + modules = sorted(modules, key=lambda m: m["name"].lower()) + + blueprints.append({"blueprint":blueprint, "dependencies":deps, "modules":modules}) + + return jsonify(blueprints=blueprints, errors=errors)
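A sketch of walking the depsolve results, printing each dependency as a NEVRA-style string (same client assumptions as the earlier sketches; the blueprint name is made up)::

    import requests_unixsocket

    session = requests_unixsocket.Session()
    base = "http+unix://%2Frun%2Fweldr%2Fapi.socket"

    result = session.get(base + "/api/v0/blueprints/depsolve/example-http-server").json()
    for bp in result["blueprints"]:
        print(bp["blueprint"]["name"])
        for d in bp["dependencies"]:
            print("  %s-%s-%s.%s" % (d["name"], d["version"], d["release"], d["arch"]))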
+ +
[docs]@v0_api.route("/projects/list") +def v0_projects_list(): + """List all of the available projects/packages + + **/api/v0/projects/list[?offset=0&limit=20]** + + List all of the available projects. By default this returns the first 20 items, + but this can be changed by setting the `offset` and `limit` arguments. + + Example:: + + { + "limit": 20, + "offset": 0, + "projects": [ + { + "description": "0 A.D. (pronounced \"zero ey-dee\") is a ...", + "homepage": "http://play0ad.com", + "name": "0ad", + "summary": "Cross-Platform RTS Game of Ancient Warfare", + "upstream_vcs": "UPSTREAM_VCS" + }, + ... + ], + "total": 21770 + } + """ + try: + limit = int(request.args.get("limit", "20")) + offset = int(request.args.get("offset", "0")) + except ValueError as e: + return jsonify(status=False, errors=[{"id": BAD_LIMIT_OR_OFFSET, "msg": str(e)}]), 400 + + try: + with api.config["DNFLOCK"].lock: + available = projects_list(api.config["DNFLOCK"].dbo) + except ProjectsError as e: + log.error("(v0_projects_list) %s", str(e)) + return jsonify(status=False, errors=[{"id": PROJECTS_ERROR, "msg": str(e)}]), 400 + + projects = take_limits(available, offset, limit) + return jsonify(projects=projects, offset=offset, limit=limit, total=len(available))
+ +
[docs]@v0_api.route("/projects/info", defaults={'project_names': ""}) +@v0_api.route("/projects/info/<project_names>") +@checkparams([("project_names", "", "no project names given")]) +def v0_projects_info(project_names): + """Return detailed information about the listed projects + + **/api/v0/projects/info/<project_names>** + + Return information about the comma-separated list of projects. It includes the description + of the package along with the list of available builds. + + Example:: + + { + "projects": [ + { + "builds": [ + { + "arch": "x86_64", + "build_config_ref": "BUILD_CONFIG_REF", + "build_env_ref": "BUILD_ENV_REF", + "build_time": "2017-03-01T08:39:23", + "changelog": "- restore incremental backups correctly, files ...", + "epoch": "2", + "metadata": {}, + "release": "32.el7", + "source": { + "license": "GPLv3+", + "metadata": {}, + "source_ref": "SOURCE_REF", + "version": "1.26" + } + } + ], + "description": "The GNU tar program saves many ...", + "homepage": "http://www.gnu.org/software/tar/", + "name": "tar", + "summary": "A GNU file archiving program", + "upstream_vcs": "UPSTREAM_VCS" + } + ] + } + """ + if VALID_API_STRING.match(project_names) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + try: + with api.config["DNFLOCK"].lock: + projects = projects_info(api.config["DNFLOCK"].dbo, project_names.split(",")) + except ProjectsError as e: + log.error("(v0_projects_info) %s", str(e)) + return jsonify(status=False, errors=[{"id": PROJECTS_ERROR, "msg": str(e)}]), 400 + + if not projects: + msg = "one of the requested projects does not exist: %s" % project_names + log.error("(v0_projects_info) %s", msg) + return jsonify(status=False, errors=[{"id": UNKNOWN_PROJECT, "msg": msg}]), 400 + + return jsonify(projects=projects)
+ +
[docs]@v0_api.route("/projects/depsolve", defaults={'project_names': ""}) +@v0_api.route("/projects/depsolve/<project_names>") +@checkparams([("project_names", "", "no project names given")]) +def v0_projects_depsolve(project_names): + """Return detailed information about the listed projects + + **/api/v0/projects/depsolve/<project_names>** + + Depsolve the comma-separated list of projects and return the list of NEVRAs needed + to satisfy the request. + + Example:: + + { + "projects": [ + { + "arch": "noarch", + "epoch": "0", + "name": "basesystem", + "release": "7.el7", + "version": "10.0" + }, + { + "arch": "x86_64", + "epoch": "0", + "name": "bash", + "release": "28.el7", + "version": "4.2.46" + }, + { + "arch": "x86_64", + "epoch": "0", + "name": "filesystem", + "release": "21.el7", + "version": "3.2" + }, + ... + ] + } + """ + if VALID_API_STRING.match(project_names) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + try: + with api.config["DNFLOCK"].lock: + deps = projects_depsolve(api.config["DNFLOCK"].dbo, [(n, "*") for n in project_names.split(",")], []) + except ProjectsError as e: + log.error("(v0_projects_depsolve) %s", str(e)) + return jsonify(status=False, errors=[{"id": PROJECTS_ERROR, "msg": str(e)}]), 400 + + if not deps: + msg = "one of the requested projects does not exist: %s" % project_names + log.error("(v0_projects_depsolve) %s", msg) + return jsonify(status=False, errors=[{"id": UNKNOWN_PROJECT, "msg": msg}]), 400 + + return jsonify(projects=deps)
+ +
[docs]@v0_api.route("/projects/source/list") +def v0_projects_source_list(): + """Return the list of source names + + **/api/v0/projects/source/list** + + Return the list of repositories used for depsolving and installing packages. + + Example:: + + { + "sources": [ + "fedora", + "fedora-cisco-openh264", + "fedora-updates-testing", + "fedora-updates" + ] + } + """ + with api.config["DNFLOCK"].lock: + repos = list(api.config["DNFLOCK"].dbo.repos.iter_enabled()) + sources = sorted([r.id for r in repos]) + return jsonify(sources=sources)
+ +
[docs]@v0_api.route("/projects/source/info", defaults={'source_names': ""}) +@v0_api.route("/projects/source/info/<source_names>") +@checkparams([("source_names", "", "no source names given")]) +def v0_projects_source_info(source_names): + """Return detailed info about the list of sources + + **/api/v0/projects/source/info/<source-names>** + + Return information about the comma-separated list of source names. Or all of the + sources if '*' is passed. Note that general globbing is not supported, only '*'. + + immutable system sources will have the "system" field set to true. User added sources + will have it set to false. System sources cannot be changed or deleted. + + Example:: + + { + "errors": [], + "sources": { + "fedora": { + "check_gpg": true, + "check_ssl": true, + "gpgkey_urls": [ + "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-28-x86_64" + ], + "name": "fedora", + "proxy": "http://proxy.brianlane.com:8123", + "system": true, + "type": "yum-metalink", + "url": "https://mirrors.fedoraproject.org/metalink?repo=fedora-28&arch=x86_64" + } + } + } + """ + if VALID_API_STRING.match(source_names) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + out_fmt = request.args.get("format", "json") + if VALID_API_STRING.match(out_fmt) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in format argument"}]), 400 + + # Return info on all of the sources + if source_names == "*": + with api.config["DNFLOCK"].lock: + source_names = ",".join(r.id for r in api.config["DNFLOCK"].dbo.repos.iter_enabled()) + + sources = {} + errors = [] + system_sources = get_repo_sources("/etc/yum.repos.d/*.repo") + for source in source_names.split(","): + with api.config["DNFLOCK"].lock: + repo = api.config["DNFLOCK"].dbo.repos.get(source, None) + if not repo: + errors.append({"id": UNKNOWN_SOURCE, "msg": "%s is not a valid source" % source}) + continue + sources[repo.id] = repo_to_source(repo, repo.id in system_sources, api=0) + + if out_fmt == "toml" and not errors: + # With TOML output we just want to dump the raw sources, skipping the errors + return toml.dumps(sources) + elif out_fmt == "toml" and errors: + # TOML requested, but there was an error + return jsonify(status=False, errors=errors), 400 + else: + return jsonify(sources=sources, errors=errors)
+ +
[docs]@v0_api.route("/projects/source/new", methods=["POST"]) +def v0_projects_source_new(): + """Add a new package source. Or change an existing one + + **POST /api/v0/projects/source/new** + + Add (or change) a source for use when depsolving blueprints and composing images. + + The ``proxy`` and ``gpgkey_urls`` entries are optional. All of the others are required. The supported + types for the urls are: + + * ``yum-baseurl`` is a URL to a yum repository. + * ``yum-mirrorlist`` is a URL for a mirrorlist. + * ``yum-metalink`` is a URL for a metalink. + + If ``check_ssl`` is true the https certificates must be valid. If they are self-signed you can either set + this to false, or add your Certificate Authority to the host system. + + If ``check_gpg`` is true the GPG key must either be installed on the host system, or ``gpgkey_urls`` + should point to it. + + You can edit an existing source (other than system sources), by doing a POST + of the new version of the source. It will overwrite the previous one. + + Example:: + + { + "name": "custom-source-1", + "url": "https://url/path/to/repository/", + "type": "yum-baseurl", + "check_ssl": true, + "check_gpg": true, + "gpgkey_urls": [ + "https://url/path/to/gpg-key" + ] + } + + + """ + if request.headers['Content-Type'] == "text/x-toml": + source = toml.loads(request.data) + else: + source = request.get_json(cache=False) + + system_sources = get_repo_sources("/etc/yum.repos.d/*.repo") + if source["name"] in system_sources: + return jsonify(status=False, errors=[{"id": SYSTEM_SOURCE, "msg": "%s is a system source, it cannot be changed." % source["name"]}]), 400 + + try: + # Remove it from the RepoDict (NOTE that this isn't explicitly supported by the DNF API) + with api.config["DNFLOCK"].lock: + repo_dir = api.config["COMPOSER_CFG"].get("composer", "repo_dir") + new_repo_source(api.config["DNFLOCK"].dbo, source["name"], source, repo_dir) + except Exception as e: + return jsonify(status=False, errors=[{"id": PROJECTS_ERROR, "msg": str(e)}]), 400 + + return jsonify(status=True)
+ +
[docs]@v0_api.route("/projects/source/delete", defaults={'source_name': ""}, methods=["DELETE"]) +@v0_api.route("/projects/source/delete/<source_name>", methods=["DELETE"]) +@checkparams([("source_name", "", "no source name given")]) +def v0_projects_source_delete(source_name): + """Delete the named source and return a status response + + **DELETE /api/v0/projects/source/delete/<source-name>** + + Delete a user added source. This will fail if a system source is passed to + it. + + The response will be a status response with `status` set to true, or an + error response with it set to false and an error message included. + """ + if VALID_API_STRING.match(source_name) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + system_sources = get_repo_sources("/etc/yum.repos.d/*.repo") + if source_name in system_sources: + return jsonify(status=False, errors=[{"id": SYSTEM_SOURCE, "msg": "%s is a system source, it cannot be deleted." % source_name}]), 400 + share_dir = api.config["COMPOSER_CFG"].get("composer", "repo_dir") + try: + # Remove the file entry for the source + delete_repo_source(joinpaths(share_dir, "*.repo"), source_name) + + # Remove it from the RepoDict (NOTE that this isn't explicitly supported by the DNF API) + with api.config["DNFLOCK"].lock: + if source_name in api.config["DNFLOCK"].dbo.repos: + del api.config["DNFLOCK"].dbo.repos[source_name] + log.info("Updating repository metadata after removing %s", source_name) + api.config["DNFLOCK"].dbo.fill_sack(load_system_repo=False) + api.config["DNFLOCK"].dbo.read_comps() + + except ProjectsError as e: + log.error("(v0_projects_source_delete) %s", str(e)) + return jsonify(status=False, errors=[{"id": UNKNOWN_SOURCE, "msg": str(e)}]), 400 + + return jsonify(status=True)
+ +
[docs]@v0_api.route("/modules/list") +@v0_api.route("/modules/list/<module_names>") +def v0_modules_list(module_names=None): + """List available modules, filtering by module_names + + **/api/v0/modules/list[?offset=0&limit=20]** + + Return a list of all of the available modules. This includes the name and the + group_type, which is always "rpm" for lorax-composer. By default this returns + the first 20 items. This can be changed by setting the `offset` and `limit` + arguments. + + Example:: + + { + "limit": 20, + "modules": [ + { + "group_type": "rpm", + "name": "0ad" + }, + { + "group_type": "rpm", + "name": "0ad-data" + }, + { + "group_type": "rpm", + "name": "0install" + }, + { + "group_type": "rpm", + "name": "2048-cli" + }, + ... + ] + "total": 21770 + } + + **/api/v0/modules/list/<module_names>[?offset=0&limit=20]** + + Return the list of comma-separated modules. Output is the same as `/modules/list` + + Example:: + + { + "limit": 20, + "modules": [ + { + "group_type": "rpm", + "name": "tar" + } + ], + "offset": 0, + "total": 1 + } + """ + if module_names and VALID_API_STRING.match(module_names) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + try: + limit = int(request.args.get("limit", "20")) + offset = int(request.args.get("offset", "0")) + except ValueError as e: + return jsonify(status=False, errors=[{"id": BAD_LIMIT_OR_OFFSET, "msg": str(e)}]), 400 + + if module_names: + module_names = module_names.split(",") + + try: + with api.config["DNFLOCK"].lock: + available = modules_list(api.config["DNFLOCK"].dbo, module_names) + except ProjectsError as e: + log.error("(v0_modules_list) %s", str(e)) + return jsonify(status=False, errors=[{"id": MODULES_ERROR, "msg": str(e)}]), 400 + + if module_names and not available: + msg = "one of the requested modules does not exist: %s" % module_names + log.error("(v0_modules_list) %s", msg) + return jsonify(status=False, errors=[{"id": UNKNOWN_MODULE, "msg": msg}]), 400 + + modules = take_limits(available, offset, limit) + return jsonify(modules=modules, offset=offset, limit=limit, total=len(available))
+ +
[docs]@v0_api.route("/modules/info", defaults={'module_names': ""}) +@v0_api.route("/modules/info/<module_names>") +@checkparams([("module_names", "", "no module names given")]) +def v0_modules_info(module_names): + """Return detailed information about the listed modules + + **/api/v0/modules/info/<module_names>** + + Return the module's dependencies, and the information about the module. + + Example:: + + { + "modules": [ + { + "dependencies": [ + { + "arch": "noarch", + "epoch": "0", + "name": "basesystem", + "release": "7.el7", + "version": "10.0" + }, + { + "arch": "x86_64", + "epoch": "0", + "name": "bash", + "release": "28.el7", + "version": "4.2.46" + }, + ... + ], + "description": "The GNU tar program saves ...", + "homepage": "http://www.gnu.org/software/tar/", + "name": "tar", + "summary": "A GNU file archiving program", + "upstream_vcs": "UPSTREAM_VCS" + } + ] + } + """ + if VALID_API_STRING.match(module_names) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + try: + with api.config["DNFLOCK"].lock: + modules = modules_info(api.config["DNFLOCK"].dbo, module_names.split(",")) + except ProjectsError as e: + log.error("(v0_modules_info) %s", str(e)) + return jsonify(status=False, errors=[{"id": MODULES_ERROR, "msg": str(e)}]), 400 + + if not modules: + msg = "one of the requested modules does not exist: %s" % module_names + log.error("(v0_modules_info) %s", msg) + return jsonify(status=False, errors=[{"id": UNKNOWN_MODULE, "msg": msg}]), 400 + + return jsonify(modules=modules)
+ +
[docs]@v0_api.route("/compose", methods=["POST"]) +def v0_compose_start(): + """Start a compose + + The body of the post should have these fields: + blueprint_name - The blueprint name from /blueprints/list/ + compose_type - The type of output to create, from /compose/types + branch - Optional, defaults to master, selects the git branch to use for the blueprint. + + **POST /api/v0/compose** + + Start a compose. The content type should be 'application/json' and the body of the POST + should look like this + + Example:: + + { + "blueprint_name": "http-server", + "compose_type": "tar", + "branch": "master" + } + + Pass it the name of the blueprint, the type of output (from '/api/v0/compose/types'), and the + blueprint branch to use. 'branch' is optional and will default to master. It will create a new + build and add it to the queue. It returns the build uuid and a status if it succeeds + + Example:: + + { + "build_id": "e6fa6db4-9c81-4b70-870f-a697ca405cdf", + "status": true + } + """ + # Passing ?test=1 will generate a fake FAILED compose. + # Passing ?test=2 will generate a fake FINISHED compose. + try: + test_mode = int(request.args.get("test", "0")) + except ValueError: + test_mode = 0 + + compose = request.get_json(cache=False) + + errors = [] + if not compose: + return jsonify(status=False, errors=[{"id": MISSING_POST, "msg": "Missing POST body"}]), 400 + + if "blueprint_name" not in compose: + errors.append({"id": UNKNOWN_BLUEPRINT,"msg": "No 'blueprint_name' in the JSON request"}) + else: + blueprint_name = compose["blueprint_name"] + + if "branch" not in compose or not compose["branch"]: + branch = "master" + else: + branch = compose["branch"] + + if "compose_type" not in compose: + errors.append({"id": BAD_COMPOSE_TYPE, "msg": "No 'compose_type' in the JSON request"}) + else: + compose_type = compose["compose_type"] + + if VALID_BLUEPRINT_NAME.match(blueprint_name) is None: + errors.append({"id": INVALID_CHARS, "msg": "Invalid characters in API path"}) + + if not blueprint_exists(api, branch, blueprint_name): + errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "Unknown blueprint name: %s" % blueprint_name}) + + if errors: + return jsonify(status=False, errors=errors), 400 + + try: + build_id = start_build(api.config["COMPOSER_CFG"], api.config["DNFLOCK"], api.config["GITLOCK"], + branch, blueprint_name, compose_type, test_mode) + except Exception as e: + if "Invalid compose type" in str(e): + return jsonify(status=False, errors=[{"id": BAD_COMPOSE_TYPE, "msg": str(e)}]), 400 + else: + return jsonify(status=False, errors=[{"id": BUILD_FAILED, "msg": str(e)}]), 400 + + return jsonify(status=True, build_id=build_id)
+ +
[docs]@v0_api.route("/compose/types") +def v0_compose_types(): + """Return the list of enabled output types + + (only enabled types are returned) + + **/api/v0/compose/types** + + Returns the list of supported output types that are valid for use with 'POST /api/v0/compose' + + Example:: + + { + "types": [ + { + "enabled": true, + "name": "tar" + } + ] + } + """ + share_dir = api.config["COMPOSER_CFG"].get("composer", "share_dir") + return jsonify(types=[{"name": t, "enabled": e} for t, e in compose_types(share_dir)])
+ +
[docs]@v0_api.route("/compose/queue") +def v0_compose_queue(): + """Return the status of the new and running queues + + **/api/v0/compose/queue** + + Return the status of the build queue. It includes information about the builds waiting, + and the build that is running. + + Example:: + + { + "new": [ + { + "id": "45502a6d-06e8-48a5-a215-2b4174b3614b", + "blueprint": "glusterfs", + "queue_status": "WAITING", + "job_created": 1517362647.4570868, + "version": "0.0.6" + }, + { + "id": "6d292bd0-bec7-4825-8d7d-41ef9c3e4b73", + "blueprint": "kubernetes", + "queue_status": "WAITING", + "job_created": 1517362659.0034983, + "version": "0.0.1" + } + ], + "run": [ + { + "id": "745712b2-96db-44c0-8014-fe925c35e795", + "blueprint": "glusterfs", + "queue_status": "RUNNING", + "job_created": 1517362633.7965999, + "job_started": 1517362633.8001345, + "version": "0.0.6" + } + ] + } + """ + return jsonify(queue_status(api.config["COMPOSER_CFG"], api=0))
+ +
[docs]@v0_api.route("/compose/finished") +def v0_compose_finished(): + """Return the list of finished composes + + **/api/v0/compose/finished** + + Return the details on all of the finished composes on the system. + + Example:: + + { + "finished": [ + { + "id": "70b84195-9817-4b8a-af92-45e380f39894", + "blueprint": "glusterfs", + "queue_status": "FINISHED", + "job_created": 1517351003.8210032, + "job_started": 1517351003.8230415, + "job_finished": 1517359234.1003145, + "version": "0.0.6" + }, + { + "id": "e695affd-397f-4af9-9022-add2636e7459", + "blueprint": "glusterfs", + "queue_status": "FINISHED", + "job_created": 1517362289.7193348, + "job_started": 1517362289.9751132, + "job_finished": 1517363500.1234567, + "version": "0.0.6" + } + ] + } + """ + return jsonify(finished=build_status(api.config["COMPOSER_CFG"], "FINISHED", api=0))
+ +
[docs]@v0_api.route("/compose/failed") +def v0_compose_failed(): + """Return the list of failed composes + + **/api/v0/compose/failed** + + Return the details on all of the failed composes on the system. + + Example:: + + { + "failed": [ + { + "id": "8c8435ef-d6bd-4c68-9bf1-a2ef832e6b1a", + "blueprint": "http-server", + "queue_status": "FAILED", + "job_created": 1517523249.9301329, + "job_started": 1517523249.9314211, + "job_finished": 1517523255.5623411, + "version": "0.0.2" + } + ] + } + """ + return jsonify(failed=build_status(api.config["COMPOSER_CFG"], "FAILED", api=0))
+ +
[docs]@v0_api.route("/compose/status", defaults={'uuids': ""}) +@v0_api.route("/compose/status/<uuids>") +@checkparams([("uuids", "", "no UUIDs given")]) +def v0_compose_status(uuids): + """Return the status of the listed uuids + + **/api/v0/compose/status/<uuids>[?blueprint=<blueprint_name>&status=<compose_status>&type=<compose_type>]** + + Return the details for each of the comma-separated list of uuids. A uuid of '*' will return + details for all composes. + + Example:: + + { + "uuids": [ + { + "id": "8c8435ef-d6bd-4c68-9bf1-a2ef832e6b1a", + "blueprint": "http-server", + "queue_status": "FINISHED", + "job_created": 1517523644.2384307, + "job_started": 1517523644.2551234, + "job_finished": 1517523689.9864314, + "version": "0.0.2" + }, + { + "id": "45502a6d-06e8-48a5-a215-2b4174b3614b", + "blueprint": "glusterfs", + "queue_status": "FINISHED", + "job_created": 1517363442.188399, + "job_started": 1517363442.325324, + "job_finished": 1517363451.653621, + "version": "0.0.6" + } + ] + } + """ + if VALID_API_STRING.match(uuids) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + blueprint = request.args.get("blueprint", None) + status = request.args.get("status", None) + compose_type = request.args.get("type", None) + + results = [] + errors = [] + + if uuids.strip() == '*': + queue_status_dict = queue_status(api.config["COMPOSER_CFG"], api=0) + queue_new = queue_status_dict["new"] + queue_running = queue_status_dict["run"] + candidates = queue_new + queue_running + build_status(api.config["COMPOSER_CFG"], api=0) + else: + candidates = [] + for uuid in [n.strip().lower() for n in uuids.split(",")]: + details = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0) + if details is None: + errors.append({"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}) + else: + candidates.append(details) + + for details in candidates: + if blueprint is not None and details['blueprint'] != blueprint: + continue + + if status is not None and details['queue_status'] != status: + continue + + if compose_type is not None and details['compose_type'] != compose_type: + continue + + results.append(details) + + return jsonify(uuids=results, errors=errors)
+ +
[docs]@v0_api.route("/compose/cancel", defaults={'uuid': ""}, methods=["DELETE"]) +@v0_api.route("/compose/cancel/<uuid>", methods=["DELETE"]) +@checkparams([("uuid", "", "no UUID given")]) +def v0_compose_cancel(uuid): + """Cancel a running compose and delete its results directory + + **DELETE /api/v0/compose/cancel/<uuid>** + + Cancel the build, if it is not finished, and delete the results. It will return a + status of True if it is successful. + + Example:: + + { + "status": true, + "uuid": "03397f8d-acff-4cdb-bd31-f629b7a948f5" + } + """ + if VALID_API_STRING.match(uuid) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0) + if status is None: + return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400 + + if status["queue_status"] not in ["WAITING", "RUNNING"]: + return jsonify(status=False, errors=[{"id": BUILD_IN_WRONG_STATE, "msg": "Build %s is not in WAITING or RUNNING." % uuid}]) + + try: + uuid_cancel(api.config["COMPOSER_CFG"], uuid) + except Exception as e: + return jsonify(status=False, errors=[{"id": COMPOSE_ERROR, "msg": "%s: %s" % (uuid, str(e))}]),400 + else: + return jsonify(status=True, uuid=uuid)
+ +
[docs]@v0_api.route("/compose/delete", defaults={'uuids': ""}, methods=["DELETE"]) +@v0_api.route("/compose/delete/<uuids>", methods=["DELETE"]) +@checkparams([("uuids", "", "no UUIDs given")]) +def v0_compose_delete(uuids): + """Delete the compose results for the listed uuids + + **DELETE /api/v0/compose/delete/<uuids>** + + Delete the list of comma-separated uuids from the compose results. + + Example:: + + { + "errors": [], + "uuids": [ + { + "status": true, + "uuid": "ae1bf7e3-7f16-4c9f-b36e-3726a1093fd0" + } + ] + } + """ + if VALID_API_STRING.match(uuids) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + results = [] + errors = [] + for uuid in [n.strip().lower() for n in uuids.split(",")]: + status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0) + if status is None: + errors.append({"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}) + elif status["queue_status"] not in ["FINISHED", "FAILED"]: + errors.append({"id": BUILD_IN_WRONG_STATE, "msg": "Build %s is not in FINISHED or FAILED." % uuid}) + else: + try: + uuid_delete(api.config["COMPOSER_CFG"], uuid) + except Exception as e: + errors.append({"id": COMPOSE_ERROR, "msg": "%s: %s" % (uuid, str(e))}) + else: + results.append({"uuid":uuid, "status":True}) + return jsonify(uuids=results, errors=errors)
+ +
[docs]@v0_api.route("/compose/info", defaults={'uuid': ""}) +@v0_api.route("/compose/info/<uuid>") +@checkparams([("uuid", "", "no UUID given")]) +def v0_compose_info(uuid): + """Return detailed info about a compose + + **/api/v0/compose/info/<uuid>** + + Get detailed information about the compose. The returned JSON string will + contain the following information: + + * id - The uuid of the comoposition + * config - containing the configuration settings used to run Anaconda + * blueprint - The depsolved blueprint used to generate the kickstart + * commit - The (local) git commit hash for the blueprint used + * deps - The NEVRA of all of the dependencies used in the composition + * compose_type - The type of output generated (tar, iso, etc.) + * queue_status - The final status of the composition (FINISHED or FAILED) + + Example:: + + { + "commit": "7078e521a54b12eae31c3fd028680da7a0815a4d", + "compose_type": "tar", + "config": { + "anaconda_args": "", + "armplatform": "", + "compress_args": [], + "compression": "xz", + "image_name": "root.tar.xz", + ... + }, + "deps": { + "packages": [ + { + "arch": "x86_64", + "epoch": "0", + "name": "acl", + "release": "14.el7", + "version": "2.2.51" + } + ] + }, + "id": "c30b7d80-523b-4a23-ad52-61b799739ce8", + "queue_status": "FINISHED", + "blueprint": { + "description": "An example kubernetes master", + ... + } + } + """ + if VALID_API_STRING.match(uuid) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + try: + info = uuid_info(api.config["COMPOSER_CFG"], uuid, api=0) + except Exception as e: + return jsonify(status=False, errors=[{"id": COMPOSE_ERROR, "msg": str(e)}]), 400 + + if info is None: + return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400 + else: + return jsonify(**info)
+ +
[docs]@v0_api.route("/compose/metadata", defaults={'uuid': ""}) +@v0_api.route("/compose/metadata/<uuid>") +@checkparams([("uuid","", "no UUID given")]) +def v0_compose_metadata(uuid): + """Return a tar of the metadata for the build + + **/api/v0/compose/metadata/<uuid>** + + Returns a .tar of the metadata used for the build. This includes all the + information needed to reproduce the build, including the final kickstart + populated with repository and package NEVRA. + + The mime type is set to 'application/x-tar' and the filename is set to + UUID-metadata.tar + + The .tar is uncompressed, but is not large. + """ + if VALID_API_STRING.match(uuid) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0) + if status is None: + return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400 + if status["queue_status"] not in ["FINISHED", "FAILED"]: + return jsonify(status=False, errors=[{"id": BUILD_IN_WRONG_STATE, "msg": "Build %s not in FINISHED or FAILED state." % uuid}]), 400 + else: + return Response(uuid_tar(api.config["COMPOSER_CFG"], uuid, metadata=True, image=False, logs=False), + mimetype="application/x-tar", + headers=[("Content-Disposition", "attachment; filename=%s-metadata.tar;" % uuid)], + direct_passthrough=True)
+ +
[docs]@v0_api.route("/compose/results", defaults={'uuid': ""}) +@v0_api.route("/compose/results/<uuid>") +@checkparams([("uuid","", "no UUID given")]) +def v0_compose_results(uuid): + """Return a tar of the metadata and the results for the build + + **/api/v0/compose/results/<uuid>** + + Returns a .tar of the metadata, logs, and output image of the build. This + includes all the information needed to reproduce the build, including the + final kickstart populated with repository and package NEVRA. The output image + is already in compressed form so the returned tar is not compressed. + + The mime type is set to 'application/x-tar' and the filename is set to + UUID.tar + """ + if VALID_API_STRING.match(uuid) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0) + if status is None: + return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400 + elif status["queue_status"] not in ["FINISHED", "FAILED"]: + return jsonify(status=False, errors=[{"id": BUILD_IN_WRONG_STATE, "msg": "Build %s not in FINISHED or FAILED state." % uuid}]), 400 + else: + return Response(uuid_tar(api.config["COMPOSER_CFG"], uuid, metadata=True, image=True, logs=True), + mimetype="application/x-tar", + headers=[("Content-Disposition", "attachment; filename=%s.tar;" % uuid)], + direct_passthrough=True)
+ +
[docs]@v0_api.route("/compose/logs", defaults={'uuid': ""}) +@v0_api.route("/compose/logs/<uuid>") +@checkparams([("uuid","", "no UUID given")]) +def v0_compose_logs(uuid): + """Return a tar of the metadata for the build + + **/api/v0/compose/logs/<uuid>** + + Returns a .tar of the anaconda build logs. The tar is not compressed, but is + not large. + + The mime type is set to 'application/x-tar' and the filename is set to + UUID-logs.tar + """ + if VALID_API_STRING.match(uuid) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0) + if status is None: + return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400 + elif status["queue_status"] not in ["FINISHED", "FAILED"]: + return jsonify(status=False, errors=[{"id": BUILD_IN_WRONG_STATE, "msg": "Build %s not in FINISHED or FAILED state." % uuid}]), 400 + else: + return Response(uuid_tar(api.config["COMPOSER_CFG"], uuid, metadata=False, image=False, logs=True), + mimetype="application/x-tar", + headers=[("Content-Disposition", "attachment; filename=%s-logs.tar;" % uuid)], + direct_passthrough=True)
+ +
[docs]@v0_api.route("/compose/image", defaults={'uuid': ""}) +@v0_api.route("/compose/image/<uuid>") +@checkparams([("uuid","", "no UUID given")]) +def v0_compose_image(uuid): + """Return the output image for the build + + **/api/v0/compose/image/<uuid>** + + Returns the output image from the build. The filename is set to the filename + from the build with the UUID as a prefix. eg. UUID-root.tar.xz or UUID-boot.iso. + """ + if VALID_API_STRING.match(uuid) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0) + if status is None: + return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400 + elif status["queue_status"] not in ["FINISHED", "FAILED"]: + return jsonify(status=False, errors=[{"id": BUILD_IN_WRONG_STATE, "msg": "Build %s not in FINISHED or FAILED state." % uuid}]), 400 + else: + image_name, image_path = uuid_image(api.config["COMPOSER_CFG"], uuid) + + # Make sure it really exists + if not os.path.exists(image_path): + return jsonify(status=False, errors=[{"id": BUILD_MISSING_FILE, "msg": "Build %s is missing image file %s" % (uuid, image_name)}]), 400 + + # Make the image name unique + image_name = uuid + "-" + image_name + # XXX - Will mime type guessing work for all our output? + return send_file(image_path, as_attachment=True, attachment_filename=image_name, add_etags=False)
+ +
[docs]@v0_api.route("/compose/log", defaults={'uuid': ""}) +@v0_api.route("/compose/log/<uuid>") +@checkparams([("uuid","", "no UUID given")]) +def v0_compose_log_tail(uuid): + """Return the tail of the most currently relevant log + + **/api/v0/compose/log/<uuid>[?size=KiB]** + + Returns the end of either the anaconda log, the packaging log, or the + composer logs, depending on the progress of the compose. The size + parameter is optional and defaults to 1 MiB if it is not included. The + returned data is raw text from the end of the log file, starting on a + line boundary. + + Example:: + + 12:59:24,222 INFO anaconda: Running Thread: AnaConfigurationThread (140629395244800) + 12:59:24,223 INFO anaconda: Configuring installed system + 12:59:24,912 INFO anaconda: Configuring installed system + 12:59:24,912 INFO anaconda: Creating users + 12:59:24,913 INFO anaconda: Clearing libuser.conf at /tmp/libuser.Dyy8Gj + 12:59:25,154 INFO anaconda: Creating users + 12:59:25,155 INFO anaconda: Configuring addons + 12:59:25,155 INFO anaconda: Configuring addons + 12:59:25,155 INFO anaconda: Generating initramfs + 12:59:49,467 INFO anaconda: Generating initramfs + 12:59:49,467 INFO anaconda: Running post-installation scripts + 12:59:49,467 INFO anaconda: Running kickstart %%post script(s) + 12:59:50,782 INFO anaconda: All kickstart %%post script(s) have been run + 12:59:50,782 INFO anaconda: Running post-installation scripts + 12:59:50,784 INFO anaconda: Thread Done: AnaConfigurationThread (140629395244800) + """ + if VALID_API_STRING.match(uuid) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + try: + size = int(request.args.get("size", "1024")) + except ValueError as e: + return jsonify(status=False, errors=[{"id": COMPOSE_ERROR, "msg": str(e)}]), 400 + + status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0) + if status is None: + return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400 + elif status["queue_status"] == "WAITING": + return jsonify(status=False, errors=[{"id": BUILD_IN_WRONG_STATE, "msg": "Build %s has not started yet. No logs to view" % uuid}]) + try: + return Response(uuid_log(api.config["COMPOSER_CFG"], uuid, size), direct_passthrough=True) + except RuntimeError as e: + return jsonify(status=False, errors=[{"id": COMPOSE_ERROR, "msg": str(e)}]), 400
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/api/v1.html b/f32-branch/_modules/pylorax/api/v1.html new file mode 100644 index 00000000..9c04cdf4 --- /dev/null +++ b/f32-branch/_modules/pylorax/api/v1.html @@ -0,0 +1,1243 @@ + + + + + + + + + + + pylorax.api.v1 — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for pylorax.api.v1

+#
+# Copyright (C) 2019  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+""" Setup v1 of the API server
+
+"""
+import logging
+log = logging.getLogger("lorax-composer")
+
+from flask import jsonify, request
+from flask import current_app as api
+
+from lifted.queue import get_upload, reset_upload, cancel_upload, delete_upload
+from lifted.providers import list_providers, resolve_provider, load_profiles, validate_settings, save_settings
+from lifted.providers import load_settings, delete_profile
+from pylorax.api.checkparams import checkparams
+from pylorax.api.compose import start_build
+from pylorax.api.errors import BAD_COMPOSE_TYPE, BUILD_FAILED, INVALID_CHARS, MISSING_POST, PROJECTS_ERROR
+from pylorax.api.errors import SYSTEM_SOURCE, UNKNOWN_BLUEPRINT, UNKNOWN_SOURCE, UNKNOWN_UUID, UPLOAD_ERROR
+from pylorax.api.errors import COMPOSE_ERROR
+from pylorax.api.flask_blueprint import BlueprintSkip
+from pylorax.api.queue import queue_status, build_status, uuid_status, uuid_schedule_upload, uuid_remove_upload
+from pylorax.api.queue import uuid_info
+from pylorax.api.projects import get_repo_sources, repo_to_source
+from pylorax.api.projects import new_repo_source
+from pylorax.api.regexes import VALID_API_STRING, VALID_BLUEPRINT_NAME
+import pylorax.api.toml as toml
+from pylorax.api.utils import blueprint_exists
+
+
+# Create the v1 routes Blueprint with skip_routes support
+v1_api = BlueprintSkip("v1_routes", __name__)
+
+
[docs]@v1_api.route("/projects/source/info", defaults={'source_ids': ""}) +@v1_api.route("/projects/source/info/<source_ids>") +@checkparams([("source_ids", "", "no source names given")]) +def v1_projects_source_info(source_ids): + """Return detailed info about the list of sources + + **/api/v1/projects/source/info/<source-ids>** + + Return information about the comma-separated list of source ids. Or all of the + sources if '*' is passed. Note that general globbing is not supported, only '*'. + + Immutable system sources will have the "system" field set to true. User added sources + will have it set to false. System sources cannot be changed or deleted. + + Example:: + + { + "errors": [], + "sources": { + "fedora": { + "check_gpg": true, + "check_ssl": true, + "gpgkey_urls": [ + "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-28-x86_64" + ], + "id": "fedora", + "name": "Fedora $releasever - $basearch", + "proxy": "http://proxy.brianlane.com:8123", + "system": true, + "type": "yum-metalink", + "url": "https://mirrors.fedoraproject.org/metalink?repo=fedora-28&arch=x86_64" + } + } + } + + In v0 the ``name`` field was used for the id (a short name for the repo). In v1 ``name`` changed + to ``id`` and ``name`` is now used for the longer descriptive name of the repository. + """ + if VALID_API_STRING.match(source_ids) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + out_fmt = request.args.get("format", "json") + if VALID_API_STRING.match(out_fmt) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in format argument"}]), 400 + + # Return info on all of the sources + if source_ids == "*": + with api.config["DNFLOCK"].lock: + source_ids = ",".join(r.id for r in api.config["DNFLOCK"].dbo.repos.iter_enabled()) + + sources = {} + errors = [] + system_sources = get_repo_sources("/etc/yum.repos.d/*.repo") + for source in source_ids.split(","): + with api.config["DNFLOCK"].lock: + repo = api.config["DNFLOCK"].dbo.repos.get(source, None) + if not repo: + errors.append({"id": UNKNOWN_SOURCE, "msg": "%s is not a valid source" % source}) + continue + sources[repo.id] = repo_to_source(repo, repo.id in system_sources, api=1) + + if out_fmt == "toml" and not errors: + # With TOML output we just want to dump the raw sources, skipping the errors + return toml.dumps(sources) + elif out_fmt == "toml" and errors: + # TOML requested, but there was an error + return jsonify(status=False, errors=errors), 400 + else: + return jsonify(sources=sources, errors=errors)
+ +
[docs]@v1_api.route("/projects/source/new", methods=["POST"]) +def v1_projects_source_new(): + """Add a new package source. Or change an existing one + + **POST /api/v1/projects/source/new** + + Add (or change) a source for use when depsolving blueprints and composing images. + + The ``proxy`` and ``gpgkey_urls`` entries are optional. All of the others are required. The supported + types for the urls are: + + * ``yum-baseurl`` is a URL to a yum repository. + * ``yum-mirrorlist`` is a URL for a mirrorlist. + * ``yum-metalink`` is a URL for a metalink. + + If ``check_ssl`` is true the https certificates must be valid. If they are self-signed you can either set + this to false, or add your Certificate Authority to the host system. + + If ``check_gpg`` is true the GPG key must either be installed on the host system, or ``gpgkey_urls`` + should point to it. + + You can edit an existing source (other than system sources), by doing a POST + of the new version of the source. It will overwrite the previous one. + + Example:: + + { + "id": "custom-source-1", + "name": "Custom Package Source #1", + "url": "https://url/path/to/repository/", + "type": "yum-baseurl", + "check_ssl": true, + "check_gpg": true, + "gpgkey_urls": [ + "https://url/path/to/gpg-key" + ] + } + + In v0 the ``name`` field was used for the id (a short name for the repo). In v1 ``name`` changed + to ``id`` and ``name`` is now used for the longer descriptive name of the repository. + """ + if request.headers['Content-Type'] == "text/x-toml": + source = toml.loads(request.data) + else: + source = request.get_json(cache=False) + + # Check for id in source, return error if not + if "id" not in source: + return jsonify(status=False, errors=[{"id": UNKNOWN_SOURCE, "msg": "'id' field is missing from API v1 request."}]), 400 + + system_sources = get_repo_sources("/etc/yum.repos.d/*.repo") + if source["id"] in system_sources: + return jsonify(status=False, errors=[{"id": SYSTEM_SOURCE, "msg": "%s is a system source, it cannot be changed." % source["id"]}]), 400 + + try: + # Remove it from the RepoDict (NOTE that this isn't explicitly supported by the DNF API) + with api.config["DNFLOCK"].lock: + repo_dir = api.config["COMPOSER_CFG"].get("composer", "repo_dir") + new_repo_source(api.config["DNFLOCK"].dbo, source["id"], source, repo_dir) + except Exception as e: + return jsonify(status=False, errors=[{"id": PROJECTS_ERROR, "msg": str(e)}]), 400 + + return jsonify(status=True)
+ +
[docs]@v1_api.route("/compose", methods=["POST"]) +def v1_compose_start(): + """Start a compose + + The body of the post should have these fields: + blueprint_name - The blueprint name from /blueprints/list/ + compose_type - The type of output to create, from /compose/types + branch - Optional, defaults to master, selects the git branch to use for the blueprint. + + **POST /api/v1/compose** + + Start a compose. The content type should be 'application/json' and the body of the POST + should look like this. The "upload" object is optional. + + The upload object can specify either a pre-existing profile to use (as returned by + `/uploads/providers`) or one-time use settings for the provider. + + Example with upload profile:: + + { + "blueprint_name": "http-server", + "compose_type": "tar", + "branch": "master", + "upload": { + "image_name": "My Image", + "provider": "azure", + "profile": "production-azure-settings" + } + } + + Example with upload settings:: + + { + "blueprint_name": "http-server", + "compose_type": "tar", + "branch": "master", + "upload": { + "image_name": "My Image", + "provider": "azure", + "settings": { + "resource_group": "SOMEBODY", + "storage_account_name": "ONCE", + "storage_container": "TOLD", + "location": "ME", + "subscription_id": "THE", + "client_id": "WORLD", + "secret": "IS", + "tenant": "GONNA" + } + } + } + + Pass it the name of the blueprint, the type of output (from + '/api/v1/compose/types'), and the blueprint branch to use. 'branch' is + optional and will default to master. It will create a new build and add + it to the queue. It returns the build uuid and a status if it succeeds. + If an "upload" is given, it will schedule an upload to run when the build + finishes. + + Example response:: + + { + "build_id": "e6fa6db4-9c81-4b70-870f-a697ca405cdf", + "upload_id": "572eb0d0-5348-4600-9666-14526ba628bb", + "status": true + } + """ + # Passing ?test=1 will generate a fake FAILED compose. + # Passing ?test=2 will generate a fake FINISHED compose. 
+    try:
+        test_mode = int(request.args.get("test", "0"))
+    except ValueError:
+        test_mode = 0
+
+    compose = request.get_json(cache=False)
+
+    errors = []
+    if not compose:
+        return jsonify(status=False, errors=[{"id": MISSING_POST, "msg": "Missing POST body"}]), 400
+
+    if "blueprint_name" not in compose:
+        errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "No 'blueprint_name' in the JSON request"})
+    else:
+        blueprint_name = compose["blueprint_name"]
+
+    if "branch" not in compose or not compose["branch"]:
+        branch = "master"
+    else:
+        branch = compose["branch"]
+
+    if "compose_type" not in compose:
+        errors.append({"id": BAD_COMPOSE_TYPE, "msg": "No 'compose_type' in the JSON request"})
+    else:
+        compose_type = compose["compose_type"]
+
+    # Bail out here if required fields were missing, the checks below would
+    # otherwise raise NameError on the unbound blueprint_name/compose_type
+    if errors:
+        return jsonify(status=False, errors=errors), 400
+
+    if VALID_BLUEPRINT_NAME.match(blueprint_name) is None:
+        errors.append({"id": INVALID_CHARS, "msg": "Invalid characters in API path"})
+
+    if not blueprint_exists(api, branch, blueprint_name):
+        errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "Unknown blueprint name: %s" % blueprint_name})
+
+    if "upload" in compose:
+        try:
+            image_name = compose["upload"]["image_name"]
+
+            if "profile" in compose["upload"]:
+                # Load a specific profile for this provider
+                profile = compose["upload"]["profile"]
+                provider_name = compose["upload"]["provider"]
+                settings = load_settings(api.config["COMPOSER_CFG"]["upload"], provider_name, profile)
+            else:
+                provider_name = compose["upload"]["provider"]
+                settings = compose["upload"]["settings"]
+        except KeyError as e:
+            errors.append({"id": UPLOAD_ERROR, "msg": f'Missing parameter {str(e)}!'})
+        else:
+            # Only check the provider when all of the upload fields were present
+            try:
+                provider = resolve_provider(api.config["COMPOSER_CFG"]["upload"], provider_name)
+                if "supported_types" in provider and compose_type not in provider["supported_types"]:
+                    raise RuntimeError(f'Type "{compose_type}" is not supported by provider "{provider_name}"!')
+                validate_settings(api.config["COMPOSER_CFG"]["upload"], provider_name, settings, image_name)
+            except Exception as e:
+                errors.append({"id": UPLOAD_ERROR, "msg": str(e)})
+
+    if errors:
+        return jsonify(status=False, errors=errors), 400
+
+    try:
+        build_id = start_build(api.config["COMPOSER_CFG"], api.config["DNFLOCK"], api.config["GITLOCK"],
+                               branch, blueprint_name, compose_type, test_mode)
+    except Exception as e:
+        if "Invalid compose type" in str(e):
+            return jsonify(status=False, errors=[{"id": BAD_COMPOSE_TYPE, "msg": str(e)}]), 400
+        else:
+            return jsonify(status=False, errors=[{"id": BUILD_FAILED, "msg": str(e)}]), 400
+
+    if "upload" in compose:
+        upload_id = uuid_schedule_upload(
+            api.config["COMPOSER_CFG"],
+            build_id,
+            provider_name,
+            image_name,
+            settings
+        )
+    else:
+        upload_id = ""
+
+    return jsonify(status=True, build_id=build_id, upload_id=upload_id)
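A matching client call for the happy path (again assuming an HTTP endpoint at the hypothetical API_URL):

    import requests

    body = {"blueprint_name": "http-server", "compose_type": "tar", "branch": "master"}
    r = requests.post(API_URL + "/api/v1/compose", json=body)
    if r.status_code == 400:
        print(r.json()["errors"])    # e.g. UNKNOWN_BLUEPRINT or BAD_COMPOSE_TYPE
    else:
        print(r.json()["build_id"])  # poll it via /api/v1/compose/status/<uuid>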
+ +
[docs]@v1_api.route("/compose/queue") +def v1_compose_queue(): + """Return the status of the new and running queues + + **/api/v1/compose/queue** + + Return the status of the build queue. It includes information about the builds waiting, + and the build that is running. + + Example:: + + { + "new": [ + { + "id": "45502a6d-06e8-48a5-a215-2b4174b3614b", + "blueprint": "glusterfs", + "queue_status": "WAITING", + "job_created": 1517362647.4570868, + "version": "0.0.6" + }, + { + "id": "6d292bd0-bec7-4825-8d7d-41ef9c3e4b73", + "blueprint": "kubernetes", + "queue_status": "WAITING", + "job_created": 1517362659.0034983, + "version": "0.0.1" + } + ], + "run": [ + { + "id": "745712b2-96db-44c0-8014-fe925c35e795", + "blueprint": "glusterfs", + "queue_status": "RUNNING", + "job_created": 1517362633.7965999, + "job_started": 1517362633.8001345, + "version": "0.0.6", + "uploads": [ + { + "creation_time": 1568150660.524401, + "image_name": "glusterfs server", + "image_path": null, + "provider_name": "azure", + "settings": { + "client_id": "need", + "location": "need", + "resource_group": "group", + "secret": "need", + "storage_account_name": "need", + "storage_container": "need", + "subscription_id": "need", + "tenant": "need" + }, + "status": "WAITING", + "uuid": "21898dfd-9ac9-4e22-bb1d-7f12d0129e65" + } + ] + } + ] + } + """ + return jsonify(queue_status(api.config["COMPOSER_CFG"], api=1))
+ +
[docs]@v1_api.route("/compose/finished") +def v1_compose_finished(): + """Return the list of finished composes + + **/api/v1/compose/finished** + + Return the details on all of the finished composes on the system. + + Example:: + + { + "finished": [ + { + "id": "70b84195-9817-4b8a-af92-45e380f39894", + "blueprint": "glusterfs", + "queue_status": "FINISHED", + "job_created": 1517351003.8210032, + "job_started": 1517351003.8230415, + "job_finished": 1517359234.1003145, + "version": "0.0.6" + }, + { + "id": "e695affd-397f-4af9-9022-add2636e7459", + "blueprint": "glusterfs", + "queue_status": "FINISHED", + "job_created": 1517362289.7193348, + "job_started": 1517362289.9751132, + "job_finished": 1517363500.1234567, + "version": "0.0.6", + "uploads": [ + { + "creation_time": 1568150660.524401, + "image_name": "glusterfs server", + "image_path": "/var/lib/lorax/composer/results/e695affd-397f-4af9-9022-add2636e7459/disk.vhd", + "provider_name": "azure", + "settings": { + "client_id": "need", + "location": "need", + "resource_group": "group", + "secret": "need", + "storage_account_name": "need", + "storage_container": "need", + "subscription_id": "need", + "tenant": "need" + }, + "status": "WAITING", + "uuid": "21898dfd-9ac9-4e22-bb1d-7f12d0129e65" + } + ] + } + ] + } + """ + return jsonify(finished=build_status(api.config["COMPOSER_CFG"], "FINISHED", api=1))
+ +
[docs]@v1_api.route("/compose/failed") +def v1_compose_failed(): + """Return the list of failed composes + + **/api/v1/compose/failed** + + Return the details on all of the failed composes on the system. + + Example:: + + { + "failed": [ + { + "id": "8c8435ef-d6bd-4c68-9bf1-a2ef832e6b1a", + "blueprint": "http-server", + "queue_status": "FAILED", + "job_created": 1517523249.9301329, + "job_started": 1517523249.9314211, + "job_finished": 1517523255.5623411, + "version": "0.0.2", + "uploads": [ + { + "creation_time": 1568150660.524401, + "image_name": "http-server", + "image_path": null, + "provider_name": "azure", + "settings": { + "client_id": "need", + "location": "need", + "resource_group": "group", + "secret": "need", + "storage_account_name": "need", + "storage_container": "need", + "subscription_id": "need", + "tenant": "need" + }, + "status": "WAITING", + "uuid": "21898dfd-9ac9-4e22-bb1d-7f12d0129e65" + } + ] + } + ] + } + """ + return jsonify(failed=build_status(api.config["COMPOSER_CFG"], "FAILED", api=1))
+ +
[docs]@v1_api.route("/compose/status", defaults={'uuids': ""}) +@v1_api.route("/compose/status/<uuids>") +@checkparams([("uuids", "", "no UUIDs given")]) +def v1_compose_status(uuids): + """Return the status of the listed uuids + + **/api/v1/compose/status/<uuids>[?blueprint=<blueprint_name>&status=<compose_status>&type=<compose_type>]** + + Return the details for each of the comma-separated list of uuids. A uuid of '*' will return + details for all composes. + + Example:: + + { + "uuids": [ + { + "id": "8c8435ef-d6bd-4c68-9bf1-a2ef832e6b1a", + "blueprint": "http-server", + "queue_status": "FINISHED", + "job_created": 1517523644.2384307, + "job_started": 1517523644.2551234, + "job_finished": 1517523689.9864314, + "version": "0.0.2" + }, + { + "id": "45502a6d-06e8-48a5-a215-2b4174b3614b", + "blueprint": "glusterfs", + "queue_status": "FINISHED", + "job_created": 1517363442.188399, + "job_started": 1517363442.325324, + "job_finished": 1517363451.653621, + "version": "0.0.6", + "uploads": [ + { + "creation_time": 1568150660.524401, + "image_name": "glusterfs server", + "image_path": null, + "provider_name": "azure", + "settings": { + "client_id": "need", + "location": "need", + "resource_group": "group", + "secret": "need", + "storage_account_name": "need", + "storage_container": "need", + "subscription_id": "need", + "tenant": "need" + }, + "status": "WAITING", + "uuid": "21898dfd-9ac9-4e22-bb1d-7f12d0129e65" + } + ] + } + ] + } + """ + if VALID_API_STRING.match(uuids) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + blueprint = request.args.get("blueprint", None) + status = request.args.get("status", None) + compose_type = request.args.get("type", None) + + results = [] + errors = [] + + if uuids.strip() == '*': + queue_status_dict = queue_status(api.config["COMPOSER_CFG"], api=1) + queue_new = queue_status_dict["new"] + queue_running = queue_status_dict["run"] + candidates = queue_new + queue_running + build_status(api.config["COMPOSER_CFG"], api=1) + else: + candidates = [] + for uuid in [n.strip().lower() for n in uuids.split(",")]: + details = uuid_status(api.config["COMPOSER_CFG"], uuid, api=1) + if details is None: + errors.append({"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}) + else: + candidates.append(details) + + for details in candidates: + if blueprint is not None and details['blueprint'] != blueprint: + continue + + if status is not None and details['queue_status'] != status: + continue + + if compose_type is not None and details['compose_type'] != compose_type: + continue + + results.append(details) + + return jsonify(uuids=results, errors=errors)
+ +
[docs]@v1_api.route("/compose/info", defaults={'uuid': ""}) +@v1_api.route("/compose/info/<uuid>") +@checkparams([("uuid", "", "no UUID given")]) +def v1_compose_info(uuid): + """Return detailed info about a compose + + **/api/v1/compose/info/<uuid>** + + Get detailed information about the compose. The returned JSON string will + contain the following information: + + * id - The uuid of the comoposition + * config - containing the configuration settings used to run Anaconda + * blueprint - The depsolved blueprint used to generate the kickstart + * commit - The (local) git commit hash for the blueprint used + * deps - The NEVRA of all of the dependencies used in the composition + * compose_type - The type of output generated (tar, iso, etc.) + * queue_status - The final status of the composition (FINISHED or FAILED) + + Example:: + + { + "commit": "7078e521a54b12eae31c3fd028680da7a0815a4d", + "compose_type": "tar", + "config": { + "anaconda_args": "", + "armplatform": "", + "compress_args": [], + "compression": "xz", + "image_name": "root.tar.xz", + ... + }, + "deps": { + "packages": [ + { + "arch": "x86_64", + "epoch": "0", + "name": "acl", + "release": "14.el7", + "version": "2.2.51" + } + ] + }, + "id": "c30b7d80-523b-4a23-ad52-61b799739ce8", + "queue_status": "FINISHED", + "blueprint": { + "description": "An example kubernetes master", + ... + }, + "uploads": [ + { + "creation_time": 1568150660.524401, + "image_name": "glusterfs server", + "image_path": "/var/lib/lorax/composer/results/c30b7d80-523b-4a23-ad52-61b799739ce8/disk.vhd", + "provider_name": "azure", + "settings": { + "client_id": "need", + "location": "need", + "resource_group": "group", + "secret": "need", + "storage_account_name": "need", + "storage_container": "need", + "subscription_id": "need", + "tenant": "need" + }, + "status": "FAILED", + "uuid": "21898dfd-9ac9-4e22-bb1d-7f12d0129e65" + } + ] + } + """ + if VALID_API_STRING.match(uuid) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + try: + info = uuid_info(api.config["COMPOSER_CFG"], uuid, api=1) + except Exception as e: + return jsonify(status=False, errors=[{"id": COMPOSE_ERROR, "msg": str(e)}]), 400 + + if info is None: + return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400 + else: + return jsonify(**info)
+ +
[docs]@v1_api.route("/compose/uploads/schedule", defaults={'compose_uuid': ""}, methods=["POST"]) +@v1_api.route("/compose/uploads/schedule/<compose_uuid>", methods=["POST"]) +@checkparams([("compose_uuid", "", "no compose UUID given")]) +def v1_compose_uploads_schedule(compose_uuid): + """Schedule an upload of a compose to a given cloud provider + + **POST /api/v1/uploads/schedule/<compose_uuid>** + + The body can specify either a pre-existing profile to use (as returned by + `/uploads/providers`) or one-time use settings for the provider. + + Example with upload profile:: + + { + "image_name": "My Image", + "provider": "azure", + "profile": "production-azure-settings" + } + + Example with upload settings:: + + { + "image_name": "My Image", + "provider": "azure", + "settings": { + "resource_group": "SOMEBODY", + "storage_account_name": "ONCE", + "storage_container": "TOLD", + "location": "ME", + "subscription_id": "THE", + "client_id": "WORLD", + "secret": "IS", + "tenant": "GONNA" + } + } + + Example response:: + + { + "status": true, + "upload_id": "572eb0d0-5348-4600-9666-14526ba628bb" + } + """ + if VALID_API_STRING.match(compose_uuid) is None: + error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"} + return jsonify(status=False, errors=[error]), 400 + + parsed = request.get_json(cache=False) + if not parsed: + return jsonify(status=False, errors=[{"id": MISSING_POST, "msg": "Missing POST body"}]), 400 + + try: + image_name = parsed["image_name"] + provider_name = parsed["provider"] + if "profile" in parsed: + # Load a specific profile for this provider + profile = parsed["profile"] + settings = load_settings(api.config["COMPOSER_CFG"]["upload"], provider_name, profile) + else: + settings = parsed["settings"] + except KeyError as e: + error = {"id": UPLOAD_ERROR, "msg": f'Missing parameter {str(e)}!'} + return jsonify(status=False, errors=[error]), 400 + try: + compose_type = uuid_status(api.config["COMPOSER_CFG"], compose_uuid)["compose_type"] + provider = resolve_provider(api.config["COMPOSER_CFG"]["upload"], provider_name) + if "supported_types" in provider and compose_type not in provider["supported_types"]: + raise RuntimeError( + f'Type "{compose_type}" is not supported by provider "{provider_name}"!' + ) + except Exception as e: + return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(e)}]), 400 + + try: + upload_id = uuid_schedule_upload( + api.config["COMPOSER_CFG"], + compose_uuid, + provider_name, + image_name, + settings + ) + except RuntimeError as e: + return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(e)}]), 400 + return jsonify(status=True, upload_id=upload_id)
+ +
[docs]@v1_api.route("/upload/delete", defaults={"upload_uuid": ""}, methods=["DELETE"]) +@v1_api.route("/upload/delete/<upload_uuid>", methods=["DELETE"]) +@checkparams([("upload_uuid", "", "no upload UUID given")]) +def v1_compose_uploads_delete(upload_uuid): + """Delete an upload and disassociate it from its compose + + **DELETE /api/v1/upload/delete/<upload_uuid>** + + Example response:: + + { + "status": true, + "upload_id": "572eb0d0-5348-4600-9666-14526ba628bb" + } + """ + if VALID_API_STRING.match(upload_uuid) is None: + error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"} + return jsonify(status=False, errors=[error]), 400 + + try: + uuid_remove_upload(api.config["COMPOSER_CFG"], upload_uuid) + delete_upload(api.config["COMPOSER_CFG"]["upload"], upload_uuid) + except RuntimeError as error: + return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}]) + return jsonify(status=True, upload_id=upload_uuid)
+ +
[docs]@v1_api.route("/upload/info", defaults={"upload_uuid": ""}) +@v1_api.route("/upload/info/<upload_uuid>") +@checkparams([("upload_uuid", "", "no UUID given")]) +def v1_upload_info(upload_uuid): + """Returns information about a given upload + + **GET /api/v1/upload/info/<upload_uuid>** + + Example response:: + + { + "status": true, + "upload": { + "creation_time": 1565620940.069004, + "image_name": "My Image", + "image_path": "/var/lib/lorax/composer/results/b6218e8f-0fa2-48ec-9394-f5c2918544c4/disk.vhd", + "provider_name": "azure", + "settings": { + "resource_group": "SOMEBODY", + "storage_account_name": "ONCE", + "storage_container": "TOLD", + "location": "ME", + "subscription_id": "THE", + "client_id": "WORLD", + "secret": "IS", + "tenant": "GONNA" + }, + "status": "FAILED", + "uuid": "b637c411-9d9d-4279-b067-6c8d38e3b211" + } + } + """ + if VALID_API_STRING.match(upload_uuid) is None: + return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 + + try: + upload = get_upload(api.config["COMPOSER_CFG"]["upload"], upload_uuid).summary() + except RuntimeError as error: + return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}]) + return jsonify(status=True, upload=upload)
+ +
[docs]@v1_api.route("/upload/log", defaults={"upload_uuid": ""}) +@v1_api.route("/upload/log/<upload_uuid>") +@checkparams([("upload_uuid", "", "no UUID given")]) +def v1_upload_log(upload_uuid): + """Returns an upload's log + + **GET /api/v1/upload/log/<upload_uuid>** + + Example response:: + + { + "status": true, + "upload_id": "b637c411-9d9d-4279-b067-6c8d38e3b211", + "log": "< PLAY [localhost] >..." + } + """ + if VALID_API_STRING.match(upload_uuid) is None: + error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"} + return jsonify(status=False, errors=[error]), 400 + + try: + upload = get_upload(api.config["COMPOSER_CFG"]["upload"], upload_uuid) + except RuntimeError as error: + return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}]) + return jsonify(status=True, upload_id=upload_uuid, log=upload.upload_log)
+ +
[docs]@v1_api.route("/upload/reset", defaults={"upload_uuid": ""}, methods=["POST"]) +@v1_api.route("/upload/reset/<upload_uuid>", methods=["POST"]) +@checkparams([("upload_uuid", "", "no UUID given")]) +def v1_upload_reset(upload_uuid): + """Reset an upload so it can be attempted again + + **POST /api/v1/upload/reset/<upload_uuid>** + + Optionally pass in a new image name and/or new settings. + + Example request:: + + { + "image_name": "My renamed image", + "settings": { + "resource_group": "ROLL", + "storage_account_name": "ME", + "storage_container": "I", + "location": "AIN'T", + "subscription_id": "THE", + "client_id": "SHARPEST", + "secret": "TOOL", + "tenant": "IN" + } + } + + Example response:: + + { + "status": true, + "upload_id": "c75d5d62-9d26-42fc-a8ef-18bb14679fc7" + } + """ + if VALID_API_STRING.match(upload_uuid) is None: + error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"} + return jsonify(status=False, errors=[error]), 400 + + parsed = request.get_json(cache=False) + image_name = parsed.get("image_name") if parsed else None + settings = parsed.get("settings") if parsed else None + + try: + reset_upload(api.config["COMPOSER_CFG"]["upload"], upload_uuid, image_name, settings) + except RuntimeError as error: + return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}]) + return jsonify(status=True, upload_id=upload_uuid)
+ +
[docs]@v1_api.route("/upload/cancel", defaults={"upload_uuid": ""}, methods=["DELETE"]) +@v1_api.route("/upload/cancel/<upload_uuid>", methods=["DELETE"]) +@checkparams([("upload_uuid", "", "no UUID given")]) +def v1_upload_cancel(upload_uuid): + """Cancel an upload that is either queued or in progress + + **DELETE /api/v1/upload/cancel/<upload_uuid>** + + Example response:: + + { + "status": true, + "upload_id": "037a3d56-b421-43e9-9935-c98350c89996" + } + """ + if VALID_API_STRING.match(upload_uuid) is None: + error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"} + return jsonify(status=False, errors=[error]), 400 + + try: + cancel_upload(api.config["COMPOSER_CFG"]["upload"], upload_uuid) + except RuntimeError as error: + return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}]) + return jsonify(status=True, upload_id=upload_uuid)
+ +
[docs]@v1_api.route("/upload/providers") +def v1_upload_providers(): + """Return the information about all upload providers, including their + display names, expected settings, and saved profiles. Refer to the + `resolve_provider` function. + + **GET /api/v1/upload/providers** + + Example response:: + + { + "providers": { + "azure": { + "display": "Azure", + "profiles": { + "default": { + "client_id": "example", + ... + } + }, + "settings-info": { + "client_id": { + "display": "Client ID", + "placeholder": "", + "regex": "", + "type": "string" + }, + ... + }, + "supported_types": ["vhd"] + }, + ... + } + } + """ + + ucfg = api.config["COMPOSER_CFG"]["upload"] + + provider_names = list_providers(ucfg) + + def get_provider_info(provider_name): + provider = resolve_provider(ucfg, provider_name) + provider["profiles"] = load_profiles(ucfg, provider_name) + return provider + + providers = {provider_name: get_provider_info(provider_name) + for provider_name in provider_names} + return jsonify(status=True, providers=providers)
+ +
[docs]@v1_api.route("/upload/providers/save", methods=["POST"]) +def v1_providers_save(): + """Save provider settings as a profile for later use + + **POST /api/v1/upload/providers/save** + + Example request:: + + { + "provider": "azure", + "profile": "my-profile", + "settings": { + "resource_group": "SOMEBODY", + "storage_account_name": "ONCE", + "storage_container": "TOLD", + "location": "ME", + "subscription_id": "THE", + "client_id": "WORLD", + "secret": "IS", + "tenant": "GONNA" + } + } + + Saving to an existing profile will overwrite it. + + Example response:: + + { + "status": true + } + """ + parsed = request.get_json(cache=False) + + if parsed is None: + return jsonify(status=False, errors=[{"id": MISSING_POST, "msg": "Missing POST body"}]), 400 + + try: + provider_name = parsed["provider"] + profile = parsed["profile"] + settings = parsed["settings"] + except KeyError as e: + error = {"id": UPLOAD_ERROR, "msg": f'Missing parameter {str(e)}!'} + return jsonify(status=False, errors=[error]), 400 + try: + save_settings(api.config["COMPOSER_CFG"]["upload"], provider_name, profile, settings) + except Exception as e: + error = {"id": UPLOAD_ERROR, "msg": str(e)} + return jsonify(status=False, errors=[error]) + return jsonify(status=True)
+ +
[docs]@v1_api.route("/upload/providers/delete", defaults={"provider_name": "", "profile": ""}, methods=["DELETE"]) +@v1_api.route("/upload/providers/delete/<provider_name>/<profile>", methods=["DELETE"]) +@checkparams([("provider_name", "", "no provider name given"), ("profile", "", "no profile given")]) +def v1_providers_delete(provider_name, profile): + """Delete a provider's profile settings + + **DELETE /api/v1/upload/providers/delete/<provider_name>/<profile>** + + Example response:: + + { + "status": true + } + """ + if None in (VALID_API_STRING.match(provider_name), VALID_API_STRING.match(profile)): + error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"} + return jsonify(status=False, errors=[error]), 400 + + try: + delete_profile(api.config["COMPOSER_CFG"]["upload"], provider_name, profile) + except Exception as e: + error = {"id": UPLOAD_ERROR, "msg": str(e)} + return jsonify(status=False, errors=[error]) + return jsonify(status=True)
+
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/api/workspace.html b/f32-branch/_modules/pylorax/api/workspace.html new file mode 100644 index 00000000..496e8536 --- /dev/null +++ b/f32-branch/_modules/pylorax/api/workspace.html @@ -0,0 +1,300 @@
+pylorax.api.workspace — Lorax 32.12 documentation
Source code for pylorax.api.workspace

+#
+# Copyright (C) 2017  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import os
+
+from pylorax.api.recipes import recipe_filename, recipe_from_toml, RecipeFileError
+from pylorax.sysutils import joinpaths
+
+
+
[docs]def workspace_dir(repo, branch): + """Create the workspace's path from a Repository and branch + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :returns: The path to the branch's workspace directory + :rtype: str + + """ + repo_path = repo.get_location().get_path() + return joinpaths(repo_path, "workspace", branch)
+ + +
[docs]def workspace_read(repo, branch, recipe_name):
+    """Read a Recipe from the branch's workspace
+
+    :param repo: Open repository
+    :type repo: Git.Repository
+    :param branch: Branch name
+    :type branch: str
+    :param recipe_name: The name of the recipe
+    :type recipe_name: str
+    :returns: The workspace copy of the recipe, or None if it doesn't exist
+    :rtype: Recipe or None
+    :raises: RecipeFileError
+    """
+    ws_dir = workspace_dir(repo, branch)
+    if not os.path.isdir(ws_dir):
+        os.makedirs(ws_dir)
+    filename = joinpaths(ws_dir, recipe_filename(recipe_name))
+    if not os.path.exists(filename):
+        return None
+    try:
+        # Use a context manager so the file is closed even if parsing fails
+        with open(filename, 'rb') as f:
+            recipe = recipe_from_toml(f.read().decode("UTF-8"))
+    except IOError:
+        raise RecipeFileError
+    return recipe
+ + +
[docs]def workspace_write(repo, branch, recipe):
+    """Write a recipe to the workspace
+
+    :param repo: Open repository
+    :type repo: Git.Repository
+    :param branch: Branch name
+    :type branch: str
+    :param recipe: The recipe to write to the workspace
+    :type recipe: Recipe
+    :returns: None
+    :raises: IO related errors
+    """
+    ws_dir = workspace_dir(repo, branch)
+    if not os.path.isdir(ws_dir):
+        os.makedirs(ws_dir)
+    filename = joinpaths(ws_dir, recipe.filename)
+    # Write via a context manager instead of leaving the file object to the GC
+    with open(filename, 'wb') as f:
+        f.write(recipe.toml().encode("UTF-8"))
+ + +
[docs]def workspace_delete(repo, branch, recipe_name): + """Delete the recipe from the workspace + + :param repo: Open repository + :type repo: Git.Repository + :param branch: Branch name + :type branch: str + :param recipe_name: The name of the recipe + :type recipe_name: str + :returns: None + :raises: IO related errors + """ + ws_dir = workspace_dir(repo, branch) + filename = joinpaths(ws_dir, recipe_filename(recipe_name)) + if os.path.exists(filename): + os.unlink(filename)
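Taken together, these four functions implement a simple per-branch scratch area next to the git store. A round-trip sketch, assuming an already-open repository (for example from pylorax.api.recipes -- the exact opener is an assumption) and a Recipe object named recipe:

    workspace_write(repo, "master", recipe)                 # TOML file under workspace/master/
    ws_copy = workspace_read(repo, "master", recipe.name)   # Recipe, or None if missing
    workspace_delete(repo, "master", recipe.name)           # silent if already gone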
+
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/base.html b/f32-branch/_modules/pylorax/base.html new file mode 100644 index 00000000..d31d5710 --- /dev/null +++ b/f32-branch/_modules/pylorax/base.html @@ -0,0 +1,268 @@
+pylorax.base — Lorax 32.12 documentation
Source code for pylorax.base

+#
+# base.py
+#
+# Copyright (C) 2009-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#
+
+from abc import ABCMeta, abstractmethod
+import sys
+
+import pylorax.output as output
+
+
+
[docs]class BaseLoraxClass(object, metaclass=ABCMeta): + @abstractmethod + def __init__(self): + self.output = output.LoraxOutput() + +
[docs] def pcritical(self, msg, fobj=sys.stdout): + self.output.critical(msg, fobj)
+ +
[docs] def perror(self, msg, fobj=sys.stdout): + self.output.error(msg, fobj)
+ +
[docs] def pwarning(self, msg, fobj=sys.stdout): + self.output.warning(msg, fobj)
+ +
[docs] def pinfo(self, msg, fobj=sys.stdout): + self.output.info(msg, fobj)
+ +
[docs] def pdebug(self, msg, fobj=sys.stdout): + self.output.debug(msg, fobj)
+ + +
[docs]class DataHolder(dict): + + def __init__(self, **kwargs): + dict.__init__(self) + + for attr, value in kwargs.items(): + self[attr] = value + + def __getattr__(self, attr): + if attr in self: + return self[attr] + else: + raise AttributeError + + def __setattr__(self, attr, value): + self[attr] = value + +
[docs] def copy(self): + return DataHolder(**dict.copy(self))
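DataHolder is a dict that also allows attribute-style access, which is why call sites elsewhere in pylorax can write opts.compression instead of opts["compression"]. A quick usage sketch:

    opts = DataHolder(compression="xz", compress_args=[])
    assert opts.compression == "xz"     # attribute read
    opts.arch = "x86_64"                # attribute write lands in the dict
    assert opts["arch"] == "x86_64"
    clone = opts.copy()                 # shallow copy, still a DataHolder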
+
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/buildstamp.html b/f32-branch/_modules/pylorax/buildstamp.html new file mode 100644 index 00000000..1423a560 --- /dev/null +++ b/f32-branch/_modules/pylorax/buildstamp.html @@ -0,0 +1,267 @@
+pylorax.buildstamp — Lorax 32.12 documentation
Source code for pylorax.buildstamp

+#
+# buildstamp.py
+#
+# Copyright (C) 2010-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.buildstamp")
+
+import datetime
+import os
+
+
+
[docs]class BuildStamp(object): + + def __init__(self, product, version, bugurl, isfinal, buildarch, variant=""): + self.product = product + self.version = version + self.bugurl = bugurl + self.isfinal = isfinal + self.variant = variant + + if 'SOURCE_DATE_EPOCH' in os.environ: + now = datetime.datetime.utcfromtimestamp( + int(os.environ['SOURCE_DATE_EPOCH'])) + else: + now = datetime.datetime.now() + now = now.strftime("%Y%m%d%H%M") + self.uuid = "{0}.{1}".format(now, buildarch) + +
[docs] def write(self, outfile): + # get lorax version + try: + import pylorax.version + except ImportError: + vernum = "devel" + else: + vernum = pylorax.version.num + + logger.info("writing .buildstamp file") + with open(outfile, "w") as fobj: + fobj.write("[Main]\n") + fobj.write("Product={0.product}\n".format(self)) + fobj.write("Version={0.version}\n".format(self)) + fobj.write("BugURL={0.bugurl}\n".format(self)) + fobj.write("IsFinal={0.isfinal}\n".format(self)) + fobj.write("UUID={0.uuid}\n".format(self)) + if self.variant: + fobj.write("Variant={0.variant}\n".format(self)) + fobj.write("[Compose]\n") + fobj.write("Lorax={0}\n".format(vernum))
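Because the uuid embeds the build timestamp, honoring SOURCE_DATE_EPOCH makes the stamp reproducible. A minimal usage sketch (the product values are illustrative):

    import os
    os.environ["SOURCE_DATE_EPOCH"] = "1577836800"   # 2020-01-01 00:00 UTC
    stamp = BuildStamp("Fedora", "32", "https://bugzilla.redhat.com",
                       isfinal=True, buildarch="x86_64", variant="Everything")
    stamp.uuid                                       # "202001010000.x86_64"
    stamp.write("/tmp/.buildstamp")                  # INI-style [Main]/[Compose] sections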
+
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/cmdline.html b/f32-branch/_modules/pylorax/cmdline.html new file mode 100644 index 00000000..b485e68e --- /dev/null +++ b/f32-branch/_modules/pylorax/cmdline.html @@ -0,0 +1,522 @@
+pylorax.cmdline — Lorax 32.12 documentation
Source code for pylorax.cmdline

+#
+# cmdline.py
+#
+# Copyright (C) 2016  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Brian C. Lane <bcl@redhat.com>
+
+import os
+import sys
+import argparse
+
+from pylorax import vernum
+
+version = "{0}-{1}".format(os.path.basename(sys.argv[0]), vernum)
+
+
[docs]def lorax_parser(dracut_default=""): + """ Return the ArgumentParser for lorax""" + + parser = argparse.ArgumentParser(description="Create the Anaconda boot.iso") + + # required arguments for image creation + required = parser.add_argument_group("required arguments") + required.add_argument("-p", "--product", help="product name", required=True, metavar="PRODUCT") + required.add_argument("-v", "--version", help="version identifier", required=True, metavar="VERSION") + required.add_argument("-r", "--release", help="release information", required=True, metavar="RELEASE") + required.add_argument("-s", "--source", help="source repository (may be listed multiple times)", + metavar="REPOSITORY", action="append", default=[]) + required.add_argument("--repo", help="source dnf repository file", type=os.path.abspath, + dest="repos", metavar="REPOSITORY", action="append", default=[]) + + # optional arguments + optional = parser.add_argument_group("optional arguments") + optional.add_argument("-m", "--mirrorlist", + help="mirrorlist repository (may be listed multiple times)", + metavar="REPOSITORY", action="append", default=[]) + optional.add_argument("-t", "--variant", default="", + help="variant name", metavar="VARIANT") + optional.add_argument("-b", "--bugurl", + help="bug reporting URL for the product", metavar="URL", + default="your distribution provided bug reporting tool") + optional.add_argument("--isfinal", help="", + action="store_true", default=False, dest="isfinal") + optional.add_argument("-c", "--config", default="/etc/lorax/lorax.conf", + help="config file", metavar="CONFIGFILE") + optional.add_argument("--proxy", default=None, + help="repo proxy url:port", metavar="HOST") + optional.add_argument("-i", "--installpkgs", default=[], + action="append", metavar="PACKAGE", + help="package glob to install before runtime-install.tmpl runs. (may be listed multiple times)") + optional.add_argument("-e", "--excludepkgs", default=[], + action="append", metavar="PACKAGE", + help="package glob to remove before runtime-install.tmpl runs. (may be listed multiple times)") + optional.add_argument("--buildarch", default=None, + help="build architecture", metavar="ARCH") + optional.add_argument("--volid", default=None, + help="volume id", metavar="VOLID") + optional.add_argument("--macboot", help="", + action="store_true", default=True, dest="domacboot") + optional.add_argument("--nomacboot", help="", + action="store_false", dest="domacboot") + optional.add_argument("--noupgrade", help="", + action="store_false", default=True, dest="doupgrade") + optional.add_argument("--logfile", default="./lorax.log", type=os.path.abspath, + help="Path to logfile") + optional.add_argument("--tmp", default="/var/tmp/lorax", + help="Top level temporary directory" ) + optional.add_argument("--cachedir", default=None, type=os.path.abspath, + help="DNF cache directory. Default is a temporary dir.") + optional.add_argument("--workdir", default=None, type=os.path.abspath, + help="Work directory, overrides --tmp. 
Default is a temporary dir under /var/tmp/lorax") + optional.add_argument("--force", default=False, action="store_true", + help="Run even when the destination directory exists") + optional.add_argument("--add-template", dest="add_templates", + action="append", help="Additional template for runtime image", + default=[]) + optional.add_argument("--add-template-var", dest="add_template_vars", + action="append", help="Set variable for runtime image template", + default=[]) + optional.add_argument("--add-arch-template", dest="add_arch_templates", + action="append", help="Additional template for architecture-specific image", + default=[]) + optional.add_argument("--add-arch-template-var", dest="add_arch_template_vars", + action="append", help="Set variable for architecture-specific image", + default=[]) + optional.add_argument("--noverify", action="store_false", default=True, dest="verify", + help="Do not verify the install root") + optional.add_argument("--sharedir", metavar="SHAREDIR", type=os.path.abspath, + help="Directory containing all the templates. Overrides config file sharedir") + optional.add_argument("--enablerepo", action="append", default=[], dest="enablerepos", + metavar="[repo]", help="Names of repos to enable") + optional.add_argument("--disablerepo", action="append", default=[], dest="disablerepos", + metavar="[repo]", help="Names of repos to disable") + optional.add_argument("--rootfs-size", type=int, default=2, + help="Size of root filesystem in GiB. Defaults to 2.") + optional.add_argument("--noverifyssl", action="store_true", default=False, + help="Do not verify SSL certificates") + optional.add_argument("--dnfplugin", action="append", default=[], dest="dnfplugins", + help="Enable a DNF plugin by name/glob, or * to enable all of them.") + optional.add_argument("--squashfs-only", action="store_true", default=False, + help="Use a plain squashfs filesystem for the runtime.") + optional.add_argument("--skip-branding", action="store_true", default=False, + help="Disable automatic branding package selection. Use --installpkgs to add custom branding.") + + # dracut arguments + dracut_group = parser.add_argument_group("dracut arguments: (default: %s)" % dracut_default) + dracut_group.add_argument("--dracut-conf", + help="Path to a dracut.conf file to use instead of the " + "default arguments. See the dracut.conf(5) manpage.") + dracut_group.add_argument("--dracut-arg", action="append", dest="dracut_args", + help="Argument to pass to dracut when " + "rebuilding the initramfs. Pass this " + "once for each argument. NOTE: this " + "overrides the defaults.") + + # add the show version option + parser.add_argument("-V", help="show program's version number and exit", + action="version", version=version) + + parser.add_argument("outputdir", help="Output directory", metavar="OUTPUTDIR", type=os.path.abspath) + + return parser
+ + +
[docs]def lmc_parser(dracut_default=""): + """ Return a ArgumentParser object for live-media-creator.""" + parser = argparse.ArgumentParser(description="Create Live Install Media", + fromfile_prefix_chars="@") + + # These are mutually exclusive, one is required + action = parser.add_mutually_exclusive_group(required=True) + action.add_argument("--make-iso", action="store_true", + help="Build a live iso") + action.add_argument("--make-disk", action="store_true", + help="Build a partitioned disk image") + action.add_argument("--make-fsimage", action="store_true", + help="Build a filesystem image") + action.add_argument("--make-appliance", action="store_true", + help="Build an appliance image and XML description") + action.add_argument("--make-ami", action="store_true", + help="Build an ami image") + action.add_argument("--make-tar", action="store_true", + help="Build a tar of the root filesystem") + action.add_argument("--make-tar-disk", action="store_true", + help="Build a tar of a partitioned disk image") + action.add_argument("--make-pxe-live", action="store_true", + help="Build a live pxe boot squashfs image") + action.add_argument("--make-ostree-live", action="store_true", + help="Build a live pxe boot squashfs image of Atomic Host") + action.add_argument("--make-oci", action="store_true", + help="Build an Open Container Initiative image") + action.add_argument("--make-vagrant", action="store_true", + help="Build a Vagrant Box image") + + parser.add_argument("--iso", type=os.path.abspath, + help="Anaconda installation .iso path to use for qemu") + parser.add_argument("--iso-only", action="store_true", + help="Remove all iso creation artifacts except the boot.iso, " + "combine with --iso-name to rename the boot.iso") + parser.add_argument("--iso-name", default=None, + help="Name of output iso file for --iso-only. Default is boot.iso") + parser.add_argument("--ks", action="append", type=os.path.abspath, + help="Kickstart file defining the install.") + parser.add_argument("--image-only", action="store_true", + help="Exit after creating fs/disk image.") + + parser.add_argument("--no-virt", action="store_true", + help="Run anaconda directly on host instead of using qemu") + parser.add_argument("--proxy", + help="proxy URL to use for the install") + parser.add_argument("--anaconda-arg", action="append", dest="anaconda_args", + help="Additional argument to pass to anaconda (no-virt " + "mode). Pass once for each argument") + parser.add_argument("--armplatform", + help="the platform to use when creating images for ARM, " + "i.e., highbank, mvebu, omap, tegra, etc.") + parser.add_argument("--location", default=None, type=os.path.abspath, + help="location of iso directory tree with initrd.img " + "and vmlinuz. Used to run qemu with a newer initrd " + "than the iso.") + + parser.add_argument("--logfile", default="./livemedia.log", + type=os.path.abspath, + help="Name and path for primary logfile, other logs will " + "be created in the same directory.") + parser.add_argument("--lorax-templates", default=None, + type=os.path.abspath, + help="Path to mako templates for lorax") + parser.add_argument("--tmp", default="/var/tmp", type=os.path.abspath, + help="Top level temporary directory") + parser.add_argument("--resultdir", default=None, dest="result_dir", + type=os.path.abspath, + help="Directory to copy the resulting images and iso into. 
" + "Defaults to the temporary working directory") + + parser.add_argument("--macboot", action="store_true", default=True, + dest="domacboot") + parser.add_argument("--nomacboot", action="store_false", + dest="domacboot") + + parser.add_argument("--extra-boot-args", default="", dest="extra_boot_args", + help="Extra arguments to add to the bootloader kernel cmdline in the templates") + + image_group = parser.add_argument_group("disk/fs image arguments") + image_group.add_argument("--disk-image", type=os.path.abspath, + help="Path to existing disk image to use for creating final image.") + image_group.add_argument("--keep-image", action="store_true", + help="Keep raw disk image after .iso creation") + image_group.add_argument("--fs-image", type=os.path.abspath, + help="Path to existing filesystem image to use for creating final image.") + image_group.add_argument("--image-name", default=None, + help="Name of output file to create. Used for tar, fs and disk image. Default is a random name.") + image_group.add_argument("--tar-disk-name", default=None, + help="Name of the archive member for make-tar-disk.") + image_group.add_argument("--fs-label", default="Anaconda", + help="Label to set on fsimage, default is 'Anaconda'") + image_group.add_argument("--image-size-align", type=int, default=0, + help="Create a disk image with a size that is a multiple of this value in MiB.") + image_group.add_argument("--image-type", default=None, + help="Create an image with qemu-img. See qemu-img --help for supported formats.") + image_group.add_argument("--qemu-arg", action="append", dest="qemu_args", default=[], + help="Arguments to pass to qemu-img. Pass once for each argument, they will be used for ALL calls to qemu-img.") + image_group.add_argument("--qcow2", action="store_true", + help="Create qcow2 image instead of raw sparse image when making disk images.") + image_group.add_argument("--qcow2-arg", action="append", dest="qemu_args", default=[], + help="Arguments to pass to qemu-img. Pass once for each argument, they will be used for ALL calls to qemu-img.") + image_group.add_argument("--compression", default="xz", + help="Compression binary for make-tar. xz, lzma, gzip, and bzip2 are supported. xz is the default.") + image_group.add_argument("--compress-arg", action="append", dest="compress_args", default=[], + help="Arguments to pass to compression. Pass once for each argument") + # Group of arguments for appliance creation + app_group = parser.add_argument_group("appliance arguments") + app_group.add_argument("--app-name", default=None, + help="Name of appliance to pass to template") + app_group.add_argument("--app-template", default=None, + help="Path to template to use for appliance data.") + app_group.add_argument("--app-file", default="appliance.xml", + help="Appliance template results file.") + + # Group of arguments to pass to qemu + virt_group = parser.add_argument_group("qemu arguments") + virt_group.add_argument("--ram", metavar="MEMORY", type=int, default=2048, + help="Memory to allocate for installer in megabytes.") + virt_group.add_argument("--vcpus", type=int, default=None, + help="Passed to qemu -smp command") + virt_group.add_argument("--vnc", + help="Passed to qemu -display command. eg. vnc=127.0.0.1:5, default is to " + "choose the first unused vnc port.") + virt_group.add_argument("--arch", default=None, + help="System arch to build for. Used to select qemu-system-* command. 
" + "Defaults to qemu-system-<arch>") + virt_group.add_argument("--kernel-args", + help="Additional argument to pass to the installation kernel") + virt_group.add_argument("--ovmf-path", default="/usr/share/edk2/ovmf/", + help="Path to OVMF firmware") + virt_group.add_argument("--virt-uefi", action="store_true", default=False, + help="Use OVMF firmware to boot the VM in UEFI mode") + virt_group.add_argument("--no-kvm", action="store_true", default=False, + help="Skip using kvm with qemu even if it is available.") + virt_group.add_argument("--with-rng", default="/dev/random", + help="RNG device for QEMU (none for no RNG)") + + # dracut arguments + dracut_group = parser.add_argument_group("dracut arguments: (default: %s)" % dracut_default) + dracut_group.add_argument("--dracut-conf", + help="Path to a dracut.conf file to use instead of the " + "default arguments. See the dracut.conf(5) manpage.") + dracut_group.add_argument("--dracut-arg", action="append", dest="dracut_args", + help="Argument to pass to dracut when " + "rebuilding the initramfs. Pass this " + "once for each argument. NOTE: this " + "overrides the defaults.") + + # pxe to live arguments + pxelive_group = parser.add_argument_group("pxe to live arguments") + pxelive_group.add_argument("--live-rootfs-size", type=int, default=0, + help="Size of root filesystem of live image in GiB") + pxelive_group.add_argument("--live-rootfs-keep-size", action="store_true", + help="Keep the original size of root filesystem in live image") + + # OCI specific commands + oci_group = parser.add_argument_group("OCI arguments") + oci_group.add_argument("--oci-config", + help="config.json OCI configuration file") + oci_group.add_argument("--oci-runtime", + help="runtime.json OCI configuration file") + + # Vagrant specific commands + vagrant_group = parser.add_argument_group("Vagrant arguments") + vagrant_group.add_argument("--vagrant-metadata", + help="optional metadata.json file") + vagrant_group.add_argument("--vagrantfile", + help="optional vagrantfile") + + parser.add_argument("--project", default="Linux", + help="substituted for @PROJECT@ in bootloader config files") + parser.add_argument("--releasever", default="32", + help="substituted for @VERSION@ in bootloader config files") + parser.add_argument("--volid", default=None, help="volume id") + parser.add_argument("--squashfs-only", action="store_true", default=False, + help="Use a plain squashfs filesystem for the runtime.") + parser.add_argument("--timeout", default=None, type=int, + help="Cancel installer after X minutes") + + # add the show version option + parser.add_argument("-V", help="show program's version number and exit", + action="version", version=version) + + return parser
+
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/creator.html b/f32-branch/_modules/pylorax/creator.html new file mode 100644 index 00000000..4f3c2da4 --- /dev/null +++ b/f32-branch/_modules/pylorax/creator.html @@ -0,0 +1,952 @@
+pylorax.creator — Lorax 32.12 documentation
Source code for pylorax.creator

+#
+# Copyright (C) 2011-2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("pylorax")
+
+import os
+import tempfile
+import subprocess
+import shutil
+import hashlib
+import glob
+
+# Use Mako templates for appliance builder descriptions
+from mako.template import Template
+from mako.exceptions import text_error_template
+
+# Use pykickstart to calculate disk image size
+from pykickstart.parser import KickstartParser
+from pykickstart.constants import KS_SHUTDOWN
+from pykickstart.version import makeVersion
+
+# Use the Lorax treebuilder branch for iso creation
+from pylorax import ArchData
+from pylorax.base import DataHolder
+from pylorax.executils import execWithRedirect, runcmd
+from pylorax.imgutils import PartitionMount
+from pylorax.imgutils import mount, umount, Mount
+from pylorax.imgutils import mksquashfs, mkrootfsimg
+from pylorax.imgutils import copytree
+from pylorax.installer import novirt_install, virt_install, InstallError
+from pylorax.treebuilder import TreeBuilder, RuntimeBuilder
+from pylorax.treebuilder import findkernels
+from pylorax.sysutils import joinpaths, remove
+
+
+# Default parameters for rebuilding initramfs, override with --dracut-arg or --dracut-conf
+DRACUT_DEFAULT = ["--xz", "--add", "livenet dmsquash-live dmsquash-live-ntfs convertfs pollcdrom qemu qemu-net",
+                  "--omit", "plymouth", "--no-hostonly", "--debug", "--no-early-microcode"]
+
+RUNTIME = "images/install.img"
+
+
[docs]class FakeDNF(object):
+    """
+    A minimal DNF object suitable for passing to RuntimeBuilder
+
+    lmc uses RuntimeBuilder to run the arch specific iso creation
+    templates, so the installroot config value is the important part of
+    this. Everything else should be a nop.
+    """
+    def __init__(self, conf):
+        self.conf = conf
+
[docs] def reset(self): + pass
+ +
[docs]def is_image_mounted(disk_img): + """ + Check to see if the disk_img is mounted + + :returns: True if disk_img is in /proc/mounts + :rtype: bool + """ + with open("/proc/mounts") as mounts: + for mnt in mounts: + fields = mnt.split() + if len(fields) > 2 and fields[1] == disk_img: + return True + return False
+ +
[docs]def find_ostree_root(phys_root): + """ + Find root of ostree deployment + + :param str phys_root: Path to physical root + :returns: Relative path of ostree deployment root + :rtype: str + :raise Exception: More than one deployment roots were found + """ + ostree_root = "" + ostree_sysroots = glob.glob(joinpaths(phys_root, "ostree/boot.?/*/*/0")) + log.debug("ostree_sysroots = %s", ostree_sysroots) + if ostree_sysroots: + if len(ostree_sysroots) > 1: + raise Exception("Too many deployment roots found: %s" % ostree_sysroots) + ostree_root = os.path.relpath(ostree_sysroots[0], phys_root) + return ostree_root
+ +
[docs]def get_arch(mount_dir): + """ + Get the kernel arch + + :returns: Arch of first kernel found at mount_dir/boot/ or i386 + :rtype: str + """ + kernels = findkernels(mount_dir) + if not kernels: + return "i386" + return kernels[0].arch
+ +
[docs]def squashfs_args(opts): + """ Returns the compression type and args to use when making squashfs + + :param opts: ArgumentParser object with compression, compress_args and arch + :returns: tuple of compression type and args + :rtype: tuple + """ + compression = opts.compression or "xz" + arch = ArchData(opts.arch or os.uname().machine) + if compression == "xz" and arch.bcj and not opts.compress_args: + # default to bcj when using xz + compressargs = ["-Xbcj", arch.bcj] + elif opts.compress_args: + compressargs = [] + for arg in opts.compress_args: + compressargs += arg.split(" ", 1) + else: + compressargs = [] + return (compression, compressargs)
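For illustration, a sketch of how the options map to mksquashfs arguments; SimpleNamespace stands in for the real argparse options and the values are hypothetical:

    from types import SimpleNamespace
    from pylorax.creator import squashfs_args

    opts = SimpleNamespace(compression="xz", compress_args=[], arch="x86_64")
    compression, compressargs = squashfs_args(opts)
    print(compression, compressargs)  # expected on x86_64: xz ['-Xbcj', 'x86']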
+ +
[docs]def dracut_args(opts): + """Return a list of the args to pass to dracut + + Return the default argument list unless one of the dracut cmdline arguments + has been used. + """ + if opts.dracut_conf: + return ["--conf", opts.dracut_conf] + elif opts.dracut_args: + args = [] + for arg in opts.dracut_args: + args += arg.split(" ", 1) + return args + else: + return DRACUT_DEFAULT
+ +
[docs]def make_appliance(disk_img, name, template, outfile, networks=None, ram=1024, + vcpus=1, arch=None, title="Linux", project="Linux", + releasever="32"): + """ + Generate an appliance description file + + :param str disk_img: Full path of the disk image + :param str name: Name of the appliance, passed to the template + :param str template: Full path of Mako template + :param str outfile: Full path of file to write, using template + :param list networks: List of networks(str) from the kickstart + :param int ram: Ram, in MiB, passed to template. Default is 1024 + :param int vcpus: CPUs, passed to template. Default is 1 + :param str arch: CPU architecture. Default is 'x86_64' + :param str title: Title, passed to template. Default is 'Linux' + :param str project: Project, passed to template. Default is 'Linux' + :param str releasever: Release version, passed to template. Default is 32 + """ + if not (disk_img and template and outfile): + return None + + log.info("Creating appliance definition using %s", template) + + if not arch: + arch = "x86_64" + + log.info("Calculating SHA256 checksum of %s", disk_img) + sha256 = hashlib.sha256() + with open(disk_img, "rb") as f: + while True: + data = f.read(1024**2) + if not data: + break + sha256.update(data) + log.info("SHA256 of %s is %s", disk_img, sha256.hexdigest()) + disk_info = DataHolder(name=os.path.basename(disk_img), format="raw", + checksum_type="sha256", checksum=sha256.hexdigest()) + try: + result = Template(filename=template).render(disks=[disk_info], name=name, + arch=arch, memory=ram, vcpus=vcpus, networks=networks, + title=title, project=project, releasever=releasever) + except Exception: + log.error(text_error_template().render()) + raise + + with open(outfile, "w") as f: + f.write(result)
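The chunked SHA256 pattern used above also works standalone; a minimal sketch, with a hypothetical file path:

    import hashlib

    sha256 = hashlib.sha256()
    with open("disk.img", "rb") as f:  # hypothetical image path
        # Read 1 MiB at a time so large images do not fill memory
        for chunk in iter(lambda: f.read(1024**2), b""):
            sha256.update(chunk)
    print(sha256.hexdigest())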
+ + +
[docs]def make_runtime(opts, mount_dir, work_dir, size=None): + """ + Make the squashfs image from a directory + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str mount_dir: Directory tree to compress + :param str work_dir: Output compressed image to work_dir+images/install.img + :param int size: Size of disk image, in GiB + """ + kernel_arch = get_arch(mount_dir) + + # Fake dnf object + fake_dbo = FakeDNF(conf=DataHolder(installroot=mount_dir)) + # Fake arch with only basearch set + arch = ArchData(kernel_arch) + # TODO: Need to get release info from someplace... + product = DataHolder(name=opts.project, version=opts.releasever, release="", + variant="", bugurl="", isfinal=False) + + rb = RuntimeBuilder(product, arch, fake_dbo) + compression, compressargs = squashfs_args(opts) + + if opts.squashfs_only: + log.info("Creating a squashfs only runtime") + rb.create_squashfs_runtime(joinpaths(work_dir, RUNTIME), size=size, + compression=compression, compressargs=compressargs) + else: + log.info("Creating a squashfs+ext4 runtime") + rb.create_ext4_runtime(joinpaths(work_dir, RUNTIME), size=size, + compression=compression, compressargs=compressargs)
+ + +
[docs]def rebuild_initrds_for_live(opts, sys_root_dir, results_dir): + """ + Rebuild initrds for pxe live image (root=live:http://) + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str sys_root_dir: Path to root of the system + :param str results_dir: Path of directory for storing results + """ + # cmdline dracut args override the defaults, but need to be parsed + log.info("dracut args = %s", dracut_args(opts)) + + dracut = ["dracut", "--nomdadmconf", "--nolvmconf"] + dracut_args(opts) + + kdir = "boot" + if opts.ostree: + kernels_dir = glob.glob(joinpaths(sys_root_dir, "boot/ostree/*")) + if kernels_dir: + kdir = os.path.relpath(kernels_dir[0], sys_root_dir) + + kernels = [kernel for kernel in findkernels(sys_root_dir, kdir)] + if not kernels: + raise Exception("No initrds found, cannot rebuild_initrds") + + # Hush some dracut warnings. TODO: bind-mount proc in place? + open(joinpaths(sys_root_dir, "/proc/modules"), "w") + + if opts.ostree: + # Dracut assumes some directories exist in the disk image + # /var/tmp for temp files + vartmp_dir = joinpaths(sys_root_dir, "var/tmp") + if not os.path.isdir(vartmp_dir): + os.mkdir(vartmp_dir) + # /root (maybe not fatal) + root_dir = joinpaths(sys_root_dir, "var/roothome") + if not os.path.isdir(root_dir): + os.mkdir(root_dir) + # /tmp (maybe not fatal) + tmp_dir = joinpaths(sys_root_dir, "sysroot/tmp") + if not os.path.isdir(tmp_dir): + os.mkdir(tmp_dir) + + # Write the new initramfs directly to the results directory + os.mkdir(joinpaths(sys_root_dir, "results")) + mount(results_dir, opts="bind", mnt=joinpaths(sys_root_dir, "results")) + # Dracut runs out of space inside the minimal rootfs image + mount("/var/tmp", opts="bind", mnt=joinpaths(sys_root_dir, "var/tmp")) + for kernel in kernels: + if hasattr(kernel, "initrd"): + outfile = os.path.basename(kernel.initrd.path) + else: + # Construct an initrd from the kernel name + outfile = os.path.basename(kernel.path.replace("vmlinuz-", "initrd-") + ".img") + log.info("rebuilding %s", outfile) + + kver = kernel.version + + cmd = dracut + ["/results/"+outfile, kver] + runcmd(cmd, root=sys_root_dir) + + shutil.copy2(joinpaths(sys_root_dir, kernel.path), results_dir) + umount(joinpaths(sys_root_dir, "var/tmp"), delete=False) + umount(joinpaths(sys_root_dir, "results"), delete=False) + os.unlink(joinpaths(sys_root_dir, "/proc/modules"))
+ +
[docs]def create_pxe_config(template, images_dir, live_image_name, add_args=None): + """ + Create template for pxe to live configuration + + :param str template: Path of the Mako template to render + :param str images_dir: Path of directory with images to be used + :param str live_image_name: Name of live rootfs image file + :param list add_args: Arguments to be added to initrd= pxe config + """ + + add_args = add_args or [] + + kernels = [kernel for kernel in findkernels(images_dir, kdir="") + if hasattr(kernel, "initrd")] + if not kernels: + return + + kernel = kernels[0] + + add_args_str = " ".join(add_args) + + try: + result = Template(filename=template).render(kernel=kernel.path, + initrd=kernel.initrd.path, liveimg=live_image_name, + addargs=add_args_str) + except Exception: + log.error(text_error_template().render()) + raise + + with open(joinpaths(images_dir, "PXE_CONFIG"), "w") as f: + f.write(result)
+ + +
[docs]def make_livecd(opts, mount_dir, work_dir): + """ + Take the content from the disk image and make a livecd out of it + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str mount_dir: Directory tree to compress + :param str work_dir: Directory to write the boot.iso and images to + + This uses wwood's squashfs live initramfs method: + * put the real / into LiveOS/rootfs.img + * make a squashfs of the LiveOS/rootfs.img tree + * This is loaded by dracut when the cmdline is passed to the kernel: + root=live:CDLABEL=<volid> rd.live.image + """ + kernel_arch = get_arch(mount_dir) + + arch = ArchData(kernel_arch) + # TODO: Need to get release info from someplace... + product = DataHolder(name=opts.project, version=opts.releasever, release="", + variant="", bugurl="", isfinal=False) + + # Link /images to work_dir/images to make the templates happy + if os.path.islink(joinpaths(mount_dir, "images")): + os.unlink(joinpaths(mount_dir, "images")) + rc = execWithRedirect("/bin/ln", ["-s", joinpaths(work_dir, "images"), + joinpaths(mount_dir, "images")]) + if rc: + raise RuntimeError("Failed to symlink images from mount_dir to work_dir") + + # The templates expect the config files to be in /tmp/config_files + # I think these should be release specific, not from lorax, but for now + configdir = joinpaths(opts.lorax_templates,"live/config_files/") + configdir_path = "tmp/config_files" + fullpath = joinpaths(mount_dir, configdir_path) + if os.path.exists(fullpath): + remove(fullpath) + copytree(configdir, fullpath) + + isolabel = opts.volid or "{0.name}-{0.version}-{1.basearch}".format(product, arch) + if len(isolabel) > 32: + isolabel = isolabel[:32] + log.warning("Truncating isolabel to 32 chars: %s", isolabel) + + tb = TreeBuilder(product=product, arch=arch, domacboot=opts.domacboot, + inroot=mount_dir, outroot=work_dir, + runtime=RUNTIME, isolabel=isolabel, + templatedir=joinpaths(opts.lorax_templates,"live/"), + extra_boot_args=opts.extra_boot_args) + log.info("Rebuilding initrds") + log.info("dracut args = %s", dracut_args(opts)) + tb.rebuild_initrds(add_args=dracut_args(opts)) + log.info("Building boot.iso") + tb.build() + + return work_dir
+ +
[docs]def mount_boot_part_over_root(img_mount): + """ + Mount boot partition to /boot of root fs mounted in img_mount + + Used for OSTree so it finds deployment configurations on live rootfs + + :param img_mount: object with mounted disk image root partition + :type img_mount: imgutils.PartitionMount + """ + root_dir = img_mount.mount_dir + is_boot_part = lambda dir: os.path.exists(dir+"/loader.0") + tmp_mount_dir = tempfile.mkdtemp(prefix="lmc-tmpdir-") + sysroot_boot_dir = None + for dev, _size in img_mount.loop_devices: + if dev is img_mount.mount_dev: + continue + try: + mount("/dev/mapper/"+dev, mnt=tmp_mount_dir) + if is_boot_part(tmp_mount_dir): + umount(tmp_mount_dir) + sysroot_boot_dir = joinpaths(root_dir, "boot") + mount("/dev/mapper/"+dev, mnt=sysroot_boot_dir) + break + else: + umount(tmp_mount_dir) + except subprocess.CalledProcessError as e: + log.debug("Looking for boot partition error: %s", e) + remove(tmp_mount_dir) + return sysroot_boot_dir
+ +
[docs]def calculate_disk_size(opts, ks): + """ Calculate the disk size from the kickstart + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param ks: Parsed kickstart object to use for the installation + :type ks: pykickstart.parser.KickstartParser + :returns: Disk size in MiB + :rtype: int + + Also takes into account the use of reqpart or reqpart --add-boot + """ + # Disk size for a filesystem image should only be the size of / + # to prevent surprises when using the same kickstart for different installations. + unique_partitions = dict((p.mountpoint, p) for p in ks.handler.partition.partitions) + if opts.no_virt and (opts.make_iso or opts.make_fsimage): + disk_size = 2 + sum(p.size for p in unique_partitions.values() if p.mountpoint == "/") + else: + disk_size = 2 + sum(p.size for p in unique_partitions.values()) + + # reqpart can add 1M, 2M, 200M based on platform. Add 500M to be sure + if ks.handler.reqpart.seen: + log.info("Adding 500M for reqpart") + disk_size += 500 + + # It can also request adding /boot which is 1G + if ks.handler.reqpart.addBoot: + log.info("Adding 1024M for reqpart --addboot") + disk_size += 1024 + + if opts.image_size_align: + disk_size += opts.image_size_align - (disk_size % opts.image_size_align) + + log.info("Using disk size of %sMiB", disk_size) + return disk_size
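A sketch of feeding it a parsed kickstart, mirroring what run_creator does below; the kickstart path and option values are hypothetical:

    from types import SimpleNamespace
    from pykickstart.parser import KickstartParser
    from pykickstart.version import makeVersion
    from pylorax.creator import calculate_disk_size

    ks = KickstartParser(makeVersion())
    ks.readKickstart("fedora-live.ks")  # hypothetical kickstart path

    opts = SimpleNamespace(no_virt=True, make_iso=True, make_fsimage=False,
                           image_size_align=0)  # hypothetical option values
    print("%d MiB" % calculate_disk_size(opts, ks))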
+ +
[docs]def make_image(opts, ks, cancel_func=None): + """ + Install to a disk image + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param ks: Parsed kickstart object to use for the installation + :type ks: pykickstart.parser.KickstartParser + :param cancel_func: Function that returns True to cancel build + :type cancel_func: function + :returns: Path of the image created + :rtype: str + + Use qemu+boot.iso or anaconda to install to a disk image. + """ + + # For make_tar_disk, opts.image_name is the name of the final tarball. + # Use opts.tar_disk_name as the name of the disk image + if opts.make_tar_disk: + disk_img = joinpaths(opts.result_dir, opts.tar_disk_name) + elif opts.image_name: + disk_img = joinpaths(opts.result_dir, opts.image_name) + else: + disk_img = tempfile.mktemp(prefix="lmc-disk-", suffix=".img", dir=opts.result_dir) + log.info("disk_img = %s", disk_img) + disk_size = calculate_disk_size(opts, ks) + + # For make_tar_disk, pass a second path parameter for the final tarball + # not the final output file. + if opts.make_tar_disk: + tar_img = joinpaths(opts.result_dir, opts.image_name) + else: + tar_img = None + + try: + if opts.no_virt: + novirt_install(opts, disk_img, disk_size, cancel_func=cancel_func, tar_img=tar_img) + else: + install_log = os.path.abspath(os.path.dirname(opts.logfile))+"/virt-install.log" + log.info("install_log = %s", install_log) + + virt_install(opts, install_log, disk_img, disk_size, cancel_func=cancel_func, tar_img=tar_img) + except InstallError as e: + log.error("Install failed: %s", e) + if not opts.keep_image: + if os.path.exists(disk_img): + log.info("Removing bad disk image") + os.unlink(disk_img) + if tar_img and os.path.exists(tar_img): + log.info("Removing bad tar file") + os.unlink(tar_img) + raise + + log.info("Disk Image install successful") + + if opts.make_tar_disk: + return tar_img + + return disk_img
+ + +
[docs]def make_live_images(opts, work_dir, disk_img): + """ + Create live images from a directory or rootfs image + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str work_dir: Directory for storing results + :param str disk_img: Path to disk image (fsimage or partitioned) + :returns: Path of directory with created images or None + :rtype: str + + fsck.ext4 is run on the rootfs_image to make sure there are no errors and to zero + out any deleted blocks to make it compress better. If this fails for any reason + it will return None and log the error. + """ + sys_root = "" + + squashfs_root_dir = joinpaths(work_dir, "squashfs_root") + liveos_dir = joinpaths(squashfs_root_dir, "LiveOS") + os.makedirs(liveos_dir) + rootfs_img = joinpaths(liveos_dir, "rootfs.img") + + if opts.fs_image or opts.no_virt: + # Find the ostree root in the fsimage + if opts.ostree: + with Mount(disk_img, opts="loop") as mnt_dir: + sys_root = find_ostree_root(mnt_dir) + + # Try to hardlink the image, if that fails, copy it + rc = execWithRedirect("/bin/ln", [disk_img, rootfs_img]) + if rc != 0: + shutil.copy2(disk_img, rootfs_img) + else: + is_root_part = None + if opts.ostree: + is_root_part = lambda dir: os.path.exists(dir+"/ostree/deploy") + with PartitionMount(disk_img, mount_ok=is_root_part) as img_mount: + if img_mount and img_mount.mount_dir: + try: + mounted_sysroot_boot_dir = None + if opts.ostree: + sys_root = find_ostree_root(img_mount.mount_dir) + mounted_sysroot_boot_dir = mount_boot_part_over_root(img_mount) + if opts.live_rootfs_keep_size: + size = img_mount.mount_size / 1024**3 + else: + size = opts.live_rootfs_size or None + log.info("Creating live rootfs image") + mkrootfsimg(img_mount.mount_dir, rootfs_img, "LiveOS", size=size, sysroot=sys_root) + finally: + if mounted_sysroot_boot_dir: + umount(mounted_sysroot_boot_dir) + log.debug("sys_root = %s", sys_root) + + # Make sure free blocks are actually zeroed so it will compress + rc = execWithRedirect("/usr/sbin/fsck.ext4", ["-y", "-f", "-E", "discard", rootfs_img]) + if rc != 0: + log.error("Problem zeroing free blocks of %s", disk_img) + return None + + log.info("Packing live rootfs image") + add_pxe_args = [] + live_image_name = "live-rootfs.squashfs.img" + compression, compressargs = squashfs_args(opts) + mksquashfs(squashfs_root_dir, joinpaths(work_dir, live_image_name), compression, compressargs) + + log.info("Rebuilding initramfs for live") + with Mount(rootfs_img, opts="loop") as mnt_dir: + try: + mount(joinpaths(mnt_dir, "boot"), opts="bind", mnt=joinpaths(mnt_dir, sys_root, "boot")) + rebuild_initrds_for_live(opts, joinpaths(mnt_dir, sys_root), work_dir) + finally: + umount(joinpaths(mnt_dir, sys_root, "boot"), delete=False) + + remove(squashfs_root_dir) + + if opts.ostree: + add_pxe_args.append("ostree=/%s" % sys_root) + template = joinpaths(opts.lorax_templates, "pxe-live/pxe-config.tmpl") + create_pxe_config(template, work_dir, live_image_name, add_pxe_args) + + return work_dir
+ +
[docs]def check_kickstart(ks, opts): + """Check the parsed kickstart object for errors + + :param ks: Parsed Kickstart object + :type ks: pykickstart.parser.KickstartParser + :param opts: Commandline options to control the process + :type opts: Either a DataHolder or ArgumentParser + :returns: List of error strings or empty list + :rtype: list + """ + errors = [] + if opts.no_virt and ks.handler.method.method not in ("url", "nfs") \ + and not ks.handler.ostreesetup.seen: + errors.append("Only url, nfs and ostreesetup install methods are currently supported. " + "Please fix your kickstart file.") + + if ks.handler.repo.seen and ks.handler.method.method != "url": + errors.append("repo can only be used with the url install method. Add url to your " + "kickstart file.") + + if ks.handler.method.method in ("url", "nfs") and not ks.handler.network.seen: + errors.append("The kickstart must activate networking if " + "the url or nfs install method is used.") + + if ks.handler.displaymode.displayMode is not None: + errors.append("The kickstart must not set a display mode (text, cmdline, " + "graphical), this will interfere with livemedia-creator.") + + if opts.make_fsimage or (opts.make_pxe_live and opts.no_virt): + # Make sure the kickstart isn't using autopart and only has a / mountpoint + part_ok = not any(p for p in ks.handler.partition.partitions + if p.mountpoint not in ["/", "swap"]) + if not part_ok or ks.handler.autopart.seen: + errors.append("Filesystem images must use a single / part, not autopart or " + "multiple partitions. swap is allowed but not used.") + + if not opts.no_virt and ks.handler.reboot.action != KS_SHUTDOWN: + errors.append("The kickstart must include shutdown when using virt installation.") + + return errors
+ +
[docs]def run_creator(opts, cancel_func=None): + """Run the image creator process + + :param opts: Commandline options to control the process + :type opts: Either a DataHolder or ArgumentParser + :param cancel_func: Function that returns True to cancel build + :type cancel_func: function + :returns: The result directory and the disk image path. + :rtype: Tuple of str + + This function takes the opts arguments and creates the selected output image. + See the cmdline --help for livemedia-creator for the possible options + + (Yes, this is not ideal, but we can fix that later) + """ + result_dir = None + + # Parse the kickstart + if opts.ks: + ks_version = makeVersion() + ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) + ks.readKickstart(opts.ks[0]) + + # live iso usually needs dracut-live so warn the user if it is missing + if opts.ks and opts.make_iso: + if "dracut-live" not in ks.handler.packages.packageList: + log.error("dracut-live package is missing from the kickstart.") + raise RuntimeError("dracut-live package is missing from the kickstart.") + + # Make the disk or filesystem image + if not opts.disk_image and not opts.fs_image: + if not opts.ks: + raise RuntimeError("Image creation requires a kickstart file") + + # Check the kickstart for problems + errors = check_kickstart(ks, opts) + if errors: + list(log.error(e) for e in errors) + raise RuntimeError("\n".join(errors)) + + # Make the image. Output of this is either a partitioned disk image or a fsimage + try: + disk_img = make_image(opts, ks, cancel_func=cancel_func) + except InstallError as e: + log.error("ERROR: Image creation failed: %s", e) + raise RuntimeError("Image creation failed: %s" % e) + + if opts.image_only: + return (result_dir, disk_img) + + if opts.make_iso: + work_dir = tempfile.mkdtemp(prefix="lmc-work-") + log.info("working dir is %s", work_dir) + + if (opts.fs_image or opts.no_virt) and not opts.disk_image: + # Create iso from a filesystem image + disk_img = opts.fs_image or disk_img + with Mount(disk_img, opts="loop") as mount_dir: + # TODO check rc + make_runtime(opts, mount_dir, work_dir, calculate_disk_size(opts, ks)/1024.0) + + if cancel_func and cancel_func(): + raise RuntimeError("ISO creation canceled") + + result_dir = make_livecd(opts, mount_dir, work_dir) + else: + # Create iso from a partitioned disk image + disk_img = opts.disk_image or disk_img + with PartitionMount(disk_img) as img_mount: + if img_mount and img_mount.mount_dir: + make_runtime(opts, img_mount.mount_dir, work_dir, calculate_disk_size(opts, ks)/1024.0) + result_dir = make_livecd(opts, img_mount.mount_dir, work_dir) + + # --iso-only removes the extra build artifacts, keeping only the boot.iso + if opts.iso_only and result_dir: + boot_iso = joinpaths(result_dir, "images/boot.iso") + if not os.path.exists(boot_iso): + log.error("%s is missing, skipping --iso-only.", boot_iso) + else: + iso_dir = tempfile.mkdtemp(prefix="lmc-result-") + dest_file = joinpaths(iso_dir, opts.iso_name or "boot.iso") + shutil.move(boot_iso, dest_file) + shutil.rmtree(result_dir) + result_dir = iso_dir + + # cleanup the mess + # cleanup work_dir? 
+ if disk_img and not (opts.keep_image or opts.disk_image or opts.fs_image): + os.unlink(disk_img) + log.info("Disk image erased") + disk_img = None + elif opts.make_appliance: + if not opts.ks: + networks = [] + else: + networks = ks.handler.network.network + make_appliance(opts.disk_image or disk_img, opts.app_name, + opts.app_template, opts.app_file, networks, opts.ram, + opts.vcpus or 1, opts.arch, opts.title, opts.project, opts.releasever) + elif opts.make_pxe_live: + work_dir = tempfile.mkdtemp(prefix="lmc-work-") + log.info("working dir is %s", work_dir) + disk_img = opts.fs_image or opts.disk_image or disk_img + log.debug("disk image is %s", disk_img) + + result_dir = make_live_images(opts, work_dir, disk_img) + if result_dir is None: + log.error("Creating PXE live image failed.") + raise RuntimeError("Creating PXE live image failed.") + + if opts.result_dir != opts.tmp and result_dir: + copytree(result_dir, opts.result_dir, preserve=False) + shutil.rmtree(result_dir) + result_dir = None + + return (result_dir, disk_img)
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/decorators.html b/f32-branch/_modules/pylorax/decorators.html new file mode 100644 index 00000000..5abd4bd1 --- /dev/null +++ b/f32-branch/_modules/pylorax/decorators.html @@ -0,0 +1,231 @@
+pylorax.decorators — Lorax 32.12 documentation

Source code for pylorax.decorators

+#
+# decorators.py
+#
+# Copyright (C) 2009-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#
+
+
[docs]def singleton(cls): + instances = {} + + def get_instance(): + if cls not in instances: + instances[cls] = cls() + return instances[cls] + + return get_instance
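A hypothetical usage sketch of the decorator; Cache is a made-up class:

    from pylorax.decorators import singleton

    @singleton
    class Cache(object):  # hypothetical example class
        def __init__(self):
            self.data = {}

    a = Cache()
    b = Cache()
    assert a is b  # both names refer to the single shared instance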
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/discinfo.html b/f32-branch/_modules/pylorax/discinfo.html new file mode 100644 index 00000000..2864b246 --- /dev/null +++ b/f32-branch/_modules/pylorax/discinfo.html @@ -0,0 +1,246 @@
+pylorax.discinfo — Lorax 32.12 documentation

Source code for pylorax.discinfo

+#
+# discinfo.py
+#
+# Copyright (C) 2010-2015  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.discinfo")
+
+import os
+import time
+
+
+
[docs]class DiscInfo(object): + + def __init__(self, release, basearch): + self.release = release + self.basearch = basearch + +
[docs] def write(self, outfile): + if 'SOURCE_DATE_EPOCH' in os.environ: + timestamp = int(os.environ['SOURCE_DATE_EPOCH']) + else: + timestamp = time.time() + + logger.info("writing .discinfo file") + with open(outfile, "w") as fobj: + fobj.write("{0:f}\n".format(timestamp)) + fobj.write("{0.release}\n".format(self)) + fobj.write("{0.basearch}\n".format(self))
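Writing a .discinfo file is then a two-liner; the values and output path are hypothetical:

    from pylorax.discinfo import DiscInfo

    DiscInfo(release="32", basearch="x86_64").write("/tmp/.discinfo")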
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/dnfbase.html b/f32-branch/_modules/pylorax/dnfbase.html new file mode 100644 index 00000000..134ab084 --- /dev/null +++ b/f32-branch/_modules/pylorax/dnfbase.html @@ -0,0 +1,388 @@
+pylorax.dnfbase — Lorax 32.12 documentation

Source code for pylorax.dnfbase

+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("pylorax")
+
+import dnf
+import os
+import shutil
+
+from pylorax import DEFAULT_PLATFORM_ID
+from pylorax.sysutils import flatconfig
+
+
[docs]def get_dnf_base_object(installroot, sources, mirrorlists=None, repos=None, + enablerepos=None, disablerepos=None, + tempdir="/var/tmp", proxy=None, releasever="32", + cachedir=None, logdir=None, sslverify=True, dnfplugins=None): + """ Create a dnf Base object and setup the repositories and installroot + + :param string installroot: Full path to the installroot + :param list sources: List of source repo urls to use for the installation + :param list enablerepos: List of repo names to enable + :param list disablerepos: List of repo names to disable + :param list mirrorlists: List of mirrorlist urls to use + :param list repos: List of .repo files to copy into a temporary reposdir + :param string tempdir: Path of temporary directory + :param string proxy: http proxy to use when fetching packages + :param string releasever: Release version to pass to dnf + :param string cachedir: Directory to use for caching packages + :param string logdir: Directory to use for the dnf logs + :param bool sslverify: Set to False to ignore the CA of ssl certs. eg. use self-signed ssl for https repos. + :param list dnfplugins: List of plugin names to enable, or ["*"] to enable them all + + If tempdir is not set /var/tmp is used. + If cachedir is None a dnf.cache directory is created inside tempdir + """ + def sanitize_repo(repo): + """Convert bare paths to file:/// URIs, and silently reject unsupported protocols""" + if repo.startswith("/"): + return "file://{0}".format(repo) + elif any(repo.startswith(p) for p in ('http://', 'https://', 'ftp://', 'file://')): + return repo + else: + return None + + mirrorlists = mirrorlists or [] + + # sanitize the repositories + sources = list(sanitize_repo(r) for r in sources) + mirrorlists = list(sanitize_repo(r) for r in mirrorlists) + + # remove invalid repositories + sources = list(r for r in sources if r) + mirrorlists = list(r for r in mirrorlists if r) + + if not cachedir: + cachedir = os.path.join(tempdir, "dnf.cache") + if not os.path.isdir(cachedir): + os.mkdir(cachedir) + + if not logdir: + logdir = os.path.join(tempdir, "dnf.logs") + if not os.path.isdir(logdir): + os.mkdir(logdir) + + dnfbase = dnf.Base() + # Enable DNF plugins + # NOTE: These come from the HOST system's environment + if dnfplugins: + if dnfplugins[0] == "*": + # Enable them all + dnfbase.init_plugins() + else: + # Only enable the listed plugins + dnfbase.init_plugins(disabled_glob=["*"], enable_plugins=dnfplugins) + conf = dnfbase.conf + conf.logdir = logdir + conf.cachedir = cachedir + + conf.install_weak_deps = False + conf.releasever = releasever + conf.installroot = installroot + conf.prepend_installroot('persistdir') + # tsflags is an 'AppendOption': setting it appends 'nodocs' to the + # existing list of values over in libdnf, it does not replace them.
+ conf.tsflags = ['nodocs'] + # Log details about the solver + conf.debug_solver = True + + if proxy: + conf.proxy = proxy + + if sslverify == False: + conf.sslverify = False + + # DNF 3.2 needs to have module_platform_id set, otherwise depsolve won't work correctly + if not os.path.exists("/etc/os-release"): + log.warning("/etc/os-release is missing, cannot determine platform id, falling back to %s", DEFAULT_PLATFORM_ID) + platform_id = DEFAULT_PLATFORM_ID + else: + os_release = flatconfig("/etc/os-release") + platform_id = os_release.get("PLATFORM_ID", DEFAULT_PLATFORM_ID) + log.info("Using %s for module_platform_id", platform_id) + conf.module_platform_id = platform_id + + # Add .repo files + if repos: + reposdir = os.path.join(tempdir, "dnf.repos") + if not os.path.isdir(reposdir): + os.mkdir(reposdir) + for r in repos: + shutil.copy2(r, reposdir) + conf.reposdir = [reposdir] + dnfbase.read_all_repos() + + # add the sources + for i, r in enumerate(sources): + if "SRPM" in r or "srpm" in r: + log.info("Skipping source repo: %s", r) + continue + repo_name = "lorax-repo-%d" % i + repo = dnf.repo.Repo(repo_name, conf) + repo.baseurl = [r] + if proxy: + repo.proxy = proxy + repo.enable() + dnfbase.repos.add(repo) + log.info("Added '%s': %s", repo_name, r) + log.info("Fetching metadata...") + try: + repo.load() + except dnf.exceptions.RepoError as e: + log.error("Error fetching metadata for %s: %s", repo_name, e) + return None + + # add the mirrorlists + for i, r in enumerate(mirrorlists): + if "SRPM" in r or "srpm" in r: + log.info("Skipping source repo: %s", r) + continue + repo_name = "lorax-mirrorlist-%d" % i + repo = dnf.repo.Repo(repo_name, conf) + repo.mirrorlist = r + if proxy: + repo.proxy = proxy + repo.enable() + dnfbase.repos.add(repo) + log.info("Added '%s': %s", repo_name, r) + log.info("Fetching metadata...") + try: + repo.load() + except dnf.exceptions.RepoError as e: + log.error("Error fetching metadata for %s: %s", repo_name, e) + return None + + # Enable repos listed on the cmdline + for r in enablerepos: + repolist = dnfbase.repos.get_matching(r) + if not repolist: + log.warning("%s is an unknown repo, not enabling it", r) + else: + repolist.enable() + log.info("Enabled repo %s", r) + + # Disable repos listed on the cmdline + for r in disablerepos: + repolist = dnfbase.repos.get_matching(r) + if not repolist: + log.warning("%s is an unknown repo, not disabling it", r) + else: + repolist.disable() + log.info("Disabled repo %s", r) + + dnfbase.fill_sack(load_system_repo=False) + dnfbase.read_comps() + + return dnfbase
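A hedged usage sketch; the installroot path and repo URL are hypothetical. Note that enablerepos and disablerepos are iterated unconditionally, so pass empty lists rather than leaving them as None:

    from pylorax.dnfbase import get_dnf_base_object

    dbo = get_dnf_base_object(
        installroot="/var/tmp/installroot",                    # hypothetical
        sources=["https://example.com/fedora/32/x86_64/os/"],  # hypothetical
        enablerepos=[], disablerepos=[],
        releasever="32")
    if dbo is None:
        raise SystemExit("failed to fetch repo metadata")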
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/dnfhelper.html b/f32-branch/_modules/pylorax/dnfhelper.html new file mode 100644 index 00000000..c0420ce6 --- /dev/null +++ b/f32-branch/_modules/pylorax/dnfhelper.html @@ -0,0 +1,311 @@
+pylorax.dnfhelper — Lorax 32.12 documentation

Source code for pylorax.dnfhelper

+#
+# dnfhelper.py
+#
+# Copyright (C) 2010-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#                     Brian C. Lane <bcl@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.dnfhelper")
+import dnf
+import dnf.transaction
+import collections
+import time
+import pylorax.output as output
+
+__all__ = ['LoraxDownloadCallback', 'LoraxRpmCallback']
+
+def _paced(fn):
+    """Execute `fn` no more often than every 2 seconds."""
+    def paced_fn(self, *args):
+        now = time.time()
+        if now - self.last_time < 2:
+            return
+        self.last_time = now
+        return fn(self, *args)
+    return paced_fn
+
+
+
[docs]class LoraxDownloadCallback(dnf.callback.DownloadProgress): + def __init__(self): + self.downloads = collections.defaultdict(int) + self.last_time = time.time() + self.total_files = 0 + self.total_size = 0 + + self.pkgno = 0 + self.total = 0 + + self.output = output.LoraxOutput() + + @_paced + def _update(self): + msg = "Downloading %(pkgno)s / %(total_files)s RPMs, " \ + "%(downloaded)s / %(total_size)s (%(percent)d%%) done.\n" + downloaded = sum(self.downloads.values()) + vals = { + 'downloaded' : downloaded, + 'percent' : int(100 * downloaded/self.total_size), + 'pkgno' : self.pkgno, + 'total_files' : self.total_files, + 'total_size' : self.total_size + } + self.output.write(msg % vals) + +
[docs] def end(self, payload, status, msg): + nevra = str(payload) + if status is dnf.callback.STATUS_OK: + self.downloads[nevra] = payload.download_size + self.pkgno += 1 + self._update() + return + logger.critical("Failed to download '%s': %d - %s", nevra, status, msg)
+ +
[docs] def progress(self, payload, done): + nevra = str(payload) + self.downloads[nevra] = done + self._update()
+ + # dnf 2.5.0 adds a new argument, accept it if it is passed + # pylint: disable=arguments-differ +
[docs] def start(self, total_files, total_size, total_drpms=0): + self.total_files = total_files + self.total_size = total_size
+ + +
[docs]class LoraxRpmCallback(dnf.callback.TransactionProgress): + def __init__(self): + super(LoraxRpmCallback, self).__init__() + self._last_ts = None + +
[docs] def progress(self, package, action, ti_done, ti_total, ts_done, ts_total): + if action == dnf.transaction.PKG_INSTALL: + # do not report same package twice + if self._last_ts == ts_done: + return + self._last_ts = ts_done + + msg = '(%d/%d) %s' % (ts_done, ts_total, package) + logger.info(msg) + elif action == dnf.transaction.TRANS_POST: + msg = "Performing post-installation setup tasks" + logger.info(msg)
+ +
[docs] def error(self, message): + logger.warning(message)
+
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/executils.html b/f32-branch/_modules/pylorax/executils.html new file mode 100644 index 00000000..ba0f7fee --- /dev/null +++ b/f32-branch/_modules/pylorax/executils.html @@ -0,0 +1,550 @@
+pylorax.executils — Lorax 32.12 documentation

Source code for pylorax.executils

+#
+# executil.py - subprocess execution utility functions
+#
+# Copyright (C) 1999-2015
+# Red Hat, Inc.  All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import subprocess
+from subprocess import TimeoutExpired
+import signal
+
+import logging
+log = logging.getLogger("pylorax")
+program_log = logging.getLogger("program")
+
+# pylint: disable=not-context-manager
+from threading import Lock
+program_log_lock = Lock()
+
+_child_env = {}
+
+
[docs]def setenv(name, value): + """ Set an environment variable to be used by child processes. + + This method does not modify os.environ for the running process, which + is not thread-safe. If setenv has already been called for a particular + variable name, the old value is overwritten. + + :param str name: The name of the environment variable + :param str value: The value of the environment variable + """ + + _child_env[name] = value
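For example, to make every child process started through this module see a variable; the name and value are purely illustrative:

    from pylorax.executils import setenv

    # Affects only programs run via this module, not os.environ itself
    setenv("LIBGUESTFS_BACKEND", "direct")  # hypothetical variable/value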
+ +
[docs]def augmentEnv(): + env = os.environ.copy() + env.update(_child_env) + return env
+ +
[docs]class ExecProduct(object): + def __init__(self, rc, stdout, stderr): + self.rc = rc + self.stdout = stdout + self.stderr = stderr
+ +
[docs]def startProgram(argv, root='/', stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + env_prune=None, env_add=None, reset_handlers=True, reset_lang=True, **kwargs): + """ Start an external program and return the Popen object. + + The root and reset_handlers arguments are handled by passing a + preexec_fn argument to subprocess.Popen, but an additional preexec_fn + can still be specified and will be run. The user preexec_fn will be run + last. + + :param argv: The command to run and argument + :param root: The directory to chroot to before running command. + :param stdin: The file object to read stdin from. + :param stdout: The file object to write stdout to. + :param stderr: The file object to write stderr to. + :param env_prune: environment variables to remove before execution + :param env_add: environment variables to add before execution + :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN + :param reset_lang: whether to set the locale of the child process to C + :param kwargs: Additional parameters to pass to subprocess.Popen + :return: A Popen object for the running command. + :keyword preexec_fn: A function to run before execution starts. + """ + if env_prune is None: + env_prune = [] + + # Check for and save a preexec_fn argument + preexec_fn = kwargs.pop("preexec_fn", None) + + def preexec(): + # If a target root was specificed, chroot into it + if root and root != '/': + os.chroot(root) + os.chdir("/") + + # Signal handlers set to SIG_IGN persist across exec. Reset + # these to SIG_DFL if requested. In particular this will include the + # SIGPIPE handler set by python. + if reset_handlers: + for signum in range(1, signal.NSIG): + if signal.getsignal(signum) == signal.SIG_IGN: + signal.signal(signum, signal.SIG_DFL) + + # If the user specified an additional preexec_fn argument, run it + if preexec_fn is not None: + preexec_fn() + + with program_log_lock: + program_log.info("Running... %s", " ".join(argv)) + + env = augmentEnv() + for var in env_prune: + env.pop(var, None) + + if reset_lang: + env.update({"LC_ALL": "C"}) + + if env_add: + env.update(env_add) + + # pylint: disable=subprocess-popen-preexec-fn + return subprocess.Popen(argv, + stdin=stdin, + stdout=stdout, + stderr=stderr, + close_fds=True, + preexec_fn=preexec, cwd=root, env=env, **kwargs)
+ +def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None, log_output=True, + binary_output=False, filter_stderr=False, raise_err=False, callback=None, + env_add=None, reset_handlers=True, reset_lang=True): + """ Run an external program, log the output and return it to the caller + + :param argv: The command to run and argument + :param root: The directory to chroot to before running command. + :param stdin: The file object to read stdin from. + :param stdout: Optional file object to write the output to. + :param env_prune: environment variable to remove before execution + :param log_output: whether to log the output of command + :param binary_output: whether to treat the output of command as binary data + :param filter_stderr: whether to exclude the contents of stderr from the returned output + :param raise_err: whether to raise a CalledProcessError if the returncode is non-zero + :param callback: method to call while waiting for process to finish, passed Popen object + :param env_add: environment variables to add before execution + :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN + :param reset_lang: whether to set the locale of the child process to C + :return: The return code of the command and the output + :raises: OSError or CalledProcessError + """ + try: + if filter_stderr: + stderr = subprocess.PIPE + else: + stderr = subprocess.STDOUT + + proc = startProgram(argv, root=root, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr, + env_prune=env_prune, universal_newlines=not binary_output, + env_add=env_add, reset_handlers=reset_handlers, reset_lang=reset_lang) + + output_string = None + err_string = None + if callback: + while callback(proc) and proc.poll() is None: + try: + (output_string, err_string) = proc.communicate(timeout=1) + break + except TimeoutExpired: + pass + else: + (output_string, err_string) = proc.communicate() + if output_string: + if binary_output: + output_lines = [output_string] + else: + if output_string[-1] != "\n": + output_string = output_string + "\n" + output_lines = output_string.splitlines(True) + + if log_output: + with program_log_lock: + for line in output_lines: + program_log.info(line.strip()) + + if stdout: + stdout.write(output_string) + + # If stderr was filtered, log it separately + if filter_stderr and err_string and log_output: + err_lines = err_string.splitlines(True) + + with program_log_lock: + for line in err_lines: + program_log.info(line.strip()) + + except OSError as e: + with program_log_lock: + program_log.error("Error running %s: %s", argv[0], e.strerror) + raise + + with program_log_lock: + program_log.debug("Return code: %s", proc.returncode) + + if proc.returncode and raise_err: + output = (output_string or "") + (err_string or "") + raise subprocess.CalledProcessError(proc.returncode, argv, output) + + return (proc.returncode, output_string) + +
[docs]def execWithRedirect(command, argv, stdin=None, stdout=None, root='/', env_prune=None, + log_output=True, binary_output=False, raise_err=False, callback=None, + env_add=None, reset_handlers=True, reset_lang=True): + """ Run an external program and redirect the output to a file. + + :param command: The command to run + :param argv: The argument list + :param stdin: The file object to read stdin from. + :param stdout: Optional file object to redirect stdout and stderr to. + :param root: The directory to chroot to before running command. + :param env_prune: environment variable to remove before execution + :param log_output: whether to log the output of command + :param binary_output: whether to treat the output of command as binary data + :param raise_err: whether to raise a CalledProcessError if the returncode is non-zero + :param callback: method to call while waiting for process to finish, passed Popen object + :param env_add: environment variables to add before execution + :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN + :param reset_lang: whether to set the locale of the child process to C + :return: The return code of the command + """ + argv = [command] + list(argv) + return _run_program(argv, stdin=stdin, stdout=stdout, root=root, env_prune=env_prune, + log_output=log_output, binary_output=binary_output, raise_err=raise_err, callback=callback, + env_add=env_add, reset_handlers=reset_handlers, reset_lang=reset_lang)[0]
+ +
[docs]def execWithCapture(command, argv, stdin=None, root='/', log_output=True, filter_stderr=False, + raise_err=False, callback=None, env_add=None, reset_handlers=True, reset_lang=True): + """ Run an external program and capture standard out and err. + + :param command: The command to run + :param argv: The argument list + :param stdin: The file object to read stdin from. + :param root: The directory to chroot to before running command. + :param log_output: Whether to log the output of command + :param filter_stderr: Whether stderr should be excluded from the returned output + :param callback: method to call while waiting for process to finish, passed Popen object + :param env_add: environment variables to add before execution + :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN + :param reset_lang: whether to set the locale of the child process to C + :return: The output of the command + """ + argv = [command] + list(argv) + return _run_program(argv, stdin=stdin, root=root, log_output=log_output, filter_stderr=filter_stderr, + raise_err=raise_err, callback=callback, env_add=env_add, + reset_handlers=reset_handlers, reset_lang=reset_lang)[1]
+ +
[docs]def execReadlines(command, argv, stdin=None, root='/', env_prune=None, filter_stderr=False, + callback=lambda x: True, env_add=None, reset_handlers=True, reset_lang=True): + """ Execute an external command and return the line output of the command + in real-time. + + This method assumes that there is a reasonably low delay between the + end of output and the process exiting. If the child process closes + stdout and then keeps on truckin' there will be problems. + + NOTE/WARNING: UnicodeDecodeError will be raised if the output of the + external command can't be decoded as UTF-8. + + :param command: The command to run + :param argv: The argument list + :param stdin: The file object to read stdin from. + :param root: The directory to chroot to before running command. + :param env_prune: environment variable to remove before execution + :param filter_stderr: Whether stderr should be excluded from the returned output + :param callback: method to call while waiting for process to finish, passed Popen object + :param env_add: environment variables to add before execution + :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN + :param reset_lang: whether to set the locale of the child process to C + :return: Iterator of the lines from the command + + Output from the command is not logged to program.log + This returns an iterator with the lines from the command until it has finished + """ + + class ExecLineReader(object): + """Iterator class for returning lines from a process and cleaning + up the process when the output is no longer needed. + """ + + def __init__(self, proc, argv, callback): + self._proc = proc + self._argv = argv + self._callback = callback + + def __iter__(self): + return self + + def __del__(self): + # See if the process is still running + if self._proc.poll() is None: + # Stop the process and ignore any problems that might arise + try: + self._proc.terminate() + except OSError: + pass + + def __next__(self): + # Read the next line, blocking if a line is not yet available + line = self._proc.stdout.readline().decode("utf-8") + if line == '' or not self._callback(self._proc): + # Output finished, wait for the process to end + self._proc.communicate() + + # Check for successful exit + if self._proc.returncode < 0: + raise OSError("process '%s' was killed by signal %s" % + (self._argv, -self._proc.returncode)) + elif self._proc.returncode > 0: + raise OSError("process '%s' exited with status %s" % + (self._argv, self._proc.returncode)) + raise StopIteration + + return line.strip() + + argv = [command] + argv + + if filter_stderr: + stderr = subprocess.DEVNULL + else: + stderr = subprocess.STDOUT + + try: + proc = startProgram(argv, root=root, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr, + env_prune=env_prune, env_add=env_add, reset_handlers=reset_handlers, reset_lang=reset_lang) + except OSError as e: + with program_log_lock: + program_log.error("Error running %s: %s", argv[0], e.strerror) + raise + + return ExecLineReader(proc, argv, callback)
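A sketch of consuming a command's output line by line as it is produced; the command is illustrative:

    from pylorax.executils import execReadlines

    # Lines are yielded as the child writes them, not after it exits
    for line in execReadlines("ls", ["-1", "/"]):
        print(line)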
+ +
[docs]def runcmd(cmd, **kwargs): + """ run execWithRedirect with raise_err=True + """ + kwargs["raise_err"] = True + return execWithRedirect(cmd[0], cmd[1:], **kwargs)
+ +
[docs]def runcmd_output(cmd, **kwargs): + """ run execWithCapture with raise_err=True + """ + kwargs["raise_err"] = True + return execWithCapture(cmd[0], cmd[1:], **kwargs)
+
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/imgutils.html b/f32-branch/_modules/pylorax/imgutils.html new file mode 100644 index 00000000..849dfcc6 --- /dev/null +++ b/f32-branch/_modules/pylorax/imgutils.html @@ -0,0 +1,755 @@
+pylorax.imgutils — Lorax 32.12 documentation

Source code for pylorax.imgutils

+# imgutils.py - utility functions/classes for building disk images
+#
+# Copyright (C) 2011-2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s):  Will Woods <wwoods@redhat.com>
+
+import logging
+logger = logging.getLogger("pylorax.imgutils")
+
+import os, tempfile
+from os.path import join, dirname
+from subprocess import Popen, PIPE, CalledProcessError
+import sys
+import time
+import traceback
+import multiprocessing
+from time import sleep
+import shutil
+
+from pylorax.sysutils import cpfile
+from pylorax.executils import execWithRedirect, execWithCapture
+from pylorax.executils import runcmd, runcmd_output
+
+######## Functions for making container images (cpio, tar, squashfs) ##########
+
+
[docs]def compress(command, root, outfile, compression="xz", compressargs=None): + '''Make a compressed archive of the given rootdir or file. + command is a list of the archiver commands to run + compression should be "xz", "gzip", "lzma", "bzip2", or None. + compressargs will be used on the compression commandline.''' + if compression not in (None, "xz", "gzip", "lzma", "bzip2"): + raise ValueError("Unknown compression type %s" % compression) + compressargs = compressargs or ["-9"] + if compression == "xz": + compressargs.insert(0, "--check=crc32") + if compression is None: + compression = "cat" # this is a little silly + compressargs = [] + + # make compression run with multiple threads if possible + if compression in ("xz", "lzma"): + compressargs.insert(0, "-T%d" % multiprocessing.cpu_count()) + elif compression == "gzip": + compression = "pigz" + compressargs.insert(0, "-p%d" % multiprocessing.cpu_count()) + elif compression == "bzip2": + compression = "pbzip2" + compressargs.insert(0, "-p%d" % multiprocessing.cpu_count()) + + find, archive, comp = None, None, None + + try: + if os.path.isdir(root): + logger.debug("find %s -print0 |%s | %s %s > %s", root, " ".join(command), + compression, " ".join(compressargs), outfile) + + find = Popen(["find", ".", "-print0"], stdout=PIPE, cwd=root) + archive = Popen(command, stdin=find.stdout, stdout=PIPE, cwd=root) + else: + logger.debug("echo %s |%s | %s %s > %s", root, " ".join(command), + compression, " ".join(compressargs), outfile) + + archive = Popen(command, stdin=PIPE, stdout=PIPE, cwd=os.path.dirname(root)) + archive.stdin.write(os.path.basename(root).encode("utf-8") + b"\0") + archive.stdin.close() + + comp = Popen([compression] + compressargs, + stdin=archive.stdout, stdout=open(outfile, "wb")) + comp.wait() + return comp.returncode + except OSError as e: + logger.error(e) + # Kill off any hanging processes + list(p.kill() for p in (find, archive, comp) if p) + return 1
+ +
[docs]def mkcpio(root, outfile, compression="xz", compressargs=None): + compressargs = compressargs or ["-9"] + return compress(["cpio", "--null", "--quiet", "-H", "newc", "-o"], + root, outfile, compression, compressargs)
+ +
[docs]def mktar(root, outfile, compression="xz", compressargs=None, selinux=True): + compressargs = compressargs or ["-9"] + tar_cmd = ["tar", "--no-recursion"] + if selinux: + tar_cmd += ["--selinux", "--acls", "--xattrs"] + tar_cmd += ["-cf-", "--null", "-T-"] + return compress(tar_cmd, root, outfile, compression, compressargs)
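For example, producing a compressed tarball of a tree; the paths are hypothetical:

    from pylorax.imgutils import mktar

    # xz-compress /var/tmp/root into root.tar.xz, keeping SELinux labels and xattrs
    rc = mktar("/var/tmp/root", "/var/tmp/root.tar.xz", compression="xz")
    if rc != 0:
        raise RuntimeError("tar/compress pipeline failed")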
+ +
[docs]def mksquashfs(rootdir, outfile, compression="default", compressargs=None): + '''Make a squashfs image containing the given rootdir.''' + compressargs = compressargs or [] + if compression != "default": + compressargs = ["-comp", compression] + compressargs + return execWithRedirect("mksquashfs", [rootdir, outfile] + compressargs)
+ +
[docs]def mkrootfsimg(rootdir, outfile, label, size=2, sysroot=""): + """ + Make rootfs image from a directory + + :param str rootdir: Root directory + :param str outfile: Path of output image file + :param str label: Filesystem label + :param int size: Size of the image in GiB, if None computed automatically + :param str sysroot: path to system (deployment) root relative to physical root + """ + if size: + fssize = size * (1024*1024*1024) # 2GB sparse file compresses down to nothin' + else: + fssize = None # Let mkext4img figure out the needed size + + mkext4img(rootdir, outfile, label=label, size=fssize)
+ + +######## Utility functions ############################################### + +
[docs]def mksparse(outfile, size): + '''use os.ftruncate to create a sparse file of the given size.''' + fobj = open(outfile, "w") + os.ftruncate(fobj.fileno(), size)
+ +
[docs]def mkqcow2(outfile, size, options=None): + '''use qemu-img to create a file of the given size. + options is a list of options passed to qemu-img + + Default format is qcow2, override by passing "-f", fmt + in options. + ''' + mkqemu_img(outfile, size, options)
+ +
[docs]def mkqemu_img(outfile, size, options=None): + '''use qemu-img to create a file of the given size. + options is a list of options passed to qemu-img + + Default format is qcow2, override by passing "-f", fmt + in options. + ''' + options = options or [] + if "-f" not in options: + options.extend(["-f", "qcow2"]) + runcmd(["qemu-img", "create"] + options + [outfile, str(size)])
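Creating a 4 GiB qcow2, or a raw image by overriding -f; the paths are hypothetical and qemu-img must be installed:

    from pylorax.imgutils import mkqcow2, mkqemu_img

    mkqcow2("/var/tmp/disk.qcow2", 4 * 1024**3)
    mkqemu_img("/var/tmp/disk.raw", 4 * 1024**3, options=["-f", "raw"])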
+ +
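For example (hypothetical paths; the size is in bytes and is passed straight to qemu-img create):

    mkqemu_img("/var/tmp/disk.qcow2", 4 * 1024**3)               # 4 GiB qcow2
    mkqemu_img("/var/tmp/disk.img", 4 * 1024**3, ["-f", "raw"])  # raw format instead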
[docs]def loop_waitfor(loop_dev, outfile): + """Make sure the loop device is attached to the outfile. + + It seems that on rare occasions losetup can return before the /dev/loopX is + ready for use, causing problems with mkfs. This tries to make sure that the + loop device really is associated with the backing file before continuing. + + Raise RuntimeError if it isn't setup after 5 tries. + """ + for _x in range(0,5): + runcmd(["udevadm", "settle", "--timeout", "300"]) + ## XXX Note that losetup --list output can be truncated to 64 bytes in some + ## situations. Don't use it to lookup backing file, go the other way + ## and lookup the loop for the backing file. See util-linux lib/loopdev.c + ## loopcxt_get_backing_file() + if get_loop_name(outfile) == os.path.basename(loop_dev): + return + + # If this really is a race, give it some time to settle down + time.sleep(1) + + raise RuntimeError("Unable to setup %s on %s" % (loop_dev, outfile))
+ +
[docs]def loop_attach(outfile): + """Attach a loop device to the given file. Return the loop device name. + + On rare occasions it appears that the device never shows up, some experiments + seem to indicate that it may be a race with another process using /dev/loop* devices. + + So we now try 3 times before actually failing. + + Raises CalledProcessError if losetup fails. + """ + retries = 0 + while True: + try: + retries += 1 + dev = runcmd_output(["losetup", "--find", "--show", outfile]).strip() + + # Sometimes the loop device isn't ready yet, make extra sure before returning + loop_waitfor(dev, outfile) + return dev + except RuntimeError: + # Try to setup the loop device 3 times before giving up + if retries == 3: + logger.error("loop_attach failed, retries exhausted.") + raise + logger.debug("Try %d failed, %s did not appear.", retries, dev)
+ +
[docs]def loop_detach(loopdev): + '''Detach the given loop device. Return False on failure.''' + return (execWithRedirect("losetup", ["--detach", loopdev]) == 0)
+ +
[docs]def get_loop_name(path): + '''Return the loop device associated with the path. + Raises RuntimeError if more than one loop is associated''' + buf = runcmd_output(["losetup", "-j", path]) + if len(buf.splitlines()) > 1: + # there should never be more than one loop device listed + raise RuntimeError("multiple loops associated with %s" % path) + name = os.path.basename(buf.split(":")[0]) + return name
+ +
[docs]def dm_attach(dev, size, name=None): + '''Attach a devicemapper device to the given device, with the given size. + If name is None, a random name will be chosen. Returns the device name. + raises CalledProcessError if dmsetup fails.''' + if name is None: + name = tempfile.mktemp(prefix="lorax.imgutils.", dir="") + runcmd(["dmsetup", "create", name, "--table", + "0 %i linear %s 0" % (size/512, dev)]) + return name
+ +
[docs]def dm_detach(dev): + '''Detach the named devicemapper device. Returns False if dmsetup fails.''' + dev = dev.replace("/dev/mapper/", "") # strip prefix, if it's there + return (execWithRedirect("dmsetup", ["remove", dev]) == 0)
+ +
[docs]def mount(dev, opts="", mnt=None): + '''Mount the given device at the given mountpoint, using the given opts. + opts should be a comma-separated string of mount options. + If mnt is None, a temporary directory will be created and its path will be + returned. + raises CalledProcessError if mount fails.''' + if mnt is None: + mnt = tempfile.mkdtemp(prefix="lorax.imgutils.") + logger.debug("make tmp mountdir %s", mnt) + cmd = ["mount"] + if opts: + cmd += ["-o", opts] + cmd += [dev, mnt] + runcmd(cmd) + return mnt
+ +
[docs]def umount(mnt, lazy=False, maxretry=3, retrysleep=1.0, delete=True): + '''Unmount the given mountpoint. If lazy is True, do a lazy umount (-l). + If the mount was a temporary dir created by mount, it will be deleted. + raises CalledProcessError if umount fails.''' + cmd = ["umount"] + if lazy: cmd += ["-l"] + cmd += [mnt] + count = 0 + while maxretry > 0: + try: + rv = runcmd(cmd) + except CalledProcessError: + count += 1 + if count == maxretry: + raise + logger.warning("failed to unmount %s. retrying (%d/%d)...", + mnt, count, maxretry) + if logger.getEffectiveLevel() <= logging.DEBUG: + fuser = execWithCapture("fuser", ["-vm", mnt]) + logger.debug("fuser -vm:\n%s\n", fuser) + sleep(retrysleep) + else: + break + if delete and 'lorax.imgutils' in mnt: + os.rmdir(mnt) + logger.debug("remove tmp mountdir %s", mnt) + return (rv == 0)
+ +
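mount() and umount() are normally paired; a sketch with a hypothetical device path. When mnt is omitted, mount() creates a lorax.imgutils temporary directory and umount() removes it again after unmounting:

    mnt = mount("/dev/loop0", opts="ro")
    try:
        ...  # read files under mnt
    finally:
        umount(mnt)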
[docs]def copytree(src, dest, preserve=True): + '''Copy a tree of files using cp -a, thus preserving modes, timestamps, + links, acls, sparse files, xattrs, selinux contexts, etc. + If preserve is False, uses cp -R (useful for modeless filesystems) + raises CalledProcessError if copy fails.''' + logger.debug("copytree %s %s", src, dest) + cp = ["cp", "-a"] if preserve else ["cp", "-R", "-L", "--preserve=timestamps"] + cp += [join(src, "."), os.path.abspath(dest)] + runcmd(cp)
+ +
[docs]def do_grafts(grafts, dest, preserve=True): + '''Copy each of the items listed in grafts into dest. + If the key ends with '/' it's assumed to be a directory which should be + created, otherwise just the leading directories will be created.''' + for imgpath, filename in grafts.items(): + if imgpath[-1] == '/': + targetdir = join(dest, imgpath) + imgpath = imgpath[:-1] + else: + targetdir = join(dest, dirname(imgpath)) + if not os.path.isdir(targetdir): + os.makedirs(targetdir) + if os.path.isdir(filename): + copytree(filename, join(dest, imgpath), preserve) + else: + cpfile(filename, join(dest, imgpath))
+ +
[docs]def round_to_blocks(size, blocksize): + '''If size isn't a multiple of blocksize, round up to the next multiple''' + diff = size % blocksize + if diff or not size: + size += blocksize - diff + return size
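Worked examples; note that a size of 0 is also rounded up to a single block:

    round_to_blocks(5000, 4096)   # -> 8192
    round_to_blocks(8192, 4096)   # -> 8192 (already a multiple)
    round_to_blocks(0, 4096)      # -> 4096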
+ +# TODO: move filesystem data outside this function +
[docs]def estimate_size(rootdir, graft=None, fstype=None, blocksize=4096, overhead=256): + graft = graft or {} + getsize = lambda f: os.lstat(f).st_size + if fstype == "btrfs": + overhead = 64*1024 # don't worry, it's all sparse + if fstype == "hfsplus": + overhead = 200 # hack to deal with two bootloader copies + if fstype in ("vfat", "msdos"): + blocksize = 2048 + getsize = lambda f: os.stat(f).st_size # no symlinks, count as copies + total = overhead*blocksize + dirlist = list(graft.values()) + if rootdir: + dirlist.append(rootdir) + for root in dirlist: + for top, dirs, files in os.walk(root): + for f in files + dirs: + total += round_to_blocks(getsize(join(top,f)), blocksize) + if fstype == "btrfs": + total = max(256*1024*1024, total) # btrfs minimum size: 256MB + logger.info("Size of %s block %s fs at %s estimated to be %s", blocksize, fstype, rootdir, total) + return total
+ +######## Execution contexts - use with the 'with' statement ############## + +
[docs]class LoopDev(object): + def __init__(self, filename, size=None): + self.loopdev = None + self.filename = filename + if size: + mksparse(self.filename, size) + def __enter__(self): + self.loopdev = loop_attach(self.filename) + return self.loopdev + def __exit__(self, exc_type, exc_value, tracebk): + loop_detach(self.loopdev)
+ +
[docs]class DMDev(object): + def __init__(self, dev, size, name=None): + self.mapperdev = None + (self.dev, self.size, self.name) = (dev, size, name) + def __enter__(self): + self.mapperdev = dm_attach(self.dev, self.size, self.name) + return self.mapperdev + def __exit__(self, exc_type, exc_value, tracebk): + dm_detach(self.mapperdev)
+ +
[docs]class Mount(object): + def __init__(self, dev, opts="", mnt=None): + (self.dev, self.opts, self.mnt) = (dev, opts, mnt) + def __enter__(self): + self.mnt = mount(self.dev, self.opts, self.mnt) + return self.mnt + def __exit__(self, exc_type, exc_value, tracebk): + umount(self.mnt)
+ +
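These context managers compose naturally. A sketch with a hypothetical path, using runcmd as imported by this module; LoopDev creates the sparse backing file when size is given and detaches the loop device on exit:

    with LoopDev("/var/tmp/fs.img", size=1024**3) as loopdev:
        runcmd(["mkfs.ext4", loopdev])
        with Mount(loopdev) as mnt:
            ...  # populate the new filesystem under mnt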
[docs]def kpartx_disk_img(disk_img): + """Attach a disk image's partitions to /dev/loopX using kpartx + + :param disk_img: The full path to a partitioned disk image + :type disk_img: str + :returns: list of (loopXpN, size) + :rtype: list of tuples + """ + # Example kpartx output + # kpartx -p p -v -a /tmp/diskV2DiCW.im + # add map loop2p1 (253:2): 0 3481600 linear /dev/loop2 2048 + # add map loop2p2 (253:3): 0 614400 linear /dev/loop2 3483648 + kpartx_output = runcmd_output(["kpartx", "-v", "-a", "-s", disk_img]) + logger.debug(kpartx_output) + + # list of (deviceName, sizeInBytes) + loop_devices = [] + for line in kpartx_output.splitlines(): + # add map loop2p3 (253:4): 0 7139328 linear /dev/loop2 528384 + # 3rd element is size in 512 byte blocks + if line.startswith("add map "): + fields = line[8:].split() + loop_devices.append( (fields[0], int(fields[3])*512) ) + return loop_devices
+ +
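Given the sample output in the comments above, this would return [("loop2p1", 3481600*512), ("loop2p2", 614400*512)]. A caller might use it like this (hypothetical image path):

    for name, size in kpartx_disk_img("/var/tmp/disk.img"):
        print("/dev/mapper/%s is %d MiB" % (name, size // 1024**2))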
[docs]class PartitionMount(object): + """ Mount a partitioned image file using kpartx """ + def __init__(self, disk_img, mount_ok=None, submount=None): + """ + :param str disk_img: The full path to a partitioned disk image + :param mount_ok: A function that is passed the mount point and + returns True if it should be mounted. + :param str submount: Directory inside mount_dir to mount at + + If mount_ok is not set it will look for /etc/passwd + + If the partition is found it will be mounted under a temporary + directory and self.temp_dir set to it. If submount is passed it will be + created and mounted there instead, with self.mount_dir set to point to + it. self.mount_dev is set to the loop device, and self.mount_size is + set to the size of the partition. + + When no subdir is passed self.temp_dir and self.mount_dir will be the same. + """ + self.mount_dev = None + self.mount_size = None + self.mount_dir = None + self.disk_img = disk_img + self.mount_ok = mount_ok + self.submount = submount + self.temp_dir = None + + # Default is to mount partition with /etc/passwd + if not self.mount_ok: + self.mount_ok = lambda mount_dir: os.path.isfile(mount_dir+"/etc/passwd") + + # list of (deviceName, sizeInBytes) + self.loop_devices = kpartx_disk_img(self.disk_img) + + def __enter__(self): + # Mount the device selected by mount_ok, if possible + self.temp_dir = tempfile.mkdtemp() + if self.submount: + mount_dir = os.path.normpath(os.path.sep.join([self.temp_dir, self.submount])) + os.makedirs(mount_dir, mode=0o755, exist_ok=True) + else: + mount_dir = self.temp_dir + for dev, size in self.loop_devices: + try: + mount( "/dev/mapper/"+dev, mnt=mount_dir ) + if self.mount_ok(mount_dir): + self.mount_dir = mount_dir + self.mount_dev = dev + self.mount_size = size + break + umount( mount_dir ) + except CalledProcessError: + logger.debug(traceback.format_exc()) + if self.mount_dir: + logger.info("Partition mounted on %s size=%s", self.mount_dir, self.mount_size) + else: + logger.debug("Unable to mount anything from %s", self.disk_img) + os.rmdir(self.temp_dir) + self.temp_dir = None + return self + + def __exit__(self, exc_type, exc_value, tracebk): + if self.temp_dir: + umount(self.mount_dir) + shutil.rmtree(self.temp_dir) + self.mount_dir = None + self.temp_dir = None + execWithRedirect("kpartx", ["-d", "-s", self.disk_img])
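A usage sketch (hypothetical path); __exit__ unmounts the partition and detaches the kpartx loop devices again:

    with PartitionMount("/var/tmp/disk.img") as img_mount:
        if img_mount.mount_dir:
            ...  # the partition containing /etc/passwd is mounted here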
+ + +######## Functions for making filesystem images ########################## + +
[docs]def mkfsimage(fstype, rootdir, outfile, size=None, mkfsargs=None, mountargs="", graft=None): + '''Generic filesystem image creation function. + fstype should be a filesystem type - "mkfs.${fstype}" must exist. + graft should be a dict: {"some/path/in/image": "local/file/or/dir"}; + if the path ends with a '/' it's assumed to be a directory. + Will raise CalledProcessError if something goes wrong.''' + mkfsargs = mkfsargs or [] + graft = graft or {} + preserve = (fstype not in ("msdos", "vfat")) + if not size: + size = estimate_size(rootdir, graft, fstype) + with LoopDev(outfile, size) as loopdev: + try: + runcmd(["mkfs.%s" % fstype] + mkfsargs + [loopdev]) + except CalledProcessError as e: + logger.error("mkfs exited with a non-zero return code: %d", e.returncode) + logger.error(e.output) + sys.exit(e.returncode) + + with Mount(loopdev, mountargs) as mnt: + if rootdir: + copytree(rootdir, mnt, preserve) + do_grafts(graft, mnt, preserve) + + # Save information about filesystem usage + execWithRedirect("df", [mnt]) + + # Make absolutely sure that the data has been written + runcmd(["sync"])
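A sketch of the graft mechanism with hypothetical paths: keys are paths inside the image, values are local files or directories, and a trailing '/' on the key means the directory is created and the source is copied into it:

    mkfsimage("ext4", "/var/tmp/rootfs", "/var/tmp/root.img",
              graft={"boot/extra/": "/var/tmp/extra-boot-files"})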
+ +# convenience functions with useful defaults +
[docs]def mkdosimg(rootdir, outfile, size=None, label="", mountargs="shortname=winnt,umask=0077", graft=None): + graft = graft or {} + mkfsargs = ["-n", label] + if 'SOURCE_DATE_EPOCH' in os.environ: + mkfsargs.extend(["-i", + "{:x}".format(int(os.environ['SOURCE_DATE_EPOCH']))]) + mkfsimage("msdos", rootdir, outfile, size, mountargs=mountargs, + mkfsargs=mkfsargs, graft=graft)
+ +
[docs]def mkext4img(rootdir, outfile, size=None, label="", mountargs="", graft=None): + graft = graft or {} + mkfsimage("ext4", rootdir, outfile, size, mountargs=mountargs, + mkfsargs=["-L", label, "-b", "4096", "-m", "0"], graft=graft)
+ +
[docs]def mkbtrfsimg(rootdir, outfile, size=None, label="", mountargs="", graft=None): + graft = graft or {} + mkfsimage("btrfs", rootdir, outfile, size, mountargs=mountargs, + mkfsargs=["-L", label], graft=graft)
+ +
[docs]def mkhfsimg(rootdir, outfile, size=None, label="", mountargs="", graft=None): + graft = graft or {} + mkfsimage("hfsplus", rootdir, outfile, size, mountargs=mountargs, + mkfsargs=["-v", label], graft=graft)
+ +
[docs]def mkfsimage_from_disk(diskimage, fsimage, img_size=None, label="Anaconda"): + """ + Copy the / partition of a partitioned disk image to an un-partitioned + disk image. + + :param str diskimage: The full path to partitioned disk image with a / + :param str fsimage: The full path of the output fs image file + :param int img_size: Optional size of the fsimage in MiB or None to make + it as small as possible + :param str label: The label to apply to the image. Defaults to "Anaconda" + """ + with PartitionMount(diskimage) as img_mount: + if not img_mount or not img_mount.mount_dir: + return None + + logger.info("Creating fsimage %s (%s)", fsimage, img_size or "minimized") + if img_size: + # convert to Bytes + img_size *= 1024**2 + + mkext4img(img_mount.mount_dir, fsimage, size=img_size, label=label)
+ +
[docs]def default_image_name(compression, basename): + """ Return a default image name with the correct suffix for the compression type. + + :param str compression: Compression type + :param str basename: Base filename + :returns: basename with compression suffix + + If the compression is unknown it defaults to xz + """ + SUFFIXES = {"xz": ".xz", "gzip": ".gz", "bzip2": ".bz2", "lzma": ".lzma"} + return basename + SUFFIXES.get(compression, ".xz")
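For example, default_image_name("gzip", "root.tar") returns "root.tar.gz", while an unrecognized type such as "zstd" falls back to "root.tar.xz".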
+
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/installer.html b/f32-branch/_modules/pylorax/installer.html
new file mode 100644
index 00000000..0db2ecc6
--- /dev/null
+++ b/f32-branch/_modules/pylorax/installer.html
@@ -0,0 +1,879 @@
+pylorax.installer — Lorax 32.12 documentation

Source code for pylorax.installer

+#
+# Copyright (C) 2011-2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("pylorax")
+
+import glob
+import json
+from math import ceil
+import os
+import subprocess
+import shutil
+import socket
+import tempfile
+
+# Use the Lorax treebuilder branch for iso creation
+from pylorax.executils import execWithRedirect, execReadlines
+from pylorax.imgutils import PartitionMount, mksparse, mkext4img, loop_detach
+from pylorax.imgutils import get_loop_name, dm_detach, mount, umount
+from pylorax.imgutils import mkqemu_img, mktar, mkcpio, mkfsimage_from_disk
+from pylorax.monitor import LogMonitor
+from pylorax.mount import IsoMountpoint
+from pylorax.sysutils import joinpaths
+from pylorax.treebuilder import udev_escape
+
+
+ROOT_PATH = "/mnt/sysimage/"
+
+
[docs]class InstallError(Exception): + pass
+ + +
[docs]def create_vagrant_metadata(path, size=0): + """ Create a default Vagrant metadata.json file + + :param str path: Path to metadata.json file + :param int size: Disk size in MiB + """ + metadata = { "provider":"libvirt", "format":"qcow2", "virtual_size": ceil(size / 1024) } + with open(path, "wt") as f: + json.dump(metadata, f, indent=4)
+ + +
[docs]def update_vagrant_metadata(path, size): + """ Update the Vagrant metadata.json file + + :param str path: Path to metadata.json file + :param int size: Disk size in MiB + + This function makes sure that the provider, format and virtual size of the + metadata file are set correctly. All other values are left untouched. + """ + with open(path, "rt") as f: + try: + metadata = json.load(f) + except ValueError as e: + log.error("Problem reading metadata file %s: %s", path, e) + return + + metadata["provider"] = "libvirt" + metadata["format"] = "qcow2" + metadata["virtual_size"] = ceil(size / 1024) + with open(path, "wt") as f: + json.dump(metadata, f, indent=4)
+ + +
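For example, create_vagrant_metadata("metadata.json", size=4096) writes the following (4096 MiB rounds to 4 GiB):

    {
        "provider": "libvirt",
        "format": "qcow2",
        "virtual_size": 4
    }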
[docs]def find_free_port(start=5900, end=5999, host="127.0.0.1"): + """ Return first free port in range. + + :param int start: Starting port number + :param int end: Ending port number + :param str host: Host IP to search + :returns: First free port or -1 if none found + :rtype: int + """ + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + for port in range(start, end+1): + try: + s.bind((host, port)) + s.close() + return port + except OSError: + pass + + return -1
+ +
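A usage sketch, matching how QEMUInstall picks a VNC display further down:

    port = find_free_port(5900, 5999)
    if port == -1:
        raise InstallError("No free VNC ports")
    display_args = "vnc=127.0.0.1:%d" % (port - 5900)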
[docs]def append_initrd(initrd, files): + """ Append files to an initrd. + + :param str initrd: Path to initrd + :param list files: list of file paths to add + :returns: Path to a new initrd + :rtype: str + + The files are added to the initrd by creating a cpio image + of the files (stored at /) and writing the cpio to the end of a + copy of the initrd. + + The initrd is not changed, a copy is made before appending the + cpio archive. + """ + qemu_initrd = tempfile.mktemp(prefix="lmc-initrd-", suffix=".img") + shutil.copy2(initrd, qemu_initrd) + ks_dir = tempfile.mkdtemp(prefix="lmc-ksdir-") + for ks in files: + shutil.copy2(ks, ks_dir) + ks_initrd = tempfile.mktemp(prefix="lmc-ks-", suffix=".img") + mkcpio(ks_dir, ks_initrd) + shutil.rmtree(ks_dir) + with open(qemu_initrd, "ab") as initrd_fp: + with open(ks_initrd, "rb") as ks_fp: + while True: + data = ks_fp.read(1024**2) + if not data: + break + initrd_fp.write(data) + os.unlink(ks_initrd) + + return qemu_initrd
+ +
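A usage sketch with hypothetical paths; the caller owns the returned temporary file:

    new_initrd = append_initrd("/images/pxeboot/initrd.img", ["/tmp/ks.cfg"])
    try:
        ...  # boot qemu with -initrd new_initrd
    finally:
        os.unlink(new_initrd)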
[docs]class QEMUInstall(object): + """ + Run qemu using an iso and a kickstart + """ + # Mapping of arch to qemu command + QEMU_CMDS = {"x86_64": "qemu-system-x86_64", + "i386": "qemu-system-i386", + "arm": "qemu-system-arm", + "aarch64": "qemu-system-aarch64", + "ppc64le": "qemu-system-ppc64" + } + + def __init__(self, opts, iso, ks_paths, disk_img, img_size=2048, + kernel_args=None, memory=1024, vcpus=None, vnc=None, arch=None, + cancel_func=None, virtio_host="127.0.0.1", virtio_port=6080, + image_type=None, boot_uefi=False, ovmf_path=None): + """ + Start the installation + + :param iso: Information about the iso to use for the installation + :type iso: IsoMountpoint + :param list ks_paths: Paths to kickstart files. All are injected, the + first one is the one executed. + :param str disk_img: Path to a disk image, created if it doesn't exist + :param int img_size: The image size, in MiB, to create if it doesn't exist + :param str kernel_args: Extra kernel arguments to pass on the kernel cmdline + :param int memory: Amount of RAM to assign to the virt, in MiB + :param int vcpus: Number of virtual cpus + :param str vnc: Arguments to pass to qemu -display + :param str arch: Optional architecture to use in the virt + :param cancel_func: Function that returns True to cancel the installation + :type cancel_func: function + :param str virtio_host: Hostname to connect virtio log to + :param int virtio_port: Port to connect virtio log to + :param str image_type: Type of qemu-img disk to create, or None. + :param bool boot_uefi: Use OVMF to boot the VM in UEFI mode + :param str ovmf_path: Path to the OVMF firmware + """ + # Lookup qemu-system- for arch if passed, or try to guess using host arch + qemu_cmd = [self.QEMU_CMDS.get(arch or os.uname().machine, "qemu-system-"+os.uname().machine)] + if not os.path.exists("/usr/bin/"+qemu_cmd[0]): + raise InstallError("%s does not exist, cannot run qemu" % qemu_cmd[0]) + + qemu_cmd += ["-no-user-config"] + qemu_cmd += ["-m", str(memory)] + if vcpus: + qemu_cmd += ["-smp", str(vcpus)] + + if not opts.no_kvm and os.path.exists("/dev/kvm"): + qemu_cmd += ["-machine", "accel=kvm"] + + if boot_uefi: + qemu_cmd += ["-machine", "q35,smm=on"] + qemu_cmd += ["-global", "driver=cfi.pflash01,property=secure,value=on"] + + # Copy the initrd from the iso, create a cpio archive of the kickstart files + # and append it to the temporary initrd.
+ qemu_initrd = append_initrd(iso.initrd, ks_paths) + qemu_cmd += ["-kernel", iso.kernel] + qemu_cmd += ["-initrd", qemu_initrd] + + # Add the disk and cdrom + if not os.path.isfile(disk_img): + mksparse(disk_img, img_size * 1024**2) + drive_args = "file=%s" % disk_img + drive_args += ",cache=unsafe,discard=unmap" + if image_type: + drive_args += ",format=%s" % image_type + else: + drive_args += ",format=raw" + qemu_cmd += ["-drive", drive_args] + + drive_args = "file=%s,media=cdrom,readonly=on" % iso.iso_path + qemu_cmd += ["-drive", drive_args] + + # Setup the cmdline args + # ====================== + cmdline_args = "ks=file:/%s" % os.path.basename(ks_paths[0]) + cmdline_args += " inst.stage2=hd:LABEL=%s" % udev_escape(iso.label) + if opts.proxy: + cmdline_args += " inst.proxy=%s" % opts.proxy + if kernel_args: + cmdline_args += " "+kernel_args + cmdline_args += " inst.text inst.cmdline" + + qemu_cmd += ["-append", cmdline_args] + + if not opts.vnc: + vnc_port = find_free_port() + if vnc_port == -1: + raise InstallError("No free VNC ports") + display_args = "vnc=127.0.0.1:%d" % (vnc_port - 5900) + else: + display_args = opts.vnc + log.info("qemu %s", display_args) + qemu_cmd += ["-nographic", "-monitor", "none", "-serial", "null", "-display", display_args ] + + # Setup the virtio log port + qemu_cmd += ["-device", "virtio-serial-pci,id=virtio-serial0"] + qemu_cmd += ["-device", "virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0" + ",id=channel0,name=org.fedoraproject.anaconda.log.0"] + qemu_cmd += ["-chardev", "socket,id=charchannel0,host=%s,port=%s" % (virtio_host, virtio_port)] + + # Pass through rng from host + if opts.with_rng != "none": + qemu_cmd += ["-object", "rng-random,id=virtio-rng0,filename=%s" % opts.with_rng] + if boot_uefi: + qemu_cmd += ["-device", "virtio-rng-pci,rng=virtio-rng0,id=rng0,bus=pcie.0,addr=0x9"] + else: + qemu_cmd += ["-device", "virtio-rng-pci,rng=virtio-rng0,id=rng0,bus=pci.0,addr=0x9"] + + if boot_uefi and ovmf_path: + qemu_cmd += ["-drive", "file=%s/OVMF_CODE.secboot.fd,if=pflash,format=raw,unit=0,readonly=on" % ovmf_path] + + # Make a copy of the OVMF_VARS.secboot.fd for this run + ovmf_vars = tempfile.mktemp(prefix="lmc-OVMF_VARS-", suffix=".fd") + shutil.copy2(joinpaths(ovmf_path, "/OVMF_VARS.secboot.fd"), ovmf_vars) + + qemu_cmd += ["-drive", "file=%s,if=pflash,format=raw,unit=1" % ovmf_vars] + + log.info("Running qemu") + log.debug(qemu_cmd) + try: + execWithRedirect(qemu_cmd[0], qemu_cmd[1:], reset_lang=False, raise_err=True, + callback=lambda p: not (cancel_func and cancel_func())) + except subprocess.CalledProcessError as e: + log.error("Running qemu failed:") + log.error("cmd: %s", " ".join(e.cmd)) + log.error("output: %s", e.output or "") + raise InstallError("QEMUInstall failed") + except (OSError, KeyboardInterrupt) as e: + log.error("Running qemu failed: %s", str(e)) + raise InstallError("QEMUInstall failed") + finally: + os.unlink(qemu_initrd) + if boot_uefi and ovmf_path: + os.unlink(ovmf_vars) + + if cancel_func and cancel_func(): + log.error("Installation error detected. See logfile for details.") + raise InstallError("QEMUInstall failed") + else: + log.info("Installation finished without errors.")
+ + +
[docs]def novirt_cancel_check(cancel_funcs, proc): + """ + Check to see if there has been an error in the logs + + :param cancel_funcs: list of functions to call, True from any one cancels the build + :type cancel_funcs: list + :param proc: Popen object for the anaconda process + :type proc: subprocess.Popen + :returns: True if the process has been terminated + + The cancel_funcs functions should return a True if an error has been detected. + When an error is detected the process is terminated and this returns True + """ + for f in cancel_funcs: + if f(): + proc.terminate() + return True + return False
+ + +
[docs]def anaconda_cleanup(dirinstall_path): + """ + Cleanup any leftover mounts from anaconda + + :param str dirinstall_path: Path where anaconda mounts things + :returns: True if cleanups were successful. False if any of them failed. + + If anaconda crashes it may leave things mounted under this path. It will + typically be set to /mnt/sysimage/ + + Attempts to cleanup may also fail. Catch these and continue trying the + other mountpoints. + + Anaconda may also leave /run/anaconda.pid behind, clean that up as well. + """ + # Anaconda may not clean up its /var/run/anaconda.pid file + # Make sure the process is really finished (it should be, since it was started from a subprocess call) + # and then remove the pid file. + if os.path.exists("/var/run/anaconda.pid"): + # lorax-composer runs anaconda using unshare so the pid is always 1 + if open("/var/run/anaconda.pid").read().strip() == "1": + os.unlink("/var/run/anaconda.pid") + + rc = True + dirinstall_path = os.path.abspath(dirinstall_path) + # unmount filesystems + for mounted in reversed(open("/proc/mounts").readlines()): + (_device, mountpoint, _rest) = mounted.split(" ", 2) + if mountpoint.startswith(dirinstall_path) and os.path.ismount(mountpoint): + try: + umount(mountpoint) + except subprocess.CalledProcessError: + log.error("Cleanup of %s failed. See program.log for details", mountpoint) + rc = False + return rc
+ + +
[docs]def novirt_install(opts, disk_img, disk_size, cancel_func=None, tar_img=None): + """ + Use Anaconda to install to a disk image + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str disk_img: The full path to the disk image to be created + :param int disk_size: The size of the disk_img in MiB + :param cancel_func: Function that returns True to cancel build + :type cancel_func: function + :param str tar_img: For make_tar_disk, the path to final tarball to be created + + This method runs anaconda to create the image and then based on the opts + passed creates a qemu disk image or tarfile. + """ + dirinstall_path = ROOT_PATH + + # Clean up /tmp/ from previous runs to prevent stale info from being used + for path in ["/tmp/yum.repos.d/", "/tmp/yum.cache/"]: + if os.path.isdir(path): + shutil.rmtree(path) + + args = ["--kickstart", opts.ks[0], "--cmdline", "--loglevel", "debug"] + if opts.anaconda_args: + for arg in opts.anaconda_args: + args += arg.split(" ", 1) + if opts.proxy: + args += ["--proxy", opts.proxy] + if opts.armplatform: + args += ["--armplatform", opts.armplatform] + + if opts.make_iso or opts.make_fsimage or opts.make_pxe_live: + # Make a blank fs image + args += ["--dirinstall"] + + mkext4img(None, disk_img, label=opts.fs_label, size=disk_size * 1024**2) + if not os.path.isdir(dirinstall_path): + os.mkdir(dirinstall_path) + mount(disk_img, opts="loop", mnt=dirinstall_path) + elif opts.make_tar or opts.make_oci: + # Install under dirinstall_path, make sure it starts clean + if os.path.exists(dirinstall_path): + shutil.rmtree(dirinstall_path) + + if opts.make_oci: + # OCI installs under /rootfs/ + dirinstall_path = joinpaths(dirinstall_path, "rootfs") + args += ["--dirinstall", dirinstall_path] + else: + args += ["--dirinstall"] + + os.makedirs(dirinstall_path) + else: + args += ["--image", disk_img] + + # Create the sparse image + mksparse(disk_img, disk_size * 1024**2) + + log_monitor = LogMonitor(timeout=opts.timeout) + args += ["--remotelog", "%s:%s" % (log_monitor.host, log_monitor.port)] + cancel_funcs = [log_monitor.server.log_check] + if cancel_func is not None: + cancel_funcs.append(cancel_func) + + # Make sure anaconda has the right product and release + # Preload libgomp.so.1 to workaround rhbz#1722181 + log.info("Running anaconda.") + try: + unshare_args = [ "--pid", "--kill-child", "--mount", "--propagation", "unchanged", "anaconda" ] + args + for line in execReadlines("unshare", unshare_args, reset_lang=False, + env_add={"ANACONDA_PRODUCTNAME": opts.project, + "ANACONDA_PRODUCTVERSION": opts.releasever, + "LD_PRELOAD": "libgomp.so.1"}, + callback=lambda p: not novirt_cancel_check(cancel_funcs, p)): + log.info(line) + + # Make sure the new filesystem is correctly labeled + setfiles_args = ["-e", "/proc", "-e", "/sys", + "/etc/selinux/targeted/contexts/files/file_contexts", "/"] + + if "--dirinstall" in args: + # setfiles may not be available, warn instead of fail + try: + execWithRedirect("setfiles", setfiles_args, root=dirinstall_path) + except (subprocess.CalledProcessError, OSError) as e: + log.warning("Running setfiles on install tree failed: %s", str(e)) + else: + with PartitionMount(disk_img) as img_mount: + if img_mount and img_mount.mount_dir: + try: + execWithRedirect("setfiles", setfiles_args, root=img_mount.mount_dir) + except (subprocess.CalledProcessError, OSError) as e: + log.warning("Running setfiles on install tree failed: %s", str(e)) + + # For image installs, run fstrim to discard unused blocks. 
This way + # unused blocks do not need to be allocated for sparse image types + execWithRedirect("fstrim", [img_mount.mount_dir]) + + except (subprocess.CalledProcessError, OSError) as e: + log.error("Running anaconda failed: %s", e) + raise InstallError("novirt_install failed") + finally: + log_monitor.shutdown() + + # Move the anaconda logs over to a log directory + log_dir = os.path.abspath(os.path.dirname(opts.logfile)) + log_anaconda = joinpaths(log_dir, "anaconda") + if not os.path.isdir(log_anaconda): + os.mkdir(log_anaconda) + for l in glob.glob("/tmp/*log")+glob.glob("/tmp/anaconda-tb-*"): + shutil.copy2(l, log_anaconda) + os.unlink(l) + + # Make sure any leftover anaconda mounts have been cleaned up + if not anaconda_cleanup(dirinstall_path): + raise InstallError("novirt_install cleanup of anaconda mounts failed.") + + if not opts.make_iso and not opts.make_fsimage and not opts.make_pxe_live: + dm_name = os.path.splitext(os.path.basename(disk_img))[0] + + # Remove device-mapper for partitions and disk + log.debug("Removing device-mapper setup on %s", dm_name) + for d in sorted(glob.glob("/dev/mapper/"+dm_name+"*"), reverse=True): + dm_detach(d) + + log.debug("Removing loop device for %s", disk_img) + loop_detach("/dev/"+get_loop_name(disk_img)) + + # qemu disk image is used by bare qcow2 images and by Vagrant + if opts.image_type: + log.info("Converting %s to %s", disk_img, opts.image_type) + qemu_args = [] + for arg in opts.qemu_args: + qemu_args += arg.split(" ", 1) + + # convert the image to the selected format + if "-O" not in qemu_args: + qemu_args.extend(["-O", opts.image_type]) + qemu_img = tempfile.mktemp(prefix="lmc-disk-", suffix=".img") + execWithRedirect("qemu-img", ["convert"] + qemu_args + [disk_img, qemu_img], raise_err=True) + if not opts.make_vagrant: + execWithRedirect("mv", ["-f", qemu_img, disk_img], raise_err=True) + else: + # Take the new qcow2 image and package it up for Vagrant + compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + vagrant_dir = tempfile.mkdtemp(prefix="lmc-tmpdir-") + metadata_path = joinpaths(vagrant_dir, "metadata.json") + execWithRedirect("mv", ["-f", qemu_img, joinpaths(vagrant_dir, "box.img")], raise_err=True) + if opts.vagrant_metadata: + shutil.copy2(opts.vagrant_metadata, metadata_path) + else: + create_vagrant_metadata(metadata_path) + update_vagrant_metadata(metadata_path, disk_size) + if opts.vagrantfile: + shutil.copy2(opts.vagrantfile, joinpaths(vagrant_dir, "vagrantfile")) + + log.info("Creating Vagrant image") + rc = mktar(vagrant_dir, disk_img, opts.compression, compress_args, selinux=False) + if rc: + raise InstallError("novirt_install mktar failed: rc=%s" % rc) + shutil.rmtree(vagrant_dir) + elif opts.make_tar: + compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + rc = mktar(dirinstall_path, disk_img, opts.compression, compress_args) + shutil.rmtree(dirinstall_path) + + if rc: + raise InstallError("novirt_install mktar failed: rc=%s" % rc) + elif opts.make_oci: + # An OCI image places the filesystem under /rootfs/ and adds the json files at the top + # And then creates a tar of the whole thing. 
+ compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + shutil.copy2(opts.oci_config, ROOT_PATH) + shutil.copy2(opts.oci_runtime, ROOT_PATH) + rc = mktar(ROOT_PATH, disk_img, opts.compression, compress_args) + + if rc: + raise InstallError("novirt_install mktar failed: rc=%s" % rc) + else: + # For raw disk images, use fallocate to deallocate unused space + execWithRedirect("fallocate", ["--dig-holes", disk_img], raise_err=True) + + # For make_tar_disk, wrap the result in a tar file, and remove the original disk image. + if opts.make_tar_disk: + compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + rc = mktar(disk_img, tar_img, opts.compression, compress_args, selinux=False) + + if rc: + raise InstallError("novirt_install mktar failed: rc=%s" % rc) + + os.unlink(disk_img)
+ +
[docs]def virt_install(opts, install_log, disk_img, disk_size, cancel_func=None, tar_img=None): + """ + Use qemu to install to a disk image + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str install_log: The path to write the log from qemu + :param str disk_img: The full path to the disk image to be created + :param int disk_size: The size of the disk_img in MiB + :param cancel_func: Function that returns True to cancel build + :type cancel_func: function + :param str tar_img: For make_tar_disk, the path to final tarball to be created + + This uses qemu with a boot.iso and a kickstart to create a disk + image and then optionally, based on the opts passed, creates a tarfile. + """ + iso_mount = IsoMountpoint(opts.iso, opts.location) + if not iso_mount.stage2: + iso_mount.umount() + raise InstallError("ISO is missing stage2, cannot continue") + + log_monitor = LogMonitor(install_log, timeout=opts.timeout) + cancel_funcs = [log_monitor.server.log_check] + if cancel_func is not None: + cancel_funcs.append(cancel_func) + + kernel_args = "" + if opts.kernel_args: + kernel_args += opts.kernel_args + if opts.proxy: + kernel_args += " proxy="+opts.proxy + + if opts.image_type and not opts.make_fsimage: + qemu_args = [] + for arg in opts.qemu_args: + qemu_args += arg.split(" ", 1) + if "-f" not in qemu_args: + qemu_args += ["-f", opts.image_type] + + mkqemu_img(disk_img, disk_size*1024**2, qemu_args) + + if opts.make_fsimage or opts.make_tar or opts.make_oci: + diskimg_path = tempfile.mktemp(prefix="lmc-disk-", suffix=".img") + else: + diskimg_path = disk_img + + try: + QEMUInstall(opts, iso_mount, opts.ks, diskimg_path, disk_size, + kernel_args, opts.ram, opts.vcpus, opts.vnc, opts.arch, + cancel_func = lambda : any(f() for f in cancel_funcs), + virtio_host = log_monitor.host, + virtio_port = log_monitor.port, + image_type=opts.image_type, boot_uefi=opts.virt_uefi, + ovmf_path=opts.ovmf_path) + log_monitor.shutdown() + except InstallError as e: + log.error("VirtualInstall failed: %s", e) + raise + finally: + log.info("unmounting the iso") + iso_mount.umount() + + if log_monitor.server.log_check(): + if not log_monitor.server.error_line and opts.timeout: + msg = "virt_install failed due to timeout" + else: + msg = "virt_install failed on line: %s" % log_monitor.server.error_line + raise InstallError(msg) + elif cancel_func and cancel_func(): + raise InstallError("virt_install canceled by cancel_func") + + if opts.make_fsimage: + mkfsimage_from_disk(diskimg_path, disk_img, disk_size, label=opts.fs_label) + os.unlink(diskimg_path) + elif opts.make_tar: + compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + with PartitionMount(diskimg_path) as img_mount: + if img_mount and img_mount.mount_dir: + rc = mktar(img_mount.mount_dir, disk_img, opts.compression, compress_args) + else: + rc = 1 + os.unlink(diskimg_path) + + if rc: + raise InstallError("virt_install failed") + elif opts.make_oci: + # An OCI image places the filesystem under /rootfs/ and adds the json files at the top + # And then creates a tar of the whole thing.
+ compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + with PartitionMount(diskimg_path, submount="rootfs") as img_mount: + if img_mount and img_mount.temp_dir: + shutil.copy2(opts.oci_config, img_mount.temp_dir) + shutil.copy2(opts.oci_runtime, img_mount.temp_dir) + rc = mktar(img_mount.temp_dir, disk_img, opts.compression, compress_args) + else: + rc = 1 + os.unlink(diskimg_path) + + if rc: + raise InstallError("virt_install failed") + elif opts.make_vagrant: + compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + vagrant_dir = tempfile.mkdtemp(prefix="lmc-tmpdir-") + metadata_path = joinpaths(vagrant_dir, "metadata.json") + execWithRedirect("mv", ["-f", disk_img, joinpaths(vagrant_dir, "box.img")], raise_err=True) + if opts.vagrant_metadata: + shutil.copy2(opts.vagrant_metadata, metadata_path) + else: + create_vagrant_metadata(metadata_path) + update_vagrant_metadata(metadata_path, disk_size) + if opts.vagrantfile: + shutil.copy2(opts.vagrantfile, joinpaths(vagrant_dir, "vagrantfile")) + + rc = mktar(vagrant_dir, disk_img, opts.compression, compress_args, selinux=False) + if rc: + raise InstallError("virt_install failed") + shutil.rmtree(vagrant_dir) + + # For make_tar_disk, wrap the result in a tar file, and remove the original disk image. + if opts.make_tar_disk: + compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + rc = mktar(disk_img, tar_img, opts.compression, compress_args, selinux=False) + + if rc: + raise InstallError("virt_install mktar failed: rc=%s" % rc) + + os.unlink(disk_img)
+
\ No newline at end of file
diff --git a/f32-branch/_modules/pylorax/ltmpl.html b/f32-branch/_modules/pylorax/ltmpl.html
new file mode 100644
index 00000000..bfd49771
--- /dev/null
+++ b/f32-branch/_modules/pylorax/ltmpl.html
@@ -0,0 +1,1085 @@
+pylorax.ltmpl — Lorax 32.12 documentation

Source code for pylorax.ltmpl

+#
+# ltmpl.py
+#
+# Copyright (C) 2009-2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#                     Will Woods <wwoods@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.ltmpl")
+
+import os, re, glob, shlex, fnmatch
+from os.path import basename, isdir
+from subprocess import CalledProcessError
+import shutil
+
+from pylorax.sysutils import joinpaths, cpfile, mvfile, replace, remove
+from pylorax.dnfhelper import LoraxDownloadCallback, LoraxRpmCallback
+from pylorax.base import DataHolder
+from pylorax.executils import runcmd, runcmd_output
+from pylorax.imgutils import mkcpio
+
+from mako.lookup import TemplateLookup
+from mako.exceptions import text_error_template
+import sys, traceback
+import struct
+import dnf
+import collections.abc
+
+
[docs]class LoraxTemplate(object): + def __init__(self, directories=None): + directories = directories or ["/usr/share/lorax"] + # we have to add ["/"] to the template lookup directories or the + # file includes won't work properly for absolute paths + self.directories = ["/"] + directories + +
[docs] def parse(self, template_file, variables): + lookup = TemplateLookup(directories=self.directories) + template = lookup.get_template(template_file) + + try: + textbuf = template.render(**variables) + except: + logger.error("Problem rendering %s (%s):", template_file, variables) + logger.error(text_error_template().render()) + raise + + # split, strip and remove empty lines + lines = textbuf.splitlines() + lines = [line.strip() for line in lines] + lines = [line for line in lines if line] + + # remove comments + lines = [line for line in lines if not line.startswith("#")] + + # split with shlex and perform brace expansion. This can fail, so we unroll the loop + # for better error reporting. + expanded_lines = [] + try: + for line in lines: + expanded_lines.append(split_and_expand(line)) + except Exception as e: + logger.error('shlex error processing "%s": %s', line, str(e)) + raise + return expanded_lines
+ +
[docs]def split_and_expand(line): + return [exp for word in shlex.split(line) for exp in brace_expand(word)]
+ +
[docs]def brace_expand(s): + if not ('{' in s and ',' in s and '}' in s): + yield s + else: + right = s.find('}') + left = s[:right].rfind('{') + (prefix, choices, suffix) = (s[:left], s[left+1:right], s[right+1:]) + for choice in choices.split(','): + for alt in brace_expand(prefix+choice+suffix): + yield alt
+ +
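Worked examples for the two helpers above:

    list(brace_expand("images/{boot,efiboot}.img"))
    # -> ["images/boot.img", "images/efiboot.img"]
    split_and_expand("install boot/{grub,isolinux}/* /boot")
    # -> ["install", "boot/grub/*", "boot/isolinux/*", "/boot"]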
[docs]def rglob(pathname, root="/", fatal=False): + seen = set() + rootlen = len(root)+1 + for f in glob.iglob(joinpaths(root, pathname)): + if f not in seen: + seen.add(f) + yield f[rootlen:] # remove the root to produce relative path + if fatal and not seen: + raise IOError("nothing matching %s in %s" % (pathname, root))
+ +
[docs]def rexists(pathname, root=""): + # Generator is always True, even with no values; + # bool(rglob(...)) won't work here. + for _path in rglob(pathname, root): + return True + return False
+ +
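A usage sketch with a hypothetical root: rglob yields paths relative to root, and rexists is the existence test built on top of it:

    for relpath in rglob("boot/vmlinuz-*", root="/mnt/sysimage"):
        print(relpath)   # e.g. boot/vmlinuz-...
    if not rexists("etc/machine-id", root="/mnt/sysimage"):
        ...  # handle the missing file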
[docs]class TemplateRunner(object): + ''' + This class parses and executes Lorax templates. Sample usage: + + # install a bunch of packages + runner = LoraxTemplateRunner(inroot=rundir, outroot=rundir, dbo=dnf_obj) + runner.run("install-packages.ltmpl") + + NOTES: + + * Parsing procedure is roughly: + 1. Mako template expansion (on the whole file) + 2. For each line of the result, + + a. Whitespace splitting (using shlex.split()) + b. Brace expansion (using brace_expand()) + c. If the first token is the name of a function, call that function + with the rest of the line as arguments + + * Parsing and execution are *separate* passes - so you can't use the result + of a command in an %if statement (or any other control statements)! + ''' + def __init__(self, fatalerrors=True, templatedir=None, defaults=None, builtins=None): + self.fatalerrors = fatalerrors + self.templatedir = templatedir or "/usr/share/lorax" + self.templatefile = None + self.builtins = builtins or {} + self.defaults = defaults or {} + + +
[docs] def run(self, templatefile, **variables): + for k,v in list(self.defaults.items()) + list(self.builtins.items()): + variables.setdefault(k,v) + logger.debug("executing %s with variables=%s", templatefile, variables) + self.templatefile = templatefile + t = LoraxTemplate(directories=[self.templatedir]) + commands = t.parse(templatefile, variables) + self._run(commands)
+ + + def _run(self, parsed_template): + logger.info("running %s", self.templatefile) + for (num, line) in enumerate(parsed_template,1): + logger.debug("template line %i: %s", num, " ".join(line)) + skiperror = False + (cmd, args) = (line[0], line[1:]) + # Following Makefile convention, if the command is prefixed with + # a dash ('-'), we'll ignore any errors on that line. + if cmd.startswith('-'): + cmd = cmd[1:] + skiperror = True + try: + # grab the method named in cmd and pass it the given arguments + f = getattr(self, cmd, None) + if cmd[0] == '_' or cmd == 'run' or not isinstance(f, collections.abc.Callable): + raise ValueError("unknown command %s" % cmd) + f(*args) + except Exception: # pylint: disable=broad-except + if skiperror: + logger.debug("ignoring error") + continue + logger.error("template command error in %s:", self.templatefile) + logger.error(" %s", " ".join(line)) + # format the exception traceback + exclines = traceback.format_exception(*sys.exc_info()) + # skip the bit about "ltmpl.py, in _run()" - we know that + exclines.pop(1) + # log the "ErrorType: this is what happened" line + logger.error(" %s", exclines[-1].strip()) + # and log the entire traceback to the debug log + for _line in ''.join(exclines).splitlines(): + logger.debug(" %s", _line) + if self.fatalerrors: + raise
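To make the parsing rules concrete, here is a hypothetical template fragment: after Mako expansion, each surviving line is shlex-split, brace-expanded, and dispatched to the method named by its first token, and a leading '-' marks a line whose errors are ignored:

    %if basearch == "x86_64":
    installpkg grub2-efi-x64 shim-x64
    %endif
    mkdir /images
    -remove usr/share/doc/*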
+ + +# TODO: operate inside an actual chroot for safety? Not that RPM bothers.. +
[docs]class LoraxTemplateRunner(TemplateRunner): + ''' + This class parses and executes Lorax templates. Sample usage: + + # install a bunch of packages + runner = LoraxTemplateRunner(inroot=rundir, outroot=rundir, dbo=dnf_obj) + runner.run("install-packages.ltmpl") + + # modify a runtime dir + runner = LoraxTemplateRunner(inroot=rundir, outroot=newrun) + runner.run("runtime-transmogrify.ltmpl") + + NOTES: + + * Commands that run external programs (e.g. systemctl) currently use + the *host*'s copy of that program, which may cause problems if there's a + big enough difference between the host and the image you're modifying. + + * The commands are not executed under a real chroot, so absolute symlinks + will point *outside* the inroot/outroot. Be careful with symlinks! + + ADDING NEW COMMANDS: + + * Each template command is just a method of the LoraxTemplateRunner + object - so adding a new command is as easy as adding a new function. + + * Each function gets arguments that correspond to the rest of the tokens + on that line (after word splitting and brace expansion) + + * Commands should raise exceptions for errors - don't use sys.exit() + ''' + def __init__(self, inroot, outroot, dbo=None, fatalerrors=True, + templatedir=None, defaults=None): + self.inroot = inroot + self.outroot = outroot + self.dbo = dbo + builtins = DataHolder(exists=lambda p: rexists(p, root=inroot), + glob=lambda g: list(rglob(g, root=inroot))) + self.results = DataHolder(treeinfo=dict()) # just treeinfo for now + + super(LoraxTemplateRunner, self).__init__(fatalerrors, templatedir, defaults, builtins) + # TODO: set up custom logger with a filter to add line info + + def _out(self, path): + return joinpaths(self.outroot, path) + def _in(self, path): + return joinpaths(self.inroot, path) + + def _filelist(self, *pkgs): + """ Return the list of files in the packages """ + pkglist = [] + for pkg_glob in pkgs: + pkglist += list(self.dbo.sack.query().installed().filter(name__glob=pkg_glob)) + + # dnf/hawkey doesn't make any distinction between file, dir or ghost like yum did + # so only return the files. + return set(f for pkg in pkglist for f in pkg.files if not os.path.isdir(self._out(f))) + + def _getsize(self, *files): + return sum(os.path.getsize(self._out(f)) for f in files if os.path.isfile(self._out(f))) + + def _write_package_log(self): + """ + Write the list of installed packages to /root/ on the boot.iso + + If lorax is called with a debug repo find the corresponding debuginfo package + names and write them to /root/debug-pkgs.log on the boot.iso. + The non-debuginfo packages are written to /root/lorax-packages.log + """ + os.makedirs(self._out("root/"), exist_ok=True) + available = self.dbo.sack.query().available() + pkgs = [] + debug_pkgs = [] + for p in list(self.dbo.transaction.install_set): + pkgs.append(f"{p.name}-{p.version}-{p.release}.{p.arch}") + if available.filter(name=p.name+"-debuginfo"): + debug_pkgs.append(f"{p.name}-debuginfo-{p.epoch}:{p.version}-{p.release}") + + with open(self._out("root/lorax-packages.log"), "w") as f: + f.write("\n".join(sorted(pkgs))) + f.write("\n") + + if debug_pkgs: + with open(self._out("root/debug-pkgs.log"), "w") as f: + f.write("\n".join(sorted(debug_pkgs))) + f.write("\n")
[docs] def install(self, srcglob, dest): + ''' + install SRC DEST + Copy the given file (or files, if a glob is used) from the input + tree to the given destination in the output tree. + The path to DEST must exist in the output tree. + If DEST is a directory, SRC will be copied into that directory. + If DEST doesn't exist, SRC will be copied to a file with that name, + assuming the rest of the path exists. + This is pretty much like how the 'cp' command works. + + Examples: + install usr/share/myconfig/grub.conf /boot + install /usr/share/myconfig/grub.conf.in /boot/grub.conf + ''' + for src in rglob(self._in(srcglob), fatal=True): + try: + cpfile(src, self._out(dest)) + except shutil.Error as e: + logger.error(e)
+ +
[docs] def installimg(self, *args): + ''' + installimg [--xz|--gzip|--bzip2|--lzma] [-ARG|--ARG=OPTION] SRCDIR DESTFILE + Create a compressed cpio archive of the contents of SRCDIR and place + it in DESTFILE. + + If SRCDIR doesn't exist or is empty nothing is created. + + Examples: + installimg ${LORAXDIR}/product/ images/product.img + installimg ${LORAXDIR}/updates/ images/updates.img + installimg --xz -6 ${LORAXDIR}/updates/ images/updates.img + installimg --xz -9 --memlimit-compress=3700MiB ${LORAXDIR}/updates/ images/updates.img + + Optionally use a different compression type and override the default args + passed to it. The default is xz -9 + ''' + COMPRESSORS = ("--xz", "--gzip", "--bzip2", "--lzma") + if len(args) < 2: + raise ValueError("Not enough args for installimg.") + + srcdir = args[-2] + destfile = args[-1] + if not os.path.isdir(self._in(srcdir)) or not os.listdir(self._in(srcdir)): + return + + compression = "xz" + compressargs = [] + if args[0] in COMPRESSORS: + compression = args[0][2:] + + for arg in args[1:-2]: + if arg.startswith('-'): + compressargs.append(arg) + else: + raise ValueError("Argument is missing -") + + logger.info("Creating image file %s from contents of %s", self._out(destfile), self._in(srcdir)) + logger.debug("Using %s %s compression", compression, compressargs or "") + mkcpio(self._in(srcdir), self._out(destfile), compression=compression, compressargs=compressargs)
+ +
[docs] def mkdir(self, *dirs): + ''' + mkdir DIR [DIR ...] + Create the named DIR(s). Will create leading directories as needed. + + Example: + mkdir /images + ''' + for d in dirs: + d = self._out(d) + if not isdir(d): + os.makedirs(d)
+ +
[docs] def replace(self, pat, repl, *fileglobs): + ''' + replace PATTERN REPLACEMENT FILEGLOB [FILEGLOB ...] + Find-and-replace the given PATTERN (Python-style regex) with the given + REPLACEMENT string for each of the files listed. + + Example: + replace @VERSION@ ${product.version} /boot/grub.conf /boot/isolinux.cfg + ''' + match = False + for g in fileglobs: + for f in rglob(self._out(g)): + match = True + replace(f, pat, repl) + if not match: + raise IOError("no files matched %s" % " ".join(fileglobs))
+ +
[docs] def append(self, filename, data): + ''' + append FILE STRING + Append STRING (followed by a newline character) to FILE. + Python character escape sequences ('\\n', '\\t', etc.) will be + converted to the appropriate characters. + + Examples: + + append /etc/depmod.d/dd.conf "search updates built-in" + append /etc/resolv.conf "" + ''' + with open(self._out(filename), "a") as fobj: + fobj.write(bytes(data, "utf8").decode('unicode_escape')+"\n")
+ +
[docs] def treeinfo(self, section, key, *valuetoks): + ''' + treeinfo SECTION KEY ARG [ARG ...] + Add an item to the treeinfo data store. + The given SECTION will have a new item added where + KEY = ARG ARG ... + + Example: + treeinfo images-${kernel.arch} boot.iso images/boot.iso + ''' + if section not in self.results.treeinfo: + self.results.treeinfo[section] = dict() + self.results.treeinfo[section][key] = " ".join(valuetoks)
+ +
[docs] def installkernel(self, section, src, dest): + ''' + installkernel SECTION SRC DEST + Install the kernel from SRC in the input tree to DEST in the output + tree, and then add an item to the treeinfo data store, in the named + SECTION, where "kernel" = DEST. + + Equivalent to: + install SRC DEST + treeinfo SECTION kernel DEST + ''' + self.install(src, dest) + self.treeinfo(section, "kernel", dest)
+ +
[docs] def installinitrd(self, section, src, dest): + ''' + installinitrd SECTION SRC DEST + Same as installkernel, but for "initrd". + ''' + self.install(src, dest) + self.chmod(dest, '644') + self.treeinfo(section, "initrd", dest)
+ +
[docs] def installupgradeinitrd(self, section, src, dest): + ''' + installupgradeinitrd SECTION SRC DEST + Same as installkernel, but for "upgrade". + ''' + self.install(src, dest) + self.chmod(dest, '644') + self.treeinfo(section, "upgrade", dest)
+ + + + + +
[docs] def copy(self, src, dest): + ''' + copy SRC DEST + Copy SRC to DEST. + If DEST is a directory, SRC will be copied inside it. + If DEST doesn't exist, SRC will be copied to a file with + that name, if the path leading to it exists. + ''' + try: + cpfile(self._out(src), self._out(dest)) + except shutil.Error as e: + logger.error(e)
+ +
[docs] def move(self, src, dest): + ''' + move SRC DEST + Move SRC to DEST. + ''' + mvfile(self._out(src), self._out(dest))
+ +
[docs] def remove(self, *fileglobs): + ''' + remove FILEGLOB [FILEGLOB ...] + Remove all the named files or directories. + Will *not* raise exceptions if the file(s) are not found. + ''' + for g in fileglobs: + for f in rglob(self._out(g)): + remove(f) + logger.debug("removed %s", f)
+ +
[docs] def chmod(self, fileglob, mode): + ''' + chmod FILEGLOB OCTALMODE + Change the mode of all the files matching FILEGLOB to OCTALMODE. + ''' + for f in rglob(self._out(fileglob), fatal=True): + os.chmod(f, int(mode,8))
+ +
[docs] def log(self, msg): + ''' + log MESSAGE + Emit the given log message. Be sure to put it in quotes! + + Example: + log "Reticulating splines, please wait..." + ''' + logger.info(msg)
+ + # TODO: add ssh-keygen, mkisofs(?), find, and other useful commands +
[docs] def runcmd(self, *cmdlist): + ''' + runcmd CMD [ARG ...] + Run the given command with the given arguments. + + NOTE: All paths given MUST be COMPLETE, ABSOLUTE PATHS to the file + or files mentioned. ${root}/${inroot}/${outroot} are good for + constructing these paths. + + FURTHER NOTE: Please use this command only as a last resort! + Whenever possible, you should use the existing template commands. + If the existing commands don't do what you need, fix them! + + Examples: + (this should be replaced with a "find" function) + runcmd find ${root} -name "*.pyo" -type f -delete + %for f in find(root, name="*.pyo"): + remove ${f} + %endfor + ''' + cmd = cmdlist + logger.debug('running command: %s', cmd) + if cmd[0].startswith("--chdir="): + logger.error("--chdir is no longer supported for runcmd.") + raise ValueError("--chdir is no longer supported for runcmd.") + + try: + stdout = runcmd_output(cmd) + if stdout: + logger.debug('command output:\n%s', stdout) + logger.debug("command finished successfully") + except CalledProcessError as e: + if e.output: + logger.error('command output:\n%s', e.output) + logger.error('command returned failure (%d)', e.returncode) + raise
+ +
[docs] def installpkg(self, *pkgs): + ''' + installpkg [--required|--optional] [--except PKGGLOB [--except PKGGLOB ...]] PKGGLOB [PKGGLOB ...] + Request installation of all packages matching the given globs. + Note that this is just a *request* - nothing is *actually* installed + until the 'run_pkg_transaction' command is given. + + --required is now the default. If the PKGGLOB is allowed to be missing, pass --optional + ''' + if pkgs[0] == '--optional': + pkgs = pkgs[1:] + required = False + elif pkgs[0] == '--required': + pkgs = pkgs[1:] + required = True + else: + required = True + + excludes = [] + while '--except' in pkgs: + idx = pkgs.index('--except') + if len(pkgs) == idx+1: + raise ValueError("installpkg needs an argument after --except") + + excludes.append(pkgs[idx+1]) + pkgs = pkgs[:idx] + pkgs[idx+2:] + + errors = False + for p in pkgs: + try: + # Start by using Subject to generate a package query, which will + # give us a query object similar to what dbo.install would select, + # minus the handling for multilib. This query may contain + # multiple arches. Pull the package names out of that, filter any + # that match the excludes patterns, and pass those names back to + # dbo.install to do the actual, arch and version and multilib + # aware, package selection. + + # dnf queries don't have a concept of negative globs which is why + # the filtering is done the hard way. + + pkgnames = [pkg for pkg in dnf.subject.Subject(p).get_best_query(self.dbo.sack).filter(latest=True)] + if not pkgnames: + raise dnf.exceptions.PackageNotFoundError("no package matched", p) + + # Apply excludes to the name only + for exclude in excludes: + pkgnames = [pkg for pkg in pkgnames if not fnmatch.fnmatch(pkg.name, exclude)] + + # Convert to a sorted NVR list for installation + pkgnvrs = sorted(["{}-{}-{}".format(pkg.name, pkg.version, pkg.release) for pkg in pkgnames]) + + # If the request is a glob, expand it in the log + if any(g for g in ['*','?','.'] if g in p): + logger.info("installpkg: %s expands to %s", p, ",".join(pkgnvrs)) + + for pkgnvr in pkgnvrs: + try: + self.dbo.install(pkgnvr) + except Exception as e: # pylint: disable=broad-except + if required: + raise + # Not required, log it and continue processing pkgs + logger.error("installpkg %s failed: %s", pkgnvr, str(e)) + except Exception as e: # pylint: disable=broad-except + logger.error("installpkg %s failed: %s", p, str(e)) + errors = True + + if errors and required: + raise Exception("Required installpkg failed.")
+ +
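To make the ``--except`` handling in ``installpkg`` concrete, here is the same name-only ``fnmatch`` filtering run against a made-up package list (the real code filters dnf package objects by ``pkg.name`` rather than plain strings):

    import fnmatch

    # hypothetical query results; excludes match the package *name* only
    pkgnames = ["kernel", "kernel-core", "kernel-debug", "grub2"]
    excludes = ["kernel-debug*"]

    for exclude in excludes:
        pkgnames = [n for n in pkgnames if not fnmatch.fnmatch(n, exclude)]

    print(pkgnames)  # ['kernel', 'kernel-core', 'grub2']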
[docs] def removepkg(self, *pkgs): + ''' + removepkg PKGGLOB [PKGGLOB...] + Delete the named package(s). + + IMPLEMENTATION NOTES: + RPM scriptlets (%preun/%postun) are *not* run. + Files are deleted, but directories are left behind. + ''' + for p in pkgs: + filepaths = [f.lstrip('/') for f in self._filelist(p)] + # TODO: also remove directories that aren't owned by anything else + if filepaths: + logger.debug("removepkg %s: %ikb", p, self._getsize(*filepaths)/1024) + self.remove(*filepaths) + else: + logger.debug("removepkg %s: no files to remove!", p)
+ +
[docs] def run_pkg_transaction(self): + ''' + run_pkg_transaction + Actually install all the packages requested by previous 'installpkg' + commands. + ''' + try: + logger.info("Checking dependencies") + self.dbo.resolve() + except dnf.exceptions.DepsolveError as e: + logger.error("Dependency check failed: %s", e) + raise + logger.info("%d packages selected", len(self.dbo.transaction)) + if len(self.dbo.transaction) == 0: + raise Exception("No packages in transaction") + + # Write out the packages installed, including debuginfo packages + self._write_package_log() + + pkgs_to_download = self.dbo.transaction.install_set + logger.info("Downloading packages") + progress = LoraxDownloadCallback() + try: + self.dbo.download_packages(pkgs_to_download, progress) + except dnf.exceptions.DownloadError as e: + logger.error("Failed to download the following packages: %s", e) + raise + + logger.info("Preparing transaction from installation source") + try: + display = LoraxRpmCallback() + self.dbo.do_transaction(display=display) + except BaseException as e: + logger.error("The transaction process has ended abruptly: %s", e) + raise + + # Reset the package sack to pick up the installed packages + self.dbo.reset(repos=False) + self.dbo.fill_sack(load_system_repo=True, load_available_repos=False) + + # At this point dnf should know about the installed files. Double check that it really does. + if len(self._filelist("anaconda-core")) == 0: + raise Exception("Failed to reset dbo to installed package set")
+ +
[docs] def removefrom(self, pkg, *globs): + ''' + removefrom PKGGLOB [--allbut] FILEGLOB [FILEGLOB...] + Remove all files matching the given file globs from the package + (or packages) named. + If '--allbut' is used, all the files from the given package(s) will + be removed *except* the ones which match the file globs. + + Examples: + removefrom usbutils /usr/bin/* + removefrom xfsprogs --allbut /sbin/* + ''' + cmd = "%s %s" % (pkg, " ".join(globs)) # save for later logging + keepmatches = False + if globs[0] == '--allbut': + keepmatches = True + globs = globs[1:] + # get pkg filelist and find files that match the globs + filelist = self._filelist(pkg) + matches = set() + for g in globs: + globs_re = re.compile(fnmatch.translate(g)) + m = [f for f in filelist if globs_re.match(f)] + if m: + matches.update(m) + else: + logger.debug("removefrom %s %s: no files matched!", pkg, g) + # are we removing the matches, or keeping only the matches? + if keepmatches: + remove_files = filelist.difference(matches) + else: + remove_files = matches + # remove the files + if remove_files: + logger.debug("removefrom %s: removed %i/%i files, %ikb/%ikb", cmd, + len(remove_files), len(filelist), + self._getsize(*remove_files)/1024, self._getsize(*filelist)/1024) + self.remove(*remove_files) + else: + logger.debug("removefrom %s: no files to remove!", cmd)
+ + # pylint: disable=anomalous-backslash-in-string +
[docs] def removekmod(self, *globs): + ''' + removekmod GLOB [GLOB...] [--allbut] KEEPGLOB [KEEPGLOB...] + Remove all files and directories matching the given file globs from the kernel + modules directory. + + If '--allbut' is used, all the files from the modules will be removed *except* + the ones which match the file globs. There must be at least one initial GLOB + to search and one KEEPGLOB to keep. The KEEPGLOB is expanded to be *KEEPGLOB* + so that it will match anywhere in the path. + + This only removes files from under /lib/modules/\*/kernel/ + + Examples: + removekmod sound drivers/media drivers/hwmon drivers/video + removekmod drivers/char --allbut virtio_console hw_random + ''' + cmd = " ".join(globs) + if "--allbut" in globs: + idx = globs.index("--allbut") + if idx == 0: + raise ValueError("removekmod needs at least one GLOB before --allbut") + + # Apply keepglobs anywhere they appear in the path + keepglobs = globs[idx+1:] + if len(keepglobs) == 0: + raise ValueError("removekmod needs at least one GLOB after --allbut") + + globs = globs[:idx] + else: + # Nothing to keep + keepglobs = [] + + filelist = set() + for g in globs: + for top_dir in rglob(self._out("/lib/modules/*/kernel/"+g)): + for root, _dirs, files in os.walk(top_dir): + filelist.update(root+"/"+f for f in files) + + # Remove anything matching keepglobs from the list + matches = set() + for g in keepglobs: + globs_re = re.compile(fnmatch.translate("*"+g+"*")) + m = [f for f in filelist if globs_re.match(f)] + if m: + matches.update(m) + else: + logger.debug("removekmod %s: no files matched!", g) + remove_files = filelist.difference(matches) + + if remove_files: + logger.debug("removekmod: removing %d files", len(remove_files)) + list(remove(f) for f in remove_files) + else: + logger.debug("removekmod %s: no files to remove!", cmd)
+ +
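Both ``removefrom`` and ``removekmod`` match paths by converting shell globs into regular expressions with ``fnmatch.translate()``; ``removekmod`` additionally wraps each KEEPGLOB as ``*KEEPGLOB*`` so it matches anywhere in the path. A standalone illustration with made-up module paths:

    import fnmatch
    import re

    filelist = {"lib/modules/5.6.8/kernel/drivers/char/hw_random/virtio-rng.ko",
                "lib/modules/5.6.8/kernel/drivers/char/lp.ko"}

    # a KEEPGLOB of 'hw_random' becomes the regex for '*hw_random*'
    keep_re = re.compile(fnmatch.translate("*" + "hw_random" + "*"))
    kept = {f for f in filelist if keep_re.match(f)}
    print(sorted(kept))  # only the hw_random path survives the keep filter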
[docs] def createaddrsize(self, addr, src, dest):
+        '''
+        createaddrsize INITRD_ADDRESS INITRD ADDRSIZE
+        Create the initrd.addrsize file required by the LPAR boot process.
+
+        Examples:
+          createaddrsize ${INITRD_ADDRESS} ${outroot}/${BOOTDIR}/initrd.img ${outroot}/${BOOTDIR}/initrd.addrsize
+        '''
+        addrsize_data = struct.pack(">iiii", 0, int(addr, 16), 0, os.stat(src).st_size)
+        with open(dest, "wb") as addrsize:
+            addrsize.write(addrsize_data)
+ +
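The ``initrd.addrsize`` file written by ``createaddrsize`` is four big-endian 32-bit integers: a pad, the initrd load address, another pad, and the initrd size in bytes. Reading one back (the path is illustrative):

    import struct

    with open("initrd.addrsize", "rb") as f:
        _pad1, addr, _pad2, size = struct.unpack(">iiii", f.read(16))
    print(hex(addr), size)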
[docs] def systemctl(self, cmd, *units): + ''' + systemctl [enable|disable|mask] UNIT [UNIT...] + Enable, disable, or mask the given systemd units. + + Examples: + systemctl disable lvm2-monitor.service + systemctl mask fedora-storage-init.service fedora-configure.service + ''' + if cmd not in ('enable', 'disable', 'mask'): + raise ValueError('unsupported systemctl cmd: %s' % cmd) + if not units: + logger.debug("systemctl: no units given for %s, ignoring", cmd) + return + self.mkdir("/run/systemd/system") # XXX workaround for systemctl bug + systemctl = ['systemctl', '--root', self.outroot, '--no-reload', cmd] + # When a unit doesn't exist systemd aborts the command. Run them one at a time. + # XXX for some reason 'systemctl enable/disable' always returns 1 + for unit in units: + try: + cmd = systemctl + [unit] + runcmd(cmd) + except CalledProcessError: + pass
+ +
[docs]class LiveTemplateRunner(TemplateRunner):
+    """
+    This class parses and executes a limited Lorax template. Sample usage:
+
+    # install a bunch of packages
+    runner = LiveTemplateRunner(dbo, templatedir, defaults)
+    runner.run("live-install.tmpl")
+
+    It is meant to be used with the live-install.tmpl, which lists the per-arch
+    packages needed to build the live-iso output.
+    """
+    def __init__(self, dbo, fatalerrors=True, templatedir=None, defaults=None):
+        self.dbo = dbo
+        self.pkgs = []
+        self.pkgnames = []
+
+        super(LiveTemplateRunner, self).__init__(fatalerrors, templatedir, defaults)
[docs] def installpkg(self, *pkgs):
+        '''
+        installpkg [--required|--optional] [--except PKGGLOB [--except PKGGLOB ...]] PKGGLOB [PKGGLOB ...]
+        Request installation of all packages matching the given globs.
+        Note that this is just a *request* - nothing is *actually* installed
+        until the 'run_pkg_transaction' command is given.
+
+        --required is now the default. If the PKGGLOB may be missing, pass --optional
+        '''
+        if pkgs[0] == '--optional':
+            pkgs = pkgs[1:]
+            required = False
+        elif pkgs[0] == '--required':
+            pkgs = pkgs[1:]
+            required = True
+        else:
+            required = True
+
+        excludes = []
+        while '--except' in pkgs:
+            idx = pkgs.index('--except')
+            if len(pkgs) == idx+1:
+                raise ValueError("installpkg needs an argument after --except")
+
+            excludes.append(pkgs[idx+1])
+            pkgs = pkgs[:idx] + pkgs[idx+2:]
+
+        errors = False
+        for p in pkgs:
+            try:
+                # Start by using Subject to generate a package query, which will
+                # give us a query object similar to what dbo.install would select,
+                # minus the handling for multilib. This query may contain
+                # multiple arches. Pull the package names out of that, filter any
+                # that match the excludes patterns, and pass those names back to
+                # dbo.install to do the actual, arch and version and multilib
+                # aware, package selection.
+
+                # dnf queries don't have a concept of negative globs, which is why
+                # the filtering is done the hard way.
+
+                pkgnames = [pkg for pkg in dnf.subject.Subject(p).get_best_query(self.dbo.sack).filter(latest=True)]
+                if not pkgnames:
+                    raise dnf.exceptions.PackageNotFoundError("no package matched", p)
+
+                # Apply excludes to the name only
+                for exclude in excludes:
+                    pkgnames = [pkg for pkg in pkgnames if not fnmatch.fnmatch(pkg.name, exclude)]
+
+                # Convert to a sorted NVR list for installation
+                pkgnvrs = sorted(["{}-{}-{}".format(pkg.name, pkg.version, pkg.release) for pkg in pkgnames])
+
+                # If the request is a glob, expand it in the log
+                if any(g for g in ['*','?','.'] if g in p):
+                    logger.info("installpkg: %s expands to %s", p, ",".join(pkgnvrs))
+
+                self.pkgs.extend(pkgnvrs)
+                self.pkgnames.extend([pkg.name for pkg in pkgnames])
+            except Exception as e: # pylint: disable=broad-except
+                logger.error("installpkg %s failed: %s", p, str(e))
+                errors = True
+
+        if errors and required:
+            raise Exception("Required installpkg failed.")
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/monitor.html b/f32-branch/_modules/pylorax/monitor.html new file mode 100644 index 00000000..b2bd092a --- /dev/null +++ b/f32-branch/_modules/pylorax/monitor.html @@ -0,0 +1,404 @@ + + + + + + + + + + + pylorax.monitor — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.monitor

+# monitor.py
+#
+# Copyright (C) 2011-2015  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s): Brian C. Lane <bcl@redhat.com>
+#
+import logging
+log = logging.getLogger("livemedia-creator")
+
+import re
+import socket
+import socketserver
+import threading
+import time
+
+
[docs]class LogRequestHandler(socketserver.BaseRequestHandler): + """ + Handle monitoring and saving the logfiles from the virtual install + + Incoming data is written to self.server.log_path and each line is checked + for patterns that would indicate that the installation failed. + self.server.log_error is set True when this happens. + """ + + simple_tests = [ + "Traceback (", + "traceback script(s) have been run", + "Out of memory:", + "Call Trace:", + "insufficient disk space:", + "Not enough disk space to download the packages", + "error populating transaction after", + "crashed on signal", + "packaging: Missed: NoSuchPackage", + "packaging: Installation failed", + "The following error occurred while installing. This is a fatal error" + ] + + re_tests = [ + r"packaging: base repo .* not valid", + r"packaging: .* requires .*" + ] + +
[docs] def setup(self): + """Start writing to self.server.log_path""" + + if self.server.log_path: + self.fp = open(self.server.log_path, "w") # pylint: disable=attribute-defined-outside-init + else: + self.fp = None + self.request.settimeout(10)
+ +
[docs] def handle(self): + """ + Write incoming data to a logfile and check for errors + + Split incoming data into lines and check for any Tracebacks or other + errors that indicate that the install failed. + + Loops until self.server.kill is True + """ + log.info("Processing logs from %s", self.client_address) + line = "" + while True: + if self.server.kill: + break + + try: + data = str(self.request.recv(4096), "utf8") + if self.fp: + self.fp.write(data) + self.fp.flush() + + # check the data for errors and set error flag + # need to assemble it into lines so we can test for the error + # string. + while data: + more = data.split("\n", 1) + line += more[0] + if len(more) > 1: + self.iserror(line) + line = "" + data = more[1] + else: + data = None + + except socket.timeout: + pass + except Exception as e: # pylint: disable=broad-except + log.info("log processing killed by exception: %s", e) + break
+ +
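``handle()`` receives the log stream in arbitrary chunks, so it carries a partial line over until a newline completes it. The reassembly loop, extracted as a standalone sketch (``split_chunks`` is illustrative, not part of pylorax):

    def split_chunks(chunks):
        line = ""
        for data in chunks:
            while data:
                more = data.split("\n", 1)
                line += more[0]
                if len(more) > 1:
                    yield line      # handle() calls self.iserror(line) here
                    line = ""
                    data = more[1]
                else:
                    data = None

    print(list(split_chunks(["Trace", "back (\nok\n"])))  # ['Traceback (', 'ok']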
[docs] def finish(self): + log.info("Shutting down log processing") + self.request.close() + if self.fp: + self.fp.close()
+ +
[docs] def iserror(self, line): + """ + Check a line to see if it contains an error indicating installation failure + + :param str line: log line to check for failure + + If the line contains IGNORED it will be skipped. + """ + if "IGNORED" in line: + return + + for t in self.simple_tests: + if t in line: + self.server.log_error = True + self.server.error_line = line + return + for t in self.re_tests: + if re.search(t, line): + self.server.log_error = True + self.server.error_line = line + return
+ + +
[docs]class LogServer(socketserver.TCPServer):
+    """A TCP Server that listens for log data"""
+
+    # Number of seconds to wait for a connection after startup
+    timeout = 60
+
+    def __init__(self, log_path, *args, **kwargs):
+        """
+        Set up the log server
+
+        :param str log_path: Path to the log file to write
+        :param int timeout: Optional number of minutes, passed as a keyword argument,
+                            after which log_check() reports an error
+        """
+        self.kill = False
+        self.log_error = False
+        self.error_line = ""
+        self.log_path = log_path
+        self._timeout = kwargs.pop("timeout", None)
+        if self._timeout:
+            self._start_time = time.time()
+        socketserver.TCPServer.__init__(self, *args, **kwargs)
[docs] def log_check(self): + """ + Check to see if an error has been found in the log + + :returns: True if there has been an error + :rtype: bool + """ + if self._timeout: + taking_too_long = time.time() > self._start_time + (self._timeout * 60) + if taking_too_long: + log.error("Canceling installation due to timeout") + else: + taking_too_long = False + return self.log_error or taking_too_long
+ + +
[docs]class LogMonitor(object):
+    """
+    Set up a server to monitor the logs output by the installation
+
+    This needs to be running before virt-install runs; qemu expects
+    a listener on the port used for the virtio log port.
+    """
+    def __init__(self, log_path=None, host="localhost", port=0, timeout=None, log_request_handler_class=LogRequestHandler):
+        """
+        Start a thread to monitor the logs.
+
+        :param str log_path: Path to the logfile to write
+        :param str host: Host to bind to. Default is localhost.
+        :param int port: Port to listen to or 0 to pick a port
+        :param int timeout: Optional number of minutes before log_check() reports a timeout error
+
+        If 0 is passed for the port the dynamically assigned port will be
+        available as self.port
+
+        If log_path isn't set then it only monitors the logs, instead of
+        also writing them to disk.
+        """
+        self.server = LogServer(log_path, (host, port), log_request_handler_class, timeout=timeout)
+        self.host, self.port = self.server.server_address
+        self.log_path = log_path
+        self.server_thread = threading.Thread(target=self.server.handle_request)
+        self.server_thread.daemon = True
+        self.server_thread.start()
[docs] def shutdown(self): + """Force shutdown of the monitoring thread""" + self.server.kill = True + self.server_thread.join()
+
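A minimal usage sketch for ``LogMonitor``, assuming a virt install is then pointed at the returned host and port (the log path and timeout values are illustrative):

    monitor = LogMonitor(log_path="/tmp/install.log", timeout=60)  # minutes
    print("listening on %s:%d" % (monitor.host, monitor.port))
    # ... run the install with its virtio log port sending to that address ...
    if monitor.server.log_check():
        print("install failed:", monitor.server.error_line)
    monitor.shutdown()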
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/mount.html b/f32-branch/_modules/pylorax/mount.html new file mode 100644 index 00000000..30766b84 --- /dev/null +++ b/f32-branch/_modules/pylorax/mount.html @@ -0,0 +1,305 @@ + + + + + + + + + + + pylorax.mount — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.mount

+# mount.py
+#
+# Copyright (C) 2011-2015  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s): Brian C. Lane <bcl@redhat.com>
+#
+import logging
+log = logging.getLogger("livemedia-creator")
+
+import os
+import pycdlib
+from pycdlib.pycdlibexception import PyCdlibException
+
+from pylorax.imgutils import mount, umount
+
+
[docs]class IsoMountpoint(object):
+    """
+    Mount the iso and check to make sure the vmlinuz and initrd.img files exist
+
+    Also check the iso for a stage2 image, set a flag, and extract the
+    iso's label.
+
+    stage2 can be either LiveOS/squashfs.img or images/install.img
+    """
+    def __init__(self, iso_path, initrd_path=None):
+        """
+        Mount the iso
+
+        :param str iso_path: Path to the iso to mount
+        :param str initrd_path: Optional path to initrd
+
+        initrd_path can be used to point to a tree with a newer
+        initrd.img than the iso has. The iso is still used for stage2.
+
+        self.kernel and self.initrd point to the kernel and initrd.
+        self.stage2 is set to True if there is a stage2 image.
+        self.repo is the path to the mounted iso if there is a /repodata dir.
+        """
+        self.label = None
+        self.iso_path = iso_path
+        self.initrd_path = initrd_path
+
+        if not self.initrd_path:
+            self.mount_dir = mount(self.iso_path, opts="loop")
+        else:
+            self.mount_dir = self.initrd_path
+
+        kernel_list = [("/isolinux/vmlinuz", "/isolinux/initrd.img"),
+                       ("/ppc/ppc64/vmlinuz", "/ppc/ppc64/initrd.img"),
+                       ("/images/pxeboot/vmlinuz", "/images/pxeboot/initrd.img")]
+
+        if os.path.isdir(self.mount_dir+"/repodata"):
+            self.repo = self.mount_dir
+        else:
+            self.repo = None
+        self.stage2 = os.path.exists(self.mount_dir+"/LiveOS/squashfs.img") or \
+                      os.path.exists(self.mount_dir+"/images/install.img")
+
+        try:
+            for kernel, initrd in kernel_list:
+                if (os.path.isfile(self.mount_dir+kernel) and
+                    os.path.isfile(self.mount_dir+initrd)):
+                    self.kernel = self.mount_dir+kernel
+                    self.initrd = self.mount_dir+initrd
+                    break
+            else:
+                raise Exception("Missing kernel and initrd file in iso, failed"
+                                " to search under: {0}".format(kernel_list))
+        except:
+            self.umount()
+            raise
+
+        self.get_iso_label()
[docs] def umount(self):
+        """Unmount the iso"""
+        if not self.initrd_path:
+            umount(self.mount_dir)
+ +
[docs] def get_iso_label(self):
+        """
+        Get the iso's label using pycdlib
+
+        Sets self.label if one is found
+        """
+        try:
+            iso = pycdlib.PyCdlib()
+            iso.open(self.iso_path)
+            self.label = iso.pvd.volume_identifier.decode("UTF-8").strip()
+        except PyCdlibException as e:
+            log.error("Problem reading label from %s: %s", self.iso_path, e)
+
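Illustrative use of ``IsoMountpoint`` (the iso path is hypothetical): mount, read the discovered attributes, and always unmount:

    iso = IsoMountpoint("/var/tmp/boot.iso")
    try:
        print(iso.kernel, iso.initrd)  # paths under the mountpoint
        print(iso.label, iso.stage2)   # volume label and stage2 flag
    finally:
        iso.umount()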
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/sysutils.html b/f32-branch/_modules/pylorax/sysutils.html new file mode 100644 index 00000000..91be7b43 --- /dev/null +++ b/f32-branch/_modules/pylorax/sysutils.html @@ -0,0 +1,361 @@ + + + + + + + + + + + pylorax.sysutils — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.sysutils

+#
+# sysutils.py
+#
+# Copyright (C) 2009-2019 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#
+
+__all__ = ["joinpaths", "touch", "replace", "chown_", "chmod_", "remove",
+           "linktree"]
+
+import sys
+import os
+import re
+import fileinput
+import pwd
+import grp
+import glob
+import shutil
+import shlex
+from configparser import ConfigParser
+
+from pylorax.executils import runcmd
+
+
[docs]def joinpaths(*args, **kwargs): + path = os.path.sep.join(args) + + if kwargs.get("follow_symlinks"): + return os.path.realpath(path) + else: + return path
+ + +
[docs]def touch(fname):
+    # create the file, or truncate it if it already exists
+    with open(fname, "w"):
+        pass
+ + +
[docs]def replace(fname, find, sub): + fin = fileinput.input(fname, inplace=1) + pattern = re.compile(find) + + for line in fin: + line = pattern.sub(sub, line) + sys.stdout.write(line) + + fin.close()
+ + +
[docs]def chown_(path, user=None, group=None, recursive=False): + uid = gid = -1 + + if user is not None: + uid = pwd.getpwnam(user)[2] + if group is not None: + gid = grp.getgrnam(group)[2] + + for fname in glob.iglob(path): + os.chown(fname, uid, gid) + + if recursive and os.path.isdir(fname): + for nested in os.listdir(fname): + nested = joinpaths(fname, nested) + chown_(nested, user, group, recursive)
+ + +
[docs]def chmod_(path, mode, recursive=False): + for fname in glob.iglob(path): + os.chmod(fname, mode) + + if recursive and os.path.isdir(fname): + for nested in os.listdir(fname): + nested = joinpaths(fname, nested) + chmod_(nested, mode, recursive)
+ + +def cpfile(src, dst): + shutil.copy2(src, dst) + if os.path.isdir(dst): + dst = joinpaths(dst, os.path.basename(src)) + + return dst + +def mvfile(src, dst): + if os.path.isdir(dst): + dst = joinpaths(dst, os.path.basename(src)) + os.rename(src, dst) + return dst + +
[docs]def remove(target): + if os.path.isdir(target) and not os.path.islink(target): + shutil.rmtree(target) + else: + os.unlink(target)
+ +
[docs]def linktree(src, dst): + runcmd(["/bin/cp", "-alx", src, dst])
+ +def unquote(s): + return ' '.join(shlex.split(s)) + +class UnquotingConfigParser(ConfigParser): + """A ConfigParser, only with unquoting of the values.""" + # pylint: disable=arguments-differ + def get(self, *args, **kwargs): + ret = super().get(*args, **kwargs) + if ret: + ret = unquote(ret) + return ret + +def flatconfig(filename): + """Use UnquotingConfigParser to read a flat config file (without + section headers) by adding a section header. + """ + with open (filename, 'r') as conffh: + conftext = "[main]\n" + conffh.read() + config = UnquotingConfigParser() + config.read_string(conftext) + return config['main'] + +def read_tail(path, size): + """Read up to `size` kibibytes from the end of a file""" + + # NOTE: In py3 text files are unicode, not bytes so we have to open it as bytes + with open(path, "rb") as f: + return _read_file_end(f, size) + +def _read_file_end(f, size): + """Read the end of a file + + This skips to the next line to avoid starting in the middle of a unicode character. + And returns "" in the case of a UnicodeDecodeError + """ + f.seek(0, 2) + end = f.tell() + if end < 1024 * size: + f.seek(0, 0) + else: + f.seek(end - (1024 * size)) + data = f.read() + try: + # Find the first newline in the block + newline = min(1+data.find(b'\n'), len(data)) + text = data[newline:].decode("UTF-8") + except UnicodeDecodeError: + return "" + return text +
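A behavior sketch for ``read_tail()``: it reads at most ``size`` KiB from the end of the file, then skips ahead to the first newline so the result never starts mid-line (or mid-character):

    with open("/tmp/example.log", "w") as f:
        f.write("first line\n" + "x" * 2048 + "\nlast line\n")

    print(read_tail("/tmp/example.log", 1))  # -> 'last line\n'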
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/treebuilder.html b/f32-branch/_modules/pylorax/treebuilder.html new file mode 100644 index 00000000..c967b789 --- /dev/null +++ b/f32-branch/_modules/pylorax/treebuilder.html @@ -0,0 +1,625 @@ + + + + + + + + + + + pylorax.treebuilder — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.treebuilder

+# treebuilder.py - handle arch-specific tree building stuff using templates
+#
+# Copyright (C) 2011-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s):  Will Woods <wwoods@redhat.com>
+
+import logging
+logger = logging.getLogger("pylorax.treebuilder")
+
+import os, re
+from os.path import basename
+from shutil import copytree, copy2
+from subprocess import CalledProcessError
+from pathlib import Path
+import itertools
+
+from pylorax.sysutils import joinpaths, remove
+from pylorax.base import DataHolder
+from pylorax.ltmpl import LoraxTemplateRunner
+import pylorax.imgutils as imgutils
+from pylorax.executils import runcmd, runcmd_output, execWithCapture
+
+templatemap = {
+    'i386':    'x86.tmpl',
+    'x86_64':  'x86.tmpl',
+    'ppc64le': 'ppc64le.tmpl',
+    's390':    's390.tmpl',
+    's390x':   's390.tmpl',
+    'aarch64': 'aarch64.tmpl',
+    'arm':     'arm.tmpl',
+    'armhfp':  'arm.tmpl',
+}
+
+
[docs]def generate_module_info(moddir, outfile=None):
+    def module_desc(mod):
+        output = runcmd_output(["modinfo", "-F", "description", mod])
+        return output.strip()
+    def read_module_set(name):
+        return set(l.strip() for l in open(joinpaths(moddir,name)) if ".ko" in l)
+    modsets = {'scsi':read_module_set("modules.block"),
+               'eth':read_module_set("modules.networking")}
+
+    modinfo = list()
+    for root, _dirs, files in os.walk(moddir):
+        for modtype, modset in modsets.items():
+            for mod in modset.intersection(files): # modules in this dir
+                (name, _ext) = os.path.splitext(mod) # foo.ko -> (foo, .ko)
+                desc = module_desc(joinpaths(root,mod)) or "%s driver" % name
+                modinfo.append(dict(name=name, type=modtype, desc=desc))
+
+    # use a context manager so the module-info file is flushed and closed
+    with open(outfile or joinpaths(moddir,"module-info"), "w") as out:
+        out.write("Version 0\n")
+        for mod in sorted(modinfo, key=lambda m: m.get('name')):
+            out.write('{name}\n\t{type}\n\t"{desc:.65}"\n'.format(**mod))
+ +
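For reference, the module-info entry format written by ``generate_module_info()``, shown for a single made-up module:

    entry = dict(name="e1000", type="eth", desc="Intel(R) PRO/1000 Network Driver")
    print('{name}\n\t{type}\n\t"{desc:.65}"'.format(**entry))
    # e1000
    #         eth
    #         "Intel(R) PRO/1000 Network Driver"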
[docs]class RuntimeBuilder(object):
+    '''Builds the anaconda runtime image.'''
+    def __init__(self, product, arch, dbo, templatedir=None,
+                 installpkgs=None, excludepkgs=None,
+                 add_templates=None,
+                 add_template_vars=None,
+                 skip_branding=False):
+        root = dbo.conf.installroot
+        # use a copy of product so we can modify it locally
+        product = product.copy()
+        product.name = product.name.lower()
+        self.vars = DataHolder(arch=arch, product=product, dbo=dbo, root=root,
+                               basearch=arch.basearch, libdir=arch.libdir)
+        self.dbo = dbo
+        self._runner = LoraxTemplateRunner(inroot=root, outroot=root,
+                                           dbo=dbo, templatedir=templatedir)
+        self.add_templates = add_templates or []
+        self.add_template_vars = add_template_vars or {}
+        self._installpkgs = installpkgs or []
+        self._excludepkgs = excludepkgs or []
+        self._runner.defaults = self.vars
+        self.dbo.reset()
+        self._skip_branding = skip_branding
+
+    def _install_branding(self):
+        """Select the branding from the available 'system-release' packages
+        The *best* way to control this is to have a single package in the repo provide 'system-release'
+        When there is more than one package it will:
+        - Make a list of the available packages
+        - If variant is set look for a package ending with lower(variant) and use that
+        - If there are one or more non-generic packages, use the first one after sorting
+        """
+        if self._skip_branding:
+            return
+
+        release = None
+        q = self.dbo.sack.query()
+        a = q.available()
+        pkgs = sorted([p.name for p in a.filter(provides='system-release')
+                       if not p.name.startswith("generic")])
+        if not pkgs:
+            logger.error("No system-release packages found, could not get the release")
+            return
+
+        logger.debug("system-release packages: %s", pkgs)
+        if self.vars.product.variant:
+            variant = [p for p in pkgs if p.endswith("-"+self.vars.product.variant.lower())]
+            if variant:
+                release = variant[0]
+        if not release:
+            release = pkgs[0]
+
+        # release
+        logger.info('got release: %s', release)
+        self._runner.installpkg(release)
+
+        # logos
+        release, _suffix = release.split('-', 1)
+        self._runner.installpkg('%s-logos' % release)
[docs] def install(self): + '''Install packages and do initial setup with runtime-install.tmpl''' + self._install_branding() + if len(self._installpkgs) > 0: + self._runner.installpkg(*self._installpkgs) + if len(self._excludepkgs) > 0: + self._runner.removepkg(*self._excludepkgs) + self._runner.run("runtime-install.tmpl") + for tmpl in self.add_templates: + self._runner.run(tmpl, **self.add_template_vars)
+ +
[docs] def writepkglists(self, pkglistdir): + '''debugging data: write out lists of package contents''' + if not os.path.isdir(pkglistdir): + os.makedirs(pkglistdir) + q = self.dbo.sack.query() + for pkgobj in q.installed(): + with open(joinpaths(pkglistdir, pkgobj.name), "w") as fobj: + for fname in pkgobj.files: + fobj.write("{0}\n".format(fname))
+ +
[docs] def postinstall(self): + '''Do some post-install setup work with runtime-postinstall.tmpl''' + # copy configdir into runtime root beforehand + configdir = joinpaths(self._runner.templatedir,"config_files") + configdir_path = "tmp/config_files" + fullpath = joinpaths(self.vars.root, configdir_path) + if os.path.exists(fullpath): + remove(fullpath) + copytree(configdir, fullpath) + self._runner.run("runtime-postinstall.tmpl", configdir=configdir_path)
+ +
[docs] def cleanup(self): + '''Remove unneeded packages and files with runtime-cleanup.tmpl''' + self._runner.run("runtime-cleanup.tmpl")
+ +
[docs] def verify(self): + '''Ensure that contents of the installroot can run''' + status = True + + ELF_MAGIC = b'\x7fELF' + + # Iterate over all files in /usr/bin and /usr/sbin + # For ELF files, gather them into a list and we'll check them all at + # the end. For files with a #!, check them as we go + elf_files = [] + usr_bin = Path(self.vars.root + '/usr/bin') + usr_sbin = Path(self.vars.root + '/usr/sbin') + for path in (str(x) for x in itertools.chain(usr_bin.iterdir(), usr_sbin.iterdir()) \ + if x.is_file()): + with open(path, "rb") as f: + magic = f.read(4) + if magic == ELF_MAGIC: + # Save the path, minus the chroot prefix + elf_files.append(path[len(self.vars.root):]) + elif magic[:2] == b'#!': + # Reopen the file as text and read the first line. + # Open as latin-1 so that stray 8-bit characters don't make + # things blow up. We only really care about ASCII parts. + with open(path, "rt", encoding="latin-1") as f_text: + # Remove the #!, split on space, and take the first part + shabang = f_text.readline()[2:].split()[0] + + # Does the path exist? + if not os.path.exists(self.vars.root + shabang): + logger.error('%s, needed by %s, does not exist', shabang, path) + status = False + + # Now, run ldd on all the ELF files + # Just run ldd once on everything so it isn't logged a million times. + # At least one thing in the list isn't going to be a dynamic executable, + # so use execWithCapture to ignore the exit code. + filename = '' + for line in execWithCapture('ldd', elf_files, root=self.vars.root, + log_output=False, filter_stderr=True).split('\n'): + if line and not line[0].isspace(): + # New filename header, strip the : at the end and save + filename = line[:-1] + elif 'not found' in line: + logger.error('%s, needed by %s, not found', line.split()[0], filename) + status = False + + return status
+ +
[docs] def writepkgsizes(self, pkgsizefile): + '''debugging data: write a big list of pkg sizes''' + fobj = open(pkgsizefile, "w") + getsize = lambda f: os.lstat(f).st_size if os.path.exists(f) else 0 + q = self.dbo.sack.query() + for p in sorted(q.installed()): + pkgsize = sum(getsize(joinpaths(self.vars.root,f)) for f in p.files) + fobj.write("{0.name}.{0.arch}: {1}\n".format(p, pkgsize))
+ +
[docs] def generate_module_data(self): + root = self.vars.root + moddir = joinpaths(root, "lib/modules/") + for kernel in findkernels(root=root): + ksyms = joinpaths(root, "boot/System.map-%s" % kernel.version) + logger.info("doing depmod and module-info for %s", kernel.version) + runcmd(["depmod", "-a", "-F", ksyms, "-b", root, kernel.version]) + generate_module_info(moddir+kernel.version, outfile=moddir+"module-info")
+ +
[docs] def create_squashfs_runtime(self, outfile="/var/tmp/squashfs.img", compression="xz", compressargs=None, size=2):
+        """Create a plain squashfs runtime"""
+        compressargs = compressargs or []
+        # exist_ok=True so a pre-existing output directory (eg. /var/tmp) is not an error
+        os.makedirs(os.path.dirname(outfile), exist_ok=True)
+
+        # squash the rootfs
+        imgutils.mksquashfs(self.vars.root, outfile, compression, compressargs)
+ +
[docs] def create_ext4_runtime(self, outfile="/var/tmp/squashfs.img", compression="xz", compressargs=None, size=2): + """Create a squashfs compressed ext4 runtime""" + # make live rootfs image - must be named "LiveOS/rootfs.img" for dracut + compressargs = compressargs or [] + workdir = joinpaths(os.path.dirname(outfile), "runtime-workdir") + os.makedirs(joinpaths(workdir, "LiveOS")) + + # Catch problems with the rootfs being too small and clearly log them + try: + imgutils.mkrootfsimg(self.vars.root, joinpaths(workdir, "LiveOS/rootfs.img"), + "Anaconda", size=size) + except CalledProcessError as e: + if e.stdout and "No space left on device" in e.stdout: + logger.error("The rootfs ran out of space with size=%d", size) + raise + + # squash the live rootfs and clean up workdir + imgutils.mksquashfs(workdir, outfile, compression, compressargs) + remove(workdir)
+ +
[docs] def finished(self): + """ Done using RuntimeBuilder + + Close the dnf base object + """ + self.dbo.close()
+ +
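A heavily simplified sketch of how the ``RuntimeBuilder`` methods above fit together; the ``product``/``arch`` DataHolders and the dnf Base object (``dbo``) are assumed to have been prepared by the caller, as the real lorax entry point does:

    rb = RuntimeBuilder(product, arch, dbo)  # inputs prepared elsewhere
    rb.install()                             # runtime-install.tmpl
    rb.postinstall()                         # runtime-postinstall.tmpl
    rb.cleanup()                             # runtime-cleanup.tmpl
    if not rb.verify():
        raise RuntimeError("installroot failed verification")
    rb.generate_module_data()                # depmod + module-info
    rb.create_squashfs_runtime("/var/tmp/lorax-run/squashfs.img")
    rb.finished()                            # close the dnf base object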
[docs]class TreeBuilder(object): + '''Builds the arch-specific boot images. + inroot should be the installtree root (the newly-built runtime dir)''' + def __init__(self, product, arch, inroot, outroot, runtime, isolabel, domacboot=True, doupgrade=True, + templatedir=None, add_templates=None, add_template_vars=None, workdir=None, extra_boot_args=""): + + # NOTE: if you pass an arg named "runtime" to a mako template it'll + # clobber some mako internal variables - hence "runtime_img". + self.vars = DataHolder(arch=arch, product=product, runtime_img=runtime, + runtime_base=basename(runtime), + inroot=inroot, outroot=outroot, + basearch=arch.basearch, libdir=arch.libdir, + isolabel=isolabel, udev=udev_escape, domacboot=domacboot, doupgrade=doupgrade, + workdir=workdir, lower=string_lower, + extra_boot_args=extra_boot_args) + self._runner = LoraxTemplateRunner(inroot, outroot, templatedir=templatedir) + self._runner.defaults = self.vars + self.add_templates = add_templates or [] + self.add_template_vars = add_template_vars or {} + self.templatedir = templatedir + self.treeinfo_data = None + + @property + def kernels(self): + return findkernels(root=self.vars.inroot) + +
[docs] def rebuild_initrds(self, add_args=None, backup="", prefix=""):
+        '''Rebuild all the initrds in the tree. If backup is specified, each
+        initrd will be renamed with backup as a suffix before rebuilding.
+        If backup is empty, the existing initrd files will be overwritten.
+        If prefix is specified, the existing initrd is untouched and a new
+        image is built with the filename "${prefix}-${kernel.version}.img"
+
+        If the initrd doesn't exist its name will be created based on the
+        name of the kernel.
+        '''
+        add_args = add_args or []
+        dracut = ["dracut", "--nomdadmconf", "--nolvmconf"] + add_args
+        if not backup:
+            dracut.append("--force")
+
+        if not self.kernels:
+            raise Exception("No kernels found, cannot rebuild_initrds")
+
+        # Hush some dracut warnings. TODO: bind-mount proc in place?
+        open(joinpaths(self.vars.inroot,"/proc/modules"),"w")
+        for kernel in self.kernels:
+            if prefix:
+                idir = os.path.dirname(kernel.path)
+                outfile = joinpaths(idir, prefix+'-'+kernel.version+'.img')
+            elif hasattr(kernel, "initrd"):
+                # If there is an existing initrd, use that
+                outfile = kernel.initrd.path
+            else:
+                # Construct an initrd from the kernel name
+                outfile = kernel.path.replace("vmlinuz-", "initrd-") + ".img"
+            logger.info("rebuilding %s", outfile)
+            if backup:
+                initrd = joinpaths(self.vars.inroot, outfile)
+                if os.path.exists(initrd):
+                    os.rename(initrd, initrd + backup)
+            cmd = dracut + [outfile, kernel.version]
+            runcmd(cmd, root=self.vars.inroot)
+
+        os.unlink(joinpaths(self.vars.inroot,"/proc/modules"))
+ +
[docs] def build(self): + templatefile = templatemap[self.vars.arch.basearch] + for tmpl in self.add_templates: + self._runner.run(tmpl, **self.add_template_vars) + self._runner.run(templatefile, kernels=self.kernels) + self.treeinfo_data = self._runner.results.treeinfo + self.implantisomd5()
+ +
[docs] def implantisomd5(self): + for _section, data in self.treeinfo_data.items(): + if 'boot.iso' in data: + iso = joinpaths(self.vars.outroot, data['boot.iso']) + runcmd(["implantisomd5", iso])
+ + @property + def dracut_hooks_path(self): + """ Return the path to the lorax dracut hooks scripts + + Use the configured share dir if it is setup, + otherwise default to /usr/share/lorax/dracut_hooks + """ + if self.templatedir: + return joinpaths(self.templatedir, "dracut_hooks") + else: + return "/usr/share/lorax/dracut_hooks" + +
[docs] def copy_dracut_hooks(self, hooks): + """ Copy the hook scripts in hooks into the installroot's /tmp/ + and return a list of commands to pass to dracut when creating the + initramfs + + hooks is a list of tuples with the name of the hook script and the + target dracut hook directory + (eg. [("99anaconda-copy-ks.sh", "/lib/dracut/hooks/pre-pivot")]) + """ + dracut_commands = [] + for hook_script, dracut_path in hooks: + src = joinpaths(self.dracut_hooks_path, hook_script) + if not os.path.exists(src): + logger.error("Missing lorax dracut hook script %s", (src)) + continue + dst = joinpaths(self.vars.inroot, "/tmp/", hook_script) + copy2(src, dst) + dracut_commands += ["--include", joinpaths("/tmp/", hook_script), + dracut_path] + return dracut_commands
+ +#### TreeBuilder helper functions + +
[docs]def findkernels(root="/", kdir="boot"): + # To find possible flavors, awk '/BuildKernel/ { print $4 }' kernel.spec + flavors = ('debug', 'PAE', 'PAEdebug', 'smp', 'xen', 'lpae') + kre = re.compile(r"vmlinuz-(?P<version>.+?\.(?P<arch>[a-z0-9_]+)" + r"(.(?P<flavor>{0}))?)$".format("|".join(flavors))) + kernels = [] + bootfiles = os.listdir(joinpaths(root, kdir)) + for f in bootfiles: + match = kre.match(f) + if match: + kernel = DataHolder(path=joinpaths(kdir, f)) + kernel.update(match.groupdict()) # sets version, arch, flavor + kernels.append(kernel) + + # look for associated initrd/initramfs/etc. + for kernel in kernels: + for f in bootfiles: + if f.endswith('-'+kernel.version+'.img'): + imgtype, _rest = f.split('-',1) + # special backwards-compat case + if imgtype == 'initramfs': + imgtype = 'initrd' + kernel[imgtype] = DataHolder(path=joinpaths(kdir, f)) + + logger.debug("kernels=%s", kernels) + return kernels
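What the ``findkernels()`` regex extracts from a kernel filename, using a made-up Fedora-style version:

    import re

    flavors = ('debug', 'PAE', 'PAEdebug', 'smp', 'xen', 'lpae')
    kre = re.compile(r"vmlinuz-(?P<version>.+?\.(?P<arch>[a-z0-9_]+)"
                     r"(.(?P<flavor>{0}))?)$".format("|".join(flavors)))

    m = kre.match("vmlinuz-5.6.8-300.fc32.x86_64")
    print(m.groupdict())
    # {'version': '5.6.8-300.fc32.x86_64', 'arch': 'x86_64', 'flavor': None}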
+ +# udev whitelist: 'a-zA-Z0-9#+.:=@_-' (see is_whitelisted in libudev-util.c) +udev_blacklist=' !"$%&\'()*,/;<>?[\\]^`{|}~' # ASCII printable, minus whitelist +udev_blacklist += ''.join(chr(i) for i in range(32)) # ASCII non-printable +
[docs]def udev_escape(label): + out = '' + for ch in label: + out += ch if ch not in udev_blacklist else '\\x%02x' % ord(ch) + return out
+ +
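``udev_escape()`` replaces every character outside udev's whitelist with its hex ``\xXX`` form, for example:

    print(udev_escape("Fedora Live"))   # Fedora\x20Live  (space is escaped)
    print(udev_escape("Fedora-WS-32"))  # Fedora-WS-32    (already safe)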
[docs]def string_lower(string): + """ Return a lowercase string. + + :param string: String to lowercase + + This is used as a filter in the templates. + """ + return string.lower()
+
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_modules/pylorax/treeinfo.html b/f32-branch/_modules/pylorax/treeinfo.html new file mode 100644 index 00000000..f47d9a81 --- /dev/null +++ b/f32-branch/_modules/pylorax/treeinfo.html @@ -0,0 +1,264 @@ + + + + + + + + + + + pylorax.treeinfo — Lorax 32.12 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.treeinfo

+#
+# treeinfo.py
+#
+# Copyright (C) 2010-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.treeinfo")
+
+import configparser
+import os
+import time
+
+
+
[docs]class TreeInfo(object): + + def __init__(self, product, version, variant, basearch, + packagedir=""): + + self.c = configparser.ConfigParser() + + if 'SOURCE_DATE_EPOCH' in os.environ: + timestamp = os.environ['SOURCE_DATE_EPOCH'] + else: + timestamp = str(time.time()) + + section = "general" + data = {"timestamp": timestamp, + "family": product, + "version": version, + "name": "%s-%s" % (product, version), + "variant": variant or "", + "arch": basearch, + "packagedir": packagedir} + + self.c.add_section(section) + list(self.c.set(section, key, value) for key, value in data.items()) + +
[docs] def add_section(self, section, data): + if not self.c.has_section(section): + self.c.add_section(section) + + list(self.c.set(section, key, value) for key, value in data.items())
+ +
[docs] def write(self, outfile): + logger.info("writing .treeinfo file") + with open(outfile, "w") as fobj: + self.c.write(fobj)
+
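Minimal illustrative use of the ``TreeInfo`` class (all values are made up):

    ti = TreeInfo("Fedora", "32", "Server", "x86_64")
    ti.add_section("images-x86_64", {"kernel": "images/pxeboot/vmlinuz",
                                     "initrd": "images/pxeboot/initrd.img"})
    ti.write("/tmp/.treeinfo")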
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/f32-branch/_sources/composer-cli.rst.txt b/f32-branch/_sources/composer-cli.rst.txt new file mode 100644 index 00000000..c5857f32 --- /dev/null +++ b/f32-branch/_sources/composer-cli.rst.txt @@ -0,0 +1,178 @@ +composer-cli +============ + +:Authors: + Brian C. Lane + +``composer-cli`` is used to interact with the ``lorax-composer`` API server, managing blueprints, exploring available packages, and building new images. + +It requires `lorax-composer `_ to be installed on the +local system, and the user running it needs to be a member of the ``weldr`` +group. They do not need to be root, but all of the `security precautions +`_ apply. + +composer-cli cmdline arguments +------------------------------ + +.. argparse:: + :ref: composer.cli.cmdline.composer_cli_parser + :prog: composer-cli + +Edit a Blueprint +---------------- + +Start out by listing the available blueprints using ``composer-cli blueprints +list``, pick one and save it to the local directory by running ``composer-cli +blueprints save http-server``. If there are no blueprints available you can +copy one of the examples `from the test suite +`_. + +Edit the file (it will be saved with a .toml extension) and change the +description, add a package or module to it. Send it back to the server by +running ``composer-cli blueprints push http-server.toml``. You can verify that it was +saved by viewing the changelog - ``composer-cli blueprints changes http-server``. + +Build an image +---------------- + +Build a ``qcow2`` disk image from this blueprint by running ``composer-cli +compose start http-server qcow2``. It will print a UUID that you can use to +keep track of the build. You can also cancel the build if needed. + +The available types of images is displayed by ``composer-cli compose types``. +Currently this consists of: alibaba, ami, ext4-filesystem, google, hyper-v, +live-iso, openstack, partitioned-disk, qcow2, tar, vhd, vmdk + +You can optionally start an upload of the finished image, see `Image Uploads`_ for +more information. + + +Monitor the build status +------------------------ + +Monitor it using ``composer-cli compose status``, which will show the status of +all the builds on the system. You can view the end of the anaconda build logs +once it is in the ``RUNNING`` state using ``composer-cli compose log UUID`` +where UUID is the UUID returned by the start command. + +Once the build is in the ``FINISHED`` state you can download the image. + +Download the image +------------------ + +Downloading the final image is done with ``composer-cli compose image UUID`` and it will +save the qcow2 image as ``UUID-disk.qcow2`` which you can then use to boot a VM like this:: + + qemu-kvm --name test-image -m 1024 -hda ./UUID-disk.qcow2 + + +Image Uploads +------------- + +``composer-cli`` can upload the images to a number of services, including AWS, +OpenStack, and vSphere. The upload can be started when the build is finished, +by using ``composer-cli compose start ...`` or an existing image can be uploaded +with ``composer-cli upload start ...``. In order to access the service you need +to pass authentication details to composer-cli using a TOML file, or reference +a previously saved profile. + + +Providers +--------- + +Providers are the services providers with Ansible playbook support under +``/usr/share/lorax/lifted/providers/``, you will need to gather some provider +specific information in order to authenticate with it. 
You can view the
+required fields using ``composer-cli providers template <PROVIDER>``, eg. for AWS
+you would run::
+
+    composer-cli providers template aws
+
+The output looks like this::
+
+    provider = "aws"
+
+    [settings]
+    aws_access_key = "AWS Access Key"
+    aws_bucket = "AWS Bucket"
+    aws_region = "AWS Region"
+    aws_secret_key = "AWS Secret Key"
+
+Save this into an ``aws-credentials.toml`` file and use it when running ``start``.
+
+AWS
+^^^
+
+The access key and secret key can be created by going to the
+``IAM->Users->Security Credentials`` section and creating a new access key. The
+secret key will only be shown when it is first created, so make sure to record
+it in a secure place. The region should be the region that you want to use the
+AMI in, and the bucket can be an existing bucket, or a new one, following the
+normal AWS bucket naming rules. It will be created if it doesn't already exist.
+
+When uploading the image it is first uploaded to the s3 bucket, and then
+converted to an AMI. If the conversion is successful the s3 object will be
+deleted. If it fails, re-trying after correcting the problem will re-use the
+object if you have not deleted it in the meantime, speeding up the process.
+
+
+Profiles
+--------
+
+Profiles store the authentication settings associated with a specific provider.
+Providers can have multiple profiles, as long as their names are unique. For
+example, you may have one profile for testing and another for production
+uploads.
+
+Profiles are created by pushing the provider settings template to the server using
+``composer-cli providers push <PROFILE.TOML>`` where ``PROFILE.TOML`` is the same as the
+provider template, but with the addition of a ``profile`` field. For example, an AWS
+profile named ``test-uploads`` would look like this::
+
+    provider = "aws"
+    profile = "test-uploads"
+
+    [settings]
+    aws_access_key = "AWS Access Key"
+    aws_bucket = "AWS Bucket"
+    aws_region = "AWS Region"
+    aws_secret_key = "AWS Secret Key"
+
+You can view the profile by using ``composer-cli providers aws test-uploads``.
+
+
+Build an image and upload results
+---------------------------------
+
+If you have a profile named ``test-uploads``::
+
+    composer-cli compose start example-http-server ami "http image" aws test-uploads
+
+Or if you have the settings stored in a TOML file::
+
+    composer-cli compose start example-http-server ami "http image" aws-settings.toml
+
+It will return the UUID of the image build, and the UUID of the upload. Once
+the build has finished successfully it will start the upload process, which you
+can monitor with ``composer-cli upload info <UPLOAD-UUID>``
+
+You can also view the upload logs from the Ansible playbook with::
+
+    composer-cli upload log <UPLOAD-UUID>
+
+The type of the image must match the type supported by the provider.
+
+
+Upload an existing image
+------------------------
+
+You can upload previously built images, as long as they are in the ``FINISHED`` state, using ``composer-cli upload start ...``. If you have a profile named ``test-uploads``::
+
+    composer-cli upload start "http-image" aws test-uploads
+
+Or if you have the settings stored in a TOML file::
+
+    composer-cli upload start "http-image" aws-settings.toml
+
+This will output the UUID of the upload, which can then be used to monitor the status in the same way
+described above. 
diff --git a/f32-branch/_sources/composer.cli.rst.txt b/f32-branch/_sources/composer.cli.rst.txt new file mode 100644 index 00000000..c1366a8e --- /dev/null +++ b/f32-branch/_sources/composer.cli.rst.txt @@ -0,0 +1,102 @@ +composer.cli package +==================== + +Submodules +---------- + +composer.cli.blueprints module +------------------------------ + +.. automodule:: composer.cli.blueprints + :members: + :undoc-members: + :show-inheritance: + +composer.cli.cmdline module +--------------------------- + +.. automodule:: composer.cli.cmdline + :members: + :undoc-members: + :show-inheritance: + +composer.cli.compose module +--------------------------- + +.. automodule:: composer.cli.compose + :members: + :undoc-members: + :show-inheritance: + +composer.cli.help module +------------------------ + +.. automodule:: composer.cli.help + :members: + :undoc-members: + :show-inheritance: + +composer.cli.modules module +--------------------------- + +.. automodule:: composer.cli.modules + :members: + :undoc-members: + :show-inheritance: + +composer.cli.projects module +---------------------------- + +.. automodule:: composer.cli.projects + :members: + :undoc-members: + :show-inheritance: + +composer.cli.providers module +----------------------------- + +.. automodule:: composer.cli.providers + :members: + :undoc-members: + :show-inheritance: + +composer.cli.sources module +--------------------------- + +.. automodule:: composer.cli.sources + :members: + :undoc-members: + :show-inheritance: + +composer.cli.status module +-------------------------- + +.. automodule:: composer.cli.status + :members: + :undoc-members: + :show-inheritance: + +composer.cli.upload module +-------------------------- + +.. automodule:: composer.cli.upload + :members: + :undoc-members: + :show-inheritance: + +composer.cli.utilities module +----------------------------- + +.. automodule:: composer.cli.utilities + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: composer.cli + :members: + :undoc-members: + :show-inheritance: diff --git a/f32-branch/_sources/composer.rst.txt b/f32-branch/_sources/composer.rst.txt new file mode 100644 index 00000000..dd0c06cb --- /dev/null +++ b/f32-branch/_sources/composer.rst.txt @@ -0,0 +1,37 @@ +composer package +================ + +Subpackages +----------- + +.. toctree:: + + composer.cli + +Submodules +---------- + +composer.http\_client module +---------------------------- + +.. automodule:: composer.http_client + :members: + :undoc-members: + :show-inheritance: + +composer.unix\_socket module +---------------------------- + +.. automodule:: composer.unix_socket + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: composer + :members: + :undoc-members: + :show-inheritance: diff --git a/f32-branch/_sources/index.rst.txt b/f32-branch/_sources/index.rst.txt new file mode 100644 index 00000000..9000845d --- /dev/null +++ b/f32-branch/_sources/index.rst.txt @@ -0,0 +1,39 @@ +.. Lorax documentation master file, created by + sphinx-quickstart on Wed Apr 8 13:46:00 2015. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to Lorax's documentation! +================================= + +Contents: + +.. 
toctree:: + :maxdepth: 1 + + intro + lorax + livemedia-creator + lorax-composer + composer-cli + mkksiso + product-images + modules + +Documentation for other Lorax Branches +====================================== + +* `Fedora 31 `_ +* `Fedora 30 `_ +* `Fedora 29 `_ +* `Fedora 28 `_ +* `RHEL8 lorax-composer `_ +* `RHEL7 lorax-composer `_ + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/f32-branch/_sources/intro.rst.txt b/f32-branch/_sources/intro.rst.txt new file mode 100644 index 00000000..01857ee9 --- /dev/null +++ b/f32-branch/_sources/intro.rst.txt @@ -0,0 +1,67 @@ +Introduction to Lorax +===================== + +I am the Lorax. I speak for the trees [and images]. + +Lorax is used to build the Anaconda Installer boot.iso, it consists of a +library, pylorax, a set of templates, and the lorax script. Its operation +is driven by a customized set of Mako templates that lists the packages +to be installed, steps to execute to remove unneeded files, and creation +of the iso for all of the supported architectures. + + + + + + +Before Lorax +============ + +Tree building tools such as pungi and revisor rely on 'buildinstall' in +anaconda/scripts/ to produce the boot images and other such control files +in the final tree. The existing buildinstall scripts written in a mix of +bash and Python are unmaintainable. Lorax is an attempt to replace them +with something more flexible. + + +EXISTING WORKFLOW: + +pungi and other tools call scripts/buildinstall, which in turn call other +scripts to do the image building and data generation. Here's how it +currently looks: + + -> buildinstall + * process command line options + * write temporary yum.conf to point to correct repo + * find anaconda release RPM + * unpack RPM, pull in those versions of upd-instroot, mk-images, + maketreeinfo.py, makestamp.py, and buildinstall + + -> call upd-instroot + + -> call maketreeinfo.py + + -> call mk-images (which figures out which mk-images.ARCH to call) + + -> call makestamp.py + + * clean up + + +PROBLEMS: + +The existing workflow presents some problems with maintaining the scripts. +First, almost all knowledge of what goes in to the stage 1 and stage 2 +images lives in upd-instroot. The mk-images* scripts copy things from the +root created by upd-instroot in order to build the stage 1 image, though +it's not completely clear from reading the scripts. + + +NEW IDEAS: + +Create a new central driver with all information living in Python modules. +Configuration files will provide the knowledge previously contained in the +upd-instroot and mk-images* scripts. + + + diff --git a/f32-branch/_sources/lifted.rst.txt b/f32-branch/_sources/lifted.rst.txt new file mode 100644 index 00000000..a9c4b004 --- /dev/null +++ b/f32-branch/_sources/lifted.rst.txt @@ -0,0 +1,46 @@ +lifted package +============== + +Submodules +---------- + +lifted.config module +-------------------- + +.. automodule:: lifted.config + :members: + :undoc-members: + :show-inheritance: + +lifted.providers module +----------------------- + +.. automodule:: lifted.providers + :members: + :undoc-members: + :show-inheritance: + +lifted.queue module +------------------- + +.. automodule:: lifted.queue + :members: + :undoc-members: + :show-inheritance: + +lifted.upload module +-------------------- + +.. automodule:: lifted.upload + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. 
automodule:: lifted + :members: + :undoc-members: + :show-inheritance: diff --git a/f32-branch/_sources/livemedia-creator.rst.txt b/f32-branch/_sources/livemedia-creator.rst.txt new file mode 100644 index 00000000..962be311 --- /dev/null +++ b/f32-branch/_sources/livemedia-creator.rst.txt @@ -0,0 +1,670 @@ +livemedia-creator +================= + +:Authors: + Brian C. Lane + +livemedia-creator uses `Anaconda `_, +`kickstart `_ and `Lorax +`_ to create bootable media that use the +same install path as a normal system installation. It can be used to make live +isos, bootable (partitioned) disk images, tarfiles, and filesystem images for +use with virtualization and container solutions like libvirt, docker, and +OpenStack. + +The general idea is to use qemu with kickstart and an Anaconda boot.iso to +install into a disk image and then use the disk image to create the bootable +media. + +livemedia-creator --help will describe all of the options available. At the +minimum you need: + +``--make-iso`` to create a final bootable .iso or one of the other ``--make-*`` options. + +``--iso`` to specify the Anaconda install media to use with qemu. + +``--ks`` to select the kickstart file describing what to install. + +To use livemedia-creator with virtualization you will need to have qemu installed. + +If you are going to be using Anaconda directly, with ``--no-virt`` mode, make sure +you have the anaconda-tui package installed. + +Conventions used in this document: + +``lmc`` is an abbreviation for livemedia-creator. + +``builder`` is the system where livemedia-creator is being run + +``image`` is the disk image being created by running livemedia-creator + + +livemedia-creator cmdline arguments +----------------------------------- + +.. argparse:: + :ref: pylorax.cmdline.lmc_parser + :prog: livemedia-creator + + --macboot : @replace + Make the iso bootable on UEFI based Mac systems + + Default: True + + --nomacboot : @replace + Do not create a Mac bootable iso + + Default: False + + +Quickstart +---------- + +Run this to create a bootable live iso:: + + sudo livemedia-creator --make-iso \ + --iso=/extra/iso/boot.iso --ks=./docs/fedora-livemedia.ks + +You can run it directly from the lorax git repo like this:: + + sudo PATH=./src/sbin/:$PATH PYTHONPATH=./src/ ./src/sbin/livemedia-creator \ + --make-iso --iso=/extra/iso/boot.iso \ + --ks=./docs/fedora-livemedia.ks --lorax-templates=./share/ + +You can observe the installation using vnc. The logs will show what port was +chosen, or you can use a specific port by passing it. eg. ``--vnc vnc:127.0.0.1:5`` + +This is usually a good idea when testing changes to the kickstart. lmc tries +to monitor the logs for fatal errors, but may not catch everything. + + +How ISO creation works +---------------------- + +There are 2 stages, the install stage which produces a disk or filesystem image +as its output, and the boot media creation which uses the image as its input. +Normally you would run both stages, but it is possible to stop after the +install stage, by using ``--image-only``, or to skip the install stage and use +a previously created disk image by passing ``--disk-image`` or ``--fs-image`` + +When creating an iso qemu boots using the passed Anaconda installer iso +and installs the system based on the kickstart. The ``%post`` section of the +kickstart is used to customize the installed system in the same way that +current spin-kickstarts do. + +livemedia-creator monitors the install process for problems by watching the +install logs. 
+
+When creating an iso, the disk image's / partition is copied into a
+formatted filesystem image, which is then used as the input to lorax for
+creation of the final media.
+
+The final image is created by lorax, using the templates in
+/usr/share/lorax/live/ or the live directory below the directory specified
+by ``--lorax-templates``. The templates are written using the Mako template
+system with some extra commands added by lorax.
+
+.. note::
+    The output from --make-iso includes the artifacts used to create the
+    boot.iso: the kernel, initrd, the squashfs filesystem, etc. If you only
+    want the boot.iso you can pass ``--iso-only`` and the other files will
+    be removed. You can also name the iso by using ``--iso-name
+    my-live.iso``.
+
+
+Kickstarts
+----------
+
+The docs/ directory includes several example kickstarts, one to create a
+live desktop iso using GNOME, and another to create a minimal disk image.
+When creating your own kickstarts you should start with the minimal
+example; it includes several needed packages that are not always included
+by dependencies.
+
+Or you can use existing spin kickstarts to create live media with a few
+changes. Here are the steps I used to convert the Fedora XFCE spin (a
+minimal kickstart assembled from these steps is sketched after the list):
+
+1. Flatten the xfce kickstart using ksflatten
+2. Add zerombr so you don't get the disk init dialog
+3. Add clearpart --all
+4. Add swap partition
+5. bootloader target
+6. Add shutdown to the kickstart
+7. Add network --bootproto=dhcp --activate to activate the network.
+   This works for F16 builds, but for F15 and before you need to pass
+   something on the cmdline that activates the network, like sshd:
+
+   ``livemedia-creator --kernel-args="sshd"``
+
+8. Add a root password::
+
+       rootpw rootme
+       network --bootproto=dhcp --activate
+       zerombr
+       clearpart --all
+       bootloader --location=mbr
+       part swap --size=512
+       shutdown
+
+9. In the livesys script section of the %post remove the root password.
+   This really depends on how the spin wants to work. You could add the
+   live user that you create to the %wheel group so that sudo works, if
+   you wanted to.
+
+   ``passwd -d root > /dev/null``
+
+10. Remove /etc/fstab in %post, dracut handles mounting the rootfs
+
+    ``cat /dev/null > /etc/fstab``
+
+    Do this only for live isos; the filesystem will be mounted read-only
+    if there is no /etc/fstab
+
+11. Don't delete initramfs files from /boot in %post
+12. When creating live isos you need to have, at least, these packages in
+    the %packages section::
+
+        dracut-config-generic
+        dracut-live
+        -dracut-config-rescue
+        grub2-efi
+        memtest86+
+        syslinux
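+
+Putting those steps together, a minimal live kickstart skeleton might look
+like this (the root password, partition sizes, and package selection are
+placeholder examples; a real spin kickstart would carry its own %packages
+list and %post customizations)::
+
+    network --bootproto=dhcp --activate
+    zerombr
+    clearpart --all
+    bootloader --location=mbr
+    part / --size=4096    # a root partition is also needed; size is an example
+    part swap --size=512
+    rootpw rootme
+    shutdown
+
+    %packages
+    @core
+    dracut-config-generic
+    dracut-live
+    -dracut-config-rescue
+    grub2-efi
+    memtest86+
+    syslinux
+    %end
+
+    %post
+    # for a live iso: remove the root password and /etc/fstab
+    passwd -d root > /dev/null
+    cat /dev/null > /etc/fstab
+    %end
+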
+
+User created repositories
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you are using your own repositories and installing groups (e.g. @core)
+make sure you create the repodata with group information, like this:
+``createrepo -g /path/to/groups.xml /path/to/rpms``
+
+Using a Proxy with repos
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+One drawback to using qemu is that it pulls the packages from the repo each
+time you run it. To speed things up you either need a local mirror of the
+packages, or you can use a caching proxy. When using a proxy you pass it to
+livemedia-creator like this:
+
+    ``--proxy=http://proxy.yourdomain.com:3128``
+
+You also need to use a specific mirror instead of mirrormanager so that the
+packages will get cached, so your kickstart url would look like:
+
+    ``url --url="http://dl.fedoraproject.org/pub/fedora/linux/development/rawhide/x86_64/os/"``
+
+You can also add an update repo, but don't name it updates; add --proxy to
+it as well. You can use all of the `kickstart commands `_ in your
+kickstart. Make sure there is only one ``url`` command; other repos have to
+use the ``repo`` command and cannot be named ``updates``, which is reserved
+for Anaconda's use. e.g.::
+
+    url --url=PRIMARY-REPO-URL --proxy=PROXY-URL
+    repo --name="repo1" --baseurl=FIRST-REPO-URL --proxy=PROXY-URL
+    repo --name="repo2" --baseurl=SECOND-REPO-URL --proxy=PROXY-URL
+
+
+Anaconda image install (no-virt)
+--------------------------------
+
+You can create images without using qemu by passing ``--no-virt`` on the
+cmdline. This will use Anaconda's directory install feature to handle the
+install. There are a couple of things to keep in mind when doing this:
+
+1. It will be most reliable when building images for the same release that
+   the host is running. Because Anaconda has expectations about the system
+   it is running under, you may encounter strange bugs if you try to build
+   newer or older releases.
+
+2. It may totally trash your host. So far I haven't had this happen, but
+   the possibility exists that a bug in Anaconda could result in it
+   operating on real devices. I recommend running it in a virt or on a
+   system that you can afford to lose all data from.
+
+The logs from anaconda will be placed in an ./anaconda/ directory in either
+the current directory or in the directory used for --logfile.
+
+Example cmdline:
+
+``sudo livemedia-creator --make-iso --no-virt --ks=./fedora-livemedia.ks``
+
+.. note::
+    Using no-virt to create a partitioned disk image (e.g. --make-disk or
+    --make-vagrant) will only create disks usable on the host platform
+    (BIOS or UEFI). You can create BIOS partitioned disk images on UEFI by
+    using virt.
+
+.. note::
+    As of version 30.7 SELinux can be set to Enforcing. The current state
+    is logged for debugging purposes, and if there are SELinux denials they
+    should be reported as a bug.
+
+AMI Images
+----------
+
+Amazon EC2 images can be created by using the --make-ami switch and an
+appropriate kickstart file. All of the work to customize the image is
+handled by the kickstart. The example currently included was modified from
+the cloud-kickstarts version so that it would work with livemedia-creator.
+
+Example cmdline:
+
+``sudo livemedia-creator --make-ami --iso=/path/to/boot.iso --ks=./docs/fedora-livemedia-ec2.ks``
+
+This will produce an ami-root.img file in the working directory.
+
+At this time I have not tested the image with EC2. Feedback would be
+welcome.
+
+
+Appliance Creation
+------------------
+
+livemedia-creator can now replace appliance-tools by using the
+--make-appliance switch. This will create the partitioned disk image and an
+XML file that can be used with virt-image to set up a virtual system.
+
+The XML is generated using the Mako template from
+/usr/share/lorax/appliance/libvirt.xml. You can use a different template by
+passing ``--app-template