diff --git a/f34-branch/.buildinfo b/f34-branch/.buildinfo
new file mode 100644
index 00000000..a3e487a6
--- /dev/null
+++ b/f34-branch/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: a40b1ee372d49c9b6702c59e1d27e3a5
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/f34-branch/.doctrees/environment.pickle b/f34-branch/.doctrees/environment.pickle
new file mode 100644
index 00000000..85018f66
Binary files /dev/null and b/f34-branch/.doctrees/environment.pickle differ
diff --git a/f34-branch/.doctrees/index.doctree b/f34-branch/.doctrees/index.doctree
new file mode 100644
index 00000000..3ec153e7
Binary files /dev/null and b/f34-branch/.doctrees/index.doctree differ
diff --git a/f34-branch/.doctrees/intro.doctree b/f34-branch/.doctrees/intro.doctree
new file mode 100644
index 00000000..6c6ffa86
Binary files /dev/null and b/f34-branch/.doctrees/intro.doctree differ
diff --git a/f34-branch/.doctrees/livemedia-creator.doctree b/f34-branch/.doctrees/livemedia-creator.doctree
new file mode 100644
index 00000000..f8162dbb
Binary files /dev/null and b/f34-branch/.doctrees/livemedia-creator.doctree differ
diff --git a/f34-branch/.doctrees/lorax.doctree b/f34-branch/.doctrees/lorax.doctree
new file mode 100644
index 00000000..1b49abf2
Binary files /dev/null and b/f34-branch/.doctrees/lorax.doctree differ
diff --git a/f34-branch/.doctrees/mkksiso.doctree b/f34-branch/.doctrees/mkksiso.doctree
new file mode 100644
index 00000000..dff8e3a9
Binary files /dev/null and b/f34-branch/.doctrees/mkksiso.doctree differ
diff --git a/f34-branch/.doctrees/modules.doctree b/f34-branch/.doctrees/modules.doctree
new file mode 100644
index 00000000..3e3b4ff5
Binary files /dev/null and b/f34-branch/.doctrees/modules.doctree differ
diff --git a/f34-branch/.doctrees/product-images.doctree b/f34-branch/.doctrees/product-images.doctree
new file mode 100644
index 00000000..61ee2228
Binary files /dev/null and b/f34-branch/.doctrees/product-images.doctree differ
diff --git a/f34-branch/.doctrees/pylorax.doctree b/f34-branch/.doctrees/pylorax.doctree
new file mode 100644
index 00000000..f4f47bf0
Binary files /dev/null and b/f34-branch/.doctrees/pylorax.doctree differ
diff --git a/f34-branch/_modules/index.html b/f34-branch/_modules/index.html
new file mode 100644
index 00000000..5f25c24b
--- /dev/null
+++ b/f34-branch/_modules/index.html
@@ -0,0 +1,118 @@
+[Sphinx module index page "Overview: module code — Lorax 34.12 documentation"; header, navigation and footer markup omitted]
\ No newline at end of file
diff --git a/f34-branch/_modules/pylorax.html b/f34-branch/_modules/pylorax.html
new file mode 100644
index 00000000..131dffd4
--- /dev/null
+++ b/f34-branch/_modules/pylorax.html
@@ -0,0 +1,663 @@
+[Sphinx viewcode page "pylorax — Lorax 34.10 documentation"; header, navigation and footer markup omitted]

Source code for pylorax

+#
+# __init__.py
+#
+# Copyright (C) 2010-2015  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#                     David Cantrell <dcantrell@redhat.com>
+#                     Will Woods <wwoods@redhat.com>
+
+# set up logging
+import logging
+logger = logging.getLogger("pylorax")
+logger.addHandler(logging.NullHandler())
+
+program_log = logging.getLogger("program")
+
+import sys
+import os
+import configparser
+import tempfile
+import locale
+from subprocess import CalledProcessError
+import selinux
+from glob import glob
+
+from pylorax.base import BaseLoraxClass, DataHolder
+import pylorax.output as output
+
+import dnf
+
+from pylorax.sysutils import joinpaths, remove, linktree
+
+from pylorax.treebuilder import RuntimeBuilder, TreeBuilder
+from pylorax.buildstamp import BuildStamp
+from pylorax.treeinfo import TreeInfo
+from pylorax.discinfo import DiscInfo
+from pylorax.executils import runcmd, runcmd_output
+
+
+# get lorax version
+try:
+    import pylorax.version
+except ImportError:
+    vernum = "devel"
+else:
+    vernum = pylorax.version.num
+
+DRACUT_DEFAULT = ["--xz", "--install", "/.buildstamp", "--no-early-microcode", "--add", "fips"]
+
+# Used for DNF conf.module_platform_id
+DEFAULT_PLATFORM_ID = "platform:f34"
+
+
[docs]class ArchData(DataHolder): + lib64_arches = ("x86_64", "ppc64le", "s390x", "ia64", "aarch64") + bcj_arch = dict(i386="x86", x86_64="x86", + ppc64le="powerpc", + arm="arm", armhfp="arm") + + def __init__(self, buildarch): + super(ArchData, self).__init__() + self.buildarch = buildarch + self.basearch = dnf.rpm.basearch(buildarch) + self.libdir = "lib64" if self.basearch in self.lib64_arches else "lib" + self.bcj = self.bcj_arch.get(self.basearch)
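# --- Editor's note: illustrative example, not part of the pylorax source above ---
# ArchData resolves the base architecture, library directory and xz BCJ filter
# from a build arch (it needs the dnf Python bindings for dnf.rpm.basearch):
#
#   arch = ArchData("x86_64")
#   arch.basearch  -> "x86_64"
#   arch.libdir    -> "lib64"  (x86_64 is listed in lib64_arches)
#   arch.bcj       -> "x86"    (later passed to the squashfs/xz step as "-Xbcj x86")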
+ +
[docs]class Lorax(BaseLoraxClass): + + def __init__(self): + BaseLoraxClass.__init__(self) + self._configured = False + self.product = None + self.workdir = None + self.arch = None + self.conf = None + self.inroot = None + self.debug = False + self.outputdir = None + self._templatedir = None + + # set locale to C + locale.setlocale(locale.LC_ALL, 'C') + +
[docs] def configure(self, conf_file="/etc/lorax/lorax.conf"): + self.conf = configparser.SafeConfigParser() + + # set defaults + self.conf.add_section("lorax") + self.conf.set("lorax", "debug", "1") + self.conf.set("lorax", "sharedir", "/usr/share/lorax") + self.conf.set("lorax", "logdir", "/var/log/lorax") + + self.conf.add_section("output") + self.conf.set("output", "colors", "1") + self.conf.set("output", "encoding", "utf-8") + self.conf.set("output", "ignorelist", "/usr/share/lorax/ignorelist") + + self.conf.add_section("templates") + self.conf.set("templates", "ramdisk", "ramdisk.ltmpl") + + self.conf.add_section("compression") + self.conf.set("compression", "type", "xz") + self.conf.set("compression", "args", "") + self.conf.set("compression", "bcj", "on") + + # read the config file + if os.path.isfile(conf_file): + self.conf.read(conf_file) + + # set up the output + self.debug = self.conf.getboolean("lorax", "debug") + output_level = output.DEBUG if self.debug else output.INFO + + if sys.stdout.isatty(): + colors = self.conf.getboolean("output", "colors") + else: + colors = False + encoding = self.conf.get("output", "encoding") + + self.output.basic_config(output_level=output_level, + colors=colors, encoding=encoding) + + ignorelist = self.conf.get("output", "ignorelist") + if os.path.isfile(ignorelist): + with open(ignorelist, "r") as fobj: + for line in fobj: + line = line.strip() + if line and not line.startswith("#"): + self.output.ignore(line) + + # cron does not have sbin in PATH, + # so we have to add it ourselves + os.environ["PATH"] = "{0}:/sbin:/usr/sbin".format(os.environ["PATH"]) + + # remove some environmental variables that can cause problems with package scripts + env_remove = ('DISPLAY', 'DBUS_SESSION_BUS_ADDRESS') + list(os.environ.pop(k) for k in env_remove if k in os.environ) + + self._configured = True
+ + @property + def templatedir(self): + """Find the template directory. + + Pick the first directory under sharedir/templates.d/ if it exists. + Otherwise use the sharedir + """ + if not self._templatedir: + self._templatedir = find_templates(self.conf.get("lorax", "sharedir")) + logger.info("Using templatedir %s", self._templatedir) + return self._templatedir + +
[docs] def init_stream_logging(self): + sh = logging.StreamHandler() + sh.setLevel(logging.INFO) + logger.addHandler(sh)
+ +
[docs] def init_file_logging(self, logdir, logname="pylorax.log"): + fh = logging.FileHandler(filename=joinpaths(logdir, logname), mode="w") + fh.setLevel(logging.DEBUG) + logger.addHandler(fh)
+ +
[docs] def run(self, dbo, product, version, release, variant="", bugurl="", + isfinal=False, workdir=None, outputdir=None, buildarch=None, volid=None, + domacboot=True, doupgrade=True, remove_temp=False, + installpkgs=None, excludepkgs=None, + size=2, + add_templates=None, + add_template_vars=None, + add_arch_templates=None, + add_arch_template_vars=None, + verify=True, + user_dracut_args=None, + squashfs_only=False, + skip_branding=False): + + assert self._configured + + installpkgs = installpkgs or [] + excludepkgs = excludepkgs or [] + + if domacboot: + try: + runcmd(["rpm", "-q", "hfsplus-tools"]) + except CalledProcessError: + logger.critical("you need to install hfsplus-tools to create mac images") + sys.exit(1) + + # set up work directory + self.workdir = workdir or tempfile.mkdtemp(prefix="pylorax.work.") + if not os.path.isdir(self.workdir): + os.makedirs(self.workdir) + + # set up log directory + logdir = self.conf.get("lorax", "logdir") + if not os.path.isdir(logdir): + os.makedirs(logdir) + + self.init_stream_logging() + self.init_file_logging(logdir) + + logger.debug("version is %s", vernum) + log_selinux_state() + + logger.debug("using work directory %s", self.workdir) + logger.debug("using log directory %s", logdir) + + # set up output directory + self.outputdir = outputdir or tempfile.mkdtemp(prefix="pylorax.out.") + if not os.path.isdir(self.outputdir): + os.makedirs(self.outputdir) + logger.debug("using output directory %s", self.outputdir) + + # do we have root privileges? + logger.info("checking for root privileges") + if not os.geteuid() == 0: + logger.critical("no root privileges") + sys.exit(1) + + # do we have a proper dnf base object? + logger.info("checking dnf base object") + if not isinstance(dbo, dnf.Base): + logger.critical("no dnf base object") + sys.exit(1) + self.inroot = dbo.conf.installroot + logger.debug("using install root: %s", self.inroot) + + if not buildarch: + buildarch = get_buildarch(dbo) + + logger.info("setting up build architecture") + self.arch = ArchData(buildarch) + for attr in ('buildarch', 'basearch', 'libdir'): + logger.debug("self.arch.%s = %s", attr, getattr(self.arch,attr)) + + logger.info("setting up build parameters") + self.product = DataHolder(name=product, version=version, release=release, + variant=variant, bugurl=bugurl, isfinal=isfinal) + logger.debug("product data: %s", self.product) + + # NOTE: if you change isolabel, you need to change pungi to match, or + # the pungi images won't boot. 
+ isolabel = volid or "%s-%s-%s" % (self.product.name, self.product.version, self.arch.basearch) + + if len(isolabel) > 32: + logger.fatal("the volume id cannot be longer than 32 characters") + sys.exit(1) + + # NOTE: rb.root = dbo.conf.installroot (== self.inroot) + rb = RuntimeBuilder(product=self.product, arch=self.arch, + dbo=dbo, templatedir=self.templatedir, + installpkgs=installpkgs, + excludepkgs=excludepkgs, + add_templates=add_templates, + add_template_vars=add_template_vars, + skip_branding=skip_branding) + + logger.info("installing runtime packages") + rb.install() + + # write .buildstamp + buildstamp = BuildStamp(self.product.name, self.product.version, + self.product.bugurl, self.product.isfinal, + self.arch.buildarch, self.product.variant) + + buildstamp.write(joinpaths(self.inroot, ".buildstamp")) + + if self.debug: + rb.writepkglists(joinpaths(logdir, "pkglists")) + rb.writepkgsizes(joinpaths(logdir, "original-pkgsizes.txt")) + + logger.info("doing post-install configuration") + rb.postinstall() + + # write .discinfo + discinfo = DiscInfo(self.product.release, self.arch.basearch) + discinfo.write(joinpaths(self.outputdir, ".discinfo")) + + logger.info("backing up installroot") + installroot = joinpaths(self.workdir, "installroot") + linktree(self.inroot, installroot) + + logger.info("generating kernel module metadata") + rb.generate_module_data() + + logger.info("cleaning unneeded files") + rb.cleanup() + + if verify: + logger.info("verifying the installroot") + if not rb.verify(): + sys.exit(1) + else: + logger.info("Skipping verify") + + if self.debug: + rb.writepkgsizes(joinpaths(logdir, "final-pkgsizes.txt")) + + logger.info("creating the runtime image") + runtime = "images/install.img" + compression = self.conf.get("compression", "type") + compressargs = self.conf.get("compression", "args").split() # pylint: disable=no-member + if self.conf.getboolean("compression", "bcj"): + if self.arch.bcj: + compressargs += ["-Xbcj", self.arch.bcj] + else: + logger.info("no BCJ filter for arch %s", self.arch.basearch) + if squashfs_only: + # Create an ext4 rootfs.img and compress it with squashfs + rc = rb.create_squashfs_runtime(joinpaths(installroot,runtime), + compression=compression, compressargs=compressargs, + size=size) + else: + # Create an ext4 rootfs.img and compress it with squashfs + rc = rb.create_ext4_runtime(joinpaths(installroot,runtime), + compression=compression, compressargs=compressargs, + size=size) + if rc != 0: + logger.error("rootfs.img creation failed. 
See program.log") + sys.exit(1) + + rb.finished() + + logger.info("preparing to build output tree and boot images") + treebuilder = TreeBuilder(product=self.product, arch=self.arch, + inroot=installroot, outroot=self.outputdir, + runtime=runtime, isolabel=isolabel, + domacboot=domacboot, doupgrade=doupgrade, + templatedir=self.templatedir, + add_templates=add_arch_templates, + add_template_vars=add_arch_template_vars, + workdir=self.workdir) + + logger.info("rebuilding initramfs images") + if not user_dracut_args: + dracut_args = DRACUT_DEFAULT + else: + dracut_args = [] + for arg in user_dracut_args: + dracut_args += arg.split(" ", 1) + + anaconda_args = dracut_args + ["--add", "anaconda pollcdrom qemu qemu-net"] + + logger.info("dracut args = %s", dracut_args) + logger.info("anaconda args = %s", anaconda_args) + treebuilder.rebuild_initrds(add_args=anaconda_args) + + logger.info("populating output tree and building boot images") + treebuilder.build() + + # write .treeinfo file and we're done + treeinfo = TreeInfo(self.product.name, self.product.version, + self.product.variant, self.arch.basearch) + for section, data in treebuilder.treeinfo_data.items(): + treeinfo.add_section(section, data) + treeinfo.write(joinpaths(self.outputdir, ".treeinfo")) + + # cleanup + if remove_temp: + remove(self.workdir)
+ + +
[docs]def get_buildarch(dbo): + # get architecture of the available anaconda package + buildarch = None + q = dbo.sack.query() + a = q.available() + for anaconda in a.filter(name="anaconda-core"): + if anaconda.arch != "src": + buildarch = anaconda.arch + break + if not buildarch: + logger.critical("no anaconda-core package in the repository") + sys.exit(1) + + return buildarch
+ + +
[docs]def setup_logging(logfile, theLogger): + """ + Setup the various logs + + :param logfile: filename to write the log to + :type logfile: string + :param theLogger: top-level logger + :type theLogger: logging.Logger + """ + if not os.path.isdir(os.path.abspath(os.path.dirname(logfile))): + os.makedirs(os.path.abspath(os.path.dirname(logfile))) + + # Setup logging to console and to logfile + logger.setLevel(logging.DEBUG) + theLogger.setLevel(logging.DEBUG) + + sh = logging.StreamHandler() + sh.setLevel(logging.INFO) + fmt = logging.Formatter("%(asctime)s: %(message)s") + sh.setFormatter(fmt) + logger.addHandler(sh) + theLogger.addHandler(sh) + + fh = logging.FileHandler(filename=logfile, mode="w") + fh.setLevel(logging.DEBUG) + fmt = logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s") + fh.setFormatter(fmt) + logger.addHandler(fh) + theLogger.addHandler(fh) + + # External program output log + program_log.setLevel(logging.DEBUG) + f = os.path.abspath(os.path.dirname(logfile))+"/program.log" + fh = logging.FileHandler(filename=f, mode="w") + fh.setLevel(logging.DEBUG) + fmt = logging.Formatter("%(asctime)s %(levelname)s: %(message)s") + fh.setFormatter(fmt) + program_log.addHandler(fh)
+ + +
[docs]def find_templates(templatedir="/usr/share/lorax"): + """ Find the templates to use. + + :param str templatedir: Top directory to search for templates + :returns: Path to templates + :rtype: str + + If there is a templates.d directory under templatedir the + lowest numbered directory entry is returned. + + eg. /usr/share/lorax/templates.d/99-generic/ + """ + if os.path.isdir(joinpaths(templatedir, "templates.d")): + try: + templatedir = sorted(glob(joinpaths(templatedir, "templates.d", "*")))[0] + except IndexError: + pass + return templatedir
+ +
[docs]def log_selinux_state(): + """Log the current state of selinux""" + if selinux.is_selinux_enabled(): + if selinux.security_getenforce(): + logger.info("selinux is enabled and in Enforcing mode") + else: + logger.info("selinux is enabled and in Permissive mode") + else: + logger.info("selinux is Disabled")
+
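The pylorax module above is normally driven by the lorax command line tool, but the same flow can be scripted. A minimal sketch of that usage, assuming root privileges and a repository that contains anaconda-core; the repository URL and directories are placeholders, not values taken from this diff:

    import dnf
    import pylorax

    # Build a DNF base object; its installroot becomes the runtime image root.
    dbo = dnf.Base()
    dbo.conf.installroot = "/var/tmp/lorax/installroot"          # placeholder path
    dbo.repos.add_new_repo("fedora", dbo.conf, baseurl=["https://example.com/repo/"])
    dbo.fill_sack(load_system_repo=False)

    lorax = pylorax.Lorax()
    lorax.configure()                  # reads /etc/lorax/lorax.conf and sets up output/logging
    lorax.run(dbo, product="Fedora", version="34", release="34",
              outputdir="/var/tmp/lorax/output")                 # placeholder path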
\ No newline at end of file
diff --git a/f34-branch/_modules/pylorax/base.html b/f34-branch/_modules/pylorax/base.html
new file mode 100644
index 00000000..9e846c70
--- /dev/null
+++ b/f34-branch/_modules/pylorax/base.html
@@ -0,0 +1,274 @@
+[Sphinx viewcode page "pylorax.base — Lorax 34.10 documentation"; header, navigation and footer markup omitted]

Source code for pylorax.base

+#
+# base.py
+#
+# Copyright (C) 2009-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#
+
+from abc import ABCMeta, abstractmethod
+import sys
+
+import pylorax.output as output
+
+
+
[docs]class BaseLoraxClass(object, metaclass=ABCMeta): + @abstractmethod + def __init__(self): + self.output = output.LoraxOutput() + +
[docs] def pcritical(self, msg, fobj=sys.stdout): + self.output.critical(msg, fobj)
+ +
[docs] def perror(self, msg, fobj=sys.stdout): + self.output.error(msg, fobj)
+ +
[docs] def pwarning(self, msg, fobj=sys.stdout): + self.output.warning(msg, fobj)
+ +
[docs] def pinfo(self, msg, fobj=sys.stdout): + self.output.info(msg, fobj)
+ +
[docs] def pdebug(self, msg, fobj=sys.stdout): + self.output.debug(msg, fobj)
+ + +
[docs]class DataHolder(dict): + + def __init__(self, **kwargs): + dict.__init__(self) + + for attr, value in kwargs.items(): + self[attr] = value + + def __getattr__(self, attr): + if attr in self: + return self[attr] + else: + raise AttributeError + + def __setattr__(self, attr, value): + self[attr] = value + +
[docs] def copy(self): + return DataHolder(**dict.copy(self))
+
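DataHolder is the small attribute-access dict used throughout lorax, for example for the product and arch data in pylorax.__init__. A brief illustration based only on the class above:

    from pylorax.base import DataHolder

    product = DataHolder(name="Fedora", version="34")
    product.bugurl = "https://bugzilla.redhat.com"    # __setattr__ stores into the dict
    print(product["name"], product.version)           # dict and attribute access both work
    clone = product.copy()                            # copy() returns a new DataHolder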
\ No newline at end of file
diff --git a/f34-branch/_modules/pylorax/buildstamp.html b/f34-branch/_modules/pylorax/buildstamp.html
new file mode 100644
index 00000000..f0278f2a
--- /dev/null
+++ b/f34-branch/_modules/pylorax/buildstamp.html
@@ -0,0 +1,273 @@
+[Sphinx viewcode page "pylorax.buildstamp — Lorax 34.10 documentation"; header, navigation and footer markup omitted]

Source code for pylorax.buildstamp

+#
+# buildstamp.py
+#
+# Copyright (C) 2010-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.buildstamp")
+
+import datetime
+import os
+
+
+
[docs]class BuildStamp(object): + + def __init__(self, product, version, bugurl, isfinal, buildarch, variant=""): + self.product = product + self.version = version + self.bugurl = bugurl + self.isfinal = isfinal + self.variant = variant + + if 'SOURCE_DATE_EPOCH' in os.environ: + now = datetime.datetime.utcfromtimestamp( + int(os.environ['SOURCE_DATE_EPOCH'])) + else: + now = datetime.datetime.now() + now = now.strftime("%Y%m%d%H%M") + self.uuid = "{0}.{1}".format(now, buildarch) + +
[docs] def write(self, outfile): + # get lorax version + try: + import pylorax.version + except ImportError: + vernum = "devel" + else: + vernum = pylorax.version.num + + logger.info("writing .buildstamp file") + with open(outfile, "w") as fobj: + fobj.write("[Main]\n") + fobj.write("Product={0.product}\n".format(self)) + fobj.write("Version={0.version}\n".format(self)) + fobj.write("BugURL={0.bugurl}\n".format(self)) + fobj.write("IsFinal={0.isfinal}\n".format(self)) + fobj.write("UUID={0.uuid}\n".format(self)) + if self.variant: + fobj.write("Variant={0.variant}\n".format(self)) + fobj.write("[Compose]\n") + fobj.write("Lorax={0}\n".format(vernum))
+
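The .buildstamp file written by this class is what identifies the boot media to anaconda. A short usage sketch; as the constructor above shows, setting SOURCE_DATE_EPOCH makes the timestamp part of the UUID reproducible (the output path is a placeholder):

    import os
    from pylorax.buildstamp import BuildStamp

    os.environ["SOURCE_DATE_EPOCH"] = "1609459200"    # 2021-01-01 00:00 UTC
    stamp = BuildStamp("Fedora", "34", "https://bugzilla.redhat.com",
                       isfinal=True, buildarch="x86_64", variant="Everything")
    stamp.write("/tmp/example.buildstamp")            # writes the [Main] and [Compose] sections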
\ No newline at end of file
diff --git a/f34-branch/_modules/pylorax/cmdline.html b/f34-branch/_modules/pylorax/cmdline.html
new file mode 100644
index 00000000..9de49509
--- /dev/null
+++ b/f34-branch/_modules/pylorax/cmdline.html
@@ -0,0 +1,528 @@
+[Sphinx viewcode page "pylorax.cmdline — Lorax 34.10 documentation"; header, navigation and footer markup omitted]

Source code for pylorax.cmdline

+#
+# cmdline.py
+#
+# Copyright (C) 2016  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Brian C. Lane <bcl@redhat.com>
+
+import os
+import sys
+import argparse
+
+from pylorax import vernum
+
+version = "{0}-{1}".format(os.path.basename(sys.argv[0]), vernum)
+
+
[docs]def lorax_parser(dracut_default=""): + """ Return the ArgumentParser for lorax""" + + parser = argparse.ArgumentParser(description="Create the Anaconda boot.iso") + + # required arguments for image creation + required = parser.add_argument_group("required arguments") + required.add_argument("-p", "--product", help="product name", required=True, metavar="PRODUCT") + required.add_argument("-v", "--version", help="version identifier", required=True, metavar="VERSION") + required.add_argument("-r", "--release", help="release information", required=True, metavar="RELEASE") + required.add_argument("-s", "--source", help="source repository (may be listed multiple times)", + metavar="REPOSITORY", action="append", default=[]) + required.add_argument("--repo", help="source dnf repository file", type=os.path.abspath, + dest="repos", metavar="REPOSITORY", action="append", default=[]) + + # optional arguments + optional = parser.add_argument_group("optional arguments") + optional.add_argument("-m", "--mirrorlist", + help="mirrorlist repository (may be listed multiple times)", + metavar="REPOSITORY", action="append", default=[]) + optional.add_argument("-t", "--variant", default="", + help="variant name", metavar="VARIANT") + optional.add_argument("-b", "--bugurl", + help="bug reporting URL for the product", metavar="URL", + default="your distribution provided bug reporting tool") + optional.add_argument("--isfinal", help="", + action="store_true", default=False, dest="isfinal") + optional.add_argument("-c", "--config", default="/etc/lorax/lorax.conf", + help="config file", metavar="CONFIGFILE") + optional.add_argument("--proxy", default=None, + help="repo proxy url:port", metavar="HOST") + optional.add_argument("-i", "--installpkgs", default=[], + action="append", metavar="PACKAGE", + help="package glob to install before runtime-install.tmpl runs. (may be listed multiple times)") + optional.add_argument("-e", "--excludepkgs", default=[], + action="append", metavar="PACKAGE", + help="package glob to remove before runtime-install.tmpl runs. (may be listed multiple times)") + optional.add_argument("--buildarch", default=None, + help="build architecture", metavar="ARCH") + optional.add_argument("--volid", default=None, + help="volume id", metavar="VOLID") + optional.add_argument("--macboot", help="", + action="store_true", default=True, dest="domacboot") + optional.add_argument("--nomacboot", help="", + action="store_false", dest="domacboot") + optional.add_argument("--noupgrade", help="", + action="store_false", default=True, dest="doupgrade") + optional.add_argument("--logfile", default="./lorax.log", type=os.path.abspath, + help="Path to logfile") + optional.add_argument("--tmp", default="/var/tmp/lorax", + help="Top level temporary directory" ) + optional.add_argument("--cachedir", default=None, type=os.path.abspath, + help="DNF cache directory. Default is a temporary dir.") + optional.add_argument("--workdir", default=None, type=os.path.abspath, + help="Work directory, overrides --tmp. 
Default is a temporary dir under /var/tmp/lorax") + optional.add_argument("--force", default=False, action="store_true", + help="Run even when the destination directory exists") + optional.add_argument("--add-template", dest="add_templates", + action="append", help="Additional template for runtime image", + default=[]) + optional.add_argument("--add-template-var", dest="add_template_vars", + action="append", help="Set variable for runtime image template", + default=[]) + optional.add_argument("--add-arch-template", dest="add_arch_templates", + action="append", help="Additional template for architecture-specific image", + default=[]) + optional.add_argument("--add-arch-template-var", dest="add_arch_template_vars", + action="append", help="Set variable for architecture-specific image", + default=[]) + optional.add_argument("--noverify", action="store_false", default=True, dest="verify", + help="Do not verify the install root") + optional.add_argument("--sharedir", metavar="SHAREDIR", type=os.path.abspath, + help="Directory containing all the templates. Overrides config file sharedir") + optional.add_argument("--enablerepo", action="append", default=[], dest="enablerepos", + metavar="[repo]", help="Names of repos to enable") + optional.add_argument("--disablerepo", action="append", default=[], dest="disablerepos", + metavar="[repo]", help="Names of repos to disable") + optional.add_argument("--rootfs-size", type=int, default=2, + help="Size of root filesystem in GiB. Defaults to 2.") + optional.add_argument("--noverifyssl", action="store_true", default=False, + help="Do not verify SSL certificates") + optional.add_argument("--dnfplugin", action="append", default=[], dest="dnfplugins", + help="Enable a DNF plugin by name/glob, or * to enable all of them.") + optional.add_argument("--squashfs-only", action="store_true", default=False, + help="Use a plain squashfs filesystem for the runtime.") + optional.add_argument("--skip-branding", action="store_true", default=False, + help="Disable automatic branding package selection. Use --installpkgs to add custom branding.") + + # dracut arguments + dracut_group = parser.add_argument_group("dracut arguments: (default: %s)" % dracut_default) + dracut_group.add_argument("--dracut-conf", + help="Path to a dracut.conf file to use instead of the " + "default arguments. See the dracut.conf(5) manpage.") + dracut_group.add_argument("--dracut-arg", action="append", dest="dracut_args", + help="Argument to pass to dracut when " + "rebuilding the initramfs. Pass this " + "once for each argument. NOTE: this " + "overrides the defaults.") + + # add the show version option + parser.add_argument("-V", help="show program's version number and exit", + action="version", version=version) + + parser.add_argument("outputdir", help="Output directory", metavar="OUTPUTDIR", type=os.path.abspath) + + return parser
+ + +
[docs]def lmc_parser(dracut_default=""): + """ Return a ArgumentParser object for live-media-creator.""" + parser = argparse.ArgumentParser(description="Create Live Install Media", + fromfile_prefix_chars="@") + + # These are mutually exclusive, one is required + action = parser.add_mutually_exclusive_group(required=True) + action.add_argument("--make-iso", action="store_true", + help="Build a live iso") + action.add_argument("--make-disk", action="store_true", + help="Build a partitioned disk image") + action.add_argument("--make-fsimage", action="store_true", + help="Build a filesystem image") + action.add_argument("--make-appliance", action="store_true", + help="Build an appliance image and XML description") + action.add_argument("--make-ami", action="store_true", + help="Build an ami image") + action.add_argument("--make-tar", action="store_true", + help="Build a tar of the root filesystem") + action.add_argument("--make-tar-disk", action="store_true", + help="Build a tar of a partitioned disk image") + action.add_argument("--make-pxe-live", action="store_true", + help="Build a live pxe boot squashfs image") + action.add_argument("--make-ostree-live", action="store_true", + help="Build a live pxe boot squashfs image of Atomic Host") + action.add_argument("--make-oci", action="store_true", + help="Build an Open Container Initiative image") + action.add_argument("--make-vagrant", action="store_true", + help="Build a Vagrant Box image") + + parser.add_argument("--iso", type=os.path.abspath, + help="Anaconda installation .iso path to use for qemu") + parser.add_argument("--iso-only", action="store_true", + help="Remove all iso creation artifacts except the boot.iso, " + "combine with --iso-name to rename the boot.iso") + parser.add_argument("--iso-name", default=None, + help="Name of output iso file for --iso-only. Default is boot.iso") + parser.add_argument("--ks", action="append", type=os.path.abspath, + help="Kickstart file defining the install.") + parser.add_argument("--image-only", action="store_true", + help="Exit after creating fs/disk image.") + + parser.add_argument("--no-virt", action="store_true", + help="Run anaconda directly on host instead of using qemu") + parser.add_argument("--proxy", + help="proxy URL to use for the install") + parser.add_argument("--anaconda-arg", action="append", dest="anaconda_args", + help="Additional argument to pass to anaconda (no-virt " + "mode). Pass once for each argument") + parser.add_argument("--armplatform", + help="the platform to use when creating images for ARM, " + "i.e., highbank, mvebu, omap, tegra, etc.") + parser.add_argument("--location", default=None, type=os.path.abspath, + help="location of iso directory tree with initrd.img " + "and vmlinuz. Used to run qemu with a newer initrd " + "than the iso.") + + parser.add_argument("--logfile", default="./livemedia.log", + type=os.path.abspath, + help="Name and path for primary logfile, other logs will " + "be created in the same directory.") + parser.add_argument("--lorax-templates", default=None, + type=os.path.abspath, + help="Path to mako templates for lorax") + parser.add_argument("--tmp", default="/var/tmp", type=os.path.abspath, + help="Top level temporary directory") + parser.add_argument("--resultdir", default=None, dest="result_dir", + type=os.path.abspath, + help="Directory to copy the resulting images and iso into. 
" + "Defaults to the temporary working directory") + + parser.add_argument("--macboot", action="store_true", default=True, + dest="domacboot") + parser.add_argument("--nomacboot", action="store_false", + dest="domacboot") + + parser.add_argument("--extra-boot-args", default="", dest="extra_boot_args", + help="Extra arguments to add to the bootloader kernel cmdline in the templates") + + image_group = parser.add_argument_group("disk/fs image arguments") + image_group.add_argument("--disk-image", type=os.path.abspath, + help="Path to existing disk image to use for creating final image.") + image_group.add_argument("--keep-image", action="store_true", + help="Keep raw disk image after .iso creation") + image_group.add_argument("--fs-image", type=os.path.abspath, + help="Path to existing filesystem image to use for creating final image.") + image_group.add_argument("--image-name", default=None, + help="Name of output file to create. Used for tar, fs and disk image. Default is a random name.") + image_group.add_argument("--tar-disk-name", default=None, + help="Name of the archive member for make-tar-disk.") + image_group.add_argument("--fs-label", default="Anaconda", + help="Label to set on fsimage, default is 'Anaconda'") + image_group.add_argument("--image-size-align", type=int, default=0, + help="Create a disk image with a size that is a multiple of this value in MiB.") + image_group.add_argument("--image-type", default=None, + help="Create an image with qemu-img. See qemu-img --help for supported formats.") + image_group.add_argument("--qemu-arg", action="append", dest="qemu_args", default=[], + help="Arguments to pass to qemu-img. Pass once for each argument, they will be used for ALL calls to qemu-img.") + image_group.add_argument("--qcow2", action="store_true", + help="Create qcow2 image instead of raw sparse image when making disk images.") + image_group.add_argument("--qcow2-arg", action="append", dest="qemu_args", default=[], + help="Arguments to pass to qemu-img. Pass once for each argument, they will be used for ALL calls to qemu-img.") + image_group.add_argument("--compression", default="xz", + help="Compression binary for make-tar. xz, lzma, gzip, and bzip2 are supported. xz is the default.") + image_group.add_argument("--compress-arg", action="append", dest="compress_args", default=[], + help="Arguments to pass to compression. Pass once for each argument") + # Group of arguments for appliance creation + app_group = parser.add_argument_group("appliance arguments") + app_group.add_argument("--app-name", default=None, + help="Name of appliance to pass to template") + app_group.add_argument("--app-template", default=None, + help="Path to template to use for appliance data.") + app_group.add_argument("--app-file", default="appliance.xml", + help="Appliance template results file.") + + # Group of arguments to pass to qemu + virt_group = parser.add_argument_group("qemu arguments") + virt_group.add_argument("--ram", metavar="MEMORY", type=int, default=2048, + help="Memory to allocate for installer in megabytes.") + virt_group.add_argument("--vcpus", type=int, default=None, + help="Passed to qemu -smp command") + virt_group.add_argument("--vnc", + help="Passed to qemu -display command. eg. vnc=127.0.0.1:5, default is to " + "choose the first unused vnc port.") + virt_group.add_argument("--arch", default=None, + help="System arch to build for. Used to select qemu-system-* command. 
" + "Defaults to qemu-system-<arch>") + virt_group.add_argument("--kernel-args", + help="Additional argument to pass to the installation kernel") + virt_group.add_argument("--ovmf-path", default="/usr/share/edk2/ovmf/", + help="Path to OVMF firmware") + virt_group.add_argument("--virt-uefi", action="store_true", default=False, + help="Use OVMF firmware to boot the VM in UEFI mode") + virt_group.add_argument("--no-kvm", action="store_true", default=False, + help="Skip using kvm with qemu even if it is available.") + virt_group.add_argument("--with-rng", default="/dev/random", + help="RNG device for QEMU (none for no RNG)") + + # dracut arguments + dracut_group = parser.add_argument_group("dracut arguments: (default: %s)" % dracut_default) + dracut_group.add_argument("--dracut-conf", + help="Path to a dracut.conf file to use instead of the " + "default arguments. See the dracut.conf(5) manpage.") + dracut_group.add_argument("--dracut-arg", action="append", dest="dracut_args", + help="Argument to pass to dracut when " + "rebuilding the initramfs. Pass this " + "once for each argument. NOTE: this " + "overrides the defaults.") + + # pxe to live arguments + pxelive_group = parser.add_argument_group("pxe to live arguments") + pxelive_group.add_argument("--live-rootfs-size", type=int, default=0, + help="Size of root filesystem of live image in GiB") + pxelive_group.add_argument("--live-rootfs-keep-size", action="store_true", + help="Keep the original size of root filesystem in live image") + + # OCI specific commands + oci_group = parser.add_argument_group("OCI arguments") + oci_group.add_argument("--oci-config", + help="config.json OCI configuration file") + oci_group.add_argument("--oci-runtime", + help="runtime.json OCI configuration file") + + # Vagrant specific commands + vagrant_group = parser.add_argument_group("Vagrant arguments") + vagrant_group.add_argument("--vagrant-metadata", + help="optional metadata.json file") + vagrant_group.add_argument("--vagrantfile", + help="optional vagrantfile") + + parser.add_argument("--project", default="Linux", + help="substituted for @PROJECT@ in bootloader config files") + parser.add_argument("--releasever", default="34", + help="substituted for @VERSION@ in bootloader config files") + parser.add_argument("--volid", default=None, help="volume id") + parser.add_argument("--squashfs-only", action="store_true", default=False, + help="Use a plain squashfs filesystem for the runtime.") + parser.add_argument("--timeout", default=None, type=int, + help="Cancel installer after X minutes") + + # add the show version option + parser.add_argument("-V", help="show program's version number and exit", + action="version", version=version) + + return parser
+
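Both functions above return plain argparse.ArgumentParser objects, so they can also be used programmatically to validate options. A hedged example of parsing a minimal lorax command line; the repository URL and output directory are placeholders:

    from pylorax.cmdline import lorax_parser

    parser = lorax_parser(dracut_default="--xz --install /.buildstamp")
    opts = parser.parse_args([
        "-p", "Fedora", "-v", "34", "-r", "34",
        "-s", "https://example.com/repo/",       # placeholder repository
        "./results",                             # OUTPUTDIR positional argument
    ])
    print(opts.product, opts.version, opts.outputdir)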
\ No newline at end of file
diff --git a/f34-branch/_modules/pylorax/creator.html b/f34-branch/_modules/pylorax/creator.html
new file mode 100644
index 00000000..75659a90
--- /dev/null
+++ b/f34-branch/_modules/pylorax/creator.html
@@ -0,0 +1,963 @@
+[Sphinx viewcode page "pylorax.creator — Lorax 34.10 documentation"; header, navigation and footer markup omitted]

Source code for pylorax.creator

+#
+# Copyright (C) 2011-2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("pylorax")
+
+import os
+import tempfile
+import subprocess
+import shutil
+import hashlib
+import glob
+
+# Use Mako templates for appliance builder descriptions
+from mako.template import Template
+from mako.exceptions import text_error_template
+
+# Use pykickstart to calculate disk image size
+from pykickstart.parser import KickstartParser
+from pykickstart.constants import KS_SHUTDOWN
+from pykickstart.version import makeVersion
+
+# Use the Lorax treebuilder branch for iso creation
+from pylorax import ArchData
+from pylorax.base import DataHolder
+from pylorax.executils import execWithRedirect, runcmd
+from pylorax.imgutils import PartitionMount
+from pylorax.imgutils import mount, umount, Mount
+from pylorax.imgutils import mksquashfs, mkrootfsimg
+from pylorax.imgutils import copytree
+from pylorax.installer import novirt_install, virt_install, InstallError
+from pylorax.treebuilder import TreeBuilder, RuntimeBuilder
+from pylorax.treebuilder import findkernels
+from pylorax.sysutils import joinpaths, remove
+
+
+# Default parameters for rebuilding initramfs, override with --dracut-arg or --dracut-conf
+DRACUT_DEFAULT = ["--xz", "--add", "livenet dmsquash-live dmsquash-live-ntfs convertfs pollcdrom qemu qemu-net",
+                  "--omit", "plymouth", "--no-hostonly", "--debug", "--no-early-microcode"]
+
+RUNTIME = "images/install.img"
+
+
[docs]class FakeDNF(object): + """ + A minimal DNF object suitable for passing to RuntimeBuilder + + lmc uses RuntimeBuilder to run the arch specific iso creation + templates, so the the installroot config value is the important part of + this. Everything else should be a nop. + """ + def __init__(self, conf): + self.conf = conf + +
[docs] def reset(self): + pass
+ +
[docs]def is_image_mounted(disk_img): + """ + Check to see if the disk_img is mounted + + :returns: True if disk_img is in /proc/mounts + :rtype: bool + """ + with open("/proc/mounts") as mounts: + for mnt in mounts: + fields = mnt.split() + if len(fields) > 2 and fields[1] == disk_img: + return True + return False
+ +
[docs]def find_ostree_root(phys_root): + """ + Find root of ostree deployment + + :param str phys_root: Path to physical root + :returns: Relative path of ostree deployment root + :rtype: str + :raise Exception: More than one deployment roots were found + """ + ostree_root = "" + ostree_sysroots = glob.glob(joinpaths(phys_root, "ostree/boot.?/*/*/0")) + log.debug("ostree_sysroots = %s", ostree_sysroots) + if ostree_sysroots: + if len(ostree_sysroots) > 1: + raise Exception("Too many deployment roots found: %s" % ostree_sysroots) + ostree_root = os.path.relpath(ostree_sysroots[0], phys_root) + return ostree_root
+ +
[docs]def get_arch(mount_dir): + """ + Get the kernel arch + + :returns: Arch of first kernel found at mount_dir/boot/ or i386 + :rtype: str + """ + kernels = findkernels(mount_dir) + if not kernels: + return "i386" + return kernels[0].arch
+ +
[docs]def squashfs_args(opts): + """ Returns the compression type and args to use when making squashfs + + :param opts: ArgumentParser object with compression and compressopts + :returns: tuple of compression type and args + :rtype: tuple + """ + compression = opts.compression or "xz" + arch = ArchData(opts.arch or os.uname().machine) + if compression == "xz" and arch.bcj and not opts.compress_args: + # default to bcj when using xz + compressargs = ["-Xbcj", arch.bcj] + elif opts.compress_args: + compressargs = [] + for arg in opts.compress_args: + compressargs += arg.split(" ", 1) + else: + compressargs = [] + return (compression, compressargs)
+ +
[docs]def dracut_args(opts): + """Return a list of the args to pass to dracut + + Return the default argument list unless one of the dracut cmdline arguments + has been used. + """ + if opts.dracut_conf: + return ["--conf", opts.dracut_conf] + elif opts.dracut_args: + args = [] + for arg in opts.dracut_args: + args += arg.split(" ", 1) + return args + else: + return DRACUT_DEFAULT
+ +
[docs]def make_appliance(disk_img, name, template, outfile, networks=None, ram=1024, + vcpus=1, arch=None, title="Linux", project="Linux", + releasever="34"): + """ + Generate an appliance description file + + :param str disk_img: Full path of the disk image + :param str name: Name of the appliance, passed to the template + :param str template: Full path of Mako template + :param str outfile: Full path of file to write, using template + :param list networks: List of networks(str) from the kickstart + :param int ram: Ram, in MiB, passed to template. Default is 1024 + :param int vcpus: CPUs, passed to template. Default is 1 + :param str arch: CPU architecture. Default is 'x86_64' + :param str title: Title, passed to template. Default is 'Linux' + :param str project: Project, passed to template. Default is 'Linux' + :param str releasever: Release version, passed to template. Default is 34 + """ + if not (disk_img and template and outfile): + return None + + log.info("Creating appliance definition using %s", template) + + if not arch: + arch = "x86_64" + + log.info("Calculating SHA256 checksum of %s", disk_img) + sha256 = hashlib.sha256() + with open(disk_img, "rb") as f: + while True: + data = f.read(1024**2) + if not data: + break + sha256.update(data) + log.info("SHA256 of %s is %s", disk_img, sha256.hexdigest()) + disk_info = DataHolder(name=os.path.basename(disk_img), format="raw", + checksum_type="sha256", checksum=sha256.hexdigest()) + try: + result = Template(filename=template).render(disks=[disk_info], name=name, + arch=arch, memory=ram, vcpus=vcpus, networks=networks, + title=title, project=project, releasever=releasever) + except Exception: + log.error(text_error_template().render()) + raise + + with open(outfile, "w") as f: + f.write(result)
+ + +
[docs]def make_runtime(opts, mount_dir, work_dir, size=None): + """ + Make the squashfs image from a directory + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str mount_dir: Directory tree to compress + :param str work_dir: Output compressed image to work_dir+images/install.img + :param int size: Size of disk image, in GiB + :returns: rc of squashfs creation + :rtype: int + """ + kernel_arch = get_arch(mount_dir) + + # Fake dnf object + fake_dbo = FakeDNF(conf=DataHolder(installroot=mount_dir)) + # Fake arch with only basearch set + arch = ArchData(kernel_arch) + # TODO: Need to get release info from someplace... + product = DataHolder(name=opts.project, version=opts.releasever, release="", + variant="", bugurl="", isfinal=False) + + rb = RuntimeBuilder(product, arch, fake_dbo) + compression, compressargs = squashfs_args(opts) + + if opts.squashfs_only: + log.info("Creating a squashfs only runtime") + return rb.create_squashfs_runtime(joinpaths(work_dir, RUNTIME), size=size, + compression=compression, compressargs=compressargs) + else: + log.info("Creating a squashfs+ext4 runtime") + return rb.create_ext4_runtime(joinpaths(work_dir, RUNTIME), size=size, + compression=compression, compressargs=compressargs)
+ + +
[docs]def rebuild_initrds_for_live(opts, sys_root_dir, results_dir): + """ + Rebuild intrds for pxe live image (root=live:http://) + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str sys_root_dir: Path to root of the system + :param str results_dir: Path of directory for storing results + """ + # cmdline dracut args override the defaults, but need to be parsed + log.info("dracut args = %s", dracut_args(opts)) + + dracut = ["dracut", "--nomdadmconf", "--nolvmconf"] + dracut_args(opts) + + kdir = "boot" + if opts.ostree: + kernels_dir = glob.glob(joinpaths(sys_root_dir, "boot/ostree/*")) + if kernels_dir: + kdir = os.path.relpath(kernels_dir[0], sys_root_dir) + + kernels = [kernel for kernel in findkernels(sys_root_dir, kdir)] + if not kernels: + raise Exception("No initrds found, cannot rebuild_initrds") + + if opts.ostree: + # Dracut assumes to have some dirs in disk image + # /var/tmp for temp files + vartmp_dir = joinpaths(sys_root_dir, "var/tmp") + if not os.path.isdir(vartmp_dir): + os.mkdir(vartmp_dir) + # /root (maybe not fatal) + root_dir = joinpaths(sys_root_dir, "var/roothome") + if not os.path.isdir(root_dir): + os.mkdir(root_dir) + # /tmp (maybe not fatal) + tmp_dir = joinpaths(sys_root_dir, "sysroot/tmp") + if not os.path.isdir(tmp_dir): + os.mkdir(tmp_dir) + + # Write the new initramfs directly to the results directory + os.mkdir(joinpaths(sys_root_dir, "results")) + mount(results_dir, opts="bind", mnt=joinpaths(sys_root_dir, "results")) + # Dracut runs out of space inside the minimal rootfs image + mount("/var/tmp", opts="bind", mnt=joinpaths(sys_root_dir, "var/tmp")) + for kernel in kernels: + if hasattr(kernel, "initrd"): + outfile = os.path.basename(kernel.initrd.path) + else: + # Construct an initrd from the kernel name + outfile = os.path.basename(kernel.path.replace("vmlinuz-", "initrd-") + ".img") + log.info("rebuilding %s", outfile) + log.info("dracut warnings about /proc are safe to ignore") + + kver = kernel.version + cmd = dracut + ["/results/"+outfile, kver] + runcmd(cmd, root=sys_root_dir) + + shutil.copy2(joinpaths(sys_root_dir, kernel.path), results_dir) + umount(joinpaths(sys_root_dir, "var/tmp"), delete=False) + umount(joinpaths(sys_root_dir, "results"), delete=False)
+ +
[docs]def create_pxe_config(template, images_dir, live_image_name, add_args = None): + """ + Create template for pxe to live configuration + + :param str images_dir: Path of directory with images to be used + :param str live_image_name: Name of live rootfs image file + :param list add_args: Arguments to be added to initrd= pxe config + """ + + add_args = add_args or [] + + kernels = [kernel for kernel in findkernels(images_dir, kdir="") + if hasattr(kernel, "initrd")] + if not kernels: + return + + kernel = kernels[0] + + add_args_str = " ".join(add_args) + + + try: + result = Template(filename=template).render(kernel=kernel.path, + initrd=kernel.initrd.path, liveimg=live_image_name, + addargs=add_args_str) + except Exception: + log.error(text_error_template().render()) + raise + + with open (joinpaths(images_dir, "PXE_CONFIG"), "w") as f: + f.write(result)
+ + +
[docs]def make_livecd(opts, mount_dir, work_dir): + """ + Take the content from the disk image and make a livecd out of it + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str mount_dir: Directory tree to compress + :param str work_dir: Output compressed image to work_dir+images/install.img + + This uses wwood's squashfs live initramfs method: + * put the real / into LiveOS/rootfs.img + * make a squashfs of the LiveOS/rootfs.img tree + * This is loaded by dracut when the cmdline is passed to the kernel: + root=live:CDLABEL=<volid> rd.live.image + """ + kernel_arch = get_arch(mount_dir) + + arch = ArchData(kernel_arch) + # TODO: Need to get release info from someplace... + product = DataHolder(name=opts.project, version=opts.releasever, release="", + variant="", bugurl="", isfinal=False) + + # Link /images to work_dir/images to make the templates happy + if os.path.islink(joinpaths(mount_dir, "images")): + os.unlink(joinpaths(mount_dir, "images")) + rc = execWithRedirect("/bin/ln", ["-s", joinpaths(work_dir, "images"), + joinpaths(mount_dir, "images")]) + if rc: + raise RuntimeError("Failed to symlink images from mount_dir to work_dir") + + # The templates expect the config files to be in /tmp/config_files + # I think these should be release specific, not from lorax, but for now + configdir = joinpaths(opts.lorax_templates,"live/config_files/") + configdir_path = "tmp/config_files" + fullpath = joinpaths(mount_dir, configdir_path) + if os.path.exists(fullpath): + remove(fullpath) + copytree(configdir, fullpath) + + isolabel = opts.volid or "{0.name}-{0.version}-{1.basearch}".format(product, arch) + if len(isolabel) > 32: + isolabel = isolabel[:32] + log.warning("Truncating isolabel to 32 chars: %s", isolabel) + + tb = TreeBuilder(product=product, arch=arch, domacboot=opts.domacboot, + inroot=mount_dir, outroot=work_dir, + runtime=RUNTIME, isolabel=isolabel, + templatedir=joinpaths(opts.lorax_templates,"live/"), + extra_boot_args=opts.extra_boot_args) + log.info("Rebuilding initrds") + log.info("dracut args = %s", dracut_args(opts)) + tb.rebuild_initrds(add_args=dracut_args(opts)) + log.info("Building boot.iso") + tb.build() + + return work_dir
+ +
[docs]def mount_boot_part_over_root(img_mount): + """ + Mount boot partition to /boot of root fs mounted in img_mount + + Used for OSTree so it finds deployment configurations on live rootfs + + param img_mount: object with mounted disk image root partition + type img_mount: imgutils.PartitionMount + """ + root_dir = img_mount.mount_dir + is_boot_part = lambda dir: os.path.exists(dir+"/loader.0") + tmp_mount_dir = tempfile.mkdtemp(prefix="lmc-tmpdir-") + sysroot_boot_dir = None + for dev, _size in img_mount.loop_devices: + if dev is img_mount.mount_dev: + continue + try: + mount("/dev/mapper/"+dev, mnt=tmp_mount_dir) + if is_boot_part(tmp_mount_dir): + umount(tmp_mount_dir) + sysroot_boot_dir = joinpaths(root_dir, "boot") + mount("/dev/mapper/"+dev, mnt=sysroot_boot_dir) + break + else: + umount(tmp_mount_dir) + except subprocess.CalledProcessError as e: + log.debug("Looking for boot partition error: %s", e) + remove(tmp_mount_dir) + return sysroot_boot_dir
+ +
[docs]def calculate_disk_size(opts, ks): + """ Calculate the disk size from the kickstart + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str ks: Path to the kickstart to use for the installation + :returns: Disk size in MiB + :rtype: int + + Also takes into account the use of reqpart or reqpart --add-boot + """ + # Disk size for a filesystem image should only be the size of / + # to prevent surprises when using the same kickstart for different installations. + unique_partitions = dict((p.mountpoint, p) for p in ks.handler.partition.partitions) + if opts.no_virt and (opts.make_iso or opts.make_fsimage): + disk_size = 2 + sum(p.size for p in unique_partitions.values() if p.mountpoint == "/") + else: + disk_size = 2 + sum(p.size for p in unique_partitions.values()) + + # reqpart can add 1M, 2M, 200M based on platform. Add 500M to be sure + if ks.handler.reqpart.seen: + log.info("Adding 500M for reqpart") + disk_size += 500 + + # It can also request adding /boot which is 1G + if ks.handler.reqpart.addBoot: + log.info("Adding 1024M for reqpart --addboot") + disk_size += 1024 + + if opts.image_size_align: + disk_size += opts.image_size_align - (disk_size % opts.image_size_align) + + log.info("Using disk size of %sMiB", disk_size) + return disk_size
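# --- Editor's note: worked example, not part of the pylorax source above ---
# For a kickstart with "/" = 4096 MiB, swap = 2048 MiB and "reqpart --add-boot",
# run with --no-virt --make-iso, calculate_disk_size() gives:
#   2 + 4096   (only "/" is counted for no-virt iso/fsimage builds)
#   + 500      (reqpart seen)
#   + 1024     (reqpart --add-boot)
#   = 5622 MiB, then rounded up to --image-size-align if that option is set.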
+ +
[docs]def make_image(opts, ks, cancel_func=None): + """ + Install to a disk image + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str ks: Path to the kickstart to use for the installation + :param cancel_func: Function that returns True to cancel build + :type cancel_func: function + :returns: Path of the image created + :rtype: str + + Use qemu+boot.iso or anaconda to install to a disk image. + """ + + # For make_tar_disk, opts.image_name is the name of the final tarball. + # Use opts.tar_disk_name as the name of the disk image + if opts.make_tar_disk: + disk_img = joinpaths(opts.result_dir, opts.tar_disk_name) + elif opts.image_name: + disk_img = joinpaths(opts.result_dir, opts.image_name) + else: + disk_img = tempfile.mktemp(prefix="lmc-disk-", suffix=".img", dir=opts.result_dir) + log.info("disk_img = %s", disk_img) + disk_size = calculate_disk_size(opts, ks) + + # For make_tar_disk, pass a second path parameter for the final tarball + # not the final output file. + if opts.make_tar_disk: + tar_img = joinpaths(opts.result_dir, opts.image_name) + else: + tar_img = None + + try: + if opts.no_virt: + novirt_install(opts, disk_img, disk_size, cancel_func=cancel_func, tar_img=tar_img) + else: + install_log = os.path.abspath(os.path.dirname(opts.logfile))+"/virt-install.log" + log.info("install_log = %s", install_log) + + virt_install(opts, install_log, disk_img, disk_size, cancel_func=cancel_func, tar_img=tar_img) + except InstallError as e: + log.error("Install failed: %s", e) + if not opts.keep_image: + if os.path.exists(disk_img): + log.info("Removing bad disk image") + os.unlink(disk_img) + if tar_img and os.path.exists(tar_img): + log.info("Removing bad tar file") + os.unlink(tar_img) + raise + + log.info("Disk Image install successful") + + if opts.make_tar_disk: + return tar_img + + return disk_img
+ + +
[docs]def make_live_images(opts, work_dir, disk_img): + """ + Create live images from a directory or rootfs image + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str work_dir: Directory for storing results + :param str disk_img: Path to disk image (fsimage or partitioned) + :returns: Path of directory with created images or None + :rtype: str + + fsck.ext4 is run on the rootfs_image to make sure there are no errors and to zero + out any deleted blocks to make it compress better. If this fails for any reason + it will return None and log the error. + """ + sys_root = "" + + squashfs_root_dir = joinpaths(work_dir, "squashfs_root") + liveos_dir = joinpaths(squashfs_root_dir, "LiveOS") + os.makedirs(liveos_dir) + rootfs_img = joinpaths(liveos_dir, "rootfs.img") + + if opts.fs_image or opts.no_virt: + # Find the ostree root in the fsimage + if opts.ostree: + with Mount(disk_img, opts="loop") as mnt_dir: + sys_root = find_ostree_root(mnt_dir) + + # Try to hardlink the image, if that fails, copy it + rc = execWithRedirect("/bin/ln", [disk_img, rootfs_img]) + if rc != 0: + shutil.copy2(disk_img, rootfs_img) + else: + is_root_part = None + if opts.ostree: + is_root_part = lambda dir: os.path.exists(dir+"/ostree/deploy") + with PartitionMount(disk_img, mount_ok=is_root_part) as img_mount: + if img_mount and img_mount.mount_dir: + try: + mounted_sysroot_boot_dir = None + if opts.ostree: + sys_root = find_ostree_root(img_mount.mount_dir) + mounted_sysroot_boot_dir = mount_boot_part_over_root(img_mount) + if opts.live_rootfs_keep_size: + size = img_mount.mount_size / 1024**3 + else: + size = opts.live_rootfs_size or None + log.info("Creating live rootfs image") + mkrootfsimg(img_mount.mount_dir, rootfs_img, "LiveOS", size=size, sysroot=sys_root) + finally: + if mounted_sysroot_boot_dir: + umount(mounted_sysroot_boot_dir) + log.debug("sys_root = %s", sys_root) + + # Make sure free blocks are actually zeroed so it will compress + rc = execWithRedirect("/usr/sbin/fsck.ext4", ["-y", "-f", "-E", "discard", rootfs_img]) + if rc != 0: + log.error("Problem zeroing free blocks of %s", disk_img) + return None + + log.info("Packing live rootfs image") + add_pxe_args = [] + live_image_name = "live-rootfs.squashfs.img" + compression, compressargs = squashfs_args(opts) + rc = mksquashfs(squashfs_root_dir, joinpaths(work_dir, live_image_name), compression, compressargs) + if rc != 0: + log.error("mksquashfs failed to create %s", live_image_name) + return None + + log.info("Rebuilding initramfs for live") + with Mount(rootfs_img, opts="loop") as mnt_dir: + try: + mount(joinpaths(mnt_dir, "boot"), opts="bind", mnt=joinpaths(mnt_dir, sys_root, "boot")) + rebuild_initrds_for_live(opts, joinpaths(mnt_dir, sys_root), work_dir) + finally: + umount(joinpaths(mnt_dir, sys_root, "boot"), delete=False) + + remove(squashfs_root_dir) + + if opts.ostree: + add_pxe_args.append("ostree=/%s" % sys_root) + template = joinpaths(opts.lorax_templates, "pxe-live/pxe-config.tmpl") + create_pxe_config(template, work_dir, live_image_name, add_pxe_args) + + return work_dir
+ +
[docs]def check_kickstart(ks, opts): + """Check the parsed kickstart object for errors + + :param ks: Parsed Kickstart object + :type ks: pykickstart.parser.KickstartParser + :param opts: Commandline options to control the process + :type opts: Either a DataHolder or ArgumentParser + :returns: List of error strings or empty list + :rtype: list + """ + errors = [] + if opts.no_virt and ks.handler.method.method not in ("url", "nfs") \ + and not ks.handler.ostreesetup.seen: + errors.append("Only url, nfs and ostreesetup install methods are currently supported. " + "Please fix your kickstart file.") + + if ks.handler.repo.seen and ks.handler.method.method != "url": + errors.append("repo can only be used with the url install method. Add url to your " + "kickstart file.") + + if ks.handler.method.method in ("url", "nfs") and not ks.handler.network.seen: + errors.append("The kickstart must activate networking if " + "the url or nfs install method is used.") + + if ks.handler.displaymode.displayMode is not None: + errors.append("The kickstart must not set a display mode (text, cmdline, " + "graphical), this will interfere with livemedia-creator.") + + if opts.make_fsimage or (opts.make_pxe_live and opts.no_virt): + # Make sure the kickstart isn't using autopart and only has a / mountpoint + part_ok = not any(p for p in ks.handler.partition.partitions + if p.mountpoint not in ["/", "swap"]) + if not part_ok or ks.handler.autopart.seen: + errors.append("Filesystem images must use a single / part, not autopart or " + "multiple partitions. swap is allowed but not used.") + + if not opts.no_virt and ks.handler.reboot.action != KS_SHUTDOWN: + errors.append("The kickstart must include shutdown when using virt installation.") + + return errors
+ +
[docs]def run_creator(opts, cancel_func=None): + """Run the image creator process + + :param opts: Commandline options to control the process + :type opts: Either a DataHolder or ArgumentParser + :param cancel_func: Function that returns True to cancel build + :type cancel_func: function + :returns: The result directory and the disk image path. + :rtype: Tuple of str + + This function takes the opts arguments and creates the selected output image. + See the cmdline --help for livemedia-creator for the possible options + + (Yes, this is not ideal, but we can fix that later) + """ + result_dir = None + + # Parse the kickstart + if opts.ks: + ks_version = makeVersion() + ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) + ks.readKickstart(opts.ks[0]) + + # live iso usually needs dracut-live so warn the user if it is missing + if opts.ks and opts.make_iso: + if "dracut-live" not in ks.handler.packages.packageList: + log.error("dracut-live package is missing from the kickstart.") + raise RuntimeError("dracut-live package is missing from the kickstart.") + + # Make the disk or filesystem image + if not opts.disk_image and not opts.fs_image: + if not opts.ks: + raise RuntimeError("Image creation requires a kickstart file") + + # Check the kickstart for problems + errors = check_kickstart(ks, opts) + if errors: + list(log.error(e) for e in errors) + raise RuntimeError("\n".join(errors)) + + # Make the image. Output of this is either a partitioned disk image or a fsimage + try: + disk_img = make_image(opts, ks, cancel_func=cancel_func) + except InstallError as e: + log.error("ERROR: Image creation failed: %s", e) + raise RuntimeError("Image creation failed: %s" % e) + + if opts.image_only: + return (result_dir, disk_img) + + if opts.make_iso: + work_dir = tempfile.mkdtemp(prefix="lmc-work-") + log.info("working dir is %s", work_dir) + + if (opts.fs_image or opts.no_virt) and not opts.disk_image: + # Create iso from a filesystem image + disk_img = opts.fs_image or disk_img + with Mount(disk_img, opts="loop") as mount_dir: + rc = make_runtime(opts, mount_dir, work_dir, calculate_disk_size(opts, ks)/1024.0) + if rc != 0: + log.error("make_runtime failed with rc = %d. See program.log", rc) + raise RuntimeError("make_runtime failed with rc = %d" % rc) + if cancel_func and cancel_func(): + raise RuntimeError("ISO creation canceled") + + result_dir = make_livecd(opts, mount_dir, work_dir) + else: + # Create iso from a partitioned disk image + disk_img = opts.disk_image or disk_img + with PartitionMount(disk_img) as img_mount: + if img_mount and img_mount.mount_dir: + rc = make_runtime(opts, img_mount.mount_dir, work_dir, calculate_disk_size(opts, ks)/1024.0) + if rc != 0: + log.error("make_runtime failed with rc = %d. See program.log", rc) + raise RuntimeError("make_runtime failed with rc = %d" % rc) + result_dir = make_livecd(opts, img_mount.mount_dir, work_dir) + + # --iso-only removes the extra build artifacts, keeping only the boot.iso + if opts.iso_only and result_dir: + boot_iso = joinpaths(result_dir, "images/boot.iso") + if not os.path.exists(boot_iso): + log.error("%s is missing, skipping --iso-only.", boot_iso) + else: + iso_dir = tempfile.mkdtemp(prefix="lmc-result-") + dest_file = joinpaths(iso_dir, opts.iso_name or "boot.iso") + shutil.move(boot_iso, dest_file) + shutil.rmtree(result_dir) + result_dir = iso_dir + + # cleanup the mess + # cleanup work_dir? 
+ if disk_img and not (opts.keep_image or opts.disk_image or opts.fs_image): + os.unlink(disk_img) + log.info("Disk image erased") + disk_img = None + elif opts.make_appliance: + if not opts.ks: + networks = [] + else: + networks = ks.handler.network.network + make_appliance(opts.disk_image or disk_img, opts.app_name, + opts.app_template, opts.app_file, networks, opts.ram, + opts.vcpus or 1, opts.arch, opts.title, opts.project, opts.releasever) + elif opts.make_pxe_live: + work_dir = tempfile.mkdtemp(prefix="lmc-work-") + log.info("working dir is %s", work_dir) + disk_img = opts.fs_image or opts.disk_image or disk_img + log.debug("disk image is %s", disk_img) + + result_dir = make_live_images(opts, work_dir, disk_img) + if result_dir is None: + log.error("Creating PXE live image failed.") + raise RuntimeError("Creating PXE live image failed.") + + if opts.result_dir != opts.tmp and result_dir: + copytree(result_dir, opts.result_dir, preserve=False) + shutil.rmtree(result_dir) + result_dir = None + + return (result_dir, disk_img)
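A hedged usage sketch for run_creator(); the import path and the way opts is produced are assumptions (livemedia-creator normally builds opts with its own argparse parser):

# Hypothetical driver: "opts" is assumed to be the argparse.Namespace produced by
# livemedia-creator's command line parser (or an equivalent DataHolder).
from pylorax.creator import run_creator   # import path is an assumption

def cancelled():
    # Return True to abort the build, e.g. when a stop flag file appears.
    return False

result_dir, disk_img = run_creator(opts, cancel_func=cancelled)
print("results:", result_dir, "disk image:", disk_img)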
+
+ +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/f34-branch/_modules/pylorax/decorators.html b/f34-branch/_modules/pylorax/decorators.html new file mode 100644 index 00000000..17f83858 --- /dev/null +++ b/f34-branch/_modules/pylorax/decorators.html @@ -0,0 +1,237 @@ + + + + + + + + + + pylorax.decorators — Lorax 34.10 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.decorators

+#
+# decorators.py
+#
+# Copyright (C) 2009-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#
+
+
[docs]def singleton(cls): + instances = {} + + def get_instance(): + if cls not in instances: + instances[cls] = cls() + return instances[cls] + + return get_instance
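A minimal usage sketch for the singleton decorator above; the Config class is made up for illustration:

@singleton
class Config(object):
    def __init__(self):
        self.values = {}

a = Config()   # first call creates the instance
b = Config()   # later calls return the same object
assert a is b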
+
+ +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/f34-branch/_modules/pylorax/discinfo.html b/f34-branch/_modules/pylorax/discinfo.html new file mode 100644 index 00000000..a08154e6 --- /dev/null +++ b/f34-branch/_modules/pylorax/discinfo.html @@ -0,0 +1,252 @@ + + + + + + + + + + pylorax.discinfo — Lorax 34.10 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.discinfo

+#
+# discinfo.py
+#
+# Copyright (C) 2010-2015  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.discinfo")
+
+import os
+import time
+
+
+
[docs]class DiscInfo(object): + + def __init__(self, release, basearch): + self.release = release + self.basearch = basearch + +
[docs] def write(self, outfile): + if 'SOURCE_DATE_EPOCH' in os.environ: + timestamp = int(os.environ['SOURCE_DATE_EPOCH']) + else: + timestamp = time.time() + + logger.info("writing .discinfo file") + with open(outfile, "w") as fobj: + fobj.write("{0:f}\n".format(timestamp)) + fobj.write("{0.release}\n".format(self)) + fobj.write("{0.basearch}\n".format(self))
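Usage sketch for DiscInfo; the output path is hypothetical:

from pylorax.discinfo import DiscInfo

info = DiscInfo(release="34", basearch="x86_64")
info.write("/tmp/tree/.discinfo")   # writes timestamp, release and basearch, one per line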
+
+ +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/f34-branch/_modules/pylorax/dnfbase.html b/f34-branch/_modules/pylorax/dnfbase.html new file mode 100644 index 00000000..ead0932d --- /dev/null +++ b/f34-branch/_modules/pylorax/dnfbase.html @@ -0,0 +1,394 @@ + + + + + + + + + + pylorax.dnfbase — Lorax 34.10 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.dnfbase

+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("pylorax")
+
+import dnf
+import os
+import shutil
+
+from pylorax import DEFAULT_PLATFORM_ID
+from pylorax.sysutils import flatconfig
+
+
[docs]def get_dnf_base_object(installroot, sources, mirrorlists=None, repos=None, + enablerepos=None, disablerepos=None, + tempdir="/var/tmp", proxy=None, releasever="34", + cachedir=None, logdir=None, sslverify=True, dnfplugins=None): + """ Create a dnf Base object and setup the repositories and installroot + + :param string installroot: Full path to the installroot + :param list sources: List of source repo urls to use for the installation + :param list enablerepos: List of repo names to enable + :param list disablerepos: List of repo names to disable + :param list mirrorlist: List of mirrors to use + :param string tempdir: Path of temporary directory + :param string proxy: http proxy to use when fetching packages + :param string releasever: Release version to pass to dnf + :param string cachedir: Directory to use for caching packages + :param bool noverifyssl: Set to True to ignore the CA of ssl certs. eg. use self-signed ssl for https repos. + + If tempdir is not set /var/tmp is used. + If cachedir is None a dnf.cache directory is created inside tmpdir + """ + def sanitize_repo(repo): + """Convert bare paths to file:/// URIs, and silently reject protocols unhandled by yum""" + if repo.startswith("/"): + return "file://{0}".format(repo) + elif any(repo.startswith(p) for p in ('http://', 'https://', 'ftp://', 'file://')): + return repo + else: + return None + + mirrorlists = mirrorlists or [] + + # sanitize the repositories + sources = list(sanitize_repo(r) for r in sources) + mirrorlists = list(sanitize_repo(r) for r in mirrorlists) + + # remove invalid repositories + sources = list(r for r in sources if r) + mirrorlists = list(r for r in mirrorlists if r) + + if not cachedir: + cachedir = os.path.join(tempdir, "dnf.cache") + if not os.path.isdir(cachedir): + os.mkdir(cachedir) + + if not logdir: + logdir = os.path.join(tempdir, "dnf.logs") + if not os.path.isdir(logdir): + os.mkdir(logdir) + + dnfbase = dnf.Base() + # Enable DNF pluings + # NOTE: These come from the HOST system's environment + if dnfplugins: + if dnfplugins[0] == "*": + # Enable them all + dnfbase.init_plugins() + else: + # Only enable the listed plugins + dnfbase.init_plugins(disabled_glob=["*"], enable_plugins=dnfplugins) + conf = dnfbase.conf + conf.logdir = logdir + conf.cachedir = cachedir + + conf.install_weak_deps = False + conf.releasever = releasever + conf.installroot = installroot + conf.prepend_installroot('persistdir') + # this is a weird 'AppendOption' thing that, when you set it, + # actually appends. Doing this adds 'nodocs' to the existing list + # of values, over in libdnf, it does not replace the existing values. 
+ conf.tsflags = ['nodocs'] + # Log details about the solver + conf.debug_solver = True + + if proxy: + conf.proxy = proxy + + if sslverify == False: + conf.sslverify = False + + # DNF 3.2 needs to have module_platform_id set, otherwise depsolve won't work correctly + if not os.path.exists("/etc/os-release"): + log.warning("/etc/os-release is missing, cannot determine platform id, falling back to %s", DEFAULT_PLATFORM_ID) + platform_id = DEFAULT_PLATFORM_ID + else: + os_release = flatconfig("/etc/os-release") + platform_id = os_release.get("PLATFORM_ID", DEFAULT_PLATFORM_ID) + log.info("Using %s for module_platform_id", platform_id) + conf.module_platform_id = platform_id + + # Add .repo files + if repos: + reposdir = os.path.join(tempdir, "dnf.repos") + if not os.path.isdir(reposdir): + os.mkdir(reposdir) + for r in repos: + shutil.copy2(r, reposdir) + conf.reposdir = [reposdir] + dnfbase.read_all_repos() + + # add the sources + for i, r in enumerate(sources): + if "SRPM" in r or "srpm" in r: + log.info("Skipping source repo: %s", r) + continue + repo_name = "lorax-repo-%d" % i + repo = dnf.repo.Repo(repo_name, conf) + repo.baseurl = [r] + if proxy: + repo.proxy = proxy + repo.enable() + dnfbase.repos.add(repo) + log.info("Added '%s': %s", repo_name, r) + log.info("Fetching metadata...") + try: + repo.load() + except dnf.exceptions.RepoError as e: + log.error("Error fetching metadata for %s: %s", repo_name, e) + return None + + # add the mirrorlists + for i, r in enumerate(mirrorlists): + if "SRPM" in r or "srpm" in r: + log.info("Skipping source repo: %s", r) + continue + repo_name = "lorax-mirrorlist-%d" % i + repo = dnf.repo.Repo(repo_name, conf) + repo.mirrorlist = r + if proxy: + repo.proxy = proxy + repo.enable() + dnfbase.repos.add(repo) + log.info("Added '%s': %s", repo_name, r) + log.info("Fetching metadata...") + try: + repo.load() + except dnf.exceptions.RepoError as e: + log.error("Error fetching metadata for %s: %s", repo_name, e) + return None + + # Enable repos listed on the cmdline + for r in enablerepos: + repolist = dnfbase.repos.get_matching(r) + if not repolist: + log.warning("%s is an unknown repo, not enabling it", r) + else: + repolist.enable() + log.info("Enabled repo %s", r) + + # Disable repos listed on the cmdline + for r in disablerepos: + repolist = dnfbase.repos.get_matching(r) + if not repolist: + log.warning("%s is an unknown repo, not disabling it", r) + else: + repolist.disable() + log.info("Disabled repo %s", r) + + dnfbase.fill_sack(load_system_repo=False) + dnfbase.read_comps() + + return dnfbase
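A hedged sketch of calling get_dnf_base_object() with a single baseurl repo; the repository URL and paths are placeholders:

from pylorax.dnfbase import get_dnf_base_object

dbo = get_dnf_base_object(installroot="/var/tmp/installroot",
                          sources=["https://example.com/fedora/34/Everything/x86_64/os/"],
                          tempdir="/var/tmp", releasever="34")
if dbo is None:
    # get_dnf_base_object returns None when repo metadata could not be fetched
    raise RuntimeError("repository setup failed, see the log for details")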
+
+ +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/f34-branch/_modules/pylorax/dnfhelper.html b/f34-branch/_modules/pylorax/dnfhelper.html new file mode 100644 index 00000000..e210d3f0 --- /dev/null +++ b/f34-branch/_modules/pylorax/dnfhelper.html @@ -0,0 +1,317 @@ + + + + + + + + + + pylorax.dnfhelper — Lorax 34.10 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.dnfhelper

+#
+# dnfhelper.py
+#
+# Copyright (C) 2010-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#                     Brian C. Lane <bcl@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.dnfhelper")
+import dnf
+import dnf.transaction
+import collections
+import time
+import pylorax.output as output
+
+__all__ = ['LoraxDownloadCallback', 'LoraxRpmCallback']
+
+def _paced(fn):
+    """Execute `fn` no more often then every 2 seconds."""
+    def paced_fn(self, *args):
+        now = time.time()
+        if now - self.last_time < 2:
+            return
+        self.last_time = now
+        return fn(self, *args)
+    return paced_fn
+
+
+
[docs]class LoraxDownloadCallback(dnf.callback.DownloadProgress): + def __init__(self): + self.downloads = collections.defaultdict(int) + self.last_time = time.time() + self.total_files = 0 + self.total_size = 0 + + self.pkgno = 0 + self.total = 0 + + self.output = output.LoraxOutput() + + @_paced + def _update(self): + msg = "Downloading %(pkgno)s / %(total_files)s RPMs, " \ + "%(downloaded)s / %(total_size)s (%(percent)d%%) done.\n" + downloaded = sum(self.downloads.values()) + vals = { + 'downloaded' : downloaded, + 'percent' : int(100 * downloaded/self.total_size), + 'pkgno' : self.pkgno, + 'total_files' : self.total_files, + 'total_size' : self.total_size + } + self.output.write(msg % vals) + +
[docs] def end(self, payload, status, msg): + nevra = str(payload) + if status is dnf.callback.STATUS_OK: + self.downloads[nevra] = payload.download_size + self.pkgno += 1 + self._update() + return + logger.critical("Failed to download '%s': %d - %s", nevra, status, msg)
+ +
[docs] def progress(self, payload, done): + nevra = str(payload) + self.downloads[nevra] = done + self._update()
+ + # dnf 2.5.0 adds a new argument, accept it if it is passed + # pylint: disable=arguments-differ +
[docs] def start(self, total_files, total_size, total_drpms=0): + self.total_files = total_files + self.total_size = total_size
+ + +
[docs]class LoraxRpmCallback(dnf.callback.TransactionProgress): + def __init__(self): + super(LoraxRpmCallback, self).__init__() + self._last_ts = None + +
[docs] def progress(self, package, action, ti_done, ti_total, ts_done, ts_total): + if action == dnf.transaction.PKG_INSTALL: + # do not report same package twice + if self._last_ts == ts_done: + return + self._last_ts = ts_done + + msg = '(%d/%d) %s' % (ts_done, ts_total, package) + logger.info(msg) + elif action == dnf.transaction.TRANS_POST: + msg = "Performing post-installation setup tasks" + logger.info(msg)
+ +
[docs] def error(self, message): + logger.warning(message)
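A hedged sketch of wiring these callbacks into an already resolved dnf.Base object (here called dnfbase); the surrounding repository setup and resolve() call are assumed:

from pylorax.dnfhelper import LoraxDownloadCallback, LoraxRpmCallback

# dnfbase is assumed to exist and to have had resolve() run successfully.
pkgs = list(dnfbase.transaction.install_set)
dnfbase.download_packages(pkgs, progress=LoraxDownloadCallback())
dnfbase.do_transaction(display=LoraxRpmCallback())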
+
+ +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/f34-branch/_modules/pylorax/executils.html b/f34-branch/_modules/pylorax/executils.html new file mode 100644 index 00000000..743d220a --- /dev/null +++ b/f34-branch/_modules/pylorax/executils.html @@ -0,0 +1,580 @@ + + + + + + + + + + pylorax.executils — Lorax 34.10 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.executils

+#
+# executil.py - subprocess execution utility functions
+#
+# Copyright (C) 1999-2015
+# Red Hat, Inc.  All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import select
+import subprocess
+from subprocess import TimeoutExpired
+import signal
+import time
+
+import logging
+log = logging.getLogger("pylorax")
+program_log = logging.getLogger("program")
+
+# pylint: disable=not-context-manager
+from threading import Lock
+program_log_lock = Lock()
+
+_child_env = {}
+
+
[docs]def setenv(name, value): + """ Set an environment variable to be used by child processes. + + This method does not modify os.environ for the running process, which + is not thread-safe. If setenv has already been called for a particular + variable name, the old value is overwritten. + + :param str name: The name of the environment variable + :param str value: The value of the environment variable + """ + + _child_env[name] = value
+ +
[docs]def augmentEnv(): + env = os.environ.copy() + env.update(_child_env) + return env
+ +
[docs]class ExecProduct(object): + def __init__(self, rc, stdout, stderr): + self.rc = rc + self.stdout = stdout + self.stderr = stderr
+ +
[docs]def startProgram(argv, root='/', stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + env_prune=None, env_add=None, reset_handlers=True, reset_lang=True, **kwargs): + """ Start an external program and return the Popen object. + + The root and reset_handlers arguments are handled by passing a + preexec_fn argument to subprocess.Popen, but an additional preexec_fn + can still be specified and will be run. The user preexec_fn will be run + last. + + :param argv: The command to run and argument + :param root: The directory to chroot to before running command. + :param stdin: The file object to read stdin from. + :param stdout: The file object to write stdout to. + :param stderr: The file object to write stderr to. + :param env_prune: environment variables to remove before execution + :param env_add: environment variables to add before execution + :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN + :param reset_lang: whether to set the locale of the child process to C + :param kwargs: Additional parameters to pass to subprocess.Popen + :return: A Popen object for the running command. + :keyword preexec_fn: A function to run before execution starts. + """ + if env_prune is None: + env_prune = [] + + # Check for and save a preexec_fn argument + preexec_fn = kwargs.pop("preexec_fn", None) + + def preexec(): + # If a target root was specificed, chroot into it + if root and root != '/': + os.chroot(root) + os.chdir("/") + + # Signal handlers set to SIG_IGN persist across exec. Reset + # these to SIG_DFL if requested. In particular this will include the + # SIGPIPE handler set by python. + if reset_handlers: + for signum in range(1, signal.NSIG): + if signal.getsignal(signum) == signal.SIG_IGN: + signal.signal(signum, signal.SIG_DFL) + + # If the user specified an additional preexec_fn argument, run it + if preexec_fn is not None: + preexec_fn() + + with program_log_lock: + program_log.info("Running... %s", " ".join(argv)) + + env = augmentEnv() + for var in env_prune: + env.pop(var, None) + + if reset_lang: + env.update({"LC_ALL": "C"}) + + if env_add: + env.update(env_add) + + # pylint: disable=subprocess-popen-preexec-fn + return subprocess.Popen(argv, + stdin=stdin, + stdout=stdout, + stderr=stderr, + close_fds=True, + preexec_fn=preexec, cwd=root, env=env, **kwargs)
+ +def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None, log_output=True, + binary_output=False, filter_stderr=False, raise_err=False, callback=None, + env_add=None, reset_handlers=True, reset_lang=True): + """ Run an external program, log the output and return it to the caller + + :param argv: The command to run and argument + :param root: The directory to chroot to before running command. + :param stdin: The file object to read stdin from. + :param stdout: Optional file object to write the output to. + :param env_prune: environment variable to remove before execution + :param log_output: whether to log the output of command + :param binary_output: whether to treat the output of command as binary data + :param filter_stderr: whether to exclude the contents of stderr from the returned output + :param raise_err: whether to raise a CalledProcessError if the returncode is non-zero + :param callback: method to call while waiting for process to finish, passed Popen object + :param env_add: environment variables to add before execution + :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN + :param reset_lang: whether to set the locale of the child process to C + :return: The return code of the command and the output + :raises: OSError or CalledProcessError + """ + try: + if filter_stderr: + stderr = subprocess.PIPE + else: + stderr = subprocess.STDOUT + + proc = startProgram(argv, root=root, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr, + env_prune=env_prune, universal_newlines=not binary_output, + env_add=env_add, reset_handlers=reset_handlers, reset_lang=reset_lang) + + output_string = None + err_string = None + if callback: + while callback(proc) and proc.poll() is None: + try: + (output_string, err_string) = proc.communicate(timeout=1) + break + except TimeoutExpired: + pass + else: + (output_string, err_string) = proc.communicate() + if output_string: + if binary_output: + output_lines = [output_string] + else: + if output_string[-1] != "\n": + output_string = output_string + "\n" + output_lines = output_string.splitlines(True) + + if log_output: + with program_log_lock: + for line in output_lines: + program_log.info(line.strip()) + + if stdout: + stdout.write(output_string) + + # If stderr was filtered, log it separately + if filter_stderr and err_string and log_output: + err_lines = err_string.splitlines(True) + + with program_log_lock: + for line in err_lines: + program_log.info(line.strip()) + + except OSError as e: + with program_log_lock: + program_log.error("Error running %s: %s", argv[0], e.strerror) + raise + + with program_log_lock: + program_log.debug("Return code: %s", proc.returncode) + + if proc.returncode and raise_err: + output = (output_string or "") + (err_string or "") + raise subprocess.CalledProcessError(proc.returncode, argv, output) + + return (proc.returncode, output_string) + +
[docs]def execWithRedirect(command, argv, stdin=None, stdout=None, root='/', env_prune=None, + log_output=True, binary_output=False, raise_err=False, callback=None, + env_add=None, reset_handlers=True, reset_lang=True): + """ Run an external program and redirect the output to a file. + + :param command: The command to run + :param argv: The argument list + :param stdin: The file object to read stdin from. + :param stdout: Optional file object to redirect stdout and stderr to. + :param root: The directory to chroot to before running command. + :param env_prune: environment variable to remove before execution + :param log_output: whether to log the output of command + :param binary_output: whether to treat the output of command as binary data + :param raise_err: whether to raise a CalledProcessError if the returncode is non-zero + :param callback: method to call while waiting for process to finish, passed Popen object + :param env_add: environment variables to add before execution + :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN + :param reset_lang: whether to set the locale of the child process to C + :return: The return code of the command + """ + argv = [command] + list(argv) + return _run_program(argv, stdin=stdin, stdout=stdout, root=root, env_prune=env_prune, + log_output=log_output, binary_output=binary_output, raise_err=raise_err, callback=callback, + env_add=env_add, reset_handlers=reset_handlers, reset_lang=reset_lang)[0]
+ +
[docs]def execWithCapture(command, argv, stdin=None, root='/', log_output=True, filter_stderr=False, + raise_err=False, callback=None, env_add=None, reset_handlers=True, reset_lang=True): + """ Run an external program and capture standard out and err. + + :param command: The command to run + :param argv: The argument list + :param stdin: The file object to read stdin from. + :param root: The directory to chroot to before running command. + :param log_output: Whether to log the output of command + :param filter_stderr: Whether stderr should be excluded from the returned output + :param callback: method to call while waiting for process to finish, passed Popen object + :param env_add: environment variables to add before execution + :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN + :param reset_lang: whether to set the locale of the child process to C + :return: The output of the command + """ + argv = [command] + list(argv) + return _run_program(argv, stdin=stdin, root=root, log_output=log_output, filter_stderr=filter_stderr, + raise_err=raise_err, callback=callback, env_add=env_add, + reset_handlers=reset_handlers, reset_lang=reset_lang)[1]
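A quick sketch of the two wrappers above; both log the command and its output to program.log:

from pylorax.executils import execWithRedirect, execWithCapture

rc = execWithRedirect("ls", ["-l", "/tmp"])       # returns the exit code only
listing = execWithCapture("ls", ["-l", "/tmp"])   # returns the captured output string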
+ +
[docs]def execReadlines(command, argv, stdin=None, root='/', env_prune=None, filter_stderr=False, + callback=lambda x: True, env_add=None, reset_handlers=True, reset_lang=True): + """ Execute an external command and return the line output of the command + in real-time. + + This method assumes that there is a reasonably low delay between the + end of output and the process exiting. If the child process closes + stdout and then keeps on truckin' there will be problems. + + NOTE/WARNING: UnicodeDecodeError will be raised if the output of the + external command can't be decoded as UTF-8. + + :param command: The command to run + :param argv: The argument list + :param stdin: The file object to read stdin from. + :param stdout: Optional file object to redirect stdout and stderr to. + :param root: The directory to chroot to before running command. + :param env_prune: environment variable to remove before execution + :param filter_stderr: Whether stderr should be excluded from the returned output + :param callback: method to call while waiting for process to finish, passed Popen object + :param env_add: environment variables to add before execution + :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN + :param reset_lang: whether to set the locale of the child process to C + :return: Iterator of the lines from the command + + Output from the file is not logged to program.log + This returns an iterator with the lines from the command until it has finished + """ + + class ExecLineReader(object): + """Iterator class for returning lines from a process and cleaning + up the process when the output is no longer needed. + """ + + def __init__(self, proc, argv, callback): + self._proc = proc + self._argv = argv + self._callback = callback + self._data = "" + + def __iter__(self): + return self + + def __del__(self): + # See if the process is still running + if self._proc.poll() is None: + # Stop the process and ignore any problems that might arise + try: + self._proc.terminate() + except OSError: + pass + + def __next__(self): + # Return lines from stdout while also calling _callback + while True: + # Check for input without blocking + if select.select([self._proc.stdout], [], [], 0)[0]: + size = len(self._proc.stdout.peek(1)) + if size > 0: + self._data += self._proc.stdout.read(size).decode("utf-8") + + if self._data.find("\n") >= 0: + line = self._data.split("\n", 1) + self._data = line[1] + return line[0] + + if self._proc.poll() is not None or not self._callback(self._proc): + # Output finished, wait 60s for the process to end + try: + self._proc.communicate(timeout=60) + except subprocess.TimeoutExpired: + # Did not exit in 60s, kill it and wait 30s more + self._proc.kill() + try: + self._proc.communicate(timeout=30) + except subprocess.TimeoutExpired: + pass + + if self._proc.returncode is None: + raise OSError("process '%s' failed to be killed" % self._argv) + elif self._proc.returncode < 0: + raise OSError("process '%s' was killed by signal %s" % + (self._argv, -self._proc.returncode)) + elif self._proc.returncode > 0: + raise OSError("process '%s' exited with status %s" % + (self._argv, self._proc.returncode)) + raise StopIteration + + # Don't loop too fast with no input to read + time.sleep(0.5) + + argv = [command] + argv + + if filter_stderr: + stderr = subprocess.DEVNULL + else: + stderr = subprocess.STDOUT + + try: + proc = startProgram(argv, root=root, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr, + env_prune=env_prune, env_add=env_add, 
reset_handlers=reset_handlers, reset_lang=reset_lang) + except OSError as e: + with program_log_lock: + program_log.error("Error running %s: %s", argv[0], e.strerror) + raise + + return ExecLineReader(proc, argv, callback)
+ +
[docs]def runcmd(cmd, **kwargs): + """ run execWithRedirect with raise_err=True + """ + kwargs["raise_err"] = True + return execWithRedirect(cmd[0], cmd[1:], **kwargs)
+ +
[docs]def runcmd_output(cmd, **kwargs): + """ run execWithCapture with raise_err=True + """ + kwargs["raise_err"] = True + return execWithCapture(cmd[0], cmd[1:], **kwargs)
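runcmd() and runcmd_output() raise CalledProcessError on a non-zero exit instead of returning it, for example (paths are hypothetical):

from subprocess import CalledProcessError
from pylorax.executils import runcmd, runcmd_output

try:
    runcmd(["mkdir", "-p", "/var/tmp/lmc-example"])
    files = runcmd_output(["ls", "/var/tmp/lmc-example"])
except CalledProcessError as e:
    print("command failed with return code", e.returncode)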
+
+ +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/f34-branch/_modules/pylorax/imgutils.html b/f34-branch/_modules/pylorax/imgutils.html new file mode 100644 index 00000000..e78d511a --- /dev/null +++ b/f34-branch/_modules/pylorax/imgutils.html @@ -0,0 +1,703 @@ + + + + + + pylorax.imgutils — Lorax 34.13 documentation + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pylorax.imgutils

+# imgutils.py - utility functions/classes for building disk images
+#
+# Copyright (C) 2011-2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s):  Will Woods <wwoods@redhat.com>
+
+import logging
+logger = logging.getLogger("pylorax.imgutils")
+
+import os, tempfile
+from os.path import join, dirname
+from subprocess import Popen, PIPE, CalledProcessError
+import sys
+import time
+import traceback
+import multiprocessing
+from time import sleep
+import shutil
+
+from pylorax.sysutils import cpfile
+from pylorax.executils import execWithRedirect, execWithCapture
+from pylorax.executils import runcmd, runcmd_output
+
+######## Functions for making container images (cpio, tar, squashfs) ##########
+
+
[docs]def compress(command, root, outfile, compression="xz", compressargs=None): + '''Make a compressed archive of the given rootdir or file. + command is a list of the archiver commands to run + compression should be "xz", "gzip", "lzma", "bzip2", or None. + compressargs will be used on the compression commandline.''' + if compression not in (None, "xz", "gzip", "lzma", "bzip2"): + raise ValueError("Unknown compression type %s" % compression) + compressargs = compressargs or ["-9"] + if compression == "xz": + compressargs.insert(0, "--check=crc32") + if compression is None: + compression = "cat" # this is a little silly + compressargs = [] + + # make compression run with multiple threads if possible + if compression in ("xz", "lzma"): + compressargs.insert(0, "-T%d" % multiprocessing.cpu_count()) + elif compression == "gzip": + compression = "pigz" + compressargs.insert(0, "-p%d" % multiprocessing.cpu_count()) + elif compression == "bzip2": + compression = "pbzip2" + compressargs.insert(0, "-p%d" % multiprocessing.cpu_count()) + + find, archive, comp = None, None, None + + try: + if os.path.isdir(root): + logger.debug("find %s -print0 |%s | %s %s > %s", root, " ".join(command), + compression, " ".join(compressargs), outfile) + + find = Popen(["find", ".", "-print0"], stdout=PIPE, cwd=root) + archive = Popen(command, stdin=find.stdout, stdout=PIPE, cwd=root) + else: + logger.debug("echo %s |%s | %s %s > %s", root, " ".join(command), + compression, " ".join(compressargs), outfile) + + archive = Popen(command, stdin=PIPE, stdout=PIPE, cwd=os.path.dirname(root)) + archive.stdin.write(os.path.basename(root).encode("utf-8") + b"\0") + archive.stdin.close() + + with open(outfile, "wb") as fout: + comp = Popen([compression] + compressargs, + stdin=archive.stdout, stdout=fout) + comp.wait() + + # Clean up the open fds and processes + if find: + find.wait() + find.stdout.close() + archive.wait() + if archive.stdin: + archive.stdin.close() + if archive.stdout: + archive.stdout.close() + return comp.returncode + except OSError as e: + logger.error(e) + # Kill off any hanging processes + list(p.kill() for p in (find, archive, comp) if p) + return 1
+ +
[docs]def mkcpio(root, outfile, compression="xz", compressargs=None): + compressargs = compressargs or ["-9"] + return compress(["cpio", "--null", "--quiet", "-H", "newc", "-o"], + root, outfile, compression, compressargs)
+ +
[docs]def mktar(root, outfile, compression="xz", compressargs=None, selinux=True): + compressargs = compressargs or ["-9"] + tar_cmd = ["tar", "--no-recursion"] + if selinux: + tar_cmd += ["--selinux", "--acls", "--xattrs"] + tar_cmd += ["-cf-", "--null", "-T-"] + return compress(tar_cmd, root, outfile, compression, compressargs)
+ +
[docs]def mksquashfs(rootdir, outfile, compression="default", compressargs=None): + '''Make a squashfs image containing the given rootdir.''' + compressargs = compressargs or [] + if compression != "default": + compressargs = ["-comp", compression] + compressargs + return execWithRedirect("mksquashfs", [rootdir, outfile] + compressargs)
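A short sketch of the archive helpers above; the input directory, output paths, and extra mksquashfs arguments are hypothetical:

from pylorax.imgutils import mktar, mksquashfs

mktar("/var/tmp/rootdir", "/var/tmp/root.tar.xz", compression="xz")
rc = mksquashfs("/var/tmp/rootdir", "/var/tmp/root.squashfs", "xz", ["-Xbcj", "x86"])
if rc != 0:
    raise RuntimeError("mksquashfs failed")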
+ +
[docs]def mkrootfsimg(rootdir, outfile, label, size=2, sysroot=""): + """ + Make rootfs image from a directory + + :param str rootdir: Root directory + :param str outfile: Path of output image file + :param str label: Filesystem label + :param int size: Size of the image in GiB, if None computed automatically + :param str sysroot: path to system (deployment) root relative to physical root + """ + if size: + fssize = size * (1024*1024*1024) # 2GB sparse file compresses down to nothin' + else: + fssize = None # Let mkext4img figure out the needed size + + mkext4img(rootdir, outfile, label=label, size=fssize)
+ + +######## Utility functions ############################################### + +
[docs]def mksparse(outfile, size): + '''use os.ftruncate to create a sparse file of the given size.''' + with open(outfile, "w") as fobj: + os.ftruncate(fobj.fileno(), size)
+ +
[docs]def mkqcow2(outfile, size, options=None): + '''use qemu-img to create a file of the given size. + options is a list of options passed to qemu-img + + Default format is qcow2, override by passing "-f", fmt + in options. + ''' + mkqemu_img(outfile, size, options)
+ +
[docs]def mkqemu_img(outfile, size, options=None): + '''use qemu-img to create a file of the given size. + options is a list of options passed to qemu-img + + Default format is qcow2, override by passing "-f", fmt + in options. + ''' + options = options or [] + if "-f" not in options: + options.extend(["-f", "qcow2"]) + runcmd(["qemu-img", "create"] + options + [outfile, str(size)])
+ +
[docs]def loop_waitfor(loop_dev, outfile): + """Make sure the loop device is attached to the outfile. + + It seems that on rare occasions losetup can return before the /dev/loopX is + ready for use, causing problems with mkfs. This tries to make sure that the + loop device really is associated with the backing file before continuing. + + Raise RuntimeError if it isn't setup after 5 tries. + """ + for _x in range(0,5): + runcmd(["udevadm", "settle", "--timeout", "300"]) + ## XXX Note that losetup --list output can be truncated to 64 bytes in some + ## situations. Don't use it to lookup backing file, go the other way + ## and lookup the loop for the backing file. See util-linux lib/loopdev.c + ## loopcxt_get_backing_file() + if get_loop_name(outfile) == os.path.basename(loop_dev): + return + + # If this really is a race, give it some time to settle down + time.sleep(1) + + raise RuntimeError("Unable to setup %s on %s" % (loop_dev, outfile))
+ +
[docs]def loop_attach(outfile): + """Attach a loop device to the given file. Return the loop device name. + + On rare occasions it appears that the device never shows up, some experiments + seem to indicate that it may be a race with another process using /dev/loop* devices. + + So we now try 3 times before actually failing. + + Raises CalledProcessError if losetup fails. + """ + retries = 0 + while True: + try: + retries += 1 + dev = runcmd_output(["losetup", "--find", "--show", outfile]).strip() + + # Sometimes the loop device isn't ready yet, make extra sure before returning + loop_waitfor(dev, outfile) + except RuntimeError: + # Try to setup the loop device 3 times + if retries == 3: + logger.error("loop_attach failed, retries exhausted.") + raise + logger.debug("Try %d failed, %s did not appear.", retries, dev) + break + return dev
+ +
[docs]def loop_detach(loopdev): + '''Detach the given loop device. Return False on failure.''' + return (execWithRedirect("losetup", ["--detach", loopdev]) == 0)
+ +
[docs]def get_loop_name(path): + '''Return the loop device associated with the path. + Raises RuntimeError if more than one loop is associated''' + buf = runcmd_output(["losetup", "-j", path]) + if len(buf.splitlines()) > 1: + # there should never be more than one loop device listed + raise RuntimeError("multiple loops associated with %s" % path) + name = os.path.basename(buf.split(":")[0]) + return name
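Sketch: create a sparse backing file, attach it to a loop device, and detach it again (paths are hypothetical):

import os
from pylorax.imgutils import mksparse, loop_attach, loop_detach, get_loop_name

mksparse("/var/tmp/disk.img", 4 * 1024**3)        # 4 GiB sparse file
dev = loop_attach("/var/tmp/disk.img")            # e.g. "/dev/loop0"
assert get_loop_name("/var/tmp/disk.img") == os.path.basename(dev)
loop_detach(dev)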
+ +
[docs]def dm_attach(dev, size, name=None): + '''Attach a devicemapper device to the given device, with the given size. + If name is None, a random name will be chosen. Returns the device name. + raises CalledProcessError if dmsetup fails.''' + if name is None: + name = tempfile.mktemp(prefix="lorax.imgutils.", dir="") + runcmd(["dmsetup", "create", name, "--table", + "0 %i linear %s 0" % (size/512, dev)]) + return name
+ +
[docs]def dm_detach(dev): + '''Detach the named devicemapper device. Returns False if dmsetup fails.''' + dev = dev.replace("/dev/mapper/", "") # strip prefix, if it's there + return execWithRedirect("dmsetup", ["remove", dev])
+ +
[docs]def mount(dev, opts="", mnt=None): + '''Mount the given device at the given mountpoint, using the given opts. + opts should be a comma-separated string of mount options. + if mnt is none, a temporary directory will be created and its path will be + returned. + raises CalledProcessError if mount fails.''' + if mnt is None: + mnt = tempfile.mkdtemp(prefix="lorax.imgutils.") + logger.debug("make tmp mountdir %s", mnt) + cmd = ["mount"] + if opts: + cmd += ["-o", opts] + cmd += [dev, mnt] + runcmd(cmd) + return mnt
+ +
[docs]def umount(mnt, lazy=False, maxretry=3, retrysleep=1.0, delete=True): + '''Unmount the given mountpoint. If lazy is True, do a lazy umount (-l). + If the mount was a temporary dir created by mount, it will be deleted. + raises CalledProcessError if umount fails.''' + cmd = ["umount"] + if lazy: cmd += ["-l"] + cmd += [mnt] + count = 0 + while maxretry > 0: + try: + rv = runcmd(cmd) + except CalledProcessError: + count += 1 + if count == maxretry: + raise + logger.warning("failed to unmount %s. retrying (%d/%d)...", + mnt, count, maxretry) + if logger.getEffectiveLevel() <= logging.DEBUG: + fuser = execWithCapture("fuser", ["-vm", mnt]) + logger.debug("fuser -vm:\n%s\n", fuser) + sleep(retrysleep) + else: + break + if delete and 'lorax.imgutils' in mnt: + os.rmdir(mnt) + logger.debug("remove tmp mountdir %s", mnt) + return (rv == 0)
+ +
[docs]def copytree(src, dest, preserve=True): + '''Copy a tree of files using cp -a, thus preserving modes, timestamps, + links, acls, sparse files, xattrs, selinux contexts, etc. + If preserve is False, uses cp -R (useful for modeless filesystems) + raises CalledProcessError if copy fails.''' + logger.debug("copytree %s %s", src, dest) + cp = ["cp", "-a"] if preserve else ["cp", "-R", "-L", "--preserve=timestamps"] + cp += [join(src, "."), os.path.abspath(dest)] + runcmd(cp)
+ +
[docs]def do_grafts(grafts, dest, preserve=True): + '''Copy each of the items listed in grafts into dest. + If the key ends with '/' it's assumed to be a directory which should be + created, otherwise just the leading directories will be created.''' + for imgpath, filename in grafts.items(): + if imgpath[-1] == '/': + targetdir = join(dest, imgpath) + imgpath = imgpath[:-1] + else: + targetdir = join(dest, dirname(imgpath)) + if not os.path.isdir(targetdir): + os.makedirs(targetdir) + if os.path.isdir(filename): + copytree(filename, join(dest, imgpath), preserve) + else: + cpfile(filename, join(dest, imgpath))
+ +
[docs]def round_to_blocks(size, blocksize): + '''If size isn't a multiple of blocksize, round up to the next multiple''' + diff = size % blocksize + if diff or not size: + size += blocksize - diff + return size
+ +# TODO: move filesystem data outside this function +
[docs]def estimate_size(rootdir, graft=None, fstype=None, blocksize=4096, overhead=256): + graft = graft or {} + getsize = lambda f: os.lstat(f).st_size + if fstype == "btrfs": + overhead = 64*1024 # don't worry, it's all sparse + if fstype == "hfsplus": + overhead = 200 # hack to deal with two bootloader copies + if fstype in ("vfat", "msdos"): + blocksize = 2048 + getsize = lambda f: os.stat(f).st_size # no symlinks, count as copies + total = overhead*blocksize + dirlist = list(graft.values()) + if rootdir: + dirlist.append(rootdir) + for root in dirlist: + for top, dirs, files in os.walk(root): + for f in files + dirs: + total += round_to_blocks(getsize(join(top,f)), blocksize) + if fstype == "btrfs": + total = max(256*1024*1024, total) # btrfs minimum size: 256MB + logger.info("Size of %s block %s fs at %s estimated to be %s", blocksize, fstype, rootdir, total) + return total
+ +######## Execution contexts - use with the 'with' statement ############## + +
[docs]class LoopDev(object): + def __init__(self, filename, size=None): + self.loopdev = None + self.filename = filename + if size: + mksparse(self.filename, size) + def __enter__(self): + self.loopdev = loop_attach(self.filename) + return self.loopdev + def __exit__(self, exc_type, exc_value, tracebk): + loop_detach(self.loopdev)
+ +
[docs]class DMDev(object): + def __init__(self, dev, size, name=None): + self.mapperdev = None + (self.dev, self.size, self.name) = (dev, size, name) + def __enter__(self): + self.mapperdev = dm_attach(self.dev, self.size, self.name) + return self.mapperdev + def __exit__(self, exc_type, exc_value, tracebk): + dm_detach(self.mapperdev)
+ +
[docs]class Mount(object): + def __init__(self, dev, opts="", mnt=None): + (self.dev, self.opts, self.mnt) = (dev, opts, mnt) + def __enter__(self): + self.mnt = mount(self.dev, self.opts, self.mnt) + return self.mnt + def __exit__(self, exc_type, exc_value, tracebk): + umount(self.mnt)
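The context managers above are meant for 'with' blocks, for example (the image path is hypothetical):

from pylorax.imgutils import LoopDev, Mount

with LoopDev("/var/tmp/disk.img") as loopdev:
    with Mount(loopdev, opts="ro") as mnt:
        print("image mounted read-only at", mnt)
# the mount point and the loop device are both released on exit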
+ +
[docs]def kpartx_disk_img(disk_img): + """Attach a disk image's partitions to /dev/loopX using kpartx + + :param disk_img: The full path to a partitioned disk image + :type disk_img: str + :returns: list of (loopXpN, size) + :rtype: list of tuples + """ + # Example kpartx output + # kpartx -p p -v -a /tmp/diskV2DiCW.im + # add map loop2p1 (253:2): 0 3481600 linear /dev/loop2 2048 + # add map loop2p2 (253:3): 0 614400 linear /dev/loop2 3483648 + kpartx_output = runcmd_output(["kpartx", "-v", "-a", "-s", disk_img]) + logger.debug(kpartx_output) + + # list of (deviceName, sizeInBytes) + loop_devices = [] + for line in kpartx_output.splitlines(): + # add map loop2p3 (253:4): 0 7139328 linear /dev/loop2 528384 + # 3rd element is size in 512 byte blocks + if line.startswith("add map "): + fields = line[8:].split() + loop_devices.append( (fields[0], int(fields[3])*512) ) + return loop_devices
+ +
[docs]class PartitionMount(object): + """ Mount a partitioned image file using kpartx """ + def __init__(self, disk_img, mount_ok=None, submount=None): + """ + :param str disk_img: The full path to a partitioned disk image + :param mount_ok: A function that is passed the mount point and + returns True if it should be mounted. + :param str submount: Directory inside mount_dir to mount at + + If mount_ok is not set it will look for /etc/passwd + + If the partition is found it will be mounted under a temporary + directory and self.temp_dir set to it. If submount is passed it will be + created and mounted there instead, with self.mount_dir set to point to + it. self.mount_dev is set to the loop device, and self.mount_size is + set to the size of the partition. + + When no subdir is passed self.temp_dir and self.mount_dir will be the same. + """ + self.mount_dev = None + self.mount_size = None + self.mount_dir = None + self.disk_img = disk_img + self.mount_ok = mount_ok + self.submount = submount + self.temp_dir = None + + # Default is to mount partition with /etc/passwd + if not self.mount_ok: + self.mount_ok = lambda mount_dir: os.path.isfile(mount_dir+"/etc/passwd") + + # list of (deviceName, sizeInBytes) + self.loop_devices = kpartx_disk_img(self.disk_img) + + def __enter__(self): + # Mount the device selected by mount_ok, if possible + self.temp_dir = tempfile.mkdtemp() + if self.submount: + mount_dir = os.path.normpath(os.path.sep.join([self.temp_dir, self.submount])) + os.makedirs(mount_dir, mode=0o755, exist_ok=True) + else: + mount_dir = self.temp_dir + for dev, size in self.loop_devices: + try: + mount( "/dev/mapper/"+dev, mnt=mount_dir ) + if self.mount_ok(mount_dir): + self.mount_dir = mount_dir + self.mount_dev = dev + self.mount_size = size + break + umount( mount_dir ) + except CalledProcessError: + logger.debug(traceback.format_exc()) + if self.mount_dir: + logger.info("Partition mounted on %s size=%s", self.mount_dir, self.mount_size) + else: + logger.debug("Unable to mount anything from %s", self.disk_img) + os.rmdir(self.temp_dir) + self.temp_dir = None + return self + + def __exit__(self, exc_type, exc_value, tracebk): + if self.temp_dir: + umount(self.mount_dir) + shutil.rmtree(self.temp_dir) + self.mount_dir = None + self.temp_dir = None + execWithRedirect("kpartx", ["-d", "-s", self.disk_img])
+ + +
[docs]class DracutChroot(object): + """Set up the chroot for running dracut inside it, clean up when done + + This mounts /proc, /dev, and /var/tmp, plus optional bind-mounted directories + passed as a list of (source, destination) tuples where destination is relative to the chroot. + """ + def __init__(self, root, bind=None): + self.root = root + self.bind = [("/var/tmp", "/var/tmp")] + (bind if bind else []) + + def __enter__(self): + for d in [d for _, d in self.bind] + ["/proc", "/dev"]: + if not os.path.exists(self.root + d): + logger.warning("Making missing dracut chroot directory: %s", d) + os.makedirs(self.root + d) + + runcmd(["mount", "-t", "proc", "-o", "nosuid,noexec,nodev", "proc", self.root + "/proc" ]) + runcmd(["mount", "-t", "devtmpfs", "-o", "mode=0755,noexec,nosuid,strictatime", "devtmpfs", self.root + "/dev" ]) + + for s, d in self.bind: + runcmd(["mount", "-o", "bind", s, self.root + d]) + + return self + + def __exit__(self, exc_type, exc_value, tracebk): + runcmd(["umount", self.root + "/proc" ]) + runcmd(["umount", self.root + "/dev" ]) + + # cleanup bind mounts + for _, d in self.bind: + runcmd(["umount", self.root + d ]) + +
[docs] def Run(self, args): + runcmd(["dracut"] + args, root=self.root)
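A hedged sketch of DracutChroot; the chroot path and the dracut arguments are illustrative assumptions, not values required by this class:

from pylorax.imgutils import DracutChroot

with DracutChroot("/var/tmp/installroot") as dracut:
    dracut.Run(["--force", "--no-hostonly", "/boot/initramfs-rebuilt.img"])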
+ + +######## Functions for making filesystem images ########################## + +
[docs]def mkfsimage(fstype, rootdir, outfile, size=None, mkfsargs=None, mountargs="", graft=None): + '''Generic filesystem image creation function. + fstype should be a filesystem type - "mkfs.${fstype}" must exist. + graft should be a dict: {"some/path/in/image": "local/file/or/dir"}; + if the path ends with a '/' it's assumed to be a directory. + Will raise CalledProcessError if something goes wrong.''' + mkfsargs = mkfsargs or [] + graft = graft or {} + preserve = (fstype not in ("msdos", "vfat")) + if not size: + size = estimate_size(rootdir, graft, fstype) + with LoopDev(outfile, size) as loopdev: + try: + runcmd(["mkfs.%s" % fstype] + mkfsargs + [loopdev]) + except CalledProcessError as e: + logger.error("mkfs exited with a non-zero return code: %d", e.returncode) + logger.error(e.output) + sys.exit(e.returncode) + + with Mount(loopdev, mountargs) as mnt: + if rootdir: + copytree(rootdir, mnt, preserve) + do_grafts(graft, mnt, preserve) + + # Save information about filesystem usage + execWithRedirect("df", [mnt]) + + # Make absolutely sure that the data has been written + runcmd(["sync"])
+ +# convenience functions with useful defaults +
[docs]def mkdosimg(rootdir, outfile, size=None, label="", mountargs="shortname=winnt,umask=0077", graft=None): + graft = graft or {} + mkfsargs = ["-n", label] + if 'SOURCE_DATE_EPOCH' in os.environ: + mkfsargs.extend(["-i", + "{:x}".format(int(os.environ['SOURCE_DATE_EPOCH']))]) + mkfsimage("msdos", rootdir, outfile, size, mountargs=mountargs, + mkfsargs=mkfsargs, graft=graft)
+ +
[docs]def mkext4img(rootdir, outfile, size=None, label="", mountargs="", graft=None): + graft = graft or {} + mkfsimage("ext4", rootdir, outfile, size, mountargs=mountargs, + mkfsargs=["-L", label, "-b", "4096", "-m", "0"], graft=graft)
+ +
[docs]def mkbtrfsimg(rootdir, outfile, size=None, label="", mountargs="", graft=None): + graft = graft or {} + mkfsimage("btrfs", rootdir, outfile, size, mountargs=mountargs, + mkfsargs=["-L", label], graft=graft)
+ +
[docs]def mkhfsimg(rootdir, outfile, size=None, label="", mountargs="", graft=None): + graft = graft or {} + mkfsimage("hfsplus", rootdir, outfile, size, mountargs=mountargs, + mkfsargs=["-v", label], graft=graft)
+ +
[docs]def mkfsimage_from_disk(diskimage, fsimage, img_size=None, label="Anaconda"): + """ + Copy the / partition of a partitioned disk image to an un-partitioned + disk image. + + :param str diskimage: The full path to partitioned disk image with a / + :param str fsimage: The full path of the output fs image file + :param int img_size: Optional size of the fsimage in MiB or None to make + it as small as possible + :param str label: The label to apply to the image. Defaults to "Anaconda" + """ + with PartitionMount(diskimage) as img_mount: + if not img_mount or not img_mount.mount_dir: + return None + + logger.info("Creating fsimage %s (%s)", fsimage, img_size or "minimized") + if img_size: + # convert to Bytes + img_size *= 1024**2 + + mkext4img(img_mount.mount_dir, fsimage, size=img_size, label=label)
+ +
[docs]def default_image_name(compression, basename): + """ Return a default image name with the correct suffix for the compression type. + + :param str compression: Compression type + :param str basename: Base filename + :returns: basename with compression suffix + + If the compression is unknown it defaults to xz + """ + SUFFIXES = {"xz": ".xz", "gzip": ".gz", "bzip2": ".bz2", "lzma": ".lzma"} + return basename + SUFFIXES.get(compression, ".xz")
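A couple of quick examples of the helper above:

    from pylorax.imgutils import default_image_name

    default_image_name("gzip", "root.tar")     # "root.tar.gz"
    default_image_name("unknown", "root.tar")  # "root.tar.xz" -- unknown types fall back to xz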
\ No newline at end of file diff --git a/f34-branch/_modules/pylorax/installer.html b/f34-branch/_modules/pylorax/installer.html new file mode 100644 index 00000000..5d9ee445 --- /dev/null +++ b/f34-branch/_modules/pylorax/installer.html @@ -0,0 +1,783 @@ + pylorax.installer — Lorax 34.13 documentation

Source code for pylorax.installer

+#
+# Copyright (C) 2011-2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import logging
+log = logging.getLogger("pylorax")
+
+import glob
+import json
+from math import ceil
+import os
+import subprocess
+import shutil
+import socket
+import tempfile
+
+# Use the Lorax treebuilder branch for iso creation
+from pylorax.executils import execWithRedirect, execReadlines
+from pylorax.imgutils import PartitionMount, mksparse, mkext4img, loop_detach
+from pylorax.imgutils import get_loop_name, dm_detach, mount, umount
+from pylorax.imgutils import mkqemu_img, mktar, mkcpio, mkfsimage_from_disk
+from pylorax.monitor import LogMonitor
+from pylorax.mount import IsoMountpoint
+from pylorax.sysutils import joinpaths
+from pylorax.treebuilder import udev_escape
+
+
+ROOT_PATH = "/mnt/sysimage/"
+
+
[docs]class InstallError(Exception): + pass
+ + +
[docs]def create_vagrant_metadata(path, size=0): + """ Create a default Vagrant metadata.json file + + :param str path: Path to metadata.json file + :param int size: Disk size in MiB + """ + metadata = { "provider":"libvirt", "format":"qcow2", "virtual_size": ceil(size / 1024) } + with open(path, "wt") as f: + json.dump(metadata, f, indent=4)
+ + +
[docs]def update_vagrant_metadata(path, size): + """ Update the Vagrant metadata.json file + + :param str path: Path to metadata.json file + :param int size: Disk size in MiB + + This function makes sure that the provider, format and virtual size of the + metadata file are set correctly. All other values are left untouched. + """ + with open(path, "rt") as f: + try: + metadata = json.load(f) + except ValueError as e: + log.error("Problem reading metadata file %s: %s", path, e) + return + + metadata["provider"] = "libvirt" + metadata["format"] = "qcow2" + metadata["virtual_size"] = ceil(size / 1024) + with open(path, "wt") as f: + json.dump(metadata, f, indent=4)
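A short sketch of the two helpers above working together (the path is illustrative):

    from pylorax.installer import create_vagrant_metadata, update_vagrant_metadata

    # Write a default metadata.json, then record the virtual size of a 4096 MiB disk.
    create_vagrant_metadata("/tmp/metadata.json")
    update_vagrant_metadata("/tmp/metadata.json", 4096)   # virtual_size becomes 4 (GiB)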
+ + +
[docs]def find_free_port(start=5900, end=5999, host="127.0.0.1"): + """ Return first free port in range. + + :param int start: Starting port number + :param int end: Ending port number + :param str host: Host IP to search + :returns: First free port or -1 if none found + :rtype: int + """ + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + for port in range(start, end+1): + try: + s.bind((host, port)) + s.close() + return port + except OSError: + pass + + return -1
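For instance, the way QEMUInstall below turns the result into a -display argument (sketch only; the port can still be grabbed by another process between this check and the eventual bind):

    from pylorax.installer import find_free_port

    port = find_free_port()
    if port == -1:
        raise RuntimeError("no free VNC port between 5900 and 5999")
    display_args = "vnc=127.0.0.1:%d" % (port - 5900)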
+ +
[docs]def append_initrd(initrd, files): + """ Append files to an initrd. + + :param str initrd: Path to initrd + :param list files: list of file paths to add + :returns: Path to a new initrd + :rtype: str + + The files are added to the initrd by creating a cpio image + of the files (stored at /) and writing the cpio to the end of a + copy of the initrd. + + The initrd is not changed, a copy is made before appending the + cpio archive. + """ + qemu_initrd = tempfile.mktemp(prefix="lmc-initrd-", suffix=".img") + shutil.copy2(initrd, qemu_initrd) + ks_dir = tempfile.mkdtemp(prefix="lmc-ksdir-") + for ks in files: + shutil.copy2(ks, ks_dir) + ks_initrd = tempfile.mktemp(prefix="lmc-ks-", suffix=".img") + mkcpio(ks_dir, ks_initrd) + shutil.rmtree(ks_dir) + with open(qemu_initrd, "ab") as initrd_fp: + with open(ks_initrd, "rb") as ks_fp: + while True: + data = ks_fp.read(1024**2) + if not data: + break + initrd_fp.write(data) + os.unlink(ks_initrd) + + return qemu_initrd
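A hedged usage sketch for the helper above (paths are placeholders; the caller is expected to delete the returned temporary initrd when done):

    import os
    from pylorax.installer import append_initrd

    new_initrd = append_initrd("/tmp/initrd.img", ["/tmp/test.ks"])
    print("initrd with kickstart appended:", new_initrd)
    os.unlink(new_initrd)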
+ +
[docs]class QEMUInstall(object): + """ + Run qemu using an iso and a kickstart + """ + # Mapping of arch to qemu command + QEMU_CMDS = {"x86_64": "qemu-system-x86_64", + "i386": "qemu-system-i386", + "arm": "qemu-system-arm", + "aarch64": "qemu-system-aarch64", + "ppc64le": "qemu-system-ppc64" + } + + def __init__(self, opts, iso, ks_paths, disk_img, img_size=2048, + kernel_args=None, memory=1024, vcpus=None, vnc=None, arch=None, + cancel_func=None, virtio_host="127.0.0.1", virtio_port=6080, + image_type=None, boot_uefi=False, ovmf_path=None): + """ + Start the installation + + :param iso: Information about the iso to use for the installation + :type iso: IsoMountpoint + :param list ks_paths: Paths to kickstart files. All are injected, the + first one is the one executed. + :param str disk_img: Path to a disk image, created if it doesn't exist + :param int img_size: The image size, in MiB, to create if it doesn't exist + :param str kernel_args: Extra kernel arguments to pass on the kernel cmdline + :param int memory: Amount of RAM to assign to the virt, in MiB + :param int vcpus: Number of virtual cpus + :param str vnc: Arguments to pass to qemu -display + :param str arch: Optional architecture to use in the virt + :param cancel_func: Function that returns True if the installation fails + :type cancel_func: function + :param str virtio_host: Hostname to connect virtio log to + :param int virtio_port: Port to connect virtio log to + :param str image_type: Type of qemu-img disk to create, or None. + :param bool boot_uefi: Use OVMF to boot the VM in UEFI mode + :param str ovmf_path: Path to the OVMF firmware + """ + # Look up qemu-system- for arch if passed, or try to guess using host arch + qemu_cmd = [self.QEMU_CMDS.get(arch or os.uname().machine, "qemu-system-"+os.uname().machine)] + if not os.path.exists("/usr/bin/"+qemu_cmd[0]): + raise InstallError("%s does not exist, cannot run qemu" % qemu_cmd[0]) + + qemu_cmd += ["-no-user-config"] + qemu_cmd += ["-m", str(memory)] + if vcpus: + qemu_cmd += ["-smp", str(vcpus)] + + if not opts.no_kvm and os.path.exists("/dev/kvm"): + qemu_cmd += ["-machine", "accel=kvm"] + + if boot_uefi: + qemu_cmd += ["-machine", "q35,smm=on"] + qemu_cmd += ["-global", "driver=cfi.pflash01,property=secure,value=on"] + + # Copy the initrd from the iso, create a cpio archive of the kickstart files + # and append it to the temporary initrd.
+ qemu_initrd = append_initrd(iso.initrd, ks_paths) + qemu_cmd += ["-kernel", iso.kernel] + qemu_cmd += ["-initrd", qemu_initrd] + + # Add the disk and cdrom + if not os.path.isfile(disk_img): + mksparse(disk_img, img_size * 1024**2) + drive_args = "file=%s" % disk_img + drive_args += ",cache=unsafe,discard=unmap" + if image_type: + drive_args += ",format=%s" % image_type + else: + drive_args += ",format=raw" + qemu_cmd += ["-drive", drive_args] + + drive_args = "file=%s,media=cdrom,readonly=on" % iso.iso_path + qemu_cmd += ["-drive", drive_args] + + # Setup the cmdline args + # ====================== + cmdline_args = "inst.ks=file:/%s" % os.path.basename(ks_paths[0]) + cmdline_args += " inst.stage2=hd:LABEL=%s" % udev_escape(iso.label) + if opts.proxy: + cmdline_args += " inst.proxy=%s" % opts.proxy + if kernel_args: + cmdline_args += " "+kernel_args + cmdline_args += " inst.text inst.cmdline" + + qemu_cmd += ["-append", cmdline_args] + + if not opts.vnc: + vnc_port = find_free_port() + if vnc_port == -1: + raise InstallError("No free VNC ports") + display_args = "vnc=127.0.0.1:%d" % (vnc_port - 5900) + else: + display_args = opts.vnc + log.info("qemu %s", display_args) + qemu_cmd += ["-nographic", "-monitor", "none", "-serial", "null", "-display", display_args ] + + # Setup the virtio log port + qemu_cmd += ["-device", "virtio-serial-pci,id=virtio-serial0"] + qemu_cmd += ["-device", "virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0" + ",id=channel0,name=org.fedoraproject.anaconda.log.0"] + qemu_cmd += ["-chardev", "socket,id=charchannel0,host=%s,port=%s" % (virtio_host, virtio_port)] + + # Pass through rng from host + if opts.with_rng != "none": + qemu_cmd += ["-object", "rng-random,id=virtio-rng0,filename=%s" % opts.with_rng] + if boot_uefi: + qemu_cmd += ["-device", "virtio-rng-pci,rng=virtio-rng0,id=rng0,bus=pcie.0,addr=0x9"] + else: + qemu_cmd += ["-device", "virtio-rng-pci,rng=virtio-rng0,id=rng0,bus=pci.0,addr=0x9"] + + if boot_uefi and ovmf_path: + qemu_cmd += ["-drive", "file=%s/OVMF_CODE.secboot.fd,if=pflash,format=raw,unit=0,readonly=on" % ovmf_path] + + # Make a copy of the OVMF_VARS.secboot.fd for this run + ovmf_vars = tempfile.mktemp(prefix="lmc-OVMF_VARS-", suffix=".fd") + shutil.copy2(joinpaths(ovmf_path, "/OVMF_VARS.secboot.fd"), ovmf_vars) + + qemu_cmd += ["-drive", "file=%s,if=pflash,format=raw,unit=1" % ovmf_vars] + + log.info("Running qemu") + log.debug(qemu_cmd) + try: + execWithRedirect(qemu_cmd[0], qemu_cmd[1:], reset_lang=False, raise_err=True, + callback=lambda p: not (cancel_func and cancel_func())) + except subprocess.CalledProcessError as e: + log.error("Running qemu failed:") + log.error("cmd: %s", " ".join(e.cmd)) + log.error("output: %s", e.output or "") + raise InstallError("QEMUInstall failed") + except (OSError, KeyboardInterrupt) as e: + log.error("Running qemu failed: %s", str(e)) + raise InstallError("QEMUInstall failed") + finally: + os.unlink(qemu_initrd) + if boot_uefi and ovmf_path: + os.unlink(ovmf_vars) + + if cancel_func and cancel_func(): + log.error("Installation error detected. See logfile for details.") + raise InstallError("QEMUInstall failed") + else: + log.info("Installation finished without errors.")
+ + +
[docs]def novirt_cancel_check(cancel_funcs, proc): + """ + Check to see if there has been an error in the logs + + :param cancel_funcs: list of functions to call, True from any one cancels the build + :type cancel_funcs: list + :param proc: Popen object for the anaconda process + :type proc: subprocess.Popen + :returns: True if the process has been terminated + + The cancel_funcs functions should return a True if an error has been detected. + When an error is detected the process is terminated and this returns True + """ + for f in cancel_funcs: + if f(): + # Anaconda runs from unshare, anaconda doesn't exit correctly so try to + # send TERM to all of them directly + import psutil + for p in psutil.Process(proc.pid).children(recursive=True): + p.terminate() + psutil.Process(proc.pid).terminate() + return True + return False
+ + +
[docs]def anaconda_cleanup(dirinstall_path): + """ + Cleanup any leftover mounts from anaconda + + :param str dirinstall_path: Path where anaconda mounts things + :returns: True if cleanups were successful. False if any of them failed. + + If anaconda crashes it may leave things mounted under this path. It will + typically be set to /mnt/sysimage/ + + Attempts to cleanup may also fail. Catch these and continue trying the + other mountpoints. + + Anaconda may also leave /run/anaconda.pid behind, clean that up as well. + """ + # Anaconda may not clean up its /var/run/anaconda.pid file + # Make sure the process is really finished (it should be, since it was started from a subprocess call) + # and then remove the pid file. + if os.path.exists("/var/run/anaconda.pid"): + # anaconda may be started using unshare so the pid is always 1 + if open("/var/run/anaconda.pid").read().strip() == "1": + os.unlink("/var/run/anaconda.pid") + + rc = True + dirinstall_path = os.path.abspath(dirinstall_path) + # unmount filesystems + for mounted in reversed(open("/proc/mounts").readlines()): + (_device, mountpoint, _rest) = mounted.split(" ", 2) + if mountpoint.startswith(dirinstall_path) and os.path.ismount(mountpoint): + try: + umount(mountpoint) + except subprocess.CalledProcessError: + log.error("Cleanup of %s failed. See program.log for details", mountpoint) + rc = False + return rc
+ + +
[docs]def novirt_install(opts, disk_img, disk_size, cancel_func=None, tar_img=None): + """ + Use Anaconda to install to a disk image + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str disk_img: The full path to the disk image to be created + :param int disk_size: The size of the disk_img in MiB + :param cancel_func: Function that returns True to cancel build + :type cancel_func: function + :param str tar_img: For make_tar_disk, the path to final tarball to be created + + This method runs anaconda to create the image and then based on the opts + passed creates a qemu disk image or tarfile. + """ + dirinstall_path = ROOT_PATH + + # Clean up /tmp/ from previous runs to prevent stale info from being used + for path in ["/tmp/yum.repos.d/", "/tmp/yum.cache/"]: + if os.path.isdir(path): + shutil.rmtree(path) + + args = ["--kickstart", opts.ks[0], "--cmdline"] + if opts.anaconda_args: + for arg in opts.anaconda_args: + args += arg.split(" ", 1) + if opts.proxy: + args += ["--proxy", opts.proxy] + if opts.armplatform: + args += ["--armplatform", opts.armplatform] + + if opts.make_iso or opts.make_fsimage or opts.make_pxe_live: + # Make a blank fs image + args += ["--dirinstall"] + + mkext4img(None, disk_img, label=opts.fs_label, size=disk_size * 1024**2) + if not os.path.isdir(dirinstall_path): + os.mkdir(dirinstall_path) + mount(disk_img, opts="loop", mnt=dirinstall_path) + elif opts.make_tar or opts.make_oci: + # Install under dirinstall_path, make sure it starts clean + if os.path.exists(dirinstall_path): + shutil.rmtree(dirinstall_path) + + if opts.make_oci: + # OCI installs under /rootfs/ + dirinstall_path = joinpaths(dirinstall_path, "rootfs") + args += ["--dirinstall", dirinstall_path] + else: + args += ["--dirinstall"] + + os.makedirs(dirinstall_path) + else: + args += ["--image", disk_img] + + # Create the sparse image + mksparse(disk_img, disk_size * 1024**2) + + log_monitor = LogMonitor(timeout=opts.timeout) + args += ["--remotelog", "%s:%s" % (log_monitor.host, log_monitor.port)] + cancel_funcs = [log_monitor.server.log_check] + if cancel_func is not None: + cancel_funcs.append(cancel_func) + + # Make sure anaconda has the right product and release + log.info("Running anaconda.") + try: + unshare_args = ["--pid", "--kill-child", "--mount", "--propagation", "unchanged", "anaconda"] + args + for line in execReadlines("unshare", unshare_args, reset_lang=False, + env_add={"ANACONDA_PRODUCTNAME": opts.project, + "ANACONDA_PRODUCTVERSION": opts.releasever}, + callback=lambda p: not novirt_cancel_check(cancel_funcs, p)): + log.info(line) + + # Make sure the new filesystem is correctly labeled + setfiles_args = ["-e", "/proc", "-e", "/sys", + "/etc/selinux/targeted/contexts/files/file_contexts", "/"] + + if "--dirinstall" in args: + # setfiles may not be available, warn instead of fail + try: + execWithRedirect("setfiles", setfiles_args, root=dirinstall_path) + except (subprocess.CalledProcessError, OSError) as e: + log.warning("Running setfiles on install tree failed: %s", str(e)) + else: + with PartitionMount(disk_img) as img_mount: + if img_mount and img_mount.mount_dir: + try: + execWithRedirect("setfiles", setfiles_args, root=img_mount.mount_dir) + except (subprocess.CalledProcessError, OSError) as e: + log.warning("Running setfiles on install tree failed: %s", str(e)) + + # For image installs, run fstrim to discard unused blocks. 
This way + # unused blocks do not need to be allocated for sparse image types + execWithRedirect("fstrim", [img_mount.mount_dir]) + + except (subprocess.CalledProcessError, OSError) as e: + log.error("Running anaconda failed: %s", e) + raise InstallError("novirt_install failed") + finally: + log_monitor.shutdown() + + # Move the anaconda logs over to a log directory + log_dir = os.path.abspath(os.path.dirname(opts.logfile)) + log_anaconda = joinpaths(log_dir, "anaconda") + if not os.path.isdir(log_anaconda): + os.mkdir(log_anaconda) + for l in glob.glob("/tmp/*log")+glob.glob("/tmp/anaconda-tb-*"): + shutil.copy2(l, log_anaconda) + os.unlink(l) + + # Make sure any leftover anaconda mounts have been cleaned up + if not anaconda_cleanup(dirinstall_path): + raise InstallError("novirt_install cleanup of anaconda mounts failed.") + + if not opts.make_iso and not opts.make_fsimage and not opts.make_pxe_live: + dm_name = os.path.splitext(os.path.basename(disk_img))[0] + + # Remove device-mapper for partitions and disk + log.debug("Removing device-mapper setup on %s", dm_name) + for d in sorted(glob.glob("/dev/mapper/"+dm_name+"*"), reverse=True): + dm_detach(d) + + log.debug("Removing loop device for %s", disk_img) + loop_detach("/dev/"+get_loop_name(disk_img)) + + # qemu disk image is used by bare qcow2 images and by Vagrant + if opts.image_type: + log.info("Converting %s to %s", disk_img, opts.image_type) + qemu_args = [] + for arg in opts.qemu_args: + qemu_args += arg.split(" ", 1) + + # convert the image to the selected format + if "-O" not in qemu_args: + qemu_args.extend(["-O", opts.image_type]) + qemu_img = tempfile.mktemp(prefix="lmc-disk-", suffix=".img") + execWithRedirect("qemu-img", ["convert"] + qemu_args + [disk_img, qemu_img], raise_err=True) + if not opts.make_vagrant: + execWithRedirect("mv", ["-f", qemu_img, disk_img], raise_err=True) + else: + # Take the new qcow2 image and package it up for Vagrant + compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + vagrant_dir = tempfile.mkdtemp(prefix="lmc-tmpdir-") + metadata_path = joinpaths(vagrant_dir, "metadata.json") + execWithRedirect("mv", ["-f", qemu_img, joinpaths(vagrant_dir, "box.img")], raise_err=True) + if opts.vagrant_metadata: + shutil.copy2(opts.vagrant_metadata, metadata_path) + else: + create_vagrant_metadata(metadata_path) + update_vagrant_metadata(metadata_path, disk_size) + if opts.vagrantfile: + shutil.copy2(opts.vagrantfile, joinpaths(vagrant_dir, "vagrantfile")) + + log.info("Creating Vagrant image") + rc = mktar(vagrant_dir, disk_img, opts.compression, compress_args, selinux=False) + if rc: + raise InstallError("novirt_install mktar failed: rc=%s" % rc) + shutil.rmtree(vagrant_dir) + elif opts.make_tar: + compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + rc = mktar(dirinstall_path, disk_img, opts.compression, compress_args) + shutil.rmtree(dirinstall_path) + + if rc: + raise InstallError("novirt_install mktar failed: rc=%s" % rc) + elif opts.make_oci: + # An OCI image places the filesystem under /rootfs/ and adds the json files at the top + # And then creates a tar of the whole thing. 
+ compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + shutil.copy2(opts.oci_config, ROOT_PATH) + shutil.copy2(opts.oci_runtime, ROOT_PATH) + rc = mktar(ROOT_PATH, disk_img, opts.compression, compress_args) + + if rc: + raise InstallError("novirt_install mktar failed: rc=%s" % rc) + else: + # For raw disk images, use fallocate to deallocate unused space + execWithRedirect("fallocate", ["--dig-holes", disk_img], raise_err=True) + + # For make_tar_disk, wrap the result in a tar file, and remove the original disk image. + if opts.make_tar_disk: + compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + rc = mktar(disk_img, tar_img, opts.compression, compress_args, selinux=False) + + if rc: + raise InstallError("novirt_install mktar failed: rc=%s" % rc) + + os.unlink(disk_img)
+ +
[docs]def virt_install(opts, install_log, disk_img, disk_size, cancel_func=None, tar_img=None): + """ + Use qemu to install to a disk image + + :param opts: options passed to livemedia-creator + :type opts: argparse options + :param str install_log: The path to write the log from qemu + :param str disk_img: The full path to the disk image to be created + :param int disk_size: The size of the disk_img in MiB + :param cancel_func: Function that returns True to cancel build + :type cancel_func: function + :param str tar_img: For make_tar_disk, the path to final tarball to be created + + This uses qemu with a boot.iso and a kickstart to create a disk + image and then optionally, based on the opts passed, creates tarfile. + """ + iso_mount = IsoMountpoint(opts.iso, opts.location) + if not iso_mount.stage2: + iso_mount.umount() + raise InstallError("ISO is missing stage2, cannot continue") + + log_monitor = LogMonitor(install_log, timeout=opts.timeout) + cancel_funcs = [log_monitor.server.log_check] + if cancel_func is not None: + cancel_funcs.append(cancel_func) + + kernel_args = "" + if opts.kernel_args: + kernel_args += opts.kernel_args + if opts.proxy: + kernel_args += " proxy="+opts.proxy + + if opts.image_type and not opts.make_fsimage: + qemu_args = [] + for arg in opts.qemu_args: + qemu_args += arg.split(" ", 1) + if "-f" not in qemu_args: + qemu_args += ["-f", opts.image_type] + + mkqemu_img(disk_img, disk_size*1024**2, qemu_args) + + if opts.make_fsimage or opts.make_tar or opts.make_oci: + diskimg_path = tempfile.mktemp(prefix="lmc-disk-", suffix=".img") + else: + diskimg_path = disk_img + + try: + QEMUInstall(opts, iso_mount, opts.ks, diskimg_path, disk_size, + kernel_args, opts.ram, opts.vcpus, opts.vnc, opts.arch, + cancel_func = lambda : any(f() for f in cancel_funcs), + virtio_host = log_monitor.host, + virtio_port = log_monitor.port, + image_type=opts.image_type, boot_uefi=opts.virt_uefi, + ovmf_path=opts.ovmf_path) + log_monitor.shutdown() + except InstallError as e: + log.error("VirtualInstall failed: %s", e) + raise + finally: + log.info("unmounting the iso") + iso_mount.umount() + + if log_monitor.server.log_check(): + if not log_monitor.server.error_line and opts.timeout: + msg = "virt_install failed due to timeout" + else: + msg = "virt_install failed on line: %s" % log_monitor.server.error_line + raise InstallError(msg) + elif cancel_func and cancel_func(): + raise InstallError("virt_install canceled by cancel_func") + + if opts.make_fsimage: + mkfsimage_from_disk(diskimg_path, disk_img, disk_size, label=opts.fs_label) + os.unlink(diskimg_path) + elif opts.make_tar: + compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + with PartitionMount(diskimg_path) as img_mount: + if img_mount and img_mount.mount_dir: + rc = mktar(img_mount.mount_dir, disk_img, opts.compression, compress_args) + else: + rc = 1 + os.unlink(diskimg_path) + + if rc: + raise InstallError("virt_install failed") + elif opts.make_oci: + # An OCI image places the filesystem under /rootfs/ and adds the json files at the top + # And then creates a tar of the whole thing. 
+ compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + with PartitionMount(diskimg_path, submount="rootfs") as img_mount: + if img_mount and img_mount.temp_dir: + shutil.copy2(opts.oci_config, img_mount.temp_dir) + shutil.copy2(opts.oci_runtime, img_mount.temp_dir) + rc = mktar(img_mount.temp_dir, disk_img, opts.compression, compress_args) + else: + rc = 1 + os.unlink(diskimg_path) + + if rc: + raise InstallError("virt_install failed") + elif opts.make_vagrant: + compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + vagrant_dir = tempfile.mkdtemp(prefix="lmc-tmpdir-") + metadata_path = joinpaths(vagrant_dir, "metadata.json") + execWithRedirect("mv", ["-f", disk_img, joinpaths(vagrant_dir, "box.img")], raise_err=True) + if opts.vagrant_metadata: + shutil.copy2(opts.vagrant_metadata, metadata_path) + else: + create_vagrant_metadata(metadata_path) + update_vagrant_metadata(metadata_path, disk_size) + if opts.vagrantfile: + shutil.copy2(opts.vagrantfile, joinpaths(vagrant_dir, "vagrantfile")) + + rc = mktar(vagrant_dir, disk_img, opts.compression, compress_args, selinux=False) + if rc: + raise InstallError("virt_install failed") + shutil.rmtree(vagrant_dir) + + # For make_tar_disk, wrap the result in a tar file, and remove the original disk image. + if opts.make_tar_disk: + compress_args = [] + for arg in opts.compress_args: + compress_args += arg.split(" ", 1) + + rc = mktar(disk_img, tar_img, opts.compression, compress_args, selinux=False) + + if rc: + raise InstallError("virt_install mktar failed: rc=%s" % rc) + + os.unlink(disk_img)
\ No newline at end of file diff --git a/f34-branch/_modules/pylorax/ltmpl.html b/f34-branch/_modules/pylorax/ltmpl.html new file mode 100644 index 00000000..0a3fbb3e --- /dev/null +++ b/f34-branch/_modules/pylorax/ltmpl.html @@ -0,0 +1,1091 @@ + pylorax.ltmpl — Lorax 34.10 documentation

Source code for pylorax.ltmpl

+#
+# ltmpl.py
+#
+# Copyright (C) 2009-2018  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#                     Will Woods <wwoods@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.ltmpl")
+
+import os, re, glob, shlex, fnmatch
+from os.path import basename, isdir
+from subprocess import CalledProcessError
+import shutil
+
+from pylorax.sysutils import joinpaths, cpfile, mvfile, replace, remove
+from pylorax.dnfhelper import LoraxDownloadCallback, LoraxRpmCallback
+from pylorax.base import DataHolder
+from pylorax.executils import runcmd, runcmd_output
+from pylorax.imgutils import mkcpio
+
+from mako.lookup import TemplateLookup
+from mako.exceptions import text_error_template
+import sys, traceback
+import struct
+import dnf
+import collections.abc
+
+
[docs]class LoraxTemplate(object): + def __init__(self, directories=None): + directories = directories or ["/usr/share/lorax"] + # we have to add ["/"] to the template lookup directories or the + # file includes won't work properly for absolute paths + self.directories = ["/"] + directories + +
[docs] def parse(self, template_file, variables): + lookup = TemplateLookup(directories=self.directories) + template = lookup.get_template(template_file) + + try: + textbuf = template.render(**variables) + except: + logger.error("Problem rendering %s (%s):", template_file, variables) + logger.error(text_error_template().render()) + raise + + # split, strip and remove empty lines + lines = textbuf.splitlines() + lines = [line.strip() for line in lines] + lines = [line for line in lines if line] + + # remove comments + lines = [line for line in lines if not line.startswith("#")] + + # split with shlex and perform brace expansion. This can fail, so we unroll the loop + # for better error reporting. + expanded_lines = [] + try: + for line in lines: + expanded_lines.append(split_and_expand(line)) + except Exception as e: + logger.error('shlex error processing "%s": %s', line, str(e)) + raise + return expanded_lines
+ +
[docs]def split_and_expand(line): + return [exp for word in shlex.split(line) for exp in brace_expand(word)]
+ +
[docs]def brace_expand(s): + if not ('{' in s and ',' in s and '}' in s): + yield s + else: + right = s.find('}') + left = s[:right].rfind('{') + (prefix, choices, suffix) = (s[:left], s[left+1:right], s[right+1:]) + for choice in choices.split(','): + for alt in brace_expand(prefix+choice+suffix): + yield alt
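A quick demonstration of the two helpers above:

    from pylorax.ltmpl import brace_expand, split_and_expand

    list(brace_expand("images/pxeboot/{vmlinuz,initrd.img}"))
    # ['images/pxeboot/vmlinuz', 'images/pxeboot/initrd.img']

    split_and_expand("install boot/{vmlinuz,initrd.img} /images")
    # ['install', 'boot/vmlinuz', 'boot/initrd.img', '/images']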
+ +
[docs]def rglob(pathname, root="/", fatal=False): + seen = set() + rootlen = len(root)+1 + for f in glob.iglob(joinpaths(root, pathname)): + if f not in seen: + seen.add(f) + yield f[rootlen:] # remove the root to produce relative path + if fatal and not seen: + raise IOError("nothing matching %s in %s" % (pathname, root))
+ +
[docs]def rexists(pathname, root=""): + # Generator is always True, even with no values; + # bool(rglob(...)) won't work here. + for _path in rglob(pathname, root): + return True + return False
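Both helpers above return paths relative to the given root; a small example (the root path is illustrative):

    from pylorax.ltmpl import rglob, rexists

    # List kernel images under an unpacked install root, as paths relative to it.
    for f in rglob("boot/vmlinuz-*", root="/var/tmp/installroot"):
        print(f)

    rexists("etc/passwd", root="/var/tmp/installroot")   # True if the file exists there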
+ +
[docs]class TemplateRunner(object): + ''' + This class parses and executes Lorax templates. Sample usage: + + # install a bunch of packages + runner = LoraxTemplateRunner(inroot=rundir, outroot=rundir, dbo=dnf_obj) + runner.run("install-packages.ltmpl") + + NOTES: + + * Parsing procedure is roughly: + 1. Mako template expansion (on the whole file) + 2. For each line of the result, + + a. Whitespace splitting (using shlex.split()) + b. Brace expansion (using brace_expand()) + c. If the first token is the name of a function, call that function + with the rest of the line as arguments + + * Parsing and execution are *separate* passes - so you can't use the result + of a command in an %if statement (or any other control statements)! + ''' + def __init__(self, fatalerrors=True, templatedir=None, defaults=None, builtins=None): + self.fatalerrors = fatalerrors + self.templatedir = templatedir or "/usr/share/lorax" + self.templatefile = None + self.builtins = builtins or {} + self.defaults = defaults or {} + + +
[docs] def run(self, templatefile, **variables): + for k,v in list(self.defaults.items()) + list(self.builtins.items()): + variables.setdefault(k,v) + logger.debug("executing %s with variables=%s", templatefile, variables) + self.templatefile = templatefile + t = LoraxTemplate(directories=[self.templatedir]) + commands = t.parse(templatefile, variables) + self._run(commands)
+ + + def _run(self, parsed_template): + logger.info("running %s", self.templatefile) + for (num, line) in enumerate(parsed_template,1): + logger.debug("template line %i: %s", num, " ".join(line)) + skiperror = False + (cmd, args) = (line[0], line[1:]) + # Following Makefile convention, if the command is prefixed with + # a dash ('-'), we'll ignore any errors on that line. + if cmd.startswith('-'): + cmd = cmd[1:] + skiperror = True + try: + # grab the method named in cmd and pass it the given arguments + f = getattr(self, cmd, None) + if cmd[0] == '_' or cmd == 'run' or not isinstance(f, collections.abc.Callable): + raise ValueError("unknown command %s" % cmd) + f(*args) + except Exception: # pylint: disable=broad-except + if skiperror: + logger.debug("ignoring error") + continue + logger.error("template command error in %s:", self.templatefile) + logger.error(" %s", " ".join(line)) + # format the exception traceback + exclines = traceback.format_exception(*sys.exc_info()) + # skip the bit about "ltmpl.py, in _run()" - we know that + exclines.pop(1) + # log the "ErrorType: this is what happened" line + logger.error(" %s", exclines[-1].strip()) + # and log the entire traceback to the debug log + for _line in ''.join(exclines).splitlines(): + logger.debug(" %s", _line) + if self.fatalerrors: + raise
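To make the dispatch described in the docstring concrete, here is a toy subclass (not part of Lorax) that adds one template command; the internal _run() is called directly only to show how a parsed line maps onto a method call:

    from pylorax.ltmpl import TemplateRunner

    class ToyRunner(TemplateRunner):
        # Any public method becomes a template command: "shout some words"
        def shout(self, *words):
            print(" ".join(w.upper() for w in words))

    ToyRunner()._run([["shout", "hello", "world"]])   # prints "HELLO WORLD"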
+ + +# TODO: operate inside an actual chroot for safety? Not that RPM bothers.. +
[docs]class LoraxTemplateRunner(TemplateRunner): + ''' + This class parses and executes Lorax templates. Sample usage: + + # install a bunch of packages + runner = LoraxTemplateRunner(inroot=rundir, outroot=rundir, dbo=dnf_obj) + runner.run("install-packages.ltmpl") + + # modify a runtime dir + runner = LoraxTemplateRunner(inroot=rundir, outroot=newrun) + runner.run("runtime-transmogrify.ltmpl") + + NOTES: + + * Commands that run external programs (e.g. systemctl) currently use + the *host*'s copy of that program, which may cause problems if there's a + big enough difference between the host and the image you're modifying. + + * The commands are not executed under a real chroot, so absolute symlinks + will point *outside* the inroot/outroot. Be careful with symlinks! + + ADDING NEW COMMANDS: + + * Each template command is just a method of the LoraxTemplateRunner + object - so adding a new command is as easy as adding a new function. + + * Each function gets arguments that correspond to the rest of the tokens + on that line (after word splitting and brace expansion) + + * Commands should raise exceptions for errors - don't use sys.exit() + ''' + def __init__(self, inroot, outroot, dbo=None, fatalerrors=True, + templatedir=None, defaults=None): + self.inroot = inroot + self.outroot = outroot + self.dbo = dbo + builtins = DataHolder(exists=lambda p: rexists(p, root=inroot), + glob=lambda g: list(rglob(g, root=inroot))) + self.results = DataHolder(treeinfo=dict()) # just treeinfo for now + + super(LoraxTemplateRunner, self).__init__(fatalerrors, templatedir, defaults, builtins) + # TODO: set up custom logger with a filter to add line info + + def _out(self, path): + return joinpaths(self.outroot, path) + def _in(self, path): + return joinpaths(self.inroot, path) + + def _filelist(self, *pkgs): + """ Return the list of files in the packages """ + pkglist = [] + for pkg_glob in pkgs: + pkglist += list(self.dbo.sack.query().installed().filter(name__glob=pkg_glob)) + + # dnf/hawkey doesn't make any distinction between file, dir or ghost like yum did + # so only return the files. + return set(f for pkg in pkglist for f in pkg.files if not os.path.isdir(self._out(f))) + + def _getsize(self, *files): + return sum(os.path.getsize(self._out(f)) for f in files if os.path.isfile(self._out(f))) + + def _write_package_log(self): + """ + Write the list of installed packages to /root/ on the boot.iso + + If lorax is called with a debug repo, find the corresponding debuginfo package + names and write them to /root/debug-pkgs.log on the boot.iso + The non-debuginfo packages are written to /root/lorax-packages.log + """ + os.makedirs(self._out("root/"), exist_ok=True) + available = self.dbo.sack.query().available() + pkgs = [] + debug_pkgs = [] + for p in list(self.dbo.transaction.install_set): + pkgs.append(f"{p.name}-{p.version}-{p.release}.{p.arch}") + if available.filter(name=p.name+"-debuginfo"): + debug_pkgs.append(f"{p.name}-debuginfo-{p.epoch}:{p.version}-{p.release}") + + with open(self._out("root/lorax-packages.log"), "w") as f: + f.write("\n".join(sorted(pkgs))) + f.write("\n") + + if debug_pkgs: + with open(self._out("root/debug-pkgs.log"), "w") as f: + f.write("\n".join(sorted(debug_pkgs))) + f.write("\n") +
[docs] def install(self, srcglob, dest): + ''' + install SRC DEST + Copy the given file (or files, if a glob is used) from the input + tree to the given destination in the output tree. + The path to DEST must exist in the output tree. + If DEST is a directory, SRC will be copied into that directory. + If DEST doesn't exist, SRC will be copied to a file with that name, + assuming the rest of the path exists. + This is pretty much like how the 'cp' command works. + + Examples: + install usr/share/myconfig/grub.conf /boot + install /usr/share/myconfig/grub.conf.in /boot/grub.conf + ''' + for src in rglob(self._in(srcglob), fatal=True): + try: + cpfile(src, self._out(dest)) + except shutil.Error as e: + logger.error(e)
+ +
[docs] def installimg(self, *args): + ''' + installimg [--xz|--gzip|--bzip2|--lzma] [-ARG|--ARG=OPTION] SRCDIR DESTFILE + Create a compressed cpio archive of the contents of SRCDIR and place + it in DESTFILE. + + If SRCDIR doesn't exist or is empty nothing is created. + + Examples: + installimg ${LORAXDIR}/product/ images/product.img + installimg ${LORAXDIR}/updates/ images/updates.img + installimg --xz -6 ${LORAXDIR}/updates/ images/updates.img + installimg --xz -9 --memlimit-compress=3700MiB ${LORAXDIR}/updates/ images/updates.img + + Optionally use a different compression type and override the default args + passed to it. The default is xz -9 + ''' + COMPRESSORS = ("--xz", "--gzip", "--bzip2", "--lzma") + if len(args) < 2: + raise ValueError("Not enough args for installimg.") + + srcdir = args[-2] + destfile = args[-1] + if not os.path.isdir(self._in(srcdir)) or not os.listdir(self._in(srcdir)): + return + + compression = "xz" + compressargs = [] + if args[0] in COMPRESSORS: + compression = args[0][2:] + + for arg in args[1:-2]: + if arg.startswith('-'): + compressargs.append(arg) + else: + raise ValueError("Argument is missing -") + + logger.info("Creating image file %s from contents of %s", self._out(destfile), self._in(srcdir)) + logger.debug("Using %s %s compression", compression, compressargs or "") + mkcpio(self._in(srcdir), self._out(destfile), compression=compression, compressargs=compressargs)
+ +
[docs] def mkdir(self, *dirs): + ''' + mkdir DIR [DIR ...] + Create the named DIR(s). Will create leading directories as needed. + + Example: + mkdir /images + ''' + for d in dirs: + d = self._out(d) + if not isdir(d): + os.makedirs(d)
+ +
[docs] def replace(self, pat, repl, *fileglobs): + ''' + replace PATTERN REPLACEMENT FILEGLOB [FILEGLOB ...] + Find-and-replace the given PATTERN (Python-style regex) with the given + REPLACEMENT string for each of the files listed. + + Example: + replace @VERSION@ ${product.version} /boot/grub.conf /boot/isolinux.cfg + ''' + match = False + for g in fileglobs: + for f in rglob(self._out(g)): + match = True + replace(f, pat, repl) + if not match: + raise IOError("no files matched %s" % " ".join(fileglobs))
+ +
[docs] def append(self, filename, data): + ''' + append FILE STRING + Append STRING (followed by a newline character) to FILE. + Python character escape sequences ('\\n', '\\t', etc.) will be + converted to the appropriate characters. + + Examples: + + append /etc/depmod.d/dd.conf "search updates built-in" + append /etc/resolv.conf "" + ''' + with open(self._out(filename), "a") as fobj: + fobj.write(bytes(data, "utf8").decode('unicode_escape')+"\n")
+ +
[docs] def treeinfo(self, section, key, *valuetoks): + ''' + treeinfo SECTION KEY ARG [ARG ...] + Add an item to the treeinfo data store. + The given SECTION will have a new item added where + KEY = ARG ARG ... + + Example: + treeinfo images-${kernel.arch} boot.iso images/boot.iso + ''' + if section not in self.results.treeinfo: + self.results.treeinfo[section] = dict() + self.results.treeinfo[section][key] = " ".join(valuetoks)
+ +
[docs] def installkernel(self, section, src, dest): + ''' + installkernel SECTION SRC DEST + Install the kernel from SRC in the input tree to DEST in the output + tree, and then add an item to the treeinfo data store, in the named + SECTION, where "kernel" = DEST. + + Equivalent to: + install SRC DEST + treeinfo SECTION kernel DEST + ''' + self.install(src, dest) + self.treeinfo(section, "kernel", dest)
+ +
[docs] def installinitrd(self, section, src, dest): + ''' + installinitrd SECTION SRC DEST + Same as installkernel, but for "initrd". + ''' + self.install(src, dest) + self.chmod(dest, '644') + self.treeinfo(section, "initrd", dest)
+ +
[docs] def installupgradeinitrd(self, section, src, dest): + ''' + installupgradeinitrd SECTION SRC DEST + Same as installkernel, but for "upgrade". + ''' + self.install(src, dest) + self.chmod(dest, '644') + self.treeinfo(section, "upgrade", dest)
+ + + + + +
[docs] def copy(self, src, dest): + ''' + copy SRC DEST + Copy SRC to DEST. + If DEST is a directory, SRC will be copied inside it. + If DEST doesn't exist, SRC will be copied to a file with + that name, if the path leading to it exists. + ''' + try: + cpfile(self._out(src), self._out(dest)) + except shutil.Error as e: + logger.error(e)
+ +
[docs] def move(self, src, dest): + ''' + move SRC DEST + Move SRC to DEST. + ''' + mvfile(self._out(src), self._out(dest))
+ +
[docs] def remove(self, *fileglobs): + ''' + remove FILEGLOB [FILEGLOB ...] + Remove all the named files or directories. + Will *not* raise exceptions if the file(s) are not found. + ''' + for g in fileglobs: + for f in rglob(self._out(g)): + remove(f) + logger.debug("removed %s", f)
+ +
[docs] def chmod(self, fileglob, mode): + ''' + chmod FILEGLOB OCTALMODE + Change the mode of all the files matching FILEGLOB to OCTALMODE. + ''' + for f in rglob(self._out(fileglob), fatal=True): + os.chmod(f, int(mode,8))
+ +
[docs] def log(self, msg): + ''' + log MESSAGE + Emit the given log message. Be sure to put it in quotes! + + Example: + log "Reticulating splines, please wait..." + ''' + logger.info(msg)
+ + # TODO: add ssh-keygen, mkisofs(?), find, and other useful commands +
[docs] def runcmd(self, *cmdlist): + ''' + runcmd CMD [ARG ...] + Run the given command with the given arguments. + + NOTE: All paths given MUST be COMPLETE, ABSOLUTE PATHS to the file + or files mentioned. ${root}/${inroot}/${outroot} are good for + constructing these paths. + + FURTHER NOTE: Please use this command only as a last resort! + Whenever possible, you should use the existing template commands. + If the existing commands don't do what you need, fix them! + + Examples: + (this should be replaced with a "find" function) + runcmd find ${root} -name "*.pyo" -type f -delete + %for f in find(root, name="*.pyo"): + remove ${f} + %endfor + ''' + cmd = cmdlist + logger.debug('running command: %s', cmd) + if cmd[0].startswith("--chdir="): + logger.error("--chdir is no longer supported for runcmd.") + raise ValueError("--chdir is no longer supported for runcmd.") + + try: + stdout = runcmd_output(cmd) + if stdout: + logger.debug('command output:\n%s', stdout) + logger.debug("command finished successfully") + except CalledProcessError as e: + if e.output: + logger.error('command output:\n%s', e.output) + logger.error('command returned failure (%d)', e.returncode) + raise
+ +
[docs] def installpkg(self, *pkgs): + ''' + installpkg [--required|--optional] [--except PKGGLOB [--except PKGGLOB ...]] PKGGLOB [PKGGLOB ...] + Request installation of all packages matching the given globs. + Note that this is just a *request* - nothing is *actually* installed + until the 'run_pkg_transaction' command is given. + + --required is now the default. If the PKGGLOB can be missing pass --optional + ''' + if pkgs[0] == '--optional': + pkgs = pkgs[1:] + required = False + elif pkgs[0] == '--required': + pkgs = pkgs[1:] + required = True + else: + required = True + + excludes = [] + while '--except' in pkgs: + idx = pkgs.index('--except') + if len(pkgs) == idx+1: + raise ValueError("installpkg needs an argument after --except") + + excludes.append(pkgs[idx+1]) + pkgs = pkgs[:idx] + pkgs[idx+2:] + + errors = False + for p in pkgs: + try: + # Start by using Subject to generate a package query, which will + # give us a query object similar to what dbo.install would select, + # minus the handling for multilib. This query may contain + # multiple arches. Pull the package names out of that, filter any + # that match the excludes patterns, and pass those names back to + # dbo.install to do the actual, arch and version and multilib + # aware, package selction. + + # dnf queries don't have a concept of negative globs which is why + # the filtering is done the hard way. + + pkgnames = [pkg for pkg in dnf.subject.Subject(p).get_best_query(self.dbo.sack).filter(latest=True)] + if not pkgnames: + raise dnf.exceptions.PackageNotFoundError("no package matched", p) + + # Apply excludes to the name only + for exclude in excludes: + pkgnames = [pkg for pkg in pkgnames if not fnmatch.fnmatch(pkg.name, exclude)] + + # Convert to a sorted NVR list for installation + pkgnvrs = sorted(["{}-{}-{}".format(pkg.name, pkg.version, pkg.release) for pkg in pkgnames]) + + # If the request is a glob, expand it in the log + if any(g for g in ['*','?','.'] if g in p): + logger.info("installpkg: %s expands to %s", p, ",".join(pkgnvrs)) + + for pkgnvr in pkgnvrs: + try: + self.dbo.install(pkgnvr) + except Exception as e: # pylint: disable=broad-except + if required: + raise + # Not required, log it and continue processing pkgs + logger.error("installpkg %s failed: %s", pkgnvr, str(e)) + except Exception as e: # pylint: disable=broad-except + logger.error("installpkg %s failed: %s", p, str(e)) + errors = True + + if errors and required: + raise Exception("Required installpkg failed.")
+ +
[docs] def removepkg(self, *pkgs): + ''' + removepkg PKGGLOB [PKGGLOB...] + Delete the named package(s). + + IMPLEMENTATION NOTES: + RPM scriptlets (%preun/%postun) are *not* run. + Files are deleted, but directories are left behind. + ''' + for p in pkgs: + filepaths = [f.lstrip('/') for f in self._filelist(p)] + # TODO: also remove directories that aren't owned by anything else + if filepaths: + logger.debug("removepkg %s: %ikb", p, self._getsize(*filepaths)/1024) + self.remove(*filepaths) + else: + logger.debug("removepkg %s: no files to remove!", p)
+ +
[docs] def run_pkg_transaction(self): + ''' + run_pkg_transaction + Actually install all the packages requested by previous 'installpkg' + commands. + ''' + try: + logger.info("Checking dependencies") + self.dbo.resolve() + except dnf.exceptions.DepsolveError as e: + logger.error("Dependency check failed: %s", e) + raise + logger.info("%d packages selected", len(self.dbo.transaction)) + if len(self.dbo.transaction) == 0: + raise Exception("No packages in transaction") + + # Write out the packages installed, including debuginfo packages + self._write_package_log() + + pkgs_to_download = self.dbo.transaction.install_set + logger.info("Downloading packages") + progress = LoraxDownloadCallback() + try: + self.dbo.download_packages(pkgs_to_download, progress) + except dnf.exceptions.DownloadError as e: + logger.error("Failed to download the following packages: %s", e) + raise + + logger.info("Preparing transaction from installation source") + try: + display = LoraxRpmCallback() + self.dbo.do_transaction(display=display) + except BaseException as e: + logger.error("The transaction process has ended abruptly: %s", e) + raise + + # Reset the package sack to pick up the installed packages + self.dbo.reset(repos=False) + self.dbo.fill_sack(load_system_repo=True, load_available_repos=False) + + # At this point dnf should know about the installed files. Double check that it really does. + if len(self._filelist("anaconda-core")) == 0: + raise Exception("Failed to reset dbo to installed package set")
+ +
[docs] def removefrom(self, pkg, *globs): + ''' + removefrom PKGGLOB [--allbut] FILEGLOB [FILEGLOB...] + Remove all files matching the given file globs from the package + (or packages) named. + If '--allbut' is used, all the files from the given package(s) will + be removed *except* the ones which match the file globs. + + Examples: + removefrom usbutils /usr/bin/* + removefrom xfsprogs --allbut /sbin/* + ''' + cmd = "%s %s" % (pkg, " ".join(globs)) # save for later logging + keepmatches = False + if globs[0] == '--allbut': + keepmatches = True + globs = globs[1:] + # get pkg filelist and find files that match the globs + filelist = self._filelist(pkg) + matches = set() + for g in globs: + globs_re = re.compile(fnmatch.translate(g)) + m = [f for f in filelist if globs_re.match(f)] + if m: + matches.update(m) + else: + logger.debug("removefrom %s %s: no files matched!", pkg, g) + # are we removing the matches, or keeping only the matches? + if keepmatches: + remove_files = filelist.difference(matches) + else: + remove_files = matches + # remove the files + if remove_files: + logger.debug("removefrom %s: removed %i/%i files, %ikb/%ikb", cmd, + len(remove_files), len(filelist), + self._getsize(*remove_files)/1024, self._getsize(*filelist)/1024) + self.remove(*remove_files) + else: + logger.debug("removefrom %s: no files to remove!", cmd)
+ + # pylint: disable=anomalous-backslash-in-string +
[docs] def removekmod(self, *globs): + ''' + removekmod GLOB [GLOB...] [--allbut] KEEPGLOB [KEEPGLOB...] + Remove all files and directories matching the given file globs from the kernel + modules directory. + + If '--allbut' is used, all the files from the modules will be removed *except* + the ones which match the file globs. There must be at least one initial GLOB + to search and one KEEPGLOB to keep. The KEEPGLOB is expanded to be *KEEPGLOB* + so that it will match anywhere in the path. + + This only removes files from under /lib/modules/\\*/kernel/ + + Examples: + removekmod sound drivers/media drivers/hwmon drivers/video + removekmod drivers/char --allbut virtio_console hw_random + ''' + cmd = " ".join(globs) + if "--allbut" in globs: + idx = globs.index("--allbut") + if idx == 0: + raise ValueError("removekmod needs at least one GLOB before --allbut") + + # Apply keepglobs anywhere they appear in the path + keepglobs = globs[idx+1:] + if len(keepglobs) == 0: + raise ValueError("removekmod needs at least one GLOB after --allbut") + + globs = globs[:idx] + else: + # Nothing to keep + keepglobs = [] + + filelist = set() + for g in globs: + for top_dir in rglob(self._out("/lib/modules/*/kernel/"+g)): + for root, _dirs, files in os.walk(top_dir): + filelist.update(root+"/"+f for f in files) + + # Remove anything matching keepglobs from the list + matches = set() + for g in keepglobs: + globs_re = re.compile(fnmatch.translate("*"+g+"*")) + m = [f for f in filelist if globs_re.match(f)] + if m: + matches.update(m) + else: + logger.debug("removekmod %s: no files matched!", g) + remove_files = filelist.difference(matches) + + if remove_files: + logger.debug("removekmod: removing %d files", len(remove_files)) + list(remove(f) for f in remove_files) + else: + logger.debug("removekmod %s: no files to remove!", cmd)
+ +
[docs] def createaddrsize(self, addr, src, dest): + ''' + createaddrsize INITRD_ADDRESS INITRD ADDRSIZE + Create the initrd.addrsize file required in LPAR boot process. + + Examples: + createaddrsize ${INITRD_ADDRESS} ${outroot}/${BOOTDIR}/initrd.img ${outroot}/${BOOTDIR}/initrd.addrsize + ''' + addrsize = open(dest, "wb") + addrsize_data = struct.pack(">iiii", 0, int(addr, 16), 0, os.stat(src).st_size) + addrsize.write(addrsize_data) + addrsize.close()
+ +
[docs] def systemctl(self, cmd, *units): + ''' + systemctl [enable|disable|mask] UNIT [UNIT...] + Enable, disable, or mask the given systemd units. + + Examples: + systemctl disable lvm2-monitor.service + systemctl mask fedora-storage-init.service fedora-configure.service + ''' + if cmd not in ('enable', 'disable', 'mask'): + raise ValueError('unsupported systemctl cmd: %s' % cmd) + if not units: + logger.debug("systemctl: no units given for %s, ignoring", cmd) + return + self.mkdir("/run/systemd/system") # XXX workaround for systemctl bug + systemctl = ['systemctl', '--root', self.outroot, '--no-reload', cmd] + # When a unit doesn't exist systemd aborts the command. Run them one at a time. + # XXX for some reason 'systemctl enable/disable' always returns 1 + for unit in units: + try: + cmd = systemctl + [unit] + runcmd(cmd) + except CalledProcessError: + pass
+ +
[docs]class LiveTemplateRunner(TemplateRunner): + """ + This class parses and executes a limited Lorax template. Sample usage: + + # install a bunch of packages + runner = LiveTemplateRunner(dbo, templatedir, defaults) + runner.run("live-install.tmpl") + + It is meant to be used with the live-install.tmpl which lists the per-arch + packages needed to build the live-iso output. + """ + def __init__(self, dbo, fatalerrors=True, templatedir=None, defaults=None): + self.dbo = dbo + self.pkgs = [] + self.pkgnames = [] + + super(LiveTemplateRunner, self).__init__(fatalerrors, templatedir, defaults) +
[docs] def installpkg(self, *pkgs): + ''' + installpkg [--required|--optional] [--except PKGGLOB [--except PKGGLOB ...]] PKGGLOB [PKGGLOB ...] + Request installation of all packages matching the given globs. + Note that this is just a *request* - nothing is *actually* installed + until the 'run_pkg_transaction' command is given. + + --required is now the default. If the PKGGLOB can be missing pass --optional + ''' + if pkgs[0] == '--optional': + pkgs = pkgs[1:] + required = False + elif pkgs[0] == '--required': + pkgs = pkgs[1:] + required = True + else: + required = True + + excludes = [] + while '--except' in pkgs: + idx = pkgs.index('--except') + if len(pkgs) == idx+1: + raise ValueError("installpkg needs an argument after --except") + + excludes.append(pkgs[idx+1]) + pkgs = pkgs[:idx] + pkgs[idx+2:] + + errors = False + for p in pkgs: + try: + # Start by using Subject to generate a package query, which will + # give us a query object similar to what dbo.install would select, + # minus the handling for multilib. This query may contain + # multiple arches. Pull the package names out of that, filter any + # that match the excludes patterns, and pass those names back to + # dbo.install to do the actual, arch and version and multilib + # aware, package selction. + + # dnf queries don't have a concept of negative globs which is why + # the filtering is done the hard way. + + pkgnames = [pkg for pkg in dnf.subject.Subject(p).get_best_query(self.dbo.sack).filter(latest=True)] + if not pkgnames: + raise dnf.exceptions.PackageNotFoundError("no package matched", p) + + # Apply excludes to the name only + for exclude in excludes: + pkgnames = [pkg for pkg in pkgnames if not fnmatch.fnmatch(pkg.name, exclude)] + + # Convert to a sorted NVR list for installation + pkgnvrs = sorted(["{}-{}-{}".format(pkg.name, pkg.version, pkg.release) for pkg in pkgnames]) + + # If the request is a glob, expand it in the log + if any(g for g in ['*','?','.'] if g in p): + logger.info("installpkg: %s expands to %s", p, ",".join(pkgnvrs)) + + self.pkgs.extend(pkgnvrs) + self.pkgnames.extend([pkg.name for pkg in pkgnames]) + except Exception as e: # pylint: disable=broad-except + logger.error("installpkg %s failed: %s", p, str(e)) + errors = True + + if errors and required: + raise Exception("Required installpkg failed.")
+
\ No newline at end of file diff --git a/f34-branch/_modules/pylorax/monitor.html b/f34-branch/_modules/pylorax/monitor.html new file mode 100644 index 00000000..ff020500 --- /dev/null +++ b/f34-branch/_modules/pylorax/monitor.html @@ -0,0 +1,411 @@ + pylorax.monitor — Lorax 34.10 documentation

Source code for pylorax.monitor

+# monitor.py
+#
+# Copyright (C) 2011-2015  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s): Brian C. Lane <bcl@redhat.com>
+#
+import logging
+log = logging.getLogger("livemedia-creator")
+
+import re
+import socket
+import socketserver
+import threading
+import time
+
+
[docs]class LogRequestHandler(socketserver.BaseRequestHandler): + """ + Handle monitoring and saving the logfiles from the virtual install + + Incoming data is written to self.server.log_path and each line is checked + for patterns that would indicate that the installation failed. + self.server.log_error is set True when this happens. + """ + + simple_tests = [ + "Traceback (", + "traceback script(s) have been run", + "Out of memory:", + "Call Trace:", + "insufficient disk space:", + "Not enough disk space to download the packages", + "error populating transaction after", + "crashed on signal", + "packaging: Missed: NoSuchPackage", + "packaging: Installation failed", + "The following error occurred while installing. This is a fatal error", + "Error in POSTIN scriptlet in rpm package" + ] + + re_tests = [ + r"packaging: base repo .* not valid", + r"packaging: .* requires .*" + ] + +
[docs] def setup(self): + """Start writing to self.server.log_path""" + + if self.server.log_path: + self.fp = open(self.server.log_path, "w") # pylint: disable=attribute-defined-outside-init + else: + self.fp = None + self.request.settimeout(10)
+ +
[docs] def handle(self): + """ + Write incoming data to a logfile and check for errors + + Split incoming data into lines and check for any Tracebacks or other + errors that indicate that the install failed. + + Loops until self.server.kill is True + """ + log.info("Processing logs from %s", self.client_address) + line = "" + while True: + if self.server.kill: + break + + try: + data = str(self.request.recv(4096), "utf8") + if self.fp: + self.fp.write(data) + self.fp.flush() + + # check the data for errors and set error flag + # need to assemble it into lines so we can test for the error + # string. + while data: + more = data.split("\n", 1) + line += more[0] + if len(more) > 1: + self.iserror(line) + line = "" + data = more[1] + else: + data = None + + except socket.timeout: + pass + except Exception as e: # pylint: disable=broad-except + log.info("log processing killed by exception: %s", e) + break
+ +
[docs] def finish(self): + log.info("Shutting down log processing") + self.request.close() + if self.fp: + self.fp.close()
+ +
[docs] def iserror(self, line): + """ + Check a line to see if it contains an error indicating installation failure + + :param str line: log line to check for failure + + If the line contains IGNORED it will be skipped. + """ + if "IGNORED" in line: + return + + for t in self.simple_tests: + if t in line: + self.server.log_error = True + self.server.error_line = line + return + for t in self.re_tests: + if re.search(t, line): + self.server.log_error = True + self.server.error_line = line + return
+ + +
[docs]class LogServer(socketserver.TCPServer): + """A TCP Server that listens for log data""" + + # Number of seconds to wait for a connection after startup + timeout = 60 + + def __init__(self, log_path, *args, **kwargs): + """ + Setup the log server + + :param str log_path: Path to the log file to write + """ + self.kill = False + self.log_error = False + self.error_line = "" + self.log_path = log_path + self._timeout = kwargs.pop("timeout", None) + if self._timeout: + self._start_time = time.time() + socketserver.TCPServer.__init__(self, *args, **kwargs) + +
[docs] def log_check(self): + """ + Check to see if an error has been found in the log + + :returns: True if there has been an error + :rtype: bool + """ + if self._timeout: + taking_too_long = time.time() > self._start_time + (self._timeout * 60) + if taking_too_long: + log.error("Canceling installation due to timeout") + else: + taking_too_long = False + return self.log_error or taking_too_long
+ + +
[docs]class LogMonitor(object): + """ + Setup a server to monitor the logs output by the installation + + This needs to be running before virt-install runs; the install expects + a listener on the port used for the virtio log channel. + """ + def __init__(self, log_path=None, host="localhost", port=0, timeout=None, log_request_handler_class=LogRequestHandler): + """ + Start a thread to monitor the logs. + + :param str log_path: Path to the logfile to write + :param str host: Host to bind to. Default is localhost. + :param int port: Port to listen to or 0 to pick a port + :param int timeout: Optional timeout, in minutes, after which log_check() reports an error + :param log_request_handler_class: Class used to handle the log data, defaults to LogRequestHandler + + If 0 is passed for the port the dynamically assigned port will be + available as self.port + + If log_path isn't set then it only monitors the logs, instead of + also writing them to disk. + """ + self.server = LogServer(log_path, (host, port), log_request_handler_class, timeout=timeout) + self.host, self.port = self.server.server_address + self.log_path = log_path + self.server_thread = threading.Thread(target=self.server.handle_request) + self.server_thread.daemon = True + self.server_thread.start() +
[docs] def shutdown(self): + """Force shutdown of the monitoring thread""" + self.server.kill = True + self.server_thread.join()
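A minimal sketch of how LogMonitor might be used on its own; the log path and timeout are illustrative, and the timeout is in minutes (see LogServer.log_check above):

from pylorax.monitor import LogMonitor

monitor = LogMonitor(log_path="/tmp/virt-install.log", timeout=60)
print("log server listening on %s:%d" % (monitor.host, monitor.port))
# Point the virt-install/qemu virtio log channel at that host:port, then
# poll log_check() while the install runs.
if monitor.server.log_check():
    print("install failed:", monitor.server.error_line)
monitor.shutdown()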
+
+ +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/f34-branch/_modules/pylorax/mount.html b/f34-branch/_modules/pylorax/mount.html new file mode 100644 index 00000000..87968291 --- /dev/null +++ b/f34-branch/_modules/pylorax/mount.html @@ -0,0 +1,206 @@ + + + + + + pylorax.mount — Lorax 34.13 documentation + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for pylorax.mount

+# mount.py
+#
+# Copyright (C) 2011-2015  Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s): Brian C. Lane <bcl@redhat.com>
+#
+import logging
+log = logging.getLogger("livemedia-creator")
+
+import os
+import pycdlib
+from pycdlib.pycdlibexception import PyCdlibException
+
+from pylorax.imgutils import mount, umount
+
+
[docs]class IsoMountpoint(object): + """ + Mount the iso and check to make sure the vmlinuz and initrd.img files exist + + Also check the iso for a stage2 image, set a flag, and extract the + iso's label. + + stage2 can be either LiveOS/squashfs.img or images/install.img + """ + def __init__(self, iso_path, initrd_path=None): + """ + Mount the iso + + :param str iso_path: Path to the iso to mount + :param str initrd_path: Optional path to initrd + + initrd_path can be used to point to a tree with a newer + initrd.img than the iso has. The iso is still used for stage2. + + self.kernel and self.initrd point to the kernel and initrd. + self.stage2 is set to True if there is a stage2 image. + self.repo is the path to the mounted iso if there is a /repodata dir. + """ + self.label = None + self.iso_path = iso_path + self.initrd_path = initrd_path + + if not self.initrd_path: + self.mount_dir = mount(self.iso_path, opts="loop") + else: + self.mount_dir = self.initrd_path + + kernel_list = [("/isolinux/vmlinuz", "/isolinux/initrd.img"), + ("/ppc/ppc64/vmlinuz", "/ppc/ppc64/initrd.img"), + ("/images/pxeboot/vmlinuz", "/images/pxeboot/initrd.img")] + + if os.path.isdir(self.mount_dir+"/repodata"): + self.repo = self.mount_dir + else: + self.repo = None + self.stage2 = os.path.exists(self.mount_dir+"/LiveOS/squashfs.img") or \ + os.path.exists(self.mount_dir+"/images/install.img") + + try: + for kernel, initrd in kernel_list: + if (os.path.isfile(self.mount_dir+kernel) and + os.path.isfile(self.mount_dir+initrd)): + self.kernel = self.mount_dir+kernel + self.initrd = self.mount_dir+initrd + break + else: + raise Exception("Missing kernel and initrd file in iso, failed" + " to search under: {0}".format(kernel_list)) + except: + self.umount() + raise + + self.get_iso_label() +
[docs] def umount(self): + """Unmount the iso""" + if not self.initrd_path: + umount(self.mount_dir)
+ +
[docs] def get_iso_label(self): + """ + Get the iso's label using isoinfo + + Sets self.label if one is found + """ + try: + iso = pycdlib.PyCdlib() + iso.open(self.iso_path) + self.label = iso.pvd.volume_identifier.decode("UTF-8").strip() + except PyCdlibException as e: + log.error("Problem reading label from %s: %s", self.iso_path, e)
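A minimal usage sketch; the iso path is illustrative and the loop mount requires root:

from pylorax.mount import IsoMountpoint

iso = IsoMountpoint("/var/tmp/boot.iso")
try:
    print("label: ", iso.label)
    print("kernel:", iso.kernel)
    print("initrd:", iso.initrd)
    print("stage2:", iso.stage2)
finally:
    iso.umount()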
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/f34-branch/_modules/pylorax/sysutils.html b/f34-branch/_modules/pylorax/sysutils.html new file mode 100644 index 00000000..88816ecc --- /dev/null +++ b/f34-branch/_modules/pylorax/sysutils.html @@ -0,0 +1,367 @@ + + + + + + + + + + pylorax.sysutils — Lorax 34.10 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.sysutils

+#
+# sysutils.py
+#
+# Copyright (C) 2009-2019 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#
+
+__all__ = ["joinpaths", "touch", "replace", "chown_", "chmod_", "remove",
+           "linktree"]
+
+import sys
+import os
+import re
+import fileinput
+import pwd
+import grp
+import glob
+import shutil
+import shlex
+from configparser import ConfigParser
+
+from pylorax.executils import runcmd
+
+
[docs]def joinpaths(*args, **kwargs): + path = os.path.sep.join(args) + + if kwargs.get("follow_symlinks"): + return os.path.realpath(path) + else: + return path
+ + +
[docs]def touch(fname): + with open(fname, "w") as f: + f.write("")
+ + +
[docs]def replace(fname, find, sub): + fin = fileinput.input(fname, inplace=1) + pattern = re.compile(find) + + for line in fin: + line = pattern.sub(sub, line) + sys.stdout.write(line) + + fin.close()
+ + +
[docs]def chown_(path, user=None, group=None, recursive=False): + uid = gid = -1 + + if user is not None: + uid = pwd.getpwnam(user)[2] + if group is not None: + gid = grp.getgrnam(group)[2] + + for fname in glob.iglob(path): + os.chown(fname, uid, gid) + + if recursive and os.path.isdir(fname): + for nested in os.listdir(fname): + nested = joinpaths(fname, nested) + chown_(nested, user, group, recursive)
+ + +
[docs]def chmod_(path, mode, recursive=False): + for fname in glob.iglob(path): + os.chmod(fname, mode) + + if recursive and os.path.isdir(fname): + for nested in os.listdir(fname): + nested = joinpaths(fname, nested) + chmod_(nested, mode, recursive)
+ + +def cpfile(src, dst): + shutil.copy2(src, dst) + if os.path.isdir(dst): + dst = joinpaths(dst, os.path.basename(src)) + + return dst + +def mvfile(src, dst): + if os.path.isdir(dst): + dst = joinpaths(dst, os.path.basename(src)) + os.rename(src, dst) + return dst + +
[docs]def remove(target): + if os.path.isdir(target) and not os.path.islink(target): + shutil.rmtree(target) + else: + os.unlink(target)
+ +
[docs]def linktree(src, dst): + runcmd(["/bin/cp", "-alx", src, dst])
+ +def unquote(s): + return ' '.join(shlex.split(s)) + +class UnquotingConfigParser(ConfigParser): + """A ConfigParser, only with unquoting of the values.""" + # pylint: disable=arguments-differ + def get(self, *args, **kwargs): + ret = super().get(*args, **kwargs) + if ret: + ret = unquote(ret) + return ret + +def flatconfig(filename): + """Use UnquotingConfigParser to read a flat config file (without + section headers) by adding a section header. + """ + with open (filename, 'r') as conffh: + conftext = "[main]\n" + conffh.read() + config = UnquotingConfigParser() + config.read_string(conftext) + return config['main'] + +def read_tail(path, size): + """Read up to `size` kibibytes from the end of a file""" + + # NOTE: In py3 text files are unicode, not bytes so we have to open it as bytes + with open(path, "rb") as f: + return _read_file_end(f, size) + +def _read_file_end(f, size): + """Read the end of a file + + This skips to the next line to avoid starting in the middle of a unicode character. + And returns "" in the case of a UnicodeDecodeError + """ + f.seek(0, 2) + end = f.tell() + if end < 1024 * size: + f.seek(0, 0) + else: + f.seek(end - (1024 * size)) + data = f.read() + try: + # Find the first newline in the block + newline = min(1+data.find(b'\n'), len(data)) + text = data[newline:].decode("UTF-8") + except UnicodeDecodeError: + return "" + return text +
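A minimal sketch of the flatconfig and read_tail helpers; /etc/os-release is just an example of a flat key=value file, and the log path and size are illustrative:

from pylorax.sysutils import flatconfig, read_tail

conf = flatconfig("/etc/os-release")
print(conf.get("PRETTY_NAME"))           # values come back with quotes stripped

print(read_tail("/var/log/dnf.log", 4))  # last 4 KiB, starting at a line boundary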
+ +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/f34-branch/_modules/pylorax/treebuilder.html b/f34-branch/_modules/pylorax/treebuilder.html new file mode 100644 index 00000000..cadcd20d --- /dev/null +++ b/f34-branch/_modules/pylorax/treebuilder.html @@ -0,0 +1,630 @@ + + + + + + + + + + pylorax.treebuilder — Lorax 34.10 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.treebuilder

+# treebuilder.py - handle arch-specific tree building stuff using templates
+#
+# Copyright (C) 2011-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s):  Will Woods <wwoods@redhat.com>
+
+import logging
+logger = logging.getLogger("pylorax.treebuilder")
+
+import os, re
+from os.path import basename
+from shutil import copytree, copy2
+from subprocess import CalledProcessError
+from pathlib import Path
+import itertools
+
+from pylorax.sysutils import joinpaths, remove
+from pylorax.base import DataHolder
+from pylorax.ltmpl import LoraxTemplateRunner
+import pylorax.imgutils as imgutils
+from pylorax.executils import runcmd, runcmd_output, execWithCapture
+
+templatemap = {
+    'i386':    'x86.tmpl',
+    'x86_64':  'x86.tmpl',
+    'ppc64le': 'ppc64le.tmpl',
+    's390':    's390.tmpl',
+    's390x':   's390.tmpl',
+    'aarch64': 'aarch64.tmpl',
+    'arm':     'arm.tmpl',
+    'armhfp':  'arm.tmpl',
+}
+
+
[docs]def generate_module_info(moddir, outfile=None): + def module_desc(mod): + output = runcmd_output(["modinfo", "-F", "description", mod]) + return output.strip() + def read_module_set(name): + return set(l.strip() for l in open(joinpaths(moddir,name)) if ".ko" in l) + modsets = {'scsi':read_module_set("modules.block"), + 'eth':read_module_set("modules.networking")} + + modinfo = list() + for root, _dirs, files in os.walk(moddir): + for modtype, modset in modsets.items(): + for mod in modset.intersection(files): # modules in this dir + (name, _ext) = os.path.splitext(mod) # foo.ko -> (foo, .ko) + desc = module_desc(joinpaths(root,mod)) or "%s driver" % name + modinfo.append(dict(name=name, type=modtype, desc=desc)) + + out = open(outfile or joinpaths(moddir,"module-info"), "w") + out.write("Version 0\n") + for mod in sorted(modinfo, key=lambda m: m.get('name')): + out.write('{name}\n\t{type}\n\t"{desc:.65}"\n'.format(**mod))
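A minimal sketch of calling generate_module_info directly; the kernel version and paths are illustrative, and moddir must already contain the modules.block and modules.networking lists:

from pylorax.treebuilder import generate_module_info

generate_module_info("/var/tmp/lorax/installroot/lib/modules/5.11.12-300.fc34.x86_64",
                     outfile="/var/tmp/lorax/module-info")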
+ +
[docs]class RuntimeBuilder(object): + '''Builds the anaconda runtime image.''' + def __init__(self, product, arch, dbo, templatedir=None, + installpkgs=None, excludepkgs=None, + add_templates=None, + add_template_vars=None, + skip_branding=False): + root = dbo.conf.installroot + # use a copy of product so we can modify it locally + product = product.copy() + product.name = product.name.lower() + self.vars = DataHolder(arch=arch, product=product, dbo=dbo, root=root, + basearch=arch.basearch, libdir=arch.libdir) + self.dbo = dbo + self._runner = LoraxTemplateRunner(inroot=root, outroot=root, + dbo=dbo, templatedir=templatedir) + self.add_templates = add_templates or [] + self.add_template_vars = add_template_vars or {} + self._installpkgs = installpkgs or [] + self._excludepkgs = excludepkgs or [] + self._runner.defaults = self.vars + self.dbo.reset() + self._skip_branding = skip_branding + + def _install_branding(self): + """Select the branding from the available 'system-release' packages. + The *best* way to control this is to have a single package in the repo provide 'system-release'. + When there is more than one package it will: + - Make a list of the available packages + - If variant is set, look for a package ending with lower(variant) and use that + - If there are one or more non-generic packages, use the first one after sorting + """ + if self._skip_branding: + return + + release = None + q = self.dbo.sack.query() + a = q.available() + pkgs = sorted([p.name for p in a.filter(provides='system-release') + if not p.name.startswith("generic")]) + if not pkgs: + logger.error("No system-release packages found, could not get the release") + return + + logger.debug("system-release packages: %s", pkgs) + if self.vars.product.variant: + variant = [p for p in pkgs if p.endswith("-"+self.vars.product.variant.lower())] + if variant: + release = variant[0] + if not release: + release = pkgs[0] + + # release + logger.info('got release: %s', release) + self._runner.installpkg(release) + + # logos + release, _suffix = release.split('-', 1) + self._runner.installpkg('%s-logos' % release) +
[docs] def install(self): + '''Install packages and do initial setup with runtime-install.tmpl''' + self._install_branding() + if len(self._installpkgs) > 0: + self._runner.installpkg(*self._installpkgs) + if len(self._excludepkgs) > 0: + self._runner.removepkg(*self._excludepkgs) + self._runner.run("runtime-install.tmpl") + for tmpl in self.add_templates: + self._runner.run(tmpl, **self.add_template_vars)
+ +
[docs] def writepkglists(self, pkglistdir): + '''debugging data: write out lists of package contents''' + if not os.path.isdir(pkglistdir): + os.makedirs(pkglistdir) + q = self.dbo.sack.query() + for pkgobj in q.installed(): + with open(joinpaths(pkglistdir, pkgobj.name), "w") as fobj: + for fname in pkgobj.files: + fobj.write("{0}\n".format(fname))
+ +
[docs] def postinstall(self): + '''Do some post-install setup work with runtime-postinstall.tmpl''' + # copy configdir into runtime root beforehand + configdir = joinpaths(self._runner.templatedir,"config_files") + configdir_path = "tmp/config_files" + fullpath = joinpaths(self.vars.root, configdir_path) + if os.path.exists(fullpath): + remove(fullpath) + copytree(configdir, fullpath) + self._runner.run("runtime-postinstall.tmpl", configdir=configdir_path)
+ +
[docs] def cleanup(self): + '''Remove unneeded packages and files with runtime-cleanup.tmpl''' + self._runner.run("runtime-cleanup.tmpl")
+ +
[docs] def verify(self): + '''Ensure that contents of the installroot can run''' + status = True + + ELF_MAGIC = b'\x7fELF' + + # Iterate over all files in /usr/bin and /usr/sbin + # For ELF files, gather them into a list and we'll check them all at + # the end. For files with a #!, check them as we go + elf_files = [] + usr_bin = Path(self.vars.root + '/usr/bin') + usr_sbin = Path(self.vars.root + '/usr/sbin') + for path in (str(x) for x in itertools.chain(usr_bin.iterdir(), usr_sbin.iterdir()) \ + if x.is_file()): + with open(path, "rb") as f: + magic = f.read(4) + if magic == ELF_MAGIC: + # Save the path, minus the chroot prefix + elf_files.append(path[len(self.vars.root):]) + elif magic[:2] == b'#!': + # Reopen the file as text and read the first line. + # Open as latin-1 so that stray 8-bit characters don't make + # things blow up. We only really care about ASCII parts. + with open(path, "rt", encoding="latin-1") as f_text: + # Remove the #!, split on space, and take the first part + shabang = f_text.readline()[2:].split()[0] + + # Does the path exist? + if not os.path.exists(self.vars.root + shabang): + logger.error('%s, needed by %s, does not exist', shabang, path) + status = False + + # Now, run ldd on all the ELF files + # Just run ldd once on everything so it isn't logged a million times. + # At least one thing in the list isn't going to be a dynamic executable, + # so use execWithCapture to ignore the exit code. + filename = '' + for line in execWithCapture('ldd', elf_files, root=self.vars.root, + log_output=False, filter_stderr=True).split('\n'): + if line and not line[0].isspace(): + # New filename header, strip the : at the end and save + filename = line[:-1] + elif 'not found' in line: + logger.error('%s, needed by %s, not found', line.split()[0], filename) + status = False + + return status
+ +
[docs] def writepkgsizes(self, pkgsizefile): + '''debugging data: write a big list of pkg sizes''' + fobj = open(pkgsizefile, "w") + getsize = lambda f: os.lstat(f).st_size if os.path.exists(f) else 0 + q = self.dbo.sack.query() + for p in sorted(q.installed()): + pkgsize = sum(getsize(joinpaths(self.vars.root,f)) for f in p.files) + fobj.write("{0.name}.{0.arch}: {1}\n".format(p, pkgsize))
+ +
[docs] def generate_module_data(self): + root = self.vars.root + moddir = joinpaths(root, "lib/modules/") + for kernel in findkernels(root=root): + ksyms = joinpaths(root, "boot/System.map-%s" % kernel.version) + logger.info("doing depmod and module-info for %s", kernel.version) + runcmd(["depmod", "-a", "-F", ksyms, "-b", root, kernel.version]) + generate_module_info(moddir+kernel.version, outfile=moddir+"module-info")
+ +
[docs] def create_squashfs_runtime(self, outfile="/var/tmp/squashfs.img", compression="xz", compressargs=None, size=2): + """Create a plain squashfs runtime""" + compressargs = compressargs or [] + os.makedirs(os.path.dirname(outfile)) + + # squash the rootfs + return imgutils.mksquashfs(self.vars.root, outfile, compression, compressargs)
+ +
[docs] def create_ext4_runtime(self, outfile="/var/tmp/squashfs.img", compression="xz", compressargs=None, size=2): + """Create a squashfs compressed ext4 runtime""" + # make live rootfs image - must be named "LiveOS/rootfs.img" for dracut + compressargs = compressargs or [] + workdir = joinpaths(os.path.dirname(outfile), "runtime-workdir") + os.makedirs(joinpaths(workdir, "LiveOS")) + + # Catch problems with the rootfs being too small and clearly log them + try: + imgutils.mkrootfsimg(self.vars.root, joinpaths(workdir, "LiveOS/rootfs.img"), + "Anaconda", size=size) + except CalledProcessError as e: + if e.stdout and "No space left on device" in e.stdout: + logger.error("The rootfs ran out of space with size=%d", size) + raise + + # squash the live rootfs and clean up workdir + rc = imgutils.mksquashfs(workdir, outfile, compression, compressargs) + remove(workdir) + return rc
+ +
[docs] def finished(self): + """ Done using RuntimeBuilder + + Close the dnf base object + """ + self.dbo.close()
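A rough sketch of a typical RuntimeBuilder lifecycle; product, arch and dbo (the product DataHolder, ArchData and configured dnf.Base) are assumed to have been prepared elsewhere, and the output path is illustrative:

rb = RuntimeBuilder(product, arch, dbo)
rb.install()                   # runtime-install.tmpl
rb.postinstall()               # runtime-postinstall.tmpl
rb.cleanup()                   # runtime-cleanup.tmpl
if not rb.verify():
    raise RuntimeError("installroot verification failed")
rb.generate_module_data()
rb.create_squashfs_runtime("/var/tmp/lorax/squashfs.img")
rb.finished()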
+ +
[docs]class TreeBuilder(object): + '''Builds the arch-specific boot images. + inroot should be the installtree root (the newly-built runtime dir)''' + def __init__(self, product, arch, inroot, outroot, runtime, isolabel, domacboot=True, doupgrade=True, + templatedir=None, add_templates=None, add_template_vars=None, workdir=None, extra_boot_args=""): + + # NOTE: if you pass an arg named "runtime" to a mako template it'll + # clobber some mako internal variables - hence "runtime_img". + self.vars = DataHolder(arch=arch, product=product, runtime_img=runtime, + runtime_base=basename(runtime), + inroot=inroot, outroot=outroot, + basearch=arch.basearch, libdir=arch.libdir, + isolabel=isolabel, udev=udev_escape, domacboot=domacboot, doupgrade=doupgrade, + workdir=workdir, lower=string_lower, + extra_boot_args=extra_boot_args) + self._runner = LoraxTemplateRunner(inroot, outroot, templatedir=templatedir) + self._runner.defaults = self.vars + self.add_templates = add_templates or [] + self.add_template_vars = add_template_vars or {} + self.templatedir = templatedir + self.treeinfo_data = None + + @property + def kernels(self): + return findkernels(root=self.vars.inroot) + +
[docs] def rebuild_initrds(self, add_args=None, backup="", prefix=""): + '''Rebuild all the initrds in the tree. If backup is specified, each + initrd will be renamed with backup as a suffix before rebuilding. + If backup is empty, the existing initrd files will be overwritten. + If prefix is specified, the existing initrd is untouched and a new + image is built with the filename "${prefix}-${kernel.version}.img" + + If the initrd doesn't exist, its name will be created based on the + name of the kernel. + ''' + add_args = add_args or [] + dracut = ["dracut", "--nomdadmconf", "--nolvmconf"] + add_args + if not backup: + dracut.append("--force") + + if not self.kernels: + raise Exception("No kernels found, cannot rebuild_initrds") + + for kernel in self.kernels: + if prefix: + idir = os.path.dirname(kernel.path) + outfile = joinpaths(idir, prefix+'-'+kernel.version+'.img') + elif hasattr(kernel, "initrd"): + # If there is an existing initrd, use that + outfile = kernel.initrd.path + else: + # Construct an initrd from the kernel name + outfile = kernel.path.replace("vmlinuz-", "initrd-") + ".img" + logger.info("rebuilding %s", outfile) + logger.info("dracut warnings about /proc are safe to ignore") + + if backup: + initrd = joinpaths(self.vars.inroot, outfile) + if os.path.exists(initrd): + os.rename(initrd, initrd + backup) + cmd = dracut + [outfile, kernel.version] + runcmd(cmd, root=self.vars.inroot)
+ +
[docs] def build(self): + templatefile = templatemap[self.vars.arch.basearch] + for tmpl in self.add_templates: + self._runner.run(tmpl, **self.add_template_vars) + self._runner.run(templatefile, kernels=self.kernels) + self.treeinfo_data = self._runner.results.treeinfo + self.implantisomd5()
+ +
[docs] def implantisomd5(self): + for _section, data in self.treeinfo_data.items(): + if 'boot.iso' in data: + iso = joinpaths(self.vars.outroot, data['boot.iso']) + runcmd(["implantisomd5", iso])
+ + @property + def dracut_hooks_path(self): + """ Return the path to the lorax dracut hooks scripts + + Use the configured share dir if it is setup, + otherwise default to /usr/share/lorax/dracut_hooks + """ + if self.templatedir: + return joinpaths(self.templatedir, "dracut_hooks") + else: + return "/usr/share/lorax/dracut_hooks" + +
[docs] def copy_dracut_hooks(self, hooks): + """ Copy the hook scripts in hooks into the installroot's /tmp/ + and return a list of commands to pass to dracut when creating the + initramfs + + hooks is a list of tuples with the name of the hook script and the + target dracut hook directory + (eg. [("99anaconda-copy-ks.sh", "/lib/dracut/hooks/pre-pivot")]) + """ + dracut_commands = [] + for hook_script, dracut_path in hooks: + src = joinpaths(self.dracut_hooks_path, hook_script) + if not os.path.exists(src): + logger.error("Missing lorax dracut hook script %s", (src)) + continue + dst = joinpaths(self.vars.inroot, "/tmp/", hook_script) + copy2(src, dst) + dracut_commands += ["--include", joinpaths("/tmp/", hook_script), + dracut_path] + return dracut_commands
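A rough sketch of driving TreeBuilder; the arguments mirror the constructor above, product and arch are assumed to be prepared elsewhere, and the paths and label are illustrative:

tb = TreeBuilder(product, arch,
                 inroot="/var/tmp/lorax/installroot",
                 outroot="/var/tmp/lorax/output",
                 runtime="images/install.img",
                 isolabel="Fedora-34-x86_64",
                 workdir="/var/tmp/lorax/work")
tb.rebuild_initrds(add_args=["--xz"])
tb.build()                     # runs the arch template, e.g. x86.tmpl
print(tb.treeinfo_data)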
+ +#### TreeBuilder helper functions + +
[docs]def findkernels(root="/", kdir="boot"): + # To find possible flavors, awk '/BuildKernel/ { print $4 }' kernel.spec + flavors = ('debug', 'PAE', 'PAEdebug', 'smp', 'xen', 'lpae') + kre = re.compile(r"vmlinuz-(?P<version>.+?\.(?P<arch>[a-z0-9_]+)" + r"(.(?P<flavor>{0}))?)$".format("|".join(flavors))) + kernels = [] + bootfiles = os.listdir(joinpaths(root, kdir)) + for f in bootfiles: + match = kre.match(f) + if match: + kernel = DataHolder(path=joinpaths(kdir, f)) + kernel.update(match.groupdict()) # sets version, arch, flavor + kernels.append(kernel) + + # look for associated initrd/initramfs/etc. + for kernel in kernels: + for f in bootfiles: + if f.endswith('-'+kernel.version+'.img'): + imgtype, _rest = f.split('-',1) + # special backwards-compat case + if imgtype == 'initramfs': + imgtype = 'initrd' + kernel[imgtype] = DataHolder(path=joinpaths(kdir, f)) + + logger.debug("kernels=%s", kernels) + return kernels
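A quick sketch; the root below is assumed to be an installroot with kernels under boot/:

for kernel in findkernels(root="/var/tmp/lorax/installroot"):
    print(kernel.version, kernel.path, kernel.get("initrd"))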
+ +# udev whitelist: 'a-zA-Z0-9#+.:=@_-' (see is_whitelisted in libudev-util.c) +udev_blacklist=' !"$%&\'()*,/;<>?[\\]^`{|}~' # ASCII printable, minus whitelist +udev_blacklist += ''.join(chr(i) for i in range(32)) # ASCII non-printable +
[docs]def udev_escape(label): + out = '' + for ch in label: + out += ch if ch not in udev_blacklist else '\\x%02x' % ord(ch) + return out
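A quick illustration with a hypothetical label; characters outside the udev whitelist are replaced with \xNN escapes:

print(udev_escape("Fedora Live"))   # -> Fedora\x20Live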
+ +
[docs]def string_lower(string): + """ Return a lowercase string. + + :param string: String to lowercase + + This is used as a filter in the templates. + """ + return string.lower()
+
+ +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/f34-branch/_modules/pylorax/treeinfo.html b/f34-branch/_modules/pylorax/treeinfo.html new file mode 100644 index 00000000..6dd84503 --- /dev/null +++ b/f34-branch/_modules/pylorax/treeinfo.html @@ -0,0 +1,270 @@ + + + + + + + + + + pylorax.treeinfo — Lorax 34.10 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +

Source code for pylorax.treeinfo

+#
+# treeinfo.py
+#
+# Copyright (C) 2010-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s):  Martin Gracik <mgracik@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.treeinfo")
+
+import configparser
+import os
+import time
+
+
+
[docs]class TreeInfo(object): + + def __init__(self, product, version, variant, basearch, + packagedir=""): + + self.c = configparser.ConfigParser() + + if 'SOURCE_DATE_EPOCH' in os.environ: + timestamp = os.environ['SOURCE_DATE_EPOCH'] + else: + timestamp = str(time.time()) + + section = "general" + data = {"timestamp": timestamp, + "family": product, + "version": version, + "name": "%s-%s" % (product, version), + "variant": variant or "", + "arch": basearch, + "packagedir": packagedir} + + self.c.add_section(section) + list(self.c.set(section, key, value) for key, value in data.items()) + +
[docs] def add_section(self, section, data): + if not self.c.has_section(section): + self.c.add_section(section) + + list(self.c.set(section, key, value) for key, value in data.items())
+ +
[docs] def write(self, outfile): + logger.info("writing .treeinfo file") + with open(outfile, "w") as fobj: + self.c.write(fobj)
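A minimal usage sketch; the section name, keys and paths are illustrative and mirror the .treeinfo layout lorax writes:

ti = TreeInfo("Fedora", "34", "", "x86_64")
ti.add_section("images-x86_64", {"kernel": "images/pxeboot/vmlinuz",
                                 "initrd": "images/pxeboot/initrd.img"})
ti.write("/var/tmp/lorax/.treeinfo")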
+
+ +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/f34-branch/_sources/index.rst.txt b/f34-branch/_sources/index.rst.txt new file mode 100644 index 00000000..1cc32989 --- /dev/null +++ b/f34-branch/_sources/index.rst.txt @@ -0,0 +1,35 @@ +.. Lorax documentation master file, created by + sphinx-quickstart on Wed Apr 8 13:46:00 2015. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to Lorax's documentation! +================================= + +Contents: + +.. toctree:: + :maxdepth: 1 + + intro + lorax + livemedia-creator + mkksiso + product-images + modules + +Documentation for other Lorax Branches +====================================== + +* `Fedora 34 `_ +* `Fedora 33 `_ +* `RHEL8 lorax-composer `_ +* `RHEL7 lorax-composer `_ + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/f34-branch/_sources/intro.rst.txt b/f34-branch/_sources/intro.rst.txt new file mode 100644 index 00000000..01857ee9 --- /dev/null +++ b/f34-branch/_sources/intro.rst.txt @@ -0,0 +1,67 @@ +Introduction to Lorax +===================== + +I am the Lorax. I speak for the trees [and images]. + +Lorax is used to build the Anaconda Installer boot.iso, it consists of a +library, pylorax, a set of templates, and the lorax script. Its operation +is driven by a customized set of Mako templates that lists the packages +to be installed, steps to execute to remove unneeded files, and creation +of the iso for all of the supported architectures. + + + + + + +Before Lorax +============ + +Tree building tools such as pungi and revisor rely on 'buildinstall' in +anaconda/scripts/ to produce the boot images and other such control files +in the final tree. The existing buildinstall scripts written in a mix of +bash and Python are unmaintainable. Lorax is an attempt to replace them +with something more flexible. + + +EXISTING WORKFLOW: + +pungi and other tools call scripts/buildinstall, which in turn call other +scripts to do the image building and data generation. Here's how it +currently looks: + + -> buildinstall + * process command line options + * write temporary yum.conf to point to correct repo + * find anaconda release RPM + * unpack RPM, pull in those versions of upd-instroot, mk-images, + maketreeinfo.py, makestamp.py, and buildinstall + + -> call upd-instroot + + -> call maketreeinfo.py + + -> call mk-images (which figures out which mk-images.ARCH to call) + + -> call makestamp.py + + * clean up + + +PROBLEMS: + +The existing workflow presents some problems with maintaining the scripts. +First, almost all knowledge of what goes in to the stage 1 and stage 2 +images lives in upd-instroot. The mk-images* scripts copy things from the +root created by upd-instroot in order to build the stage 1 image, though +it's not completely clear from reading the scripts. + + +NEW IDEAS: + +Create a new central driver with all information living in Python modules. +Configuration files will provide the knowledge previously contained in the +upd-instroot and mk-images* scripts. + + + diff --git a/f34-branch/_sources/livemedia-creator.rst.txt b/f34-branch/_sources/livemedia-creator.rst.txt new file mode 100644 index 00000000..962be311 --- /dev/null +++ b/f34-branch/_sources/livemedia-creator.rst.txt @@ -0,0 +1,670 @@ +livemedia-creator +================= + +:Authors: + Brian C. 
Lane + +livemedia-creator uses `Anaconda `_, +`kickstart `_ and `Lorax +`_ to create bootable media that use the +same install path as a normal system installation. It can be used to make live +isos, bootable (partitioned) disk images, tarfiles, and filesystem images for +use with virtualization and container solutions like libvirt, docker, and +OpenStack. + +The general idea is to use qemu with kickstart and an Anaconda boot.iso to +install into a disk image and then use the disk image to create the bootable +media. + +livemedia-creator --help will describe all of the options available. At the +minimum you need: + +``--make-iso`` to create a final bootable .iso or one of the other ``--make-*`` options. + +``--iso`` to specify the Anaconda install media to use with qemu. + +``--ks`` to select the kickstart file describing what to install. + +To use livemedia-creator with virtualization you will need to have qemu installed. + +If you are going to be using Anaconda directly, with ``--no-virt`` mode, make sure +you have the anaconda-tui package installed. + +Conventions used in this document: + +``lmc`` is an abbreviation for livemedia-creator. + +``builder`` is the system where livemedia-creator is being run + +``image`` is the disk image being created by running livemedia-creator + + +livemedia-creator cmdline arguments +----------------------------------- + +.. argparse:: + :ref: pylorax.cmdline.lmc_parser + :prog: livemedia-creator + + --macboot : @replace + Make the iso bootable on UEFI based Mac systems + + Default: True + + --nomacboot : @replace + Do not create a Mac bootable iso + + Default: False + + +Quickstart +---------- + +Run this to create a bootable live iso:: + + sudo livemedia-creator --make-iso \ + --iso=/extra/iso/boot.iso --ks=./docs/fedora-livemedia.ks + +You can run it directly from the lorax git repo like this:: + + sudo PATH=./src/sbin/:$PATH PYTHONPATH=./src/ ./src/sbin/livemedia-creator \ + --make-iso --iso=/extra/iso/boot.iso \ + --ks=./docs/fedora-livemedia.ks --lorax-templates=./share/ + +You can observe the installation using vnc. The logs will show what port was +chosen, or you can use a specific port by passing it. eg. ``--vnc vnc:127.0.0.1:5`` + +This is usually a good idea when testing changes to the kickstart. lmc tries +to monitor the logs for fatal errors, but may not catch everything. + + +How ISO creation works +---------------------- + +There are 2 stages, the install stage which produces a disk or filesystem image +as its output, and the boot media creation which uses the image as its input. +Normally you would run both stages, but it is possible to stop after the +install stage, by using ``--image-only``, or to skip the install stage and use +a previously created disk image by passing ``--disk-image`` or ``--fs-image`` + +When creating an iso qemu boots using the passed Anaconda installer iso +and installs the system based on the kickstart. The ``%post`` section of the +kickstart is used to customize the installed system in the same way that +current spin-kickstarts do. + +livemedia-creator monitors the install process for problems by watching the +install logs. They are written to the current directory or to the base +directory specified by the --logfile command. You can also monitor the install +by using a vnc client. This is recommended when first modifying a kickstart, +since there are still places where Anaconda may get stuck without the log +monitor catching it. + +The output from this process is a partitioned disk image. 
kpartx can be used +to mount and examine it when there is a problem with the install. It can also +be booted using kvm. + +When creating an iso the disk image's / partition is copied into a formatted +filesystem image which is then used as the input to lorax for creation of the +final media. + +The final image is created by lorax, using the templates in /usr/share/lorax/live/ +or the live directory below the directory specified by ``--lorax-templates``. The +templates are written using the Mako template system with some extra commands +added by lorax. + +.. note:: + The output from --make-iso includes the artifacts used to create the boot.iso; + the kernel, initrd, the squashfs filesystem, etc. If you only want the + boot.iso you can pass ``--iso-only`` and the other files will be removed. You + can also name the iso by using ``--iso-name my-live.iso``. + + +Kickstarts +---------- + +The docs/ directory includes several example kickstarts, one to create a live +desktop iso using GNOME, and another to create a minimal disk image. When +creating your own kickstarts you should start with the minimal example, it +includes several needed packages that are not always included by dependencies. + +Or you can use existing spin kickstarts to create live media with a few +changes. Here are the steps I used to convert the Fedora XFCE spin. + +1. Flatten the xfce kickstart using ksflatten +2. Add zerombr so you don't get the disk init dialog +3. Add clearpart --all +4. Add swap partition +5. bootloader target +6. Add shutdown to the kickstart +7. Add network --bootproto=dhcp --activate to activate the network + This works for F16 builds but for F15 and before you need to pass + something on the cmdline that activate the network, like sshd: + + ``livemedia-creator --kernel-args="sshd"`` + +8. Add a root password:: + + rootpw rootme + network --bootproto=dhcp --activate + zerombr + clearpart --all + bootloader --location=mbr + part swap --size=512 + shutdown + +9. In the livesys script section of the %post remove the root password. This + really depends on how the spin wants to work. You could add the live user + that you create to the %wheel group so that sudo works if you wanted to. + + ``passwd -d root > /dev/null`` + +10. Remove /etc/fstab in %post, dracut handles mounting the rootfs + + ``cat /dev/null > /dev/fstab`` + + Do this only for live iso's, the filesystem will be mounted read only if + there is no /etc/fstab + +11. Don't delete initramfs files from /boot in %post +12. When creating live iso's you need to have, at least, these packages in the %package section:: + dracut-config-generic + dracut-live + -dracut-config-rescue + grub2-efi + memtest86+ + syslinux + +User created repositories +~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you are using your own repositories and installing groups (eg. @core) make +sure you create the repodata with groups like this ``createrepo -g +/path/to/groups.xml /path/to/rpms`` + +Using a Proxy with repos +~~~~~~~~~~~~~~~~~~~~~~~~ + +One drawback to using qemu is that it pulls the packages from the repo each +time you run it. To speed things up you either need a local mirror of the +packages, or you can use a caching proxy. 
When using a proxy you pass it to +livemedia-creator like this: + + ``--proxy=http://proxy.yourdomain.com:3128`` + +You also need to use a specific mirror instead of mirrormanager so that the +packages will get cached, so your kickstart url would look like: + + ``url --url="http://dl.fedoraproject.org/pub/fedora/linux/development/rawhide/x86_64/os/"`` + +You can also add an update repo, but don't name it updates. Add --proxy to it +as well. You can use all of the `kickstart commands `_ in your kickstart. Make sure there +is only one ``url`` command, other repos have to use the ``repo`` command and cannot be +named ``updates`` which is reserved for Anaconda's use. eg.:: + + url --url=PRIMARY-REPO-URL --proxy=PROXY-URL + repo --name="repo1" --baseurl=FIRST-REPO-URL --proxy=PROXY-URL + repo --name="repo2" --baseurl=SECOND-REPO_URL --proxy=PROXY-URL + + +Anaconda image install (no-virt) +-------------------------------- + +You can create images without using qemu by passing ``--no-virt`` on the +cmdline. This will use Anaconda's directory install feature to handle the +install. There are a couple of things to keep in mind when doing this: + +1. It will be most reliable when building images for the same release that the + host is running. Because Anaconda has expectations about the system it is + running under you may encounter strange bugs if you try to build newer or + older releases. + +2. It may totally trash your host. So far I haven't had this happen, but the + possibility exists that a bug in Anaconda could result in it operating on + real devices. I recommend running it in a virt or on a system that you can + afford to lose all data from. + +The logs from anaconda will be placed in an ./anaconda/ directory in either +the current directory or in the directory used for --logfile + +Example cmdline: + +``sudo livemedia-creator --make-iso --no-virt --ks=./fedora-livemedia.ks`` + +.. note:: + Using no-virt to create a partitioned disk image (eg. --make-disk or + --make-vagrant) will only create disks usable on the host platform (BIOS + or UEFI). You can create BIOS partitioned disk images on UEFI by using + virt. + +.. note:: + As of version 30.7 SELinux can be set to Enforcing. The current state is + logged for debugging purposes and if there are SELinux denials they should + be reported as a bug. + +AMI Images +---------- + +Amazon EC2 images can be created by using the --make-ami switch and an appropriate +kickstart file. All of the work to customize the image is handled by the kickstart. +The example currently included was modified from the cloud-kickstarts version so +that it would work with livemedia-creator. + +Example cmdline: + +``sudo livemedia-creator --make-ami --iso=/path/to/boot.iso --ks=./docs/fedora-livemedia-ec2.ks`` + +This will produce an ami-root.img file in the working directory. + +At this time I have not tested the image with EC2. Feedback would be welcome. + + +Appliance Creation +------------------ + +livemedia-creator can now replace appliance-tools by using the --make-appliance +switch. This will create the partitioned disk image and an XML file that can be +used with virt-image to setup a virtual system. + +The XML is generated using the Mako template from +/usr/share/lorax/appliance/libvirt.xml You can use a different template by +passing ``--app-template