diff --git a/f28-branch/.buildinfo b/f28-branch/.buildinfo new file mode 100644 index 00000000..b4412b9f --- /dev/null +++ b/f28-branch/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: a191c75fd1f5041e8ca7029874dc82a4 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/f28-branch/.doctrees/environment.pickle b/f28-branch/.doctrees/environment.pickle new file mode 100644 index 00000000..cb706f19 Binary files /dev/null and b/f28-branch/.doctrees/environment.pickle differ diff --git a/f28-branch/.doctrees/index.doctree b/f28-branch/.doctrees/index.doctree new file mode 100644 index 00000000..a0640cae Binary files /dev/null and b/f28-branch/.doctrees/index.doctree differ diff --git a/f28-branch/.doctrees/intro.doctree b/f28-branch/.doctrees/intro.doctree new file mode 100644 index 00000000..78b76b2d Binary files /dev/null and b/f28-branch/.doctrees/intro.doctree differ diff --git a/f28-branch/.doctrees/livemedia-creator.doctree b/f28-branch/.doctrees/livemedia-creator.doctree new file mode 100644 index 00000000..41f77eab Binary files /dev/null and b/f28-branch/.doctrees/livemedia-creator.doctree differ diff --git a/f28-branch/.doctrees/lorax.doctree b/f28-branch/.doctrees/lorax.doctree new file mode 100644 index 00000000..b29302ee Binary files /dev/null and b/f28-branch/.doctrees/lorax.doctree differ diff --git a/f28-branch/.doctrees/modules.doctree b/f28-branch/.doctrees/modules.doctree new file mode 100644 index 00000000..8b55504f Binary files /dev/null and b/f28-branch/.doctrees/modules.doctree differ diff --git a/f28-branch/.doctrees/product-images.doctree b/f28-branch/.doctrees/product-images.doctree new file mode 100644 index 00000000..f54cf679 Binary files /dev/null and b/f28-branch/.doctrees/product-images.doctree differ diff --git a/f28-branch/.doctrees/pylorax.doctree b/f28-branch/.doctrees/pylorax.doctree new file mode 
100644 index 00000000..6e8ebbae Binary files /dev/null and b/f28-branch/.doctrees/pylorax.doctree differ diff --git a/f28-branch/.doctrees/source/index.doctree b/f28-branch/.doctrees/source/index.doctree new file mode 100644 index 00000000..a1aebf96 Binary files /dev/null and b/f28-branch/.doctrees/source/index.doctree differ diff --git a/f28-branch/_modules/index.html b/f28-branch/_modules/index.html new file mode 100644 index 00000000..b57c2515 --- /dev/null +++ b/f28-branch/_modules/index.html @@ -0,0 +1,239 @@ + + + + + +
+ + + + +
+#
+# __init__.py
+#
+# Copyright (C) 2010-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+# David Cantrell <dcantrell@redhat.com>
+# Will Woods <wwoods@redhat.com>
+
+# set up logging
+import logging
+logger = logging.getLogger("pylorax")
+logger.addHandler(logging.NullHandler())
+
+program_log = logging.getLogger("program")
+
+import sys
+import os
+import configparser
+import tempfile
+import locale
+from subprocess import CalledProcessError
+import selinux
+from glob import glob
+
+from pylorax.base import BaseLoraxClass, DataHolder
+import pylorax.output as output
+
+import dnf
+
+from pylorax.sysutils import joinpaths, remove, linktree
+
+from pylorax.treebuilder import RuntimeBuilder, TreeBuilder
+from pylorax.buildstamp import BuildStamp
+from pylorax.treeinfo import TreeInfo
+from pylorax.discinfo import DiscInfo
+from pylorax.executils import runcmd, runcmd_output
+
+
+# get lorax version
+try:
+ import pylorax.version
+except ImportError:
+ vernum = "devel"
+else:
+ vernum = pylorax.version.num
+
+# List of drivers to remove on ppc64 arch to keep initrd < 32MiB
+REMOVE_PPC64_DRIVERS = "floppy scsi_debug nouveau radeon cirrus mgag200"
+REMOVE_PPC64_MODULES = "drm plymouth"
+
class ArchData(DataHolder):
    """Architecture-specific build data (basearch, libdir, BCJ filter)."""

    # arches whose libraries live under lib64 rather than lib
    lib64_arches = ("x86_64", "ppc64", "ppc64le", "s390x", "ia64", "aarch64")
    # map of basearch -> xz BCJ filter name used when compressing the runtime;
    # arches not listed get no BCJ filter (bcj is None)
    bcj_arch = dict(i386="x86", x86_64="x86",
                    ppc="powerpc", ppc64="powerpc", ppc64le="powerpc",
                    arm="arm", armhfp="arm")

    def __init__(self, buildarch):
        """
        :param str buildarch: architecture the image is being built for
        """
        super(ArchData, self).__init__()
        self.buildarch = buildarch
        # canonical base architecture as reported by dnf (e.g. i686 -> i386)
        self.basearch = dnf.rpm.basearch(buildarch)
        self.libdir = "lib64" if self.basearch in self.lib64_arches else "lib"
        self.bcj = self.bcj_arch.get(self.basearch)
+
class Lorax(BaseLoraxClass):
    """Top-level driver for building the Anaconda boot.iso output tree.

    Typical use is to create a Lorax(), call configure() to load the
    configuration, and then run() with a dnf.Base object and the product
    information.
    """

    def __init__(self):
        BaseLoraxClass.__init__(self)
        self._configured = False
        self.product = None
        self.workdir = None
        self.arch = None
        self.conf = None
        self.inroot = None
        self.debug = False
        self.outputdir = None
        self._templatedir = None

        # set locale to C so external tool output is stable and parseable
        locale.setlocale(locale.LC_ALL, 'C')

    def configure(self, conf_file="/etc/lorax/lorax.conf"):
        """Set configuration defaults, read conf_file, and set up output.

        :param str conf_file: path to the lorax configuration file; missing
                              files are silently ignored and defaults used.
        """
        # NOTE: SafeConfigParser was a deprecated alias of ConfigParser and
        # was removed in Python 3.12; ConfigParser behaves identically here.
        self.conf = configparser.ConfigParser()

        # set defaults
        self.conf.add_section("lorax")
        self.conf.set("lorax", "debug", "1")
        self.conf.set("lorax", "sharedir", "/usr/share/lorax")
        self.conf.set("lorax", "logdir", "/var/log/lorax")

        self.conf.add_section("output")
        self.conf.set("output", "colors", "1")
        self.conf.set("output", "encoding", "utf-8")
        self.conf.set("output", "ignorelist", "/usr/share/lorax/ignorelist")

        self.conf.add_section("templates")
        self.conf.set("templates", "ramdisk", "ramdisk.ltmpl")

        self.conf.add_section("compression")
        self.conf.set("compression", "type", "xz")
        self.conf.set("compression", "args", "")
        self.conf.set("compression", "bcj", "on")

        # read the config file, overriding the defaults above
        if os.path.isfile(conf_file):
            self.conf.read(conf_file)

        # set up the output
        self.debug = self.conf.getboolean("lorax", "debug")
        output_level = output.DEBUG if self.debug else output.INFO

        # only use colors when writing to a real terminal
        if sys.stdout.isatty():
            colors = self.conf.getboolean("output", "colors")
        else:
            colors = False
        encoding = self.conf.get("output", "encoding")

        self.output.basic_config(output_level=output_level,
                                 colors=colors, encoding=encoding)

        # suppress output lines matching the ignorelist file, if present
        ignorelist = self.conf.get("output", "ignorelist")
        if os.path.isfile(ignorelist):
            with open(ignorelist, "r") as fobj:
                for line in fobj:
                    line = line.strip()
                    if line and not line.startswith("#"):
                        self.output.ignore(line)

        # cron does not have sbin in PATH,
        # so we have to add it ourselves
        os.environ["PATH"] = "{0}:/sbin:/usr/sbin".format(os.environ["PATH"])

        # remove some environment variables that can cause problems with
        # package scripts (a plain loop; the original wrapped this in a
        # throwaway list(generator) just for the side effect)
        for k in ('DISPLAY', 'DBUS_SESSION_BUS_ADDRESS'):
            os.environ.pop(k, None)

        self._configured = True

    @property
    def templatedir(self):
        """Find the template directory.

        Pick the first directory under sharedir/templates.d/ if it exists.
        Otherwise use the sharedir
        """
        if not self._templatedir:
            self._templatedir = find_templates(self.conf.get("lorax", "sharedir"))
            logger.info("Using templatedir %s", self._templatedir)
        return self._templatedir

    def init_stream_logging(self):
        """Add a console handler logging INFO and above to the pylorax logger."""
        sh = logging.StreamHandler()
        sh.setLevel(logging.INFO)
        logger.addHandler(sh)

    def init_file_logging(self, logdir, logname="pylorax.log"):
        """Add a file handler logging DEBUG and above to logdir/logname.

        :param str logdir: directory the logfile is created in (must exist)
        :param str logname: name of the logfile, defaults to pylorax.log
        """
        fh = logging.FileHandler(filename=joinpaths(logdir, logname), mode="w")
        fh.setLevel(logging.DEBUG)
        logger.addHandler(fh)

    def run(self, dbo, product, version, release, variant="", bugurl="",
            isfinal=False, workdir=None, outputdir=None, buildarch=None, volid=None,
            domacboot=True, doupgrade=True, remove_temp=False,
            installpkgs=None, excludepkgs=None,
            size=2,
            add_templates=None,
            add_template_vars=None,
            add_arch_templates=None,
            add_arch_template_vars=None,
            verify=True):
        """Install the runtime, then build the output tree and boot images.

        :param dnf.Base dbo: dnf base object with the repos already set up
        :param str product: product name
        :param str version: version identifier
        :param str release: release information
        :param str variant: optional variant name
        :param str bugurl: bug reporting URL written to .buildstamp
        :param bool isfinal: whether this is a final release build
        :param workdir: work directory; a temporary one is created if None
        :param outputdir: output directory; a temporary one is created if None
        :param buildarch: build architecture; detected from anaconda-core if None
        :param volid: iso volume id; derived from product/version/arch if None
        :param bool domacboot: build mac boot images (requires hfsplus-tools)
        :param bool doupgrade: build upgrade.img
        :param bool remove_temp: remove the work directory when finished
        :param installpkgs: extra package globs to install in the runtime
        :param excludepkgs: package globs to exclude from the runtime
        :param int size: root filesystem size in GiB
        :param verify: verify the installroot after cleanup

        Exits the process (sys.exit(1)) on fatal setup problems: missing
        hfsplus-tools, no root privileges, selinux enforcing, bad dnf object,
        or a volume id longer than 32 characters.
        """
        assert self._configured

        installpkgs = installpkgs or []
        excludepkgs = excludepkgs or []

        # mac boot images need hfsplus-tools; fail early if they are missing
        if domacboot:
            try:
                runcmd(["rpm", "-q", "hfsplus-tools"])
            except CalledProcessError:
                logger.critical("you need to install hfsplus-tools to create mac images")
                sys.exit(1)

        # set up work directory
        self.workdir = workdir or tempfile.mkdtemp(prefix="pylorax.work.")
        os.makedirs(self.workdir, exist_ok=True)

        # set up log directory
        logdir = self.conf.get("lorax", "logdir")
        os.makedirs(logdir, exist_ok=True)

        self.init_stream_logging()
        self.init_file_logging(logdir)

        logger.debug("version is %s", vernum)
        logger.debug("using work directory %s", self.workdir)
        logger.debug("using log directory %s", logdir)

        # set up output directory
        self.outputdir = outputdir or tempfile.mkdtemp(prefix="pylorax.out.")
        os.makedirs(self.outputdir, exist_ok=True)
        logger.debug("using output directory %s", self.outputdir)

        # do we have root privileges?
        logger.info("checking for root privileges")
        if os.geteuid() != 0:
            logger.critical("no root privileges")
            sys.exit(1)

        # is selinux disabled?
        # With selinux in enforcing mode the rpcbind package required for
        # dracut nfs module, which is in turn required by anaconda module,
        # will not get installed, because it's preinstall scriptlet fails,
        # resulting in an incomplete initial ramdisk image.
        # The reason is that the scriptlet runs tools from the shadow-utils
        # package in chroot, particularly groupadd and useradd to add the
        # required rpc group and rpc user. This operation fails, because
        # the selinux context on files in the chroot, that the shadow-utils
        # tools need to access (/etc/group, /etc/passwd, /etc/shadow etc.),
        # is wrong and selinux therefore disallows access to these files.
        logger.info("checking the selinux mode")
        if selinux.is_selinux_enabled() and selinux.security_getenforce():
            logger.critical("selinux must be disabled or in Permissive mode")
            sys.exit(1)

        # do we have a proper dnf base object?
        logger.info("checking dnf base object")
        if not isinstance(dbo, dnf.Base):
            logger.critical("no dnf base object")
            sys.exit(1)
        self.inroot = dbo.conf.installroot
        logger.debug("using install root: %s", self.inroot)

        if not buildarch:
            buildarch = get_buildarch(dbo)

        logger.info("setting up build architecture")
        self.arch = ArchData(buildarch)
        for attr in ('buildarch', 'basearch', 'libdir'):
            logger.debug("self.arch.%s = %s", attr, getattr(self.arch, attr))

        logger.info("setting up build parameters")
        self.product = DataHolder(name=product, version=version, release=release,
                                  variant=variant, bugurl=bugurl, isfinal=isfinal)
        logger.debug("product data: %s", self.product)

        # NOTE: if you change isolabel, you need to change pungi to match, or
        # the pungi images won't boot.
        isolabel = volid or "%s-%s-%s" % (self.product.name, self.product.version, self.arch.basearch)

        # ISO9660 limits the volume id to 32 characters
        if len(isolabel) > 32:
            # logger.critical, not the deprecated logger.fatal alias
            logger.critical("the volume id cannot be longer than 32 characters")
            sys.exit(1)

        # NOTE: rb.root = dbo.conf.installroot (== self.inroot)
        rb = RuntimeBuilder(product=self.product, arch=self.arch,
                            dbo=dbo, templatedir=self.templatedir,
                            installpkgs=installpkgs,
                            excludepkgs=excludepkgs,
                            add_templates=add_templates,
                            add_template_vars=add_template_vars)

        logger.info("installing runtime packages")
        rb.install()

        # write .buildstamp
        buildstamp = BuildStamp(self.product.name, self.product.version,
                                self.product.bugurl, self.product.isfinal,
                                self.arch.buildarch, self.product.variant)

        buildstamp.write(joinpaths(self.inroot, ".buildstamp"))

        if self.debug:
            rb.writepkglists(joinpaths(logdir, "pkglists"))
            rb.writepkgsizes(joinpaths(logdir, "original-pkgsizes.txt"))

        logger.info("doing post-install configuration")
        rb.postinstall()

        # write .discinfo
        discinfo = DiscInfo(self.product.release, self.arch.basearch)
        discinfo.write(joinpaths(self.outputdir, ".discinfo"))

        # hardlink copy so the runtime image can be built from a pristine tree
        logger.info("backing up installroot")
        installroot = joinpaths(self.workdir, "installroot")
        linktree(self.inroot, installroot)

        logger.info("generating kernel module metadata")
        rb.generate_module_data()

        logger.info("cleaning unneeded files")
        rb.cleanup()

        if verify:
            logger.info("verifying the installroot")
            if not rb.verify():
                sys.exit(1)
        else:
            logger.info("Skipping verify")

        if self.debug:
            rb.writepkgsizes(joinpaths(logdir, "final-pkgsizes.txt"))

        logger.info("creating the runtime image")
        runtime = "images/install.img"
        compression = self.conf.get("compression", "type")
        compressargs = self.conf.get("compression", "args").split()  # pylint: disable=no-member
        if self.conf.getboolean("compression", "bcj"):
            if self.arch.bcj:
                compressargs += ["-Xbcj", self.arch.bcj]
            else:
                logger.info("no BCJ filter for arch %s", self.arch.basearch)
        rb.create_runtime(joinpaths(installroot, runtime),
                          compression=compression, compressargs=compressargs,
                          size=size)
        rb.finished()

        logger.info("preparing to build output tree and boot images")
        treebuilder = TreeBuilder(product=self.product, arch=self.arch,
                                  inroot=installroot, outroot=self.outputdir,
                                  runtime=runtime, isolabel=isolabel,
                                  domacboot=domacboot, doupgrade=doupgrade,
                                  templatedir=self.templatedir,
                                  add_templates=add_arch_templates,
                                  add_template_vars=add_arch_template_vars,
                                  workdir=self.workdir)

        logger.info("rebuilding initramfs images")
        dracut_args = ["--xz", "--install", "/.buildstamp", "--no-early-microcode", "--add", "fips"]
        anaconda_args = dracut_args + ["--add", "anaconda pollcdrom qemu qemu-net"]

        # ppc64 cannot boot an initrd > 32MiB so remove some drivers
        if self.arch.basearch in ("ppc64", "ppc64le"):
            dracut_args.extend(["--omit-drivers", REMOVE_PPC64_DRIVERS])

            # Only omit dracut modules from the initrd so that they're kept for
            # upgrade.img
            anaconda_args.extend(["--omit", REMOVE_PPC64_MODULES])

        treebuilder.rebuild_initrds(add_args=anaconda_args)

        logger.info("populating output tree and building boot images")
        treebuilder.build()

        # write .treeinfo file and we're done
        treeinfo = TreeInfo(self.product.name, self.product.version,
                            self.product.variant, self.arch.basearch)
        for section, data in treebuilder.treeinfo_data.items():
            treeinfo.add_section(section, data)
        treeinfo.write(joinpaths(self.outputdir, ".treeinfo"))

        # cleanup
        if remove_temp:
            remove(self.workdir)
+
+
def get_buildarch(dbo):
    """Return the arch of the first available non-src anaconda-core package.

    :param dnf.Base dbo: dnf base object used to query the repositories

    Exits the process with an error if anaconda-core is not available.
    """
    available = dbo.sack.query().available()
    arches = (pkg.arch for pkg in available.filter(name="anaconda-core")
              if pkg.arch != "src")
    buildarch = next(arches, None)

    if buildarch is None:
        logger.critical("no anaconda-core package in the repository")
        sys.exit(1)

    return buildarch
+
+
def setup_logging(logfile, theLogger):
    """
    Setup the various logs

    :param logfile: filename to write the log to
    :type logfile: string
    :param theLogger: top-level logger
    :type theLogger: logging.Logger
    """
    logdir = os.path.abspath(os.path.dirname(logfile))
    if not os.path.isdir(logdir):
        os.makedirs(logdir)

    # Both the pylorax logger and the caller's top-level logger get a
    # console handler (INFO) and a logfile handler (DEBUG).
    logger.setLevel(logging.DEBUG)
    theLogger.setLevel(logging.DEBUG)

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))

    logfile_handler = logging.FileHandler(filename=logfile, mode="w")
    logfile_handler.setLevel(logging.DEBUG)
    logfile_handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s"))

    for handler in (console, logfile_handler):
        logger.addHandler(handler)
        theLogger.addHandler(handler)

    # External program output log, written next to the main logfile
    program_log.setLevel(logging.DEBUG)
    program_handler = logging.FileHandler(filename=logdir + "/program.log", mode="w")
    program_handler.setLevel(logging.DEBUG)
    program_log.addHandler(program_handler)
+
+
def find_templates(templatedir="/usr/share/lorax"):
    """ Find the templates to use.

    :param str templatedir: Top directory to search for templates
    :returns: Path to templates
    :rtype: str

    If there is a templates.d directory under templatedir the
    lowest numbered directory entry is returned.

    eg. /usr/share/lorax/templates.d/99-generic/
    """
    tdir = joinpaths(templatedir, "templates.d")
    if not os.path.isdir(tdir):
        return templatedir

    # lowest-sorted entry wins; an empty templates.d falls back to templatedir
    entries = sorted(glob(joinpaths(tdir, "*")))
    return entries[0] if entries else templatedir
+
+#
+# base.py
+#
+# Copyright (C) 2009-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
+from abc import ABCMeta, abstractmethod
+import sys
+
+import pylorax.output as output
+
+
class BaseLoraxClass(object, metaclass=ABCMeta):
    """Abstract base for the Lorax classes.

    Cannot be instantiated directly; concrete subclasses must chain up to
    this __init__ so that self.output is set up.
    """
    @abstractmethod
    def __init__(self):
        # console output helper shared by all subclasses
        self.output = output.LoraxOutput()
+
+
+
+
+
+
+
+
+
+
+
+
class DataHolder(dict):
    """A dict whose keys are also readable and writable as attributes.

    e.g. d = DataHolder(name="foo"); d.name == d["name"]
    """

    def __init__(self, **kwargs):
        """Store every keyword argument as a dict entry."""
        dict.__init__(self)

        for attr, value in kwargs.items():
            self[attr] = value

    def __getattr__(self, attr):
        """Look up missing attributes as dict keys.

        :raises AttributeError: when the key is not present. The attribute
            name is included in the exception (the original raised a bare
            AttributeError with no message, which made failures hard to
            diagnose); the exception type is unchanged for callers.
        """
        try:
            return self[attr]
        except KeyError:
            raise AttributeError(attr) from None

    def __setattr__(self, attr, value):
        """Attribute assignment stores the value as a dict key."""
        self[attr] = value
+
+
+
+#
+# buildstamp.py
+#
+# Copyright (C) 2010-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.buildstamp")
+
+import datetime
+
+
class BuildStamp(object):
    """Collects the product metadata written to the .buildstamp file."""

    def __init__(self, product, version, bugurl, isfinal, buildarch, variant=""):
        self.product = product
        self.version = version
        self.bugurl = bugurl
        self.isfinal = isfinal
        self.variant = variant

        # uuid is the build time (YYYYmmddHHMM) plus the build arch
        timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M")
        self.uuid = "{0}.{1}".format(timestamp, buildarch)

    def write(self, outfile):
        """Write the ini-style .buildstamp data to outfile."""
        # get lorax version
        try:
            import pylorax.version
        except ImportError:
            vernum = "devel"
        else:
            vernum = pylorax.version.num

        logger.info("writing .buildstamp file")
        lines = ["[Main]\n",
                 "Product={0.product}\n".format(self),
                 "Version={0.version}\n".format(self),
                 "BugURL={0.bugurl}\n".format(self),
                 "IsFinal={0.isfinal}\n".format(self),
                 "UUID={0.uuid}\n".format(self)]
        if self.variant:
            lines.append("Variant={0.variant}\n".format(self))
        lines.append("[Compose]\n")
        lines.append("Lorax={0}\n".format(vernum))

        with open(outfile, "w") as fobj:
            fobj.writelines(lines)
+
+#
+# cmdline.py
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Brian C. Lane <bcl@redhat.com>
+
+import os
+import sys
+import argparse
+
+from pylorax import vernum
+
+version = "{0}-{1}".format(os.path.basename(sys.argv[0]), vernum)
+
def lorax_parser():
    """ Return the ArgumentParser for lorax"""

    ap = argparse.ArgumentParser(description="Create the Anaconda boot.iso")

    # required arguments for image creation
    req = ap.add_argument_group("required arguments")
    req.add_argument("-p", "--product", help="product name", required=True, metavar="PRODUCT")
    req.add_argument("-v", "--version", help="version identifier", required=True, metavar="VERSION")
    req.add_argument("-r", "--release", help="release information", required=True, metavar="RELEASE")
    req.add_argument("-s", "--source", help="source repository (may be listed multiple times)",
                     metavar="REPOSITORY", action="append", default=[])
    req.add_argument("--repo", help="source dnf repository file", type=os.path.abspath,
                     dest="repos", metavar="REPOSITORY", action="append", default=[])

    # optional arguments
    opt = ap.add_argument_group("optional arguments")
    opt.add_argument("-m", "--mirrorlist",
                     help="mirrorlist repository (may be listed multiple times)",
                     metavar="REPOSITORY", action="append", default=[])
    opt.add_argument("-t", "--variant", default="",
                     help="variant name", metavar="VARIANT")
    opt.add_argument("-b", "--bugurl",
                     help="bug reporting URL for the product", metavar="URL",
                     default="your distribution provided bug reporting tool")
    opt.add_argument("--isfinal", help="",
                     action="store_true", default=False, dest="isfinal")
    opt.add_argument("-c", "--config", default="/etc/lorax/lorax.conf",
                     help="config file", metavar="CONFIGFILE")
    opt.add_argument("--proxy", default=None,
                     help="repo proxy url:port", metavar="HOST")
    opt.add_argument("-i", "--installpkgs", default=[],
                     action="append", metavar="PACKAGE",
                     help="package glob to install before runtime-install.tmpl runs. (may be listed multiple times)")
    opt.add_argument("-e", "--excludepkgs", default=[],
                     action="append", metavar="PACKAGE",
                     help="package glob to remove before runtime-install.tmpl runs. (may be listed multiple times)")
    opt.add_argument("--buildarch", default=None,
                     help="build architecture", metavar="ARCH")
    opt.add_argument("--volid", default=None,
                     help="volume id", metavar="VOLID")
    opt.add_argument("--macboot", help="",
                     action="store_true", default=True, dest="domacboot")
    opt.add_argument("--nomacboot", help="",
                     action="store_false", dest="domacboot")
    opt.add_argument("--noupgrade", help="",
                     action="store_false", default=True, dest="doupgrade")
    opt.add_argument("--logfile", default="./lorax.log", type=os.path.abspath,
                     help="Path to logfile")
    opt.add_argument("--tmp", default="/var/tmp",
                     help="Top level temporary directory" )
    opt.add_argument("--cachedir", default=None, type=os.path.abspath,
                     help="DNF cache directory. Default is a temporary dir.")
    opt.add_argument("--workdir", default=None, type=os.path.abspath,
                     help="Work directory, overrides --tmp. Default is a temporary dir under /var/tmp")
    opt.add_argument("--force", default=False, action="store_true",
                     help="Run even when the destination directory exists")
    opt.add_argument("--add-template", dest="add_templates",
                     action="append", help="Additional template for runtime image",
                     default=[])
    opt.add_argument("--add-template-var", dest="add_template_vars",
                     action="append", help="Set variable for runtime image template",
                     default=[])
    opt.add_argument("--add-arch-template", dest="add_arch_templates",
                     action="append", help="Additional template for architecture-specific image",
                     default=[])
    opt.add_argument("--add-arch-template-var", dest="add_arch_template_vars",
                     action="append", help="Set variable for architecture-specific image",
                     default=[])
    opt.add_argument("--noverify", action="store_false", default=True, dest="verify",
                     help="Do not verify the install root")
    opt.add_argument("--sharedir", metavar="SHAREDIR", type=os.path.abspath,
                     help="Directory containing all the templates. Overrides config file sharedir")
    opt.add_argument("--enablerepo", action="append", default=[], dest="enablerepos",
                     metavar="[repo]", help="Names of repos to enable")
    opt.add_argument("--disablerepo", action="append", default=[], dest="disablerepos",
                     metavar="[repo]", help="Names of repos to disable")
    opt.add_argument("--rootfs-size", type=int, default=2,
                     help="Size of root filesystem in GiB. Defaults to 2.")
    opt.add_argument("--noverifyssl", action="store_true", default=False,
                     help="Do not verify SSL certificates")

    # add the show version option
    ap.add_argument("-V", help="show program's version number and exit",
                    action="version", version=version)

    ap.add_argument("outputdir", help="Output directory", metavar="OUTPUTDIR", type=os.path.abspath)

    return ap
+
+
+[docs]def lmc_parser(dracut_default=""):
+ """ Return a ArgumentParser object for live-media-creator."""
+ parser = argparse.ArgumentParser(description="Create Live Install Media",
+ fromfile_prefix_chars="@")
+
+ # These are mutually exclusive, one is required
+ action = parser.add_mutually_exclusive_group(required=True)
+ action.add_argument("--make-iso", action="store_true",
+ help="Build a live iso")
+ action.add_argument("--make-disk", action="store_true",
+ help="Build a partitioned disk image")
+ action.add_argument("--make-fsimage", action="store_true",
+ help="Build a filesystem image")
+ action.add_argument("--make-appliance", action="store_true",
+ help="Build an appliance image and XML description")
+ action.add_argument("--make-ami", action="store_true",
+ help="Build an ami image")
+ action.add_argument("--make-tar", action="store_true",
+ help="Build a tar of the root filesystem")
+ action.add_argument("--make-pxe-live", action="store_true",
+ help="Build a live pxe boot squashfs image")
+ action.add_argument("--make-ostree-live", action="store_true",
+ help="Build a live pxe boot squashfs image of Atomic Host")
+ action.add_argument("--make-oci", action="store_true",
+ help="Build an Open Container Initiative image")
+ action.add_argument("--make-vagrant", action="store_true",
+ help="Build a Vagrant Box image")
+
+ parser.add_argument("--iso", type=os.path.abspath,
+ help="Anaconda installation .iso path to use for qemu")
+ parser.add_argument("--iso-only", action="store_true",
+ help="Remove all iso creation artifacts except the boot.iso, "
+ "combine with --iso-name to rename the boot.iso")
+ parser.add_argument("--iso-name", default=None,
+ help="Name of output iso file for --iso-only. Default is boot.iso")
+ parser.add_argument("--ks", action="append", type=os.path.abspath,
+ help="Kickstart file defining the install.")
+ parser.add_argument("--image-only", action="store_true",
+ help="Exit after creating fs/disk image.")
+
+ parser.add_argument("--no-virt", action="store_true",
+ help="Run anaconda directly on host instead of using qemu")
+ parser.add_argument("--proxy",
+ help="proxy URL to use for the install")
+ parser.add_argument("--anaconda-arg", action="append", dest="anaconda_args",
+ help="Additional argument to pass to anaconda (no-virt "
+ "mode). Pass once for each argument")
+ parser.add_argument("--armplatform",
+ help="the platform to use when creating images for ARM, "
+ "i.e., highbank, mvebu, omap, tegra, etc.")
+ parser.add_argument("--location", default=None, type=os.path.abspath,
+ help="location of iso directory tree with initrd.img "
+ "and vmlinuz. Used to run qemu with a newer initrd "
+ "than the iso.")
+
+ parser.add_argument("--logfile", default="./livemedia.log",
+ type=os.path.abspath,
+ help="Name and path for primary logfile, other logs will "
+ "be created in the same directory.")
+ parser.add_argument("--lorax-templates", default=None,
+ type=os.path.abspath,
+ help="Path to mako templates for lorax")
+ parser.add_argument("--tmp", default="/var/tmp", type=os.path.abspath,
+ help="Top level temporary directory")
+ parser.add_argument("--resultdir", default=None, dest="result_dir",
+ type=os.path.abspath,
+ help="Directory to copy the resulting images and iso into. "
+ "Defaults to the temporary working directory")
+
+ parser.add_argument("--macboot", action="store_true", default=True,
+ dest="domacboot")
+ parser.add_argument("--nomacboot", action="store_false",
+ dest="domacboot")
+
+ image_group = parser.add_argument_group("disk/fs image arguments")
+ image_group.add_argument("--disk-image", type=os.path.abspath,
+ help="Path to existing disk image to use for creating final image.")
+ image_group.add_argument("--keep-image", action="store_true",
+ help="Keep raw disk image after .iso creation")
+ image_group.add_argument("--fs-image", type=os.path.abspath,
+ help="Path to existing filesystem image to use for creating final image.")
+ image_group.add_argument("--image-name", default=None,
+ help="Name of output file to create. Used for tar, fs and disk image. Default is a random name.")
+ image_group.add_argument("--fs-label", default="Anaconda",
+ help="Label to set on fsimage, default is 'Anaconda'")
+ image_group.add_argument("--image-type", default=None,
+ help="Create an image with qemu-img. See qemu-img --help for supported formats.")
+ image_group.add_argument("--qemu-arg", action="append", dest="qemu_args", default=[],
+ help="Arguments to pass to qemu-img. Pass once for each argument, they will be used for ALL calls to qemu-img.")
+ image_group.add_argument("--qcow2", action="store_true",
+ help="Create qcow2 image instead of raw sparse image when making disk images.")
+ image_group.add_argument("--qcow2-arg", action="append", dest="qemu_args", default=[],
+ help="Arguments to pass to qemu-img. Pass once for each argument, they will be used for ALL calls to qemu-img.")
+ image_group.add_argument("--compression", default="xz",
+ help="Compression binary for make-tar. xz, lzma, gzip, and bzip2 are supported. xz is the default.")
+ image_group.add_argument("--compress-arg", action="append", dest="compress_args", default=[],
+ help="Arguments to pass to compression. Pass once for each argument")
+ # Group of arguments for appliance creation
+ app_group = parser.add_argument_group("appliance arguments")
+ app_group.add_argument("--app-name", default=None,
+ help="Name of appliance to pass to template")
+ app_group.add_argument("--app-template", default=None,
+ help="Path to template to use for appliance data.")
+ app_group.add_argument("--app-file", default="appliance.xml",
+ help="Appliance template results file.")
+
+ # Group of arguments to pass to qemu
+ virt_group = parser.add_argument_group("qemu arguments")
+ virt_group.add_argument("--ram", metavar="MEMORY", type=int, default=1024,
+ help="Memory to allocate for installer in megabytes.")
+ virt_group.add_argument("--vcpus", type=int, default=None,
+ help="Passed to qemu -smp command")
+ virt_group.add_argument("--vnc",
+ help="Passed to qemu -display command. eg. vnc=127.0.0.1:5, default is to "
+ "choose the first unused vnc port.")
+ virt_group.add_argument("--arch", default=None,
+ help="System arch to build for. Used to select qemu-system-* command. "
+ "Defaults to qemu-system-<arch>")
+ virt_group.add_argument("--kernel-args",
+ help="Additional argument to pass to the installation kernel")
+ virt_group.add_argument("--ovmf-path", default="/usr/share/edk2/ovmf/",
+ help="Path to OVMF firmware")
+ virt_group.add_argument("--virt-uefi", action="store_true", default=False,
+ help="Use OVMF firmware to boot the VM in UEFI mode")
+ virt_group.add_argument("--no-kvm", action="store_true", default=False,
+ help="Skip using kvm with qemu even if it is available.")
+ virt_group.add_argument("--with-rng", default="/dev/random",
+ help="RNG device for QEMU (none for no RNG)")
+
+ # dracut arguments
+ dracut_group = parser.add_argument_group("dracut arguments")
+ dracut_group.add_argument("--dracut-arg", action="append", dest="dracut_args",
+ help="Argument to pass to dracut when "
+ "rebuilding the initramfs. Pass this "
+ "once for each argument. NOTE: this "
+ "overrides the default. (default: %s)" % dracut_default)
+
+ # pxe to live arguments
+ pxelive_group = parser.add_argument_group("pxe to live arguments")
+ pxelive_group.add_argument("--live-rootfs-size", type=int, default=0,
+ help="Size of root filesystem of live image in GiB")
+ pxelive_group.add_argument("--live-rootfs-keep-size", action="store_true",
+ help="Keep the original size of root filesystem in live image")
+
+ # OCI specific commands
+ oci_group = parser.add_argument_group("OCI arguments")
+ oci_group.add_argument("--oci-config",
+ help="config.json OCI configuration file")
+ oci_group.add_argument("--oci-runtime",
+ help="runtime.json OCI configuration file")
+
+ # Vagrant specific commands
+ vagrant_group = parser.add_argument_group("Vagrant arguments")
+ vagrant_group.add_argument("--vagrant-metadata",
+ help="optional metadata.json file")
+ vagrant_group.add_argument("--vagrantfile",
+ help="optional vagrantfile")
+
+ parser.add_argument("--title", default="Linux Live Media",
+ help="Substituted for @TITLE@ in bootloader config files")
+ parser.add_argument("--project", default="Linux",
+ help="substituted for @PROJECT@ in bootloader config files")
+ parser.add_argument("--releasever", default="28",
+ help="substituted for @VERSION@ in bootloader config files")
+ parser.add_argument("--volid", default=None, help="volume id")
+ parser.add_argument("--squashfs_args",
+ help="additional squashfs args")
+ parser.add_argument("--timeout", default=None, type=int,
+ help="Cancel installer after X minutes")
+
+ # add the show version option
+ parser.add_argument("-V", help="show program's version number and exit",
+ action="version", version=version)
+
+ return parser
+
+#
+# decorators.py
+#
+# Copyright (C) 2009-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
def singleton(cls):
    """Class decorator turning *cls* into a lazily-created singleton.

    The decorated name becomes a zero-argument factory; the first call
    instantiates the class and every later call returns that same object.
    """
    cache = {}

    def get_instance():
        try:
            return cache[cls]
        except KeyError:
            cache[cls] = cls()
            return cache[cls]

    return get_instance
+
+#
+# discinfo.py
+#
+# Copyright (C) 2010-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
import logging
logger = logging.getLogger("pylorax.discinfo")

import time


class DiscInfo(object):
    """Holds the release name and architecture written to a .discinfo file."""

    def __init__(self, release, basearch):
        self.release = release      # release name/version string
        self.basearch = basearch    # base architecture, eg. x86_64

    def write(self, outfile):
        """Write the .discinfo file: timestamp line, release line, basearch line."""
        logger.info("writing .discinfo file")
        with open(outfile, "w") as fobj:
            fobj.write("%f\n" % time.time())
            fobj.write("%s\n" % self.release)
            fobj.write("%s\n" % self.basearch)
+
+#
+# dnfhelper.py
+#
+# Copyright (C) 2010-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+# Brian C. Lane <bcl@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.dnfhelper")
+import dnf
+import collections
+import time
+import pylorax.output as output
+
+__all__ = ['LoraxDownloadCallback', 'LoraxRpmCallback']
+
def _paced(fn):
    """Execute `fn` no more often then every 2 seconds.

    The wrapped method's instance must carry a ``last_time`` attribute;
    calls arriving sooner than 2 seconds after the last accepted call
    are silently dropped (return None).
    """
    def paced_fn(self, *args):
        now = time.time()
        elapsed = now - self.last_time
        if elapsed < 2:
            return None
        self.last_time = now
        return fn(self, *args)
    return paced_fn
+
+
class LoraxDownloadCallback(dnf.callback.DownloadProgress):
    """Report dnf package-download progress through the lorax output writer."""
    def __init__(self):
        self.downloads = collections.defaultdict(int)   # nevra -> bytes downloaded
        self.last_time = time.time()                    # used by @_paced throttling
        self.total_files = 0
        self.total_size = 0

        self.pkgno = 0
        self.total = 0

        self.output = output.LoraxOutput()

    @_paced
    def _update(self):
        """Write one progress line, at most once every 2 seconds (via @_paced)."""
        msg = "Downloading %(pkgno)s / %(total_files)s RPMs, " \
              "%(downloaded)s / %(total_size)s (%(percent)d%%) done.\n"
        downloaded = sum(self.downloads.values())
        # Guard against ZeroDivisionError: total_size stays 0 until start()
        # has been called with the real totals.
        if self.total_size > 0:
            percent = int(100 * downloaded / self.total_size)
        else:
            percent = 0
        vals = {
            'downloaded'  : downloaded,
            'percent'     : percent,
            'pkgno'       : self.pkgno,
            'total_files' : self.total_files,
            'total_size'  : self.total_size
        }
        self.output.write(msg % vals)

    def end(self, payload, status, msg):
        """Record a finished download, or log the failure."""
        nevra = str(payload)
        if status is dnf.callback.STATUS_OK:
            self.downloads[nevra] = payload.download_size
            self.pkgno += 1
            self._update()
            return
        logger.critical("Failed to download '%s': %d - %s", nevra, status, msg)

    def progress(self, payload, done):
        """Update the byte count for an in-flight download."""
        nevra = str(payload)
        self.downloads[nevra] = done
        self._update()

    # dnf 2.5.0 adds a new argument, accept it if it is passed
    # pylint: disable=arguments-differ
    def start(self, total_files, total_size, total_drpms=0):
        """Record the totals before the downloads begin."""
        self.total_files = total_files
        self.total_size = total_size
+
+
class LoraxRpmCallback(dnf.callback.TransactionProgress):
    """Log rpm transaction progress through the pylorax.dnfhelper logger."""

    def __init__(self):
        super(LoraxRpmCallback, self).__init__()
        self._last_ts = None    # last reported ts_done, to suppress duplicates

    def progress(self, package, action, ti_done, ti_total, ts_done, ts_total):
        """Log one line per installed package and the post-install phase."""
        if action == self.PKG_INSTALL:
            # do not report same package twice
            if self._last_ts == ts_done:
                return
            self._last_ts = ts_done
            logger.info('(%d/%d) %s' % (ts_done, ts_total, package))
        elif action == self.TRANS_POST:
            logger.info("Performing post-installation setup tasks")
+
+
+
+#
+# executil.py - subprocess execution utility functions
+#
+# Copyright (C) 1999-2015
+# Red Hat, Inc. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import subprocess
+from subprocess import TimeoutExpired
+import signal
+
+import logging
+log = logging.getLogger("pylorax")
+program_log = logging.getLogger("program")
+
+from threading import Lock
+program_log_lock = Lock()
+
# Extra environment variables handed to child processes (see startProgram).
_child_env = {}

def setenv(name, value):
    """ Set an environment variable to be used by child processes.

    This method does not modify os.environ for the running process, which
    is not thread-safe. If setenv has already been called for a particular
    variable name, the old value is overwritten.

    :param str name: The name of the environment variable
    :param str value: The value of the environment variable
    """
    _child_env[name] = value
+
+
+
class ExecProduct(object):
    """Container for the result of running a process: rc, stdout, stderr."""

    def __init__(self, rc, stdout, stderr):
        (self.rc, self.stdout, self.stderr) = (rc, stdout, stderr)
+
def startProgram(argv, root='/', stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                 env_prune=None, env_add=None, reset_handlers=True, reset_lang=True, **kwargs):
    """ Start an external program and return the Popen object.

    The root and reset_handlers arguments are handled by passing a
    preexec_fn argument to subprocess.Popen, but an additional preexec_fn
    can still be specified and will be run. The user preexec_fn will be run
    last.

    :param argv: The command to run and argument
    :param root: The directory to chroot to before running command.
    :param stdin: The file object to read stdin from.
    :param stdout: The file object to write stdout to.
    :param stderr: The file object to write stderr to.
    :param env_prune: environment variables to remove before execution
    :param env_add: environment variables to add before execution
    :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN
    :param reset_lang: whether to set the locale of the child process to C
    :param kwargs: Additional parameters to pass to subprocess.Popen
    :return: A Popen object for the running command.
    :keyword preexec_fn: A function to run before execution starts.
    """
    # Pull out any caller-supplied preexec_fn; it runs after our own setup.
    user_preexec = kwargs.pop("preexec_fn", None)

    def child_setup():
        # If a target root was specified, chroot into it
        if root and root != '/':
            os.chroot(root)
            os.chdir("/")

        # SIG_IGN dispositions persist across exec; restore SIG_DFL if
        # requested. In particular this covers the SIGPIPE handler that
        # python itself installs.
        if reset_handlers:
            for signum in range(1, signal.NSIG):
                if signal.getsignal(signum) == signal.SIG_IGN:
                    signal.signal(signum, signal.SIG_DFL)

        # Finally run the user's preexec_fn, if one was given
        if user_preexec is not None:
            user_preexec()

    with program_log_lock:
        program_log.info("Running... %s", " ".join(argv))

    env = augmentEnv()
    for var in (env_prune or []):
        env.pop(var, None)

    if reset_lang:
        env["LC_ALL"] = "C"

    if env_add:
        env.update(env_add)

    return subprocess.Popen(argv, stdin=stdin, stdout=stdout, stderr=stderr,
                            close_fds=True, preexec_fn=child_setup,
                            cwd=root, env=env, **kwargs)
+
def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None, log_output=True,
                 binary_output=False, filter_stderr=False, raise_err=False, callback=None,
                 env_add=None, reset_handlers=True, reset_lang=True):
    """ Run an external program, log the output and return it to the caller

    :param argv: The command to run and argument
    :param root: The directory to chroot to before running command.
    :param stdin: The file object to read stdin from.
    :param stdout: Optional file object to write the output to.
    :param env_prune: environment variable to remove before execution
    :param log_output: whether to log the output of command
    :param binary_output: whether to treat the output of command as binary data
    :param filter_stderr: whether to exclude the contents of stderr from the returned output
    :param raise_err: whether to raise a CalledProcessError if the returncode is non-zero
    :param callback: method to call while waiting for process to finish, passed Popen object
    :param env_add: environment variables to add before execution
    :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN
    :param reset_lang: whether to set the locale of the child process to C
    :return: The return code of the command and the output
    :raises: OSError or CalledProcessError
    """
    try:
        # With filter_stderr, stderr goes to its own pipe so it can be kept
        # out of the returned output; otherwise it is merged into stdout.
        if filter_stderr:
            stderr = subprocess.PIPE
        else:
            stderr = subprocess.STDOUT

        proc = startProgram(argv, root=root, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr,
                            env_prune=env_prune, universal_newlines=not binary_output,
                            env_add=env_add, reset_handlers=reset_handlers, reset_lang=reset_lang)

        output_string = None
        err_string = None
        if callback:
            # Poll via 1-second communicate() timeouts so the callback can
            # abort the wait (by returning falsy) while the process runs.
            while callback(proc) and proc.poll() is None:
                try:
                    (output_string, err_string) = proc.communicate(timeout=1)
                    break
                except TimeoutExpired:
                    pass
        else:
            (output_string, err_string) = proc.communicate()
        if output_string:
            if binary_output:
                # Binary data is logged/written as one opaque chunk
                output_lines = [output_string]
            else:
                # Ensure a trailing newline so splitlines covers the tail
                if output_string[-1] != "\n":
                    output_string = output_string + "\n"
                output_lines = output_string.splitlines(True)

            if log_output:
                with program_log_lock:
                    for line in output_lines:
                        program_log.info(line.strip())

            if stdout:
                stdout.write(output_string)

        # If stderr was filtered, log it separately
        if filter_stderr and err_string and log_output:
            err_lines = err_string.splitlines(True)

            with program_log_lock:
                for line in err_lines:
                    program_log.info(line.strip())

    except OSError as e:
        with program_log_lock:
            program_log.error("Error running %s: %s", argv[0], e.strerror)
        raise

    with program_log_lock:
        program_log.debug("Return code: %s", proc.returncode)

    # raise_err turns a non-zero exit into CalledProcessError, carrying the
    # combined stdout+stderr text for the caller to inspect.
    if proc.returncode and raise_err:
        output = (output_string or "") + (err_string or "")
        raise subprocess.CalledProcessError(proc.returncode, argv, output)

    return (proc.returncode, output_string)
+
def execWithRedirect(command, argv, stdin=None, stdout=None, root='/', env_prune=None,
                     log_output=True, binary_output=False, raise_err=False, callback=None,
                     env_add=None, reset_handlers=True, reset_lang=True):
    """ Run an external program and redirect the output to a file.

    :param command: The command to run
    :param argv: The argument list
    :param stdin: The file object to read stdin from.
    :param stdout: Optional file object to redirect stdout and stderr to.
    :param root: The directory to chroot to before running command.
    :param env_prune: environment variable to remove before execution
    :param log_output: whether to log the output of command
    :param binary_output: whether to treat the output of command as binary data
    :param raise_err: whether to raise a CalledProcessError if the returncode is non-zero
    :param callback: method to call while waiting for process to finish, passed Popen object
    :param env_add: environment variables to add before execution
    :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN
    :param reset_lang: whether to set the locale of the child process to C
    :return: The return code of the command
    """
    full_argv = [command] + list(argv)
    rc, _out = _run_program(full_argv, stdin=stdin, stdout=stdout, root=root,
                            env_prune=env_prune, log_output=log_output,
                            binary_output=binary_output, raise_err=raise_err,
                            callback=callback, env_add=env_add,
                            reset_handlers=reset_handlers, reset_lang=reset_lang)
    return rc
+
def execWithCapture(command, argv, stdin=None, root='/', log_output=True, filter_stderr=False,
                    raise_err=False, callback=None, env_add=None, reset_handlers=True, reset_lang=True):
    """ Run an external program and capture standard out and err.

    :param command: The command to run
    :param argv: The argument list
    :param stdin: The file object to read stdin from.
    :param root: The directory to chroot to before running command.
    :param log_output: Whether to log the output of command
    :param filter_stderr: Whether stderr should be excluded from the returned output
    :param raise_err: whether to raise a CalledProcessError if the returncode is non-zero
    :param callback: method to call while waiting for process to finish, passed Popen object
    :param env_add: environment variables to add before execution
    :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN
    :param reset_lang: whether to set the locale of the child process to C
    :return: The output of the command
    """
    full_argv = [command] + list(argv)
    _rc, out = _run_program(full_argv, stdin=stdin, root=root, log_output=log_output,
                            filter_stderr=filter_stderr, raise_err=raise_err,
                            callback=callback, env_add=env_add,
                            reset_handlers=reset_handlers, reset_lang=reset_lang)
    return out
+
def execReadlines(command, argv, stdin=None, root='/', env_prune=None, filter_stderr=False,
                  callback=lambda x: True, env_add=None, reset_handlers=True, reset_lang=True):
    """ Execute an external command and return the line output of the command
    in real-time.

    This method assumes that there is a reasonably low delay between the
    end of output and the process exiting. If the child process closes
    stdout and then keeps on truckin' there will be problems.

    NOTE/WARNING: UnicodeDecodeError will be raised if the output of the
    external command can't be decoded as UTF-8.

    :param command: The command to run
    :param argv: The argument list
    :param stdin: The file object to read stdin from.
    :param root: The directory to chroot to before running command.
    :param env_prune: environment variable to remove before execution
    :param filter_stderr: Whether stderr should be excluded from the returned output
    :param callback: method to call while waiting for process to finish, passed Popen object
    :param env_add: environment variables to add before execution
    :param reset_handlers: whether to reset to SIG_DFL any signal handlers set to SIG_IGN
    :param reset_lang: whether to set the locale of the child process to C
    :return: Iterator of the lines from the command

    Output from the file is not logged to program.log
    This returns an iterator with the lines from the command until it has finished
    """

    class ExecLineReader(object):
        """Iterator class for returning lines from a process and cleaning
        up the process when the output is no longer needed.
        """

        def __init__(self, proc, argv, callback):
            self._proc = proc       # the running Popen object
            self._argv = argv       # kept only for error messages
            self._callback = callback

        def __iter__(self):
            return self

        def __del__(self):
            # See if the process is still running
            if self._proc.poll() is None:
                # Stop the process and ignore any problems that might arise
                try:
                    self._proc.terminate()
                except OSError:
                    pass

        def __next__(self):
            # Read the next line, blocking if a line is not yet available
            line = self._proc.stdout.readline().decode("utf-8")
            if line == '' or not self._callback(self._proc):
                # Output finished, wait for the process to end
                self._proc.communicate()

                # Check for successful exit
                if self._proc.returncode < 0:
                    # Negative returncode means the process died from a signal
                    raise OSError("process '%s' was killed by signal %s" %
                                  (self._argv, -self._proc.returncode))
                elif self._proc.returncode > 0:
                    raise OSError("process '%s' exited with status %s" %
                                  (self._argv, self._proc.returncode))
                raise StopIteration

            return line.strip()

    argv = [command] + argv

    # With filter_stderr the child's stderr is discarded entirely;
    # otherwise it is merged into the iterated stdout stream.
    if filter_stderr:
        stderr = subprocess.DEVNULL
    else:
        stderr = subprocess.STDOUT

    try:
        # bufsize=1 requests line buffering so lines arrive promptly
        proc = startProgram(argv, root=root, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr, bufsize=1,
                            env_prune=env_prune, env_add=env_add, reset_handlers=reset_handlers, reset_lang=reset_lang)
    except OSError as e:
        with program_log_lock:
            program_log.error("Error running %s: %s", argv[0], e.strerror)
        raise

    return ExecLineReader(proc, argv, callback)
+
def runcmd(cmd, **kwargs):
    """ Run cmd with execWithRedirect, forcing raise_err=True so a non-zero
    exit raises CalledProcessError.

    :param cmd: full command list; cmd[0] is the program, the rest its args
    """
    kwargs["raise_err"] = True
    command, args = cmd[0], cmd[1:]
    return execWithRedirect(command, args, **kwargs)
+
def runcmd_output(cmd, **kwargs):
    """ Run cmd with execWithCapture, forcing raise_err=True so a non-zero
    exit raises CalledProcessError; returns the captured output.

    :param cmd: full command list; cmd[0] is the program, the rest its args
    """
    kwargs["raise_err"] = True
    command, args = cmd[0], cmd[1:]
    return execWithCapture(command, args, **kwargs)
+
+# imgutils.py - utility functions/classes for building disk images
+#
+# Copyright (C) 2011-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s): Will Woods <wwoods@redhat.com>
+
+import logging
+logger = logging.getLogger("pylorax.imgutils")
+
+import os, tempfile
+from os.path import join, dirname
+from subprocess import Popen, PIPE, CalledProcessError
+import sys
+import time
+import traceback
+import multiprocessing
+from time import sleep
+import shutil
+
+from pylorax.sysutils import cpfile
+from pylorax.executils import execWithRedirect, execWithCapture
+from pylorax.executils import runcmd, runcmd_output
+
+######## Functions for making container images (cpio, tar, squashfs) ##########
+
def compress(command, rootdir, outfile, compression="xz", compressargs=None):
    '''Make a compressed archive of the given rootdir.

    :param command: the archiver command to run (a list, eg. cpio or tar args)
    :param rootdir: the directory tree to archive
    :param outfile: path of the compressed output file
    :param compression: "xz", "gzip", "lzma", "bzip2", or None for no compression
    :param compressargs: extra arguments for the compression command
    :returns: the compressor's return code, or 1 if an OSError occurred
    :raises ValueError: if compression is not one of the supported types
    '''
    if compression not in (None, "xz", "gzip", "lzma", "bzip2"):
        raise ValueError("Unknown compression type %s" % compression)
    # Work on a copy so the insert() calls below never mutate the caller's list
    compressargs = list(compressargs) if compressargs else ["-9"]
    if compression == "xz":
        compressargs.insert(0, "--check=crc32")
    if compression is None:
        compression = "cat" # this is a little silly
        compressargs = []

    # make compression run with multiple threads if possible
    if compression in ("xz", "lzma"):
        compressargs.insert(0, "-T%d" % multiprocessing.cpu_count())
    elif compression == "gzip":
        compression = "pigz"
        compressargs.insert(0, "-p%d" % multiprocessing.cpu_count())
    elif compression == "bzip2":
        compression = "pbzip2"
        compressargs.insert(0, "-p%d" % multiprocessing.cpu_count())

    logger.debug("find %s -print0 |%s | %s %s > %s", rootdir, " ".join(command),
                 compression, " ".join(compressargs), outfile)
    find, archive, comp = None, None, None
    try:
        # Open via a context manager so the output fd is closed even on error
        with open(outfile, "wb") as fout:
            find = Popen(["find", ".", "-print0"], stdout=PIPE, cwd=rootdir)
            archive = Popen(command, stdin=find.stdout, stdout=PIPE, cwd=rootdir)
            comp = Popen([compression] + compressargs,
                         stdin=archive.stdout, stdout=fout)
            comp.wait()
        return comp.returncode
    except OSError as e:
        logger.error(e)
        # Kill off any hanging processes
        list(p.kill() for p in (find, archive, comp) if p)
        return 1
+
def mkcpio(rootdir, outfile, compression="xz", compressargs=None):
    """Make a compressed cpio archive (newc format) of rootdir."""
    cpio_cmd = ["cpio", "--null", "--quiet", "-H", "newc", "-o"]
    return compress(cpio_cmd, rootdir, outfile, compression, compressargs or ["-9"])
+
def mktar(rootdir, outfile, compression="xz", compressargs=None, selinux=True):
    """Make a compressed tar archive of rootdir.

    With selinux=True the selinux contexts, acls and xattrs are included.
    """
    compressargs = compressargs or ["-9"]
    cmd = ["tar", "--no-recursion"]
    if selinux:
        cmd.extend(["--selinux", "--acls", "--xattrs"])
    cmd.extend(["-cf-", "--null", "-T-"])
    return compress(cmd, rootdir, outfile, compression, compressargs)
+
def mksquashfs(rootdir, outfile, compression="default", compressargs=None):
    '''Make a squashfs image containing the given rootdir.
    compression "default" leaves the compressor choice to mksquashfs.'''
    args = list(compressargs) if compressargs else []
    if compression != "default":
        args = ["-comp", compression] + args
    return execWithRedirect("mksquashfs", [rootdir, outfile] + args)
+
def mkrootfsimg(rootdir, outfile, label, size=2, sysroot=""):
    """
    Make rootfs image from a directory

    Builds an ext4 image of the directory (via mkext4img -- presumably
    defined elsewhere in this module; behavior not visible here) and then
    relabels the new filesystem with setfiles so SELinux contexts are set.

    :param str rootdir: Root directory
    :param str outfile: Path of output image file
    :param str label: Filesystem label
    :param int size: Size of the image in GiB, if None computed automatically
    :param str sysroot: path to system (deployment) root relative to physical root
    :raises CalledProcessError: if setfiles fails (eg. SELinux unavailable)
    """
    if size:
        fssize = size * (1024*1024*1024) # 2GB sparse file compresses down to nothin'
    else:
        fssize = None # Let mkext4img figure out the needed size

    mkext4img(rootdir, outfile, label=label, size=fssize)
    # Reset selinux context on new rootfs
    with LoopDev(outfile) as loopdev:
        with Mount(loopdev) as mnt:
            # Pseudo-filesystems and install/ostree trees are excluded from relabeling
            cmd = [ "setfiles", "-e", "/proc", "-e", "/sys", "-e", "/dev",
                    "-e", "/install", "-e", "/ostree",
                    "/etc/selinux/targeted/contexts/files/file_contexts", "/"]
            root = join(mnt, sysroot.lstrip("/"))
            try:
                runcmd(cmd, root=root)
            except CalledProcessError as e:
                logger.error("setfiles exited with a non-zero return code (%d) which may "
                             "be caused by running without SELinux in Permissive mode.", e.returncode)
                raise
+
+
+######## Utility functions ###############################################
+
def mksparse(outfile, size):
    '''Create a sparse file of the given size using os.ftruncate.

    :param outfile: path of the file to create (created/truncated)
    :param size: desired file size in bytes
    '''
    # Use a context manager so the file object is closed instead of leaked
    with open(outfile, "w") as fobj:
        os.ftruncate(fobj.fileno(), size)
+
def mkqcow2(outfile, size, options=None):
    '''Create an image file of the given size with qemu-img.
    options is a list of options passed to qemu-img.

    Default format is qcow2, override by passing "-f", fmt
    in options.
    '''
    # Thin alias for mkqemu_img, kept for callers using the older name
    mkqemu_img(outfile, size, options)
+
def mkqemu_img(outfile, size, options=None):
    '''use qemu-img to create a file of the given size.

    :param outfile: path of the image file to create
    :param size: size of the image (bytes, or a qemu-img size string)
    :param options: list of extra options passed to qemu-img

    Default format is qcow2, override by passing "-f", fmt
    in options.
    Raises CalledProcessError if qemu-img fails.
    '''
    # Work on a copy so extend() never mutates the caller's options list
    options = list(options) if options else []
    if "-f" not in options:
        options.extend(["-f", "qcow2"])
    runcmd(["qemu-img", "create"] + options + [outfile, str(size)])
+
def loop_waitfor(loop_dev, outfile):
    """Make sure the loop device is attached to the outfile.

    losetup can occasionally return before /dev/loopX is ready for use,
    which breaks the following mkfs. Poll until the loop device really is
    associated with the backing file.

    Raise RuntimeError if it isn't setup after 5 tries.
    """
    for _ in range(5):
        runcmd(["udevadm", "settle", "--timeout", "300"])
        ## XXX losetup --list output can be truncated to 64 bytes in some
        ## situations. Don't use it to lookup backing file, go the other way
        ## and lookup the loop for the backing file. See util-linux
        ## lib/loopdev.c loopcxt_get_backing_file()
        if get_loop_name(outfile) == os.path.basename(loop_dev):
            return

        # If this really is a race, give it some time to settle down
        time.sleep(1)

    raise RuntimeError("Unable to setup %s on %s" % (loop_dev, outfile))
+
def loop_attach(outfile):
    '''Attach a loop device to the given file. Return the loop device name.
    Raises CalledProcessError if losetup fails.'''
    dev = runcmd_output(["losetup", "--find", "--show", outfile]).strip()

    # losetup may return before the device is usable; double-check first
    loop_waitfor(dev, outfile)
    return dev
+
def loop_detach(loopdev):
    '''Detach the given loop device. Return False on failure.'''
    rc = execWithRedirect("losetup", ["--detach", loopdev])
    return rc == 0
+
def get_loop_name(path):
    '''Return the loop device associated with the path.
    Raises RuntimeError if more than one loop is associated'''
    buf = runcmd_output(["losetup", "-j", path])
    if len(buf.splitlines()) > 1:
        # there should never be more than one loop device listed
        raise RuntimeError("multiple loops associated with %s" % path)
    # losetup -j prints "/dev/loopN: ..." -- keep just the device name
    return os.path.basename(buf.split(":")[0])
+
def dm_attach(dev, size, name=None):
    '''Attach a devicemapper device to the given device, with the given size.
    If name is None, a random name will be chosen. Returns the device name.
    raises CalledProcessError if dmsetup fails.'''
    if name is None:
        # mktemp is used here purely as a unique-name generator for dmsetup
        name = tempfile.mktemp(prefix="lorax.imgutils.", dir="")
    table = "0 %i linear %s 0" % (size/512, dev)
    runcmd(["dmsetup", "create", name, "--table", table])
    return name
+
def dm_detach(dev):
    '''Detach the named devicemapper device.

    NOTE(review): despite earlier docs claiming "False on failure", this
    returns the dmsetup return code (0 on success, non-zero on failure) --
    callers should compare the result against 0.
    '''
    dev = dev.replace("/dev/mapper/", "") # strip prefix, if it's there
    return execWithRedirect("dmsetup", ["remove", dev])
+
def mount(dev, opts="", mnt=None):
    '''Mount the given device at the given mountpoint, using the given opts.
    opts should be a comma-separated string of mount options.
    if mnt is none, a temporary directory will be created and its path will be
    returned.
    raises CalledProcessError if mount fails.'''
    if mnt is None:
        mnt = tempfile.mkdtemp(prefix="lorax.imgutils.")
        logger.debug("make tmp mountdir %s", mnt)
    cmd = ["mount"]
    if opts:
        cmd.extend(["-o", opts])
    cmd.extend([dev, mnt])
    runcmd(cmd)
    return mnt
+
def umount(mnt, lazy=False, maxretry=3, retrysleep=1.0, delete=True):
    '''Unmount the given mountpoint. If lazy is True, do a lazy umount (-l).
    If the mount was a temporary dir created by mount, it will be deleted.
    raises CalledProcessError if umount fails after maxretry attempts.

    :param mnt: the mountpoint to unmount
    :param lazy: do a lazy umount (-l)
    :param maxretry: number of times to retry a failed umount
    :param retrysleep: seconds to sleep between retries
    :param delete: remove a temporary mountdir created by mount()
    :returns: True if the umount succeeded
    '''
    cmd = ["umount"]
    if lazy: cmd += ["-l"]
    cmd += [mnt]
    count = 0
    rv = None    # fix: was unbound (NameError at return) when maxretry < 1
    while maxretry > 0:
        try:
            rv = runcmd(cmd)
        except CalledProcessError:
            count += 1
            if count == maxretry:
                raise
            logger.warning("failed to unmount %s. retrying (%d/%d)...",
                           mnt, count, maxretry)
            if logger.getEffectiveLevel() <= logging.DEBUG:
                # Show who is keeping the mountpoint busy
                fuser = execWithCapture("fuser", ["-vm", mnt])
                logger.debug("fuser -vm:\n%s\n", fuser)
            sleep(retrysleep)
        else:
            break
    if delete and 'lorax.imgutils' in mnt:
        os.rmdir(mnt)
        logger.debug("remove tmp mountdir %s", mnt)
    return (rv == 0)
+
def copytree(src, dest, preserve=True):
    '''Copy a tree of files using cp -a, thus preserving modes, timestamps,
    links, acls, sparse files, xattrs, selinux contexts, etc.
    If preserve is False, uses cp -R (useful for modeless filesystems)
    raises CalledProcessError if copy fails.'''
    logger.debug("copytree %s %s", src, dest)
    if preserve:
        cmd = ["cp", "-a"]
    else:
        # -L dereferences symlinks; modeless targets cannot store them
        cmd = ["cp", "-R", "-L"]
    cmd.extend([join(src, "."), os.path.abspath(dest)])
    runcmd(cmd)
+
def do_grafts(grafts, dest, preserve=True):
    '''Copy each of the items listed in grafts into dest.
    If the key ends with '/' it's assumed to be a directory which should be
    created, otherwise just the leading directories will be created.'''
    for imgpath, filename in grafts.items():
        if imgpath[-1] == '/':
            # trailing slash: the graft target itself is a directory
            targetdir = join(dest, imgpath)
            imgpath = imgpath[:-1]
        else:
            targetdir = join(dest, dirname(imgpath))
        if not os.path.isdir(targetdir):
            os.makedirs(targetdir)
        if os.path.isdir(filename):
            copytree(filename, join(dest, imgpath), preserve)
        else:
            cpfile(filename, join(dest, imgpath))
+
def round_to_blocks(size, blocksize):
    '''If size isn't a multiple of blocksize, round up to the next multiple.
    A size of 0 is rounded up to one full block.'''
    remainder = size % blocksize
    if remainder or not size:
        size += blocksize - remainder
    return size
+
# TODO: move filesystem data outside this function
def estimate_size(rootdir, graft=None, fstype=None, blocksize=4096, overhead=256):
    '''Estimate the size (in bytes) of a fstype filesystem big enough to hold
    rootdir plus everything listed in graft, including per-fs overhead.'''
    graft = graft or {}
    getsize = lambda f: os.lstat(f).st_size
    if fstype == "btrfs":
        overhead = 64*1024 # don't worry, it's all sparse
    if fstype == "hfsplus":
        overhead = 200 # hack to deal with two bootloader copies
    if fstype in ("vfat", "msdos"):
        blocksize = 2048
        getsize = lambda f: os.stat(f).st_size # no symlinks, count as copies
    total = overhead * blocksize
    roots = list(graft.values())
    if rootdir:
        roots.append(rootdir)
    for tree in roots:
        for top, subdirs, filenames in os.walk(tree):
            for entry in filenames + subdirs:
                total += round_to_blocks(getsize(join(top, entry)), blocksize)
    if fstype == "btrfs":
        total = max(256*1024*1024, total) # btrfs minimum size: 256MB
    logger.info("Size of %s block %s fs at %s estimated to be %s", blocksize, fstype, rootdir, total)
    return total
+
+######## Execution contexts - use with the 'with' statement ##############
+
class LoopDev(object):
    """Context manager that attaches a file to a loop device.

    If size is given, a sparse file of that size is created first.
    Entering yields the loop device path; exiting detaches it."""
    def __init__(self, filename, size=None):
        self.loopdev = None
        self.filename = filename
        if size:
            mksparse(self.filename, size)

    def __enter__(self):
        self.loopdev = loop_attach(self.filename)
        return self.loopdev

    def __exit__(self, exc_type, exc_value, tracebk):
        loop_detach(self.loopdev)
+
class DMDev(object):
    """Context manager wrapping a device-mapper mapping of dev.

    Entering yields the mapper device name; exiting removes the mapping."""
    def __init__(self, dev, size, name=None):
        self.mapperdev = None
        self.dev = dev
        self.size = size
        self.name = name

    def __enter__(self):
        self.mapperdev = dm_attach(self.dev, self.size, self.name)
        return self.mapperdev

    def __exit__(self, exc_type, exc_value, tracebk):
        dm_detach(self.mapperdev)
+
class Mount(object):
    """Context manager that mounts dev (via mount()) and unmounts on exit.

    Entering yields the mountpoint path."""
    def __init__(self, dev, opts="", mnt=None):
        self.dev = dev
        self.opts = opts
        self.mnt = mnt

    def __enter__(self):
        self.mnt = mount(self.dev, self.opts, self.mnt)
        return self.mnt

    def __exit__(self, exc_type, exc_value, tracebk):
        umount(self.mnt)
+
class PartitionMount(object):
    """ Mount a partitioned image file using kpartx """
    def __init__(self, disk_img, mount_ok=None, submount=None):
        """
        :param str disk_img: The full path to a partitioned disk image
        :param mount_ok: A function that is passed the mount point and
                         returns True if it should be mounted.
        :param str submount: Directory inside mount_dir to mount at

        If mount_ok is not set it will look for /etc/passwd

        If the partition is found it will be mounted under a temporary
        directory and self.temp_dir set to it. If submount is passed it will be
        created and mounted there instead, with self.mount_dir set to point to
        it. self.mount_dev is set to the loop device, and self.mount_size is
        set to the size of the partition.

        When no subdir is passed self.temp_dir and self.mount_dir will be the same.
        """
        self.mount_dev = None
        self.mount_size = None
        self.mount_dir = None
        self.disk_img = disk_img
        self.submount = submount
        self.temp_dir = None
        # Default is to mount partition with /etc/passwd
        self.mount_ok = mount_ok or (lambda mount_dir: os.path.isfile(mount_dir+"/etc/passwd"))

        # Example kpartx output
        # kpartx -p p -v -a /tmp/diskV2DiCW.im
        # add map loop2p1 (253:2): 0 3481600 linear /dev/loop2 2048
        # add map loop2p2 (253:3): 0 614400 linear /dev/loop2 3483648
        kpartx_output = runcmd_output(["kpartx", "-v", "-a", "-s", self.disk_img])
        logger.debug(kpartx_output)

        # list of (deviceName, sizeInBytes)
        self.loop_devices = []
        for line in kpartx_output.splitlines():
            # add map loop2p3 (253:4): 0 7139328 linear /dev/loop2 528384
            # 3rd element is size in 512 byte blocks
            if not line.startswith("add map "):
                continue
            fields = line[8:].split()
            self.loop_devices.append((fields[0], int(fields[3]) * 512))

    def __enter__(self):
        # Mount the device selected by mount_ok, if possible
        self.temp_dir = tempfile.mkdtemp()
        if self.submount:
            mount_dir = os.path.normpath(os.path.sep.join([self.temp_dir, self.submount]))
            os.makedirs(mount_dir, mode=0o755, exist_ok=True)
        else:
            mount_dir = self.temp_dir
        for dev, size in self.loop_devices:
            try:
                mount("/dev/mapper/"+dev, mnt=mount_dir)
                if self.mount_ok(mount_dir):
                    self.mount_dir = mount_dir
                    self.mount_dev = dev
                    self.mount_size = size
                    break
                # not the partition we want; unmount and try the next one
                umount(mount_dir)
            except CalledProcessError:
                logger.debug(traceback.format_exc())
        if self.mount_dir:
            logger.info("Partition mounted on %s size=%s", self.mount_dir, self.mount_size)
        else:
            logger.debug("Unable to mount anything from %s", self.disk_img)
            os.rmdir(self.temp_dir)
            self.temp_dir = None
        return self

    def __exit__(self, exc_type, exc_value, tracebk):
        if self.temp_dir:
            umount(self.mount_dir)
            shutil.rmtree(self.temp_dir)
            self.mount_dir = None
            self.temp_dir = None
        execWithRedirect("kpartx", ["-d", "-s", self.disk_img])
+
+
+######## Functions for making filesystem images ##########################
+
def mkfsimage(fstype, rootdir, outfile, size=None, mkfsargs=None, mountargs="", graft=None):
    '''Generic filesystem image creation function.
    fstype should be a filesystem type - "mkfs.${fstype}" must exist.
    graft should be a dict: {"some/path/in/image": "local/file/or/dir"};
    if the path ends with a '/' it's assumed to be a directory.
    Will raise CalledProcessError if something goes wrong.'''
    mkfsargs = mkfsargs or []
    graft = graft or {}
    # FAT filesystems can't hold modes/owners, so don't try to preserve them
    preserve = fstype not in ("msdos", "vfat")
    if not size:
        size = estimate_size(rootdir, graft, fstype)
    with LoopDev(outfile, size) as loopdev:
        try:
            runcmd(["mkfs.%s" % fstype] + mkfsargs + [loopdev])
        except CalledProcessError as e:
            logger.error("mkfs exited with a non-zero return code: %d", e.returncode)
            logger.error(e.output)
            sys.exit(e.returncode)

        with Mount(loopdev, mountargs) as mnt:
            if rootdir:
                copytree(rootdir, mnt, preserve)
            do_grafts(graft, mnt, preserve)

        # Make absolutely sure that the data has been written
        runcmd(["sync"])
+
# convenience functions with useful defaults
def mkdosimg(rootdir, outfile, size=None, label="", mountargs="shortname=winnt,umask=0077", graft=None):
    """Make a msdos (FAT) filesystem image of rootdir at outfile."""
    mkfsimage("msdos", rootdir, outfile, size, mountargs=mountargs,
              mkfsargs=["-n", label], graft=graft or {})
+
def mkext4img(rootdir, outfile, size=None, label="", mountargs="", graft=None):
    """Make an ext4 filesystem image of rootdir at outfile."""
    mkfsimage("ext4", rootdir, outfile, size, mountargs=mountargs,
              mkfsargs=["-L", label, "-b", "4096", "-m", "0"], graft=graft or {})
+
def mkbtrfsimg(rootdir, outfile, size=None, label="", mountargs="", graft=None):
    """Make a btrfs filesystem image of rootdir at outfile."""
    mkfsimage("btrfs", rootdir, outfile, size, mountargs=mountargs,
              mkfsargs=["-L", label], graft=graft or {})
+
def mkhfsimg(rootdir, outfile, size=None, label="", mountargs="", graft=None):
    """Make a hfsplus filesystem image of rootdir at outfile."""
    mkfsimage("hfsplus", rootdir, outfile, size, mountargs=mountargs,
              mkfsargs=["-v", label], graft=graft or {})
+
+#
+# ltmpl.py
+#
+# Copyright (C) 2009-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+# Will Woods <wwoods@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.ltmpl")
+
+import os, re, glob, shlex, fnmatch
+from os.path import basename, isdir
+from subprocess import CalledProcessError
+import shutil
+
+from pylorax.sysutils import joinpaths, cpfile, mvfile, replace, remove
+from pylorax.dnfhelper import LoraxDownloadCallback, LoraxRpmCallback
+from pylorax.base import DataHolder
+from pylorax.executils import runcmd, runcmd_output
+from pylorax.imgutils import mkcpio
+
+from mako.lookup import TemplateLookup
+from mako.exceptions import text_error_template
+import sys, traceback
+import struct
+import dnf
+import collections
+
class LoraxTemplate(object):
    """Render a Mako template file into a list of command token-lists."""
    def __init__(self, directories=None):
        directories = directories or ["/usr/share/lorax"]
        # we have to add ["/"] to the template lookup directories or the
        # file includes won't work properly for absolute paths
        self.directories = ["/"] + directories

    def parse(self, template_file, variables):
        """Render template_file with variables and return a list of
        shlex-split, brace-expanded token lists, one per meaningful line.
        Blank lines and '#' comments are dropped."""
        lookup = TemplateLookup(directories=self.directories)
        template = lookup.get_template(template_file)

        try:
            textbuf = template.render(**variables)
        except:
            # log the Mako error with context before re-raising
            logger.error("Problem rendering %s (%s):", template_file, variables)
            logger.error(text_error_template().render())
            raise

        # split, strip and remove empty lines and comments
        stripped = (line.strip() for line in textbuf.splitlines())
        kept = [line for line in stripped if line and not line.startswith("#")]

        # split with shlex and perform brace expansion
        return [split_and_expand(line) for line in kept]
+
def split_and_expand(line):
    '''shlex-split line, then brace-expand each resulting word.'''
    tokens = []
    for word in shlex.split(line):
        tokens.extend(brace_expand(word))
    return tokens
+
def brace_expand(s):
    '''Generate each string produced by shell-style {a,b,...} expansion of s.
    Strings with no complete {..,..} group are yielded unchanged.'''
    if not ('{' in s and ',' in s and '}' in s):
        yield s
        return
    # expand the innermost-leftmost group, then recurse on the result
    right = s.find('}')
    left = s[:right].rfind('{')
    prefix, choices, suffix = s[:left], s[left+1:right], s[right+1:]
    for choice in choices.split(','):
        yield from brace_expand(prefix + choice + suffix)
+
def rglob(pathname, root="/", fatal=False):
    '''Yield each unique path matching pathname under root, relative to root.
    If fatal is True, raise IOError when nothing matched.'''
    seen = set()
    rootlen = len(root) + 1
    for match in glob.iglob(joinpaths(root, pathname)):
        if match in seen:
            continue
        seen.add(match)
        yield match[rootlen:] # remove the root to produce relative path
    if fatal and not seen:
        raise IOError("nothing matching %s in %s" % (pathname, root))
+
def rexists(pathname, root=""):
    '''Return True if anything under root matches pathname.'''
    # A generator object is always truthy even when empty, so consume
    # it instead of calling bool(rglob(...)).
    return any(True for _path in rglob(pathname, root))
+
# TODO: operate inside an actual chroot for safety? Not that RPM bothers..
class LoraxTemplateRunner(object):
    '''
    This class parses and executes Lorax templates. Sample usage:

      # install a bunch of packages
      runner = LoraxTemplateRunner(inroot=rundir, outroot=rundir, dbo=dnf_obj)
      runner.run("install-packages.ltmpl")

      # modify a runtime dir
      runner = LoraxTemplateRunner(inroot=rundir, outroot=newrun)
      runner.run("runtime-transmogrify.ltmpl")

    NOTES:

    * Parsing procedure is roughly:
      1. Mako template expansion (on the whole file)
      2. For each line of the result,
         a. Whitespace splitting (using shlex.split())
         b. Brace expansion (using brace_expand())
         c. If the first token is the name of a function, call that function
            with the rest of the line as arguments

    * Parsing and execution are *separate* passes - so you can't use the result
      of a command in an %if statement (or any other control statements)!

    * Commands that run external programs (systemctl, gconfset) currently use
      the *host*'s copy of that program, which may cause problems if there's a
      big enough difference between the host and the image you're modifying.

    * The commands are not executed under a real chroot, so absolute symlinks
      will point *outside* the inroot/outroot. Be careful with symlinks!

    ADDING NEW COMMANDS:

    * Each template command is just a method of the LoraxTemplateRunner
      object - so adding a new command is as easy as adding a new function.

    * Each function gets arguments that correspond to the rest of the tokens
      on that line (after word splitting and brace expansion)

    * Commands should raise exceptions for errors - don't use sys.exit()
    '''
    def __init__(self, inroot, outroot, dbo=None, fatalerrors=True,
                 templatedir=None, defaults=None):
        self.inroot = inroot
        self.outroot = outroot
        self.dbo = dbo
        self.fatalerrors = fatalerrors
        self.templatedir = templatedir or "/usr/share/lorax"
        self.templatefile = None
        # some builtin methods
        self.builtins = DataHolder(exists=lambda p: rexists(p, root=inroot),
                                   glob=lambda g: list(rglob(g, root=inroot)))
        self.defaults = defaults or {}
        self.results = DataHolder(treeinfo=dict()) # just treeinfo for now
        # TODO: set up custom logger with a filter to add line info

    def _out(self, path):
        """Join path onto the output root."""
        return joinpaths(self.outroot, path)

    def _in(self, path):
        """Join path onto the input root."""
        return joinpaths(self.inroot, path)
+
+ def _filelist(self, *pkgs):
+ """ Return the list of files in the packages """
+ pkglist = []
+ for pkg_glob in pkgs:
+ pkglist += list(self.dbo.sack.query().installed().filter(name__glob=pkg_glob))
+
+ # dnf/hawkey doesn't make any distinction between file, dir or ghost like yum did
+ # so only return the files.
+ return set(f for pkg in pkglist for f in pkg.files if not os.path.isdir(self._out(f)))
+
+ def _getsize(self, *files):
+ return sum(os.path.getsize(self._out(f)) for f in files if os.path.isfile(self._out(f)))
+
+ def _write_debuginfo_log(self):
+ """
+ Write a list of debuginfo packages to /root/debug-pkgs.log
+
+ If lorax is called with a debug repo find the corresponding debuginfo package
+ names and write them to /root/debubg-pkgs.log on the boot.iso
+ """
+ for repo in self.dbo.repos:
+ repo = self.dbo.repos[repo]
+ if any(True for url in repo.baseurl if "debug" in url):
+ break
+ if repo.metalink and "debug" in repo.metalink:
+ break
+ if repo.mirrorlist and "debug" in repo.mirrorlist:
+ break
+ else:
+ # No debug repos
+ return
+
+ available = self.dbo.sack.query().available()
+ debug_pkgs = []
+ for p in list(self.dbo.transaction.install_set):
+ if available.filter(name=p.name+"-debuginfo"):
+ debug_pkgs += ["{0.name}-debuginfo-{0.epoch}:{0.version}-{0.release}".format(p)]
+
+ os.makedirs(self._out("root/"), exist_ok=True)
+ with open(self._out("root/debug-pkgs.log"), "w") as f:
+ for pkg in debug_pkgs:
+ f.write("%s\n" % pkg)
+
+
+[docs] def run(self, templatefile, **variables):
+ for k,v in list(self.defaults.items()) + list(self.builtins.items()):
+ variables.setdefault(k,v)
+ logger.debug("executing %s with variables=%s", templatefile, variables)
+ self.templatefile = templatefile
+ t = LoraxTemplate(directories=[self.templatedir])
+ commands = t.parse(templatefile, variables)
+ self._run(commands)
+
+
+ def _run(self, parsed_template):
+ logger.info("running %s", self.templatefile)
+ for (num, line) in enumerate(parsed_template,1):
+ logger.debug("template line %i: %s", num, " ".join(line))
+ skiperror = False
+ (cmd, args) = (line[0], line[1:])
+ # Following Makefile convention, if the command is prefixed with
+ # a dash ('-'), we'll ignore any errors on that line.
+ if cmd.startswith('-'):
+ cmd = cmd[1:]
+ skiperror = True
+ try:
+ # grab the method named in cmd and pass it the given arguments
+ f = getattr(self, cmd, None)
+ if cmd[0] == '_' or cmd == 'run' or not isinstance(f, collections.Callable):
+ raise ValueError("unknown command %s" % cmd)
+ f(*args)
+ except Exception: # pylint: disable=broad-except
+ if skiperror:
+ logger.debug("ignoring error")
+ continue
+ logger.error("template command error in %s:", self.templatefile)
+ logger.error(" %s", " ".join(line))
+ # format the exception traceback
+ exclines = traceback.format_exception(*sys.exc_info())
+ # skip the bit about "ltmpl.py, in _run()" - we know that
+ exclines.pop(1)
+ # log the "ErrorType: this is what happened" line
+ logger.error(" " + exclines[-1].strip())
+ # and log the entire traceback to the debug log
+ for _line in ''.join(exclines).splitlines():
+ logger.debug(" " + _line)
+ if self.fatalerrors:
+ raise
+
+[docs] def install(self, srcglob, dest):
+ '''
+ install SRC DEST
+ Copy the given file (or files, if a glob is used) from the input
+ tree to the given destination in the output tree.
+ The path to DEST must exist in the output tree.
+ If DEST is a directory, SRC will be copied into that directory.
+ If DEST doesn't exist, SRC will be copied to a file with that name,
+ assuming the rest of the path exists.
+ This is pretty much like how the 'cp' command works.
+ Examples:
+ install usr/share/myconfig/grub.conf /boot
+ install /usr/share/myconfig/grub.conf.in /boot/grub.conf
+ '''
+ for src in rglob(self._in(srcglob), fatal=True):
+ try:
+ cpfile(src, self._out(dest))
+ except shutil.Error as e:
+ logger.error(e)
+
+[docs] def installimg(self, *args):
+ '''
+ installimg [--xz|--gzip|--bzip2|--lzma] [-ARG|--ARG=OPTION] SRCDIR DESTFILE
+ Create a compressed cpio archive of the contents of SRCDIR and place
+ it in DESTFILE.
+
+ If SRCDIR doesn't exist or is empty nothing is created.
+
+ Examples:
+ installimg ${LORAXDIR}/product/ images/product.img
+ installimg ${LORAXDIR}/updates/ images/updates.img
+ installimg --xz -6 ${LORAXDIR}/updates/ images/updates.img
+ installimg --xz -9 --memlimit-compress=3700MiB ${LORAXDIR}/updates/ images/updates.img
+
+ Optionally use a different compression type and override the default args
+ passed to it. The default is xz -9
+ '''
+ COMPRESSORS = ("--xz", "--gzip", "--bzip2", "--lzma")
+ if len(args) < 2:
+ raise ValueError("Not enough args for installimg.")
+
+ srcdir = args[-2]
+ destfile = args[-1]
+ if not os.path.isdir(self._in(srcdir)) or not os.listdir(self._in(srcdir)):
+ return
+
+ compression = "xz"
+ compressargs = []
+ if args[0] in COMPRESSORS:
+ compression = args[0][2:]
+
+ for arg in args[1:-2]:
+ if arg.startswith('-'):
+ compressargs.append(arg)
+ else:
+ raise ValueError("Argument is missing -")
+
+ logger.info("Creating image file %s from contents of %s", self._out(destfile), self._in(srcdir))
+ logger.debug("Using %s %s compression", compression, compressargs or "")
+ mkcpio(self._in(srcdir), self._out(destfile), compression=compression, compressargs=compressargs)
+
+[docs] def mkdir(self, *dirs):
+ '''
+ mkdir DIR [DIR ...]
+ Create the named DIR(s). Will create leading directories as needed.
+ Example:
+ mkdir /images
+ '''
+ for d in dirs:
+ d = self._out(d)
+ if not isdir(d):
+ os.makedirs(d)
+
+[docs] def replace(self, pat, repl, *fileglobs):
+ '''
+ replace PATTERN REPLACEMENT FILEGLOB [FILEGLOB ...]
+ Find-and-replace the given PATTERN (Python-style regex) with the given
+ REPLACEMENT string for each of the files listed.
+ Example:
+ replace @VERSION@ ${product.version} /boot/grub.conf /boot/isolinux.cfg
+ '''
+ match = False
+ for g in fileglobs:
+ for f in rglob(self._out(g)):
+ match = True
+ replace(f, pat, repl)
+ if not match:
+ raise IOError("no files matched %s" % " ".join(fileglobs))
+
+[docs] def append(self, filename, data):
+ '''
+ append FILE STRING
+ Append STRING (followed by a newline character) to FILE.
+ Python character escape sequences ('\\n', '\\t', etc.) will be
+ converted to the appropriate characters.
+ Examples:
+ append /etc/depmod.d/dd.conf "search updates built-in"
+ append /etc/resolv.conf ""
+ '''
+ with open(self._out(filename), "a") as fobj:
+ fobj.write(bytes(data, "utf8").decode('unicode_escape')+"\n")
+
+[docs] def treeinfo(self, section, key, *valuetoks):
+ '''
+ treeinfo SECTION KEY ARG [ARG ...]
+ Add an item to the treeinfo data store.
+ The given SECTION will have a new item added where
+ KEY = ARG ARG ...
+ Example:
+ treeinfo images-${kernel.arch} boot.iso images/boot.iso
+ '''
+ if section not in self.results.treeinfo:
+ self.results.treeinfo[section] = dict()
+ self.results.treeinfo[section][key] = " ".join(valuetoks)
+
+[docs] def installkernel(self, section, src, dest):
+ '''
+ installkernel SECTION SRC DEST
+ Install the kernel from SRC in the input tree to DEST in the output
+ tree, and then add an item to the treeinfo data store, in the named
+ SECTION, where "kernel" = DEST.
+
+ Equivalent to:
+ install SRC DEST
+ treeinfo SECTION kernel DEST
+ '''
+ self.install(src, dest)
+ self.treeinfo(section, "kernel", dest)
+
+[docs] def installinitrd(self, section, src, dest):
+ '''
+ installinitrd SECTION SRC DEST
+ Same as installkernel, but for "initrd".
+ '''
+ self.install(src, dest)
+ self.chmod(dest, '644')
+ self.treeinfo(section, "initrd", dest)
+
+[docs] def installupgradeinitrd(self, section, src, dest):
+ '''
+ installupgradeinitrd SECTION SRC DEST
+ Same as installkernel, but for "upgrade".
+ '''
+ self.install(src, dest)
+ self.chmod(dest, '644')
+ self.treeinfo(section, "upgrade", dest)
+
+[docs] def hardlink(self, src, dest):
+ '''
+ hardlink SRC DEST
+ Create a hardlink at DEST which is linked to SRC.
+ '''
+ if isdir(self._out(dest)):
+ dest = joinpaths(dest, basename(src))
+ os.link(self._out(src), self._out(dest))
+
+[docs] def symlink(self, target, dest):
+ '''
+ symlink SRC DEST
+ Create a symlink at DEST which points to SRC.
+ '''
+ if rexists(self._out(dest)):
+ self.remove(dest)
+ os.symlink(target, self._out(dest))
+
+[docs] def copy(self, src, dest):
+ '''
+ copy SRC DEST
+ Copy SRC to DEST.
+ If DEST is a directory, SRC will be copied inside it.
+ If DEST doesn't exist, SRC will be copied to a file with
+ that name, if the path leading to it exists.
+ '''
+ try:
+ cpfile(self._out(src), self._out(dest))
+ except shutil.Error as e:
+ logger.error(e)
+
+[docs] def move(self, src, dest):
+ '''
+ move SRC DEST
+ Move SRC to DEST.
+ '''
+ mvfile(self._out(src), self._out(dest))
+
+[docs] def remove(self, *fileglobs):
+ '''
+ remove FILEGLOB [FILEGLOB ...]
+ Remove all the named files or directories.
+ Will *not* raise exceptions if the file(s) are not found.
+ '''
+ for g in fileglobs:
+ for f in rglob(self._out(g)):
+ remove(f)
+ logger.debug("removed %s", f)
+
+[docs] def chmod(self, fileglob, mode):
+ '''
+ chmod FILEGLOB OCTALMODE
+ Change the mode of all the files matching FILEGLOB to OCTALMODE.
+ '''
+ for f in rglob(self._out(fileglob), fatal=True):
+ os.chmod(f, int(mode,8))
+
+ # TODO: do we need a new command for gsettings?
+[docs] def gconfset(self, path, keytype, value, outfile=None):
+ '''
+ gconfset PATH KEYTYPE VALUE [OUTFILE]
+ Set the given gconf PATH, with type KEYTYPE, to the given value.
+ OUTFILE defaults to /etc/gconf/gconf.xml.defaults if not given.
+ Example:
+ gconfset /apps/metacity/general/num_workspaces int 1
+ '''
+ if outfile is None:
+ outfile = self._out("etc/gconf/gconf.xml.defaults")
+ cmd = ["gconftool-2", "--direct",
+ "--config-source=xml:readwrite:%s" % outfile,
+ "--set", "--type", keytype, path, value]
+ runcmd(cmd)
+
+[docs] def log(self, msg):
+ '''
+ log MESSAGE
+ Emit the given log message. Be sure to put it in quotes!
+ Example:
+ log "Reticulating splines, please wait..."
+ '''
+ logger.info(msg)
+
+ # TODO: add ssh-keygen, mkisofs(?), find, and other useful commands
+[docs] def runcmd(self, *cmdlist):
+ '''
+ runcmd CMD [ARG ...]
+ Run the given command with the given arguments.
+
+ NOTE: All paths given MUST be COMPLETE, ABSOLUTE PATHS to the file
+ or files mentioned. ${root}/${inroot}/${outroot} are good for
+ constructing these paths.
+
+ FURTHER NOTE: Please use this command only as a last resort!
+ Whenever possible, you should use the existing template commands.
+ If the existing commands don't do what you need, fix them!
+
+ Examples:
+ (this should be replaced with a "find" function)
+ runcmd find ${root} -name "*.pyo" -type f -delete
+ %for f in find(root, name="*.pyo"):
+ remove ${f}
+ %endfor
+ '''
+ cmd = cmdlist
+ logger.debug('running command: %s', cmd)
+ if cmd[0].startswith("--chdir="):
+ logger.error("--chdir is no longer supported for runcmd.")
+ raise ValueError("--chdir is no longer supported for runcmd.")
+
+ try:
+ stdout = runcmd_output(cmd)
+ if stdout:
+ logger.debug('command output:\n%s', stdout)
+ logger.debug("command finished successfully")
+ except CalledProcessError as e:
+ if e.output:
+ logger.error('command output:\n%s', e.output)
+ logger.error('command returned failure (%d)', e.returncode)
+ raise
+
+[docs] def installpkg(self, *pkgs):
+ '''
+ installpkg [--required|--optional] [--except PKGGLOB [--except PKGGLOB ...]] PKGGLOB [PKGGLOB ...]
+ Request installation of all packages matching the given globs.
+ Note that this is just a *request* - nothing is *actually* installed
+ until the 'run_pkg_transaction' command is given.
+
+ --required is now the default. If the PKGGLOB can be missing pass --optional
+ '''
+ if pkgs[0] == '--optional':
+ pkgs = pkgs[1:]
+ required = False
+ elif pkgs[0] == '--required':
+ pkgs = pkgs[1:]
+ required = True
+ else:
+ required = True
+
+ excludes = []
+ while '--except' in pkgs:
+ idx = pkgs.index('--except')
+ if len(pkgs) == idx+1:
+ raise ValueError("installpkg needs an argument after --except")
+
+ excludes.append(pkgs[idx+1])
+ pkgs = pkgs[:idx] + pkgs[idx+2:]
+
+ errors = False
+ for p in pkgs:
+ try:
+ # Start by using Subject to generate a package query, which will
+ # give us a query object similar to what dbo.install would select,
+ # minus the handling for multilib. This query may contain
+ # multiple arches. Pull the package names out of that, filter any
+ # that match the excludes patterns, and pass those names back to
+ # dbo.install to do the actual, arch and version and multilib
+ # aware, package selction.
+
+ # dnf queries don't have a concept of negative globs which is why
+ # the filtering is done the hard way.
+
+ pkgnames = [pkg for pkg in dnf.subject.Subject(p).get_best_query(self.dbo.sack).filter(latest=True)]
+ if not pkgnames:
+ raise dnf.exceptions.PackageNotFoundError("no package matched", p)
+
+ # Apply excludes to the name only
+ for exclude in excludes:
+ pkgnames = [pkg for pkg in pkgnames if not fnmatch.fnmatch(pkg.name, exclude)]
+
+ # Convert to a sorted NVR list for installation
+ pkgnvrs = sorted(["{}-{}-{}".format(pkg.name, pkg.version, pkg.release) for pkg in pkgnames])
+
+ # If the request is a glob, expand it in the log
+ if any(g for g in ['*','?','.'] if g in p):
+ logger.info("installpkg: %s expands to %s", p, ",".join(pkgnvrs))
+
+ for pkgnvr in pkgnvrs:
+ try:
+ self.dbo.install(pkgnvr)
+ except Exception as e: # pylint: disable=broad-except
+ if required:
+ raise
+ # Not required, log it and continue processing pkgs
+ logger.error("installpkg %s failed: %s", pkgnvr, str(e))
+ except Exception as e: # pylint: disable=broad-except
+ logger.error("installpkg %s failed: %s", p, str(e))
+ errors = True
+
+ if errors and required:
+ raise Exception("Required installpkg failed.")
+
+[docs] def removepkg(self, *pkgs):
+ '''
+ removepkg PKGGLOB [PKGGLOB...]
+ Delete the named package(s).
+ IMPLEMENTATION NOTES:
+ RPM scriptlets (%preun/%postun) are *not* run.
+ Files are deleted, but directories are left behind.
+ '''
+ for p in pkgs:
+ filepaths = [f.lstrip('/') for f in self._filelist(p)]
+ # TODO: also remove directories that aren't owned by anything else
+ if filepaths:
+ logger.debug("removepkg %s: %ikb", p, self._getsize(*filepaths)/1024)
+ self.remove(*filepaths)
+ else:
+ logger.debug("removepkg %s: no files to remove!", p)
+
+[docs] def run_pkg_transaction(self):
+ '''
+ run_pkg_transaction
+ Actually install all the packages requested by previous 'installpkg'
+ commands.
+ '''
+ try:
+ logger.info("Checking dependencies")
+ self.dbo.resolve()
+ except dnf.exceptions.DepsolveError as e:
+ logger.error("Dependency check failed: %s", e)
+ raise
+ logger.info("%d packages selected", len(self.dbo.transaction))
+ if len(self.dbo.transaction) == 0:
+ raise Exception("No packages in transaction")
+
+ # If a debug repo has been included, write out a list of debuginfo packages
+ self._write_debuginfo_log()
+
+ pkgs_to_download = self.dbo.transaction.install_set
+ logger.info("Downloading packages")
+ progress = LoraxDownloadCallback()
+ try:
+ self.dbo.download_packages(pkgs_to_download, progress)
+ except dnf.exceptions.DownloadError as e:
+ logger.error("Failed to download the following packages: %s", e)
+ raise
+
+ logger.info("Preparing transaction from installation source")
+ try:
+ display = LoraxRpmCallback()
+ self.dbo.do_transaction(display=display)
+ except BaseException as e:
+ logger.error("The transaction process has ended abruptly: %s", e)
+ raise
+
+ # Reset the package sack to pick up the installed packages
+ self.dbo.reset(repos=False)
+ self.dbo.fill_sack(load_system_repo=True, load_available_repos=False)
+
+ # At this point dnf should know about the installed files. Double check that it really does.
+ if len(self._filelist("anaconda-core")) == 0:
+ raise Exception("Failed to reset dbo to installed package set")
+
+[docs] def removefrom(self, pkg, *globs):
+ '''
+ removefrom PKGGLOB [--allbut] FILEGLOB [FILEGLOB...]
+ Remove all files matching the given file globs from the package
+ (or packages) named.
+ If '--allbut' is used, all the files from the given package(s) will
+ be removed *except* the ones which match the file globs.
+ Examples:
+ removefrom usbutils /usr/bin/*
+ removefrom xfsprogs --allbut /sbin/*
+ '''
+ cmd = "%s %s" % (pkg, " ".join(globs)) # save for later logging
+ keepmatches = False
+ if globs[0] == '--allbut':
+ keepmatches = True
+ globs = globs[1:]
+ # get pkg filelist and find files that match the globs
+ filelist = self._filelist(pkg)
+ matches = set()
+ for g in globs:
+ globs_re = re.compile(fnmatch.translate(g))
+ m = [f for f in filelist if globs_re.match(f)]
+ if m:
+ matches.update(m)
+ else:
+ logger.debug("removefrom %s %s: no files matched!", pkg, g)
+ # are we removing the matches, or keeping only the matches?
+ if keepmatches:
+ remove_files = filelist.difference(matches)
+ else:
+ remove_files = matches
+ # remove the files
+ if remove_files:
+ logger.debug("removefrom %s: removed %i/%i files, %ikb/%ikb", cmd,
+ len(remove_files), len(filelist),
+ self._getsize(*remove_files)/1024, self._getsize(*filelist)/1024)
+ self.remove(*remove_files)
+ else:
+ logger.debug("removefrom %s: no files to remove!", cmd)
+
    def removekmod(self, *globs):
        '''
        removekmod GLOB [GLOB...] [--allbut] KEEPGLOB [KEEPGLOB...]
        Remove all files and directories matching the given file globs from the kernel
        modules directory.

        If '--allbut' is used, all the files from the modules will be removed *except*
        the ones which match the file globs. There must be at least one initial GLOB
        to search and one KEEPGLOB to keep. The KEEPGLOB is expanded to be *KEEPGLOB*
        so that it will match anywhere in the path.

        This only removes files from under /lib/modules/*/kernel/

        Examples:
          removekmod sound drivers/media drivers/hwmon drivers/video
          removekmod drivers/char --allbut virtio_console hw_random
        '''
        # Keep the original argument string for log messages only
        cmd = " ".join(globs)
        if "--allbut" in globs:
            idx = globs.index("--allbut")
            if idx == 0:
                raise ValueError("removekmod needs at least one GLOB before --allbut")

            # Apply keepglobs anywhere they appear in the path
            keepglobs = globs[idx+1:]
            if len(keepglobs) == 0:
                raise ValueError("removekmod needs at least one GLOB after --allbut")

            globs = globs[:idx]
        else:
            # Nothing to keep
            keepglobs = []

        # Collect every regular file under the module directories that match the globs
        filelist = set()
        for g in globs:
            for top_dir in rglob(self._out("/lib/modules/*/kernel/"+g)):
                for root, _dirs, files in os.walk(top_dir):
                    filelist.update(root+"/"+f for f in files)

        # Remove anything matching keepglobs from the list
        matches = set()
        for g in keepglobs:
            # fnmatch.translate turns the shell glob into a regex; wrap with *
            # so the keepglob can match anywhere in the path
            globs_re = re.compile(fnmatch.translate("*"+g+"*"))
            m = [f for f in filelist if globs_re.match(f)]
            if m:
                matches.update(m)
            else:
                logger.debug("removekmod %s: no files matched!", g)
        remove_files = filelist.difference(matches)

        if remove_files:
            logger.debug("removekmod: removing %d files", len(remove_files))
            list(remove(f) for f in remove_files)
        else:
            logger.debug("removekmod %s: no files to remove!", cmd)
+
+[docs] def createaddrsize(self, addr, src, dest):
+ '''
+ createaddrsize INITRD_ADDRESS INITRD ADDRSIZE
+ Create the initrd.addrsize file required in LPAR boot process.
+ Examples:
+ createaddrsize ${INITRD_ADDRESS} ${outroot}/${BOOTDIR}/initrd.img ${outroot}/${BOOTDIR}/initrd.addrsize
+ '''
+ addrsize = open(dest, "wb")
+ addrsize_data = struct.pack(">iiii", 0, int(addr, 16), 0, os.stat(src).st_size)
+ addrsize.write(addrsize_data)
+ addrsize.close()
+
+[docs] def systemctl(self, cmd, *units):
+ '''
+ systemctl [enable|disable|mask] UNIT [UNIT...]
+ Enable, disable, or mask the given systemd units.
+ Examples:
+ systemctl disable lvm2-monitor.service
+ systemctl mask fedora-storage-init.service fedora-configure.service
+ '''
+ if cmd not in ('enable', 'disable', 'mask'):
+ raise ValueError('unsupported systemctl cmd: %s' % cmd)
+ if not units:
+ logger.debug("systemctl: no units given for %s, ignoring", cmd)
+ return
+ self.mkdir("/run/systemd/system") # XXX workaround for systemctl bug
+ systemctl = ['systemctl', '--root', self.outroot, '--no-reload', cmd]
+ # When a unit doesn't exist systemd aborts the command. Run them one at a time.
+ # XXX for some reason 'systemctl enable/disable' always returns 1
+ for unit in units:
+ try:
+ cmd = systemctl + [unit]
+ runcmd(cmd)
+ except CalledProcessError:
+ pass
+
+# monitor.py
+#
+# Copyright (C) 2011-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s): Brian C. Lane <bcl@redhat.com>
+#
+import logging
+log = logging.getLogger("livemedia-creator")
+
+import re
+import socket
+import socketserver
+import threading
+import time
+
class LogRequestHandler(socketserver.BaseRequestHandler):
    """
    Handle monitoring and saving the logfiles from the virtual install

    Incoming data is written to self.server.log_path and each line is checked
    for patterns that would indicate that the installation failed.
    self.server.log_error is set True when this happens.
    """
    def setup(self):
        """Start writing to self.server.log_path"""

        if self.server.log_path:
            self.fp = open(self.server.log_path, "w") # pylint: disable=attribute-defined-outside-init
        else:
            # No log path given; monitor the stream without saving it
            self.fp = None
        # Time out recv() so the kill flag is re-checked regularly
        self.request.settimeout(10)

    def handle(self):
        """
        Write incoming data to a logfile and check for errors

        Split incoming data into lines and check for any Tracebacks or other
        errors that indicate that the install failed.

        Loops until self.server.kill is True
        """
        log.info("Processing logs from %s", self.client_address)
        line = ""
        while True:
            if self.server.kill:
                break

            try:
                data = str(self.request.recv(4096), "utf8")
                if self.fp:
                    self.fp.write(data)
                    self.fp.flush()

                # check the data for errors and set error flag
                # need to assemble it into lines so we can test for the error
                # string.
                while data:
                    more = data.split("\n", 1)
                    line += more[0]
                    if len(more) > 1:
                        # A full line was assembled; scan it for errors
                        self.iserror(line)
                        line = ""
                        data = more[1]
                    else:
                        # Partial line; keep it and wait for the next recv
                        data = None

            except socket.timeout:
                # recv timed out; loop back to re-check self.server.kill
                pass
            except Exception as e: # pylint: disable=broad-except
                log.info("log processing killed by exception: %s", e)
                break

    def finish(self):
        """Close the connection and the logfile when the handler finishes"""
        log.info("Shutting down log processing")
        self.request.close()
        if self.fp:
            self.fp.close()

    def iserror(self, line):
        """
        Check a line to see if it contains an error indicating installation failure

        :param str line: log line to check for failure

        If the line contains IGNORED it will be skipped.

        On a match, sets self.server.log_error and self.server.error_line.
        """
        if "IGNORED" in line:
            return
        # Plain substring matches that indicate a failed install
        simple_tests = ["Traceback (",
                        "Out of memory:",
                        "Call Trace:",
                        "insufficient disk space:",
                        "Not enough disk space to download the packages",
                        "error populating transaction after",
                        "traceback script(s) have been run",
                        "crashed on signal",
                        "packaging: Missed: NoSuchPackage",
                        "packaging: Installation failed",
                        "The following error occurred while installing.  This is a fatal error"]
        # Regex matches for messages with variable parts
        re_tests =     [r"packaging: base repo .* not valid",
                        r"packaging: .* requires .*"]
        for t in simple_tests:
            if t in line:
                self.server.log_error = True
                self.server.error_line = line
                return
        for t in re_tests:
            if re.search(t, line):
                self.server.log_error = True
                self.server.error_line = line
                return
+
+
class LogServer(socketserver.TCPServer):
    """A TCP Server that listens for log data"""

    # Number of seconds to wait for a connection after startup
    timeout = 60

    def __init__(self, log_path, *args, **kwargs):
        """
        Setup the log server

        :param str log_path: Path to the log file to write
        """
        # Pull our private timeout (in minutes) out before TCPServer sees kwargs
        self._timeout = kwargs.pop("timeout", None)
        if self._timeout:
            self._start_time = time.time()
        self.log_path = log_path
        self.kill = False
        self.log_error = False
        self.error_line = ""
        socketserver.TCPServer.__init__(self, *args, **kwargs)

    def log_check(self):
        """
        Check to see if an error has been found in the log

        :returns: True if there has been an error
        :rtype: bool
        """
        taking_too_long = False
        if self._timeout:
            deadline = self._start_time + (self._timeout * 60)
            taking_too_long = time.time() > deadline
            if taking_too_long:
                log.error("Canceling installation due to timeout")
        return self.log_error or taking_too_long
+
+
class LogMonitor(object):
    """
    Setup a server to monitor the logs output by the installation

    This needs to be running before the virt-install runs, it expects
    there to be a listener on the port used for the virtio log port.
    """
    def __init__(self, log_path=None, host="localhost", port=0, timeout=None):
        """
        Start a thread to monitor the logs.

        :param str log_path: Path to the logfile to write
        :param str host: Host to bind to. Default is localhost.
        :param int port: Port to listen to or 0 to pick a port
        :param timeout: Optional timeout, passed through to LogServer

        If 0 is passed for the port the dynamically assigned port will be
        available as self.port

        If log_path isn't set then it only monitors the logs, instead of
        also writing them to disk.
        """
        self.server = LogServer(log_path, (host, port), LogRequestHandler, timeout=timeout)
        # server_address holds the actual bound host/port (port may be dynamic)
        self.host, self.port = self.server.server_address
        self.log_path = log_path
        # Daemon thread so it doesn't block interpreter exit
        self.server_thread = threading.Thread(target=self.server.handle_request)
        self.server_thread.daemon = True
        self.server_thread.start()

    def shutdown(self):
        """Force shutdown of the monitoring thread"""
        # The handler polls this flag and exits its loop when set
        self.server.kill = True
        self.server_thread.join()
+
+# mount.py
+#
+# Copyright (C) 2011-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s): Brian C. Lane <bcl@redhat.com>
+#
+import logging
+log = logging.getLogger("livemedia-creator")
+
+import os
+
+from pylorax.imgutils import mount, umount
+from pylorax.executils import execWithCapture
+
class IsoMountpoint(object):
    """
    Mount the iso and check to make sure the vmlinuz and initrd.img files exist

    Also check the iso for a stage2 image and set a flag and extract the
    iso's label.

    stage2 can be either LiveOS/squashfs.img or images/install.img
    """
    def __init__(self, iso_path, initrd_path=None):
        """
        Mount the iso

        :param str iso_path: Path to the iso to mount
        :param str initrd_path: Optional path to initrd

        initrd_path can be used to point to a tree with a newer
        initrd.img than the iso has. The iso is still used for stage2.

        self.kernel and self.initrd point to the kernel and initrd.
        self.stage2 is set to True if there is a stage2 image.
        self.repo is the path to the mounted iso if there is a /repodata dir.
        """
        self.label = None
        self.iso_path = iso_path
        self.initrd_path = initrd_path

        # Only loop-mount the iso when no separate initrd tree was given
        if not self.initrd_path:
            self.mount_dir = mount(self.iso_path, opts="loop")
        else:
            self.mount_dir = self.initrd_path

        # Known kernel/initrd locations: x86 isolinux layout and ppc64 layout
        kernel_list = [("/isolinux/vmlinuz", "/isolinux/initrd.img"),
                       ("/ppc/ppc64/vmlinuz", "/ppc/ppc64/initrd.img")]

        if os.path.isdir(self.mount_dir+"/repodata"):
            self.repo = self.mount_dir
        else:
            self.repo = None
        self.stage2 = os.path.exists(self.mount_dir+"/LiveOS/squashfs.img") or \
                      os.path.exists(self.mount_dir+"/images/install.img")

        # Unmount on any failure so the loop device isn't leaked
        try:
            for kernel, initrd in kernel_list:
                if (os.path.isfile(self.mount_dir+kernel) and
                    os.path.isfile(self.mount_dir+initrd)):
                    self.kernel = self.mount_dir+kernel
                    self.initrd = self.mount_dir+initrd
                    break
            else:
                raise Exception("Missing kernel and initrd file in iso, failed"
                                " to search under: {0}".format(kernel_list))
        except:
            self.umount()
            raise

        self.get_iso_label()

    def umount( self ):
        """Unmount the iso"""
        # Nothing was mounted when initrd_path was used instead of the iso
        if not self.initrd_path:
            umount(self.mount_dir)

    def get_iso_label(self):
        """
        Get the iso's label using isoinfo

        Sets self.label if one is found
        """
        isoinfo_output = execWithCapture("isoinfo", ["-d", "-i", self.iso_path])
        log.debug(isoinfo_output)
        for line in isoinfo_output.splitlines():
            if line.startswith("Volume id: "):
                self.label = line[11:]
                return
+
+#
+# sysutils.py
+#
+# Copyright (C) 2009-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
+__all__ = ["joinpaths", "touch", "replace", "chown_", "chmod_", "remove",
+ "linktree"]
+
+import sys
+import os
+import re
+import fileinput
+import pwd
+import grp
+import glob
+import shutil
+
+from pylorax.executils import runcmd
+
def joinpaths(*args, **kwargs):
    """Join the arguments with the OS path separator.

    If the keyword argument follow_symlinks is true, the joined path is
    resolved with os.path.realpath before being returned.
    """
    result = os.path.sep.join(args)
    if kwargs.get("follow_symlinks"):
        result = os.path.realpath(result)
    return result
+
+
def touch(fname):
    """Create an empty file, truncating fname if it already exists.

    Uses a context manager so the handle is closed deterministically
    instead of relying on CPython's refcounting to close it.
    """
    with open(fname, "w") as fobj:
        fobj.write("")
+
+
def replace(fname, find, sub):
    """Edit fname in place, replacing every regex match on every line.

    :param fname: path of the file to edit
    :param find: regular expression to search for
    :param sub: replacement text

    fileinput with inplace=1 redirects stdout into the file, so writing
    each (possibly substituted) line rewrites it. The input is now closed
    in a finally block so an error mid-edit doesn't leak the handle.
    """
    pattern = re.compile(find)
    fin = fileinput.input(fname, inplace=1)
    try:
        for line in fin:
            sys.stdout.write(pattern.sub(sub, line))
    finally:
        fin.close()
+
+
def chown_(path, user=None, group=None, recursive=False):
    """Change owner and/or group of everything matching a glob pattern.

    :param path: glob pattern of paths to change
    :param user: user name to set as owner, or None to leave unchanged
    :param group: group name to set, or None to leave unchanged
    :param recursive: also descend into matched directories
    """
    # os.chown treats -1 as "leave this id unchanged"
    uid = pwd.getpwnam(user)[2] if user is not None else -1
    gid = grp.getgrnam(group)[2] if group is not None else -1

    for fname in glob.iglob(path):
        os.chown(fname, uid, gid)
        if recursive and os.path.isdir(fname):
            for nested in os.listdir(fname):
                chown_(joinpaths(fname, nested), user, group, recursive)
+
+
def chmod_(path, mode, recursive=False):
    """Change the permission bits of everything matching a glob pattern.

    :param path: glob pattern of paths to change
    :param mode: numeric mode to apply (eg. 0o644)
    :param recursive: also descend into matched directories
    """
    for fname in glob.iglob(path):
        os.chmod(fname, mode)
        if recursive and os.path.isdir(fname):
            for nested in os.listdir(fname):
                chmod_(joinpaths(fname, nested), mode, recursive)
+
+
def cpfile(src, dst):
    """Copy a file (with metadata) and return the resulting path.

    If dst is a directory the copy keeps src's basename inside it and
    the full path of the copy is returned.
    """
    shutil.copy2(src, dst)
    if not os.path.isdir(dst):
        return dst
    return joinpaths(dst, os.path.basename(src))
+
def mvfile(src, dst):
    """Move a file and return the resulting path.

    If dst is a directory the file keeps src's basename inside it.
    """
    target = joinpaths(dst, os.path.basename(src)) if os.path.isdir(dst) else dst
    os.rename(src, target)
    return target
+
def remove(target):
    """Remove a file, symlink, or directory tree."""
    # A symlink to a directory must be unlinked, never rmtree'd
    if os.path.islink(target) or not os.path.isdir(target):
        os.unlink(target)
    else:
        shutil.rmtree(target)
+
+
+
+# treebuilder.py - handle arch-specific tree building stuff using templates
+#
+# Copyright (C) 2011-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Author(s): Will Woods <wwoods@redhat.com>
+
+import logging
+logger = logging.getLogger("pylorax.treebuilder")
+
+import os, re
+from os.path import basename
+from shutil import copytree, copy2
+from pathlib import Path
+import itertools
+
+from pylorax.sysutils import joinpaths, remove
+from pylorax.base import DataHolder
+from pylorax.ltmpl import LoraxTemplateRunner
+import pylorax.imgutils as imgutils
+from pylorax.executils import runcmd, runcmd_output, execWithCapture
+
# Map a dnf basearch to the arch-specific boot image template that
# TreeBuilder.build() runs for that architecture.
templatemap = {
    'i386': 'x86.tmpl',
    'x86_64': 'x86.tmpl',
    'ppc': 'ppc.tmpl',
    'ppc64': 'ppc.tmpl',
    'ppc64le': 'ppc64le.tmpl',
    's390': 's390.tmpl',
    's390x': 's390.tmpl',
    'aarch64': 'aarch64.tmpl',
    'arm': 'arm.tmpl',
    'armhfp': 'arm.tmpl',
}
+
def generate_module_info(moddir, outfile=None):
    """Write a module-info file describing the kernel modules under moddir.

    :param moddir: path to a lib/modules/<kver> directory
    :param outfile: path of the file to write; defaults to moddir/module-info

    Classifies modules listed in modules.block as 'scsi' and in
    modules.networking as 'eth', gets each module's description from
    modinfo, and writes a sorted "Version 0" module-info file.
    All file handles are now closed via context managers instead of
    being leaked until garbage collection.
    """
    def module_desc(mod):
        # modinfo -F description prints only the description field
        output = runcmd_output(["modinfo", "-F", "description", mod])
        return output.strip()

    def read_module_set(name):
        # Only lines that name a .ko file are module entries
        with open(joinpaths(moddir, name)) as fobj:
            return set(l.strip() for l in fobj if ".ko" in l)

    modsets = {'scsi': read_module_set("modules.block"),
               'eth': read_module_set("modules.networking")}

    modinfo = list()
    for root, _dirs, files in os.walk(moddir):
        for modtype, modset in modsets.items():
            for mod in modset.intersection(files):      # modules in this dir
                (name, _ext) = os.path.splitext(mod)    # foo.ko -> (foo, .ko)
                desc = module_desc(joinpaths(root, mod)) or "%s driver" % name
                modinfo.append(dict(name=name, type=modtype, desc=desc))

    with open(outfile or joinpaths(moddir, "module-info"), "w") as out:
        out.write("Version 0\n")
        for mod in sorted(modinfo, key=lambda m: m.get('name')):
            out.write('{name}\n\t{type}\n\t"{desc:.65}"\n'.format(**mod))
+
class RuntimeBuilder(object):
    '''Builds the anaconda runtime image.'''
    def __init__(self, product, arch, dbo, templatedir=None,
                 installpkgs=None, excludepkgs=None,
                 add_templates=None,
                 add_template_vars=None):
        """
        :param product: product data; a copy is made and its name lowercased
        :param arch: arch data providing basearch and libdir
        :param dbo: dnf base object; its installroot is used as in/out root
        :param templatedir: optional directory holding the templates
        :param installpkgs: optional extra packages to install
        :param excludepkgs: optional packages to remove from the install set
        :param add_templates: optional extra templates to run after install
        :param add_template_vars: variables passed to the extra templates
        """
        root = dbo.conf.installroot
        # use a copy of product so we can modify it locally
        product = product.copy()
        product.name = product.name.lower()
        self.vars = DataHolder(arch=arch, product=product, dbo=dbo, root=root,
                               basearch=arch.basearch, libdir=arch.libdir)
        self.dbo = dbo
        # Templates run with inroot == outroot == the dnf installroot
        self._runner = LoraxTemplateRunner(inroot=root, outroot=root,
                                           dbo=dbo, templatedir=templatedir)
        self.add_templates = add_templates or []
        self.add_template_vars = add_template_vars or {}
        self._installpkgs = installpkgs or []
        self._excludepkgs = excludepkgs or []
        self._runner.defaults = self.vars
        self.dbo.reset()

    def _install_branding(self):
        """Install the product's release package and its matching logos."""
        release = None
        q = self.dbo.sack.query()
        a = q.available()
        for pkg in a.filter(provides='system-release'):
            logger.debug("Found release package %s", pkg)
            if pkg.name.startswith('generic'):
                # skip generic fallback branding in favor of a real one
                continue
            else:
                release = pkg.name
                break

        if not release:
            logger.error('could not get the release')
            return

        # release
        logger.info('got release: %s', release)
        self._runner.installpkg(release)

        # logos: "<product>-release-..." -> "<product>-logos"
        release, _suffix = release.split('-', 1)
        self._runner.installpkg('%s-logos' % release)

    def install(self):
        '''Install packages and do initial setup with runtime-install.tmpl'''
        self._install_branding()
        if len(self._installpkgs) > 0:
            self._runner.installpkg(*self._installpkgs)
        if len(self._excludepkgs) > 0:
            self._runner.removepkg(*self._excludepkgs)
        self._runner.run("runtime-install.tmpl")
        for tmpl in self.add_templates:
            self._runner.run(tmpl, **self.add_template_vars)

    def writepkglists(self, pkglistdir):
        '''debugging data: write out lists of package contents'''
        if not os.path.isdir(pkglistdir):
            os.makedirs(pkglistdir)
        q = self.dbo.sack.query()
        # One file per installed package, listing the files it owns
        for pkgobj in q.installed():
            with open(joinpaths(pkglistdir, pkgobj.name), "w") as fobj:
                for fname in pkgobj.files:
                    fobj.write("{0}\n".format(fname))

    def postinstall(self):
        '''Do some post-install setup work with runtime-postinstall.tmpl'''
        # copy configdir into runtime root beforehand
        configdir = joinpaths(self._runner.templatedir,"config_files")
        configdir_path = "tmp/config_files"
        fullpath = joinpaths(self.vars.root, configdir_path)
        if os.path.exists(fullpath):
            remove(fullpath)
        copytree(configdir, fullpath)
        self._runner.run("runtime-postinstall.tmpl", configdir=configdir_path)

    def cleanup(self):
        '''Remove unneeded packages and files with runtime-cleanup.tmpl'''
        self._runner.run("runtime-cleanup.tmpl")

    def verify(self):
        '''Ensure that contents of the installroot can run'''
        status = True

        ELF_MAGIC = b'\x7fELF'

        # Iterate over all files in /usr/bin and /usr/sbin
        # For ELF files, gather them into a list and we'll check them all at
        # the end. For files with a #!, check them as we go
        elf_files = []
        usr_bin = Path(self.vars.root + '/usr/bin')
        usr_sbin = Path(self.vars.root + '/usr/sbin')
        for path in (str(x) for x in itertools.chain(usr_bin.iterdir(), usr_sbin.iterdir()) \
                     if x.is_file()):
            with open(path, "rb") as f:
                magic = f.read(4)
                if magic == ELF_MAGIC:
                    # Save the path, minus the chroot prefix
                    elf_files.append(path[len(self.vars.root):])
                elif magic[:2] == b'#!':
                    # Reopen the file as text and read the first line.
                    # Open as latin-1 so that stray 8-bit characters don't make
                    # things blow up. We only really care about ASCII parts.
                    with open(path, "rt", encoding="latin-1") as f_text:
                        # Remove the #!, split on space, and take the first part
                        shabang = f_text.readline()[2:].split()[0]

                    # Does the path exist?
                    if not os.path.exists(self.vars.root + shabang):
                        logger.error('%s, needed by %s, does not exist', shabang, path)
                        status = False

        # Now, run ldd on all the ELF files
        # Just run ldd once on everything so it isn't logged a million times.
        # At least one thing in the list isn't going to be a dynamic executable,
        # so use execWithCapture to ignore the exit code.
        filename = ''
        for line in execWithCapture('ldd', elf_files, root=self.vars.root,
                                    log_output=False, filter_stderr=True).split('\n'):
            if line and not line[0].isspace():
                # New filename header, strip the : at the end and save
                filename = line[:-1]
            elif 'not found' in line:
                logger.error('%s, needed by %s, not found', line.split()[0], filename)
                status = False

        return status

    def writepkgsizes(self, pkgsizefile):
        '''debugging data: write a big list of pkg sizes'''
        fobj = open(pkgsizefile, "w")
        # Files that don't exist on disk (eg. %ghost entries) count as 0
        getsize = lambda f: os.lstat(f).st_size if os.path.exists(f) else 0
        q = self.dbo.sack.query()
        for p in sorted(q.installed()):
            pkgsize = sum(getsize(joinpaths(self.vars.root,f)) for f in p.files)
            fobj.write("{0.name}.{0.arch}: {1}\n".format(p, pkgsize))

    def generate_module_data(self):
        """Run depmod for each installed kernel and write its module-info."""
        root = self.vars.root
        moddir = joinpaths(root, "lib/modules/")
        for kver in os.listdir(moddir):
            ksyms = joinpaths(root, "boot/System.map-%s" % kver)
            logger.info("doing depmod and module-info for %s", kver)
            runcmd(["depmod", "-a", "-F", ksyms, "-b", root, kver])
            generate_module_info(moddir+kver, outfile=moddir+"module-info")

    def create_runtime(self, outfile="/var/tmp/squashfs.img", compression="xz", compressargs=None, size=2):
        """Create the live rootfs image and squash it into outfile.

        :param outfile: path of the squashfs image to write
        :param compression: squashfs compression type
        :param compressargs: optional extra arguments for the compressor
        :param size: rootfs image size in GiB
        """
        # make live rootfs image - must be named "LiveOS/rootfs.img" for dracut
        compressargs = compressargs or []
        workdir = joinpaths(os.path.dirname(outfile), "runtime-workdir")
        os.makedirs(joinpaths(workdir, "LiveOS"))

        imgutils.mkrootfsimg(self.vars.root, joinpaths(workdir, "LiveOS/rootfs.img"),
                             "Anaconda", size=size)

        # squash the live rootfs and clean up workdir
        imgutils.mksquashfs(workdir, outfile, compression, compressargs)
        remove(workdir)

    def finished(self):
        """ Done using RuntimeBuilder

        Close the dnf base object
        """
        self.dbo.close()
+
class TreeBuilder(object):
    '''Builds the arch-specific boot images.
    inroot should be the installtree root (the newly-built runtime dir)'''
    def __init__(self, product, arch, inroot, outroot, runtime, isolabel, domacboot=True, doupgrade=True, templatedir=None, add_templates=None, add_template_vars=None, workdir=None):
        """
        :param product: product data passed through to the templates
        :param arch: arch data providing basearch and libdir
        :param inroot: the installtree root (the newly-built runtime dir)
        :param outroot: directory where the boot images are written
        :param runtime: path to the runtime (squashfs) image
        :param isolabel: label used for the boot iso
        :param domacboot: whether to build mac boot images
        :param doupgrade: whether to include upgrade support
        :param templatedir: optional directory holding the templates
        :param add_templates: optional extra templates to run before the arch template
        :param add_template_vars: variables passed to the extra templates
        :param workdir: scratch directory made available to the templates
        """
        # NOTE: if you pass an arg named "runtime" to a mako template it'll
        # clobber some mako internal variables - hence "runtime_img".
        self.vars = DataHolder(arch=arch, product=product, runtime_img=runtime,
                               runtime_base=basename(runtime),
                               inroot=inroot, outroot=outroot,
                               basearch=arch.basearch, libdir=arch.libdir,
                               isolabel=isolabel, udev=udev_escape, domacboot=domacboot, doupgrade=doupgrade,
                               workdir=workdir, lower=string_lower)
        self._runner = LoraxTemplateRunner(inroot, outroot, templatedir=templatedir)
        self._runner.defaults = self.vars
        self.add_templates = add_templates or []
        self.add_template_vars = add_template_vars or {}
        self.templatedir = templatedir
        self.treeinfo_data = None

    @property
    def kernels(self):
        # Kernels found under the installtree's boot directory
        return findkernels(root=self.vars.inroot)

    def rebuild_initrds(self, add_args=None, backup="", prefix=""):
        '''Rebuild all the initrds in the tree. If backup is specified, each
        initrd will be renamed with backup as a suffix before rebuilding.
        If backup is empty, the existing initrd files will be overwritten.
        If suffix is specified, the existing initrd is untouched and a new
        image is built with the filename "${prefix}-${kernel.version}.img"

        If the initrd doesn't exist its name will be created based on the
        name of the kernel.
        '''
        add_args = add_args or []
        dracut = ["dracut", "--nomdadmconf", "--nolvmconf"] + add_args
        if not backup:
            # No backup requested, let dracut overwrite the existing initrd
            dracut.append("--force")

        if not self.kernels:
            raise Exception("No kernels found, cannot rebuild_initrds")

        # Hush some dracut warnings. TODO: bind-mount proc in place?
        open(joinpaths(self.vars.inroot,"/proc/modules"),"w")
        for kernel in self.kernels:
            if prefix:
                idir = os.path.dirname(kernel.path)
                outfile = joinpaths(idir, prefix+'-'+kernel.version+'.img')
            elif hasattr(kernel, "initrd"):
                # If there is an existing initrd, use that
                outfile = kernel.initrd.path
            else:
                # Construct an initrd from the kernel name
                outfile = kernel.path.replace("vmlinuz-", "initrd-") + ".img"
            logger.info("rebuilding %s", outfile)
            if backup:
                initrd = joinpaths(self.vars.inroot, outfile)
                if os.path.exists(initrd):
                    os.rename(initrd, initrd + backup)
            cmd = dracut + [outfile, kernel.version]
            runcmd(cmd, root=self.vars.inroot)

            # ppc64 cannot boot images > 32MiB, check size and warn
            if self.vars.arch.basearch in ("ppc64", "ppc64le") and os.path.exists(outfile):
                st = os.stat(outfile)
                if st.st_size > 32 * 1024 * 1024:
                    logging.warning("ppc64 initrd %s is > 32MiB", outfile)

        os.unlink(joinpaths(self.vars.inroot,"/proc/modules"))

    def build(self):
        """Run the extra templates, then the arch template, then implant md5s."""
        templatefile = templatemap[self.vars.arch.basearch]
        for tmpl in self.add_templates:
            self._runner.run(tmpl, **self.add_template_vars)
        self._runner.run(templatefile, kernels=self.kernels)
        self.treeinfo_data = self._runner.results.treeinfo
        self.implantisomd5()

    def implantisomd5(self):
        """Implant an md5 checksum in every boot.iso listed in treeinfo_data."""
        for _section, data in self.treeinfo_data.items():
            if 'boot.iso' in data:
                iso = joinpaths(self.vars.outroot, data['boot.iso'])
                runcmd(["implantisomd5", iso])

    @property
    def dracut_hooks_path(self):
        """ Return the path to the lorax dracut hooks scripts

        Use the configured share dir if it is setup,
        otherwise default to /usr/share/lorax/dracut_hooks
        """
        if self.templatedir:
            return joinpaths(self.templatedir, "dracut_hooks")
        else:
            return "/usr/share/lorax/dracut_hooks"

    def copy_dracut_hooks(self, hooks):
        """ Copy the hook scripts in hooks into the installroot's /tmp/
        and return a list of commands to pass to dracut when creating the
        initramfs

        hooks is a list of tuples with the name of the hook script and the
        target dracut hook directory
        (eg. [("99anaconda-copy-ks.sh", "/lib/dracut/hooks/pre-pivot")])
        """
        dracut_commands = []
        for hook_script, dracut_path in hooks:
            src = joinpaths(self.dracut_hooks_path, hook_script)
            if not os.path.exists(src):
                # Missing hooks are skipped, not fatal
                logger.error("Missing lorax dracut hook script %s", (src))
                continue
            dst = joinpaths(self.vars.inroot, "/tmp/", hook_script)
            copy2(src, dst)
            dracut_commands += ["--include", joinpaths("/tmp/", hook_script),
                                dracut_path]
        return dracut_commands
+
+#### TreeBuilder helper functions
+
def findkernels(root="/", kdir="boot"):
    """Find all vmlinuz-* kernels under root/kdir and any associated images.

    Returns a list of DataHolders with path, version, arch and flavor set,
    plus one attribute per matching boot image found alongside each kernel
    (eg. kernel.initrd).
    """
    # To find possible flavors, awk '/BuildKernel/ { print $4 }' kernel.spec
    flavors = ('debug', 'PAE', 'PAEdebug', 'smp', 'xen', 'lpae')
    kre = re.compile(r"vmlinuz-(?P<version>.+?\.(?P<arch>[a-z0-9_]+)"
                     r"(.(?P<flavor>{0}))?)$".format("|".join(flavors)))
    bootfiles = os.listdir(joinpaths(root, kdir))

    kernels = []
    for fname in bootfiles:
        match = kre.match(fname)
        if not match:
            continue
        kernel = DataHolder(path=joinpaths(kdir, fname))
        kernel.update(match.groupdict())  # sets version, arch, flavor
        kernels.append(kernel)

    # look for associated initrd/initramfs/etc.
    for kernel in kernels:
        suffix = '-' + kernel.version + '.img'
        for fname in bootfiles:
            if not fname.endswith(suffix):
                continue
            imgtype, _rest = fname.split('-', 1)
            # special backwards-compat case
            if imgtype == 'initramfs':
                imgtype = 'initrd'
            kernel[imgtype] = DataHolder(path=joinpaths(kdir, fname))

    logger.debug("kernels=%s", kernels)
    return kernels
+
# udev whitelist: 'a-zA-Z0-9#+.:=@_-' (see is_whitelisted in libudev-util.c)
udev_blacklist=' !"$%&\'()*,/;<>?[\\]^`{|}~' # ASCII printable, minus whitelist
udev_blacklist += ''.join(chr(i) for i in range(32)) # ASCII non-printable
def udev_escape(label):
    """Escape characters udev rejects in a label as \\xNN hex sequences."""
    return ''.join(ch if ch not in udev_blacklist else '\\x%02x' % ord(ch)
                   for ch in label)
+
def string_lower(string):
    """ Return a lowercase string.

    :param string: String to lowercase

    This is used as a filter in the templates.
    """
    lowered = string.lower()
    return lowered
+
+#
+# treeinfo.py
+#
+# Copyright (C) 2010-2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Red Hat Author(s): Martin Gracik <mgracik@redhat.com>
+#
+
+import logging
+logger = logging.getLogger("pylorax.treeinfo")
+
+import configparser
+import time
+
+
class TreeInfo(object):
    """Build and write the .treeinfo file describing an output tree."""

    def __init__(self, product, version, variant, basearch,
                 packagedir=""):
        """
        :param product: product family name
        :param version: release version string
        :param variant: variant name; a false value is stored as ""
        :param basearch: base architecture of the tree
        :param packagedir: optional directory holding the packages
        """
        self.c = configparser.ConfigParser()

        section = "general"
        data = {"timestamp": str(time.time()),
                "family": product,
                "version": version,
                "name": "%s-%s" % (product, version),
                "variant": variant or "",
                "arch": basearch,
                "packagedir": packagedir}

        self.c.add_section(section)
        # A plain loop instead of list() over a generator used for side effects
        for key, value in data.items():
            self.c.set(section, key, value)

    def add_section(self, section, data):
        """Add section (if missing) and set the key/value pairs in data."""
        if not self.c.has_section(section):
            self.c.add_section(section)

        for key, value in data.items():
            self.c.set(section, key, value)

    def write(self, outfile):
        """Write the collected .treeinfo data to outfile."""
        logger.info("writing .treeinfo file")
        # Close the file deterministically even if ConfigParser.write fails
        with open(outfile, "w") as fobj:
            self.c.write(fobj)
+
' + _('Hide Search Matches') + '
') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) === 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this === '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + initOnKeyListeners: function() { + $(document).keyup(function(event) { + var activeElementType = document.activeElement.tagName; + // don't navigate when in search box or textarea + if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { + switch (event.keyCode) { + case 37: // left + var prevHref = $('link[rel="prev"]').prop('href'); + if (prevHref) { + window.location.href = prevHref; + return false; + } + case 39: // right + var nextHref = $('link[rel="next"]').prop('href'); + if (nextHref) { + window.location.href = nextHref; + return false; + } + } + } + }); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + +$(document).ready(function() { 
+ Documentation.init(); +}); \ No newline at end of file diff --git a/f28-branch/_static/down-pressed.png b/f28-branch/_static/down-pressed.png new file mode 100644 index 00000000..5756c8ca Binary files /dev/null and b/f28-branch/_static/down-pressed.png differ diff --git a/f28-branch/_static/down.png b/f28-branch/_static/down.png new file mode 100644 index 00000000..1b3bdad2 Binary files /dev/null and b/f28-branch/_static/down.png differ diff --git a/f28-branch/_static/file.png b/f28-branch/_static/file.png new file mode 100644 index 00000000..a858a410 Binary files /dev/null and b/f28-branch/_static/file.png differ diff --git a/f28-branch/_static/fonts/Inconsolata-Bold.ttf b/f28-branch/_static/fonts/Inconsolata-Bold.ttf new file mode 100644 index 00000000..809c1f58 Binary files /dev/null and b/f28-branch/_static/fonts/Inconsolata-Bold.ttf differ diff --git a/f28-branch/_static/fonts/Inconsolata-Regular.ttf b/f28-branch/_static/fonts/Inconsolata-Regular.ttf new file mode 100644 index 00000000..fc981ce7 Binary files /dev/null and b/f28-branch/_static/fonts/Inconsolata-Regular.ttf differ diff --git a/f28-branch/_static/fonts/Inconsolata.ttf b/f28-branch/_static/fonts/Inconsolata.ttf new file mode 100644 index 00000000..4b8a36d2 Binary files /dev/null and b/f28-branch/_static/fonts/Inconsolata.ttf differ diff --git a/f28-branch/_static/fonts/Lato-Bold.ttf b/f28-branch/_static/fonts/Lato-Bold.ttf new file mode 100644 index 00000000..ef5ae3b4 Binary files /dev/null and b/f28-branch/_static/fonts/Lato-Bold.ttf differ diff --git a/f28-branch/_static/fonts/Lato-Regular.ttf b/f28-branch/_static/fonts/Lato-Regular.ttf new file mode 100644 index 00000000..adbfc467 Binary files /dev/null and b/f28-branch/_static/fonts/Lato-Regular.ttf differ diff --git a/f28-branch/_static/fonts/RobotoSlab-Bold.ttf b/f28-branch/_static/fonts/RobotoSlab-Bold.ttf new file mode 100644 index 00000000..df5d1df2 Binary files /dev/null and b/f28-branch/_static/fonts/RobotoSlab-Bold.ttf differ diff 
--git a/f28-branch/_static/fonts/RobotoSlab-Regular.ttf b/f28-branch/_static/fonts/RobotoSlab-Regular.ttf new file mode 100644 index 00000000..eb52a790 Binary files /dev/null and b/f28-branch/_static/fonts/RobotoSlab-Regular.ttf differ diff --git a/f28-branch/_static/fonts/fontawesome-webfont.eot b/f28-branch/_static/fonts/fontawesome-webfont.eot new file mode 100644 index 00000000..e9f60ca9 Binary files /dev/null and b/f28-branch/_static/fonts/fontawesome-webfont.eot differ diff --git a/f28-branch/_static/fonts/fontawesome-webfont.svg b/f28-branch/_static/fonts/fontawesome-webfont.svg new file mode 100644 index 00000000..855c845e --- /dev/null +++ b/f28-branch/_static/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + diff --git a/f28-branch/_static/fonts/fontawesome-webfont.ttf b/f28-branch/_static/fonts/fontawesome-webfont.ttf new file mode 100644 index 00000000..35acda2f Binary files /dev/null and b/f28-branch/_static/fonts/fontawesome-webfont.ttf differ diff --git a/f28-branch/_static/fonts/fontawesome-webfont.woff b/f28-branch/_static/fonts/fontawesome-webfont.woff new file mode 100644 index 00000000..400014a4 Binary files /dev/null and b/f28-branch/_static/fonts/fontawesome-webfont.woff differ diff --git a/f28-branch/_static/jquery-1.11.1.js b/f28-branch/_static/jquery-1.11.1.js new file mode 100644 index 00000000..d4b67f7e --- /dev/null +++ b/f28-branch/_static/jquery-1.11.1.js @@ -0,0 +1,10308 @@ +/*! + * jQuery JavaScript Library v1.11.1 + * http://jquery.com/ + * + * Includes Sizzle.js + * http://sizzlejs.com/ + * + * Copyright 2005, 2014 jQuery Foundation, Inc. 
and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2014-05-01T17:42Z + */ + +(function( global, factory ) { + + if ( typeof module === "object" && typeof module.exports === "object" ) { + // For CommonJS and CommonJS-like environments where a proper window is present, + // execute the factory and get jQuery + // For environments that do not inherently posses a window with a document + // (such as Node.js), expose a jQuery-making factory as module.exports + // This accentuates the need for the creation of a real window + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +}(typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Can't do this because several apps including ASP.NET trace +// the stack via arguments.caller.callee and Firefox dies if +// you try to trace through "use strict" call chains. 
(#13335) +// Support: Firefox 18+ +// + +var deletedIds = []; + +var slice = deletedIds.slice; + +var concat = deletedIds.concat; + +var push = deletedIds.push; + +var indexOf = deletedIds.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var support = {}; + + + +var + version = "1.11.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android<4.1, IE<9 + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([\da-z])/gi, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // Start with an empty selector + selector: "", + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + return num != null ? + + // Return just the one element from the set + ( num < 0 ? 
this[ num + this.length ] : this[ num ] ) : + + // Return all the elements in a clean array + slice.call( this ); + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + ret.context = this.context; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + // (You can seed the arguments with an array of args, but this is + // only used internally.) + each: function( callback, args ) { + return jQuery.each( this, callback, args ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map(this, function( elem, i ) { + return callback.call( elem, i, elem ); + })); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[j] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(null); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
+ push: push, + sort: deletedIds.sort, + splice: deletedIds.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var src, copyIsArray, copy, name, options, clone, + target = arguments[0] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction(target) ) { + target = {}; + } + + // extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + // Only deal with non-null/undefined values + if ( (options = arguments[ i ]) != null ) { + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { + if ( copyIsArray ) { + copyIsArray = false; + clone = src && jQuery.isArray(src) ? src : []; + + } else { + clone = src && jQuery.isPlainObject(src) ? src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend({ + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + // See test/unit/core.js for details concerning isFunction. 
+ // Since version 1.3, DOM methods and functions like alert + // aren't supported. They return false on IE (#2968). + isFunction: function( obj ) { + return jQuery.type(obj) === "function"; + }, + + isArray: Array.isArray || function( obj ) { + return jQuery.type(obj) === "array"; + }, + + isWindow: function( obj ) { + /* jshint eqeqeq: false */ + return obj != null && obj == obj.window; + }, + + isNumeric: function( obj ) { + // parseFloat NaNs numeric-cast false positives (null|true|false|"") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + return !jQuery.isArray( obj ) && obj - parseFloat( obj ) >= 0; + }, + + isEmptyObject: function( obj ) { + var name; + for ( name in obj ) { + return false; + } + return true; + }, + + isPlainObject: function( obj ) { + var key; + + // Must be an Object. + // Because of IE, we also have to check the presence of the constructor property. + // Make sure that DOM nodes and window objects don't pass through, as well + if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { + return false; + } + + try { + // Not own constructor property must be Object + if ( obj.constructor && + !hasOwn.call(obj, "constructor") && + !hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { + return false; + } + } catch ( e ) { + // IE8,9 Will throw exceptions on certain host objects #9897 + return false; + } + + // Support: IE<9 + // Handle iteration over inherited properties before own properties. + if ( support.ownLast ) { + for ( key in obj ) { + return hasOwn.call( obj, key ); + } + } + + // Own properties are enumerated firstly, so to speed up, + // if last one is own, then all properties are own. + for ( key in obj ) {} + + return key === undefined || hasOwn.call( obj, key ); + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + return typeof obj === "object" || typeof obj === "function" ? 
+ class2type[ toString.call(obj) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + // Workarounds based on findings by Jim Driscoll + // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context + globalEval: function( data ) { + if ( data && jQuery.trim( data ) ) { + // We use execScript on Internet Explorer + // We use an anonymous function so that context is window + // rather than jQuery in Firefox + ( window.execScript || function( data ) { + window[ "eval" ].call( window, data ); + } )( data ); + } + }, + + // Convert dashed to camelCase; used by the css and data modules + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + nodeName: function( elem, name ) { + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + }, + + // args is for internal usage only + each: function( obj, callback, args ) { + var value, + i = 0, + length = obj.length, + isArray = isArraylike( obj ); + + if ( args ) { + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback.apply( obj[ i ], args ); + + if ( value === false ) { + break; + } + } + } else { + for ( i in obj ) { + value = callback.apply( obj[ i ], args ); + + if ( value === false ) { + break; + } + } + } + + // A special, fast, case for the most common use of each + } else { + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback.call( obj[ i ], i, obj[ i ] ); + + if ( value === false ) { + break; + } + } + } else { + for ( i in obj ) { + value = callback.call( obj[ i ], i, obj[ i ] ); + + if ( value === false ) { + break; + } + } + } + } + + return obj; + }, + + // Support: Android<4.1, IE<9 + trim: function( text ) { + return text == null ? 
+ "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArraylike( Object(arr) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + var len; + + if ( arr ) { + if ( indexOf ) { + return indexOf.call( arr, elem, i ); + } + + len = arr.length; + i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0; + + for ( ; i < len; i++ ) { + // Skip accessing in sparse arrays + if ( i in arr && arr[ i ] === elem ) { + return i; + } + } + } + + return -1; + }, + + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + while ( j < len ) { + first[ i++ ] = second[ j++ ]; + } + + // Support: IE<9 + // Workaround casting of .length to NaN on otherwise arraylike objects (e.g., NodeLists) + if ( len !== len ) { + while ( second[j] !== undefined ) { + first[ i++ ] = second[ j++ ]; + } + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var value, + i = 0, + length = elems.length, + isArray = isArraylike( elems ), + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else 
{ + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var args, proxy, tmp; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. + if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: function() { + return +( new Date() ); + }, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +}); + +// Populate the class2type map +jQuery.each("Boolean Number String Function Array Date RegExp Object Error".split(" "), function(i, name) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +}); + +function isArraylike( obj ) { + var length = obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + if ( obj.nodeType === 1 && length ) { + return true; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v1.10.19 + * http://sizzlejs.com/ + * + * Copyright 2013 jQuery Foundation, Inc. 
and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2014-04-18 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + -(new Date()), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // General-purpose constants + strundefined = typeof undefined, + MAX_NEGATIVE = 1 << 31, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf if we can't use a native one + indexOf = arr.indexOf || function( elem ) { + var i = 0, + len = this.length; + for ( ; i < len; i++ ) { + if ( this[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + // http://www.w3.org/TR/css3-syntax/#characters + characterEncoding = "(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+", + + // Loosely modeled on CSS identifier characters + // An unquoted value should be a CSS identifier http://www.w3.org/TR/css3-selectors/#attribute-selectors + // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = characterEncoding.replace( "w", "w#" ), + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + 
whitespace + "*(" + characterEncoding + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + characterEncoding + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + characterEncoding + ")" ), + "CLASS": new RegExp( "^\\.(" + characterEncoding + ")" ), + "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + 
"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + rescape = /'|\\/g, + + // CSS escapes http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? + // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }; + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var match, elem, m, nodeType, + // QSA vars + i, groups, old, nid, newContext, newSelector; + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + + context = context || document; + results = results || []; + + if ( !selector || typeof selector !== "string" ) { + return results; + } + + if ( (nodeType = context.nodeType) !== 1 && nodeType !== 9 ) { + return []; + } + + if ( documentIsHTML && !seed ) { + + // Shortcuts + if ( (match = rquickExpr.exec( selector )) ) { + // Speed-up: Sizzle("#ID") + if ( (m = match[1]) ) { + if ( nodeType === 9 ) { + elem = context.getElementById( m ); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document (jQuery #6963) + if ( elem && elem.parentNode ) { + // Handle the case where IE, Opera, and Webkit return items + // by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + } else { + // Context is not a document + if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) && + contains( context, elem ) && elem.id === m ) { + results.push( elem ); + return results; + } + } + + // Speed-up: Sizzle("TAG") + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Speed-up: Sizzle(".CLASS") + } else if ( (m = match[3]) && support.getElementsByClassName && context.getElementsByClassName ) { + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // QSA path + if ( support.qsa && (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + nid = old = expando; + newContext = context; + newSelector = nodeType === 9 && selector; + + // qSA works strangely on Element-rooted queries + // We can work around this by specifying an extra ID on the root + // and working up from there (Thanks to Andrew Dupont for the technique) + // IE 8 doesn't work on object elements + if ( nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) { + groups = tokenize( selector 
); + + if ( (old = context.getAttribute("id")) ) { + nid = old.replace( rescape, "\\$&" ); + } else { + context.setAttribute( "id", nid ); + } + nid = "[id='" + nid + "'] "; + + i = groups.length; + while ( i-- ) { + groups[i] = nid + toSelector( groups[i] ); + } + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || context; + newSelector = groups.join(","); + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch(qsaError) { + } finally { + if ( !old ) { + context.removeAttribute("id"); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {Function(string, Object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created div and expects a boolean result + */ +function assert( fn ) { + var div = document.createElement("div"); + + try { + return !!fn( div ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( div.parentNode ) { + div.parentNode.removeChild( div ); + } + // release memory in IE + div = null; + } +} + +/** + * Adds the same handler for all of 
the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = attrs.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + ( ~b.sourceIndex || MAX_NEGATIVE ) - + ( ~a.sourceIndex || MAX_NEGATIVE ); + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for 
validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== strundefined && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, + doc = node ? 
node.ownerDocument || node : preferredDoc, + parent = doc.defaultView; + + // If no document and documentElement is available, return + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Set our document + document = doc; + docElem = doc.documentElement; + + // Support tests + documentIsHTML = !isXML( doc ); + + // Support: IE>8 + // If iframe document is assigned to "document" variable and if iframe has been reloaded, + // IE will throw "permission denied" error when accessing "document" variable, see jQuery #13936 + // IE6-8 do not support the defaultView property so parent will be undefined + if ( parent && parent !== parent.top ) { + // IE11 does not have attachEvent, so all must suffer + if ( parent.addEventListener ) { + parent.addEventListener( "unload", function() { + setDocument(); + }, false ); + } else if ( parent.attachEvent ) { + parent.attachEvent( "onunload", function() { + setDocument(); + }); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties (excepting IE8 booleans) + support.attributes = assert(function( div ) { + div.className = "i"; + return !div.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( div ) { + div.appendChild( doc.createComment("") ); + return !div.getElementsByTagName("*").length; + }); + + // Check if getElementsByClassName can be trusted + support.getElementsByClassName = rnative.test( doc.getElementsByClassName ) && assert(function( div ) { + div.innerHTML = ""; + + // Support: Safari<4 + // Catch class over-caching + div.firstChild.className = "i"; + // Support: Opera<10 + // Catch gEBCN failure to find non-leading classes + return 
div.getElementsByClassName("i").length === 2; + }); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( div ) { + docElem.appendChild( div ).id = expando; + return !doc.getElementsByName || !doc.getElementsByName( expando ).length; + }); + + // ID find and filter + if ( support.getById ) { + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== strundefined && documentIsHTML ) { + var m = context.getElementById( id ); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + return m && m.parentNode ? [ m ] : []; + } + }; + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + } else { + // Support: IE6/7 + // getElementById is not reliable as a find shortcut + delete Expr.find["ID"]; + + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== strundefined ) { + return context.getElementsByTagName( tag ); + } + } : + function( tag, context ) { + var elem, + tmp = [], + i = 0, + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( (elem = results[i++]) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== strundefined && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See http://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( (support.qsa = rnative.test( doc.querySelectorAll )) ) { + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert(function( div ) { + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // http://bugs.jquery.com/ticket/12359 + div.innerHTML = ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // http://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( div.querySelectorAll("[msallowclip^='']").length ) { + 
rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !div.querySelectorAll("[selected]").length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !div.querySelectorAll(":checked").length ) { + rbuggyQSA.push(":checked"); + } + }); + + assert(function( div ) { + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = doc.createElement("input"); + input.setAttribute( "type", "hidden" ); + div.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( div.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( !div.querySelectorAll(":enabled").length ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + div.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( div ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( div, "div" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( div, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = 
rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully does not implement inclusive descendent + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? 
+ a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === doc || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === doc || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === doc ? -1 : + b === doc ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 
1 : + 0; + }; + + return doc; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch(e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + 
relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? 
+ function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== strundefined && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, outerCache, node, diff, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? 
node.nodeName.toLowerCase() === name : node.nodeType === 1 ) { + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + // Seek `elem` from a previously-cached index + outerCache = parent[ expando ] || (parent[ expando ] = {}); + cache = outerCache[ type ] || []; + nodeIndex = cache[0] === dirruns && cache[1]; + diff = cache[0] === dirruns && cache[2]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + outerCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + // Use previously-cached element index if available + } else if ( useCache && (cache = (elem[ expando ] || (elem[ expando ] = {}))[ type ]) && cache[0] === dirruns ) { + diff = cache[1]; + + // xml :nth-child(...) or :nth-last-child(...) or :nth(-last)?-of-type(...) + } else { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? 
node.nodeName.toLowerCase() === name : node.nodeType === 1 ) && ++diff ) { + // Cache the index of each encountered element + if ( useCache ) { + (node[ expando ] || (node[ expando ] = {}))[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? + markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf.call( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? 
+ markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": function( elem ) { + return elem.disabled === false; + }, + + "disabled": function( elem ) { + return elem.disabled === true; + }, + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) 
+ // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? 
argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? 
+ Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + checkNonElements = base && dir === "parentNode", + doneName = done++; + + return combinator.first ? + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + if ( (oldCache = outerCache[ dir ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + outerCache[ dir ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? 
+ function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 
1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf.call( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + return ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context !== document && context; + } + + // Add elements passing elementMatchers directly to results + // Keep `i` a string if there are no elements so `matchedCount` will be "00" below + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: