diff --git a/Makefile b/Makefile index c00e6283..1862dbe3 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ mandir ?= $(PREFIX)/share/man DOCKER ?= podman DOCS_VERSION ?= next RUN_TESTS ?= ci -BACKEND ?= lorax-composer +BACKEND ?= osbuild-composer PKGNAME = lorax VERSION = $(shell awk '/Version:/ { print $$2 }' $(PKGNAME).spec) @@ -47,14 +47,12 @@ install: all check: @echo "*** Running pylint ***" PYTHONPATH=$(PYTHONPATH):./src/ ./tests/pylint/runpylint.py - @echo "*** Running yamllint ***" - ./tests/lint-playbooks.sh test: @echo "*** Running tests ***" PYTHONPATH=$(PYTHONPATH):./src/ $(PYTHON) -m pytest -v --cov-branch \ - --cov=pylorax --cov=lifted --cov=composer \ - ./tests/pylorax/ ./tests/composer/ ./tests/lifted/ + --cov=pylorax --cov=composer \ + ./tests/pylorax/ ./tests/composer/ coverage3 report -m [ -f "/usr/bin/coveralls" ] && [ -n "$(COVERALLS_REPO_TOKEN)" ] && coveralls || echo diff --git a/docs/conf.py b/docs/conf.py index 9f1fceea..ed68fdd2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -265,7 +265,6 @@ latex_documents = [ man_pages = [ ('lorax', 'lorax', u'Lorax Documentation', [u'Weldr Team'], 1), ('livemedia-creator', 'livemedia-creator', u'Live Media Creator Documentation', [u'Weldr Team'], 1), - ('lorax-composer', 'lorax-composer', u'Lorax Composer Documentation', [u'Weldr Team'], 1), ('composer-cli', 'composer-cli', u'Composer Cmdline Utility Documentation', [u'Weldr Team'], 1), ('mkksiso', 'mkksiso', u'Make Kickstart ISO Utility Documentation', [u'Weldr Team'], 1), ] diff --git a/docs/lorax-composer.rst b/docs/lorax-composer.rst index d23be09c..903402bd 100644 --- a/docs/lorax-composer.rst +++ b/docs/lorax-composer.rst @@ -1,535 +1,8 @@ lorax-composer ============== -:Authors: - Brian C. Lane - -``lorax-composer`` is a WELDR API server that allows you to build disk images using -`Blueprints`_ to describe the package versions to be installed into the image. -It is compatible with the Weldr project's bdcs-api REST protocol. More -information on Weldr can be found `on the Weldr blog `_. - -Behind the scenes it uses `livemedia-creator `_ and -`Anaconda `_ to handle the -installation and configuration of the images. - -.. note:: - - ``lorax-composer`` is now deprecated. It is being replaced by the - ``osbuild-composer`` WELDR API server which implements more features (eg. - ostree, image uploads, etc.) You can still use ``composer-cli`` and - ``cockpit-composer`` with ``osbuild-composer``. See the documentation or - the `osbuild website `_ for more information. - - -Important Things To Note ------------------------- - -* As of version 30.7 SELinux can be set to Enforcing. The current state is - logged for debugging purposes and if there are SELinux denials they should - be reported as a bug. - -* All image types lock the root account, except for live-iso. You will need to either - use one of the `Customizations`_ methods for setting a ssh key/password, install a - package that creates a user, or use something like `cloud-init` to setup access at - boot time. - - -Installation ------------- - -The best way to install ``lorax-composer`` is to use ``sudo dnf install -lorax-composer composer-cli``, this will setup the weldr user and install the -systemd socket activation service. You will then need to enable it with ``sudo -systemctl enable lorax-composer.socket && sudo systemctl start -lorax-composer.socket``. This will leave the server off until the first request -is made. Systemd will then launch the server and it will remain running until -the system is rebooted. 
This will cause some delay in responding to the first -request from the UI or `composer-cli`. - -.. note:: - - If you want lorax-composer to respond immediately to the first request you can - start and enable `lorax-composer.service` instead of `lorax-composer.socket` - -Quickstart ----------- - -1. Create a ``weldr`` user and group by running ``useradd weldr`` -2. Remove any pre-existing socket directory with ``rm -rf /run/weldr/`` - A new directory with correct permissions will be created the first time the server runs. -3. Enable the socket activation with ``systemctl enable lorax-composer.socket - && sudo systemctl start lorax-composer.socket``. - -NOTE: You can also run it directly with ``lorax-composer /path/to/blueprints``. However, -``lorax-composer`` does not react well to being started both on the command line and via -socket activation at the same time. It is therefore recommended that you run it directly -on the command line only for testing or development purposes. For real use or development -of other projects that simply use the API, you should stick to socket activation only. - -The ``/path/to/blueprints/`` directory is where the blueprints' git repo will -be created, and all the blueprints created with the ``/api/v0/blueprints/new`` -route will be stored. If there are blueprint ``.toml`` files in the top level -of the directory they will be imported into the blueprint git storage when -``lorax-composer`` starts. - -Logs ----- - -Logs are stored under ``/var/log/lorax-composer/`` and include all console -messages as well as extra debugging info and API requests. - -Security --------- - -Some security related issues that you should be aware of before running ``lorax-composer``: - -* One of the API server threads needs to retain root privileges in order to run Anaconda. -* Only allow authorized users access to the ``weldr`` group and socket. - -Since Anaconda kickstarts are used there is the possibility that a user could -inject commands into a blueprint that would result in the kickstart executing -arbitrary code on the host. Only authorized users should be allowed to build -images using ``lorax-composer``. - -lorax-composer cmdline arguments --------------------------------- - -.. argparse:: - :ref: pylorax.api.cmdline.lorax_composer_parser - :prog: lorax-composer - - -How it Works ------------- - -The server runs as root, and as ``weldr``. Communication with it is via a unix -domain socket (``/run/weldr/api.socket`` by default). The directory and socket -are owned by ``root:weldr`` so that any user in the ``weldr`` group can use the API -to control ``lorax-composer``. - -At startup the server will check for the correct permissions and -ownership of a pre-existing directory, or it will create a new one if it -doesn't exist. The socket path and group owner's name can be changed from the -cmdline by passing it the ``--socket`` and ``--group`` arguments. - -It will then drop root privileges for the API thread and run as the ``weldr`` -user. The queue and compose thread still runs as root because it needs to be -able to mount/umount files and run Anaconda. - -Composing Images ----------------- - -The `welder-web `_ GUI project can be used to construct -blueprints and create composes using a web browser. - -Or use the command line with `composer-cli `_. - -Blueprints ----------- - -Blueprints are simple text files in `TOML `_ format that describe -which packages, and what versions, to install into the image. 
They can also define a limited set -of customizations to make to the final image. - -Example blueprints can be found in the ``lorax-composer`` `test suite -`_, with a simple one -looking like this:: - - name = "base" - description = "A base system with bash" - version = "0.0.1" - - [[packages]] - name = "bash" - version = "4.4.*" - -The ``name`` field is the name of the blueprint. It can contain spaces, but they will be converted to ``-`` -when it is written to disk. It should be short and descriptive. - -``description`` can be a longer description of the blueprint, it is only used for display purposes. - -``version`` is a `semver compatible `_ version number. If -a new blueprint is uploaded with the same ``version`` the server will -automatically bump the PATCH level of the ``version``. If the ``version`` -doesn't match it will be used as is. eg. Uploading a blueprint with ``version`` -set to ``0.1.0`` when the existing blueprint ``version`` is ``0.0.1`` will -result in the new blueprint being stored as ``version 0.1.0``. - -[[packages]] and [[modules]] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -These entries describe the package names and matching version glob to be installed into the image. - -The names must match the names exactly, and the versions can be an exact match -or a filesystem-like glob of the version using ``*`` wildcards and ``?`` -character matching. - -NOTE: Currently there are no differences between ``packages`` and ``modules`` -in ``lorax-composer``. Both are treated like an rpm package dependency. - -For example, to install ``tmux-2.9a`` and ``openssh-server-8.*``, you would add -this to your blueprint:: - - [[packages]] - name = "tmux" - version = "2.9a" - - [[packages]] - name = "openssh-server" - version = "8.*" - - - -[[groups]] -~~~~~~~~~~ - -The ``groups`` entries describe a group of packages to be installed into the image. Package groups are -defined in the repository metadata. Each group has a descriptive name used primarily for display -in user interfaces and an ID more commonly used in kickstart files. Here, the ID is the expected -way of listing a group. - -Groups have three different ways of categorizing their packages: mandatory, default, and optional. -For purposes of blueprints, mandatory and default packages will be installed. There is no mechanism -for selecting optional packages. - -For example, if you want to install the ``anaconda-tools`` group you would add this to your -blueprint:: - - [[groups]] - name="anaconda-tools" - -``groups`` is a TOML list, so each group needs to be listed separately, like ``packages`` but with -no version number. - - -Customizations -~~~~~~~~~~~~~~ - -The ``[customizations]`` section can be used to configure the hostname of the final image. eg.:: - - [customizations] - hostname = "baseimage" - -This is optional and may be left out to use the defaults. - - -[customizations.kernel] -*********************** - -This allows you to append arguments to the bootloader's kernel commandline. This will not have any -effect on ``tar`` or ``ext4-filesystem`` images since they do not include a bootloader. - -For example:: - - [customizations.kernel] - append = "nosmt=force" - - -[[customizations.sshkey]] -************************* - -Set an existing user's ssh key in the final image:: - - [[customizations.sshkey]] - user = "root" - key = "PUBLIC SSH KEY" - -The key will be added to the user's authorized_keys file. - -.. 
warning:: - - ``key`` expects the entire content of ``~/.ssh/id_rsa.pub`` - - -[[customizations.user]] -*********************** - -Add a user to the image, and/or set their ssh key. -All fields for this section are optional except for the ``name``, here is a complete example:: - - [[customizations.user]] - name = "admin" - description = "Administrator account" - password = "$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31L..." - key = "PUBLIC SSH KEY" - home = "/srv/widget/" - shell = "/usr/bin/bash" - groups = ["widget", "users", "wheel"] - uid = 1200 - gid = 1200 - -If the password starts with ``$6$``, ``$5$``, or ``$2b$`` it will be stored as -an encrypted password. Otherwise it will be treated as a plain text password. - -.. warning:: - - ``key`` expects the entire content of ``~/.ssh/id_rsa.pub`` - - -[[customizations.group]] -************************ - -Add a group to the image. ``name`` is required and ``gid`` is optional:: - - [[customizations.group]] - name = "widget" - gid = 1130 - - -[customizations.timezone] -************************* - -Customizing the timezone and the NTP servers to use for the system:: - - [customizations.timezone] - timezone = "US/Eastern" - ntpservers = ["0.north-america.pool.ntp.org", "1.north-america.pool.ntp.org"] - -The values supported by ``timezone`` can be listed by running ``timedatectl list-timezones``. - -If no timezone is setup the system will default to using `UTC`. The ntp servers are also -optional and will default to using the distribution defaults which are fine for most uses. - -In some image types there are already NTP servers setup, eg. Google cloud image, and they -cannot be overridden because they are required to boot in the selected environment. But the -timezone will be updated to the one selected in the blueprint. - - -[customizations.locale] -*********************** - -Customize the locale settings for the system:: - - [customizations.locale] - languages = ["en_US.UTF-8"] - keyboard = "us" - -The values supported by ``languages`` can be listed by running ``localectl list-locales`` from -the command line. - -The values supported by ``keyboard`` can be listed by running ``localectl list-keymaps`` from -the command line. - -Multiple languages can be added. The first one becomes the -primary, and the others are added as secondary. One or the other of ``languages`` -or ``keyboard`` must be included (or both) in the section. - - -[customizations.firewall] -************************* - -By default the firewall blocks all access except for services that enable their ports explicitly, -like ``sshd``. This command can be used to open other ports or services. Ports are configured using -the port:protocol format:: - - [customizations.firewall] - ports = ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"] - -Numeric ports, or their names from ``/etc/services`` can be used in the ``ports`` enabled/disabled lists. - -The blueprint settings extend any existing settings in the image templates, so if ``sshd`` is -already enabled it will extend the list of ports with the ones listed by the blueprint. - -If the distribution uses ``firewalld`` you can specify services listed by ``firewall-cmd --get-services`` -in a ``customizations.firewall.services`` section:: - - [customizations.firewall.services] - enabled = ["ftp", "ntp", "dhcp"] - disabled = ["telnet"] - -Remember that the ``firewall.services`` are different from the names in ``/etc/services``. - -Both are optional, if they are not used leave them out or set them to an empty list ``[]``. 
If you -only want the default firewall setup this section can be omitted from the blueprint. - -NOTE: The ``Google`` and ``OpenStack`` templates explicitly disable the firewall for their environment. -This cannot be overridden by the blueprint. - -[customizations.services] -************************* - -This section can be used to control which services are enabled at boot time. -Some image types already have services enabled or disabled in order for the -image to work correctly, and cannot be overridden. eg. ``ami`` requires -``sshd``, ``chronyd``, and ``cloud-init``. Without them the image will not -boot. Blueprint services are added to, not replacing, the list already in the -templates, if any. - -The service names are systemd service units. You may specify any systemd unit -file accepted by ``systemctl enable`` eg. ``cockpit.socket``:: - - [customizations.services] - enabled = ["sshd", "cockpit.socket", "httpd"] - disabled = ["postfix", "telnetd"] - - -[[repos.git]] -~~~~~~~~~~~~~ - -The ``[[repos.git]]`` entries are used to add files from a `git repository `_ -repository to the created image. The repository is cloned, the specified ``ref`` is checked out -and an rpm is created to install the files to a ``destination`` path. The rpm includes a summary -with the details of the repository and reference used to create it. The rpm is also included in the -image build metadata. - -To create an rpm named ``server-config-1.0-1.noarch.rpm`` you would add this to your blueprint:: - - [[repos.git]] - rpmname="server-config" - rpmversion="1.0" - rpmrelease="1" - summary="Setup files for server deployment" - repo="PATH OF GIT REPO TO CLONE" - ref="v1.0" - destination="/opt/server/" - -* rpmname: Name of the rpm to create, also used as the prefix name in the tar archive -* rpmversion: Version of the rpm, eg. "1.0.0" -* rpmrelease: Release of the rpm, eg. "1" -* summary: Summary string for the rpm -* repo: URL of the get repo to clone and create the archive from -* ref: Git reference to check out. eg. origin/branch-name, git tag, or git commit hash -* destination: Path to install the / of the git repo at when installing the rpm - -An rpm will be created with the contents of the git repository referenced, with the files -being installed under ``/opt/server/`` in this case. - -``ref`` can be any valid git reference for use with ``git archive``. eg. to use the head -of a branch set it to ``origin/branch-name``, a tag name, or a commit hash. - -Note that the repository is cloned in full each time a build is started, so pointing to a -repository with a large amount of history may take a while to clone and use a significant -amount of disk space. The clone is temporary and is removed once the rpm is created. - - -Adding Output Types -------------------- - -``livemedia-creator`` supports a large number of output types, and only some of -these are currently available via ``lorax-composer``. To add a new output type to -lorax-composer a kickstart file needs to be added to ``./share/composer/``. The -name of the kickstart is what will be used by the ``/compose/types`` route, and the -``compose_type`` field of the POST to start a compose. It also needs to have -code added to the :py:func:`pylorax.api.compose.compose_args` function. The -``_MAP`` entry in this function defines what lorax-composer will pass to -:py:func:`pylorax.installer.novirt_install` when it runs the compose. 
When the -compose is finished the output files need to be copied out of the build -directory (``/var/lib/lorax/composer/results//compose/``), -:py:func:`pylorax.api.compose.move_compose_results` handles this for each type. -You should move them instead of copying to save space. - -If the new output type does not have support in livemedia-creator it should be -added there first. This will make the output available to the widest number of -users. - -Example: Add partitioned disk support -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Partitioned disk support is something that livemedia-creator already supports -via the ``--make-disk`` cmdline argument. To add this to lorax-composer it -needs 3 things: - -* A ``partitioned-disk.ks`` file in ``./share/composer/`` -* A new entry in the _MAP in :py:func:`pylorax.api.compose.compose_args` -* Add a bit of code to :py:func:`pylorax.api.compose.move_compose_results` to move the disk image from - the compose directory to the results directory. - -The ``partitioned-disk.ks`` is pretty similar to the example minimal kickstart -in ``./docs/fedora-minimal.ks``. You should remove the ``url`` and ``repo`` -commands, they will be added by the compose process. Make sure the bootloader -packages are included in the ``%packages`` section at the end of the kickstart, -and you will want to leave off the ``%end`` so that the compose can append the -list of packages from the blueprint. - -The new ``_MAP`` entry should be a copy of one of the existing entries, but with ``make_disk`` set -to ``True``. Make sure that none of the other ``make_*`` options are ``True``. The ``image_name`` is -what the name of the final image will be. - -``move_compose_results()`` can be as simple as moving the output file into -the results directory, or it could do some post-processing on it. The end of -the function should always clean up the ``./compose/`` directory, removing any -unneeded extra files. This is especially true for the ``live-iso`` since it produces -the contents of the iso as well as the boot.iso itself. - -Package Sources ---------------- - -By default lorax-composer uses the host's configured repositories. It copies -the ``*.repo`` files from ``/etc/yum.repos.d/`` into -``/var/lib/lorax/composer/repos.d/`` at startup, these are immutable system -repositories and cannot be deleted or changed. If you want to add additional -repos you can put them into ``/var/lib/lorax/composer/repos.d/`` or use the -``/api/v0/projects/source/*`` API routes to create them. - -The new source can be added by doing a POST to the ``/api/v0/projects/source/new`` -route using JSON (with `Content-Type` header set to `application/json`) or TOML -(with it set to `text/x-toml`). The format of the source looks like this (in -TOML):: - - name = "custom-source-1" - url = "https://url/path/to/repository/" - type = "yum-baseurl" - proxy = "https://proxy-url/" - check_ssl = true - check_gpg = true - gpgkey_urls = ["https://url/path/to/gpg-key"] - -The ``proxy`` and ``gpgkey_urls`` entries are optional. All of the others are required. The supported -types for the urls are: - -* ``yum-baseurl`` is a URL to a yum repository. -* ``yum-mirrorlist`` is a URL for a mirrorlist. -* ``yum-metalink`` is a URL for a metalink. - -If ``check_ssl`` is true the https certificates must be valid. If they are self-signed you can either set -this to false, or add your Certificate Authority to the host system. 
- -If ``check_gpg`` is true the GPG key must either be installed on the host system, or ``gpgkey_urls`` -should point to it. - -You can edit an existing source (other than system sources), by doing a POST to the ``new`` route -with the new version of the source. It will overwrite the previous one. - -A list of existing sources is available from ``/api/v0/projects/source/list``, and detailed info -on a source can be retrieved with the ``/api/v0/projects/source/info/`` route. By default -it returns JSON but it can also return TOML if ``?format=toml`` is added to the request. - -Non-system sources can be deleted by doing a ``DELETE`` request to the -``/api/v0/projects/source/delete/`` route. - -The documentation for the source API routes can be `found here `_ - -The configured sources are used for all blueprint depsolve operations, and for composing images. -When adding additional sources you must make sure that the packages in the source do not -conflict with any other package sources, otherwise depsolving will fail. - -DVD ISO Package Source -~~~~~~~~~~~~~~~~~~~~~~ - -In some situations the system may want to *only* use a DVD iso as the package -source, not the repos from the network. ``lorax-composer`` and ``anaconda`` -understand ``file://`` URLs so you can mount an iso on the host, and replace the -system repo files with a configuration file pointing to the DVD. - -* Stop the ``lorax-composer.service`` if it is running -* Move the repo files in ``/etc/yum.repos.d/`` someplace safe -* Create a new ``iso.repo`` file in ``/etc/yum.repos.d/``:: - - [iso] - name=iso - baseurl=file:///mnt/iso/ - enabled=1 - gpgcheck=1 - gpgkey=file:///mnt/iso/RPM-GPG-KEY-redhat-release - -* Remove all the cached repo files from ``/var/lib/lorax/composer/repos/`` -* Restart the ``lorax-composer.service`` -* Check the output of ``composer-cli status show`` for any output specific depsolve errors. - For example, the DVD usually does not include ``grub2-efi-*-cdboot-*`` so the live-iso image - type will not be available. - -If you want to *add* the DVD source to the existing sources you can do that by -mounting the iso and creating a source file to point to it as described in the -`Package Sources`_ documentation. In that case there is no need to remove the other -sources from ``/etc/yum.repos.d/`` or clear the cached repos. +``lorax-composer`` has been replaced by the ``osbuild-composer`` WELDR API +server, which implements more features (e.g. ostree and image uploads). You +can still use ``composer-cli`` and ``cockpit-composer`` with +``osbuild-composer``. See the documentation or the `osbuild website +`_ for more information. diff --git a/docs/man/lorax-composer.1 b/docs/man/lorax-composer.1 deleted file mode 100644 index d331ac48..00000000 --- a/docs/man/lorax-composer.1 +++ /dev/null @@ -1,750 +0,0 @@ -.\" Man page generated from reStructuredText. -. -.TH "LORAX-COMPOSER" "1" "Sep 08, 2020" "34.0" "Lorax" -.SH NAME -lorax-composer \- Lorax Composer Documentation -. -.nr rst2man-indent-level 0 -. -.de1 rstReportMargin -\\$1 \\n[an-margin] -level \\n[rst2man-indent-level] -level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] -- -\\n[rst2man-indent0] -\\n[rst2man-indent1] -\\n[rst2man-indent2] -.. -.de1 INDENT -.\" .rstReportMargin pre: -. RS \\$1 -. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] -. nr rst2man-indent-level +1 -.\" .rstReportMargin post: -.. -.de UNINDENT -.
RE -.\" indent \\n[an-margin] -.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] -.nr rst2man-indent-level -1 -.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] -.in \\n[rst2man-indent\\n[rst2man-indent-level]]u -.. -.INDENT 0.0 -.TP -.B Authors -Brian C. Lane <\fI\%bcl@redhat.com\fP> -.UNINDENT -.sp -\fBlorax\-composer\fP is a WELDR API server that allows you to build disk images using -\fI\%Blueprints\fP to describe the package versions to be installed into the image. -It is compatible with the Weldr project\(aqs bdcs\-api REST protocol. More -information on Weldr can be found \fI\%on the Weldr blog\fP\&. -.sp -Behind the scenes it uses \fI\%livemedia\-creator\fP and -\fI\%Anaconda\fP to handle the -installation and configuration of the images. -.sp -\fBNOTE:\fP -.INDENT 0.0 -.INDENT 3.5 -\fBlorax\-composer\fP is now deprecated. It is being replaced by the -\fBosbuild\-composer\fP WELDR API server which implements more features (eg. -ostree, image uploads, etc.) You can still use \fBcomposer\-cli\fP and -\fBcockpit\-composer\fP with \fBosbuild\-composer\fP\&. See the documentation or -the \fI\%osbuild website\fP for more information. -.UNINDENT -.UNINDENT -.SH IMPORTANT THINGS TO NOTE -.INDENT 0.0 -.IP \(bu 2 -As of version 30.7 SELinux can be set to Enforcing. The current state is -logged for debugging purposes and if there are SELinux denials they should -be reported as a bug. -.IP \(bu 2 -All image types lock the root account, except for live\-iso. You will need to either -use one of the \fI\%Customizations\fP methods for setting a ssh key/password, install a -package that creates a user, or use something like \fIcloud\-init\fP to setup access at -boot time. -.UNINDENT -.SH INSTALLATION -.sp -The best way to install \fBlorax\-composer\fP is to use \fBsudo dnf install -lorax\-composer composer\-cli\fP, this will setup the weldr user and install the -systemd socket activation service. You will then need to enable it with \fBsudo -systemctl enable lorax\-composer.socket && sudo systemctl start -lorax\-composer.socket\fP\&. This will leave the server off until the first request -is made. Systemd will then launch the server and it will remain running until -the system is rebooted. This will cause some delay in responding to the first -request from the UI or \fIcomposer\-cli\fP\&. -.sp -\fBNOTE:\fP -.INDENT 0.0 -.INDENT 3.5 -If you want lorax\-composer to respond immediately to the first request you can -start and enable \fIlorax\-composer.service\fP instead of \fIlorax\-composer.socket\fP -.UNINDENT -.UNINDENT -.SH QUICKSTART -.INDENT 0.0 -.IP 1. 3 -Create a \fBweldr\fP user and group by running \fBuseradd weldr\fP -.IP 2. 3 -Remove any pre\-existing socket directory with \fBrm \-rf /run/weldr/\fP -A new directory with correct permissions will be created the first time the server runs. -.IP 3. 3 -Enable the socket activation with \fBsystemctl enable lorax\-composer.socket -&& sudo systemctl start lorax\-composer.socket\fP\&. -.UNINDENT -.sp -NOTE: You can also run it directly with \fBlorax\-composer /path/to/blueprints\fP\&. However, -\fBlorax\-composer\fP does not react well to being started both on the command line and via -socket activation at the same time. It is therefore recommended that you run it directly -on the command line only for testing or development purposes. For real use or development -of other projects that simply use the API, you should stick to socket activation only. 
-.sp -The \fB/path/to/blueprints/\fP directory is where the blueprints\(aq git repo will -be created, and all the blueprints created with the \fB/api/v0/blueprints/new\fP -route will be stored. If there are blueprint \fB\&.toml\fP files in the top level -of the directory they will be imported into the blueprint git storage when -\fBlorax\-composer\fP starts. -.SH LOGS -.sp -Logs are stored under \fB/var/log/lorax\-composer/\fP and include all console -messages as well as extra debugging info and API requests. -.SH SECURITY -.sp -Some security related issues that you should be aware of before running \fBlorax\-composer\fP: -.INDENT 0.0 -.IP \(bu 2 -One of the API server threads needs to retain root privileges in order to run Anaconda. -.IP \(bu 2 -Only allow authorized users access to the \fBweldr\fP group and socket. -.UNINDENT -.sp -Since Anaconda kickstarts are used there is the possibility that a user could -inject commands into a blueprint that would result in the kickstart executing -arbitrary code on the host. Only authorized users should be allowed to build -images using \fBlorax\-composer\fP\&. -.SH LORAX-COMPOSER CMDLINE ARGUMENTS -.sp -Lorax Composer API Server - -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -usage: lorax\-composer [\-h] [\-\-socket SOCKET] [\-\-user USER] [\-\-group GROUP] [\-\-log LOG] [\-\-mockfiles MOCKFILES] [\-\-sharedir SHAREDIR] [\-V] [\-c CONFIG] [\-\-releasever STRING] [\-\-tmp TMP] [\-\-proxy PROXY] [\-\-no\-system\-repos] BLUEPRINTS -.ft P -.fi -.UNINDENT -.UNINDENT -.SS Positional Arguments -.INDENT 0.0 -.TP -.BBLUEPRINTS -Path to the blueprints -.UNINDENT -.SS Named Arguments -.INDENT 0.0 -.TP -.B\-\-socket -Path to the socket file to listen on -.sp -Default: "/run/weldr/api.socket" -.TP -.B\-\-user -User to use for reduced permissions -.sp -Default: "root" -.TP -.B\-\-group -Group to set ownership of the socket to -.sp -Default: "weldr" -.TP -.B\-\-log -Path to logfile (/var/log/lorax\-composer/composer.log) -.sp -Default: "/var/log/lorax\-composer/composer.log" -.TP -.B\-\-mockfiles -Path to JSON files used for /api/mock/ paths (/var/tmp/bdcs\-mockfiles/) -.sp -Default: "/var/tmp/bdcs\-mockfiles/" -.TP -.B\-\-sharedir -Directory containing all the templates. Overrides config file sharedir -.TP -.B\-V -show program\(aqs version number and exit -.sp -Default: False -.TP -.B\-c, \-\-config -Path to lorax\-composer configuration file. -.sp -Default: "/etc/lorax/composer.conf" -.TP -.B\-\-releasever -Release version to use for $releasever in dnf repository urls -.TP -.B\-\-tmp -Top level temporary directory -.sp -Default: "/var/tmp" -.TP -.B\-\-proxy -Set proxy for DNF, overrides configuration file setting. -.TP -.B\-\-no\-system\-repos -Do not copy over system repos from /etc/yum.repos.d/ at startup -.sp -Default: False -.UNINDENT -.SH HOW IT WORKS -.sp -The server runs as root, and as \fBweldr\fP\&. Communication with it is via a unix -domain socket (\fB/run/weldr/api.socket\fP by default). The directory and socket -are owned by \fBroot:weldr\fP so that any user in the \fBweldr\fP group can use the API -to control \fBlorax\-composer\fP\&. -.sp -At startup the server will check for the correct permissions and -ownership of a pre\-existing directory, or it will create a new one if it -doesn\(aqt exist. The socket path and group owner\(aqs name can be changed from the -cmdline by passing it the \fB\-\-socket\fP and \fB\-\-group\fP arguments. -.sp -It will then drop root privileges for the API thread and run as the \fBweldr\fP -user. 
The queue and compose thread still runs as root because it needs to be -able to mount/umount files and run Anaconda. -.SH COMPOSING IMAGES -.sp -The \fI\%welder\-web\fP GUI project can be used to construct -blueprints and create composes using a web browser. -.sp -Or use the command line with \fI\%composer\-cli\fP\&. -.SH BLUEPRINTS -.sp -Blueprints are simple text files in \fI\%TOML\fP format that describe -which packages, and what versions, to install into the image. They can also define a limited set -of customizations to make to the final image. -.sp -Example blueprints can be found in the \fBlorax\-composer\fP \fI\%test suite\fP, with a simple one -looking like this: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -name = "base" -description = "A base system with bash" -version = "0.0.1" - -[[packages]] -name = "bash" -version = "4.4.*" -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -The \fBname\fP field is the name of the blueprint. It can contain spaces, but they will be converted to \fB\-\fP -when it is written to disk. It should be short and descriptive. -.sp -\fBdescription\fP can be a longer description of the blueprint, it is only used for display purposes. -.sp -\fBversion\fP is a \fI\%semver compatible\fP version number. If -a new blueprint is uploaded with the same \fBversion\fP the server will -automatically bump the PATCH level of the \fBversion\fP\&. If the \fBversion\fP -doesn\(aqt match it will be used as is. eg. Uploading a blueprint with \fBversion\fP -set to \fB0.1.0\fP when the existing blueprint \fBversion\fP is \fB0.0.1\fP will -result in the new blueprint being stored as \fBversion 0.1.0\fP\&. -.SS [[packages]] and [[modules]] -.sp -These entries describe the package names and matching version glob to be installed into the image. -.sp -The names must match the names exactly, and the versions can be an exact match -or a filesystem\-like glob of the version using \fB*\fP wildcards and \fB?\fP -character matching. -.sp -NOTE: Currently there are no differences between \fBpackages\fP and \fBmodules\fP -in \fBlorax\-composer\fP\&. Both are treated like an rpm package dependency. -.sp -For example, to install \fBtmux\-2.9a\fP and \fBopenssh\-server\-8.*\fP, you would add -this to your blueprint: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[[packages]] -name = "tmux" -version = "2.9a" - -[[packages]] -name = "openssh\-server" -version = "8.*" -.ft P -.fi -.UNINDENT -.UNINDENT -.SS [[groups]] -.sp -The \fBgroups\fP entries describe a group of packages to be installed into the image. Package groups are -defined in the repository metadata. Each group has a descriptive name used primarily for display -in user interfaces and an ID more commonly used in kickstart files. Here, the ID is the expected -way of listing a group. -.sp -Groups have three different ways of categorizing their packages: mandatory, default, and optional. -For purposes of blueprints, mandatory and default packages will be installed. There is no mechanism -for selecting optional packages. -.sp -For example, if you want to install the \fBanaconda\-tools\fP group you would add this to your -blueprint: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[[groups]] -name="anaconda\-tools" -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -\fBgroups\fP is a TOML list, so each group needs to be listed separately, like \fBpackages\fP but with -no version number. -.SS Customizations -.sp -The \fB[customizations]\fP section can be used to configure the hostname of the final image. 
eg.: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[customizations] -hostname = "baseimage" -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -This is optional and may be left out to use the defaults. -.SS [customizations.kernel] -.sp -This allows you to append arguments to the bootloader\(aqs kernel commandline. This will not have any -effect on \fBtar\fP or \fBext4\-filesystem\fP images since they do not include a bootloader. -.sp -For example: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[customizations.kernel] -append = "nosmt=force" -.ft P -.fi -.UNINDENT -.UNINDENT -.SS [[customizations.sshkey]] -.sp -Set an existing user\(aqs ssh key in the final image: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[[customizations.sshkey]] -user = "root" -key = "PUBLIC SSH KEY" -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -The key will be added to the user\(aqs authorized_keys file. -.sp -\fBWARNING:\fP -.INDENT 0.0 -.INDENT 3.5 -\fBkey\fP expects the entire content of \fB~/.ssh/id_rsa.pub\fP -.UNINDENT -.UNINDENT -.SS [[customizations.user]] -.sp -Add a user to the image, and/or set their ssh key. -All fields for this section are optional except for the \fBname\fP, here is a complete example: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[[customizations.user]] -name = "admin" -description = "Administrator account" -password = "$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31L..." -key = "PUBLIC SSH KEY" -home = "/srv/widget/" -shell = "/usr/bin/bash" -groups = ["widget", "users", "wheel"] -uid = 1200 -gid = 1200 -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -If the password starts with \fB$6$\fP, \fB$5$\fP, or \fB$2b$\fP it will be stored as -an encrypted password. Otherwise it will be treated as a plain text password. -.sp -\fBWARNING:\fP -.INDENT 0.0 -.INDENT 3.5 -\fBkey\fP expects the entire content of \fB~/.ssh/id_rsa.pub\fP -.UNINDENT -.UNINDENT -.SS [[customizations.group]] -.sp -Add a group to the image. \fBname\fP is required and \fBgid\fP is optional: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[[customizations.group]] -name = "widget" -gid = 1130 -.ft P -.fi -.UNINDENT -.UNINDENT -.SS [customizations.timezone] -.sp -Customizing the timezone and the NTP servers to use for the system: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[customizations.timezone] -timezone = "US/Eastern" -ntpservers = ["0.north\-america.pool.ntp.org", "1.north\-america.pool.ntp.org"] -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -The values supported by \fBtimezone\fP can be listed by running \fBtimedatectl list\-timezones\fP\&. -.sp -If no timezone is setup the system will default to using \fIUTC\fP\&. The ntp servers are also -optional and will default to using the distribution defaults which are fine for most uses. -.sp -In some image types there are already NTP servers setup, eg. Google cloud image, and they -cannot be overridden because they are required to boot in the selected environment. But the -timezone will be updated to the one selected in the blueprint. -.SS [customizations.locale] -.sp -Customize the locale settings for the system: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[customizations.locale] -languages = ["en_US.UTF\-8"] -keyboard = "us" -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -The values supported by \fBlanguages\fP can be listed by running \fBlocalectl list\-locales\fP from -the command line. -.sp -The values supported by \fBkeyboard\fP can be listed by running \fBlocalectl list\-keymaps\fP from -the command line. -.sp -Multiple languages can be added. The first one becomes the -primary, and the others are added as secondary. 
One or the other of \fBlanguages\fP -or \fBkeyboard\fP must be included (or both) in the section. -.SS [customizations.firewall] -.sp -By default the firewall blocks all access except for services that enable their ports explicitly, -like \fBsshd\fP\&. This command can be used to open other ports or services. Ports are configured using -the port:protocol format: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[customizations.firewall] -ports = ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"] -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -Numeric ports, or their names from \fB/etc/services\fP can be used in the \fBports\fP enabled/disabled lists. -.sp -The blueprint settings extend any existing settings in the image templates, so if \fBsshd\fP is -already enabled it will extend the list of ports with the ones listed by the blueprint. -.sp -If the distribution uses \fBfirewalld\fP you can specify services listed by \fBfirewall\-cmd \-\-get\-services\fP -in a \fBcustomizations.firewall.services\fP section: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[customizations.firewall.services] -enabled = ["ftp", "ntp", "dhcp"] -disabled = ["telnet"] -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -Remember that the \fBfirewall.services\fP are different from the names in \fB/etc/services\fP\&. -.sp -Both are optional, if they are not used leave them out or set them to an empty list \fB[]\fP\&. If you -only want the default firewall setup this section can be omitted from the blueprint. -.sp -NOTE: The \fBGoogle\fP and \fBOpenStack\fP templates explicitly disable the firewall for their environment. -This cannot be overridden by the blueprint. -.SS [customizations.services] -.sp -This section can be used to control which services are enabled at boot time. -Some image types already have services enabled or disabled in order for the -image to work correctly, and cannot be overridden. eg. \fBami\fP requires -\fBsshd\fP, \fBchronyd\fP, and \fBcloud\-init\fP\&. Without them the image will not -boot. Blueprint services are added to, not replacing, the list already in the -templates, if any. -.sp -The service names are systemd service units. You may specify any systemd unit -file accepted by \fBsystemctl enable\fP eg. \fBcockpit.socket\fP: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[customizations.services] -enabled = ["sshd", "cockpit.socket", "httpd"] -disabled = ["postfix", "telnetd"] -.ft P -.fi -.UNINDENT -.UNINDENT -.SS [[repos.git]] -.sp -The \fB[[repos.git]]\fP entries are used to add files from a \fI\%git repository\fP -repository to the created image. The repository is cloned, the specified \fBref\fP is checked out -and an rpm is created to install the files to a \fBdestination\fP path. The rpm includes a summary -with the details of the repository and reference used to create it. The rpm is also included in the -image build metadata. -.sp -To create an rpm named \fBserver\-config\-1.0\-1.noarch.rpm\fP you would add this to your blueprint: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -[[repos.git]] -rpmname="server\-config" -rpmversion="1.0" -rpmrelease="1" -summary="Setup files for server deployment" -repo="PATH OF GIT REPO TO CLONE" -ref="v1.0" -destination="/opt/server/" -.ft P -.fi -.UNINDENT -.UNINDENT -.INDENT 0.0 -.IP \(bu 2 -rpmname: Name of the rpm to create, also used as the prefix name in the tar archive -.IP \(bu 2 -rpmversion: Version of the rpm, eg. "1.0.0" -.IP \(bu 2 -rpmrelease: Release of the rpm, eg. 
"1" -.IP \(bu 2 -summary: Summary string for the rpm -.IP \(bu 2 -repo: URL of the get repo to clone and create the archive from -.IP \(bu 2 -ref: Git reference to check out. eg. origin/branch\-name, git tag, or git commit hash -.IP \(bu 2 -destination: Path to install the / of the git repo at when installing the rpm -.UNINDENT -.sp -An rpm will be created with the contents of the git repository referenced, with the files -being installed under \fB/opt/server/\fP in this case. -.sp -\fBref\fP can be any valid git reference for use with \fBgit archive\fP\&. eg. to use the head -of a branch set it to \fBorigin/branch\-name\fP, a tag name, or a commit hash. -.sp -Note that the repository is cloned in full each time a build is started, so pointing to a -repository with a large amount of history may take a while to clone and use a significant -amount of disk space. The clone is temporary and is removed once the rpm is created. -.SH ADDING OUTPUT TYPES -.sp -\fBlivemedia\-creator\fP supports a large number of output types, and only some of -these are currently available via \fBlorax\-composer\fP\&. To add a new output type to -lorax\-composer a kickstart file needs to be added to \fB\&./share/composer/\fP\&. The -name of the kickstart is what will be used by the \fB/compose/types\fP route, and the -\fBcompose_type\fP field of the POST to start a compose. It also needs to have -code added to the \fBpylorax.api.compose.compose_args()\fP function. The -\fB_MAP\fP entry in this function defines what lorax\-composer will pass to -\fBpylorax.installer.novirt_install()\fP when it runs the compose. When the -compose is finished the output files need to be copied out of the build -directory (\fB/var/lib/lorax/composer/results//compose/\fP), -\fBpylorax.api.compose.move_compose_results()\fP handles this for each type. -You should move them instead of copying to save space. -.sp -If the new output type does not have support in livemedia\-creator it should be -added there first. This will make the output available to the widest number of -users. -.SS Example: Add partitioned disk support -.sp -Partitioned disk support is something that livemedia\-creator already supports -via the \fB\-\-make\-disk\fP cmdline argument. To add this to lorax\-composer it -needs 3 things: -.INDENT 0.0 -.IP \(bu 2 -A \fBpartitioned\-disk.ks\fP file in \fB\&./share/composer/\fP -.IP \(bu 2 -A new entry in the _MAP in \fBpylorax.api.compose.compose_args()\fP -.IP \(bu 2 -Add a bit of code to \fBpylorax.api.compose.move_compose_results()\fP to move the disk image from -the compose directory to the results directory. -.UNINDENT -.sp -The \fBpartitioned\-disk.ks\fP is pretty similar to the example minimal kickstart -in \fB\&./docs/fedora\-minimal.ks\fP\&. You should remove the \fBurl\fP and \fBrepo\fP -commands, they will be added by the compose process. Make sure the bootloader -packages are included in the \fB%packages\fP section at the end of the kickstart, -and you will want to leave off the \fB%end\fP so that the compose can append the -list of packages from the blueprint. -.sp -The new \fB_MAP\fP entry should be a copy of one of the existing entries, but with \fBmake_disk\fP set -to \fBTrue\fP\&. Make sure that none of the other \fBmake_*\fP options are \fBTrue\fP\&. The \fBimage_name\fP is -what the name of the final image will be. -.sp -\fBmove_compose_results()\fP can be as simple as moving the output file into -the results directory, or it could do some post\-processing on it. 
The end of -the function should always clean up the \fB\&./compose/\fP directory, removing any -unneeded extra files. This is especially true for the \fBlive\-iso\fP since it produces -the contents of the iso as well as the boot.iso itself. -.SH PACKAGE SOURCES -.sp -By default lorax\-composer uses the host\(aqs configured repositories. It copies -the \fB*.repo\fP files from \fB/etc/yum.repos.d/\fP into -\fB/var/lib/lorax/composer/repos.d/\fP at startup, these are immutable system -repositories and cannot be deleted or changed. If you want to add additional -repos you can put them into \fB/var/lib/lorax/composer/repos.d/\fP or use the -\fB/api/v0/projects/source/*\fP API routes to create them. -.sp -The new source can be added by doing a POST to the \fB/api/v0/projects/source/new\fP -route using JSON (with \fIContent\-Type\fP header set to \fIapplication/json\fP) or TOML -(with it set to \fItext/x\-toml\fP). The format of the source looks like this (in -TOML): -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -name = "custom\-source\-1" -url = "https://url/path/to/repository/" -type = "yum\-baseurl" -proxy = "https://proxy\-url/" -check_ssl = true -check_gpg = true -gpgkey_urls = ["https://url/path/to/gpg\-key"] -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -The \fBproxy\fP and \fBgpgkey_urls\fP entries are optional. All of the others are required. The supported -types for the urls are: -.INDENT 0.0 -.IP \(bu 2 -\fByum\-baseurl\fP is a URL to a yum repository. -.IP \(bu 2 -\fByum\-mirrorlist\fP is a URL for a mirrorlist. -.IP \(bu 2 -\fByum\-metalink\fP is a URL for a metalink. -.UNINDENT -.sp -If \fBcheck_ssl\fP is true the https certificates must be valid. If they are self\-signed you can either set -this to false, or add your Certificate Authority to the host system. -.sp -If \fBcheck_gpg\fP is true the GPG key must either be installed on the host system, or \fBgpgkey_urls\fP -should point to it. -.sp -You can edit an existing source (other than system sources), by doing a POST to the \fBnew\fP route -with the new version of the source. It will overwrite the previous one. -.sp -A list of existing sources is available from \fB/api/v0/projects/source/list\fP, and detailed info -on a source can be retrieved with the \fB/api/v0/projects/source/info/\fP route. By default -it returns JSON but it can also return TOML if \fB?format=toml\fP is added to the request. -.sp -Non\-system sources can be deleted by doing a \fBDELETE\fP request to the -\fB/api/v0/projects/source/delete/\fP route. -.sp -The documentation for the source API routes can be \fI\%found here\fP -.sp -The configured sources are used for all blueprint depsolve operations, and for composing images. -When adding additional sources you must make sure that the packages in the source do not -conflict with any other package sources, otherwise depsolving will fail. -.SS DVD ISO Package Source -.sp -In some situations the system may want to \fIonly\fP use a DVD iso as the package -source, not the repos from the network. \fBlorax\-composer\fP and \fBanaconda\fP -understand \fBfile://\fP URLs so you can mount an iso on the host, and replace the -system repo files with a configuration file pointing to the DVD. 
-.INDENT 0.0 -.IP \(bu 2 -Stop the \fBlorax\-composer.service\fP if it is running -.IP \(bu 2 -Move the repo files in \fB/etc/yum.repos.d/\fP someplace safe -.IP \(bu 2 -Create a new \fBiso.repo\fP file in \fB/etc/yum.repos.d/\fP: -.INDENT 2.0 -.INDENT 3.5 -.sp -.nf -.ft C -[iso] -name=iso -baseurl=file:///mnt/iso/ -enabled=1 -gpgcheck=1 -gpgkey=file:///mnt/iso/RPM\-GPG\-KEY\-redhat\-release -.ft P -.fi -.UNINDENT -.UNINDENT -.IP \(bu 2 -Remove all the cached repo files from \fB/var/lib/lorax/composer/repos/\fP -.IP \(bu 2 -Restart the \fBlorax\-composer.service\fP -.IP \(bu 2 -Check the output of \fBcomposer\-cli status show\fP for any output specific depsolve errors. -For example, the DVD usually does not include \fBgrub2\-efi\-*\-cdboot\-*\fP so the live\-iso image -type will not be available. -.UNINDENT -.sp -If you want to \fIadd\fP the DVD source to the existing sources you can do that by -mounting the iso and creating a source file to point to it as described in the -\fI\%Package Sources\fP documentation. In that case there is no need to remove the other -sources from \fB/etc/yum.repos.d/\fP or clear the cached repos. -.SH AUTHOR -Weldr Team -.SH COPYRIGHT -2018, Red Hat, Inc. -.\" Generated by docutils manpage writer. -. diff --git a/etc/composer.conf b/etc/composer.conf deleted file mode 100644 index b580a388..00000000 --- a/etc/composer.conf +++ /dev/null @@ -1 +0,0 @@ -# lorax-composer configuration file diff --git a/lorax.spec b/lorax.spec index 9aa28ee1..60de38a0 100644 --- a/lorax.spec +++ b/lorax.spec @@ -94,8 +94,7 @@ Summary: Lorax html documentation Requires: lorax = %{version}-%{release} %description docs -Includes the full html documentation for lorax, livemedia-creator, lorax-composer and the -pylorax library. +Includes the full html documentation for lorax, livemedia-creator, and the pylorax library. %package lmc-virt Summary: livemedia-creator libvirt dependencies @@ -132,42 +131,6 @@ Provides: lorax-templates = %{version}-%{release} Lorax templates for creating the boot.iso and live isos are placed in /usr/share/lorax/templates.d/99-generic -%package composer -Summary: Lorax Image Composer API Server -# For Sphinx documentation build -BuildRequires: python3-flask python3-gobject libgit2-glib python3-toml python3-semantic_version - -Requires: lorax = %{version}-%{release} -Requires(pre): /usr/bin/getent -Requires(pre): /usr/sbin/groupadd -Requires(pre): /usr/sbin/useradd - -Requires: python3-toml -Requires: python3-semantic_version -Requires: libgit2 -Requires: libgit2-glib -Requires: python3-flask -Requires: python3-gevent -Requires: anaconda-tui >= 29.19-1 -Requires: qemu-img -Requires: tar -Requires: python3-rpmfluff -Requires: git -Requires: xz -Requires: createrepo_c -Requires: python3-ansible-runner -# For AWS playbook support -Requires: python3-boto3 - -%{?systemd_requires} -BuildRequires: systemd - -# Implements the weldr API -Provides: weldr - -%description composer -lorax-composer provides a REST API for building images using lorax. - %package -n composer-cli Summary: A command line tool for use with the lorax-composer API server @@ -188,29 +151,6 @@ build images, etc. from the command line. rm -rf $RPM_BUILD_ROOT make DESTDIR=$RPM_BUILD_ROOT mandir=%{_mandir} install -# Install example blueprints from the test suite. -# This path MUST match the lorax-composer.service blueprint path. 
-mkdir -p $RPM_BUILD_ROOT/var/lib/lorax/composer/blueprints/ -for bp in example-http-server.toml example-development.toml example-atlas.toml; do - cp ./tests/pylorax/blueprints/$bp $RPM_BUILD_ROOT/var/lib/lorax/composer/blueprints/ -done - -%pre composer -getent group weldr >/dev/null 2>&1 || groupadd -r weldr >/dev/null 2>&1 || : -getent passwd weldr >/dev/null 2>&1 || useradd -r -g weldr -d / -s /sbin/nologin -c "User for lorax-composer" weldr >/dev/null 2>&1 || : - -%post composer -%systemd_post lorax-composer.service -%systemd_post lorax-composer.socket - -%preun composer -%systemd_preun lorax-composer.service -%systemd_preun lorax-composer.socket - -%postun composer -%systemd_postun_with_restart lorax-composer.service -%systemd_postun_with_restart lorax-composer.socket - %files %defattr(-,root,root,-) %license COPYING @@ -245,22 +185,6 @@ getent passwd weldr >/dev/null 2>&1 || useradd -r -g weldr -d / -s /sbin/nologin %dir %{_datadir}/lorax/templates.d %{_datadir}/lorax/templates.d/* -%files composer -%config(noreplace) %{_sysconfdir}/lorax/composer.conf -%{python3_sitelib}/pylorax/api/* -%{python3_sitelib}/lifted/* -%{_sbindir}/lorax-composer -%{_unitdir}/lorax-composer.service -%{_unitdir}/lorax-composer.socket -%dir %{_datadir}/lorax/composer -%{_datadir}/lorax/composer/* -%{_datadir}/lorax/lifted/* -%{_tmpfilesdir}/lorax-composer.conf -%dir %attr(0771, root, weldr) %{_sharedstatedir}/lorax/composer/ -%dir %attr(0771, root, weldr) %{_sharedstatedir}/lorax/composer/blueprints/ -%attr(0771, weldr, weldr) %{_sharedstatedir}/lorax/composer/blueprints/* -%{_mandir}/man1/lorax-composer.1* - %files -n composer-cli %{_bindir}/composer-cli %{python3_sitelib}/composer/* diff --git a/setup.py b/setup.py index f9035035..0de0a346 100644 --- a/setup.py +++ b/setup.py @@ -7,11 +7,7 @@ import sys # config file data_files = [("/etc/lorax", ["etc/lorax.conf"]), - ("/etc/lorax", ["etc/composer.conf"]), - ("/usr/lib/systemd/system", ["systemd/lorax-composer.service", - "systemd/lorax-composer.socket"]), - ("/usr/lib/tmpfiles.d/", ["systemd/lorax-composer.conf", - "systemd/lorax.conf"])] + ("/usr/lib/tmpfiles.d/", ["systemd/lorax.conf"])] # shared files for root, dnames, fnames in os.walk("share"): @@ -21,8 +17,7 @@ for root, dnames, fnames in os.walk("share"): # executable data_files.append(("/usr/sbin", ["src/sbin/lorax", "src/sbin/mkefiboot", - "src/sbin/livemedia-creator", "src/sbin/lorax-composer", - "src/sbin/mkksiso"])) + "src/sbin/livemedia-creator", "src/sbin/mkksiso"])) data_files.append(("/usr/bin", ["src/bin/image-minimizer", "src/bin/mk-s390-cdboot", "src/bin/composer-cli"])) @@ -48,7 +43,7 @@ setup(name="lorax", url="http://www.github.com/weldr/lorax/", download_url="http://www.github.com/weldr/lorax/releases/", license="GPLv2+", - packages=["pylorax", "pylorax.api", "composer", "composer.cli", "lifted"], + packages=["pylorax", "composer", "composer.cli"], package_dir={"" : "src"}, data_files=data_files ) diff --git a/share/composer/alibaba.ks b/share/composer/alibaba.ks deleted file mode 100644 index d1ff2724..00000000 --- a/share/composer/alibaba.ks +++ /dev/null @@ -1,44 +0,0 @@ -# Lorax Composer partitioned disk output kickstart template - -# Firewall configuration -firewall --enabled - -# NOTE: The root account is locked by default -# Network information -network --bootproto=dhcp --onboot=on --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# 
SELinux configuration -selinux --enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System bootloader configuration -bootloader --location=mbr - -# Basic services -services --enabled=sshd,cloud-init - -%post -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Clear /etc/machine-id -rm /etc/machine-id -touch /etc/machine-id - -# Remove the rescue kernel and image to save space -rm -f /boot/*-rescue* -%end - -%packages -kernel -selinux-policy-targeted - -cloud-init - -# NOTE lorax-composer will add the blueprint packages below here, including the final %end diff --git a/share/composer/ami.ks b/share/composer/ami.ks deleted file mode 100644 index 021c2320..00000000 --- a/share/composer/ami.ks +++ /dev/null @@ -1,51 +0,0 @@ -# Lorax Composer AMI output kickstart template - -# Firewall configuration -firewall --enabled - -# NOTE: The root account is locked by default -# Network information -network --bootproto=dhcp --onboot=on --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# SELinux configuration -selinux --enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System bootloader configuration -bootloader --location=mbr --append="no_timer_check console=ttyS0,115200n8 console=tty1 net.ifnames=0" -# Add platform specific partitions -reqpart --add-boot - -# Basic services -services --enabled=sshd,chronyd,cloud-init - -%post -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Clear /etc/machine-id -rm /etc/machine-id -touch /etc/machine-id - -# tell cloud-init to create the ec2-user account -sed -i 's/cloud-user/ec2-user/' /etc/cloud/cloud.cfg - -# Remove the rescue kernel and image to save space -rm -f /boot/*-rescue* -%end - -%packages -kernel -selinux-policy-targeted - -chrony - -cloud-init - -# NOTE lorax-composer will add the recipe packages below here, including the final %end diff --git a/share/composer/ext4-filesystem.ks b/share/composer/ext4-filesystem.ks deleted file mode 100644 index 2145b21d..00000000 --- a/share/composer/ext4-filesystem.ks +++ /dev/null @@ -1,42 +0,0 @@ -# Lorax Composer filesystem output kickstart template - -# Firewall configuration -firewall --enabled - -# NOTE: The root account is locked by default -# Network information -network --bootproto=dhcp --onboot=on --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# SELinux configuration -selinux --enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System bootloader configuration (unpartitioned fs image doesn't use a bootloader) -bootloader --location=none - -%post -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Clear /etc/machine-id -rm /etc/machine-id -touch /etc/machine-id - -# Remove the rescue kernel and image to save space -rm -f /boot/*-rescue* -%end - -# NOTE Do NOT add any other sections after %packages -%packages --nocore -# Packages requires to support this output format go here -policycoreutils -selinux-policy-targeted -kernel - -# NOTE lorax-composer will add the blueprint packages below here, including the final %end diff --git a/share/composer/google.ks b/share/composer/google.ks deleted file mode 100644 index f664966d..00000000 --- 
a/share/composer/google.ks +++ /dev/null @@ -1,78 +0,0 @@ -# Lorax Composer partitioned disk output kickstart template - -# Firewall configuration -firewall --disabled - -# NOTE: The root account is locked by default -# Network information -network --bootproto=dhcp --onboot=on --mtu=1460 --noipv6 --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# SELinux configuration -selinux --enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System timezone -timezone --ntpservers metadata.google.internal UTC -# System bootloader configuration -bootloader --location=mbr --append="console=ttyS0,38400n8d" -# Add platform specific partitions -reqpart --add-boot - -services --disabled=irqbalance - -%post -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Clear /etc/machine-id -rm /etc/machine-id -touch /etc/machine-id - -# Remove the rescue kernel and image to save space -rm -f /boot/*-rescue* - -# Replace the ssh configuration -cat > /etc/ssh/sshd_config << EOF -# Disable PasswordAuthentication as ssh keys are more secure. -PasswordAuthentication no - -# Disable root login, using sudo provides better auditing. -PermitRootLogin no - -PermitTunnel no -AllowTcpForwarding yes -X11Forwarding no - -# Compute times out connections after 10 minutes of inactivity. Keep alive -# ssh connections by sending a packet every 7 minutes. -ClientAliveInterval 420 -EOF - -cat > /etc/ssh/ssh_config << EOF -Host * -Protocol 2 -ForwardAgent no -ForwardX11 no -HostbasedAuthentication no -StrictHostKeyChecking no -Ciphers aes128-ctr,aes192-ctr,aes256-ctr,arcfour256,arcfour128,aes128-cbc,3des-cbc -Tunnel no - -# Google Compute Engine times out connections after 10 minutes of inactivity. -# Keep alive ssh connections by sending a packet every 7 minutes. 
-ServerAliveInterval 420 -EOF - -%end - -%packages -kernel -selinux-policy-targeted - -# NOTE lorax-composer will add the blueprint packages below here, including the final %end diff --git a/share/composer/hyper-v.ks b/share/composer/hyper-v.ks deleted file mode 100644 index 08a9f404..00000000 --- a/share/composer/hyper-v.ks +++ /dev/null @@ -1,59 +0,0 @@ -# Lorax Composer VHD (Azure, Hyper-V) output kickstart template - -# Firewall configuration -firewall --enabled - -# NOTE: The root account is locked by default -# Network information -network --bootproto=dhcp --onboot=on --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# SELinux configuration -selinux --enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System bootloader configuration -bootloader --location=mbr --append="no_timer_check console=ttyS0,115200n8 earlyprintk=ttyS0,115200 rootdelay=300 net.ifnames=0" -# Add platform specific partitions -reqpart --add-boot - -# Basic services -services --enabled=sshd,chronyd - -%post -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Clear /etc/machine-id -rm /etc/machine-id -touch /etc/machine-id - -# Remove the rescue kernel and image to save space -rm -f /boot/*-rescue* - -# Add Hyper-V modules into initramfs -cat > /etc/dracut.conf.d/10-hyperv.conf << EOF -add_drivers+=" hv_vmbus hv_netvsc hv_storvsc " -EOF - -# Regenerate the intramfs image -dracut -f -v --persistent-policy by-uuid -%end - -%addon com_redhat_kdump --disable -%end - -%packages -kernel -selinux-policy-targeted - -chrony - -hyperv-daemons - -# NOTE lorax-composer will add the recipe packages below here, including the final %end diff --git a/share/composer/live-iso.ks b/share/composer/live-iso.ks deleted file mode 100644 index 2ac7e25d..00000000 --- a/share/composer/live-iso.ks +++ /dev/null @@ -1,374 +0,0 @@ -# Lorax Composer Live ISO output kickstart template - -# Firewall configuration -firewall --enabled --service=mdns - -# X Window System configuration information -xconfig --startxonboot -# Root password is removed for live-iso -rootpw --plaintext removethispw -# Network information -network --bootproto=dhcp --device=link --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# SELinux configuration -selinux --enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System services -services --disabled="network,sshd" --enabled="NetworkManager" -# System bootloader configuration -bootloader --location=none - -%post -# FIXME: it'd be better to get this installed from a package -cat > /etc/rc.d/init.d/livesys << EOF -#!/bin/bash -# -# live: Init script for live image -# -# chkconfig: 345 00 99 -# description: Init script for live image. -### BEGIN INIT INFO -# X-Start-Before: display-manager -### END INIT INFO - -. /etc/init.d/functions - -if ! 
strstr "\`cat /proc/cmdline\`" rd.live.image || [ "\$1" != "start" ]; then - exit 0 -fi - -if [ -e /.liveimg-configured ] ; then - configdone=1 -fi - -exists() { - which \$1 >/dev/null 2>&1 || return - \$* -} - -livedir="LiveOS" -for arg in \`cat /proc/cmdline\` ; do - if [ "\${arg##rd.live.dir=}" != "\${arg}" ]; then - livedir=\${arg##rd.live.dir=} - return - fi - if [ "\${arg##live_dir=}" != "\${arg}" ]; then - livedir=\${arg##live_dir=} - return - fi -done - -# enable swaps unless requested otherwise -swaps=\`blkid -t TYPE=swap -o device\` -if ! strstr "\`cat /proc/cmdline\`" noswap && [ -n "\$swaps" ] ; then - for s in \$swaps ; do - action "Enabling swap partition \$s" swapon \$s - done -fi -if ! strstr "\`cat /proc/cmdline\`" noswap && [ -f /run/initramfs/live/\${livedir}/swap.img ] ; then - action "Enabling swap file" swapon /run/initramfs/live/\${livedir}/swap.img -fi - -mountPersistentHome() { - # support label/uuid - if [ "\${homedev##LABEL=}" != "\${homedev}" -o "\${homedev##UUID=}" != "\${homedev}" ]; then - homedev=\`/sbin/blkid -o device -t "\$homedev"\` - fi - - # if we're given a file rather than a blockdev, loopback it - if [ "\${homedev##mtd}" != "\${homedev}" ]; then - # mtd devs don't have a block device but get magic-mounted with -t jffs2 - mountopts="-t jffs2" - elif [ ! -b "\$homedev" ]; then - loopdev=\`losetup -f\` - if [ "\${homedev##/run/initramfs/live}" != "\${homedev}" ]; then - action "Remounting live store r/w" mount -o remount,rw /run/initramfs/live - fi - losetup \$loopdev \$homedev - homedev=\$loopdev - fi - - # if it's encrypted, we need to unlock it - if [ "\$(/sbin/blkid -s TYPE -o value \$homedev 2>/dev/null)" = "crypto_LUKS" ]; then - echo - echo "Setting up encrypted /home device" - plymouth ask-for-password --command="cryptsetup luksOpen \$homedev EncHome" - homedev=/dev/mapper/EncHome - fi - - # and finally do the mount - mount \$mountopts \$homedev /home - # if we have /home under what's passed for persistent home, then - # we should make that the real /home. useful for mtd device on olpc - if [ -d /home/home ]; then mount --bind /home/home /home ; fi - [ -x /sbin/restorecon ] && /sbin/restorecon /home - if [ -d /home/liveuser ]; then USERADDARGS="-M" ; fi -} - -findPersistentHome() { - for arg in \`cat /proc/cmdline\` ; do - if [ "\${arg##persistenthome=}" != "\${arg}" ]; then - homedev=\${arg##persistenthome=} - return - fi - done -} - -if strstr "\`cat /proc/cmdline\`" persistenthome= ; then - findPersistentHome -elif [ -e /run/initramfs/live/\${livedir}/home.img ]; then - homedev=/run/initramfs/live/\${livedir}/home.img -fi - -# if we have a persistent /home, then we want to go ahead and mount it -if ! 
strstr "\`cat /proc/cmdline\`" nopersistenthome && [ -n "\$homedev" ] ; then - action "Mounting persistent /home" mountPersistentHome -fi - -if [ -n "\$configdone" ]; then - exit 0 -fi - -# add fedora user with no passwd -action "Adding live user" useradd \$USERADDARGS -c "Live System User" liveuser -passwd -d liveuser > /dev/null -usermod -aG wheel liveuser > /dev/null - -# Remove root password lock -passwd -d root > /dev/null - -# turn off firstboot for livecd boots -systemctl --no-reload disable firstboot-text.service 2> /dev/null || : -systemctl --no-reload disable firstboot-graphical.service 2> /dev/null || : -systemctl stop firstboot-text.service 2> /dev/null || : -systemctl stop firstboot-graphical.service 2> /dev/null || : - -# don't use prelink on a running live image -sed -i 's/PRELINKING=yes/PRELINKING=no/' /etc/sysconfig/prelink &>/dev/null || : - -# turn off mdmonitor by default -systemctl --no-reload disable mdmonitor.service 2> /dev/null || : -systemctl --no-reload disable mdmonitor-takeover.service 2> /dev/null || : -systemctl stop mdmonitor.service 2> /dev/null || : -systemctl stop mdmonitor-takeover.service 2> /dev/null || : - -# don't enable the gnome-settings-daemon packagekit plugin -gsettings set org.gnome.software download-updates 'false' || : - -# don't start cron/at as they tend to spawn things which are -# disk intensive that are painful on a live image -systemctl --no-reload disable crond.service 2> /dev/null || : -systemctl --no-reload disable atd.service 2> /dev/null || : -systemctl stop crond.service 2> /dev/null || : -systemctl stop atd.service 2> /dev/null || : - -# turn off abrtd on a live image -systemctl --no-reload disable abrtd.service 2> /dev/null || : -systemctl stop abrtd.service 2> /dev/null || : - -# Don't sync the system clock when running live (RHBZ #1018162) -sed -i 's/rtcsync//' /etc/chrony.conf - -# Mark things as configured -touch /.liveimg-configured - -# add static hostname to work around xauth bug -# https://bugzilla.redhat.com/show_bug.cgi?id=679486 -echo "localhost" > /etc/hostname - -EOF - -# bah, hal starts way too late -cat > /etc/rc.d/init.d/livesys-late << EOF -#!/bin/bash -# -# live: Late init script for live image -# -# chkconfig: 345 99 01 -# description: Late init script for live image. - -. /etc/init.d/functions - -if ! 
strstr "\`cat /proc/cmdline\`" rd.live.image || [ "\$1" != "start" ] || [ -e /.liveimg-late-configured ] ; then - exit 0 -fi - -exists() { - which \$1 >/dev/null 2>&1 || return - \$* -} - -touch /.liveimg-late-configured - -# read some variables out of /proc/cmdline -for o in \`cat /proc/cmdline\` ; do - case \$o in - ks=*) - ks="--kickstart=\${o#ks=}" - ;; - xdriver=*) - xdriver="\${o#xdriver=}" - ;; - esac -done - -# if liveinst or textinst is given, start anaconda -if strstr "\`cat /proc/cmdline\`" liveinst ; then - plymouth --quit - /usr/sbin/liveinst \$ks -fi -if strstr "\`cat /proc/cmdline\`" textinst ; then - plymouth --quit - /usr/sbin/liveinst --text \$ks -fi - -# configure X, allowing user to override xdriver -if [ -n "\$xdriver" ]; then - cat > /etc/X11/xorg.conf.d/00-xdriver.conf <> /etc/fstab << EOF -vartmp /var/tmp tmpfs defaults 0 0 -EOF - -# work around for poor key import UI in PackageKit -rm -f /var/lib/rpm/__db* -releasever=$(rpm -q --qf '%{version}\n' --whatprovides system-release) -basearch=$(uname -i) -rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch -echo "Packages within this LiveCD" -rpm -qa -# Note that running rpm recreates the rpm db files which aren't needed or wanted -rm -f /var/lib/rpm/__db* - -# go ahead and pre-make the man -k cache (#455968) -/usr/bin/mandb - -# make sure there aren't core files lying around -rm -f /core* - -# convince readahead not to collect -# FIXME: for systemd - -echo 'File created by kickstart. See systemd-update-done.service(8).' \ - | tee /etc/.updated >/var/.updated - -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Remove the rescue kernel and image to save space -# Installation will recreate these on the target -rm -f /boot/*-rescue* -%end - -%post -cat >> /etc/rc.d/init.d/livesys << EOF - -# disable updates plugin -cat >> /usr/share/glib-2.0/schemas/org.gnome.software.gschema.override << FOE -[org.gnome.software] -download-updates=false -FOE - -# don't autostart gnome-software session service -rm -f /etc/xdg/autostart/gnome-software-service.desktop - -# disable the gnome-software shell search provider -cat >> /usr/share/gnome-shell/search-providers/org.gnome.Software-search-provider.ini << FOE -DefaultDisabled=true -FOE - -# don't run gnome-initial-setup -mkdir ~liveuser/.config -touch ~liveuser/.config/gnome-initial-setup-done - -# make the installer show up -if [ -f /usr/share/applications/liveinst.desktop ]; then - # Show harddisk install in shell dash - sed -i -e 's/NoDisplay=true/NoDisplay=false/' /usr/share/applications/liveinst.desktop "" - # need to move it to anaconda.desktop to make shell happy - mv /usr/share/applications/liveinst.desktop /usr/share/applications/anaconda.desktop - - cat >> /usr/share/glib-2.0/schemas/org.gnome.shell.gschema.override << FOE -[org.gnome.shell] -favorite-apps=['firefox.desktop', 'evolution.desktop', 'rhythmbox.desktop', 'shotwell.desktop', 'org.gnome.Nautilus.desktop', 'anaconda.desktop'] -FOE - - # Make the welcome screen show up - if [ -f /usr/share/anaconda/gnome/fedora-welcome.desktop ]; then - mkdir -p ~liveuser/.config/autostart - cp /usr/share/anaconda/gnome/fedora-welcome.desktop /usr/share/applications/ - cp /usr/share/anaconda/gnome/fedora-welcome.desktop ~liveuser/.config/autostart/ - fi - - # Copy Anaconda branding in place - if [ -d /usr/share/lorax/product/usr/share/anaconda ]; then - cp -a /usr/share/lorax/product/* / - fi -fi - -# rebuild schema cache with any overrides we installed -glib-compile-schemas /usr/share/glib-2.0/schemas - -# 
set up auto-login -cat > /etc/gdm/custom.conf << FOE -[daemon] -AutomaticLoginEnable=True -AutomaticLogin=liveuser -FOE - -# Turn off PackageKit-command-not-found while uninstalled -if [ -f /etc/PackageKit/CommandNotFound.conf ]; then - sed -i -e 's/^SoftwareSourceSearch=true/SoftwareSourceSearch=false/' /etc/PackageKit/CommandNotFound.conf -fi - -# make sure to set the right permissions and selinux contexts -chown -R liveuser:liveuser /home/liveuser/ -restorecon -R /home/liveuser/ - -EOF -%end - -# NOTE Do NOT add any other sections after %packages -%packages -# Packages requires to support this output format go here -isomd5sum -kernel -dracut-config-generic -dracut-live -system-logos -selinux-policy-targeted - -# no longer in @core since 2018-10, but needed for livesys script -initscripts -chkconfig - -# NOTE lorax-composer will add the blueprint packages below here, including the final %end diff --git a/share/composer/liveimg-tar.ks b/share/composer/liveimg-tar.ks deleted file mode 100644 index 3ddbf759..00000000 --- a/share/composer/liveimg-tar.ks +++ /dev/null @@ -1,47 +0,0 @@ -# Lorax Composer tar output kickstart template -# Add kernel and grub2 for use with anaconda's kickstart liveimg command - -# Firewall configuration -firewall --enabled - -# NOTE: The root account is locked by default -# Network information -network --bootproto=dhcp --onboot=on --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# SELinux configuration -selinux --enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System bootloader configuration (tar doesn't need a bootloader) -bootloader --location=none - -%post -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Clear /etc/machine-id -rm /etc/machine-id -touch /etc/machine-id - -# Remove the rescue kernel and image to save space -rm -f /boot/*-rescue* -%end - -# NOTE Do NOT add any other sections after %packages -%packages --nocore -# Packages requires to support this output format go here -policycoreutils -selinux-policy-targeted - -# Packages needed for liveimg -kernel -grub2 -grub2-tools - -# NOTE lorax-composer will add the blueprint packages below here, including the final %end diff --git a/share/composer/openstack.ks b/share/composer/openstack.ks deleted file mode 100644 index 3f9f1c6e..00000000 --- a/share/composer/openstack.ks +++ /dev/null @@ -1,49 +0,0 @@ -# Lorax Composer openstack output kickstart template - -# Firewall configuration -firewall --disabled - -# NOTE: The root account is locked by default -# Network information -network --bootproto=dhcp --onboot=on --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# SELinux configuration -selinux --enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System bootloader configuration -bootloader --location=mbr --append="no_timer_check console=ttyS0,115200n8 console=tty1 net.ifnames=0" -# Add platform specific partitions -reqpart --add-boot - -# Start sshd and cloud-init at boot time -services --enabled=sshd,cloud-init,cloud-init-local,cloud-config,cloud-final - -%post -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Clear /etc/machine-id -rm /etc/machine-id -touch /etc/machine-id - -# Remove 
the rescue kernel and image to save space -rm -f /boot/*-rescue* -%end - -%packages -kernel -selinux-policy-targeted - -# Make sure virt guest agents are installed -qemu-guest-agent -spice-vdagent -cloud-init - -# NOTE lorax-composer will add the recipe packages below here, including the final %end diff --git a/share/composer/partitioned-disk.ks b/share/composer/partitioned-disk.ks deleted file mode 100644 index fc5b826b..00000000 --- a/share/composer/partitioned-disk.ks +++ /dev/null @@ -1,41 +0,0 @@ -# Lorax Composer partitioned disk output kickstart template - -# Firewall configuration -firewall --enabled - -# NOTE: The root account is locked by default -# Network information -network --bootproto=dhcp --onboot=on --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# SELinux configuration -selinux --enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System bootloader configuration -bootloader --location=mbr -# Add platform specific partitions -reqpart --add-boot - -%post -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Clear /etc/machine-id -rm /etc/machine-id -touch /etc/machine-id - -# Remove the rescue kernel and image to save space -rm -f /boot/*-rescue* -%end - -%packages -kernel -selinux-policy-targeted - -# NOTE lorax-composer will add the blueprint packages below here, including the final %end diff --git a/share/composer/qcow2.ks b/share/composer/qcow2.ks deleted file mode 100644 index 99b50f4b..00000000 --- a/share/composer/qcow2.ks +++ /dev/null @@ -1,45 +0,0 @@ -# Lorax Composer qcow2 output kickstart template - -# Firewall configuration -firewall --enabled - -# NOTE: The root account is locked by default -# Network information -network --bootproto=dhcp --onboot=on --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# SELinux configuration -selinux --enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System bootloader configuration -bootloader --location=mbr -# Add platform specific partitions -reqpart --add-boot - -%post -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Clear /etc/machine-id -rm /etc/machine-id -touch /etc/machine-id - -# Remove the rescue kernel and image to save space -rm -f /boot/*-rescue* -%end - -%packages -kernel -selinux-policy-targeted - -# Make sure virt guest agents are installed -qemu-guest-agent -spice-vdagent - -# NOTE lorax-composer will add the recipe packages below here, including the final %end diff --git a/share/composer/tar.ks b/share/composer/tar.ks deleted file mode 100644 index 4e1dd999..00000000 --- a/share/composer/tar.ks +++ /dev/null @@ -1,41 +0,0 @@ -# Lorax Composer tar output kickstart template - -# Firewall configuration -firewall --enabled - -# NOTE: The root account is locked by default -# Network information -network --bootproto=dhcp --onboot=on --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# SELinux configuration -selinux --enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System bootloader configuration (tar doesn't need 
a bootloader) -bootloader --location=none - -%post -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Clear /etc/machine-id -rm /etc/machine-id -touch /etc/machine-id - -# Remove the rescue kernel and image to save space -rm -f /boot/*-rescue* -%end - -# NOTE Do NOT add any other sections after %packages -%packages --nocore -# Packages requires to support this output format go here -policycoreutils -selinux-policy-targeted - -# NOTE lorax-composer will add the blueprint packages below here, including the final %end diff --git a/share/composer/vhd.ks b/share/composer/vhd.ks deleted file mode 100644 index f0bf01b5..00000000 --- a/share/composer/vhd.ks +++ /dev/null @@ -1,89 +0,0 @@ -# Lorax Composer VHD (Azure, Hyper-V) output kickstart template - -# Firewall configuration -firewall --enabled - -# NOTE: The root account is locked by default -# Network information -network --bootproto=dhcp --onboot=on --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# SELinux configuration -selinux --enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System bootloader configuration -bootloader --location=mbr --append="no_timer_check console=ttyS0,115200n8 earlyprintk=ttyS0,115200 rootdelay=300 net.ifnames=0" -# Add platform specific partitions -reqpart --add-boot - -# Basic services -services --enabled=sshd,chronyd,waagent,cloud-init,cloud-init-local,cloud-config,cloud-final - -%post -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Clear /etc/machine-id -rm /etc/machine-id -touch /etc/machine-id - -# Remove the rescue kernel and image to save space -rm -f /boot/*-rescue* - -# This file is required by waagent in RHEL, but compatible with NetworkManager -cat > /etc/sysconfig/network-scripts/ifcfg-eth0 << EOF -DEVICE=eth0 -ONBOOT=yes -BOOTPROTO=dhcp -TYPE=Ethernet -USERCTL=yes -PEERDNS=yes -IPV6INIT=no -EOF - -# Restrict cloud-init to Azure datasource -cat > /etc/cloud/cloud.cfg.d/91-azure_datasource.cfg << EOF -# Azure Data Source config -datasource_list: [ Azure ] -datasource: - Azure: - apply_network_config: False -EOF - -# Setup waagent to work with cloud-init -sed -i 's/Provisioning.Enabled=y/Provisioning.Enabled=n/g' /etc/waagent.conf -sed -i 's/Provisioning.UseCloudInit=n/Provisioning.UseCloudInit=y/g' /etc/waagent.conf - -# Add Hyper-V modules into initramfs -cat > /etc/dracut.conf.d/10-hyperv.conf << EOF -add_drivers+=" hv_vmbus hv_netvsc hv_storvsc " -EOF - -# Regenerate the intramfs image -dracut -f -v --persistent-policy by-uuid -%end - -%addon com_redhat_kdump --disable -%end - -%packages -kernel -selinux-policy-targeted - -chrony - -WALinuxAgent -python3 -net-tools - -cloud-init -cloud-utils-growpart -gdisk - -# NOTE lorax-composer will add the recipe packages below here, including the final %end diff --git a/share/composer/vmdk.ks b/share/composer/vmdk.ks deleted file mode 100644 index 4f320e95..00000000 --- a/share/composer/vmdk.ks +++ /dev/null @@ -1,47 +0,0 @@ -# Lorax Composer vmdk kickstart template - -# Firewall configuration -firewall --enabled - -# NOTE: The root account is locked by default -# Network information -network --bootproto=dhcp --onboot=on --activate -# NOTE: keyboard and lang can be replaced by blueprint customizations.locale settings -# System keyboard -keyboard --xlayouts=us --vckeymap=us -# System language -lang en_US.UTF-8 -# SELinux configuration -selinux 
--enforcing -# Installation logging level -logging --level=info -# Shutdown after installation -shutdown -# System bootloader configuration -bootloader --location=mbr -# Add platform specific partitions -reqpart --add-boot - -# Basic services -services --enabled=sshd,chronyd,vmtoolsd - -%post -# Remove random-seed -rm /var/lib/systemd/random-seed - -# Clear /etc/machine-id -rm /etc/machine-id -touch /etc/machine-id - -# Remove the rescue kernel and image to save space -rm -f /boot/*-rescue* -%end - -%packages -kernel -selinux-policy-targeted - -chrony -open-vm-tools - -# NOTE lorax-composer will add the recipe packages below here, including the final %end diff --git a/share/lifted/providers/aws/library/ec2_snapshot_import.py b/share/lifted/providers/aws/library/ec2_snapshot_import.py deleted file mode 100644 index f7fd4df6..00000000 --- a/share/lifted/providers/aws/library/ec2_snapshot_import.py +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2019 Red Hat, Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - -DOCUMENTATION = ''' ---- -module: ec2_snapshot_import -short_description: Imports a disk into an EBS snapshot -description: - - Imports a disk into an EBS snapshot -version_added: "2.10" -options: - description: - description: - - description of the import snapshot task - required: false - type: str - format: - description: - - The format of the disk image being imported. - required: true - type: str - url: - description: - - The URL to the Amazon S3-based disk image being imported. It can either be a https URL (https://..) or an Amazon S3 URL (s3://..). - Either C(url) or C(s3_bucket) and C(s3_key) are required. - required: false - type: str - s3_bucket: - description: - - The name of the S3 bucket where the disk image is located. - - C(s3_bucket) and C(s3_key) are required together if C(url) is not used. - required: false - type: str - s3_key: - description: - - The file name of the disk image. - - C(s3_bucket) and C(s3_key) are required together if C(url) is not used. - required: false - type: str - encrypted: - description: - - Whether or not the destination Snapshot should be encrypted. - type: bool - default: 'no' - kms_key_id: - description: - - KMS key id used to encrypt snapshot. If not specified, defaults to EBS Customer Master Key (CMK) for that account. - required: false - type: str - role_name: - description: - - The name of the role to use when not using the default role, 'vmimport'. - required: false - type: str - wait: - description: - - wait for the snapshot to be ready - type: bool - required: false - default: yes - wait_timeout: - description: - - how long before wait gives up, in seconds - - specify 0 to wait forever - required: false - type: int - default: 900 - tags: - description: - - A hash/dictionary of tags to add to the new Snapshot; '{"key":"value"}' and '{"key":"value","key":"value"}' - required: false - type: dict - -author: "Brian C. 
Lane (@bcl)" -extends_documentation_fragment: - - aws - - ec2 -''' - -EXAMPLES = ''' -# Import an S3 object as a snapshot -ec2_snapshot_import: - description: simple-http-server - format: raw - s3_bucket: mybucket - s3_key: server-image.ami - wait: yes - tags: - Name: Snapshot-Name -''' - -RETURN = ''' -snapshot_id: - description: id of the created snapshot - returned: when snapshot is created - type: str - sample: "snap-1234abcd" -description: - description: description of snapshot - returned: when snapshot is created - type: str - sample: "simple-http-server" -format: - description: format of the disk image being imported - returned: when snapshot is created - type: str - sample: "raw" -disk_image_size: - description: size of the disk image being imported, in bytes. - returned: when snapshot is created - type: float - sample: 3836739584.0 -user_bucket: - description: S3 bucket with the image to import - returned: when snapshot is created - type: dict - sample: { - "s3_bucket": "mybucket", - "s3_key": "server-image.ami" - } -status: - description: status of the import operation - returned: when snapshot is created - type: str - sample: "completed" -''' - - -import time - -from ansible.module_utils.aws.core import AnsibleAWSModule -from ansible.module_utils.ec2 import camel_dict_to_snake_dict - -try: - import botocore -except ImportError: - pass - - -def wait_for_import_snapshot(connection, wait_timeout, import_task_id): - - params = { - 'ImportTaskIds': [import_task_id] - } - start_time = time.time() - while True: - status = connection.describe_import_snapshot_tasks(**params) - - # What are the valid status values? - if len(status['ImportSnapshotTasks']) > 1: - raise RuntimeError("Should only be 1 Import Snapshot Task with this id.") - - task = status['ImportSnapshotTasks'][0] - if task['SnapshotTaskDetail']['Status'] in ['completed']: - return status - - if time.time() - start_time > wait_timeout: - raise RuntimeError('Wait timeout exceeded (%s sec)' % wait_timeout) - - time.sleep(5) - - -def import_snapshot(module, connection): - description = module.params.get('description') - image_format = module.params.get('format') - url = module.params.get('url') - s3_bucket = module.params.get('s3_bucket') - s3_key = module.params.get('s3_key') - encrypted = module.params.get('encrypted') - kms_key_id = module.params.get('kms_key_id') - role_name = module.params.get('role_name') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - tags = module.params.get('tags') - - if module.check_mode: - module.exit_json(changed=True, msg="IMPORT operation skipped - running in check mode") - - try: - params = { - 'Description': description, - 'DiskContainer': { - 'Description': description, - 'Format': image_format, - }, - 'Encrypted': encrypted - } - if url: - params['DiskContainer']['Url'] = url - else: - params['DiskContainer']['UserBucket'] = { - 'S3Bucket': s3_bucket, - 'S3Key': s3_key - } - if kms_key_id: - params['KmsKeyId'] = kms_key_id - if role_name: - params['RoleName'] = role_name - - task = connection.import_snapshot(**params) - import_task_id = task['ImportTaskId'] - detail = task['SnapshotTaskDetail'] - - if wait: - status = wait_for_import_snapshot(connection, wait_timeout, import_task_id) - detail = status['ImportSnapshotTasks'][0]['SnapshotTaskDetail'] - - if tags: - connection.create_tags( - Resources=[detail["SnapshotId"]], - Tags=[{'Key': k, 'Value': v} for k, v in tags.items()] - ) - except (botocore.exceptions.BotoCoreError, 
botocore.exceptions.ClientError, RuntimeError) as e: - module.fail_json_aws(e, msg="Error importing image") - - module.exit_json(changed=True, **camel_dict_to_snake_dict(detail)) - - -def snapshot_import_ansible_module(): - argument_spec = dict( - description=dict(default=''), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=900), - format=dict(required=True), - url=dict(), - s3_bucket=dict(), - s3_key=dict(), - encrypted=dict(type='bool', default=False), - kms_key_id=dict(), - role_name=dict(), - tags=dict(type='dict') - ) - return AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[['s3_bucket', 'url']], - required_one_of=[['s3_bucket', 'url']], - required_together=[['s3_bucket', 's3_key']] - ) - - -def main(): - module = snapshot_import_ansible_module() - connection = module.client('ec2') - import_snapshot(module, connection) - - -if __name__ == '__main__': - main() diff --git a/share/lifted/providers/aws/playbook.yaml b/share/lifted/providers/aws/playbook.yaml deleted file mode 100644 index a4f32236..00000000 --- a/share/lifted/providers/aws/playbook.yaml +++ /dev/null @@ -1,94 +0,0 @@ -- hosts: localhost - tasks: - - name: Make sure bucket exists - aws_s3: - bucket: "{{ aws_bucket }}" - mode: create - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - region: "{{ aws_region }}" - register: bucket_facts - - fail: - msg: "Bucket creation failed" - when: - - bucket_facts.msg != "Bucket created successfully" - - bucket_facts.msg != "Bucket already exists." - - name: Make sure vmimport role exists - iam_role_facts: - name: vmimport - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - region: "{{ aws_region }}" - register: role_facts - - fail: - msg: "Role vmimport doesn't exist" - when: role_facts.iam_roles | length < 1 - - name: Make sure the AMI name isn't already in use - ec2_ami_facts: - filters: - name: "{{ image_name }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - region: "{{ aws_region }}" - register: ami_facts - - fail: - msg: "An AMI named {{ image_name }} already exists" - when: ami_facts.images | length > 0 - - stat: - path: "{{ image_path }}" - register: image_stat - - set_fact: - image_id: "{{ image_name }}-{{ image_stat['stat']['checksum'] }}.ami" - - name: Upload the .ami image to an s3 bucket - aws_s3: - bucket: "{{ aws_bucket }}" - src: "{{ image_path }}" - object: "{{ image_id }}" - mode: put - overwrite: different - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - region: "{{ aws_region }}" - - name: Import a snapshot from an AMI stored as an s3 object - ec2_snapshot_import: - description: "{{ image_name }}" - format: raw - s3_bucket: "{{ aws_bucket }}" - s3_key: "{{ image_id }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - region: "{{ aws_region }}" - wait: yes - tags: - Name: "{{ image_name }}" - register: import_facts - - fail: - msg: "Import of image from s3 failed" - when: - - import_facts.status != "completed" - - name: Register the snapshot as an AMI - ec2_ami: - name: "{{ image_name }}" - state: present - virtualization_type: hvm - root_device_name: /dev/sda1 - device_mapping: - - device_name: /dev/sda1 - snapshot_id: "{{ import_facts.snapshot_id }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - region: "{{ aws_region }}" - wait: yes - register: register_facts - - fail: - msg: 
"Registering snapshot as an AMI failed" - when: - - register_facts.msg != "AMI creation operation complete." - - name: Delete the s3 object used for the snapshot/AMI - aws_s3: - bucket: "{{ aws_bucket }}" - object: "{{ image_id }}" - mode: delobj - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - region: "{{ aws_region }}" diff --git a/share/lifted/providers/aws/provider.toml b/share/lifted/providers/aws/provider.toml deleted file mode 100644 index 450186f9..00000000 --- a/share/lifted/providers/aws/provider.toml +++ /dev/null @@ -1,29 +0,0 @@ -display = "AWS" - -supported_types = [ - "ami", -] - -[settings-info.aws_access_key] -display = "AWS Access Key" -type = "string" -placeholder = "" -regex = '' - -[settings-info.aws_secret_key] -display = "AWS Secret Key" -type = "string" -placeholder = "" -regex = '' - -[settings-info.aws_region] -display = "AWS Region" -type = "string" -placeholder = "" -regex = '' - -[settings-info.aws_bucket] -display = "AWS Bucket" -type = "string" -placeholder = "" -regex = '' diff --git a/share/lifted/providers/dummy/playbook.yaml b/share/lifted/providers/dummy/playbook.yaml deleted file mode 100644 index f4dbce43..00000000 --- a/share/lifted/providers/dummy/playbook.yaml +++ /dev/null @@ -1,4 +0,0 @@ -- hosts: localhost - connection: local - tasks: - - pause: seconds=30 diff --git a/share/lifted/providers/dummy/provider.toml b/share/lifted/providers/dummy/provider.toml deleted file mode 100644 index 3415f111..00000000 --- a/share/lifted/providers/dummy/provider.toml +++ /dev/null @@ -1,5 +0,0 @@ -display = "Dummy" -supported_types = [] - -[settings-info] -# This provider has no settings. diff --git a/share/lifted/providers/openstack/playbook.yaml b/share/lifted/providers/openstack/playbook.yaml deleted file mode 100644 index bac3fec7..00000000 --- a/share/lifted/providers/openstack/playbook.yaml +++ /dev/null @@ -1,20 +0,0 @@ -- hosts: localhost - connection: local - tasks: - - stat: - path: "{{ image_path }}" - register: image_stat - - set_fact: - image_id: "{{ image_name }}-{{ image_stat['stat']['checksum'] }}.qcow2" - - name: Upload image to OpenStack - os_image: - auth: - auth_url: "{{ auth_url }}" - username: "{{ username }}" - password: "{{ password }}" - project_name: "{{ project_name }}" - os_user_domain_name: "{{ user_domain_name }}" - os_project_domain_name: "{{ project_domain_name }}" - name: "{{ image_id }}" - filename: "{{ image_path }}" - is_public: "{{ is_public }}" diff --git a/share/lifted/providers/openstack/provider.toml b/share/lifted/providers/openstack/provider.toml deleted file mode 100644 index 2b51f7bf..00000000 --- a/share/lifted/providers/openstack/provider.toml +++ /dev/null @@ -1,45 +0,0 @@ -display = "OpenStack" - -supported_types = [ - "qcow2", -] - -[settings-info.auth_url] -display = "Authentication URL" -type = "string" -placeholder = "" -regex = '' - -[settings-info.username] -display = "Username" -type = "string" -placeholder = "" -regex = '' - -[settings-info.password] -display = "Password" -type = "string" -placeholder = "" -regex = '' - -[settings-info.project_name] -display = "Project name" -type = "string" -placeholder = "" -regex = '' - -[settings-info.user_domain_name] -display = "User domain name" -type = "string" -placeholder = "" -regex = '' - -[settings-info.project_domain_name] -display = "Project domain name" -type = "string" -placeholder = "" -regex = '' - -[settings-info.is_public] -display = "Allow public access" -type = "boolean" diff --git 
a/share/lifted/providers/vsphere/playbook.yaml b/share/lifted/providers/vsphere/playbook.yaml deleted file mode 100644 index fbc92f4b..00000000 --- a/share/lifted/providers/vsphere/playbook.yaml +++ /dev/null @@ -1,17 +0,0 @@ -- hosts: localhost - connection: local - tasks: - - stat: - path: "{{ image_path }}" - register: image_stat - - set_fact: - image_id: "{{ image_name }}-{{ image_stat['stat']['checksum'] }}.vmdk" - - name: Upload image to vSphere - vsphere_copy: - login: "{{ username }}" - password: "{{ password }}" - host: "{{ host }}" - datacenter: "{{ datacenter }}" - datastore: "{{ datastore }}" - src: "{{ image_path }}" - path: "{{ folder }}/{{ image_id }}" diff --git a/share/lifted/providers/vsphere/provider.toml b/share/lifted/providers/vsphere/provider.toml deleted file mode 100644 index 85ac61ea..00000000 --- a/share/lifted/providers/vsphere/provider.toml +++ /dev/null @@ -1,42 +0,0 @@ -display = "vSphere" - -supported_types = [ - "vmdk", -] - -[settings-info.datacenter] -display = "Datacenter" -type = "string" -placeholder = "" -regex = '' - -[settings-info.datastore] -display = "Datastore" -type = "string" -placeholder = "" -regex = '' - -[settings-info.host] -display = "Host" -type = "string" -placeholder = "" -regex = '' - -[settings-info.folder] -display = "Folder" -type = "string" -placeholder = "" -regex = '' - -[settings-info.username] -display = "Username" -type = "string" -placeholder = "" -regex = '' - -[settings-info.password] -display = "Password" -type = "string" -placeholder = "" -regex = '' - diff --git a/src/lifted/__init__.py b/src/lifted/__init__.py deleted file mode 100644 index 171c52fd..00000000 --- a/src/lifted/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# diff --git a/src/lifted/config.py b/src/lifted/config.py deleted file mode 100644 index 0ab0817a..00000000 --- a/src/lifted/config.py +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -from pylorax.sysutils import joinpaths - -def configure(conf): - """Add lifted settings to the configuration - - :param conf: configuration object - :type conf: ComposerConfig - :returns: None - - This uses the composer.share_dir and composer.lib_dir as the base - directories for the settings. 
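    For example, with a share_dir of /usr/share/lorax and a lib_dir of
    /var/lib/lorax (illustrative values matching the defaults mentioned in
    lifted/upload.py below; the real ones come from the ComposerConfig
    object passed in), the new section points at paths like these (shown
    normalized):

        conf.get("upload", "providers_dir")  # "/usr/share/lorax/lifted/providers/"
        conf.get("upload", "queue_dir")      # "/var/lib/lorax/upload/queue/"
        conf.get("upload", "settings_dir")   # "/var/lib/lorax/upload/settings/"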
- """ - share_dir = conf.get("composer", "share_dir") - lib_dir = conf.get("composer", "lib_dir") - - conf.add_section("upload") - conf.set("upload", "providers_dir", joinpaths(share_dir, "/lifted/providers/")) - conf.set("upload", "queue_dir", joinpaths(lib_dir, "/upload/queue/")) - conf.set("upload", "settings_dir", joinpaths(lib_dir, "/upload/settings/")) diff --git a/src/lifted/providers.py b/src/lifted/providers.py deleted file mode 100644 index 3397e762..00000000 --- a/src/lifted/providers.py +++ /dev/null @@ -1,245 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# - -from glob import glob -import os -import re -import stat - -import pylorax.api.toml as toml - - -def _get_profile_path(ucfg, provider_name, profile, exists=True): - """Helper to return the directory and path for a provider's profile file - - :param ucfg: upload config - :type ucfg: object - :param provider_name: the name of the cloud provider, e.g. "azure" - :type provider_name: str - :param profile: the name of the profile to save - :type profile: str != "" - :returns: Full path of the profile .toml file - :rtype: str - :raises: ValueError when passed invalid settings or an invalid profile name - :raises: RuntimeError when the provider or profile couldn't be found - """ - # Make sure no path elements are present - profile = os.path.basename(profile) - provider_name = os.path.basename(provider_name) - if not profile: - raise ValueError("Profile name cannot be empty!") - if not provider_name: - raise ValueError("Provider name cannot be empty!") - - directory = os.path.join(ucfg["settings_dir"], provider_name) - # create the settings directory if it doesn't exist - os.makedirs(directory, exist_ok=True) - - path = os.path.join(directory, f"{profile}.toml") - if exists and not os.path.isfile(path): - raise RuntimeError(f'Couldn\'t find profile "{profile}"!') - - return os.path.abspath(path) - -def resolve_provider(ucfg, provider_name): - """Get information about the specified provider as defined in that - provider's `provider.toml`, including the provider's display name and expected - settings. - - At a minimum, each setting has a display name (that likely differs from its - snake_case name) and a type. Currently, there are two types of settings: - string and boolean. String settings can optionally have a "placeholder" - value for use on the front end and a "regex" for making sure that a value - follows an expected pattern. 
- - :param ucfg: upload config - :type ucfg: object - :param provider_name: the name of the provider to look for - :type provider_name: str - :raises: RuntimeError when the provider couldn't be found - :returns: the provider - :rtype: dict - """ - # Make sure no path elements are present - provider_name = os.path.basename(provider_name) - path = os.path.join(ucfg["providers_dir"], provider_name, "provider.toml") - try: - with open(path) as provider_file: - provider = toml.load(provider_file) - except OSError as error: - raise RuntimeError(f'Couldn\'t find provider "{provider_name}"!') from error - - return provider - - -def load_profiles(ucfg, provider_name): - """Return all settings profiles associated with a provider - - :param ucfg: upload config - :type ucfg: object - :param provider_name: name a provider to find profiles for - :type provider_name: str - :returns: a dict of settings dicts, keyed by profile name - :rtype: dict - """ - # Make sure no path elements are present - provider_name = os.path.basename(provider_name) - - def load_path(path): - with open(path) as file: - return toml.load(file) - - def get_name(path): - return os.path.splitext(os.path.basename(path))[0] - - paths = glob(os.path.join(ucfg["settings_dir"], provider_name, "*")) - return {get_name(path): load_path(path) for path in paths} - - -def resolve_playbook_path(ucfg, provider_name): - """Given a provider's name, return the path to its playbook - - :param ucfg: upload config - :type ucfg: object - :param provider_name: the name of the provider to find the playbook for - :type provider_name: str - :raises: RuntimeError when the provider couldn't be found - :returns: the path to the playbook - :rtype: str - """ - # Make sure no path elements are present - provider_name = os.path.basename(provider_name) - - path = os.path.join(ucfg["providers_dir"], provider_name, "playbook.yaml") - if not os.path.isfile(path): - raise RuntimeError(f'Couldn\'t find playbook for "{provider_name}"!') - return path - - -def list_providers(ucfg): - """List the names of the available upload providers - - :param ucfg: upload config - :type ucfg: object - :returns: a list of all available provider_names - :rtype: list of str - """ - paths = glob(os.path.join(ucfg["providers_dir"], "*")) - return sorted(os.path.basename(path) for path in paths) - - -def validate_settings(ucfg, provider_name, settings, image_name=None): - """Raise a ValueError if any settings are invalid - - :param ucfg: upload config - :type ucfg: object - :param provider_name: the name of the provider to validate the settings against - :type provider_name: str - :param settings: the settings to validate - :type settings: dict - :param image_name: optionally check whether an image_name is valid - :type image_name: str - :raises: ValueError when the passed settings are invalid - :raises: RuntimeError when provider_name can't be found - """ - if image_name == "": - raise ValueError("Image name cannot be empty!") - type_map = {"string": str, "boolean": bool} - settings_info = resolve_provider(ucfg, provider_name)["settings-info"] - for key, value in settings.items(): - if key not in settings_info: - raise ValueError(f'Received unexpected setting: "{key}"!') - setting_type = settings_info[key]["type"] - correct_type = type_map[setting_type] - if not isinstance(value, correct_type): - raise ValueError( - f'Expected a {correct_type} for "{key}", received a {type(value)}!' 
- ) - if setting_type == "string" and "regex" in settings_info[key]: - if not re.match(settings_info[key]["regex"], value): - raise ValueError(f'Value "{value}" is invalid for setting "{key}"!') - - -def save_settings(ucfg, provider_name, profile, settings): - """Save (and overwrite) settings for a given provider - - :param ucfg: upload config - :type ucfg: object - :param provider_name: the name of the cloud provider, e.g. "azure" - :type provider_name: str - :param profile: the name of the profile to save - :type profile: str != "" - :param settings: settings to save for that provider - :type settings: dict - :raises: ValueError when passed invalid settings or an invalid profile name - """ - path = _get_profile_path(ucfg, provider_name, profile, exists=False) - validate_settings(ucfg, provider_name, settings, image_name=None) - - # touch the TOML file if it doesn't exist - if not os.path.isfile(path): - open(path, "a").close() - - # make sure settings files aren't readable by others, as they will contain - # sensitive credentials - current = stat.S_IMODE(os.lstat(path).st_mode) - os.chmod(path, current & ~stat.S_IROTH) - - with open(path, "w") as settings_file: - toml.dump(settings, settings_file) - -def load_settings(ucfg, provider_name, profile): - """Load settings for a provider's profile - - :param ucfg: upload config - :type ucfg: object - :param provider_name: the name of the cloud provider, e.g. "azure" - :type provider_name: str - :param profile: the name of the profile to save - :type profile: str != "" - :returns: The profile settings for the selected provider - :rtype: dict - :raises: ValueError when passed invalid settings or an invalid profile name - :raises: RuntimeError when the provider or profile couldn't be found - :raises: ValueError when the passed settings are invalid - - This also calls validate_settings on the loaded settings, potentially - raising an error if the saved settings are invalid. - """ - path = _get_profile_path(ucfg, provider_name, profile) - - with open(path) as file: - settings = toml.load(file) - validate_settings(ucfg, provider_name, settings) - return settings - -def delete_profile(ucfg, provider_name, profile): - """Delete a provider's profile settings file - - :param ucfg: upload config - :type ucfg: object - :param provider_name: the name of the cloud provider, e.g. "azure" - :type provider_name: str - :param profile: the name of the profile to save - :type profile: str != "" - :raises: ValueError when passed invalid settings or an invalid profile name - :raises: RuntimeError when the provider or profile couldn't be found - """ - path = _get_profile_path(ucfg, provider_name, profile) - - if os.path.exists(path): - os.unlink(path) diff --git a/src/lifted/queue.py b/src/lifted/queue.py deleted file mode 100644 index aa398fc7..00000000 --- a/src/lifted/queue.py +++ /dev/null @@ -1,269 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
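A minimal sketch of the settings-profile round trip that the removed lifted.providers helpers above supported, assuming a ucfg dict wired up the way lifted.config.configure() does it and the AWS provider.toml shipped under share/lifted/providers/aws (all key values here are illustrative placeholders):

    from lifted.providers import save_settings, load_settings, delete_profile

    ucfg = {"providers_dir": "/usr/share/lorax/lifted/providers/",
            "settings_dir": "/var/lib/lorax/upload/settings/"}
    settings = {"aws_access_key": "example-access-key",
                "aws_secret_key": "example-secret-key",
                "aws_region": "us-east-1",
                "aws_bucket": "example-bucket"}

    # save_settings() validates against the provider's settings-info and then
    # writes settings_dir/aws/default.toml with world-read stripped
    save_settings(ucfg, "aws", "default", settings)
    assert load_settings(ucfg, "aws", "default") == settings
    delete_profile(ucfg, "aws", "default")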
-# - -from functools import partial -from glob import glob -import logging -import multiprocessing - -# We use a multiprocessing Pool for uploads so that we can cancel them with a -# simple SIGINT, which should bubble down to subprocesses. -from multiprocessing import Pool - -# multiprocessing.dummy is to threads as multiprocessing is to processes. -# Since daemonic processes can't have children, we use a thread to monitor the -# upload pool. -from multiprocessing.dummy import Process - -from operator import attrgetter -import os -import stat -import time - -import pylorax.api.toml as toml - -from lifted.upload import Upload -from lifted.providers import resolve_playbook_path, validate_settings - -# the maximum number of simultaneous uploads -SIMULTANEOUS_UPLOADS = 1 - -log = logging.getLogger("lifted") -multiprocessing.log_to_stderr().setLevel(logging.INFO) - - -def _get_queue_path(ucfg): - path = ucfg["queue_dir"] - - # create the upload_queue directory if it doesn't exist - os.makedirs(path, exist_ok=True) - - return path - - -def _get_upload_path(ucfg, uuid, write=False): - # Make sure no path elements are present - uuid = os.path.basename(uuid) - - path = os.path.join(_get_queue_path(ucfg), f"{uuid}.toml") - if write and not os.path.exists(path): - open(path, "a").close() - if os.path.exists(path): - # make sure uploads aren't readable by others, as they will contain - # sensitive credentials - current = stat.S_IMODE(os.lstat(path).st_mode) - os.chmod(path, current & ~stat.S_IROTH) - return path - - -def _list_upload_uuids(ucfg): - paths = glob(os.path.join(_get_queue_path(ucfg), "*")) - return [os.path.splitext(os.path.basename(path))[0] for path in paths] - - -def _write_upload(ucfg, upload): - with open(_get_upload_path(ucfg, upload.uuid, write=True), "w") as upload_file: - toml.dump(upload.serializable(), upload_file) - - -def _write_callback(ucfg): - return partial(_write_upload, ucfg) - - -def get_upload(ucfg, uuid, ignore_missing=False, ignore_corrupt=False): - """Get an Upload object by UUID - - :param ucfg: upload config - :type ucfg: object - :param uuid: UUID of the upload to get - :type uuid: str - :param ignore_missing: if True, don't raise a RuntimeError when the specified upload is missing, instead just return None - :type ignore_missing: bool - :param ignore_corrupt: if True, don't raise a RuntimeError when the specified upload could not be deserialized, instead just return None - :type ignore_corrupt: bool - :returns: the upload object or None - :rtype: Upload or None - :raises: RuntimeError - """ - try: - with open(_get_upload_path(ucfg, uuid), "r") as upload_file: - return Upload(**toml.load(upload_file)) - except FileNotFoundError as error: - if not ignore_missing: - raise RuntimeError(f"Could not find upload {uuid}!") from error - except toml.TomlError as error: - if not ignore_corrupt: - raise RuntimeError(f"Could not parse upload {uuid}!") from error - - -def get_uploads(ucfg, uuids): - """Gets a list of Upload objects from a list of upload UUIDs, ignoring - missing or corrupt uploads - - :param ucfg: upload config - :type ucfg: object - :param uuids: list of upload UUIDs to get - :type uuids: list of str - :returns: a list of the uploads that were successfully deserialized - :rtype: list of Upload - """ - uploads = ( - get_upload(ucfg, uuid, ignore_missing=True, ignore_corrupt=True) - for uuid in uuids - ) - return list(filter(None, uploads)) - - -def get_all_uploads(ucfg): - """Get a list of all stored Upload objects - - :param ucfg: upload config - :type 
ucfg: object - :returns: a list of all stored upload objects - :rtype: list of Upload - """ - return get_uploads(ucfg, _list_upload_uuids(ucfg)) - - -def create_upload(ucfg, provider_name, image_name, settings): - """Creates a new upload - - :param ucfg: upload config - :type ucfg: object - :param provider_name: the name of the cloud provider to upload to, e.g. "azure" - :type provider_name: str - :param image_name: what to name the image in the cloud - :type image_name: str - :param settings: settings to pass to the upload, specific to the cloud provider - :type settings: dict - :returns: the created upload object - :rtype: Upload - """ - validate_settings(ucfg, provider_name, settings, image_name) - return Upload( - provider_name=provider_name, - playbook_path=resolve_playbook_path(ucfg, provider_name), - image_name=image_name, - settings=settings, - status_callback=_write_callback(ucfg), - ) - - -def ready_upload(ucfg, uuid, image_path): - """Pass an image_path to an upload and mark it ready to execute - - :param ucfg: upload config - :type ucfg: object - :param uuid: the UUID of the upload to mark ready - :type uuid: str - :param image_path: the path of the image to pass to the upload - :type image_path: str - """ - get_upload(ucfg, uuid).ready(image_path, _write_callback(ucfg)) - - -def reset_upload(ucfg, uuid, new_image_name=None, new_settings=None): - """Reset an upload so it can be attempted again - - :param ucfg: upload config - :type ucfg: object - :param uuid: the UUID of the upload to reset - :type uuid: str - :param new_image_name: optionally update the upload's image_name - :type new_image_name: str - :param new_settings: optionally update the upload's settings - :type new_settings: dict - """ - upload = get_upload(ucfg, uuid) - validate_settings( - ucfg, - upload.provider_name, - new_settings or upload.settings, - new_image_name or upload.image_name, - ) - if new_image_name: - upload.image_name = new_image_name - if new_settings: - upload.settings = new_settings - upload.reset(_write_callback(ucfg)) - - -def cancel_upload(ucfg, uuid): - """Cancel an upload - - :param ucfg: the compose config - :type ucfg: ComposerConfig - :param uuid: the UUID of the upload to cancel - :type uuid: str - """ - get_upload(ucfg, uuid).cancel(_write_callback(ucfg)) - - -def delete_upload(ucfg, uuid): - """Delete an upload - - :param ucfg: the compose config - :type ucfg: ComposerConfig - :param uuid: the UUID of the upload to delete - :type uuid: str - """ - upload = get_upload(ucfg, uuid) - if upload and upload.is_cancellable(): - upload.cancel() - os.remove(_get_upload_path(ucfg, uuid)) - - -def start_upload_monitor(ucfg): - """Start a thread that manages the upload queue - - :param ucfg: the compose config - :type ucfg: ComposerConfig - """ - process = Process(target=_monitor, args=(ucfg,)) - process.daemon = True - process.start() - - -def _monitor(ucfg): - log.info("Started upload monitor.") - for upload in get_all_uploads(ucfg): - # Set abandoned uploads to FAILED - if upload.status == "RUNNING": - upload.set_status("FAILED", _write_callback(ucfg)) - pool = Pool(processes=SIMULTANEOUS_UPLOADS) - pool_uuids = set() - - def remover(uuid): - return lambda _: pool_uuids.remove(uuid) - - while True: - # Every second, scoop up READY uploads from the filesystem and throw - # them in the pool - all_uploads = get_all_uploads(ucfg) - for upload in sorted(all_uploads, key=attrgetter("creation_time")): - ready = upload.status == "READY" - if ready and upload.uuid not in pool_uuids: - 
log.info("Starting upload %s...", upload.uuid) - pool_uuids.add(upload.uuid) - callback = remover(upload.uuid) - pool.apply_async( - upload.execute, - (_write_callback(ucfg),), - callback=callback, - error_callback=callback, - ) - time.sleep(1) diff --git a/src/lifted/upload.py b/src/lifted/upload.py deleted file mode 100644 index a042079e..00000000 --- a/src/lifted/upload.py +++ /dev/null @@ -1,212 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# - -from datetime import datetime -import logging -from multiprocessing import current_process -import os -import signal -from uuid import uuid4 - -from ansible_runner.interface import run as ansible_run -from ansible_runner.exceptions import AnsibleRunnerException - -log = logging.getLogger("lifted") - - -class Upload: - """Represents an upload of an image to a cloud provider. Instances of this - class are serialized as TOML and stored in the upload queue directory, - which is /var/lib/lorax/upload/queue/ by default""" - - def __init__( - self, - uuid=None, - provider_name=None, - playbook_path=None, - image_name=None, - settings=None, - creation_time=None, - upload_log=None, - upload_pid=None, - image_path=None, - status_callback=None, - status=None, - ): - self.uuid = uuid or str(uuid4()) - self.provider_name = provider_name - self.playbook_path = playbook_path - self.image_name = image_name - self.settings = settings - self.creation_time = creation_time or datetime.now().timestamp() - self.upload_log = upload_log or "" - self.upload_pid = upload_pid - self.image_path = image_path - if status: - self.status = status - else: - self.set_status("WAITING", status_callback) - - def _log(self, message, callback=None): - """Logs something to the upload log with an optional callback - - :param message: the object to log - :type message: object - :param callback: a function of the form callback(self) - :type callback: function - """ - if message: - messages = str(message).splitlines() - - # Log multi-line messages as individual log lines - for m in messages: - log.info(m) - self.upload_log += f"{message}\n" - if callback: - callback(self) - - def serializable(self): - """Returns a representation of the object as a dict for serialization - - :returns: the object's __dict__ - :rtype: dict - """ - return self.__dict__ - - def summary(self): - """Return a dict with useful information about the upload - - :returns: upload information - :rtype: dict - """ - - return { - "uuid": self.uuid, - "status": self.status, - "provider_name": self.provider_name, - "image_name": self.image_name, - "image_path": self.image_path, - "creation_time": self.creation_time, - "settings": self.settings, - } - - def set_status(self, status, status_callback=None): - """Sets the status of the upload with an optional callback - - :param status: the new status - :type status: str - :param status_callback: a function of the form callback(self) - :type status_callback: function - 
""" - self._log("Setting status to %s" % status) - self.status = status - if status_callback: - status_callback(self) - - def ready(self, image_path, status_callback): - """Provide an image_path and mark the upload as ready to execute - - :param image_path: path of the image to upload - :type image_path: str - :param status_callback: a function of the form callback(self) - :type status_callback: function - """ - self._log("Setting image_path to %s" % image_path) - self.image_path = image_path - if self.status == "WAITING": - self.set_status("READY", status_callback) - - def reset(self, status_callback): - """Reset the upload so it can be attempted again - - :param status_callback: a function of the form callback(self) - :type status_callback: function - """ - if self.is_cancellable(): - raise RuntimeError(f"Can't reset, status is {self.status}!") - if not self.image_path: - raise RuntimeError("Can't reset, no image supplied yet!") - # self.error = None - self._log("Resetting state") - self.set_status("READY", status_callback) - - def is_cancellable(self): - """Is the upload in a cancellable state? - - :returns: whether the upload is cancellable - :rtype: bool - """ - return self.status in ("WAITING", "READY", "RUNNING") - - def cancel(self, status_callback=None): - """Cancel the upload. Sends a SIGINT to self.upload_pid. - - :param status_callback: a function of the form callback(self) - :type status_callback: function - """ - if not self.is_cancellable(): - raise RuntimeError(f"Can't cancel, status is already {self.status}!") - if self.upload_pid: - os.kill(self.upload_pid, signal.SIGINT) - self.set_status("CANCELLED", status_callback) - - def execute(self, status_callback=None): - """Execute the upload. Meant to be called from a dedicated process so - that the upload can be cancelled by sending a SIGINT to - self.upload_pid. - - :param status_callback: a function of the form callback(self) - :type status_callback: function - """ - if self.status != "READY": - raise RuntimeError("This upload is not ready!") - - try: - self.upload_pid = current_process().pid - self.set_status("RUNNING", status_callback) - self._log("Executing playbook.yml") - - # NOTE: event_handler doesn't seem to be called for playbook errors - logger = lambda e: self._log(e["stdout"], status_callback) - - runner = ansible_run( - playbook=self.playbook_path, - extravars={ - **self.settings, - "image_name": self.image_name, - "image_path": self.image_path, - }, - event_handler=logger, - verbosity=2, - ) - - # Try logging events and stats -- but they may not exist, so catch the error - try: - for e in runner.events: - self._log("%s" % dir(e), status_callback) - - self._log("%s" % runner.stats, status_callback) - except AnsibleRunnerException: - self._log("%s" % runner.stdout.read(), status_callback) - - if runner.status == "successful": - self.set_status("FINISHED", status_callback) - else: - self.set_status("FAILED", status_callback) - except Exception: - import traceback - log.error(traceback.format_exc(limit=2)) diff --git a/src/pylorax/api/__init__.py b/src/pylorax/api/__init__.py deleted file mode 100644 index 60a620a0..00000000 --- a/src/pylorax/api/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# -# lorax-composer API server -# -# Copyright (C) 2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/src/pylorax/api/bisect.py b/src/pylorax/api/bisect.py deleted file mode 100644 index a1bbdcc0..00000000 --- a/src/pylorax/api/bisect.py +++ /dev/null @@ -1,49 +0,0 @@ -# -# Copyright (C) 2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -def insort_left(a, x, key=None, lo=0, hi=None): - """Insert item x in list a, and keep it sorted assuming a is sorted. - - :param a: sorted list - :type a: list - :param x: item to insert into the list - :type x: object - :param key: Function to use to compare items in the list - :type key: function - :returns: index where the item was inserted - :rtype: int - - If x is already in a, insert it to the left of the leftmost x. - Optional args lo (default 0) and hi (default len(a)) bound the - slice of a to be searched. - - This is a modified version of bisect.insort_left that can use a - function for the compare, and returns the index position where it - was inserted. - """ - if key is None: - key = lambda i: i - - if lo < 0: - raise ValueError('lo must be non-negative') - if hi is None: - hi = len(a) - while lo < hi: - mid = (lo+hi)//2 - if key(a[mid]) < key(x): lo = mid+1 - else: hi = mid - a.insert(lo, x) - return lo diff --git a/src/pylorax/api/checkparams.py b/src/pylorax/api/checkparams.py deleted file mode 100644 index f15d95d8..00000000 --- a/src/pylorax/api/checkparams.py +++ /dev/null @@ -1,44 +0,0 @@ -# -# Copyright (C) 2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# - -import logging -log = logging.getLogger("lorax-composer") - -from flask import jsonify -from functools import update_wrapper - -# A decorator for checking the parameters provided to the API route implementing -# functions. The tuples parameter is a list of tuples. Each tuple is the string -# name of a parameter ("blueprint_name", not blueprint_name), the value it's set -# to by flask if the caller did not provide it, and a message to be returned to -# the user. -# -# If the parameter is set to its default, the error message is returned. 
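# A quick illustration of the insort_left() helper above: keep a list of
# (weight, name) tuples ordered by weight and learn where each new item
# landed (sample data only; importing from the deleted module for
# illustration).

from pylorax.api.bisect import insort_left

events = [(1, "boot"), (3, "install")]
idx = insort_left(events, (2, "depsolve"), key=lambda item: item[0])
assert idx == 1
assert events == [(1, "boot"), (2, "depsolve"), (3, "install")]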
Otherwise, -# the decorated function is called and its return value is returned. -def checkparams(tuples): - def decorator(f): - def wrapped_function(*args, **kwargs): - for tup in tuples: - if kwargs[tup[0]] == tup[1]: - log.error("(%s) %s", f.__name__, tup[2]) - return jsonify(status=False, errors=[tup[2]]), 400 - - return f(*args, **kwargs) - - return update_wrapper(wrapped_function, f) - - return decorator diff --git a/src/pylorax/api/cmdline.py b/src/pylorax/api/cmdline.py deleted file mode 100644 index c4f09a9f..00000000 --- a/src/pylorax/api/cmdline.py +++ /dev/null @@ -1,63 +0,0 @@ -# -# cmdline.py -# -# Copyright (C) 2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -import os -import sys -import argparse - -from pylorax import vernum - -DEFAULT_USER = "root" -DEFAULT_GROUP = "weldr" - -version = "{0}-{1}".format(os.path.basename(sys.argv[0]), vernum) - -def lorax_composer_parser(): - """ Return the ArgumentParser for lorax-composer""" - - parser = argparse.ArgumentParser(description="Lorax Composer API Server", - fromfile_prefix_chars="@") - - parser.add_argument("--socket", default="/run/weldr/api.socket", metavar="SOCKET", - help="Path to the socket file to listen on") - parser.add_argument("--user", default=DEFAULT_USER, metavar="USER", - help="User to use for reduced permissions") - parser.add_argument("--group", default=DEFAULT_GROUP, metavar="GROUP", - help="Group to set ownership of the socket to") - parser.add_argument("--log", dest="logfile", default="/var/log/lorax-composer/composer.log", metavar="LOG", - help="Path to logfile (/var/log/lorax-composer/composer.log)") - parser.add_argument("--mockfiles", default="/var/tmp/bdcs-mockfiles/", metavar="MOCKFILES", - help="Path to JSON files used for /api/mock/ paths (/var/tmp/bdcs-mockfiles/)") - parser.add_argument("--sharedir", type=os.path.abspath, metavar="SHAREDIR", - help="Directory containing all the templates. 
Overrides config file sharedir") - parser.add_argument("-V", action="store_true", dest="showver", - help="show program's version number and exit") - parser.add_argument("-c", "--config", default="/etc/lorax/composer.conf", metavar="CONFIG", - help="Path to lorax-composer configuration file.") - parser.add_argument("--releasever", default=None, metavar="STRING", - help="Release version to use for $releasever in dnf repository urls") - parser.add_argument("--tmp", default="/var/tmp", - help="Top level temporary directory") - parser.add_argument("--proxy", default=None, metavar="PROXY", - help="Set proxy for DNF, overrides configuration file setting.") - parser.add_argument("--no-system-repos", action="store_true", default=False, - help="Do not copy over system repos from /etc/yum.repos.d/ at startup") - parser.add_argument("BLUEPRINTS", metavar="BLUEPRINTS", - help="Path to the blueprints") - - return parser diff --git a/src/pylorax/api/compose.py b/src/pylorax/api/compose.py deleted file mode 100644 index 2ba02080..00000000 --- a/src/pylorax/api/compose.py +++ /dev/null @@ -1,1267 +0,0 @@ -# Copyright (C) 2018-2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -""" Setup for composing an image - -Adding New Output Types ------------------------ - -The new output type must add a kickstart template to ./share/composer/ where the -name of the kickstart (without the trailing .ks) matches the entry in compose_args. - -The kickstart should not have any url or repo entries, these will be added at build -time. The %packages section should be the last thing, and while it can contain mandatory -packages required by the output type, it should not have the trailing %end because the -package NEVRAs will be appended to it at build time. - -compose_args should have a name matching the kickstart, and it should set the novirt_install -parameters needed to generate the desired output. Other types should be set to False. 
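# A hedged sketch of what the "Adding New Output Types" docstring above asks
# for: a kickstart template plus a compose_args() entry. "my-disk" is a
# made-up example, not a supported type.
#
# share/composer/my-disk.ks (no url/repo lines, %packages left without %end):
#
#   lang en_US.UTF-8
#   timezone UTC
#   bootloader --location=mbr
#   %packages
#   kernel
#
# and the matching _MAP entry in compose_args(), with every other make_*
# flag set to False:

my_disk_args = {
    "make_disk": True,           # the single output flag this type turns on
    "make_iso": False,
    "make_fsimage": False,
    "make_tar": False,
    "image_type": "qcow2",       # disk format livemedia-creator converts to
    "image_name": "disk.qcow2",
    "image_only": True,
}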
- -""" -import logging -log = logging.getLogger("lorax-composer") - -import os -from glob import glob -from io import StringIO -from math import ceil -import shutil -from uuid import uuid4 - -# Use pykickstart to calculate disk image size -from pykickstart.parser import KickstartParser -from pykickstart.version import makeVersion - -from pylorax import ArchData, find_templates, get_buildarch -from pylorax.api.gitrpm import create_gitrpm_repo -from pylorax.api.projects import projects_depsolve, projects_depsolve_with_size, dep_nevra -from pylorax.api.projects import ProjectsError -from pylorax.api.recipes import read_recipe_and_id -from pylorax.api.timestamp import TS_CREATED, write_timestamp -import pylorax.api.toml as toml -from pylorax.base import DataHolder -from pylorax.imgutils import default_image_name -from pylorax.ltmpl import LiveTemplateRunner -from pylorax.sysutils import joinpaths, flatconfig - - -def test_templates(dbo, share_dir): - """ Try depsolving each of the the templates and report any errors - - :param dbo: dnf base object - :type dbo: dnf.Base - :returns: List of template types and errors - :rtype: List of errors - - Return a list of templates and errors encountered or an empty list - """ - template_errors = [] - for compose_type, enabled in compose_types(share_dir): - if not enabled: - continue - - # Read the kickstart template for this type - ks_template_path = joinpaths(share_dir, "composer", compose_type) + ".ks" - ks_template = open(ks_template_path, "r").read() - - # How much space will the packages in the default template take? - ks_version = makeVersion() - ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) - ks.readKickstartFromString(ks_template+"\n%end\n") - pkgs = [(name, "*") for name in ks.handler.packages.packageList] - grps = [grp.name for grp in ks.handler.packages.groupList] - try: - projects_depsolve(dbo, pkgs, grps) - except ProjectsError as e: - template_errors.append("Error depsolving %s: %s" % (compose_type, str(e))) - - return template_errors - - -def repo_to_ks(r, url="url"): - """ Return a kickstart line with the correct args. - :param r: DNF repository information - :type r: dnf.Repo - :param url: "url" or "baseurl" to use for the baseurl parameter - :type url: str - :returns: kickstart command arguments for url/repo command - :rtype: str - - Set url to "baseurl" if it is a repo, leave it as "url" for the installation url. - """ - cmd = "" - # url uses --url not --baseurl - if r.baseurl: - cmd += '--%s="%s" ' % (url, r.baseurl[0]) - elif r.metalink: - cmd += '--metalink="%s" ' % r.metalink - elif r.mirrorlist: - cmd += '--mirrorlist="%s" ' % r.mirrorlist - else: - raise RuntimeError("Repo has no baseurl, metalink, or mirrorlist") - - if r.proxy: - cmd += '--proxy="%s" ' % r.proxy - - if not r.sslverify: - cmd += '--noverifyssl' - - if r.sslcacert: - cmd += ' --sslcacert="%s"' % r.sslcacert - if r.sslclientcert: - cmd += ' --sslclientcert="%s"' % r.sslclientcert - if r.sslclientkey: - cmd += ' --sslclientkey="%s"' % r.sslclientkey - - return cmd - - -def bootloader_append(line, kernel_append): - """ Insert the kernel_append string into the --append argument - - :param line: The bootloader ... 
line - :type line: str - :param kernel_append: The arguments to append to the --append section - :type kernel_append: str - - Using pykickstart to process the line is the best way to make sure it - is parsed correctly, and re-assembled for inclusion into the final kickstart - """ - ks_version = makeVersion() - ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) - ks.readKickstartFromString(line) - - if ks.handler.bootloader.appendLine: - ks.handler.bootloader.appendLine += " %s" % kernel_append - else: - ks.handler.bootloader.appendLine = kernel_append - - # Converting back to a string includes a comment, return just the bootloader line - return str(ks.handler.bootloader).splitlines()[-1] - - -def get_kernel_append(recipe): - """Return the customizations.kernel append value - - :param recipe: - :type recipe: Recipe object - :returns: append value or empty string - :rtype: str - """ - if "customizations" not in recipe or \ - "kernel" not in recipe["customizations"] or \ - "append" not in recipe["customizations"]["kernel"]: - return "" - return recipe["customizations"]["kernel"]["append"] - - -def timezone_cmd(line, settings): - """ Update the timezone line with the settings - - :param line: The timezone ... line - :type line: str - :param settings: A dict with timezone and/or ntpservers list - :type settings: dict - - Using pykickstart to process the line is the best way to make sure it - is parsed correctly, and re-assembled for inclusion into the final kickstart - """ - ks_version = makeVersion() - ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) - ks.readKickstartFromString(line) - - if "timezone" in settings: - ks.handler.timezone.timezone = settings["timezone"] - if "ntpservers" in settings: - ks.handler.timezone.ntpservers = settings["ntpservers"] - - # Converting back to a string includes a comment, return just the timezone line - return str(ks.handler.timezone).splitlines()[-1] - - -def get_timezone_settings(recipe): - """Return the customizations.timezone dict - - :param recipe: - :type recipe: Recipe object - :returns: append value or empty string - :rtype: dict - """ - if "customizations" not in recipe or \ - "timezone" not in recipe["customizations"]: - return {} - return recipe["customizations"]["timezone"] - - -def lang_cmd(line, languages): - """ Update the lang line with the languages - - :param line: The lang ... 
line - :type line: str - :param settings: The list of languages - :type settings: list - - Using pykickstart to process the line is the best way to make sure it - is parsed correctly, and re-assembled for inclusion into the final kickstart - """ - ks_version = makeVersion() - ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) - ks.readKickstartFromString(line) - - if languages: - ks.handler.lang.lang = languages[0] - - if len(languages) > 1: - ks.handler.lang.addsupport = languages[1:] - - # Converting back to a string includes a comment, return just the lang line - return str(ks.handler.lang).splitlines()[-1] - - -def get_languages(recipe): - """Return the customizations.locale.languages list - - :param recipe: The recipe - :type recipe: Recipe object - :returns: list of language strings - :rtype: list - """ - if "customizations" not in recipe or \ - "locale" not in recipe["customizations"] or \ - "languages" not in recipe["customizations"]["locale"]: - return [] - return recipe["customizations"]["locale"]["languages"] - - -def keyboard_cmd(line, layout): - """ Update the keyboard line with the layout - - :param line: The keyboard ... line - :type line: str - :param settings: The keyboard layout - :type settings: str - - Using pykickstart to process the line is the best way to make sure it - is parsed correctly, and re-assembled for inclusion into the final kickstart - """ - ks_version = makeVersion() - ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) - ks.readKickstartFromString(line) - - if layout: - ks.handler.keyboard.keyboard = layout - ks.handler.keyboard.vc_keymap = "" - ks.handler.keyboard.x_layouts = [] - - # Converting back to a string includes a comment, return just the keyboard line - return str(ks.handler.keyboard).splitlines()[-1] - - -def get_keyboard_layout(recipe): - """Return the customizations.locale.keyboard list - - :param recipe: The recipe - :type recipe: Recipe object - :returns: The keyboard layout string - :rtype: str - """ - if "customizations" not in recipe or \ - "locale" not in recipe["customizations"] or \ - "keyboard" not in recipe["customizations"]["locale"]: - return [] - return recipe["customizations"]["locale"]["keyboard"] - - -def firewall_cmd(line, settings): - """ Update the firewall line with the new ports and services - - :param line: The firewall ... 
line - :type line: str - :param settings: A dict with the list of services and ports to enable and disable - :type settings: dict - - Using pykickstart to process the line is the best way to make sure it - is parsed correctly, and re-assembled for inclusion into the final kickstart - """ - ks_version = makeVersion() - ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) - ks.readKickstartFromString(line) - - # Do not override firewall --disabled - if ks.handler.firewall.enabled != False and settings: - ks.handler.firewall.ports = sorted(set(settings["ports"] + ks.handler.firewall.ports)) - ks.handler.firewall.services = sorted(set(settings["enabled"] + ks.handler.firewall.services)) - ks.handler.firewall.remove_services = sorted(set(settings["disabled"] + ks.handler.firewall.remove_services)) - - # Converting back to a string includes a comment, return just the keyboard line - return str(ks.handler.firewall).splitlines()[-1] - - -def get_firewall_settings(recipe): - """Return the customizations.firewall settings - - :param recipe: The recipe - :type recipe: Recipe object - :returns: A dict of settings - :rtype: dict - """ - settings = {"ports": [], "enabled": [], "disabled": []} - - if "customizations" not in recipe or \ - "firewall" not in recipe["customizations"]: - return settings - - settings["ports"] = recipe["customizations"]["firewall"].get("ports", []) - - if "services" in recipe["customizations"]["firewall"]: - settings["enabled"] = recipe["customizations"]["firewall"]["services"].get("enabled", []) - settings["disabled"] = recipe["customizations"]["firewall"]["services"].get("disabled", []) - return settings - - -def services_cmd(line, settings): - """ Update the services line with additional services to enable/disable - - :param line: The services ... 
line - :type line: str - :param settings: A dict with the list of services to enable and disable - :type settings: dict - - Using pykickstart to process the line is the best way to make sure it - is parsed correctly, and re-assembled for inclusion into the final kickstart - """ - # Empty services and no additional settings, return an empty string - if not line and not settings["enabled"] and not settings["disabled"]: - return "" - - ks_version = makeVersion() - ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) - - # Allow passing in a 'default' so that the enable/disable may be applied to it, without - # parsing it and emitting a kickstart error message - if line != "services": - ks.readKickstartFromString(line) - - # Add to any existing services, removing any duplicates - ks.handler.services.enabled = sorted(set(settings["enabled"] + ks.handler.services.enabled)) - ks.handler.services.disabled = sorted(set(settings["disabled"] + ks.handler.services.disabled)) - - # Converting back to a string includes a comment, return just the keyboard line - return str(ks.handler.services).splitlines()[-1] - - -def get_services(recipe): - """Return the customizations.services settings - - :param recipe: The recipe - :type recipe: Recipe object - :returns: A dict of settings - :rtype: dict - """ - settings = {"enabled": [], "disabled": []} - - if "customizations" not in recipe or \ - "services" not in recipe["customizations"]: - return settings - - settings["enabled"] = sorted(recipe["customizations"]["services"].get("enabled", [])) - settings["disabled"] = sorted(recipe["customizations"]["services"].get("disabled", [])) - return settings - - -def get_default_services(recipe): - """Get the default string for services, based on recipe - :param recipe: The recipe - - :type recipe: Recipe object - :returns: string with "services" or "" - :rtype: str - - When no services have been selected we don't need to add anything to the kickstart - so return an empty string. Otherwise return "services" which will be updated with - the settings. - """ - services = get_services(recipe) - - if services["enabled"] or services["disabled"]: - return "services" - else: - return "" - - -def customize_ks_template(ks_template, recipe): - """ Customize the kickstart template and return it - - :param ks_template: The kickstart template - :type ks_template: str - :param recipe: - :type recipe: Recipe object - - Apply customizations to existing template commands, or add defaults for ones that are - missing and required. - - Apply customizations.kernel.append to the bootloader argument in the template. - Add bootloader line if it is missing. - - Add default timezone if needed. 
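# The *_cmd helpers above (bootloader_append, timezone_cmd, lang_cmd,
# keyboard_cmd, firewall_cmd, services_cmd) all share one pattern: parse the
# single kickstart line with pykickstart, mutate the parsed handler, and keep
# only the last line of str(handler.<command>), because the first line is a
# generated comment. For example (sample values; importing from the deleted
# module for illustration):

from pylorax.api.compose import timezone_cmd

new_line = timezone_cmd("timezone US/Eastern",
                        {"timezone": "UTC", "ntpservers": ["0.pool.ntp.org"]})
# new_line is a single rebuilt "timezone ..." command naming UTC and the ntp
# server; the exact option spelling and order depend on the pykickstart version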
It does NOT replace an existing timezone entry - """ - # Commands to be modified [NEW-COMMAND-FUNC, NEW-VALUE, DEFAULT, REPLACE] - # The function is called with a kickstart command string and the value to replace - # The value is specific to the command, and is understood by the function - # The default is a complete kickstart command string, suitable for writing to the template - # If REPLACE is False it will not change an existing entry only add a missing one - commands = {"bootloader": [bootloader_append, - get_kernel_append(recipe), - 'bootloader --location=none', True], - "timezone": [timezone_cmd, - get_timezone_settings(recipe), - 'timezone UTC', False], - "lang": [lang_cmd, - get_languages(recipe), - 'lang en_US.UTF-8', True], - "keyboard": [keyboard_cmd, - get_keyboard_layout(recipe), - 'keyboard --xlayouts us --vckeymap us', True], - "firewall": [firewall_cmd, - get_firewall_settings(recipe), - 'firewall --enabled', True], - "services": [services_cmd, - get_services(recipe), - get_default_services(recipe), True] - } - found = {} - - output = StringIO() - for line in ks_template.splitlines(): - for cmd in commands: - (new_command, value, default, replace) = commands[cmd] - if line.startswith(cmd): - found[cmd] = True - if value and replace: - log.debug("Replacing %s with %s", cmd, value) - print(new_command(line, value), file=output) - else: - log.debug("Skipping %s", cmd) - print(line, file=output) - break - else: - # No matches, write the line as-is - print(line, file=output) - - # Write out defaults for the ones not found - # These must go FIRST because the template still needs to have the packages added - defaults = StringIO() - for cmd in commands: - if cmd in found: - continue - (new_command, value, default, _) = commands[cmd] - if value and default: - log.debug("Setting %s to use %s", cmd, value) - print(new_command(default, value), file=defaults) - elif default: - log.debug("Setting %s to %s", cmd, default) - print(default, file=defaults) - - return defaults.getvalue() + output.getvalue() - - -def write_ks_root(f, user): - """ Write kickstart root password and sshkey entry - - :param f: kickstart file object - :type f: open file object - :param user: A blueprint user dictionary - :type user: dict - :returns: True if it wrote a rootpw command to the kickstart - :rtype: bool - - If the entry contains a ssh key, use sshkey to write it - If it contains password, use rootpw to set it - - root cannot be used with the user command. So only key and password are supported - for root. - """ - wrote_rootpw = False - - # ssh key uses the sshkey kickstart command - if "key" in user: - f.write('sshkey --user %s "%s"\n' % (user["name"], user["key"])) - - if "password" in user: - if any(user["password"].startswith(prefix) for prefix in ["$2b$", "$6$", "$5$"]): - log.debug("Detected pre-crypted password") - f.write('rootpw --iscrypted "%s"\n' % user["password"]) - wrote_rootpw = True - else: - log.debug("Detected plaintext password") - f.write('rootpw --plaintext "%s"\n' % user["password"]) - wrote_rootpw = True - - return wrote_rootpw - -def write_ks_user(f, user): - """ Write kickstart user and sshkey entry - - :param f: kickstart file object - :type f: open file object - :param user: A blueprint user dictionary - :type user: dict - - If the entry contains a ssh key, use sshkey to write it - All of the user fields are optional, except name, write out a kickstart user entry - with whatever options are relevant. 
- """ - # ssh key uses the sshkey kickstart command - if "key" in user: - f.write('sshkey --user %s "%s"\n' % (user["name"], user["key"])) - - # Write out the user kickstart command, much of it is optional - f.write("user --name %s" % user["name"]) - if "home" in user: - f.write(" --homedir %s" % user["home"]) - - if "password" in user: - if any(user["password"].startswith(prefix) for prefix in ["$2b$", "$6$", "$5$"]): - log.debug("Detected pre-crypted password") - f.write(" --iscrypted") - else: - log.debug("Detected plaintext password") - f.write(" --plaintext") - - f.write(" --password \"%s\"" % user["password"]) - - if "shell" in user: - f.write(" --shell %s" % user["shell"]) - - if "uid" in user: - f.write(" --uid %d" % int(user["uid"])) - - if "gid" in user: - f.write(" --gid %d" % int(user["gid"])) - - if "description" in user: - f.write(" --gecos \"%s\"" % user["description"]) - - if "groups" in user: - f.write(" --groups %s" % ",".join(user["groups"])) - - f.write("\n") - - -def write_ks_group(f, group): - """ Write kickstart group entry - - :param f: kickstart file object - :type f: open file object - :param group: A blueprint group dictionary - :type user: dict - - gid is optional - """ - if "name" not in group: - raise RuntimeError("group entry requires a name") - - f.write("group --name %s" % group["name"]) - if "gid" in group: - f.write(" --gid %d" % int(group["gid"])) - - f.write("\n") - - -def add_customizations(f, recipe): - """ Add customizations to the kickstart file - - :param f: kickstart file object - :type f: open file object - :param recipe: - :type recipe: Recipe object - :returns: None - :raises: RuntimeError if there was a problem writing to the kickstart - """ - if "customizations" not in recipe: - f.write('rootpw --lock\n') - return - customizations = recipe["customizations"] - - # allow customizations to be incorrectly specified as [[customizations]] instead of [customizations] - if isinstance(customizations, list): - customizations = customizations[0] - - if "hostname" in customizations: - f.write("network --hostname=%s\n" % customizations["hostname"]) - - # TODO - remove this, should use user section to define this - if "sshkey" in customizations: - # This is a list of entries - for sshkey in customizations["sshkey"]: - if "user" not in sshkey or "key" not in sshkey: - log.error("%s is incorrect, skipping", sshkey) - continue - f.write('sshkey --user %s "%s"\n' % (sshkey["user"], sshkey["key"])) - - # Creating a user also creates a group. 
Make a list of the names for later - user_groups = [] - # kickstart requires a rootpw line - wrote_rootpw = False - if "user" in customizations: - # only name is required, everything else is optional - for user in customizations["user"]: - if "name" not in user: - raise RuntimeError("user entry requires a name") - - # root is special, cannot use normal user command for it - if user["name"] == "root": - wrote_rootpw = write_ks_root(f, user) - continue - - write_ks_user(f, user) - user_groups.append(user["name"]) - - if "group" in customizations: - for group in customizations["group"]: - if group["name"] not in user_groups: - write_ks_group(f, group) - else: - log.warning("Skipping group %s, already created by user", group["name"]) - - # Lock the root account if no root user password has been specified - if not wrote_rootpw: - f.write('rootpw --lock\n') - - -def get_extra_pkgs(dbo, share_dir, compose_type): - """Return extra packages needed for the output type - - :param dbo: dnf base object - :type dbo: dnf.Base - :param share_dir: Path to the top level share directory - :type share_dir: str - :param compose_type: The type of output to create from the recipe - :type compose_type: str - :returns: List of package names (name only, not NEVRA) - :rtype: list - - Currently this is only needed by live-iso, it reads ./live/live-install.tmpl and - processes only the installpkg lines. It lists the packages needed to complete creation of the - iso using the templates such as x86.tmpl - - Keep in mind that the live-install.tmpl is shared between livemedia-creator and lorax-composer, - even though the results are applied differently. - """ - if compose_type != "live-iso": - return [] - - # get the arch information to pass to the runner - arch = ArchData(get_buildarch(dbo)) - defaults = DataHolder(basearch=arch.basearch) - templatedir = joinpaths(find_templates(share_dir), "live") - runner = LiveTemplateRunner(dbo, templatedir=templatedir, defaults=defaults) - runner.run("live-install.tmpl") - log.debug("extra pkgs = %s", runner.pkgs) - - return runner.pkgnames - - -def start_build(cfg, dnflock, gitlock, branch, recipe_name, compose_type, test_mode=0): - """ Start the build - - :param cfg: Configuration object - :type cfg: ComposerConfig - :param dnflock: Lock and YumBase for depsolving - :type dnflock: YumLock - :param recipe: The recipe to build - :type recipe: str - :param compose_type: The type of output to create from the recipe - :type compose_type: str - :returns: Unique ID for the build that can be used to track its status - :rtype: str - """ - share_dir = cfg.get("composer", "share_dir") - lib_dir = cfg.get("composer", "lib_dir") - - # Make sure compose_type is valid, only allow enabled types - type_enabled = dict(compose_types(share_dir)).get(compose_type) - if type_enabled is None: - raise RuntimeError("Invalid compose type (%s), must be one of %s" % (compose_type, [t for t, e in compose_types(share_dir)])) - if not type_enabled: - raise RuntimeError("Compose type '%s' is disabled on this architecture" % compose_type) - - # Some image types (live-iso) need extra packages for composer to execute the output template - with dnflock.lock: - extra_pkgs = get_extra_pkgs(dnflock.dbo, share_dir, compose_type) - log.debug("Extra packages needed for %s: %s", compose_type, extra_pkgs) - - with gitlock.lock: - (commit_id, recipe) = read_recipe_and_id(gitlock.repo, branch, recipe_name) - - # Combine modules and packages and depsolve the list - module_nver = recipe.module_nver - package_nver = 
recipe.package_nver - package_nver.extend([(name, '*') for name in extra_pkgs]) - - projects = sorted(set(module_nver+package_nver), key=lambda p: p[0].lower()) - deps = [] - log.info("depsolving %s", recipe["name"]) - try: - # This can possibly update repodata and reset the YumBase object. - with dnflock.lock_check: - (installed_size, deps) = projects_depsolve_with_size(dnflock.dbo, projects, recipe.group_names, with_core=False) - except ProjectsError as e: - log.error("start_build depsolve: %s", str(e)) - raise RuntimeError("Problem depsolving %s: %s" % (recipe["name"], str(e))) - - # Read the kickstart template for this type - ks_template_path = joinpaths(share_dir, "composer", compose_type) + ".ks" - ks_template = open(ks_template_path, "r").read() - - # How much space will the packages in the default template take? - ks_version = makeVersion() - ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False) - ks.readKickstartFromString(ks_template+"\n%end\n") - pkgs = [(name, "*") for name in ks.handler.packages.packageList] - grps = [grp.name for grp in ks.handler.packages.groupList] - try: - with dnflock.lock: - (template_size, _) = projects_depsolve_with_size(dnflock.dbo, pkgs, grps, with_core=not ks.handler.packages.nocore) - except ProjectsError as e: - log.error("start_build depsolve: %s", str(e)) - raise RuntimeError("Problem depsolving %s: %s" % (recipe["name"], str(e))) - log.debug("installed_size = %d, template_size=%d", installed_size, template_size) - - # Minimum LMC disk size is 1GiB, and anaconda bumps the estimated size up by 10% (which doesn't always work). - installed_size = int((installed_size+template_size)) * 1.2 - log.debug("/ partition size = %d", installed_size) - - # Create the results directory - build_id = str(uuid4()) - results_dir = joinpaths(lib_dir, "results", build_id) - os.makedirs(results_dir) - - # Write the recipe commit hash - commit_path = joinpaths(results_dir, "COMMIT") - with open(commit_path, "w") as f: - f.write(commit_id) - - # Write the original recipe - recipe_path = joinpaths(results_dir, "blueprint.toml") - with open(recipe_path, "w") as f: - f.write(recipe.toml()) - - # Write the frozen recipe - frozen_recipe = recipe.freeze(deps) - recipe_path = joinpaths(results_dir, "frozen.toml") - with open(recipe_path, "w") as f: - f.write(frozen_recipe.toml()) - - # Write out the dependencies to the results dir - deps_path = joinpaths(results_dir, "deps.toml") - with open(deps_path, "w") as f: - f.write(toml.dumps({"packages":deps})) - - # Save a copy of the original kickstart - shutil.copy(ks_template_path, results_dir) - - with dnflock.lock: - repos = list(dnflock.dbo.repos.iter_enabled()) - if not repos: - raise RuntimeError("No enabled repos, canceling build.") - - # Create the git rpms, if any, and return the path to the repo under results_dir - gitrpm_repo = create_gitrpm_repo(results_dir, recipe) - - # Create the final kickstart with repos and package list - ks_path = joinpaths(results_dir, "final-kickstart.ks") - with open(ks_path, "w") as f: - ks_url = repo_to_ks(repos[0], "url") - log.debug("url = %s", ks_url) - f.write('url %s\n' % ks_url) - for idx, r in enumerate(repos[1:]): - ks_repo = repo_to_ks(r, "baseurl") - log.debug("repo composer-%s = %s", idx, ks_repo) - f.write('repo --name="composer-%s" %s\n' % (idx, ks_repo)) - - if gitrpm_repo: - log.debug("repo gitrpms = %s", gitrpm_repo) - f.write('repo --name="gitrpms" --baseurl="file://%s"\n' % gitrpm_repo) - - # Setup the disk for booting - # TODO Add GPT and 
UEFI boot support - f.write('clearpart --all --initlabel\n') - - # Write the root partition and it's size in MB (rounded up) - f.write('part / --size=%d\n' % ceil(installed_size / 1024**2)) - - # Some customizations modify the template before writing it - f.write(customize_ks_template(ks_template, recipe)) - - for d in deps: - f.write(dep_nevra(d)+"\n") - - # Include the rpms from the gitrpm repo directory - if gitrpm_repo: - for rpm in glob(os.path.join(gitrpm_repo, "*.rpm")): - f.write(os.path.basename(rpm)[:-4]+"\n") - - f.write("%end\n") - - # Other customizations can be appended to the kickstart - add_customizations(f, recipe) - - # Setup the config to pass to novirt_install - log_dir = joinpaths(results_dir, "logs/") - cfg_args = compose_args(compose_type) - - # Get the title, project, and release version from the host - if not os.path.exists("/etc/os-release"): - log.error("/etc/os-release is missing, cannot determine product or release version") - os_release = flatconfig("/etc/os-release") - - log.debug("os_release = %s", dict(os_release.items())) - - cfg_args["title"] = os_release.get("PRETTY_NAME", "") - cfg_args["project"] = os_release.get("NAME", "") - cfg_args["releasever"] = os_release.get("VERSION_ID", "") - cfg_args["volid"] = "" - cfg_args["extra_boot_args"] = get_kernel_append(recipe) - - if "compression" not in cfg_args: - cfg_args["compression"] = "xz" - - if "compress_args" not in cfg_args: - cfg_args["compress_args"] = [] - - cfg_args.update({ - "ks": [ks_path], - "logfile": log_dir, - "timeout": 60, # 60 minute timeout - }) - with open(joinpaths(results_dir, "config.toml"), "w") as f: - f.write(toml.dumps(cfg_args)) - - # Set the initial status - open(joinpaths(results_dir, "STATUS"), "w").write("WAITING") - - # Set the test mode, if requested - if test_mode > 0: - open(joinpaths(results_dir, "TEST"), "w").write("%s" % test_mode) - - write_timestamp(results_dir, TS_CREATED) - log.info("Adding %s (%s %s) to compose queue", build_id, recipe["name"], compose_type) - os.symlink(results_dir, joinpaths(lib_dir, "queue/new/", build_id)) - - return build_id - -# Supported output types -def compose_types(share_dir): - r""" Returns a list of tuples of the supported output types, and their state - - The output types come from the kickstart names in /usr/share/lorax/composer/\*ks - - If they are disabled on the current arch their state is False. If enabled, it is True. - eg. [("alibaba", False), ("ext4-filesystem", True), ...] - """ - # These are compose types that are not supported on an architecture. eg. 
hyper-v on s390 - # If it is not listed, it is allowed - disable_map = { - "arm": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], - "armhfp": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], - "aarch64": ["alibaba", "google", "hyper-v", "vhd", "vmdk"], - "ppc": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], - "ppc64": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], - "ppc64le": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], - "s390": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], - "s390x": ["alibaba", "ami", "google", "hyper-v", "vhd", "vmdk"], - } - - all_types = sorted([os.path.basename(ks)[:-3] for ks in glob(joinpaths(share_dir, "composer/*.ks"))]) - arch_disabled = disable_map.get(os.uname().machine, []) - - return [(t, t not in arch_disabled) for t in all_types] - -def compose_args(compose_type): - """ Returns the settings to pass to novirt_install for the compose type - - :param compose_type: The type of compose to create, from `compose_types()` - :type compose_type: str - - This will return a dict of options that match the ArgumentParser options for livemedia-creator. - These are the ones the define the type of output, it's filename, etc. - Other options will be filled in by `make_compose()` - """ - _MAP = {"tar": {"make_iso": False, - "make_disk": False, - "make_fsimage": False, - "make_appliance": False, - "make_ami": False, - "make_tar": True, - "make_tar_disk": False, - "make_pxe_live": False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 0, - "image_type": False, # False instead of None because of TOML - "qemu_args": [], - "image_name": default_image_name("xz", "root.tar"), - "tar_disk_name": None, - "image_only": True, - "app_name": None, - "app_template": None, - "app_file": None, - "squashfs_only": False, - }, - "liveimg-tar": {"make_iso": False, - "make_disk": False, - "make_fsimage": False, - "make_appliance": False, - "make_ami": False, - "make_tar": True, - "make_tar_disk": False, - "make_pxe_live": False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 0, - "image_type": False, # False instead of None because of TOML - "qemu_args": [], - "image_name": default_image_name("xz", "root.tar"), - "tar_disk_name": None, - "image_only": True, - "app_name": None, - "app_template": None, - "app_file": None, - "squashfs_only": False, - }, - "live-iso": {"make_iso": True, - "make_disk": False, - "make_fsimage": False, - "make_appliance": False, - "make_ami": False, - "make_tar": False, - "make_tar_disk": False, - "make_pxe_live": False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 0, - "image_type": False, # False instead of None because of TOML - "qemu_args": [], - "image_name": "live.iso", - "tar_disk_name": None, - "fs_label": "Anaconda", # Live booting may expect this to be 'Anaconda' - "image_only": False, - "app_name": None, - "app_template": None, - "app_file": None, - "iso_only": True, - "iso_name": "live.iso", - "squashfs_only": False, - }, - "partitioned-disk": {"make_iso": False, - "make_disk": True, - "make_fsimage": False, - "make_appliance": False, - "make_ami": False, - "make_tar": False, - "make_tar_disk": False, - "make_pxe_live": 
False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 0, - "image_type": False, # False instead of None because of TOML - "qemu_args": [], - "image_name": "disk.img", - "tar_disk_name": None, - "fs_label": "", - "image_only": True, - "app_name": None, - "app_template": None, - "app_file": None, - "squashfs_only": False, - }, - "qcow2": {"make_iso": False, - "make_disk": True, - "make_fsimage": False, - "make_appliance": False, - "make_ami": False, - "make_tar": False, - "make_tar_disk": False, - "make_pxe_live": False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 0, - "image_type": "qcow2", - "qemu_args": [], - "image_name": "disk.qcow2", - "tar_disk_name": None, - "fs_label": "", - "image_only": True, - "app_name": None, - "app_template": None, - "app_file": None, - "squashfs_only": False, - }, - "ext4-filesystem": {"make_iso": False, - "make_disk": False, - "make_fsimage": True, - "make_appliance": False, - "make_ami": False, - "make_tar": False, - "make_tar_disk": False, - "make_pxe_live": False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 0, - "image_type": False, # False instead of None because of TOML - "qemu_args": [], - "image_name": "filesystem.img", - "tar_disk_name": None, - "fs_label": "", - "image_only": True, - "app_name": None, - "app_template": None, - "app_file": None, - "squashfs_only": False, - }, - "ami": {"make_iso": False, - "make_disk": True, - "make_fsimage": False, - "make_appliance": False, - "make_ami": False, - "make_tar": False, - "make_tar_disk": False, - "make_pxe_live": False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 0, - "image_type": False, - "qemu_args": [], - "image_name": "disk.ami", - "tar_disk_name": None, - "fs_label": "", - "image_only": True, - "app_name": None, - "app_template": None, - "app_file": None, - "squashfs_only": False, - }, - "vhd": {"make_iso": False, - "make_disk": True, - "make_fsimage": False, - "make_appliance": False, - "make_ami": False, - "make_tar": False, - "make_tar_disk": False, - "make_pxe_live": False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 0, - "image_type": "vpc", - "qemu_args": ["-o", "subformat=fixed,force_size"], - "image_name": "disk.vhd", - "tar_disk_name": None, - "fs_label": "", - "image_only": True, - "app_name": None, - "app_template": None, - "app_file": None, - "squashfs_only": False, - }, - "vmdk": {"make_iso": False, - "make_disk": True, - "make_fsimage": False, - "make_appliance": False, - "make_ami": False, - "make_tar": False, - "make_tar_disk": False, - "make_pxe_live": False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 0, - "image_type": "vmdk", - "qemu_args": [], - "image_name": "disk.vmdk", - "tar_disk_name": None, - "fs_label": "", - "image_only": True, - "app_name": None, - "app_template": None, - "app_file": None, - 
"squashfs_only": False, - }, - "openstack": {"make_iso": False, - "make_disk": True, - "make_fsimage": False, - "make_appliance": False, - "make_ami": False, - "make_tar": False, - "make_tar_disk": False, - "make_pxe_live": False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 0, - "image_type": "qcow2", - "qemu_args": [], - "image_name": "disk.qcow2", - "tar_disk_name": None, - "fs_label": "", - "image_only": True, - "app_name": None, - "app_template": None, - "app_file": None, - "squashfs_only": False, - }, - "google": {"make_iso": False, - "make_disk": True, - "make_fsimage": False, - "make_appliance": False, - "make_ami": False, - "make_tar": False, - "make_tar_disk": True, - "make_pxe_live": False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 1024, - "image_type": False, # False instead of None because of TOML - "qemu_args": [], - "image_name": "disk.tar.gz", - "tar_disk_name": "disk.raw", - "compression": "gzip", - "compress_args": ["-9"], - "fs_label": "", - "image_only": True, - "app_name": None, - "app_template": None, - "app_file": None, - "squashfs_only": False, - }, - "hyper-v": {"make_iso": False, - "make_disk": True, - "make_fsimage": False, - "make_appliance": False, - "make_ami": False, - "make_tar": False, - "make_tar_disk": False, - "make_pxe_live": False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 0, - "image_type": "vhdx", - "qemu_args": [], - "image_name": "disk.vhdx", - "tar_disk_name": None, - "fs_label": "", - "image_only": True, - "app_name": None, - "app_template": None, - "app_file": None, - "squashfs_only": False, - }, - "alibaba": {"make_iso": False, - "make_disk": True, - "make_fsimage": False, - "make_appliance": False, - "make_ami": False, - "make_tar": False, - "make_tar_disk": False, - "make_pxe_live": False, - "make_ostree_live": False, - "make_oci": False, - "make_vagrant": False, - "ostree": False, - "live_rootfs_keep_size": False, - "live_rootfs_size": 0, - "image_size_align": 0, - "image_type": "qcow2", - "qemu_args": [], - "image_name": "disk.qcow2", - "tar_disk_name": None, - "fs_label": "", - "image_only": True, - "app_name": None, - "app_template": None, - "app_file": None, - "squashfs_only": False, - }, - } - return _MAP[compose_type] - -def move_compose_results(cfg, results_dir): - """Move the final image to the results_dir and cleanup the unneeded compose files - - :param cfg: Build configuration - :type cfg: DataHolder - :param results_dir: Directory to put the results into - :type results_dir: str - """ - if cfg["make_tar"]: - shutil.move(joinpaths(cfg["result_dir"], cfg["image_name"]), results_dir) - elif cfg["make_iso"]: - # Output from live iso is always a boot.iso under images/, move and rename it - shutil.move(joinpaths(cfg["result_dir"], cfg["iso_name"]), joinpaths(results_dir, cfg["image_name"])) - elif cfg["make_disk"] or cfg["make_fsimage"]: - shutil.move(joinpaths(cfg["result_dir"], cfg["image_name"]), joinpaths(results_dir, cfg["image_name"])) - - - # Cleanup the compose directory, but only if it looks like a compose directory - if os.path.basename(cfg["result_dir"]) == "compose": - shutil.rmtree(cfg["result_dir"]) - else: - log.error("Incorrect 
compose directory, not cleaning up") diff --git a/src/pylorax/api/config.py b/src/pylorax/api/config.py deleted file mode 100644 index fa81cdf4..00000000 --- a/src/pylorax/api/config.py +++ /dev/null @@ -1,140 +0,0 @@ -# -# Copyright (C) 2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -import configparser -import grp -import os -import pwd - -from pylorax.sysutils import joinpaths - -class ComposerConfig(configparser.ConfigParser): - def get_default(self, section, option, default): - try: - return self.get(section, option) - except configparser.Error: - return default - - -def configure(conf_file="/etc/lorax/composer.conf", root_dir="/", test_config=False): - """lorax-composer configuration - - :param conf_file: Path to the config file overriding the default settings - :type conf_file: str - :param root_dir: Directory to prepend to paths, defaults to / - :type root_dir: str - :param test_config: Set to True to skip reading conf_file - :type test_config: bool - :returns: Configuration - :rtype: ComposerConfig - """ - conf = ComposerConfig() - - # set defaults - conf.add_section("composer") - conf.set("composer", "share_dir", os.path.realpath(joinpaths(root_dir, "/usr/share/lorax/"))) - conf.set("composer", "lib_dir", os.path.realpath(joinpaths(root_dir, "/var/lib/lorax/composer/"))) - conf.set("composer", "repo_dir", os.path.realpath(joinpaths(root_dir, "/var/lib/lorax/composer/repos.d/"))) - conf.set("composer", "dnf_conf", os.path.realpath(joinpaths(root_dir, "/var/tmp/composer/dnf.conf"))) - conf.set("composer", "dnf_root", os.path.realpath(joinpaths(root_dir, "/var/tmp/composer/dnf/root/"))) - conf.set("composer", "cache_dir", os.path.realpath(joinpaths(root_dir, "/var/tmp/composer/cache/"))) - conf.set("composer", "tmp", os.path.realpath(joinpaths(root_dir, "/var/tmp/"))) - - conf.add_section("users") - conf.set("users", "root", "1") - - # Enable all available repo files by default - conf.add_section("repos") - conf.set("repos", "use_system_repos", "1") - conf.set("repos", "enabled", "*") - - conf.add_section("dnf") - - if not test_config: - # read the config file - if os.path.isfile(conf_file): - conf.read(conf_file) - - return conf - -def make_owned_dir(p_dir, uid, gid): - """Make a directory and its parents, setting owner and group - - :param p_dir: path to directory to create - :type p_dir: string - :param uid: uid of owner - :type uid: int - :param gid: gid of owner - :type gid: int - :returns: list of errors - :rtype: list of str - - Check to make sure it does not have o+rw permissions and that it is owned by uid:gid - """ - errors = [] - if not os.path.isdir(p_dir): - # Make sure no o+rw permissions are set - orig_umask = os.umask(0o006) - os.makedirs(p_dir, 0o771) - os.chown(p_dir, uid, gid) - os.umask(orig_umask) - else: - p_stat = os.stat(p_dir) - if p_stat.st_mode & 0o006 != 0: - errors.append("Incorrect permissions on %s, no o+rw permissions are allowed." 
% p_dir) - - if p_stat.st_gid != gid or p_stat.st_uid != 0: - gr_name = grp.getgrgid(gid).gr_name - u_name = pwd.getpwuid(uid).pw_name - errors.append("%s should be owned by %s:%s" % (p_dir, u_name, gr_name)) - - return errors - -def make_dnf_dirs(conf, uid, gid): - """Make any missing dnf directories owned by user:group - - :param conf: The configuration to use - :type conf: ComposerConfig - :param uid: uid of owner - :type uid: int - :param gid: gid of owner - :type gid: int - :returns: list of errors - :rtype: list of str - """ - errors = [] - for p in ["dnf_conf", "repo_dir", "cache_dir", "dnf_root"]: - p_dir = os.path.abspath(conf.get("composer", p)) - if p == "dnf_conf": - p_dir = os.path.dirname(p_dir) - errors.extend(make_owned_dir(p_dir, uid, gid)) - return errors - -def make_queue_dirs(conf, gid): - """Make any missing queue directories - - :param conf: The configuration to use - :type conf: ComposerConfig - :param gid: Group ID that has access to the queue directories - :type gid: int - :returns: list of errors - :rtype: list of str - """ - errors = [] - lib_dir = conf.get("composer", "lib_dir") - for p in ["queue/run", "queue/new", "results"]: - p_dir = joinpaths(lib_dir, p) - errors.extend(make_owned_dir(p_dir, 0, gid)) - return errors diff --git a/src/pylorax/api/dnfbase.py b/src/pylorax/api/dnfbase.py deleted file mode 100644 index 1d72b28a..00000000 --- a/src/pylorax/api/dnfbase.py +++ /dev/null @@ -1,186 +0,0 @@ -# -# Copyright (C) 2017-2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -# -# pylint: disable=bad-preconf-access - -import logging -log = logging.getLogger("lorax-composer") - -import dnf -import dnf.logging -from glob import glob -import os -import shutil -from threading import Lock -import time - -from pylorax import DEFAULT_PLATFORM_ID -from pylorax.sysutils import flatconfig - -class DNFLock(object): - """Hold the dnf.Base object and a Lock to control access to it. - - self.dbo is a property that returns the dnf.Base object, but it *may* change - from one call to the next if the upstream repositories have changed. - """ - def __init__(self, conf, expire_secs=6*60*60): - self._conf = conf - self._lock = Lock() - self.dbo = get_base_object(self._conf) - self._expire_secs = expire_secs - self._expire_time = time.time() + self._expire_secs - - @property - def lock(self): - """Check for repo updates (using expiration time) and return the lock - - If the repository has been updated, tear down the old dnf.Base and - create a new one. This is the only way to force dnf to use the new - metadata. - """ - if time.time() > self._expire_time: - return self.lock_check - return self._lock - - @property - def lock_check(self): - """Force a check for repo updates and return the lock - - Use this method sparingly, it removes the repodata and downloads a new copy every time.
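# A minimal usage sketch (annotation, not part of the deleted code): it assumes
# the modules above are importable as pylorax.api.config and
# pylorax.api.dnfbase, and the uid/gid values are only illustrative.
import grp
import pwd
conf = configure()                        # reads /etc/lorax/composer.conf by default
errors = make_dnf_dirs(conf, pwd.getpwnam("root").pw_uid,
                       grp.getgrnam("weldr").gr_gid)
dnflock = DNFLock(conf)
with dnflock.lock:                        # may swap in a fresh dnf.Base if metadata expired
    pkgs = list(dnflock.dbo.sack.query().available())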
- """ - self._expire_time = time.time() + self._expire_secs - self.dbo.update_cache() - return self._lock - -def get_base_object(conf): - """Get the DNF object with settings from the config file - - :param conf: configuration object - :type conf: ComposerParser - :returns: A DNF Base object - :rtype: dnf.Base - """ - cachedir = os.path.abspath(conf.get("composer", "cache_dir")) - dnfconf = os.path.abspath(conf.get("composer", "dnf_conf")) - dnfroot = os.path.abspath(conf.get("composer", "dnf_root")) - repodir = os.path.abspath(conf.get("composer", "repo_dir")) - - # Setup the config for the DNF Base object - dbo = dnf.Base() - dbc = dbo.conf -# TODO - Handle this -# dbc.logdir = logdir - dbc.installroot = dnfroot - if not os.path.isdir(dnfroot): - os.makedirs(dnfroot) - if not os.path.isdir(repodir): - os.makedirs(repodir) - - dbc.cachedir = cachedir - dbc.reposdir = [repodir] - dbc.install_weak_deps = False - dbc.prepend_installroot('persistdir') - # this is a weird 'AppendOption' thing that, when you set it, - # actually appends. Doing this adds 'nodocs' to the existing list - # of values, over in libdnf, it does not replace the existing values. - dbc.tsflags = ['nodocs'] - - if conf.get_default("dnf", "proxy", None): - dbc.proxy = conf.get("dnf", "proxy") - - if conf.has_option("dnf", "sslverify") and not conf.getboolean("dnf", "sslverify"): - dbc.sslverify = False - - # If the system repos are enabled read the dnf vars from /etc/dnf/vars/ - if not conf.has_option("repos", "use_system_repos") or conf.getboolean("repos", "use_system_repos"): - dbc.substitutions.update_from_etc("/") - log.info("dnf vars: %s", dbc.substitutions) - - _releasever = conf.get_default("composer", "releasever", None) - if not _releasever: - # Use the releasever of the host system - _releasever = dnf.rpm.detect_releasever("/") - log.info("releasever = %s", _releasever) - dbc.releasever = _releasever - - # DNF 3.2 needs to have module_platform_id set, otherwise depsolve won't work correctly - if not os.path.exists("/etc/os-release"): - log.warning("/etc/os-release is missing, cannot determine platform id, falling back to %s", DEFAULT_PLATFORM_ID) - platform_id = DEFAULT_PLATFORM_ID - else: - os_release = flatconfig("/etc/os-release") - platform_id = os_release.get("PLATFORM_ID", DEFAULT_PLATFORM_ID) - log.info("Using %s for module_platform_id", platform_id) - dbc.module_platform_id = platform_id - - # Make sure metadata is always current - dbc.metadata_expire = 0 - dbc.metadata_expire_filter = "never" - - # write the dnf configuration file - with open(dnfconf, "w") as f: - f.write(dbc.dump()) - - # dnf needs the repos all in one directory, composer uses repodir for this - # if system repos are supposed to be used, copy them into repodir, overwriting any previous copies - if not conf.has_option("repos", "use_system_repos") or conf.getboolean("repos", "use_system_repos"): - for repo_file in glob("/etc/yum.repos.d/*.repo"): - shutil.copy2(repo_file, repodir) - dbo.read_all_repos() - - # Remove any duplicate repo entries. These can cause problems with Anaconda, which will fail - # with space problems. 
- repos = sorted(list(r.id for r in dbo.repos.iter_enabled())) - seen = {"baseurl": [], "mirrorlist": [], "metalink": []} - for source_name in repos: - remove = False - repo = dbo.repos.get(source_name, None) - if repo is None: - log.warning("repo %s vanished while removing duplicates", source_name) - continue - if repo.baseurl: - if repo.baseurl[0] in seen["baseurl"]: - log.info("Removing duplicate repo: %s baseurl=%s", source_name, repo.baseurl[0]) - remove = True - else: - seen["baseurl"].append(repo.baseurl[0]) - elif repo.mirrorlist: - if repo.mirrorlist in seen["mirrorlist"]: - log.info("Removing duplicate repo: %s mirrorlist=%s", source_name, repo.mirrorlist) - remove = True - else: - seen["mirrorlist"].append(repo.mirrorlist) - elif repo.metalink: - if repo.metalink in seen["metalink"]: - log.info("Removing duplicate repo: %s metalink=%s", source_name, repo.metalink) - remove = True - else: - seen["metalink"].append(repo.metalink) - - if remove: - del dbo.repos[source_name] - - # Update the metadata from the enabled repos to speed up later operations - log.info("Updating repository metadata") - try: - dbo.fill_sack(load_system_repo=False) - dbo.read_comps() - dbo.update_cache() - except dnf.exceptions.Error as e: - log.error("Failed to update metadata: %s", str(e)) - raise RuntimeError("Fetching metadata failed: %s" % str(e)) - - return dbo diff --git a/src/pylorax/api/errors.py b/src/pylorax/api/errors.py deleted file mode 100644 index c4f45656..00000000 --- a/src/pylorax/api/errors.py +++ /dev/null @@ -1,84 +0,0 @@ -# -# lorax-composer API server -# -# Copyright (C) 2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -# HTTP errors -HTTP_ERROR = "HTTPError" - -# Returned from the API when either an invalid compose type is given, or no -# compose type is given. -BAD_COMPOSE_TYPE = "BadComposeType" - -# Returned from the API when ?limit= or ?offset= is given something that does -# not convert into an integer. -BAD_LIMIT_OR_OFFSET = "BadLimitOrOffset" - -# Returned from the API for all other errors from a /blueprints/* route. -BLUEPRINTS_ERROR = "BlueprintsError" - -# Returned from the API for any other error resulting from /compose failing. -BUILD_FAILED = "BuildFailed" - -# Returned from the API when it expected a build to be in a state other than -# what it currently is. This most often happens when asking for results from -# a build that is not yet done. -BUILD_IN_WRONG_STATE = "BuildInWrongState" - -# Returned from the API when some file is requested that is not present - a log -# file, the compose results, etc. -BUILD_MISSING_FILE = "BuildMissingFile" - -# Returned from the API for all other errors from a /compose/* route. -COMPOSE_ERROR = "ComposeError" - -# Returned from the API for all errors from a /upload/* route. -UPLOAD_ERROR = "UploadError" # TODO these errors should be more specific - -# Returned from the API when invalid characters are used in a route path or in -# some identifier.
-INVALID_CHARS = "InvalidChars" - -# Returned from the API when /compose is called without the POST body telling it -# what to compose. -MISSING_POST = "MissingPost" - -# Returned from the API for all other errors from a /modules/* route. -MODULES_ERROR = "ModulesError" - -# Returned from the API for all other errors from a /projects/* route. -PROJECTS_ERROR = "ProjectsError" - -# Returned from the API when someone tries to modify an immutable system source. -SYSTEM_SOURCE = "SystemSource" - -# Returned from the API when a blueprint that was requested does not exist. -UNKNOWN_BLUEPRINT = "UnknownBlueprint" - -# Returned from the API when a commit that was requested does not exist. -UNKNOWN_COMMIT = "UnknownCommit" - -# Returned from the API when a module that was requested does not exist. -UNKNOWN_MODULE = "UnknownModule" - -# Returned from the API when a project that was requested does not exist. -UNKNOWN_PROJECT = "UnknownProject" - -# Returned from the API when a source that was requested does not exist. -UNKNOWN_SOURCE = "UnknownSource" - -# Returned from the API when a UUID that was requested does not exist. -UNKNOWN_UUID = "UnknownUUID" diff --git a/src/pylorax/api/flask_blueprint.py b/src/pylorax/api/flask_blueprint.py deleted file mode 100644 index fee3bf05..00000000 --- a/src/pylorax/api/flask_blueprint.py +++ /dev/null @@ -1,54 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -""" Flask Blueprints that support skipping routes - -When using Blueprints for API versioning you will usually want to fall back -to the previous version's rules for routes that have no new behavior. To do -this we add a 'skip_rule' list to the Blueprint's options dictionary. It lists -all of the routes that you do not want to register. 
- -For example: - from pylorax.api.v0 import v0 - from pylorax.api.v1 import v1 - - server.register_blueprint(v0, url_prefix="/api/v0/") - server.register_blueprint(v0, url_prefix="/api/v1/", skip_rules=["/blueprints/list"]) - server.register_blueprint(v1, url_prefix="/api/v1/") - -This will register all of v0's routes under `/api/v0`, and all but `/blueprints/list` under `/api/v1`, -and then register v1's version of `/blueprints/list` under `/api/v1` - -""" -from flask import Blueprint -from flask.blueprints import BlueprintSetupState - -class BlueprintSetupStateSkip(BlueprintSetupState): - def __init__(self, blueprint, app, options, first_registration, skip_rules): - self._skip_rules = skip_rules - super(BlueprintSetupStateSkip, self).__init__(blueprint, app, options, first_registration) - - def add_url_rule(self, rule, endpoint=None, view_func=None, **options): - if rule not in self._skip_rules: - super(BlueprintSetupStateSkip, self).add_url_rule(rule, endpoint, view_func, **options) - -class BlueprintSkip(Blueprint): - def __init__(self, *args, **kwargs): - super(BlueprintSkip, self).__init__(*args, **kwargs) - - def make_setup_state(self, app, options, first_registration=False): - skip_rules = options.pop("skip_rules", []) - return BlueprintSetupStateSkip(self, app, options, first_registration, skip_rules) diff --git a/src/pylorax/api/gitrpm.py b/src/pylorax/api/gitrpm.py deleted file mode 100644 index 004e3142..00000000 --- a/src/pylorax/api/gitrpm.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -# -""" Clone a git repository and package it as an rpm - -This module contains functions for cloning a git repo, creating a tar archive of -the selected commit, branch, or tag, and packaging the files into an rpm that will -be installed by anaconda when creating the image. -""" -import logging -log = logging.getLogger("lorax-composer") - -import os -from rpmfluff import SimpleRpmBuild -import shutil -import subprocess -import tempfile -import time - -from pylorax.sysutils import joinpaths - -def get_repo_description(gitRepo): - """ Return a description including the git repo and reference - - :param gitRepo: A dict with the repository details - :type gitRepo: dict - :returns: A string with the git repo url and reference - :rtype: str - """ - return "Created from %s, reference '%s', on %s" % (gitRepo["repo"], gitRepo["ref"], time.ctime()) - -class GitArchiveTarball: - """Create a git archive of the selected git repo and reference""" - def __init__(self, gitRepo): - self._gitRepo = gitRepo - self.sourceName = self._gitRepo["rpmname"]+".tar.xz" - - def write_file(self, sourcesDir): - """ Create the tar archive - - :param sourcesDir: Path to use for creating the archive - :type sourcesDir: str - - This clones the git repository and creates a git archive from the specified reference.
- The result is in RPMNAME.tar.xz under the sourcesDir - """ - # Clone the repository into a temporary location - cmd = ["git", "clone", self._gitRepo["repo"], joinpaths(sourcesDir, "gitrepo")] - log.debug(cmd) - try: - subprocess.check_output(cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - log.error("Failed to clone %s: %s", self._gitRepo["repo"], e.output) - raise RuntimeError("Failed to clone %s" % self._gitRepo["repo"]) - - oldcwd = os.getcwd() - try: - os.chdir(joinpaths(sourcesDir, "gitrepo")) - - # Configure archive to create a .tar.xz - cmd = ["git", "config", "tar.tar.xz.command", "xz -c"] - log.debug(cmd) - subprocess.check_call(cmd) - - cmd = ["git", "archive", "--prefix", self._gitRepo["rpmname"] + "/", "-o", joinpaths(sourcesDir, self.sourceName), self._gitRepo["ref"]] - log.debug(cmd) - try: - subprocess.check_output(cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - log.error("Failed to archive %s: %s", self._gitRepo["repo"], e.output) - raise RuntimeError('Failed to archive %s from ref "%s"' % (self._gitRepo["repo"], - self._gitRepo["ref"])) - finally: - # Cleanup even if there was an error - os.chdir(oldcwd) - shutil.rmtree(joinpaths(sourcesDir, "gitrepo")) - -class GitRpmBuild(SimpleRpmBuild): - """Build an rpm containing files from a git repository""" - def __init__(self, *args, **kwargs): - self._base_dir = None - super().__init__(*args, **kwargs) - - def check(self): - raise NotImplementedError - - def get_base_dir(self): - """Place all the files under a temporary directory + rpmbuild/ - """ - if not self._base_dir: - self._base_dir = tempfile.mkdtemp(prefix="lorax-git-rpm.") - return joinpaths(self._base_dir, "rpmbuild") - - def cleanup_tmpdir(self): - """Remove the temporary directory and all of its contents - """ - if len(self._base_dir) < 5: - raise RuntimeError("Invalid base_dir: %s" % self.get_base_dir()) - - shutil.rmtree(self._base_dir) - - def clean(self): - """Remove the base directory from inside the tmpdir""" - if len(self.get_base_dir()) < 5: - raise RuntimeError("Invalid base_dir: %s" % self.get_base_dir()) - shutil.rmtree(self.get_base_dir(), ignore_errors=True) - - def add_git_tarball(self, gitRepo): - """Add a tar archive of a git repository to the rpm - - :param gitRepo: A dict with the repository details - :type gitRepo: dict - - This populates the rpm with the URL of the git repository, the summary - describing the repo, the description of the repository and reference used, - and sets up the rpm to install the archive contents into the destination - path. - """ - self.addUrl(gitRepo["repo"]) - self.add_summary(gitRepo["summary"]) - self.add_description(get_repo_description(gitRepo)) - self.addLicense("Unknown") - sourceIndex = self.add_source(GitArchiveTarball(gitRepo)) - self.section_build += "tar -xvf %s\n" % self.sources[sourceIndex].sourceName - dest = os.path.normpath(gitRepo["destination"]) - # Prevent double slash root - if dest == "/": - dest = "" - self.create_parent_dirs(dest) - self.section_install += "cp -r %s/. 
$RPM_BUILD_ROOT/%s\n" % (gitRepo["rpmname"], dest) - sub = self.get_subpackage(None) - if not dest: - # / is special, we don't want to include / itself, just what's under it - sub.section_files += "/*\n" - else: - sub.section_files += "%s/\n" % dest - -def make_git_rpm(gitRepo, dest): - """ Create an rpm from the specified git repo - - :param gitRepo: A dict with the repository details - :type gitRepo: dict - - This will clone the git repository, create an archive of the selected reference, - and build an rpm that will install the files from the repository under the destination - directory. The gitRepo dict should have the following fields:: - - rpmname: "server-config" - rpmversion: "1.0" - rpmrelease: "1" - summary: "Setup files for server deployment" - repo: "PATH OF GIT REPO TO CLONE" - ref: "v1.0" - destination: "/opt/server/" - - * rpmname: Name of the rpm to create, also used as the prefix name in the tar archive - * rpmversion: Version of the rpm, eg. "1.0.0" - * rpmrelease: Release of the rpm, eg. "1" - * summary: Summary string for the rpm - * repo: URL of the get repo to clone and create the archive from - * ref: Git reference to check out. eg. origin/branch-name, git tag, or git commit hash - * destination: Path to install the / of the git repo at when installing the rpm - """ - gitRpm = GitRpmBuild(gitRepo["rpmname"], gitRepo["rpmversion"], gitRepo["rpmrelease"], ["noarch"]) - try: - gitRpm.add_git_tarball(gitRepo) - gitRpm.do_make() - rpmfile = gitRpm.get_built_rpm("noarch") - shutil.move(rpmfile, dest) - except Exception as e: - log.error("Creating git repo rpm: %s", e) - raise RuntimeError("Creating git repo rpm: %s" % e) - finally: - gitRpm.cleanup_tmpdir() - - return os.path.basename(rpmfile) - -# Create the git rpms, if any, and return the path to the repo under results_dir -def create_gitrpm_repo(results_dir, recipe): - """Create a dnf repository with the rpms from the recipe - - :param results_dir: Path to create the repository under - :type results_dir: str - :param recipe: The recipe to get the repos.git entries from - :type recipe: Recipe - :returns: Path to the dnf repository or "" - :rtype: str - - This function creates a dnf repository directory at results_dir+"repo/", - creates rpms for all of the repos.git entries in the recipe, runs createrepo_c - on the dnf repository so that Anaconda can use it, and returns the path to the - repository to the caller. - """ - if "repos" not in recipe or "git" not in recipe["repos"]: - return "" - - gitrepo = joinpaths(results_dir, "repo/") - if not os.path.exists(gitrepo): - os.makedirs(gitrepo) - for r in recipe["repos"]["git"]: - make_git_rpm(r, gitrepo) - cmd = ["createrepo_c", gitrepo] - log.debug(cmd) - try: - subprocess.check_output(cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - log.error("Failed to create repo at %s: %s", gitrepo, e.output) - raise RuntimeError("Failed to create repo at %s" % gitrepo) - - return gitrepo diff --git a/src/pylorax/api/projects.py b/src/pylorax/api/projects.py deleted file mode 100644 index a9cc1a21..00000000 --- a/src/pylorax/api/projects.py +++ /dev/null @@ -1,697 +0,0 @@ -# -# Copyright (C) 2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -import logging -log = logging.getLogger("lorax-composer") - -from configparser import ConfigParser -import dnf -from glob import glob -import os -import time - -from pylorax.api.bisect import insort_left -from pylorax.sysutils import joinpaths - -TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" - - -class ProjectsError(Exception): - pass - - -def api_time(t): - """Convert time since epoch to a string - - :param t: Seconds since epoch - :type t: int - :returns: Time string - :rtype: str - """ - return time.strftime(TIME_FORMAT, time.localtime(t)) - - -def api_changelog(changelog): - """Convert the changelog to a string - - :param changelog: A list of time, author, string tuples. - :type changelog: tuple - :returns: The most recent changelog text or "" - :rtype: str - - This returns only the most recent changelog entry. - """ - try: - entry = changelog[0][2] - except IndexError: - entry = "" - return entry - - -def pkg_to_project(pkg): - """Extract the details from a hawkey.Package object - - :param pkgs: hawkey.Package object with package details - :type pkgs: hawkey.Package - :returns: A dict with the name, summary, description, and url. - :rtype: dict - - upstream_vcs is hard-coded to UPSTREAM_VCS - """ - return {"name": pkg.name, - "summary": pkg.summary, - "description": pkg.description, - "homepage": pkg.url, - "upstream_vcs": "UPSTREAM_VCS"} - - -def pkg_to_build(pkg): - """Extract the build details from a hawkey.Package object - - :param pkg: hawkey.Package object with package details - :type pkg: hawkey.Package - :returns: A dict with the build details, epoch, release, arch, build_time, changelog, ... - :rtype: dict - - metadata entries are hard-coded to {} - - Note that this only returns the build dict, it does not include the name, description, etc. - """ - return {"epoch": pkg.epoch, - "release": pkg.release, - "arch": pkg.arch, - "build_time": api_time(pkg.buildtime), - "changelog": "CHANGELOG_NEEDED", # XXX Not in hawkey.Package - "build_config_ref": "BUILD_CONFIG_REF", - "build_env_ref": "BUILD_ENV_REF", - "metadata": {}, - "source": {"license": pkg.license, - "version": pkg.version, - "source_ref": "SOURCE_REF", - "metadata": {}}} - - -def pkg_to_project_info(pkg): - """Extract the details from a hawkey.Package object - - :param pkg: hawkey.Package object with package details - :type pkg: hawkey.Package - :returns: A dict with the project details, as well as epoch, release, arch, build_time, changelog, ... 
- :rtype: dict - - metadata entries are hard-coded to {} - """ - return {"name": pkg.name, - "summary": pkg.summary, - "description": pkg.description, - "homepage": pkg.url, - "upstream_vcs": "UPSTREAM_VCS", - "builds": [pkg_to_build(pkg)]} - - -def pkg_to_dep(pkg): - """Extract the info from a hawkey.Package object - - :param pkg: A hawkey.Package object - :type pkg: hawkey.Package - :returns: A dict with name, epoch, version, release, arch - :rtype: dict - """ - return {"name": pkg.name, - "epoch": pkg.epoch, - "version": pkg.version, - "release": pkg.release, - "arch": pkg.arch} - - -def proj_to_module(proj): - """Extract the name from a project_info dict - - :param pkg: dict with package details - :type pkg: dict - :returns: A dict with name, and group_type - :rtype: dict - - group_type is hard-coded to "rpm" - """ - return {"name": proj["name"], - "group_type": "rpm"} - - -def dep_evra(dep): - """Return the epoch:version-release.arch for the dep - - :param dep: dependency dict - :type dep: dict - :returns: epoch:version-release.arch - :rtype: str - """ - if dep["epoch"] == 0: - return dep["version"]+"-"+dep["release"]+"."+dep["arch"] - else: - return str(dep["epoch"])+":"+dep["version"]+"-"+dep["release"]+"."+dep["arch"] - -def dep_nevra(dep): - """Return the name-epoch:version-release.arch""" - return dep["name"]+"-"+dep_evra(dep) - - -def projects_list(dbo): - """Return a list of projects - - :param dbo: dnf base object - :type dbo: dnf.Base - :returns: List of project info dicts with name, summary, description, homepage, upstream_vcs - :rtype: list of dicts - """ - return projects_info(dbo, None) - - -def projects_info(dbo, project_names): - """Return details about specific projects - - :param dbo: dnf base object - :type dbo: dnf.Base - :param project_names: List of names of projects to get info about - :type project_names: str - :returns: List of project info dicts with pkg_to_project as well as epoch, version, release, etc. - :rtype: list of dicts - - If project_names is None it will return the full list of available packages - """ - if project_names: - pkgs = dbo.sack.query().available().filter(name__glob=project_names) - else: - pkgs = dbo.sack.query().available() - - # iterate over pkgs - # - if pkg.name isn't in the results yet, add pkg_to_project_info in sorted position - # - if pkg.name is already in results, get its builds. If the build for pkg is different - # in any way (version, arch, etc.) add it to the entry's builds list. If it is the same, - # skip it. - results = [] - results_names = {} - for p in pkgs: - if p.name.lower() not in results_names: - idx = insort_left(results, pkg_to_project_info(p), key=lambda p: p["name"].lower()) - results_names[p.name.lower()] = idx - else: - build = pkg_to_build(p) - if build not in results[results_names[p.name.lower()]]["builds"]: - results[results_names[p.name.lower()]]["builds"].append(build) - - return results - -def _depsolve(dbo, projects, groups): - """Add projects to a new transaction - - :param dbo: dnf base object - :type dbo: dnf.Base - :param projects: The projects and version globs to find the dependencies for - :type projects: List of tuples - :param groups: The groups to include in dependency solving - :type groups: List of str - :returns: None - :rtype: None - :raises: ProjectsError if there was a problem installing something - """ - # This resets the transaction and updates the cache. 
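# Worked example for dep_evra()/dep_nevra() above; the epoch prefix is only
# added when it is non-zero (values are illustrative).
dep = {"name": "lorax", "epoch": 0, "version": "34.3", "release": "1.fc33", "arch": "x86_64"}
dep_evra(dep)     # "34.3-1.fc33.x86_64"
dep["epoch"] = 2
dep_nevra(dep)    # "lorax-2:34.3-1.fc33.x86_64"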
- # It is important that the cache always be synchronized because Anaconda will grab its own copy - # and if that is different the NEVRAs will not match and the build will fail. - dbo.reset(goal=True) - install_errors = [] - for name in groups: - try: - dbo.group_install(name, ["mandatory", "default"]) - except dnf.exceptions.MarkingError as e: - install_errors.append(("Group %s" % (name), str(e))) - - for name, version in projects: - # Find the best package matching the name + version glob - # dnf can return multiple packages if it is in more than 1 repository - query = dbo.sack.query().filterm(provides__glob=name) - if version: - query.filterm(version__glob=version) - - query.filterm(latest=1) - if not query: - install_errors.append(("%s-%s" % (name, version), "No match")) - continue - sltr = dnf.selector.Selector(dbo.sack).set(pkg=query) - - # NOTE: dnf says in near future there will be a "goal" attribute of Base class - # so yes, we're using a 'private' attribute here on purpose and with permission. - dbo._goal.install(select=sltr, optional=False) - - if install_errors: - raise ProjectsError("The following package(s) had problems: %s" % ",".join(["%s (%s)" % (pattern, err) for pattern, err in install_errors])) - -def projects_depsolve(dbo, projects, groups): - """Return the dependencies for a list of projects - - :param dbo: dnf base object - :type dbo: dnf.Base - :param projects: The projects to find the dependencies for - :type projects: List of Strings - :param groups: The groups to include in dependency solving - :type groups: List of str - :returns: NEVRA's of the project and its dependencies - :rtype: list of dicts - :raises: ProjectsError if there was a problem installing something - """ - _depsolve(dbo, projects, groups) - - try: - dbo.resolve() - except dnf.exceptions.DepsolveError as e: - raise ProjectsError("There was a problem depsolving %s: %s" % (projects, str(e))) - - if len(dbo.transaction) == 0: - return [] - - return sorted(map(pkg_to_dep, dbo.transaction.install_set), key=lambda p: p["name"].lower()) - - -def estimate_size(packages, block_size=6144): - """Estimate the installed size of a package list - - :param packages: The packages to be installed - :type packages: list of hawkey.Package objects - :param block_size: The block size to use for rounding up file sizes. - :type block_size: int - :returns: The estimated size of installed packages - :rtype: int - - Estimating actual requirements is difficult without the actual file sizes, which - dnf doesn't provide access to. So use the file count and block size to estimate - a minimum size for each package. 
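# Worked example of the estimate described above: a package with 100 files and
# a reported installsize of 2 MiB is counted as 100 * 6144 + 2097152 bytes.
files, installsize = 100, 2 * 1024 * 1024     # illustrative values
estimate = files * 6144 + installsize          # 2711552 bytes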
- """ - installed_size = 0 - for p in packages: - installed_size += len(p.files) * block_size - installed_size += p.installsize - return installed_size - - -def projects_depsolve_with_size(dbo, projects, groups, with_core=True): - """Return the dependencies and installed size for a list of projects - - :param dbo: dnf base object - :type dbo: dnf.Base - :param project_names: The projects to find the dependencies for - :type project_names: List of Strings - :param groups: The groups to include in dependency solving - :type groups: List of str - :returns: installed size and a list of NEVRA's of the project and its dependencies - :rtype: tuple of (int, list of dicts) - :raises: ProjectsError if there was a problem installing something - """ - _depsolve(dbo, projects, groups) - - if with_core: - dbo.group_install("core", ['mandatory', 'default', 'optional']) - - try: - dbo.resolve() - except dnf.exceptions.DepsolveError as e: - raise ProjectsError("There was a problem depsolving %s: %s" % (projects, str(e))) - - if len(dbo.transaction) == 0: - return (0, []) - - installed_size = estimate_size(dbo.transaction.install_set) - deps = sorted(map(pkg_to_dep, dbo.transaction.install_set), key=lambda p: p["name"].lower()) - return (installed_size, deps) - - -def modules_list(dbo, module_names): - """Return a list of modules - - :param dbo: dnf base object - :type dbo: dnf.Base - :param offset: Number of modules to skip - :type limit: int - :param limit: Maximum number of modules to return - :type limit: int - :returns: List of module information and total count - :rtype: tuple of a list of dicts and an Int - - Modules don't exist in RHEL7 so this only returns projects - and sets the type to "rpm" - - """ - # TODO - Figure out what to do with this for Fedora 'modules' - return list(map(proj_to_module, projects_info(dbo, module_names))) - -def modules_info(dbo, module_names): - """Return details about a module, including dependencies - - :param dbo: dnf base object - :type dbo: dnf.Base - :param module_names: Names of the modules to get info about - :type module_names: str - :returns: List of dicts with module details and dependencies. - :rtype: list of dicts - """ - modules = projects_info(dbo, module_names) - - # Add the dependency info to each one - for module in modules: - module["dependencies"] = projects_depsolve(dbo, [(module["name"], "*.*")], []) - - return modules - -def dnf_repo_to_file_repo(repo): - """Return a string representation of a DNF Repo object suitable for writing to a .repo file - - :param repo: DNF Repository - :type repo: dnf.RepoDict - :returns: A string - :rtype: str - - The DNF Repo.dump() function does not produce a string that can be used as a dnf .repo file, - it ouputs baseurl and gpgkey as python lists which DNF cannot read. So do this manually with - only the attributes we care about. 
- """ - repo_str = "[%s]\nname = %s\n" % (repo.id, repo.name) - if repo.metalink: - repo_str += "metalink = %s\n" % repo.metalink - elif repo.mirrorlist: - repo_str += "mirrorlist = %s\n" % repo.mirrorlist - elif repo.baseurl: - repo_str += "baseurl = %s\n" % repo.baseurl[0] - else: - raise RuntimeError("Repo has no baseurl, metalink, or mirrorlist") - - # proxy is optional - if repo.proxy: - repo_str += "proxy = %s\n" % repo.proxy - - repo_str += "sslverify = %s\n" % repo.sslverify - repo_str += "gpgcheck = %s\n" % repo.gpgcheck - if repo.gpgkey: - repo_str += "gpgkey = %s\n" % ",".join(repo.gpgkey) - - if repo.skip_if_unavailable: - repo_str += "skip_if_unavailable=1\n" - - return repo_str - -def repo_to_source(repo, system_source, api=1): - """Return a Weldr Source dict created from the DNF Repository - - :param repo: DNF Repository - :type repo: dnf.RepoDict - :param system_source: True if this source is an immutable system source - :type system_source: bool - :param api: Select which api version of the dict to return (default 1) - :type api: int - :returns: A dict with Weldr Source fields filled in - :rtype: dict - - Example:: - - { - "check_gpg": true, - "check_ssl": true, - "gpgkey_url": [ - "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-28-x86_64" - ], - "id": "fedora", - "name": "Fedora $releasever - $basearch", - "proxy": "http://proxy.brianlane.com:8123", - "system": true - "type": "yum-metalink", - "url": "https://mirrors.fedoraproject.org/metalink?repo=fedora-28&arch=x86_64" - } - - The ``name`` field has changed in v1 of the API. - In v0 of the API ``name`` is the repo.id, in v1 it is the repo.name and a new field, - ``id`` has been added for the repo.id - - """ - if api==0: - source = {"name": repo.id, "system": system_source} - else: - source = {"id": repo.id, "name": repo.name, "system": system_source} - if repo.baseurl: - source["url"] = repo.baseurl[0] - source["type"] = "yum-baseurl" - elif repo.metalink: - source["url"] = repo.metalink - source["type"] = "yum-metalink" - elif repo.mirrorlist: - source["url"] = repo.mirrorlist - source["type"] = "yum-mirrorlist" - else: - raise RuntimeError("Repo has no baseurl, metalink, or mirrorlist") - - # proxy is optional - if repo.proxy: - source["proxy"] = repo.proxy - - if not repo.sslverify: - source["check_ssl"] = False - else: - source["check_ssl"] = True - - if not repo.gpgcheck: - source["check_gpg"] = False - else: - source["check_gpg"] = True - - if repo.gpgkey: - source["gpgkey_urls"] = list(repo.gpgkey) - - return source - -def source_to_repodict(source): - """Return a tuple suitable for use with dnf.add_new_repo - - :param source: A Weldr source dict - :type source: dict - :returns: A tuple of dnf.Repo attributes - :rtype: (str, list, dict) - - Return a tuple with (id, baseurl|(), kwargs) that can be used - with dnf.repos.add_new_repo - """ - kwargs = {} - if "id" in source: - # This is an API v1 source definition - repoid = source["id"] - if "name" in source: - kwargs["name"] = source["name"] - else: - repoid = source["name"] - - # This will allow errors to be raised so we can catch them - # without this they are logged, but the repo is silently disabled - kwargs["skip_if_unavailable"] = False - - if source["type"] == "yum-baseurl": - baseurl = [source["url"]] - elif source["type"] == "yum-metalink": - kwargs["metalink"] = source["url"] - baseurl = () - elif source["type"] == "yum-mirrorlist": - kwargs["mirrorlist"] = source["url"] - baseurl = () - - if "proxy" in source: - kwargs["proxy"] = source["proxy"] - - if 
source["check_ssl"]: - kwargs["sslverify"] = True - else: - kwargs["sslverify"] = False - - if source["check_gpg"]: - kwargs["gpgcheck"] = True - else: - kwargs["gpgcheck"] = False - - if "gpgkey_urls" in source: - kwargs["gpgkey"] = tuple(source["gpgkey_urls"]) - - return (repoid, baseurl, kwargs) - - -def source_to_repo(source, dnf_conf): - """Return a dnf Repo object created from a source dict - - :param source: A Weldr source dict - :type source: dict - :param dnf_conf: The dnf Config object - :type dnf_conf: dnf.conf - :returns: A dnf Repo object - :rtype: dnf.Repo - - Example:: - - { - "check_gpg": True, - "check_ssl": True, - "gpgkey_urls": [ - "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-28-x86_64" - ], - "id": "fedora", - "name": "Fedora $releasever - $basearch", - "proxy": "http://proxy.brianlane.com:8123", - "system": True - "type": "yum-metalink", - "url": "https://mirrors.fedoraproject.org/metalink?repo=fedora-28&arch=x86_64" - } - - If the ``id`` field is included it is used for the repo id, otherwise ``name`` is used. - v0 of the API only used ``name``, v1 added the distinction between ``id`` and ``name``. - """ - repoid, baseurl, kwargs = source_to_repodict(source) - repo = dnf.repo.Repo(repoid, dnf_conf) - if baseurl: - repo.baseurl = baseurl - - # Apply the rest of the kwargs to the Repo object - for k, v in kwargs.items(): - setattr(repo, k, v) - - repo.enable() - - return repo - -def get_source_ids(source_path): - """Return a list of the source ids in a file - - :param source_path: Full path and filename of the source (yum repo) file - :type source_path: str - :returns: A list of source id strings - :rtype: list of str - """ - if not os.path.exists(source_path): - return [] - - cfg = ConfigParser() - cfg.read(source_path) - return cfg.sections() - -def get_repo_sources(source_glob): - """Return a list of sources from a directory of yum repositories - - :param source_glob: A glob to use to match the source files, including full path - :type source_glob: str - :returns: A list of the source ids in all of the matching files - :rtype: list of str - """ - sources = [] - for f in glob(source_glob): - sources.extend(get_source_ids(f)) - return sources - -def delete_repo_source(source_glob, source_id): - """Delete a source from a repo file - - :param source_glob: A glob of the repo sources to search - :type source_glob: str - :param source_id: The repo id to delete - :type source_id: str - :returns: None - :raises: ProjectsError if there was a problem - - A repo file may have multiple sources in it, delete only the selected source. - If it is the last one in the file, delete the file. - - WARNING: This will delete ANY source, the caller needs to ensure that a system - source_id isn't passed to it. 
- """ - found = False - for f in glob(source_glob): - try: - cfg = ConfigParser() - cfg.read(f) - if source_id in cfg.sections(): - found = True - cfg.remove_section(source_id) - # If there are other sections, rewrite the file without the deleted one - if len(cfg.sections()) > 0: - with open(f, "w") as cfg_file: - cfg.write(cfg_file) - else: - # No sections left, just delete the file - os.unlink(f) - except Exception as e: - raise ProjectsError("Problem deleting repo source %s: %s" % (source_id, str(e))) - if not found: - raise ProjectsError("source %s not found" % source_id) - -def new_repo_source(dbo, repoid, source, repo_dir): - """Add a new repo source from a Weldr source dict - - :param dbo: dnf base object - :type dbo: dnf.Base - :param id: The repo id (API v0 uses the name, v1 uses the id) - :type id: str - :param source: A Weldr source dict - :type source: dict - :returns: None - :raises: ... - - Make sure access to the dbo has been locked before calling this. - The `id` parameter will the the 'name' field for API v0, and the 'id' field for API v1 - - DNF variables will be substituted at load time, and on restart. - """ - try: - # Remove it from the RepoDict (NOTE that this isn't explicitly supported by the DNF API) - # If this repo already exists, delete it and replace it with the new one - repos = list(r.id for r in dbo.repos.iter_enabled()) - if repoid in repos: - del dbo.repos[repoid] - - # Add the repo and substitute any dnf variables - _, baseurl, kwargs = source_to_repodict(source) - log.debug("repoid=%s, baseurl=%s, kwargs=%s", repoid, baseurl, kwargs) - r = dbo.repos.add_new_repo(repoid, dbo.conf, baseurl, **kwargs) - r.enable() - - log.info("Updating repository metadata after adding %s", repoid) - dbo.fill_sack(load_system_repo=False) - dbo.read_comps() - - # Remove any previous sources with this id, ignore it if it isn't found - try: - delete_repo_source(joinpaths(repo_dir, "*.repo"), repoid) - except ProjectsError: - pass - - # Make sure the source id can't contain a path traversal by taking the basename - source_path = joinpaths(repo_dir, os.path.basename("%s.repo" % repoid)) - # Write the un-substituted version of the repo to disk - with open(source_path, "w") as f: - repo = source_to_repo(source, dbo.conf) - f.write(dnf_repo_to_file_repo(repo)) - except Exception as e: - log.error("(new_repo_source) adding %s failed: %s", repoid, str(e)) - - # Cleanup the mess, if loading it failed we don't want to leave it in memory - repos = list(r.id for r in dbo.repos.iter_enabled()) - if repoid in repos: - del dbo.repos[repoid] - - log.info("Updating repository metadata after adding %s failed", repoid) - dbo.fill_sack(load_system_repo=False) - dbo.read_comps() - - raise diff --git a/src/pylorax/api/queue.py b/src/pylorax/api/queue.py deleted file mode 100644 index 33d6b122..00000000 --- a/src/pylorax/api/queue.py +++ /dev/null @@ -1,863 +0,0 @@ -# Copyright (C) 2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. 
If not, see . -# -""" Functions to monitor compose queue and run anaconda""" -import logging -log = logging.getLogger("pylorax") -program_log = logging.getLogger("program") -dnf_log = logging.getLogger("dnf") - -import os -import grp -from glob import glob -import multiprocessing as mp -import pwd -import shutil -import subprocess -from subprocess import Popen, PIPE -import time - -from pylorax import find_templates -from pylorax.api.compose import move_compose_results -from pylorax.api.recipes import recipe_from_file -from pylorax.api.timestamp import TS_CREATED, TS_STARTED, TS_FINISHED, write_timestamp, timestamp_dict -import pylorax.api.toml as toml -from pylorax.base import DataHolder -from pylorax.creator import run_creator -from pylorax.sysutils import joinpaths, read_tail - -from lifted.queue import create_upload, get_uploads, ready_upload, delete_upload - -def check_queues(cfg): - """Check to make sure the new and run queue symlinks are correct - - :param cfg: Configuration settings - :type cfg: DataHolder - - Also check all of the existing results and make sure any with WAITING - set in STATUS have a symlink in queue/new/ - """ - # Remove broken symlinks from the new and run queues - queue_symlinks = glob(joinpaths(cfg.composer_dir, "queue/new/*")) + \ - glob(joinpaths(cfg.composer_dir, "queue/run/*")) - for link in queue_symlinks: - if not os.path.isdir(os.path.realpath(link)): - log.info("Removing broken symlink %s", link) - os.unlink(link) - - # Write FAILED to the STATUS of any run queue symlinks and remove them - for link in glob(joinpaths(cfg.composer_dir, "queue/run/*")): - log.info("Setting build %s to FAILED, and removing symlink from queue/run/", os.path.basename(link)) - open(joinpaths(link, "STATUS"), "w").write("FAILED\n") - os.unlink(link) - - # Check results STATUS messages - # - If STATUS is missing, set it to FAILED - # - RUNNING should be changed to FAILED - # - WAITING should have a symlink in the new queue - for link in glob(joinpaths(cfg.composer_dir, "results/*")): - if not os.path.exists(joinpaths(link, "STATUS")): - open(joinpaths(link, "STATUS"), "w").write("FAILED\n") - continue - - status = open(joinpaths(link, "STATUS")).read().strip() - if status == "RUNNING": - log.info("Setting build %s to FAILED", os.path.basename(link)) - open(joinpaths(link, "STATUS"), "w").write("FAILED\n") - elif status == "WAITING": - if not os.path.islink(joinpaths(cfg.composer_dir, "queue/new/", os.path.basename(link))): - log.info("Creating missing symlink to new build %s", os.path.basename(link)) - os.symlink(link, joinpaths(cfg.composer_dir, "queue/new/", os.path.basename(link))) - -def start_queue_monitor(cfg, uid, gid): - """Start the queue monitor as a mp process - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param uid: User ID that owns the queue - :type uid: int - :param gid: Group ID that owns the queue - :type gid: int - :returns: None - """ - lib_dir = cfg.get("composer", "lib_dir") - share_dir = cfg.get("composer", "share_dir") - tmp = cfg.get("composer", "tmp") - monitor_cfg = DataHolder(cfg=cfg, composer_dir=lib_dir, share_dir=share_dir, uid=uid, gid=gid, tmp=tmp) - p = mp.Process(target=monitor, args=(monitor_cfg,)) - p.daemon = True - p.start() - -def monitor(cfg): - """Monitor the queue for new compose requests - - :param cfg: Configuration settings - :type cfg: DataHolder - :returns: Does not return - - The queue has 2 subdirectories, new and run. 
When a compose is ready to be run - a symlink to the uniquely named results directory should be placed in ./queue/new/ - - When it is ready to be run (it is checked every 30 seconds or after a previous - compose is finished) the symlink will be moved into ./queue/run/ and a STATUS file - will be created in the results directory. - - STATUS can contain one of: WAITING, RUNNING, FINISHED, FAILED - - If the system is restarted while a compose is running it will move any old symlinks - from ./queue/run/ to ./queue/new/ and rerun them. - """ - def queue_sort(uuid): - """Sort the queue entries by their mtime, not their names""" - return os.stat(joinpaths(cfg.composer_dir, "queue/new", uuid)).st_mtime - - check_queues(cfg) - while True: - uuids = sorted(os.listdir(joinpaths(cfg.composer_dir, "queue/new")), key=queue_sort) - - # Pick the oldest and move it into ./run/ - if not uuids: - # No composes left to process, sleep for a bit - time.sleep(5) - else: - src = joinpaths(cfg.composer_dir, "queue/new", uuids[0]) - dst = joinpaths(cfg.composer_dir, "queue/run", uuids[0]) - try: - os.rename(src, dst) - except OSError: - # The symlink may vanish if uuid_cancel() has been called - continue - - # The anaconda logs are also copied into ./anaconda/ in this directory - os.makedirs(joinpaths(dst, "logs"), exist_ok=True) - - def open_handler(loggers, file_name): - handler = logging.FileHandler(joinpaths(dst, "logs", file_name)) - handler.setLevel(logging.DEBUG) - handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s: %(message)s")) - for logger in loggers: - logger.addHandler(handler) - return (handler, loggers) - - loggers = (((log, program_log, dnf_log), "combined.log"), - ((log,), "composer.log"), - ((program_log,), "program.log"), - ((dnf_log,), "dnf.log")) - handlers = [open_handler(loggers, file_name) for loggers, file_name in loggers] - - log.info("Starting new compose: %s", dst) - open(joinpaths(dst, "STATUS"), "w").write("RUNNING\n") - - try: - make_compose(cfg, os.path.realpath(dst)) - log.info("Finished building %s, results are in %s", dst, os.path.realpath(dst)) - open(joinpaths(dst, "STATUS"), "w").write("FINISHED\n") - write_timestamp(dst, TS_FINISHED) - - upload_cfg = cfg.cfg["upload"] - for upload in get_uploads(upload_cfg, uuid_get_uploads(cfg.cfg, uuids[0])): - log.info("Readying upload %s", upload.uuid) - uuid_ready_upload(cfg.cfg, uuids[0], upload.uuid) - except Exception: - import traceback - log.error("traceback: %s", traceback.format_exc()) -# TODO - Write the error message to an ERROR-LOG file to include with the status -# log.error("Error running compose: %s", e) - open(joinpaths(dst, "STATUS"), "w").write("FAILED\n") - write_timestamp(dst, TS_FINISHED) - finally: - for handler, loggers in handlers: - for logger in loggers: - logger.removeHandler(handler) - handler.close() - - os.unlink(dst) - -def make_compose(cfg, results_dir): - """Run anaconda with the final-kickstart.ks from results_dir - - :param cfg: Configuration settings - :type cfg: DataHolder - :param results_dir: The directory containing the metadata and results for the build - :type results_dir: str - :returns: Nothing - :raises: May raise various exceptions - - This takes the final-kickstart.ks, and the settings in config.toml and runs Anaconda - in no-virt mode (directly on the host operating system). Exceptions should be caught - at the higher level.
- - If there is a failure, the build artifacts will be cleaned up, and any logs will be - moved into logs/anaconda/ and their ownership will be set to the user from the cfg - object. - """ - - # Check on the ks's presence - ks_path = joinpaths(results_dir, "final-kickstart.ks") - if not os.path.exists(ks_path): - raise RuntimeError("Missing kickstart file at %s" % ks_path) - - # Load the compose configuration - cfg_path = joinpaths(results_dir, "config.toml") - if not os.path.exists(cfg_path): - raise RuntimeError("Missing config.toml for %s" % results_dir) - cfg_dict = toml.loads(open(cfg_path, "r").read()) - - # The keys in cfg_dict correspond to the arguments setup in livemedia-creator - # keys that define what to build should be setup in compose_args, and keys with - # defaults should be setup here. - - # Make sure that image_name contains no path components - cfg_dict["image_name"] = os.path.basename(cfg_dict["image_name"]) - - # Only support novirt installation, set some other defaults - cfg_dict["no_virt"] = True - cfg_dict["disk_image"] = None - cfg_dict["fs_image"] = None - cfg_dict["keep_image"] = False - cfg_dict["domacboot"] = False - cfg_dict["anaconda_args"] = "" - cfg_dict["proxy"] = "" - cfg_dict["armplatform"] = "" - cfg_dict["squashfs_args"] = None - - cfg_dict["lorax_templates"] = find_templates(cfg.share_dir) - cfg_dict["tmp"] = cfg.tmp - # Use default args for dracut - cfg_dict["dracut_conf"] = None - cfg_dict["dracut_args"] = None - - # TODO How to support other arches? - cfg_dict["arch"] = None - - # Compose things in a temporary directory inside the results directory - cfg_dict["result_dir"] = joinpaths(results_dir, "compose") - os.makedirs(cfg_dict["result_dir"]) - - install_cfg = DataHolder(**cfg_dict) - - # Some kludges for the 99-copy-logs %post, failure in it will crash the build - for f in ["/tmp/NOSAVE_INPUT_KS", "/tmp/NOSAVE_LOGS"]: - open(f, "w") - - # Placing a CANCEL file in the results directory will make execWithRedirect send anaconda a SIGTERM - def cancel_build(): - return os.path.exists(joinpaths(results_dir, "CANCEL")) - - log.debug("cfg = %s", install_cfg) - try: - test_path = joinpaths(results_dir, "TEST") - write_timestamp(results_dir, TS_STARTED) - if os.path.exists(test_path): - # Pretend to run the compose - time.sleep(5) - try: - test_mode = int(open(test_path, "r").read()) - except Exception: - test_mode = 1 - if test_mode == 1: - raise RuntimeError("TESTING FAILED compose") - else: - open(joinpaths(results_dir, install_cfg.image_name), "w").write("TEST IMAGE") - else: - run_creator(install_cfg, cancel_func=cancel_build) - - # Extract the results of the compose into results_dir and cleanup the compose directory - move_compose_results(install_cfg, results_dir) - finally: - # Make sure any remaining temporary directories are removed (eg. if there was an exception) - for d in glob(joinpaths(cfg.tmp, "lmc-*")): - if os.path.isdir(d): - shutil.rmtree(d) - elif os.path.isfile(d): - os.unlink(d) - - # Make sure that everything under the results directory is owned by the user - user = pwd.getpwuid(cfg.uid).pw_name - group = grp.getgrgid(cfg.gid).gr_name - log.debug("Install finished, chowning results to %s:%s", user, group) - subprocess.call(["chown", "-R", "%s:%s" % (user, group), results_dir]) - -def get_compose_type(results_dir): - """Return the type of composition. - - :param results_dir: The directory containing the metadata and results for the build - :type results_dir: str - :returns: The type of compose (eg. 
'tar') - :rtype: str - :raises: RuntimeError if no kickstart template can be found. - """ - # Should only be 2 kickstarts, the final-kickstart.ks and the template - t = [os.path.basename(ks)[:-3] for ks in glob(joinpaths(results_dir, "*.ks")) - if "final-kickstart" not in ks] - if len(t) != 1: - raise RuntimeError("Cannot find ks template for build %s" % os.path.basename(results_dir)) - return t[0] - -def compose_detail(cfg, results_dir, api=1): - """Return details about the build. - - :param cfg: Configuration settings (required for api=1) - :type cfg: ComposerConfig - :param results_dir: The directory containing the metadata and results for the build - :type results_dir: str - :param api: Select which api version of the dict to return (default 1) - :type api: int - :returns: A dictionary with details about the compose - :rtype: dict - :raises: IOError if it cannot read the directory, STATUS, or blueprint file. - - The following details are included in the dict: - - * id - The uuid of the composition - * queue_status - The final status of the composition (FINISHED or FAILED) - * compose_type - The type of output generated (tar, iso, etc.) - * blueprint - Blueprint name - * version - Blueprint version - * image_size - Size of the image, if finished. 0 otherwise. - * uploads - For API v1 details about uploading the image are included - - Various timestamps are also included in the dict. These are all Unix UTC timestamps. - It is possible for these timestamps to not always exist, in which case they will be - None in Python (or null in JSON). The following timestamps are included: - - * job_created - When the user submitted the compose - * job_started - Anaconda started running - * job_finished - Job entered FINISHED or FAILED state - """ - build_id = os.path.basename(os.path.abspath(results_dir)) - status = open(joinpaths(results_dir, "STATUS")).read().strip() - blueprint = recipe_from_file(joinpaths(results_dir, "blueprint.toml")) - - compose_type = get_compose_type(results_dir) - - image_path = get_image_name(results_dir)[1] - if status == "FINISHED" and os.path.exists(image_path): - image_size = os.stat(image_path).st_size - else: - image_size = 0 - - times = timestamp_dict(results_dir) - - detail = {"id": build_id, - "queue_status": status, - "job_created": times.get(TS_CREATED), - "job_started": times.get(TS_STARTED), - "job_finished": times.get(TS_FINISHED), - "compose_type": compose_type, - "blueprint": blueprint["name"], - "version": blueprint["version"], - "image_size": image_size, - } - - if api == 1: - # Get uploads for this build_id - upload_uuids = uuid_get_uploads(cfg, build_id) - summaries = [upload.summary() for upload in get_uploads(cfg["upload"], upload_uuids)] - detail["uploads"] = summaries - return detail - -def queue_status(cfg, api=1): - """Return details about what is in the queue. - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param api: Select which api version of the dict to return (default 1) - :type api: int - :returns: A list of the new composes, and a list of the running composes - :rtype: dict - - This returns a dict with 2 lists. "new" is the list of uuids that are waiting to be built, - and "run" has the uuids that are being built (currently limited to 1 at a time). - """
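# Illustrative shape of the queue_status() return value; each entry is a
# compose_detail() dict (the uuid and timestamps are made up, and cfg comes
# from configure()).
status = queue_status(cfg)
# {"new": [{"id": "8c8435ef-d6bd-4c68-9bf1-a2ef66e27edc",
#           "queue_status": "WAITING", "job_created": 1587055932.0,
#           "job_started": None, "job_finished": None,
#           "compose_type": "qcow2", "blueprint": "example-server",
#           "version": "0.0.1", "image_size": 0, "uploads": []}],
#  "run": []}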
- """ - queue_dir = joinpaths(cfg.get("composer", "lib_dir"), "queue") - new_queue = [os.path.realpath(p) for p in glob(joinpaths(queue_dir, "new/*"))] - run_queue = [os.path.realpath(p) for p in glob(joinpaths(queue_dir, "run/*"))] - - new_details = [] - for n in new_queue: - try: - d = compose_detail(cfg, n, api) - except IOError: - continue - new_details.append(d) - - run_details = [] - for r in run_queue: - try: - d = compose_detail(cfg, r, api) - except IOError: - continue - run_details.append(d) - - return { - "new": new_details, - "run": run_details - } - -def uuid_status(cfg, uuid, api=1): - """Return the details of a specific UUID compose - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param uuid: The UUID of the build - :type uuid: str - :param api: Select which api version of the dict to return (default 1) - :type api: int - :returns: Details about the build - :rtype: dict or None - - Returns the same dict as `compose_detail()` - """ - uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid) - try: - return compose_detail(cfg, uuid_dir, api) - except IOError: - return None - -def build_status(cfg, status_filter=None, api=1): - """Return the details of finished or failed builds - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param status_filter: What builds to return. None == all, "FINISHED", or "FAILED" - :type status_filter: str - :param api: Select which api version of the dict to return (default 1) - :type api: int - :returns: A list of the build details (from compose_detail) - :rtype: list of dicts - - This returns a list of build details for each of the matching builds on the - system. It does not return the status of builds that have not been finished. - Use queue_status() for those. - """ - if status_filter: - status_filter = [status_filter] - else: - status_filter = ["FINISHED", "FAILED"] - - results = [] - result_dir = joinpaths(cfg.get("composer", "lib_dir"), "results") - for build in glob(result_dir + "/*"): - log.debug("Checking status of build %s", build) - - try: - status = open(joinpaths(build, "STATUS"), "r").read().strip() - if status in status_filter: - results.append(compose_detail(cfg, build, api)) - except IOError: - pass - return results - -def _upload_list_path(cfg, uuid): - """Return the path to the UPLOADS file - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param uuid: The UUID of the build - :type uuid: str - :returns: Path to the UPLOADS file listing the build's associated uploads - :rtype: str - :raises: RuntimeError if the uuid is not found - """ - results_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid) - if not os.path.isdir(results_dir): - raise RuntimeError(f'"{uuid}" is not a valid build uuid!') - return joinpaths(results_dir, "UPLOADS") - -def uuid_schedule_upload(cfg, uuid, provider_name, image_name, settings): - """Schedule an upload of an image - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param uuid: The UUID of the build - :type uuid: str - :param provider_name: The name of the cloud provider, e.g. 
"azure" - :type provider_name: str - :param image_name: Path of the image to upload - :type image_name: str - :param settings: Settings to use for the selected provider - :type settings: dict - :returns: uuid of the upload - :rtype: str - :raises: RuntimeError if the uuid is not a valid build uuid - """ - status = uuid_status(cfg, uuid) - if status is None: - raise RuntimeError(f'"{uuid}" is not a valid build uuid!') - - upload = create_upload(cfg["upload"], provider_name, image_name, settings) - uuid_add_upload(cfg, uuid, upload.uuid) - return upload.uuid - -def uuid_get_uploads(cfg, uuid): - """Return the list of uploads for a build uuid - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param uuid: The UUID of the build - :type uuid: str - :returns: The upload UUIDs associated with the build UUID - :rtype: frozenset - """ - try: - with open(_upload_list_path(cfg, uuid)) as uploads_file: - return frozenset(uploads_file.read().split()) - except FileNotFoundError: - return frozenset() - -def uuid_add_upload(cfg, uuid, upload_uuid): - """Add an upload UUID to a build - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param uuid: The UUID of the build - :type uuid: str - :param upload_uuid: The UUID of the upload - :type upload_uuid: str - :returns: None - :rtype: None - """ - if upload_uuid not in uuid_get_uploads(cfg, uuid): - with open(_upload_list_path(cfg, uuid), "a") as uploads_file: - print(upload_uuid, file=uploads_file) - status = uuid_status(cfg, uuid) - if status and status["queue_status"] == "FINISHED": - uuid_ready_upload(cfg, uuid, upload_uuid) - -def uuid_remove_upload(cfg, upload_uuid): - """Remove an upload UUID from the build - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param upload_uuid: The UUID of the upload - :type upload_uuid: str - :returns: None - :rtype: None - :raises: RuntimeError if the upload_uuid is not found - """ - for build_uuid in (os.path.basename(b) for b in glob(joinpaths(cfg.get("composer", "lib_dir"), "results/*"))): - uploads = uuid_get_uploads(cfg, build_uuid) - if upload_uuid not in uploads: - continue - - uploads = uploads - frozenset((upload_uuid,)) - with open(_upload_list_path(cfg, build_uuid), "w") as uploads_file: - for upload in uploads: - print(upload, file=uploads_file) - return - - raise RuntimeError(f"{upload_uuid} is not a valid upload id!") - -def uuid_ready_upload(cfg, uuid, upload_uuid): - """Set an upload to READY if the build is in FINISHED state - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param uuid: The UUID of the build - :type uuid: str - :param upload_uuid: The UUID of the upload - :type upload_uuid: str - :returns: None - :rtype: None - :raises: RuntimeError if the build uuid is invalid or not in FINISHED state. 
- """ - status = uuid_status(cfg, uuid) - if not status: - raise RuntimeError(f"{uuid} is not a valid build id!") - if status["queue_status"] != "FINISHED": - raise RuntimeError(f"Build {uuid} is not finished!") - _, image_path = uuid_image(cfg, uuid) - ready_upload(cfg["upload"], upload_uuid, image_path) - -def uuid_cancel(cfg, uuid): - """Cancel a build and delete its results - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param uuid: The UUID of the build - :type uuid: str - :returns: True if it was canceled and deleted - :rtype: bool - - Only call this if the build status is WAITING or RUNNING - """ - cancel_path = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid, "CANCEL") - if os.path.exists(cancel_path): - log.info("Cancel has already been requested for %s", uuid) - return False - - # This status can change (and probably will) while it is in the middle of doing this: - # It can move from WAITING -> RUNNING or it can move from RUNNING -> FINISHED|FAILED - - # If it is in WAITING remove the symlink and then check to make sure it didn't show up - # in the run queue - queue_dir = joinpaths(cfg.get("composer", "lib_dir"), "queue") - uuid_new = joinpaths(queue_dir, "new", uuid) - if os.path.exists(uuid_new): - try: - os.unlink(uuid_new) - except OSError: - # The symlink may vanish if the queue monitor started the build - pass - uuid_run = joinpaths(queue_dir, "run", uuid) - if not os.path.exists(uuid_run): - # Make sure the build is still in the waiting state - status = uuid_status(cfg, uuid) - if status is None or status["queue_status"] == "WAITING": - # Successfully removed it before the build started - return uuid_delete(cfg, uuid) - - # At this point the build has probably started. Write to the CANCEL file. - open(cancel_path, "w").write("\n") - - # Wait for status to move to FAILED or FINISHED - started = time.time() - while True: - status = uuid_status(cfg, uuid) - if status is None or status["queue_status"] == "FAILED": - break - elif status is not None and status["queue_status"] == "FINISHED": - # The build finished successfully, no point in deleting it now - return False - - # Is this taking too long? Exit anyway and try to cleanup. 
-        if time.time() > started + (10 * 60):
-            log.error("Failed to cancel the build of %s", uuid)
-            break
-
-        time.sleep(5)
-
-    # Remove the partial results
-    uuid_delete(cfg, uuid)
-
-def uuid_delete(cfg, uuid):
-    """Delete all of the results from a compose
-
-    :param cfg: Configuration settings
-    :type cfg: ComposerConfig
-    :param uuid: The UUID of the build
-    :type uuid: str
-    :returns: True if it was deleted
-    :rtype: bool
-    :raises: This will raise an error if the delete failed
-    """
-    uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
-    if not uuid_dir or len(uuid_dir) < 10:
-        raise RuntimeError("Directory length is too short: %s" % uuid_dir)
-
-    for upload in get_uploads(cfg["upload"], uuid_get_uploads(cfg, uuid)):
-        delete_upload(cfg["upload"], upload.uuid)
-
-    shutil.rmtree(uuid_dir)
-    return True
-
-def uuid_info(cfg, uuid, api=1):
-    """Return information about the composition
-
-    :param cfg: Configuration settings
-    :type cfg: ComposerConfig
-    :param uuid: The UUID of the build
-    :type uuid: str
-    :returns: dictionary of information about the composition or None
-    :rtype: dict
-    :raises: RuntimeError if there was a problem
-
-    This will return a dict with the following fields populated:
-
-    * id - The uuid of the composition
-    * config - containing the configuration settings used to run Anaconda
-    * blueprint - The depsolved blueprint used to generate the kickstart
-    * commit - The (local) git commit hash for the blueprint used
-    * deps - The NEVRA of all of the dependencies used in the composition
-    * compose_type - The type of output generated (tar, iso, etc.)
-    * queue_status - The final status of the composition (FINISHED or FAILED)
-    """
-    uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
-    if not os.path.exists(uuid_dir):
-        return None
-
-    # Load the compose configuration
-    cfg_path = joinpaths(uuid_dir, "config.toml")
-    if not os.path.exists(cfg_path):
-        raise RuntimeError("Missing config.toml for %s" % uuid)
-    cfg_dict = toml.loads(open(cfg_path, "r").read())
-
-    frozen_path = joinpaths(uuid_dir, "frozen.toml")
-    if not os.path.exists(frozen_path):
-        raise RuntimeError("Missing frozen.toml for %s" % uuid)
-    frozen_dict = toml.loads(open(frozen_path, "r").read())
-
-    deps_path = joinpaths(uuid_dir, "deps.toml")
-    if not os.path.exists(deps_path):
-        raise RuntimeError("Missing deps.toml for %s" % uuid)
-    deps_dict = toml.loads(open(deps_path, "r").read())
-
-    details = compose_detail(cfg, uuid_dir, api)
-
-    commit_path = joinpaths(uuid_dir, "COMMIT")
-    if not os.path.exists(commit_path):
-        raise RuntimeError("Missing commit hash for %s" % uuid)
-    commit_id = open(commit_path, "r").read().strip()
-
-    info = {"id": uuid,
-            "config": cfg_dict,
-            "blueprint": frozen_dict,
-            "commit": commit_id,
-            "deps": deps_dict,
-            "compose_type": details["compose_type"],
-            "queue_status": details["queue_status"],
-            "image_size": details["image_size"],
-           }
-    if api == 1:
-        upload_uuids = uuid_get_uploads(cfg, uuid)
-        summaries = [upload.summary() for upload in get_uploads(cfg["upload"], upload_uuids)]
-        info["uploads"] = summaries
-    return info
-
-def uuid_tar(cfg, uuid, metadata=False, image=False, logs=False):
-    """Return a tar of the build data
-
-    :param cfg: Configuration settings
-    :type cfg: ComposerConfig
-    :param uuid: The UUID of the build
-    :type uuid: str
-    :param metadata: Set to true to include all the metadata needed to reproduce the build
-    :type metadata: bool
-    :param image: Set to true to include the output image
-    :type image: bool
:param logs: Set to true to include the logs from the build - :type logs: bool - :returns: A stream of bytes from tar - :rtype: A generator - :raises: RuntimeError if there was a problem (eg. missing config file) - - This yields an uncompressed tar's data to the caller. It includes - the selected data to the caller by returning the Popen stdout from the tar process. - """ - uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid) - if not os.path.exists(uuid_dir): - raise RuntimeError("%s is not a valid build_id" % uuid) - - # Load the compose configuration - cfg_path = joinpaths(uuid_dir, "config.toml") - if not os.path.exists(cfg_path): - raise RuntimeError("Missing config.toml for %s" % uuid) - cfg_dict = toml.loads(open(cfg_path, "r").read()) - image_name = cfg_dict["image_name"] - - def include_file(f): - if f.endswith("/logs"): - return logs - if f.endswith(image_name): - return image - return metadata - filenames = [os.path.basename(f) for f in glob(joinpaths(uuid_dir, "*")) if include_file(f)] - - tar = Popen(["tar", "-C", uuid_dir, "-cf-"] + filenames, stdout=PIPE) - return tar.stdout - -def uuid_image(cfg, uuid): - """Return the filename and full path of the build's image file - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param uuid: The UUID of the build - :type uuid: str - :returns: The image filename and full path - :rtype: tuple of strings - :raises: RuntimeError if there was a problem (eg. invalid uuid, missing config file) - """ - uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid) - return get_image_name(uuid_dir) - -def get_image_name(uuid_dir): - """Return the filename and full path of the build's image file - - :param uuid: The UUID of the build - :type uuid: str - :returns: The image filename and full path - :rtype: tuple of strings - :raises: RuntimeError if there was a problem (eg. invalid uuid, missing config file) - """ - uuid = os.path.basename(os.path.abspath(uuid_dir)) - if not os.path.exists(uuid_dir): - raise RuntimeError("%s is not a valid build_id" % uuid) - - # Load the compose configuration - cfg_path = joinpaths(uuid_dir, "config.toml") - if not os.path.exists(cfg_path): - raise RuntimeError("Missing config.toml for %s" % uuid) - cfg_dict = toml.loads(open(cfg_path, "r").read()) - image_name = cfg_dict["image_name"] - - return (image_name, joinpaths(uuid_dir, image_name)) - -def uuid_log(cfg, uuid, size=1024): - """Return `size` KiB from the end of the most currently relevant log for a - given compose - - :param cfg: Configuration settings - :type cfg: ComposerConfig - :param uuid: The UUID of the build - :type uuid: str - :param size: Number of KiB to read. Default is 1024 - :type size: int - :returns: Up to `size` KiB from the end of the log - :rtype: str - :raises: RuntimeError if there was a problem (eg. no log file available) - - This function will return the end of either the anaconda log, the packaging - log, or the combined composer logs, depending on the progress of the - compose. It tries to return lines from the end of the log, it will attempt - to start on a line boundary, and it may return less than `size` kbytes. 
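For reference, a short sketch of consuming both helpers, under the same ``cfg`` assumption; the build uuid and output filename are hypothetical::

    from pylorax.api.queue import uuid_tar, uuid_log

    build_id = "30c0f4ab"
    # Stream the metadata and logs (but not the image) into a local tar file
    stream = uuid_tar(cfg, build_id, metadata=True, image=False, logs=True)
    with open("build-artifacts.tar", "wb") as f:
        for chunk in iter(lambda: stream.read(65536), b""):
            f.write(chunk)

    print(uuid_log(cfg, build_id, size=64))   # up to the last 64 KiB of the relevant log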
- """ - uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid) - if not os.path.exists(uuid_dir): - raise RuntimeError("%s is not a valid build_id" % uuid) - - # While a build is running the logs will be in /tmp/anaconda.log and when it - # has finished they will be in the results directory - status = uuid_status(cfg, uuid) - if status is None: - raise RuntimeError("Status is missing for %s" % uuid) - - def get_log_path(): - # Try to return the most relevant log at any given time during the - # compose. If the compose is not running, return the composer log. - anaconda_log = "/tmp/anaconda.log" - packaging_log = "/tmp/packaging.log" - combined_log = joinpaths(uuid_dir, "logs", "combined.log") - if status["queue_status"] != "RUNNING" or not os.path.isfile(anaconda_log): - return combined_log - if not os.path.isfile(packaging_log): - return anaconda_log - try: - anaconda_mtime = os.stat(anaconda_log).st_mtime - packaging_mtime = os.stat(packaging_log).st_mtime - # If the packaging log exists and its last message is at least 15 - # seconds newer than the anaconda log, return the packaging log. - if packaging_mtime > anaconda_mtime + 15: - return packaging_log - return anaconda_log - except OSError: - # Return the combined log if anaconda_log or packaging_log disappear - return combined_log - try: - tail = read_tail(get_log_path(), size) - except OSError as e: - raise RuntimeError("No log available.") from e - return tail diff --git a/src/pylorax/api/recipes.py b/src/pylorax/api/recipes.py deleted file mode 100644 index a3f5fbde..00000000 --- a/src/pylorax/api/recipes.py +++ /dev/null @@ -1,1276 +0,0 @@ -# -# Copyright (C) 2017-2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# - -import gi -gi.require_version("Ggit", "1.0") -from gi.repository import Ggit as Git -from gi.repository import Gio -from gi.repository import GLib - -import os -import semantic_version as semver - -from pylorax.api.projects import dep_evra -from pylorax.base import DataHolder -from pylorax.sysutils import joinpaths -import pylorax.api.toml as toml - - -class CommitTimeValError(Exception): - pass - -class RecipeFileError(Exception): - pass - -class RecipeError(Exception): - pass - - -class Recipe(dict): - """A Recipe of package and modules - - This is a subclass of dict that enforces the constructor arguments - and adds a .filename property to return the recipe's filename, - and a .toml() function to return the recipe as a TOML string. 
- """ - def __init__(self, name, description, version, modules, packages, groups, customizations=None, gitrepos=None): - # Check that version is empty or semver compatible - if version: - semver.Version(version) - - # Make sure modules, packages, and groups are listed by their case-insensitive names - if modules is not None: - modules = sorted(modules, key=lambda m: m["name"].lower()) - if packages is not None: - packages = sorted(packages, key=lambda p: p["name"].lower()) - if groups is not None: - groups = sorted(groups, key=lambda g: g["name"].lower()) - - # Only support [[repos.git]] for now - if gitrepos is not None: - repos = {"git": sorted(gitrepos, key=lambda g: g["repo"].lower())} - else: - repos = None - dict.__init__(self, name=name, - description=description, - version=version, - modules=modules, - packages=packages, - groups=groups, - customizations=customizations, - repos=repos) - - # We don't want customizations=None to show up in the TOML so remove it - if customizations is None: - del self["customizations"] - - # Don't include empty repos or repos.git - if repos is None or not repos["git"]: - del self["repos"] - - @property - def package_names(self): - """Return the names of the packages""" - return [p["name"] for p in self["packages"] or []] - - @property - def package_nver(self): - """Return the names and version globs of the packages""" - return [(p["name"], p["version"]) for p in self["packages"] or []] - - @property - def module_names(self): - """Return the names of the modules""" - return [m["name"] for m in self["modules"] or []] - - @property - def module_nver(self): - """Return the names and version globs of the modules""" - return [(m["name"], m["version"]) for m in self["modules"] or []] - - @property - def group_names(self): - """Return the names of the groups. 
Groups do not have versions."""
-        return map(lambda g: g["name"], self["groups"] or [])
-
-    @property
-    def filename(self):
-        """Return the Recipe's filename
-
-        Replaces spaces in the name with '-' and appends .toml
-        """
-        return recipe_filename(self.get("name"))
-
-    def toml(self):
-        """Return the Recipe in TOML format"""
-        return toml.dumps(self)
-
-    def bump_version(self, old_version=None):
-        """semver recipe version number bump
-
-        :param old_version: An optional old version number
-        :type old_version: str
-        :returns: The new version number or None
-        :rtype: str
-        :raises: ValueError
-
-        If neither have a version, 0.0.1 is returned
-        If there is no old version the new version is checked and returned
-        If there is no new version, but there is an old one, bump its patch level
-        If the old and new versions are the same, bump the patch level
-        If they are different, check and return the new version
-        """
-        new_version = self.get("version")
-        if not new_version and not old_version:
-            self["version"] = "0.0.1"
-
-        elif new_version and not old_version:
-            semver.Version(new_version)
-            self["version"] = new_version
-
-        elif not new_version or new_version == old_version:
-            new_version = str(semver.Version(old_version).next_patch())
-            self["version"] = new_version
-
-        else:
-            semver.Version(new_version)
-            self["version"] = new_version
-
-        # Return the new version
-        return str(semver.Version(self["version"]))
-
-    def freeze(self, deps):
-        """ Return a new Recipe with full module and package NEVRA
-
-        :param deps: A list of dependency NEVRA to use to fill in the modules and packages
-        :type deps: list(dict)
-        :returns: A new Recipe object
-        :rtype: Recipe
-        """
-        module_names = self.module_names
-        package_names = self.package_names
-        group_names = self.group_names
-
-        new_modules = []
-        new_packages = []
-        new_groups = []
-        for dep in deps:
-            if dep["name"] in package_names:
-                new_packages.append(RecipePackage(dep["name"], dep_evra(dep)))
-            elif dep["name"] in module_names:
-                new_modules.append(RecipeModule(dep["name"], dep_evra(dep)))
-            elif dep["name"] in group_names:
-                new_groups.append(RecipeGroup(dep["name"]))
-        if "customizations" in self:
-            customizations = self["customizations"]
-        else:
-            customizations = None
-        if "repos" in self and "git" in self["repos"]:
-            gitrepos = self["repos"]["git"]
-        else:
-            gitrepos = None
-
-        return Recipe(self["name"], self["description"], self["version"],
-                      new_modules, new_packages, new_groups, customizations, gitrepos)
-
-class RecipeModule(dict):
-    def __init__(self, name, version):
-        dict.__init__(self, name=name, version=version)
-
-class RecipePackage(RecipeModule):
-    pass
-
-class RecipeGroup(dict):
-    def __init__(self, name):
-        dict.__init__(self, name=name)
-
-def NewRecipeGit(toml_dict):
-    """Create a RecipeGit object from fields in a TOML dict
-
-    :param rpmname: Name of the rpm to create, also used as the prefix name in the tar archive
-    :type rpmname: str
-    :param rpmversion: Version of the rpm, eg. "1.0.0"
-    :type rpmversion: str
-    :param rpmrelease: Release of the rpm, eg. "1"
-    :type rpmrelease: str
-    :param summary: Summary string for the rpm
-    :type summary: str
-    :param repo: URL of the git repo to clone and create the archive from
-    :type repo: str
-    :param ref: Git reference to check out. eg. origin/branch-name, git tag, or git commit hash
-    :type ref: str
-    :param destination: Path to install the / of the git repo at when installing the rpm
-    :type destination: str
-    :returns: A populated RecipeGit object
-    :rtype: RecipeGit
-
-    The TOML should look like this::
-
-        [[repos.git]]
-        rpmname="server-config"
-        rpmversion="1.0"
-        rpmrelease="1"
-        summary="Setup files for server deployment"
-        repo="PATH OF GIT REPO TO CLONE"
-        ref="v1.0"
-        destination="/opt/server/"
-
-    Note that the repo path supports anything that git supports, file://, https://, http://
-
-    Currently there is no support for authentication
-    """
-    return RecipeGit(toml_dict.get("rpmname"),
-                     toml_dict.get("rpmversion"),
-                     toml_dict.get("rpmrelease"),
-                     toml_dict.get("summary", ""),
-                     toml_dict.get("repo"),
-                     toml_dict.get("ref"),
-                     toml_dict.get("destination"))
-
-class RecipeGit(dict):
-    def __init__(self, rpmname, rpmversion, rpmrelease, summary, repo, ref, destination):
-        dict.__init__(self, rpmname=rpmname, rpmversion=rpmversion, rpmrelease=rpmrelease,
-                      summary=summary, repo=repo, ref=ref, destination=destination)
-
-def recipe_from_file(recipe_path):
-    """Return a recipe file as a Recipe object
-
-    :param recipe_path: Path to the recipe file
-    :type recipe_path: str
-    :returns: A Recipe object
-    :rtype: Recipe
-    """
-    with open(recipe_path, 'rb') as f:
-        return recipe_from_toml(f.read())
-
-def recipe_from_toml(recipe_str):
-    """Create a Recipe object from a toml string.
-
-    :param recipe_str: The Recipe TOML string
-    :type recipe_str: str
-    :returns: A Recipe object
-    :rtype: Recipe
-    :raises: TomlError
-    """
-    recipe_dict = toml.loads(recipe_str)
-    return recipe_from_dict(recipe_dict)
-
-def check_required_list(lst, fields):
-    """Check a list of dicts for required fields
-
-    :param lst: A list of dicts with fields
-    :type lst: list of dict
-    :param fields: A list of field name strings
-    :type fields: list of str
-    :returns: A list of error strings
-    :rtype: list of str
-    """
-    errors = []
-    for i, m in enumerate(lst):
-        m_errs = []
-        errors.extend(check_list_case(fields, m.keys(), prefix="%d " % (i+1)))
-        for f in fields:
-            if f not in m:
-                m_errs.append("'%s'" % f)
-        if m_errs:
-            errors.append("%d is missing %s" % (i+1, ", ".join(m_errs)))
-    return errors
-
-def check_list_case(expected_keys, recipe_keys, prefix=""):
-    """Check the case of the recipe keys
-
-    :param expected_keys: A list of expected key strings
-    :type expected_keys: list of str
-    :param recipe_keys: A list of the recipe's key strings
-    :type recipe_keys: list of str
-    :returns: list of errors
-    :rtype: list of str
-    """
-    errors = []
-    for k in recipe_keys:
-        if k in expected_keys:
-            continue
-        if k.lower() in expected_keys:
-            errors.append(prefix + "%s should be %s" % (k, k.lower()))
-    return errors
-
-def check_recipe_dict(recipe_dict):
-    """Check a dict before using it to create a new Recipe
-
-    :param recipe_dict: A plain dict of the recipe
-    :type recipe_dict: dict
-    :returns: True if dict is ok
-    :rtype: bool
-    :raises: RecipeError
-
-    This checks a dict to make sure required fields are present,
-    that optional fields are correct, and that other optional fields
-    are of the correct format, when included.
-
-    This collects all of the errors and returns a single RecipeError with
-    a string that can be presented to users.
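Note that, as implemented below, check_recipe_dict() returns the list of error strings; it is recipe_from_dict() that raises the RecipeError. A sketch with a deliberately malformed dict::

    from pylorax.api.recipes import check_recipe_dict

    bad = {"description": "missing a name", "version": "not-semver",
           "packages": [{"version": "1.0"}]}
    for e in check_recipe_dict(bad):
        print(e)
    # -> Missing 'name'
    # -> Invalid 'version', must use Semantic Versioning
    # -> 'packages' errors:
    #    1 is missing 'name'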
- """ - errors = [] - - # Check for wrong case of top level keys - top_keys = ["name", "description", "version", "modules", "packages", "groups", "repos", "customizations"] - errors.extend(check_list_case(recipe_dict.keys(), top_keys)) - - if "name" not in recipe_dict: - errors.append("Missing 'name'") - if "description" not in recipe_dict: - errors.append("Missing 'description'") - if "version" in recipe_dict: - try: - semver.Version(recipe_dict["version"]) - except ValueError: - errors.append("Invalid 'version', must use Semantic Versioning") - - # Examine all the modules - if recipe_dict.get("modules"): - module_errors = check_required_list(recipe_dict["modules"], ["name", "version"]) - if module_errors: - errors.append("'modules' errors:\n%s" % "\n".join(module_errors)) - - # Examine all the packages - if recipe_dict.get("packages"): - package_errors = check_required_list(recipe_dict["packages"], ["name", "version"]) - if package_errors: - errors.append("'packages' errors:\n%s" % "\n".join(package_errors)) - - if recipe_dict.get("groups"): - groups_errors = check_required_list(recipe_dict["groups"], ["name"]) - if groups_errors: - errors.append("'groups' errors:\n%s" % "\n".join(groups_errors)) - - if recipe_dict.get("repos") and recipe_dict.get("repos").get("git"): - repos_errors = check_required_list(recipe_dict.get("repos").get("git"), - ["rpmname", "rpmversion", "rpmrelease", "summary", "repo", "ref", "destination"]) - if repos_errors: - errors.append("'repos.git' errors:\n%s" % "\n".join(repos_errors)) - - # No customizations to check, exit now - c = recipe_dict.get("customizations") - if not c: - return errors - - # Make sure to catch empty sections by testing for keywords, not just looking at .get() result. - if "kernel" in c: - errors.extend(check_list_case(["append"], c["kernel"].keys(), prefix="kernel ")) - if "append" not in c.get("kernel", []): - errors.append("'customizations.kernel': missing append field.") - - if "sshkey" in c: - sshkey_errors = check_required_list(c.get("sshkey"), ["user", "key"]) - if sshkey_errors: - errors.append("'customizations.sshkey' errors:\n%s" % "\n".join(sshkey_errors)) - - if "user" in c: - user_errors = check_required_list(c.get("user"), ["name"]) - if user_errors: - errors.append("'customizations.user' errors:\n%s" % "\n".join(user_errors)) - - if "group" in c: - group_errors = check_required_list(c.get("group"), ["name"]) - if group_errors: - errors.append("'customizations.group' errors:\n%s" % "\n".join(group_errors)) - - if "timezone" in c: - errors.extend(check_list_case(["timezone", "ntpservers"], c["timezone"].keys(), prefix="timezone ")) - if not c.get("timezone"): - errors.append("'customizations.timezone': missing timezone or ntpservers fields.") - - if "locale" in c: - errors.extend(check_list_case(["languages", "keyboard"], c["locale"].keys(), prefix="locale ")) - if not c.get("locale"): - errors.append("'customizations.locale': missing languages or keyboard fields.") - - if "firewall" in c: - errors.extend(check_list_case(["ports"], c["firewall"].keys(), prefix="firewall ")) - if not c.get("firewall"): - errors.append("'customizations.firewall': missing ports field or services section.") - - if "services" in c.get("firewall", []): - errors.extend(check_list_case(["enabled", "disabled"], c["firewall"]["services"].keys(), prefix="firewall.services ")) - if not c.get("firewall").get("services"): - errors.append("'customizations.firewall.services': missing enabled or disabled fields.") - - if "services" in c: - 
errors.extend(check_list_case(["enabled", "disabled"], c["services"].keys(), prefix="services ")) - if not c.get("services"): - errors.append("'customizations.services': missing enabled or disabled fields.") - - return errors - -def recipe_from_dict(recipe_dict): - """Create a Recipe object from a plain dict. - - :param recipe_dict: A plain dict of the recipe - :type recipe_dict: dict - :returns: A Recipe object - :rtype: Recipe - :raises: RecipeError - """ - errors = check_recipe_dict(recipe_dict) - if errors: - msg = "\n".join(errors) - raise RecipeError(msg) - - # Make RecipeModule objects from the toml - # The TOML may not have modules or packages in it. Set them to None in this case - try: - if recipe_dict.get("modules"): - modules = [RecipeModule(m.get("name"), m.get("version")) for m in recipe_dict["modules"]] - else: - modules = [] - if recipe_dict.get("packages"): - packages = [RecipePackage(p.get("name"), p.get("version")) for p in recipe_dict["packages"]] - else: - packages = [] - if recipe_dict.get("groups"): - groups = [RecipeGroup(g.get("name")) for g in recipe_dict["groups"]] - else: - groups = [] - if recipe_dict.get("repos") and recipe_dict.get("repos").get("git"): - gitrepos = [NewRecipeGit(r) for r in recipe_dict["repos"]["git"]] - else: - gitrepos = [] - name = recipe_dict["name"] - description = recipe_dict["description"] - version = recipe_dict.get("version", None) - customizations = recipe_dict.get("customizations", None) - - # [customizations] was incorrectly documented at first, so we have to support using it - # as [[customizations]] by grabbing the first element. - if isinstance(customizations, list): - customizations = customizations[0] - - except KeyError as e: - raise RecipeError("There was a problem parsing the recipe: %s" % str(e)) - - return Recipe(name, description, version, modules, packages, groups, customizations, gitrepos) - -def gfile(path): - """Convert a string path to GFile for use with Git""" - return Gio.file_new_for_path(path) - -def recipe_filename(name): - """Return the toml filename for a recipe - - Replaces spaces with '-' and appends '.toml' - """ - # XXX Raise and error if this is empty? - return name.replace(" ", "-") + ".toml" - -def head_commit(repo, branch): - """Get the branch's HEAD Commit Object - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :returns: Branch's head commit - :rtype: Git.Commit - :raises: Can raise errors from Ggit - """ - branch_obj = repo.lookup_branch(branch, Git.BranchType.LOCAL) - commit_id = branch_obj.get_target() - return repo.lookup(commit_id, Git.Commit) - -def prepare_commit(repo, branch, builder): - """Prepare for a commit - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param builder: instance of TreeBuilder - :type builder: TreeBuilder - :returns: (Tree, Sig, Ref) - :rtype: tuple - :raises: Can raise errors from Ggit - """ - tree_id = builder.write() - tree = repo.lookup(tree_id, Git.Tree) - sig = Git.Signature.new_now("bdcs-api-server", "user-email") - ref = "refs/heads/%s" % branch - return (tree, sig, ref) - -def open_or_create_repo(path): - """Open an existing repo, or create a new one - - :param path: path to recipe directory - :type path: string - :returns: A repository object - :rtype: Git.Repository - :raises: Can raise errors from Ggit - - A bare git repo will be created in the git directory of the specified path. 
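A minimal usage sketch, assuming a writable blueprints directory; commit_recipe_file() is defined further below::

    from pylorax.api.recipes import open_or_create_repo, commit_recipe_file

    repo = open_or_create_repo("/var/lib/lorax/composer/blueprints")
    # The first call creates blueprints/git/ as a bare repo, later calls reopen it
    oid = commit_recipe_file(repo, "master", "/path/to/http-server.toml")
    print(oid.to_string())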
- If a repo already exists it will be opened and returned instead of - creating a new one. - """ - Git.init() - git_path = joinpaths(path, "git") - if os.path.exists(joinpaths(git_path, "HEAD")): - return Git.Repository.open(gfile(git_path)) - - repo = Git.Repository.init_repository(gfile(git_path), True) - - # Make an initial empty commit - sig = Git.Signature.new_now("bdcs-api-server", "user-email") - tree_id = repo.get_index().write_tree() - tree = repo.lookup(tree_id, Git.Tree) - repo.create_commit("HEAD", sig, sig, "UTF-8", "Initial Recipe repository commit", tree, []) - return repo - -def write_commit(repo, branch, filename, message, content): - """Make a new commit to a repository's branch - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param filename: full path of the file to add - :type filename: str - :param message: The commit message - :type message: str - :param content: The data to write - :type content: str - :returns: OId of the new commit - :rtype: Git.OId - :raises: Can raise errors from Ggit - """ - try: - parent_commit = head_commit(repo, branch) - except GLib.GError: - # Branch doesn't exist, make a new one based on master - master_head = head_commit(repo, "master") - repo.create_branch(branch, master_head, 0) - parent_commit = head_commit(repo, branch) - - parent_commit = head_commit(repo, branch) - blob_id = repo.create_blob_from_buffer(content.encode("UTF-8")) - - # Use treebuilder to make a new entry for this filename and blob - parent_tree = parent_commit.get_tree() - builder = repo.create_tree_builder_from_tree(parent_tree) - builder.insert(filename, blob_id, Git.FileMode.BLOB) - (tree, sig, ref) = prepare_commit(repo, branch, builder) - return repo.create_commit(ref, sig, sig, "UTF-8", message, tree, [parent_commit]) - -def read_commit_spec(repo, spec): - """Return the raw content of the blob specified by the spec - - :param repo: Open repository - :type repo: Git.Repository - :param spec: Git revparse spec - :type spec: str - :returns: Contents of the commit - :rtype: str - :raises: Can raise errors from Ggit - - eg. To read the README file from master the spec is "master:README" - """ - commit_id = repo.revparse(spec).get_id() - blob = repo.lookup(commit_id, Git.Blob) - return blob.get_raw_content() - -def read_commit(repo, branch, filename, commit=None): - """Return the contents of a file on a specific branch or commit. - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param filename: filename to read - :type filename: str - :param commit: Optional commit hash - :type commit: str - :returns: The commit id, and the contents of the commit - :rtype: tuple(str, str) - :raises: Can raise errors from Ggit - - If no commit is passed the master:filename is returned, otherwise it will be - commit:filename - """ - if not commit: - # Find the most recent commit for filename on the selected branch - commits = list_commits(repo, branch, filename, 1) - if not commits: - raise RecipeError("No commits for %s on the %s branch." 
% (filename, branch)) - commit = commits[0].commit - return (commit, read_commit_spec(repo, "%s:%s" % (commit, filename))) - -def read_recipe_commit(repo, branch, recipe_name, commit=None): - """Read a recipe commit from git and return a Recipe object - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param recipe_name: Recipe name to read - :type recipe_name: str - :param commit: Optional commit hash - :type commit: str - :returns: A Recipe object - :rtype: Recipe - :raises: Can raise errors from Ggit - - If no commit is passed the master:filename is returned, otherwise it will be - commit:filename - """ - if not repo_file_exists(repo, branch, recipe_filename(recipe_name)): - raise RecipeFileError("Unknown blueprint") - - (_, recipe_toml) = read_commit(repo, branch, recipe_filename(recipe_name), commit) - return recipe_from_toml(recipe_toml) - -def read_recipe_and_id(repo, branch, recipe_name, commit=None): - """Read a recipe commit and its id from git - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param recipe_name: Recipe name to read - :type recipe_name: str - :param commit: Optional commit hash - :type commit: str - :returns: The commit id, and a Recipe object - :rtype: tuple(str, Recipe) - :raises: Can raise errors from Ggit - - If no commit is passed the master:filename is returned, otherwise it will be - commit:filename - """ - (commit_id, recipe_toml) = read_commit(repo, branch, recipe_filename(recipe_name), commit) - return (commit_id, recipe_from_toml(recipe_toml)) - -def list_branch_files(repo, branch): - """Return a sorted list of the files on the branch HEAD - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :returns: A sorted list of the filenames - :rtype: list(str) - :raises: Can raise errors from Ggit - """ - commit = head_commit(repo, branch).get_id().to_string() - return list_commit_files(repo, commit) - -def list_commit_files(repo, commit): - """Return a sorted list of the files on a commit - - :param repo: Open repository - :type repo: Git.Repository - :param commit: The commit hash to list - :type commit: str - :returns: A sorted list of the filenames - :rtype: list(str) - :raises: Can raise errors from Ggit - """ - commit_id = Git.OId.new_from_string(commit) - commit_obj = repo.lookup(commit_id, Git.Commit) - tree = commit_obj.get_tree() - return sorted([tree.get(i).get_name() for i in range(0, tree.size())]) - -def delete_recipe(repo, branch, recipe_name): - """Delete a recipe from a branch. - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param recipe_name: Recipe name to delete - :type recipe_name: str - :returns: OId of the new commit - :rtype: Git.OId - :raises: Can raise errors from Ggit - """ - return delete_file(repo, branch, recipe_filename(recipe_name)) - -def delete_file(repo, branch, filename): - """Delete a file from a branch. 
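For reference, reading a committed blueprint back, under the same ``repo`` assumption as above::

    from pylorax.api.recipes import read_recipe_and_id

    commit_id, recipe = read_recipe_and_id(repo, "master", "http-server")
    print(commit_id, recipe["version"])
    # Passing commit= pins the read to an older commit instead of the branch HEAD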
- - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param filename: filename to delete - :type filename: str - :returns: OId of the new commit - :rtype: Git.OId - :raises: Can raise errors from Ggit - """ - parent_commit = head_commit(repo, branch) - parent_tree = parent_commit.get_tree() - builder = repo.create_tree_builder_from_tree(parent_tree) - builder.remove(filename) - (tree, sig, ref) = prepare_commit(repo, branch, builder) - message = "Recipe %s deleted" % filename - return repo.create_commit(ref, sig, sig, "UTF-8", message, tree, [parent_commit]) - -def revert_recipe(repo, branch, recipe_name, commit): - """Revert the contents of a recipe to that of a previous commit - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param recipe_name: Recipe name to revert - :type recipe_name: str - :param commit: Commit hash - :type commit: str - :returns: OId of the new commit - :rtype: Git.OId - :raises: Can raise errors from Ggit - """ - return revert_file(repo, branch, recipe_filename(recipe_name), commit) - -def revert_file(repo, branch, filename, commit): - """Revert the contents of a file to that of a previous commit - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param filename: filename to revert - :type filename: str - :param commit: Commit hash - :type commit: str - :returns: OId of the new commit - :rtype: Git.OId - :raises: Can raise errors from Ggit - """ - commit_id = Git.OId.new_from_string(commit) - commit_obj = repo.lookup(commit_id, Git.Commit) - revert_tree = commit_obj.get_tree() - entry = revert_tree.get_by_name(filename) - blob_id = entry.get_id() - parent_commit = head_commit(repo, branch) - - # Use treebuilder to modify the tree - parent_tree = parent_commit.get_tree() - builder = repo.create_tree_builder_from_tree(parent_tree) - builder.insert(filename, blob_id, Git.FileMode.BLOB) - (tree, sig, ref) = prepare_commit(repo, branch, builder) - commit_hash = commit_id.to_string() - message = "%s reverted to commit %s" % (filename, commit_hash) - return repo.create_commit(ref, sig, sig, "UTF-8", message, tree, [parent_commit]) - -def commit_recipe(repo, branch, recipe): - """Commit a recipe to a branch - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param recipe: Recipe to commit - :type recipe: Recipe - :returns: OId of the new commit - :rtype: Git.OId - :raises: Can raise errors from Ggit - """ - try: - old_recipe = read_recipe_commit(repo, branch, recipe["name"]) - old_version = old_recipe["version"] - except Exception: - old_version = None - - recipe.bump_version(old_version) - recipe_toml = recipe.toml() - message = "Recipe %s, version %s saved." 
% (recipe["name"], recipe["version"]) - return write_commit(repo, branch, recipe.filename, message, recipe_toml) - -def commit_recipe_file(repo, branch, filename): - """Commit a recipe file to a branch - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param filename: Path to the recipe file to commit - :type filename: str - :returns: OId of the new commit - :rtype: Git.OId - :raises: Can raise errors from Ggit or RecipeFileError - """ - try: - recipe = recipe_from_file(filename) - except IOError: - raise RecipeFileError - - return commit_recipe(repo, branch, recipe) - -def commit_recipe_directory(repo, branch, directory): - r"""Commit all \*.toml files from a directory, if they aren't already in git. - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param directory: The directory of \*.toml recipes to commit - :type directory: str - :returns: None - :raises: Can raise errors from Ggit or RecipeFileError - - Files with Toml or RecipeFileErrors will be skipped, and the remainder will - be tried. - """ - dir_files = set([e for e in os.listdir(directory) if e.endswith(".toml")]) - branch_files = set(list_branch_files(repo, branch)) - new_files = dir_files.difference(branch_files) - - for f in new_files: - # Skip files with errors, but try the others - try: - commit_recipe_file(repo, branch, joinpaths(directory, f)) - except (RecipeError, RecipeFileError, toml.TomlError): - pass - -def tag_recipe_commit(repo, branch, recipe_name): - """Tag a file's most recent commit - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param recipe_name: Recipe name to tag - :type recipe_name: str - :returns: Tag id or None if it failed. - :rtype: Git.OId - :raises: Can raise errors from Ggit - - Uses tag_file_commit() - """ - if not repo_file_exists(repo, branch, recipe_filename(recipe_name)): - raise RecipeFileError("Unknown blueprint") - - return tag_file_commit(repo, branch, recipe_filename(recipe_name)) - -def tag_file_commit(repo, branch, filename): - """Tag a file's most recent commit - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param filename: Filename to tag - :type filename: str - :returns: Tag id or None if it failed. - :rtype: Git.OId - :raises: Can raise errors from Ggit - - This uses git tags, of the form `refs/tags///r` - Only the most recent recipe commit can be tagged to prevent out of order tagging. - Revisions start at 1 and increment for each new commit that is tagged. - If the commit has already been tagged it will return false. - """ - file_commits = list_commits(repo, branch, filename) - if not file_commits: - return None - - # Find the most recently tagged version (may not be one) and add 1 to it. 
-    for details in file_commits:
-        if details.revision is not None:
-            new_revision = details.revision + 1
-            break
-    else:
-        new_revision = 1
-
-    name = "%s/%s/r%d" % (branch, filename, new_revision)
-    sig = Git.Signature.new_now("bdcs-api-server", "user-email")
-    commit_id = Git.OId.new_from_string(file_commits[0].commit)
-    commit = repo.lookup(commit_id, Git.Commit)
-    return repo.create_tag(name, commit, sig, name, Git.CreateFlags.NONE)
-
-def find_commit_tag(repo, branch, filename, commit_id):
-    """Find the tag that matches the commit_id
-
-    :param repo: Open repository
-    :type repo: Git.Repository
-    :param branch: Branch name
-    :type branch: str
-    :param filename: filename to revert
-    :type filename: str
-    :param commit_id: The commit id to check
-    :type commit_id: Git.OId
-    :returns: The tag or None if there isn't one
-    :rtype: str or None
-
-    There should be only 1 tag pointing to a commit, but there may not
-    be a tag at all.
-
-    The tag will look like: 'refs/tags/<branch>/<filename>/r<revision>'
-    """
-    pattern = "%s/%s/r*" % (branch, filename)
-    tags = [t for t in repo.list_tags_match(pattern) if is_commit_tag(repo, commit_id, t)]
-    if len(tags) != 1:
-        return None
-    else:
-        return tags[0]
-
-def is_commit_tag(repo, commit_id, tag):
-    """Check to see if a tag points to a specific commit.
-
-    :param repo: Open repository
-    :type repo: Git.Repository
-    :param commit_id: The commit id to check
-    :type commit_id: Git.OId
-    :param tag: The tag to check
-    :type tag: str
-    :returns: True if the tag points to the commit, False otherwise
-    :rtype: bool
-    """
-    ref = repo.lookup_reference("refs/tags/" + tag)
-    tag_id = ref.get_target()
-    tag = repo.lookup(tag_id, Git.Tag)
-    target_id = tag.get_target_id()
-    return commit_id.compare(target_id) == 0
-
-def get_revision_from_tag(tag):
-    """Return the revision number from a tag
-
-    :param tag: The tag to extract the revision from
-    :type tag: str
-    :returns: The integer revision or None
-    :rtype: int or None
-
-    The revision is the part after the r in 'branch/filename/rXXX'
-    """
-    if tag is None:
-        return None
-    try:
-        return int(tag.rsplit('r', 2)[-1])
-    except (ValueError, IndexError):
-        return None
-
-class CommitDetails(DataHolder):
-    def __init__(self, commit, timestamp, message, revision=None):
-        DataHolder.__init__(self,
-                            commit = commit,
-                            timestamp = timestamp,
-                            message = message,
-                            revision = revision)
-
-def list_commits(repo, branch, filename, limit=0):
-    """List the commit history of a file on a branch.
-
-    :param repo: Open repository
-    :type repo: Git.Repository
-    :param branch: Branch name
-    :type branch: str
-    :param filename: filename to revert
-    :type filename: str
-    :param limit: Number of commits to return (0=all)
-    :type limit: int
-    :returns: A list of commit details
-    :rtype: list(CommitDetails)
-    :raises: Can raise errors from Ggit
-    """
-    revwalk = Git.RevisionWalker.new(repo)
-    branch_ref = "refs/heads/%s" % branch
-    revwalk.push_ref(branch_ref)
-
-    commits = []
-    while True:
-        commit_id = revwalk.next()
-        if not commit_id:
-            break
-        commit = repo.lookup(commit_id, Git.Commit)
-
-        parents = commit.get_parents()
-        # No parents? Must be the first commit.
-        if parents.get_size() == 0:
-            continue
-
-        tree = commit.get_tree()
-        # Is the filename in this tree? If not, move on.
-        if not tree.get_by_name(filename):
-            continue
-
-        # Is filename different in all of the parent commits?
- parent_commits = list(map(parents.get, range(0, parents.get_size()))) - is_diff = all([is_parent_diff(repo, filename, tree, pc) for pc in parent_commits]) - # No changes from parents, skip it. - if not is_diff: - continue - - tag = find_commit_tag(repo, branch, filename, commit.get_id()) - try: - commits.append(get_commit_details(commit, get_revision_from_tag(tag))) - if limit and len(commits) > limit: - break - except CommitTimeValError: - # Skip any commits that have trouble converting the time - # TODO - log details about this failure - pass - - # These will be in reverse time sort order thanks to revwalk - return commits - -def get_commit_details(commit, revision=None): - """Return the details about a specific commit. - - :param commit: The commit to get details from - :type commit: Git.Commit - :param revision: Optional commit revision - :type revision: int - :returns: Details about the commit - :rtype: CommitDetails - :raises: CommitTimeValError or Ggit exceptions - - """ - message = commit.get_message() - commit_str = commit.get_id().to_string() - sig = commit.get_committer() - - datetime = sig.get_time() - # XXX What do we do with timezone? - _timezone = sig.get_time_zone() - time_str = datetime.format_iso8601() - if not time_str: - raise CommitTimeValError - - return CommitDetails(commit_str, time_str, message, revision) - -def is_parent_diff(repo, filename, tree, parent): - """Check to see if the commit is different from its parents - - :param repo: Open repository - :type repo: Git.Repository - :param filename: filename to revert - :type filename: str - :param tree: The commit's tree - :type tree: Git.Tree - :param parent: The commit's parent commit - :type parent: Git.Commit - :retuns: True if filename in the commit is different from its parents - :rtype: bool - """ - diff_opts = Git.DiffOptions.new() - diff_opts.set_pathspec([filename]) - diff = Git.Diff.new_tree_to_tree(repo, parent.get_tree(), tree, diff_opts) - return diff.get_num_deltas() > 0 - -def find_field_value(field, value, lst): - """Find a field matching value in the list of dicts. - - :param field: field to search for - :type field: str - :param value: value to match in the field - :type value: str - :param lst: List of dict's with field - :type lst: list of dict - :returns: First dict with matching field:value, or None - :rtype: dict or None - - Used to return a specific entry from a list that looks like this: - - [{"name": "one", "attr": "green"}, ...] - - find_field_value("name", "one", lst) will return the matching dict. - """ - for d in lst: - if d.get(field) and d.get(field) == value: - return d - return None - -def find_name(name, lst): - """Find the dict matching the name in a list and return it. - - :param name: Name to search for - :type name: str - :param lst: List of dict's with "name" field - :type lst: list of dict - :returns: First dict with matching name, or None - :rtype: dict or None - - This is just a wrapper for find_field_value with field set to "name" - """ - return find_field_value("name", name, lst) - -def find_recipe_obj(path, recipe, default=None): - """Find a recipe object - - :param path: A list of dict field names - :type path: list of str - :param recipe: The recipe to search - :type recipe: Recipe - :param default: The value to return if it is not found - :type default: Any - - Return the object found by applying the path to the dicts in the recipe, or - return the default if it doesn't exist. - - eg. 
{"customizations": {"hostname": "foo", "users": [...]}} - - find_recipe_obj(["customizations", "hostname"], recipe, "") - """ - o = recipe - try: - for p in path: - if not o.get(p): - return default - o = o.get(p) - except AttributeError: - return default - - return o - -def diff_lists(title, field, old_items, new_items): - """Return the differences between two lists of dicts. - - :param title: Title of the entry - :type title: str - :param field: Field to use as the key for comparisons - :type field: str - :param old_items: List of item dicts with "name" field - :type old_items: list(dict) - :param new_items: List of item dicts with "name" field - :type new_items: list(dict) - :returns: List of diff dicts with old/new entries - :rtype: list(dict) - """ - diffs = [] - old_fields= set(m[field] for m in old_items) - new_fields= set(m[field] for m in new_items) - - added_items = new_fields.difference(old_fields) - added_items = sorted(added_items, key=lambda n: n.lower()) - - removed_items = old_fields.difference(new_fields) - removed_items = sorted(removed_items, key=lambda n: n.lower()) - - same_items = old_fields.intersection(new_fields) - same_items = sorted(same_items, key=lambda n: n.lower()) - - for v in added_items: - diffs.append({"old":None, - "new":{title:find_field_value(field, v, new_items)}}) - - for v in removed_items: - diffs.append({"old":{title:find_field_value(field, v, old_items)}, - "new":None}) - - for v in same_items: - old_item = find_field_value(field, v, old_items) - new_item = find_field_value(field, v, new_items) - if old_item != new_item: - diffs.append({"old":{title:old_item}, - "new":{title:new_item}}) - - return diffs - -def customizations_diff(old_recipe, new_recipe): - """Diff the customizations sections from two versions of a recipe - """ - diffs = [] - old_keys = set(old_recipe.get("customizations", {}).keys()) - new_keys = set(new_recipe.get("customizations", {}).keys()) - - added_keys = new_keys.difference(old_keys) - added_keys = sorted(added_keys, key=lambda n: n.lower()) - - removed_keys = old_keys.difference(new_keys) - removed_keys = sorted(removed_keys, key=lambda n: n.lower()) - - same_keys = old_keys.intersection(new_keys) - same_keys = sorted(same_keys, key=lambda n: n.lower()) - - for v in added_keys: - diffs.append({"old": None, - "new": {"Customizations."+v: new_recipe["customizations"][v]}}) - - for v in removed_keys: - diffs.append({"old": {"Customizations."+v: old_recipe["customizations"][v]}, - "new": None}) - - for v in same_keys: - if new_recipe["customizations"][v] == old_recipe["customizations"][v]: - continue - - if type(new_recipe["customizations"][v]) == type([]): - # Lists of dicts need to use diff_lists - # sshkey uses 'user', user and group use 'name' - if "user" in new_recipe["customizations"][v][0]: - field_name = "user" - elif "name" in new_recipe["customizations"][v][0]: - field_name = "name" - else: - raise RuntimeError("%s list has unrecognized key, not 'name' or 'user'" % "customizations."+v) - - diffs.extend(diff_lists("Customizations."+v, field_name, old_recipe["customizations"][v], new_recipe["customizations"][v])) - else: - diffs.append({"old": {"Customizations."+v: old_recipe["customizations"][v]}, - "new": {"Customizations."+v: new_recipe["customizations"][v]}}) - - return diffs - - -def recipe_diff(old_recipe, new_recipe): - """Diff two versions of a recipe - - :param old_recipe: The old version of the recipe - :type old_recipe: Recipe - :param new_recipe: The new version of the recipe - :type new_recipe: Recipe - 
:returns: A list of diff dict entries with old/new - :rtype: list(dict) - """ - - diffs = [] - # These cannot be added or removed, just different - for element in ["name", "description", "version"]: - if old_recipe[element] != new_recipe[element]: - diffs.append({"old":{element.title():old_recipe[element]}, - "new":{element.title():new_recipe[element]}}) - - # These lists always exist - diffs.extend(diff_lists("Module", "name", old_recipe["modules"], new_recipe["modules"])) - diffs.extend(diff_lists("Package", "name", old_recipe["packages"], new_recipe["packages"])) - diffs.extend(diff_lists("Group", "name", old_recipe["groups"], new_recipe["groups"])) - - # The customizations section can contain a number of different types - diffs.extend(customizations_diff(old_recipe, new_recipe)) - - # repos contains keys that are lists (eg. [[repos.git]]) - diffs.extend(diff_lists("Repos.git", "rpmname", - find_recipe_obj(["repos", "git"], old_recipe, []), - find_recipe_obj(["repos", "git"], new_recipe, []))) - - return diffs - -def repo_file_exists(repo, branch, filename): - """Return True if the filename exists on the branch - - :param repo: Open repository - :type repo: Git.Repository - :param branch: Branch name - :type branch: str - :param filename: Filename to check - :type filename: str - :returns: True if the filename exists on the HEAD of the branch, False otherwise. - :rtype: bool - """ - commit = head_commit(repo, branch).get_id().to_string() - commit_id = Git.OId.new_from_string(commit) - commit_obj = repo.lookup(commit_id, Git.Commit) - tree = commit_obj.get_tree() - return tree.get_by_name(filename) is not None diff --git a/src/pylorax/api/regexes.py b/src/pylorax/api/regexes.py deleted file mode 100644 index 5cfedf73..00000000 --- a/src/pylorax/api/regexes.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Copyright (C) 2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -import re - -# These are the characters that we allow to be passed in via the -# API calls. -VALID_API_STRING = re.compile(r'^[a-zA-Z0-9_,.:+*-]+$') - -# These are the characters that we allow to be used in blueprint names. -VALID_BLUEPRINT_NAME = re.compile(r'^[a-zA-Z0-9._-]+$') \ No newline at end of file diff --git a/src/pylorax/api/server.py b/src/pylorax/api/server.py deleted file mode 100644 index 1c42d94e..00000000 --- a/src/pylorax/api/server.py +++ /dev/null @@ -1,103 +0,0 @@ -# -# Copyright (C) 2017-2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -import logging -log = logging.getLogger("lorax-composer") - -from collections import namedtuple -from flask import Flask, jsonify, redirect, send_from_directory -from glob import glob -import os -import werkzeug - -from pylorax import vernum -from pylorax.api.errors import HTTP_ERROR -from pylorax.api.v0 import v0_api -from pylorax.api.v1 import v1_api -from pylorax.sysutils import joinpaths - -GitLock = namedtuple("GitLock", ["repo", "lock", "dir"]) - -server = Flask(__name__) - -__all__ = ["server", "GitLock"] - -@server.route('/') -def server_root(): - redirect("/api/docs/") - -@server.route("/api/docs/") -@server.route("/api/docs/") -def api_docs(path=None): - # Find the html docs - try: - # This assumes it is running from the source tree - docs_path = os.path.abspath(joinpaths(os.path.dirname(__file__), "../../../docs/html")) - except IndexError: - docs_path = glob("/usr/share/doc/lorax-*/html/")[0] - - if not path: - path="index.html" - return send_from_directory(docs_path, path) - -@server.route("/api/status") -def api_status(): - """ - `/api/status` - ^^^^^^^^^^^^^^^^ - Return the status of the API Server:: - - { "api": "0", - "build": "devel", - "db_supported": true, - "db_version": "0", - "schema_version": "0", - "backend": "lorax-composer", - "msgs": []} - - The 'msgs' field can be a list of strings describing startup problems or status that - should be displayed to the user. eg. if the compose templates are not depsolving properly - the errors will be in 'msgs'. - """ - return jsonify(backend="lorax-composer", - build=vernum, - api="1", - db_version="0", - schema_version="0", - db_supported=True, - msgs=server.config["TEMPLATE_ERRORS"]) - -@server.errorhandler(werkzeug.exceptions.HTTPException) -def bad_request(error): - return jsonify(status=False, errors=[{ "id": HTTP_ERROR, "code": error.code, "msg": error.name }]), error.code - -# Register the v0 API on /api/v0/ -server.register_blueprint(v0_api, url_prefix="/api/v0/") - -# Register the v1 API on /api/v1/ -# Use v0 routes by default -skip_rules = [ - "/compose", - "/compose/queue", - "/compose/finished", - "/compose/failed", - "/compose/status/", - "/compose/info/", - "/projects/source/info/", - "/projects/source/new", -] -server.register_blueprint(v0_api, url_prefix="/api/v1/", skip_rules=skip_rules) -server.register_blueprint(v1_api, url_prefix="/api/v1/") diff --git a/src/pylorax/api/timestamp.py b/src/pylorax/api/timestamp.py deleted file mode 100644 index 4cc52e6a..00000000 --- a/src/pylorax/api/timestamp.py +++ /dev/null @@ -1,51 +0,0 @@ -# -# Copyright (C) 2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# - -import time - -from pylorax.sysutils import joinpaths -import pylorax.api.toml as toml - -TS_CREATED = "created" -TS_STARTED = "started" -TS_FINISHED = "finished" - -def write_timestamp(destdir, ty): - path = joinpaths(destdir, "times.toml") - - try: - contents = toml.loads(open(path, "r").read()) - except IOError: - contents = toml.loads("") - - if ty == TS_CREATED: - contents[TS_CREATED] = time.time() - elif ty == TS_STARTED: - contents[TS_STARTED] = time.time() - elif ty == TS_FINISHED: - contents[TS_FINISHED] = time.time() - - with open(path, "w") as f: - f.write(toml.dumps(contents)) - -def timestamp_dict(destdir): - path = joinpaths(destdir, "times.toml") - - try: - return toml.loads(open(path, "r").read()) - except IOError: - return toml.loads("") diff --git a/src/pylorax/api/toml.py b/src/pylorax/api/toml.py deleted file mode 100644 index a87f233d..00000000 --- a/src/pylorax/api/toml.py +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# - -import toml - -class TomlError(toml.TomlDecodeError): - pass - -def loads(s): - if isinstance(s, bytes): - s = s.decode('utf-8') - try: - return toml.loads(s) - except toml.TomlDecodeError as e: - raise TomlError(e.msg, e.doc, e.pos) - -def dumps(o): - # strip the result, because `toml.dumps` adds a lot of newlines - return toml.dumps(o, encoder=toml.TomlEncoder(dict)).strip() - -def load(file): - try: - return toml.load(file) - except toml.TomlDecodeError as e: - raise TomlError(e.msg, e.doc, e.pos) - -def dump(o, file): - return toml.dump(o, file) diff --git a/src/pylorax/api/utils.py b/src/pylorax/api/utils.py deleted file mode 100644 index ccab5ca2..00000000 --- a/src/pylorax/api/utils.py +++ /dev/null @@ -1,49 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
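The removed ``pylorax.api.toml`` module above is a thin wrapper over the third-party ``toml`` package; a minimal sketch of the round-trip it provided, assuming only that package is installed::

    import toml

    try:
        contents = toml.loads('created = 1517362647.45\n')
    except toml.TomlDecodeError:    # the wrapper re-raised this as TomlError
        contents = {}
    contents["finished"] = 1517363500.12
    # the wrapper stripped dumps() output, since toml.dumps() adds extra newlines
    print(toml.dumps(contents).strip())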
-""" API utility functions -""" -from pylorax.api.recipes import RecipeError, RecipeFileError, read_recipe_commit - -def take_limits(iterable, offset, limit): - """ Apply offset and limit to an iterable object - - :param iterable: The object to limit - :type iterable: iter - :param offset: The number of items to skip - :type offset: int - :param limit: The total number of items to return - :type limit: int - :returns: A subset of the iterable - """ - return iterable[offset:][:limit] - -def blueprint_exists(api, branch, blueprint_name): - """Return True if the blueprint exists - - :param api: flask object - :type api: Flask - :param branch: Branch name - :type branch: str - :param recipe_name: Recipe name to read - :type recipe_name: str - """ - try: - with api.config["GITLOCK"].lock: - read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) - - return True - except (RecipeError, RecipeFileError): - return False diff --git a/src/pylorax/api/v0.py b/src/pylorax/api/v0.py deleted file mode 100644 index 813eb2e4..00000000 --- a/src/pylorax/api/v0.py +++ /dev/null @@ -1,1997 +0,0 @@ -# -# Copyright (C) 2017-2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -""" Setup v0 of the API server - -v0_api() must be called to setup the API routes for Flask - -Status Responses ----------------- - -Some requests only return a status/error response. - - The response will be a status response with `status` set to true, or an - error response with it set to false and an error message included. - - Example response:: - - { - "status": true - } - - Error response:: - - { - "errors": ["ggit-error: Failed to remove entry. File isn't in the tree - jboss.toml (-1)"] - "status": false - } - -API Routes ----------- - -All of the blueprints routes support the optional `branch` argument. If it is not -used then the API will use the `master` branch for blueprints. If you want to create -a new branch use the `new` or `workspace` routes with ?branch= to -store the new blueprint on the new branch. 
-""" - -import logging -log = logging.getLogger("lorax-composer") - -import os -from flask import jsonify, request, Response, send_file -from flask import current_app as api - -from pylorax.sysutils import joinpaths -from pylorax.api.checkparams import checkparams -from pylorax.api.compose import start_build, compose_types -from pylorax.api.errors import * # pylint: disable=wildcard-import,unused-wildcard-import -from pylorax.api.flask_blueprint import BlueprintSkip -from pylorax.api.projects import projects_list, projects_info, projects_depsolve -from pylorax.api.projects import modules_list, modules_info, ProjectsError, repo_to_source -from pylorax.api.projects import get_repo_sources, delete_repo_source, new_repo_source -from pylorax.api.queue import queue_status, build_status, uuid_delete, uuid_status, uuid_info -from pylorax.api.queue import uuid_tar, uuid_image, uuid_cancel, uuid_log -from pylorax.api.recipes import list_branch_files, read_recipe_commit, recipe_filename, list_commits -from pylorax.api.recipes import recipe_from_dict, recipe_from_toml, commit_recipe, delete_recipe, revert_recipe -from pylorax.api.recipes import tag_recipe_commit, recipe_diff, RecipeFileError -from pylorax.api.regexes import VALID_API_STRING, VALID_BLUEPRINT_NAME -import pylorax.api.toml as toml -from pylorax.api.utils import take_limits, blueprint_exists -from pylorax.api.workspace import workspace_read, workspace_write, workspace_delete, workspace_exists - -# The API functions don't actually get called by any code here -# pylint: disable=unused-variable - -# Create the v0 routes Blueprint with skip_routes support -v0_api = BlueprintSkip("v0_routes", __name__) - -@v0_api.route("/blueprints/list") -def v0_blueprints_list(): - """List the available blueprints on a branch. - - **/api/v0/blueprints/list** - - List the available blueprints:: - - { "limit": 20, - "offset": 0, - "blueprints": [ - "atlas", - "development", - "glusterfs", - "http-server", - "jboss", - "kubernetes" ], - "total": 6 } - """ - branch = request.args.get("branch", "master") - if VALID_API_STRING.match(branch) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 - - try: - limit = int(request.args.get("limit", "20")) - offset = int(request.args.get("offset", "0")) - except ValueError as e: - return jsonify(status=False, errors=[{"id": BAD_LIMIT_OR_OFFSET, "msg": str(e)}]), 400 - - with api.config["GITLOCK"].lock: - blueprints = [f[:-5] for f in list_branch_files(api.config["GITLOCK"].repo, branch)] - limited_blueprints = take_limits(blueprints, offset, limit) - return jsonify(blueprints=limited_blueprints, limit=limit, offset=offset, total=len(blueprints)) - -@v0_api.route("/blueprints/info", defaults={'blueprint_names': ""}) -@v0_api.route("/blueprints/info/") -@checkparams([("blueprint_names", "", "no blueprint names given")]) -def v0_blueprints_info(blueprint_names): - """Return the contents of the blueprint, or a list of blueprints - - **/api/v0/blueprints/info/[?format=]** - - Return the JSON representation of the blueprint. This includes 3 top level - objects. `changes` which lists whether or not the workspace is different from - the most recent commit. `blueprints` which lists the JSON representation of the - blueprint, and `errors` which will list any errors, like non-existant blueprints. - - By default the response is JSON, but if `?format=toml` is included in the URL's - arguments it will return the response as the blueprint's raw TOML content. 
- *Unless* there is an error which will only return a 400 and a standard error - `Status Responses`_. - - If there is an error when JSON is requested the successful blueprints and the - errors will both be returned. - - Example of json response:: - - { - "changes": [ - { - "changed": false, - "name": "glusterfs" - } - ], - "errors": [], - "blueprints": [ - { - "description": "An example GlusterFS server with samba", - "modules": [ - { - "name": "glusterfs", - "version": "3.7.*" - }, - { - "name": "glusterfs-cli", - "version": "3.7.*" - } - ], - "name": "glusterfs", - "packages": [ - { - "name": "2ping", - "version": "3.2.1" - }, - { - "name": "samba", - "version": "4.2.*" - } - ], - "version": "0.0.6" - } - ] - } - - Error example:: - - { - "changes": [], - "errors": ["ggit-error: the path 'missing.toml' does not exist in the given tree (-3)"] - "blueprints": [] - } - """ - if any(VALID_BLUEPRINT_NAME.match(blueprint_name) is None for blueprint_name in blueprint_names.split(',')): - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - branch = request.args.get("branch", "master") - if VALID_API_STRING.match(branch) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 - - out_fmt = request.args.get("format", "json") - if VALID_API_STRING.match(out_fmt) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in format argument"}]), 400 - - blueprints = [] - changes = [] - errors = [] - for blueprint_name in [n.strip() for n in blueprint_names.split(",")]: - exceptions = [] - # Get the workspace version (if it exists) - try: - with api.config["GITLOCK"].lock: - ws_blueprint = workspace_read(api.config["GITLOCK"].repo, branch, blueprint_name) - except Exception as e: - ws_blueprint = None - exceptions.append(str(e)) - log.error("(v0_blueprints_info) %s", str(e)) - - # Get the git version (if it exists) - try: - with api.config["GITLOCK"].lock: - git_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) - except RecipeFileError as e: - # Adding an exception would be redundant, skip it - git_blueprint = None - log.error("(v0_blueprints_info) %s", str(e)) - except Exception as e: - git_blueprint = None - exceptions.append(str(e)) - log.error("(v0_blueprints_info) %s", str(e)) - - if not ws_blueprint and not git_blueprint: - # Neither blueprint, return an error - errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "%s: %s" % (blueprint_name, ", ".join(exceptions))}) - elif ws_blueprint and not git_blueprint: - # No git blueprint, return the workspace blueprint - changes.append({"name":blueprint_name, "changed":True}) - blueprints.append(ws_blueprint) - elif not ws_blueprint and git_blueprint: - # No workspace blueprint, no change, return the git blueprint - changes.append({"name":blueprint_name, "changed":False}) - blueprints.append(git_blueprint) - else: - # Both exist, maybe changed, return the workspace blueprint - changes.append({"name":blueprint_name, "changed":ws_blueprint != git_blueprint}) - blueprints.append(ws_blueprint) - - # Sort all the results by case-insensitive blueprint name - changes = sorted(changes, key=lambda c: c["name"].lower()) - blueprints = sorted(blueprints, key=lambda r: r["name"].lower()) - - if out_fmt == "toml": - if errors: - # If there are errors they need to be reported, use JSON and 400 for this - return jsonify(status=False, errors=errors), 400 - else: - # With TOML 
output we just want to dump the raw blueprint, skipping the rest. - return "\n\n".join([r.toml() for r in blueprints]) - else: - return jsonify(changes=changes, blueprints=blueprints, errors=errors) - -@v0_api.route("/blueprints/changes", defaults={'blueprint_names': ""}) -@v0_api.route("/blueprints/changes/") -@checkparams([("blueprint_names", "", "no blueprint names given")]) -def v0_blueprints_changes(blueprint_names): - """Return the changes to a blueprint or list of blueprints - - **/api/v0/blueprints/changes/[?offset=0&limit=20]** - - Return the commits to a blueprint. By default it returns the first 20 commits, this - can be changed by passing `offset` and/or `limit`. The response will include the - commit hash, summary, timestamp, and optionally the revision number. The commit - hash can be passed to `/api/v0/blueprints/diff/` to retrieve the exact changes. - - Example:: - - { - "errors": [], - "limit": 20, - "offset": 0, - "blueprints": [ - { - "changes": [ - { - "commit": "e083921a7ed1cf2eec91ad12b9ad1e70ef3470be", - "message": "blueprint glusterfs, version 0.0.6 saved.", - "revision": null, - "timestamp": "2017-11-23T00:18:13Z" - }, - { - "commit": "cee5f4c20fc33ea4d54bfecf56f4ad41ad15f4f3", - "message": "blueprint glusterfs, version 0.0.5 saved.", - "revision": null, - "timestamp": "2017-11-11T01:00:28Z" - }, - { - "commit": "29b492f26ed35d80800b536623bafc51e2f0eff2", - "message": "blueprint glusterfs, version 0.0.4 saved.", - "revision": null, - "timestamp": "2017-11-11T00:28:30Z" - }, - { - "commit": "03374adbf080fe34f5c6c29f2e49cc2b86958bf2", - "message": "blueprint glusterfs, version 0.0.3 saved.", - "revision": null, - "timestamp": "2017-11-10T23:15:52Z" - }, - { - "commit": "0e08ecbb708675bfabc82952599a1712a843779d", - "message": "blueprint glusterfs, version 0.0.2 saved.", - "revision": null, - "timestamp": "2017-11-10T23:14:56Z" - }, - { - "commit": "3e11eb87a63d289662cba4b1804a0947a6843379", - "message": "blueprint glusterfs, version 0.0.1 saved.", - "revision": null, - "timestamp": "2017-11-08T00:02:47Z" - } - ], - "name": "glusterfs", - "total": 6 - } - ] - } - """ - if any(VALID_BLUEPRINT_NAME.match(blueprint_name) is None for blueprint_name in blueprint_names.split(',')): - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - branch = request.args.get("branch", "master") - if VALID_API_STRING.match(branch) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 - - try: - limit = int(request.args.get("limit", "20")) - offset = int(request.args.get("offset", "0")) - except ValueError as e: - return jsonify(status=False, errors=[{"id": BAD_LIMIT_OR_OFFSET, "msg": str(e)}]), 400 - - blueprints = [] - errors = [] - for blueprint_name in [n.strip() for n in blueprint_names.split(",")]: - filename = recipe_filename(blueprint_name) - try: - with api.config["GITLOCK"].lock: - commits = list_commits(api.config["GITLOCK"].repo, branch, filename) - except Exception as e: - errors.append({"id": BLUEPRINTS_ERROR, "msg": "%s: %s" % (blueprint_name, str(e))}) - log.error("(v0_blueprints_changes) %s", str(e)) - else: - if commits: - limited_commits = take_limits(commits, offset, limit) - blueprints.append({"name":blueprint_name, "changes":limited_commits, "total":len(commits)}) - else: - # no commits means there is no blueprint in the branch - errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "%s" % blueprint_name}) - - blueprints = sorted(blueprints, 
key=lambda r: r["name"].lower()) - - return jsonify(blueprints=blueprints, errors=errors, offset=offset, limit=limit) - -@v0_api.route("/blueprints/new", methods=["POST"]) -def v0_blueprints_new(): - """Commit a new blueprint - - **POST /api/v0/blueprints/new** - - Create a new blueprint, or update an existing blueprint. This supports both JSON and TOML - for the blueprint format. The blueprint should be in the body of the request with the - `Content-Type` header set to either `application/json` or `text/x-toml`. - - The response will be a status response with `status` set to true, or an - error response with it set to false and an error message included. - """ - branch = request.args.get("branch", "master") - if VALID_API_STRING.match(branch) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 - - try: - if request.headers['Content-Type'] == "text/x-toml": - blueprint = recipe_from_toml(request.data) - else: - blueprint = recipe_from_dict(request.get_json(cache=False)) - - if VALID_BLUEPRINT_NAME.match(blueprint["name"]) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - with api.config["GITLOCK"].lock: - commit_recipe(api.config["GITLOCK"].repo, branch, blueprint) - - # Read the blueprint with new version and write it to the workspace - blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint["name"]) - workspace_write(api.config["GITLOCK"].repo, branch, blueprint) - except Exception as e: - log.error("(v0_blueprints_new) %s", str(e)) - return jsonify(status=False, errors=[{"id": BLUEPRINTS_ERROR, "msg": str(e)}]), 400 - else: - return jsonify(status=True) - -@v0_api.route("/blueprints/delete", defaults={'blueprint_name': ""}, methods=["DELETE"]) -@v0_api.route("/blueprints/delete/", methods=["DELETE"]) -@checkparams([("blueprint_name", "", "no blueprint name given")]) -def v0_blueprints_delete(blueprint_name): - """Delete a blueprint from git - - **DELETE /api/v0/blueprints/delete/** - - Delete a blueprint. The blueprint is deleted from the branch, and will no longer - be listed by the `list` route. A blueprint can be undeleted using the `undo` route - to revert to a previous commit. This will also delete the workspace copy of the - blueprint. - - The response will be a status response with `status` set to true, or an - error response with it set to false and an error message included. - """ - if VALID_BLUEPRINT_NAME.match(blueprint_name) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - branch = request.args.get("branch", "master") - if VALID_API_STRING.match(branch) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 - - try: - with api.config["GITLOCK"].lock: - workspace_delete(api.config["GITLOCK"].repo, branch, blueprint_name) - delete_recipe(api.config["GITLOCK"].repo, branch, blueprint_name) - except Exception as e: - log.error("(v0_blueprints_delete) %s", str(e)) - return jsonify(status=False, errors=[{"id": BLUEPRINTS_ERROR, "msg": str(e)}]), 400 - else: - return jsonify(status=True) - -@v0_api.route("/blueprints/workspace", methods=["POST"]) -def v0_blueprints_workspace(): - """Write a blueprint to the workspace - - **POST /api/v0/blueprints/workspace** - - Write a blueprint to the temporary workspace. 
This works exactly the same as `new` except - that it does not create a commit. JSON and TOML bodies are supported. - - The workspace is meant to be used as a temporary blueprint storage for clients. - It will be read by the `info` and `diff` routes if it is different from the - most recent commit. - - The response will be a status response with `status` set to true, or an - error response with it set to false and an error message included. - """ - branch = request.args.get("branch", "master") - if VALID_API_STRING.match(branch) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 - - try: - if request.headers['Content-Type'] == "text/x-toml": - blueprint = recipe_from_toml(request.data) - else: - blueprint = recipe_from_dict(request.get_json(cache=False)) - - if VALID_BLUEPRINT_NAME.match(blueprint["name"]) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - with api.config["GITLOCK"].lock: - workspace_write(api.config["GITLOCK"].repo, branch, blueprint) - except Exception as e: - log.error("(v0_blueprints_workspace) %s", str(e)) - return jsonify(status=False, errors=[{"id": BLUEPRINTS_ERROR, "msg": str(e)}]), 400 - else: - return jsonify(status=True) - -@v0_api.route("/blueprints/workspace", defaults={'blueprint_name': ""}, methods=["DELETE"]) -@v0_api.route("/blueprints/workspace/", methods=["DELETE"]) -@checkparams([("blueprint_name", "", "no blueprint name given")]) -def v0_blueprints_delete_workspace(blueprint_name): - """Delete a blueprint from the workspace - - **DELETE /api/v0/blueprints/workspace/** - - Remove the temporary workspace copy of a blueprint. The `info` route will now - return the most recent commit of the blueprint. Any changes that were in the - workspace will be lost. - - The response will be a status response with `status` set to true, or an - error response with it set to false and an error message included. - """ - if VALID_BLUEPRINT_NAME.match(blueprint_name) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - branch = request.args.get("branch", "master") - if VALID_API_STRING.match(branch) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 - - try: - with api.config["GITLOCK"].lock: - if not workspace_exists(api.config["GITLOCK"].repo, branch, blueprint_name): - raise Exception("Unknown blueprint: %s" % blueprint_name) - - workspace_delete(api.config["GITLOCK"].repo, branch, blueprint_name) - except Exception as e: - log.error("(v0_blueprints_delete_workspace) %s", str(e)) - return jsonify(status=False, errors=[{"id": BLUEPRINTS_ERROR, "msg": str(e)}]), 400 - else: - return jsonify(status=True) - -@v0_api.route("/blueprints/undo", defaults={'blueprint_name': "", 'commit': ""}, methods=["POST"]) -@v0_api.route("/blueprints/undo/", defaults={'commit': ""}, methods=["POST"]) -@v0_api.route("/blueprints/undo//", methods=["POST"]) -@checkparams([("blueprint_name", "", "no blueprint name given"), - ("commit", "", "no commit ID given")]) -def v0_blueprints_undo(blueprint_name, commit): - """Undo changes to a blueprint by reverting to a previous commit. - - **POST /api/v0/blueprints/undo//** - - This will revert the blueprint to a previous commit. The commit hash from the `changes` - route can be used in this request. 
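For instance, reverting ``glusterfs`` to one of the commits shown in the ``changes`` example above (a sketch assuming ``requests_unixsocket``)::

    import requests_unixsocket

    session = requests_unixsocket.Session()
    # hash taken from a /blueprints/changes/glusterfs response
    commit = "cee5f4c20fc33ea4d54bfecf56f4ad41ad15f4f3"
    r = session.post("http+unix://%2Frun%2Fweldr%2Fapi.socket"
                     "/api/v0/blueprints/undo/glusterfs/" + commit)
    print(r.json())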
- - The response will be a status response with `status` set to true, or an - error response with it set to false and an error message included. - """ - if VALID_BLUEPRINT_NAME.match(blueprint_name) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - if VALID_BLUEPRINT_NAME.match(commit) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - branch = request.args.get("branch", "master") - if VALID_API_STRING.match(branch) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 - - try: - with api.config["GITLOCK"].lock: - revert_recipe(api.config["GITLOCK"].repo, branch, blueprint_name, commit) - - # Read the new recipe and write it to the workspace - blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) - workspace_write(api.config["GITLOCK"].repo, branch, blueprint) - except Exception as e: - log.error("(v0_blueprints_undo) %s", str(e)) - return jsonify(status=False, errors=[{"id": UNKNOWN_COMMIT, "msg": str(e)}]), 400 - else: - return jsonify(status=True) - -@v0_api.route("/blueprints/tag", defaults={'blueprint_name': ""}, methods=["POST"]) -@v0_api.route("/blueprints/tag/", methods=["POST"]) -@checkparams([("blueprint_name", "", "no blueprint name given")]) -def v0_blueprints_tag(blueprint_name): - """Tag a blueprint's latest blueprint commit as a 'revision' - - **POST /api/v0/blueprints/tag/** - - Tag a blueprint as a new release. This uses git tags with a special format. - `refs/tags///r`. Only the most recent blueprint commit - can be tagged. Revisions start at 1 and increment for each new tag - (per-blueprint). If the commit has already been tagged it will return false. - - The response will be a status response with `status` set to true, or an - error response with it set to false and an error message included. - """ - if VALID_BLUEPRINT_NAME.match(blueprint_name) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - branch = request.args.get("branch", "master") - if VALID_API_STRING.match(branch) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 - - try: - with api.config["GITLOCK"].lock: - tag_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) - except RecipeFileError as e: - log.error("(v0_blueprints_tag) %s", str(e)) - return jsonify(status=False, errors=[{"id": UNKNOWN_BLUEPRINT, "msg": str(e)}]), 400 - except Exception as e: - log.error("(v0_blueprints_tag) %s", str(e)) - return jsonify(status=False, errors=[{"id": BLUEPRINTS_ERROR, "msg": str(e)}]), 400 - else: - return jsonify(status=True) - -@v0_api.route("/blueprints/diff", defaults={'blueprint_name': "", 'from_commit': "", 'to_commit': ""}) -@v0_api.route("/blueprints/diff/", defaults={'from_commit': "", 'to_commit': ""}) -@v0_api.route("/blueprints/diff//", defaults={'to_commit': ""}) -@v0_api.route("/blueprints/diff///") -@checkparams([("blueprint_name", "", "no blueprint name given"), - ("from_commit", "", "no from commit ID given"), - ("to_commit", "", "no to commit ID given")]) -def v0_blueprints_diff(blueprint_name, from_commit, to_commit): - """Return the differences between two commits of a blueprint - - **/api/v0/blueprints/diff///** - - Return the differences between two commits, or the workspace. 
The commit hash - from the `changes` response can be used here, or several special strings: - - - NEWEST will select the newest git commit. This works for `from_commit` or `to_commit` - - WORKSPACE will select the workspace copy. This can only be used in `to_commit` - - eg. `/api/v0/blueprints/diff/glusterfs/NEWEST/WORKSPACE` will return the differences - between the most recent git commit and the contents of the workspace. - - Each entry in the response's diff object contains the old blueprint value and the new one. - If old is null and new is set, then it was added. - If new is null and old is set, then it was removed. - If both are set, then it was changed. - - The old/new entries will have the name of the blueprint field that was changed. This - can be one of: Name, Description, Version, Module, or Package. - The contents for these will be the old/new values for them. - - In the example below the version was changed and the ping package was added. - - Example:: - - { - "diff": [ - { - "new": { - "Version": "0.0.6" - }, - "old": { - "Version": "0.0.5" - } - }, - { - "new": { - "Package": { - "name": "ping", - "version": "3.2.1" - } - }, - "old": null - } - ] - } - """ - for s in [blueprint_name, from_commit, to_commit]: - if VALID_API_STRING.match(s) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - branch = request.args.get("branch", "master") - if VALID_API_STRING.match(branch) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 - - if not blueprint_exists(api, branch, blueprint_name): - return jsonify(status=False, errors=[{"id": UNKNOWN_BLUEPRINT, "msg": "Unknown blueprint name: %s" % blueprint_name}]) - - try: - if from_commit == "NEWEST": - with api.config["GITLOCK"].lock: - old_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) - else: - with api.config["GITLOCK"].lock: - old_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name, from_commit) - except Exception as e: - log.error("(v0_blueprints_diff) %s", str(e)) - return jsonify(status=False, errors=[{"id": UNKNOWN_COMMIT, "msg": str(e)}]), 400 - - try: - if to_commit == "WORKSPACE": - with api.config["GITLOCK"].lock: - new_blueprint = workspace_read(api.config["GITLOCK"].repo, branch, blueprint_name) - # If there is no workspace, use the newest commit instead - if not new_blueprint: - with api.config["GITLOCK"].lock: - new_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) - elif to_commit == "NEWEST": - with api.config["GITLOCK"].lock: - new_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) - else: - with api.config["GITLOCK"].lock: - new_blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name, to_commit) - except Exception as e: - log.error("(v0_blueprints_diff) %s", str(e)) - return jsonify(status=False, errors=[{"id": UNKNOWN_COMMIT, "msg": str(e)}]), 400 - - diff = recipe_diff(old_blueprint, new_blueprint) - return jsonify(diff=diff) - -@v0_api.route("/blueprints/freeze", defaults={'blueprint_names': ""}) -@v0_api.route("/blueprints/freeze/") -@checkparams([("blueprint_names", "", "no blueprint names given")]) -def v0_blueprints_freeze(blueprint_names): - """Return the blueprint with the exact modules and packages selected by depsolve - - **/api/v0/blueprints/freeze/** - - Return a JSON representation of the blueprint with the 
package and module versions set - to the exact versions chosen by depsolving the blueprint. - - Example:: - - { - "errors": [], - "blueprints": [ - { - "blueprint": { - "description": "An example GlusterFS server with samba", - "modules": [ - { - "name": "glusterfs", - "version": "3.8.4-18.4.el7.x86_64" - }, - { - "name": "glusterfs-cli", - "version": "3.8.4-18.4.el7.x86_64" - } - ], - "name": "glusterfs", - "packages": [ - { - "name": "ping", - "version": "2:3.2.1-2.el7.noarch" - }, - { - "name": "samba", - "version": "4.6.2-8.el7.x86_64" - } - ], - "version": "0.0.6" - } - } - ] - } - """ - if any(VALID_BLUEPRINT_NAME.match(blueprint_name) is None for blueprint_name in blueprint_names.split(',')): - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - branch = request.args.get("branch", "master") - if VALID_API_STRING.match(branch) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 - - out_fmt = request.args.get("format", "json") - if VALID_API_STRING.match(out_fmt) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in format argument"}]), 400 - - blueprints = [] - errors = [] - for blueprint_name in [n.strip() for n in sorted(blueprint_names.split(","), key=lambda n: n.lower())]: - # get the blueprint - # Get the workspace version (if it exists) - blueprint = None - try: - with api.config["GITLOCK"].lock: - blueprint = workspace_read(api.config["GITLOCK"].repo, branch, blueprint_name) - except Exception: - pass - - if not blueprint: - # No workspace version, get the git version (if it exists) - try: - with api.config["GITLOCK"].lock: - blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) - except RecipeFileError as e: - # adding an error here would be redundant, skip it - log.error("(v0_blueprints_freeze) %s", str(e)) - except Exception as e: - errors.append({"id": BLUEPRINTS_ERROR, "msg": "%s: %s" % (blueprint_name, str(e))}) - log.error("(v0_blueprints_freeze) %s", str(e)) - - # No blueprint found, skip it. - if not blueprint: - errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "%s: blueprint_not_found" % blueprint_name}) - continue - - # Combine modules and packages and depsolve the list - # TODO include the version/glob in the depsolving - module_nver = blueprint.module_nver - package_nver = blueprint.package_nver - projects = sorted(set(module_nver+package_nver), key=lambda p: p[0].lower()) - deps = [] - try: - with api.config["DNFLOCK"].lock: - deps = projects_depsolve(api.config["DNFLOCK"].dbo, projects, blueprint.group_names) - except ProjectsError as e: - errors.append({"id": BLUEPRINTS_ERROR, "msg": "%s: %s" % (blueprint_name, str(e))}) - log.error("(v0_blueprints_freeze) %s", str(e)) - - blueprints.append({"blueprint": blueprint.freeze(deps)}) - - if out_fmt == "toml": - # With TOML output we just want to dump the raw blueprint, skipping the rest. 
- return "\n\n".join([e["blueprint"].toml() for e in blueprints]) - else: - return jsonify(blueprints=blueprints, errors=errors) - -@v0_api.route("/blueprints/depsolve", defaults={'blueprint_names': ""}) -@v0_api.route("/blueprints/depsolve/") -@checkparams([("blueprint_names", "", "no blueprint names given")]) -def v0_blueprints_depsolve(blueprint_names): - """Return the dependencies for a blueprint - - **/api/v0/blueprints/depsolve/** - - Depsolve the blueprint using yum, return the blueprint used, and the NEVRAs of the packages - chosen to satisfy the blueprint's requirements. The response will include a list of results, - with the full dependency list in `dependencies`, the NEVRAs for the blueprint's direct modules - and packages in `modules`, and any error will be in `errors`. - - Example:: - - { - "errors": [], - "blueprints": [ - { - "dependencies": [ - { - "arch": "noarch", - "epoch": "0", - "name": "2ping", - "release": "2.el7", - "version": "3.2.1" - }, - { - "arch": "x86_64", - "epoch": "0", - "name": "acl", - "release": "12.el7", - "version": "2.2.51" - }, - { - "arch": "x86_64", - "epoch": "0", - "name": "audit-libs", - "release": "3.el7", - "version": "2.7.6" - }, - { - "arch": "x86_64", - "epoch": "0", - "name": "avahi-libs", - "release": "17.el7", - "version": "0.6.31" - }, - ... - ], - "modules": [ - { - "arch": "noarch", - "epoch": "0", - "name": "2ping", - "release": "2.el7", - "version": "3.2.1" - }, - { - "arch": "x86_64", - "epoch": "0", - "name": "glusterfs", - "release": "18.4.el7", - "version": "3.8.4" - }, - ... - ], - "blueprint": { - "description": "An example GlusterFS server with samba", - "modules": [ - { - "name": "glusterfs", - "version": "3.7.*" - }, - ... - } - } - ] - } - """ - if any(VALID_BLUEPRINT_NAME.match(blueprint_name) is None for blueprint_name in blueprint_names.split(',')): - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - branch = request.args.get("branch", "master") - if VALID_API_STRING.match(branch) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in branch argument"}]), 400 - - blueprints = [] - errors = [] - for blueprint_name in [n.strip() for n in sorted(blueprint_names.split(","), key=lambda n: n.lower())]: - # get the blueprint - # Get the workspace version (if it exists) - blueprint = None - try: - with api.config["GITLOCK"].lock: - blueprint = workspace_read(api.config["GITLOCK"].repo, branch, blueprint_name) - except Exception: - pass - - if not blueprint: - # No workspace version, get the git version (if it exists) - try: - with api.config["GITLOCK"].lock: - blueprint = read_recipe_commit(api.config["GITLOCK"].repo, branch, blueprint_name) - except RecipeFileError as e: - # adding an error here would be redundant, skip it - log.error("(v0_blueprints_depsolve) %s", str(e)) - except Exception as e: - errors.append({"id": BLUEPRINTS_ERROR, "msg": "%s: %s" % (blueprint_name, str(e))}) - log.error("(v0_blueprints_depsolve) %s", str(e)) - - # No blueprint found, skip it. 
- if not blueprint: - errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "%s: blueprint not found" % blueprint_name}) - continue - - # Combine modules and packages and depsolve the list - # TODO include the version/glob in the depsolving - module_nver = blueprint.module_nver - package_nver = blueprint.package_nver - projects = sorted(set(module_nver+package_nver), key=lambda p: p[0].lower()) - deps = [] - try: - with api.config["DNFLOCK"].lock: - deps = projects_depsolve(api.config["DNFLOCK"].dbo, projects, blueprint.group_names) - except ProjectsError as e: - errors.append({"id": BLUEPRINTS_ERROR, "msg": "%s: %s" % (blueprint_name, str(e))}) - log.error("(v0_blueprints_depsolve) %s", str(e)) - - # Get the NEVRA's of the modules and projects, add as "modules" - modules = [] - for dep in deps: - if dep["name"] in projects: - modules.append(dep) - modules = sorted(modules, key=lambda m: m["name"].lower()) - - blueprints.append({"blueprint":blueprint, "dependencies":deps, "modules":modules}) - - return jsonify(blueprints=blueprints, errors=errors) - -@v0_api.route("/projects/list") -def v0_projects_list(): - """List all of the available projects/packages - - **/api/v0/projects/list[?offset=0&limit=20]** - - List all of the available projects. By default this returns the first 20 items, - but this can be changed by setting the `offset` and `limit` arguments. - - Example:: - - { - "limit": 20, - "offset": 0, - "projects": [ - { - "description": "0 A.D. (pronounced \"zero ey-dee\") is a ...", - "homepage": "http://play0ad.com", - "name": "0ad", - "summary": "Cross-Platform RTS Game of Ancient Warfare", - "upstream_vcs": "UPSTREAM_VCS" - }, - ... - ], - "total": 21770 - } - """ - try: - limit = int(request.args.get("limit", "20")) - offset = int(request.args.get("offset", "0")) - except ValueError as e: - return jsonify(status=False, errors=[{"id": BAD_LIMIT_OR_OFFSET, "msg": str(e)}]), 400 - - try: - with api.config["DNFLOCK"].lock: - available = projects_list(api.config["DNFLOCK"].dbo) - except ProjectsError as e: - log.error("(v0_projects_list) %s", str(e)) - return jsonify(status=False, errors=[{"id": PROJECTS_ERROR, "msg": str(e)}]), 400 - - projects = take_limits(available, offset, limit) - return jsonify(projects=projects, offset=offset, limit=limit, total=len(available)) - -@v0_api.route("/projects/info", defaults={'project_names': ""}) -@v0_api.route("/projects/info/") -@checkparams([("project_names", "", "no project names given")]) -def v0_projects_info(project_names): - """Return detailed information about the listed projects - - **/api/v0/projects/info/** - - Return information about the comma-separated list of projects. It includes the description - of the package along with the list of available builds. 
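A short client-side sketch (assuming ``requests_unixsocket``; the comma-separated list here is illustrative)::

    import requests_unixsocket

    session = requests_unixsocket.Session()
    r = session.get("http+unix://%2Frun%2Fweldr%2Fapi.socket"
                    "/api/v0/projects/info/tar,bash")
    for proj in r.json()["projects"]:
        print(proj["name"], "-", proj["summary"])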
- - Example:: - - { - "projects": [ - { - "builds": [ - { - "arch": "x86_64", - "build_config_ref": "BUILD_CONFIG_REF", - "build_env_ref": "BUILD_ENV_REF", - "build_time": "2017-03-01T08:39:23", - "changelog": "- restore incremental backups correctly, files ...", - "epoch": "2", - "metadata": {}, - "release": "32.el7", - "source": { - "license": "GPLv3+", - "metadata": {}, - "source_ref": "SOURCE_REF", - "version": "1.26" - } - } - ], - "description": "The GNU tar program saves many ...", - "homepage": "http://www.gnu.org/software/tar/", - "name": "tar", - "summary": "A GNU file archiving program", - "upstream_vcs": "UPSTREAM_VCS" - } - ] - } - """ - if VALID_API_STRING.match(project_names) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - try: - with api.config["DNFLOCK"].lock: - projects = projects_info(api.config["DNFLOCK"].dbo, project_names.split(",")) - except ProjectsError as e: - log.error("(v0_projects_info) %s", str(e)) - return jsonify(status=False, errors=[{"id": PROJECTS_ERROR, "msg": str(e)}]), 400 - - if not projects: - msg = "one of the requested projects does not exist: %s" % project_names - log.error("(v0_projects_info) %s", msg) - return jsonify(status=False, errors=[{"id": UNKNOWN_PROJECT, "msg": msg}]), 400 - - return jsonify(projects=projects) - -@v0_api.route("/projects/depsolve", defaults={'project_names': ""}) -@v0_api.route("/projects/depsolve/") -@checkparams([("project_names", "", "no project names given")]) -def v0_projects_depsolve(project_names): - """Return detailed information about the listed projects - - **/api/v0/projects/depsolve/** - - Depsolve the comma-separated list of projects and return the list of NEVRAs needed - to satisfy the request. - - Example:: - - { - "projects": [ - { - "arch": "noarch", - "epoch": "0", - "name": "basesystem", - "release": "7.el7", - "version": "10.0" - }, - { - "arch": "x86_64", - "epoch": "0", - "name": "bash", - "release": "28.el7", - "version": "4.2.46" - }, - { - "arch": "x86_64", - "epoch": "0", - "name": "filesystem", - "release": "21.el7", - "version": "3.2" - }, - ... - ] - } - """ - if VALID_API_STRING.match(project_names) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - try: - with api.config["DNFLOCK"].lock: - deps = projects_depsolve(api.config["DNFLOCK"].dbo, [(n, "*") for n in project_names.split(",")], []) - except ProjectsError as e: - log.error("(v0_projects_depsolve) %s", str(e)) - return jsonify(status=False, errors=[{"id": PROJECTS_ERROR, "msg": str(e)}]), 400 - - if not deps: - msg = "one of the requested projects does not exist: %s" % project_names - log.error("(v0_projects_depsolve) %s", msg) - return jsonify(status=False, errors=[{"id": UNKNOWN_PROJECT, "msg": msg}]), 400 - - return jsonify(projects=deps) - -@v0_api.route("/projects/source/list") -def v0_projects_source_list(): - """Return the list of source names - - **/api/v0/projects/source/list** - - Return the list of repositories used for depsolving and installing packages. 
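Server-side this amounted to listing the repos dnf has enabled; roughly, as a standalone sketch using the ``dnf`` Python API::

    import dnf

    base = dnf.Base()
    base.read_all_repos()    # load /etc/yum.repos.d/*.repo
    sources = sorted(r.id for r in base.repos.iter_enabled())
    print(sources)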
- - Example:: - - { - "sources": [ - "fedora", - "fedora-cisco-openh264", - "fedora-updates-testing", - "fedora-updates" - ] - } - """ - with api.config["DNFLOCK"].lock: - repos = list(api.config["DNFLOCK"].dbo.repos.iter_enabled()) - sources = sorted([r.id for r in repos]) - return jsonify(sources=sources) - -@v0_api.route("/projects/source/info", defaults={'source_names': ""}) -@v0_api.route("/projects/source/info/") -@checkparams([("source_names", "", "no source names given")]) -def v0_projects_source_info(source_names): - """Return detailed info about the list of sources - - **/api/v0/projects/source/info/** - - Return information about the comma-separated list of source names. Or all of the - sources if '*' is passed. Note that general globbing is not supported, only '*'. - - immutable system sources will have the "system" field set to true. User added sources - will have it set to false. System sources cannot be changed or deleted. - - Example:: - - { - "errors": [], - "sources": { - "fedora": { - "check_gpg": true, - "check_ssl": true, - "gpgkey_urls": [ - "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-28-x86_64" - ], - "name": "fedora", - "proxy": "http://proxy.brianlane.com:8123", - "system": true, - "type": "yum-metalink", - "url": "https://mirrors.fedoraproject.org/metalink?repo=fedora-28&arch=x86_64" - } - } - } - """ - if VALID_API_STRING.match(source_names) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - out_fmt = request.args.get("format", "json") - if VALID_API_STRING.match(out_fmt) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in format argument"}]), 400 - - # Return info on all of the sources - if source_names == "*": - with api.config["DNFLOCK"].lock: - source_names = ",".join(r.id for r in api.config["DNFLOCK"].dbo.repos.iter_enabled()) - - sources = {} - errors = [] - system_sources = get_repo_sources("/etc/yum.repos.d/*.repo") - for source in source_names.split(","): - with api.config["DNFLOCK"].lock: - repo = api.config["DNFLOCK"].dbo.repos.get(source, None) - if not repo: - errors.append({"id": UNKNOWN_SOURCE, "msg": "%s is not a valid source" % source}) - continue - sources[repo.id] = repo_to_source(repo, repo.id in system_sources, api=0) - - if out_fmt == "toml" and not errors: - # With TOML output we just want to dump the raw sources, skipping the errors - return toml.dumps(sources) - elif out_fmt == "toml" and errors: - # TOML requested, but there was an error - return jsonify(status=False, errors=errors), 400 - else: - return jsonify(sources=sources, errors=errors) - -@v0_api.route("/projects/source/new", methods=["POST"]) -def v0_projects_source_new(): - """Add a new package source. Or change an existing one - - **POST /api/v0/projects/source/new** - - Add (or change) a source for use when depsolving blueprints and composing images. - - The ``proxy`` and ``gpgkey_urls`` entries are optional. All of the others are required. The supported - types for the urls are: - - * ``yum-baseurl`` is a URL to a yum repository. - * ``yum-mirrorlist`` is a URL for a mirrorlist. - * ``yum-metalink`` is a URL for a metalink. - - If ``check_ssl`` is true the https certificates must be valid. If they are self-signed you can either set - this to false, or add your Certificate Authority to the host system. - - If ``check_gpg`` is true the GPG key must either be installed on the host system, or ``gpgkey_urls`` - should point to it. 
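A sketch of that POST from Python (assuming ``requests_unixsocket``; the payload mirrors the example below)::

    import requests_unixsocket

    session = requests_unixsocket.Session()
    source = {"name": "custom-source-1",
              "url": "https://url/path/to/repository/",
              "type": "yum-baseurl",
              "check_ssl": True,
              "check_gpg": True,
              "gpgkey_urls": ["https://url/path/to/gpg-key"]}
    r = session.post("http+unix://%2Frun%2Fweldr%2Fapi.socket"
                     "/api/v0/projects/source/new", json=source)
    print(r.json())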
- - You can edit an existing source (other than system sources), by doing a POST - of the new version of the source. It will overwrite the previous one. - - Example:: - - { - "name": "custom-source-1", - "url": "https://url/path/to/repository/", - "type": "yum-baseurl", - "check_ssl": true, - "check_gpg": true, - "gpgkey_urls": [ - "https://url/path/to/gpg-key" - ] - } - - - """ - if request.headers['Content-Type'] == "text/x-toml": - source = toml.loads(request.data) - else: - source = request.get_json(cache=False) - - system_sources = get_repo_sources("/etc/yum.repos.d/*.repo") - if source["name"] in system_sources: - return jsonify(status=False, errors=[{"id": SYSTEM_SOURCE, "msg": "%s is a system source, it cannot be changed." % source["name"]}]), 400 - - try: - # Remove it from the RepoDict (NOTE that this isn't explicitly supported by the DNF API) - with api.config["DNFLOCK"].lock: - repo_dir = api.config["COMPOSER_CFG"].get("composer", "repo_dir") - new_repo_source(api.config["DNFLOCK"].dbo, source["name"], source, repo_dir) - except Exception as e: - return jsonify(status=False, errors=[{"id": PROJECTS_ERROR, "msg": str(e)}]), 400 - - return jsonify(status=True) - -@v0_api.route("/projects/source/delete", defaults={'source_name': ""}, methods=["DELETE"]) -@v0_api.route("/projects/source/delete/", methods=["DELETE"]) -@checkparams([("source_name", "", "no source name given")]) -def v0_projects_source_delete(source_name): - """Delete the named source and return a status response - - **DELETE /api/v0/projects/source/delete/** - - Delete a user added source. This will fail if a system source is passed to - it. - - The response will be a status response with `status` set to true, or an - error response with it set to false and an error message included. - """ - if VALID_API_STRING.match(source_name) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - system_sources = get_repo_sources("/etc/yum.repos.d/*.repo") - if source_name in system_sources: - return jsonify(status=False, errors=[{"id": SYSTEM_SOURCE, "msg": "%s is a system source, it cannot be deleted." % source_name}]), 400 - share_dir = api.config["COMPOSER_CFG"].get("composer", "repo_dir") - try: - # Remove the file entry for the source - delete_repo_source(joinpaths(share_dir, "*.repo"), source_name) - - # Remove it from the RepoDict (NOTE that this isn't explicitly supported by the DNF API) - with api.config["DNFLOCK"].lock: - if source_name in api.config["DNFLOCK"].dbo.repos: - del api.config["DNFLOCK"].dbo.repos[source_name] - log.info("Updating repository metadata after removing %s", source_name) - api.config["DNFLOCK"].dbo.fill_sack(load_system_repo=False) - api.config["DNFLOCK"].dbo.read_comps() - - except ProjectsError as e: - log.error("(v0_projects_source_delete) %s", str(e)) - return jsonify(status=False, errors=[{"id": UNKNOWN_SOURCE, "msg": str(e)}]), 400 - - return jsonify(status=True) - -@v0_api.route("/modules/list") -@v0_api.route("/modules/list/") -def v0_modules_list(module_names=None): - """List available modules, filtering by module_names - - **/api/v0/modules/list[?offset=0&limit=20]** - - Return a list of all of the available modules. This includes the name and the - group_type, which is always "rpm" for lorax-composer. By default this returns - the first 20 items. This can be changed by setting the `offset` and `limit` - arguments. 
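The paging itself is a simple double slice; the helper behind it (``take_limits``, from the removed ``pylorax.api.utils``) can be tried standalone::

    # the helper behind offset/limit paging, from pylorax.api.utils
    def take_limits(iterable, offset, limit):
        return iterable[offset:][:limit]

    modules = ["0ad", "0ad-data", "0install", "2048-cli", "tar"]
    print(take_limits(modules, offset=2, limit=2))    # ['0install', '2048-cli']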
- - Example:: - - { - "limit": 20, - "modules": [ - { - "group_type": "rpm", - "name": "0ad" - }, - { - "group_type": "rpm", - "name": "0ad-data" - }, - { - "group_type": "rpm", - "name": "0install" - }, - { - "group_type": "rpm", - "name": "2048-cli" - }, - ... - ] - "total": 21770 - } - - **/api/v0/modules/list/[?offset=0&limit=20]** - - Return the list of comma-separated modules. Output is the same as `/modules/list` - - Example:: - - { - "limit": 20, - "modules": [ - { - "group_type": "rpm", - "name": "tar" - } - ], - "offset": 0, - "total": 1 - } - """ - if module_names and VALID_API_STRING.match(module_names) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - try: - limit = int(request.args.get("limit", "20")) - offset = int(request.args.get("offset", "0")) - except ValueError as e: - return jsonify(status=False, errors=[{"id": BAD_LIMIT_OR_OFFSET, "msg": str(e)}]), 400 - - if module_names: - module_names = module_names.split(",") - - try: - with api.config["DNFLOCK"].lock: - available = modules_list(api.config["DNFLOCK"].dbo, module_names) - except ProjectsError as e: - log.error("(v0_modules_list) %s", str(e)) - return jsonify(status=False, errors=[{"id": MODULES_ERROR, "msg": str(e)}]), 400 - - if module_names and not available: - msg = "one of the requested modules does not exist: %s" % module_names - log.error("(v0_modules_list) %s", msg) - return jsonify(status=False, errors=[{"id": UNKNOWN_MODULE, "msg": msg}]), 400 - - modules = take_limits(available, offset, limit) - return jsonify(modules=modules, offset=offset, limit=limit, total=len(available)) - -@v0_api.route("/modules/info", defaults={'module_names': ""}) -@v0_api.route("/modules/info/") -@checkparams([("module_names", "", "no module names given")]) -def v0_modules_info(module_names): - """Return detailed information about the listed modules - - **/api/v0/modules/info/** - - Return the module's dependencies, and the information about the module. - - Example:: - - { - "modules": [ - { - "dependencies": [ - { - "arch": "noarch", - "epoch": "0", - "name": "basesystem", - "release": "7.el7", - "version": "10.0" - }, - { - "arch": "x86_64", - "epoch": "0", - "name": "bash", - "release": "28.el7", - "version": "4.2.46" - }, - ... 
- ], - "description": "The GNU tar program saves ...", - "homepage": "http://www.gnu.org/software/tar/", - "name": "tar", - "summary": "A GNU file archiving program", - "upstream_vcs": "UPSTREAM_VCS" - } - ] - } - """ - if VALID_API_STRING.match(module_names) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - try: - with api.config["DNFLOCK"].lock: - modules = modules_info(api.config["DNFLOCK"].dbo, module_names.split(",")) - except ProjectsError as e: - log.error("(v0_modules_info) %s", str(e)) - return jsonify(status=False, errors=[{"id": MODULES_ERROR, "msg": str(e)}]), 400 - - if not modules: - msg = "one of the requested modules does not exist: %s" % module_names - log.error("(v0_modules_info) %s", msg) - return jsonify(status=False, errors=[{"id": UNKNOWN_MODULE, "msg": msg}]), 400 - - return jsonify(modules=modules) - -@v0_api.route("/compose", methods=["POST"]) -def v0_compose_start(): - """Start a compose - - The body of the post should have these fields: - blueprint_name - The blueprint name from /blueprints/list/ - compose_type - The type of output to create, from /compose/types - branch - Optional, defaults to master, selects the git branch to use for the blueprint. - - **POST /api/v0/compose** - - Start a compose. The content type should be 'application/json' and the body of the POST - should look like this - - Example:: - - { - "blueprint_name": "http-server", - "compose_type": "tar", - "branch": "master" - } - - Pass it the name of the blueprint, the type of output (from '/api/v0/compose/types'), and the - blueprint branch to use. 'branch' is optional and will default to master. It will create a new - build and add it to the queue. It returns the build uuid and a status if it succeeds - - Example:: - - { - "build_id": "e6fa6db4-9c81-4b70-870f-a697ca405cdf", - "status": true - } - """ - # Passing ?test=1 will generate a fake FAILED compose. - # Passing ?test=2 will generate a fake FINISHED compose. 
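- # (e.g. POSTing the usual compose body to /api/v0/compose?test=2 queues a
- # fake FINISHED compose, which is handy for exercising the queue/status
- # routes without running a real build)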
- try: - test_mode = int(request.args.get("test", "0")) - except ValueError: - test_mode = 0 - - compose = request.get_json(cache=False) - - errors = [] - if not compose: - return jsonify(status=False, errors=[{"id": MISSING_POST, "msg": "Missing POST body"}]), 400 - - if "blueprint_name" not in compose: - errors.append({"id": UNKNOWN_BLUEPRINT,"msg": "No 'blueprint_name' in the JSON request"}) - else: - blueprint_name = compose["blueprint_name"] - - if "branch" not in compose or not compose["branch"]: - branch = "master" - else: - branch = compose["branch"] - - if "compose_type" not in compose: - errors.append({"id": BAD_COMPOSE_TYPE, "msg": "No 'compose_type' in the JSON request"}) - else: - compose_type = compose["compose_type"] - - if VALID_BLUEPRINT_NAME.match(blueprint_name) is None: - errors.append({"id": INVALID_CHARS, "msg": "Invalid characters in API path"}) - - if not blueprint_exists(api, branch, blueprint_name): - errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "Unknown blueprint name: %s" % blueprint_name}) - - if errors: - return jsonify(status=False, errors=errors), 400 - - try: - build_id = start_build(api.config["COMPOSER_CFG"], api.config["DNFLOCK"], api.config["GITLOCK"], - branch, blueprint_name, compose_type, test_mode) - except Exception as e: - if "Invalid compose type" in str(e): - return jsonify(status=False, errors=[{"id": BAD_COMPOSE_TYPE, "msg": str(e)}]), 400 - else: - return jsonify(status=False, errors=[{"id": BUILD_FAILED, "msg": str(e)}]), 400 - - return jsonify(status=True, build_id=build_id) - -@v0_api.route("/compose/types") -def v0_compose_types(): - """Return the list of enabled output types - - (only enabled types are returned) - - **/api/v0/compose/types** - - Returns the list of supported output types that are valid for use with 'POST /api/v0/compose' - - Example:: - - { - "types": [ - { - "enabled": true, - "name": "tar" - } - ] - } - """ - share_dir = api.config["COMPOSER_CFG"].get("composer", "share_dir") - return jsonify(types=[{"name": t, "enabled": e} for t, e in compose_types(share_dir)]) - -@v0_api.route("/compose/queue") -def v0_compose_queue(): - """Return the status of the new and running queues - - **/api/v0/compose/queue** - - Return the status of the build queue. It includes information about the builds waiting, - and the build that is running. - - Example:: - - { - "new": [ - { - "id": "45502a6d-06e8-48a5-a215-2b4174b3614b", - "blueprint": "glusterfs", - "queue_status": "WAITING", - "job_created": 1517362647.4570868, - "version": "0.0.6" - }, - { - "id": "6d292bd0-bec7-4825-8d7d-41ef9c3e4b73", - "blueprint": "kubernetes", - "queue_status": "WAITING", - "job_created": 1517362659.0034983, - "version": "0.0.1" - } - ], - "run": [ - { - "id": "745712b2-96db-44c0-8014-fe925c35e795", - "blueprint": "glusterfs", - "queue_status": "RUNNING", - "job_created": 1517362633.7965999, - "job_started": 1517362633.8001345, - "version": "0.0.6" - } - ] - } - """ - return jsonify(queue_status(api.config["COMPOSER_CFG"], api=0)) - -@v0_api.route("/compose/finished") -def v0_compose_finished(): - """Return the list of finished composes - - **/api/v0/compose/finished** - - Return the details on all of the finished composes on the system. 
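    A hedged client sketch (editor's addition, not part of lorax-composer):
    fetching this route over the API socket with only the Python standard
    library. The ``UnixHTTPConnection`` helper and the socket path are
    assumptions, not lorax-composer API::

        import http.client
        import json
        import socket

        class UnixHTTPConnection(http.client.HTTPConnection):
            """HTTPConnection that connects to a Unix domain socket."""
            def __init__(self, path):
                super().__init__("localhost")
                self.unix_path = path

            def connect(self):
                # Replace the normal TCP connect with a Unix socket connect
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                sock.connect(self.unix_path)
                self.sock = sock

        conn = UnixHTTPConnection("/run/weldr/api.socket")
        conn.request("GET", "/api/v0/compose/finished")
        for build in json.loads(conn.getresponse().read())["finished"]:
            print(build["id"], build["blueprint"], build["version"])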
- - Example:: - - { - "finished": [ - { - "id": "70b84195-9817-4b8a-af92-45e380f39894", - "blueprint": "glusterfs", - "queue_status": "FINISHED", - "job_created": 1517351003.8210032, - "job_started": 1517351003.8230415, - "job_finished": 1517359234.1003145, - "version": "0.0.6" - }, - { - "id": "e695affd-397f-4af9-9022-add2636e7459", - "blueprint": "glusterfs", - "queue_status": "FINISHED", - "job_created": 1517362289.7193348, - "job_started": 1517362289.9751132, - "job_finished": 1517363500.1234567, - "version": "0.0.6" - } - ] - } - """ - return jsonify(finished=build_status(api.config["COMPOSER_CFG"], "FINISHED", api=0)) - -@v0_api.route("/compose/failed") -def v0_compose_failed(): - """Return the list of failed composes - - **/api/v0/compose/failed** - - Return the details on all of the failed composes on the system. - - Example:: - - { - "failed": [ - { - "id": "8c8435ef-d6bd-4c68-9bf1-a2ef832e6b1a", - "blueprint": "http-server", - "queue_status": "FAILED", - "job_created": 1517523249.9301329, - "job_started": 1517523249.9314211, - "job_finished": 1517523255.5623411, - "version": "0.0.2" - } - ] - } - """ - return jsonify(failed=build_status(api.config["COMPOSER_CFG"], "FAILED", api=0)) - -@v0_api.route("/compose/status", defaults={'uuids': ""}) -@v0_api.route("/compose/status/") -@checkparams([("uuids", "", "no UUIDs given")]) -def v0_compose_status(uuids): - """Return the status of the listed uuids - - **/api/v0/compose/status/[?blueprint=&status=&type=]** - - Return the details for each of the comma-separated list of uuids. A uuid of '*' will return - details for all composes. - - Example:: - - { - "uuids": [ - { - "id": "8c8435ef-d6bd-4c68-9bf1-a2ef832e6b1a", - "blueprint": "http-server", - "queue_status": "FINISHED", - "job_created": 1517523644.2384307, - "job_started": 1517523644.2551234, - "job_finished": 1517523689.9864314, - "version": "0.0.2" - }, - { - "id": "45502a6d-06e8-48a5-a215-2b4174b3614b", - "blueprint": "glusterfs", - "queue_status": "FINISHED", - "job_created": 1517363442.188399, - "job_started": 1517363442.325324, - "job_finished": 1517363451.653621, - "version": "0.0.6" - } - ] - } - """ - if VALID_API_STRING.match(uuids) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - blueprint = request.args.get("blueprint", None) - status = request.args.get("status", None) - compose_type = request.args.get("type", None) - - # Check the arguments for invalid characters - for a in [blueprint, status, compose_type]: - if a is not None and VALID_API_STRING.match(a) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - results = [] - errors = [] - - if uuids.strip() == '*': - queue_status_dict = queue_status(api.config["COMPOSER_CFG"], api=0) - queue_new = queue_status_dict["new"] - queue_running = queue_status_dict["run"] - candidates = queue_new + queue_running + build_status(api.config["COMPOSER_CFG"], api=0) - else: - candidates = [] - for uuid in [n.strip().lower() for n in uuids.split(",")]: - details = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0) - if details is None: - errors.append({"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}) - else: - candidates.append(details) - - for details in candidates: - if blueprint is not None and details['blueprint'] != blueprint: - continue - - if status is not None and details['queue_status'] != status: - continue - - if compose_type is not None and details['compose_type'] 
!= compose_type:
-            continue
-
-        results.append(details)
-
-    return jsonify(uuids=results, errors=errors)
-
-@v0_api.route("/compose/cancel", defaults={'uuid': ""}, methods=["DELETE"])
-@v0_api.route("/compose/cancel/<uuid>", methods=["DELETE"])
-@checkparams([("uuid", "", "no UUID given")])
-def v0_compose_cancel(uuid):
-    """Cancel a running compose and delete its results directory
-
-    **DELETE /api/v0/compose/cancel/<uuid>**
-
-    Cancel the build, if it is not finished, and delete the results. It will return a
-    status of True if it is successful.
-
-    Example::
-
-        {
-            "status": true,
-            "uuid": "03397f8d-acff-4cdb-bd31-f629b7a948f5"
-        }
-    """
-    if VALID_API_STRING.match(uuid) is None:
-        return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400
-
-    status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0)
-    if status is None:
-        return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400
-
-    if status["queue_status"] not in ["WAITING", "RUNNING"]:
-        return jsonify(status=False, errors=[{"id": BUILD_IN_WRONG_STATE, "msg": "Build %s is not in WAITING or RUNNING." % uuid}]), 400
-
-    try:
-        uuid_cancel(api.config["COMPOSER_CFG"], uuid)
-    except Exception as e:
-        return jsonify(status=False, errors=[{"id": COMPOSE_ERROR, "msg": "%s: %s" % (uuid, str(e))}]), 400
-    else:
-        return jsonify(status=True, uuid=uuid)
-
-@v0_api.route("/compose/delete", defaults={'uuids': ""}, methods=["DELETE"])
-@v0_api.route("/compose/delete/<uuids>", methods=["DELETE"])
-@checkparams([("uuids", "", "no UUIDs given")])
-def v0_compose_delete(uuids):
-    """Delete the compose results for the listed uuids
-
-    **DELETE /api/v0/compose/delete/<uuids>**
-
-    Delete the list of comma-separated uuids from the compose results.
-
-    Example::
-
-        {
-            "errors": [],
-            "uuids": [
-                {
-                    "status": true,
-                    "uuid": "ae1bf7e3-7f16-4c9f-b36e-3726a1093fd0"
-                }
-            ]
-        }
-    """
-    if VALID_API_STRING.match(uuids) is None:
-        return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400
-
-    results = []
-    errors = []
-    for uuid in [n.strip().lower() for n in uuids.split(",")]:
-        status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0)
-        if status is None:
-            errors.append({"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid})
-        elif status["queue_status"] not in ["FINISHED", "FAILED"]:
-            errors.append({"id": BUILD_IN_WRONG_STATE, "msg": "Build %s is not in FINISHED or FAILED." % uuid})
-        else:
-            try:
-                uuid_delete(api.config["COMPOSER_CFG"], uuid)
-            except Exception as e:
-                errors.append({"id": COMPOSE_ERROR, "msg": "%s: %s" % (uuid, str(e))})
-            else:
-                results.append({"uuid": uuid, "status": True})
-    return jsonify(uuids=results, errors=errors)
-
-@v0_api.route("/compose/info", defaults={'uuid': ""})
-@v0_api.route("/compose/info/<uuid>")
-@checkparams([("uuid", "", "no UUID given")])
-def v0_compose_info(uuid):
-    """Return detailed info about a compose
-
-    **/api/v0/compose/info/<uuid>**
-
-    Get detailed information about the compose. The returned JSON string will
-    contain the following information:
-
-    * id - The uuid of the composition
-    * config - The configuration settings used to run Anaconda
-    * blueprint - The depsolved blueprint used to generate the kickstart
-    * commit - The (local) git commit hash for the blueprint used
-    * deps - The NEVRA of all of the dependencies used in the composition
-    * compose_type - The type of output generated (tar, iso, etc.)
- * queue_status - The final status of the composition (FINISHED or FAILED) - - Example:: - - { - "commit": "7078e521a54b12eae31c3fd028680da7a0815a4d", - "compose_type": "tar", - "config": { - "anaconda_args": "", - "armplatform": "", - "compress_args": [], - "compression": "xz", - "image_name": "root.tar.xz", - ... - }, - "deps": { - "packages": [ - { - "arch": "x86_64", - "epoch": "0", - "name": "acl", - "release": "14.el7", - "version": "2.2.51" - } - ] - }, - "id": "c30b7d80-523b-4a23-ad52-61b799739ce8", - "queue_status": "FINISHED", - "blueprint": { - "description": "An example kubernetes master", - ... - } - } - """ - if VALID_API_STRING.match(uuid) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - try: - info = uuid_info(api.config["COMPOSER_CFG"], uuid, api=0) - except Exception as e: - return jsonify(status=False, errors=[{"id": COMPOSE_ERROR, "msg": str(e)}]), 400 - - if info is None: - return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400 - else: - return jsonify(**info) - -@v0_api.route("/compose/metadata", defaults={'uuid': ""}) -@v0_api.route("/compose/metadata/") -@checkparams([("uuid","", "no UUID given")]) -def v0_compose_metadata(uuid): - """Return a tar of the metadata for the build - - **/api/v0/compose/metadata/** - - Returns a .tar of the metadata used for the build. This includes all the - information needed to reproduce the build, including the final kickstart - populated with repository and package NEVRA. - - The mime type is set to 'application/x-tar' and the filename is set to - UUID-metadata.tar - - The .tar is uncompressed, but is not large. - """ - if VALID_API_STRING.match(uuid) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0) - if status is None: - return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400 - if status["queue_status"] not in ["FINISHED", "FAILED"]: - return jsonify(status=False, errors=[{"id": BUILD_IN_WRONG_STATE, "msg": "Build %s not in FINISHED or FAILED state." % uuid}]), 400 - else: - return Response(uuid_tar(api.config["COMPOSER_CFG"], uuid, metadata=True, image=False, logs=False), - mimetype="application/x-tar", - headers=[("Content-Disposition", "attachment; filename=%s-metadata.tar;" % uuid)], - direct_passthrough=True) - -@v0_api.route("/compose/results", defaults={'uuid': ""}) -@v0_api.route("/compose/results/") -@checkparams([("uuid","", "no UUID given")]) -def v0_compose_results(uuid): - """Return a tar of the metadata and the results for the build - - **/api/v0/compose/results/** - - Returns a .tar of the metadata, logs, and output image of the build. This - includes all the information needed to reproduce the build, including the - final kickstart populated with repository and package NEVRA. The output image - is already in compressed form so the returned tar is not compressed. 
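    A hedged sketch (editor's addition) of inspecting a downloaded results
    archive with the standard library; the filename is illustrative::

        import tarfile

        # List, then unpack, a previously downloaded UUID.tar
        with tarfile.open("e6fa6db4-9c81-4b70-870f-a697ca405cdf.tar") as tar:
            for member in tar.getnames():
                print(member)
            tar.extractall(path="results")  # only extract archives you trust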
-
-    The mime type is set to 'application/x-tar' and the filename is set to
-    UUID.tar
-    """
-    if VALID_API_STRING.match(uuid) is None:
-        return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400
-
-    status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0)
-    if status is None:
-        return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400
-    elif status["queue_status"] not in ["FINISHED", "FAILED"]:
-        return jsonify(status=False, errors=[{"id": BUILD_IN_WRONG_STATE, "msg": "Build %s not in FINISHED or FAILED state." % uuid}]), 400
-    else:
-        return Response(uuid_tar(api.config["COMPOSER_CFG"], uuid, metadata=True, image=True, logs=True),
-                        mimetype="application/x-tar",
-                        headers=[("Content-Disposition", "attachment; filename=%s.tar;" % uuid)],
-                        direct_passthrough=True)
-
-@v0_api.route("/compose/logs", defaults={'uuid': ""})
-@v0_api.route("/compose/logs/<uuid>")
-@checkparams([("uuid","", "no UUID given")])
-def v0_compose_logs(uuid):
-    """Return a tar of the logs for the build
-
-    **/api/v0/compose/logs/<uuid>**
-
-    Returns a .tar of the Anaconda build logs. The tar is not compressed, but it is
-    not large.
-
-    The mime type is set to 'application/x-tar' and the filename is set to
-    UUID-logs.tar
-    """
-    if VALID_API_STRING.match(uuid) is None:
-        return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400
-
-    status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0)
-    if status is None:
-        return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400
-    elif status["queue_status"] not in ["FINISHED", "FAILED"]:
-        return jsonify(status=False, errors=[{"id": BUILD_IN_WRONG_STATE, "msg": "Build %s not in FINISHED or FAILED state." % uuid}]), 400
-    else:
-        return Response(uuid_tar(api.config["COMPOSER_CFG"], uuid, metadata=False, image=False, logs=True),
-                        mimetype="application/x-tar",
-                        headers=[("Content-Disposition", "attachment; filename=%s-logs.tar;" % uuid)],
-                        direct_passthrough=True)
-
-@v0_api.route("/compose/image", defaults={'uuid': ""})
-@v0_api.route("/compose/image/<uuid>")
-@checkparams([("uuid","", "no UUID given")])
-def v0_compose_image(uuid):
-    """Return the output image for the build
-
-    **/api/v0/compose/image/<uuid>**
-
-    Returns the output image from the build. The filename is set to the filename
-    from the build with the UUID as a prefix. eg. UUID-root.tar.xz or UUID-boot.iso.
-    """
-    if VALID_API_STRING.match(uuid) is None:
-        return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400
-
-    status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0)
-    if status is None:
-        return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400
-    elif status["queue_status"] not in ["FINISHED", "FAILED"]:
-        return jsonify(status=False, errors=[{"id": BUILD_IN_WRONG_STATE, "msg": "Build %s not in FINISHED or FAILED state." % uuid}]), 400
-    else:
-        image_name, image_path = uuid_image(api.config["COMPOSER_CFG"], uuid)
-
-        # Make sure it really exists
-        if not os.path.exists(image_path):
-            return jsonify(status=False, errors=[{"id": BUILD_MISSING_FILE, "msg": "Build %s is missing image file %s" % (uuid, image_name)}]), 400
-
-        # Make the image name unique
-        image_name = uuid + "-" + image_name
-        # XXX - Will mime type guessing work for all our output?
- return send_file(image_path, as_attachment=True, attachment_filename=image_name, add_etags=False) - -@v0_api.route("/compose/log", defaults={'uuid': ""}) -@v0_api.route("/compose/log/") -@checkparams([("uuid","", "no UUID given")]) -def v0_compose_log_tail(uuid): - """Return the tail of the most currently relevant log - - **/api/v0/compose/log/[?size=KiB]** - - Returns the end of either the anaconda log, the packaging log, or the - composer logs, depending on the progress of the compose. The size - parameter is optional and defaults to 1 MiB if it is not included. The - returned data is raw text from the end of the log file, starting on a - line boundary. - - Example:: - - 12:59:24,222 INFO anaconda: Running Thread: AnaConfigurationThread (140629395244800) - 12:59:24,223 INFO anaconda: Configuring installed system - 12:59:24,912 INFO anaconda: Configuring installed system - 12:59:24,912 INFO anaconda: Creating users - 12:59:24,913 INFO anaconda: Clearing libuser.conf at /tmp/libuser.Dyy8Gj - 12:59:25,154 INFO anaconda: Creating users - 12:59:25,155 INFO anaconda: Configuring addons - 12:59:25,155 INFO anaconda: Configuring addons - 12:59:25,155 INFO anaconda: Generating initramfs - 12:59:49,467 INFO anaconda: Generating initramfs - 12:59:49,467 INFO anaconda: Running post-installation scripts - 12:59:49,467 INFO anaconda: Running kickstart %%post script(s) - 12:59:50,782 INFO anaconda: All kickstart %%post script(s) have been run - 12:59:50,782 INFO anaconda: Running post-installation scripts - 12:59:50,784 INFO anaconda: Thread Done: AnaConfigurationThread (140629395244800) - """ - if VALID_API_STRING.match(uuid) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - try: - size = int(request.args.get("size", "1024")) - except ValueError as e: - return jsonify(status=False, errors=[{"id": COMPOSE_ERROR, "msg": str(e)}]), 400 - - status = uuid_status(api.config["COMPOSER_CFG"], uuid, api=0) - if status is None: - return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400 - elif status["queue_status"] == "WAITING": - return jsonify(status=False, errors=[{"id": BUILD_IN_WRONG_STATE, "msg": "Build %s has not started yet. No logs to view" % uuid}]) - try: - return Response(uuid_log(api.config["COMPOSER_CFG"], uuid, size), direct_passthrough=True) - except RuntimeError as e: - return jsonify(status=False, errors=[{"id": COMPOSE_ERROR, "msg": str(e)}]), 400 diff --git a/src/pylorax/api/v1.py b/src/pylorax/api/v1.py deleted file mode 100644 index 5652d9e1..00000000 --- a/src/pylorax/api/v1.py +++ /dev/null @@ -1,1042 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -""" Setup v1 of the API server - -""" -import logging -log = logging.getLogger("lorax-composer") - -from flask import jsonify, request -from flask import current_app as api - -from lifted.queue import get_upload, reset_upload, cancel_upload, delete_upload -from lifted.providers import list_providers, resolve_provider, load_profiles, validate_settings, save_settings -from lifted.providers import load_settings, delete_profile -from pylorax.api.checkparams import checkparams -from pylorax.api.compose import start_build -from pylorax.api.errors import BAD_COMPOSE_TYPE, BUILD_FAILED, INVALID_CHARS, MISSING_POST, PROJECTS_ERROR -from pylorax.api.errors import SYSTEM_SOURCE, UNKNOWN_BLUEPRINT, UNKNOWN_SOURCE, UNKNOWN_UUID, UPLOAD_ERROR -from pylorax.api.errors import COMPOSE_ERROR -from pylorax.api.flask_blueprint import BlueprintSkip -from pylorax.api.queue import queue_status, build_status, uuid_status, uuid_schedule_upload, uuid_remove_upload -from pylorax.api.queue import uuid_info -from pylorax.api.projects import get_repo_sources, repo_to_source -from pylorax.api.projects import new_repo_source -from pylorax.api.regexes import VALID_API_STRING, VALID_BLUEPRINT_NAME -import pylorax.api.toml as toml -from pylorax.api.utils import blueprint_exists - - -# Create the v1 routes Blueprint with skip_routes support -v1_api = BlueprintSkip("v1_routes", __name__) - -@v1_api.route("/projects/source/info", defaults={'source_ids': ""}) -@v1_api.route("/projects/source/info/") -@checkparams([("source_ids", "", "no source names given")]) -def v1_projects_source_info(source_ids): - """Return detailed info about the list of sources - - **/api/v1/projects/source/info/** - - Return information about the comma-separated list of source ids. Or all of the - sources if '*' is passed. Note that general globbing is not supported, only '*'. - - Immutable system sources will have the "system" field set to true. User added sources - will have it set to false. System sources cannot be changed or deleted. - - Example:: - - { - "errors": [], - "sources": { - "fedora": { - "check_gpg": true, - "check_ssl": true, - "gpgkey_urls": [ - "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-28-x86_64" - ], - "id": "fedora", - "name": "Fedora $releasever - $basearch", - "proxy": "http://proxy.brianlane.com:8123", - "system": true, - "type": "yum-metalink", - "url": "https://mirrors.fedoraproject.org/metalink?repo=fedora-28&arch=x86_64" - } - } - } - - In v0 the ``name`` field was used for the id (a short name for the repo). In v1 ``name`` changed - to ``id`` and ``name`` is now used for the longer descriptive name of the repository. 
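    A hedged illustration (editor's addition) of the v1 field layout as a
    Python dict, serialized with the same ``toml`` module the server imports;
    all values are placeholders::

        import toml

        source = {
            "id": "custom-source-1",             # v0 called this "name"
            "name": "Custom Package Source #1",  # v1: descriptive long name
            "url": "https://url/path/to/repository/",
            "type": "yum-baseurl",
            "check_ssl": True,
            "check_gpg": False,
        }
        print(toml.dumps(source))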
- """ - if VALID_API_STRING.match(source_ids) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - out_fmt = request.args.get("format", "json") - if VALID_API_STRING.match(out_fmt) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in format argument"}]), 400 - - # Return info on all of the sources - if source_ids == "*": - with api.config["DNFLOCK"].lock: - source_ids = ",".join(r.id for r in api.config["DNFLOCK"].dbo.repos.iter_enabled()) - - sources = {} - errors = [] - system_sources = get_repo_sources("/etc/yum.repos.d/*.repo") - for source in source_ids.split(","): - with api.config["DNFLOCK"].lock: - repo = api.config["DNFLOCK"].dbo.repos.get(source, None) - if not repo: - errors.append({"id": UNKNOWN_SOURCE, "msg": "%s is not a valid source" % source}) - continue - sources[repo.id] = repo_to_source(repo, repo.id in system_sources, api=1) - - if out_fmt == "toml" and not errors: - # With TOML output we just want to dump the raw sources, skipping the errors - return toml.dumps(sources) - elif out_fmt == "toml" and errors: - # TOML requested, but there was an error - return jsonify(status=False, errors=errors), 400 - else: - return jsonify(sources=sources, errors=errors) - -@v1_api.route("/projects/source/new", methods=["POST"]) -def v1_projects_source_new(): - """Add a new package source. Or change an existing one - - **POST /api/v1/projects/source/new** - - Add (or change) a source for use when depsolving blueprints and composing images. - - The ``proxy`` and ``gpgkey_urls`` entries are optional. All of the others are required. The supported - types for the urls are: - - * ``yum-baseurl`` is a URL to a yum repository. - * ``yum-mirrorlist`` is a URL for a mirrorlist. - * ``yum-metalink`` is a URL for a metalink. - - If ``check_ssl`` is true the https certificates must be valid. If they are self-signed you can either set - this to false, or add your Certificate Authority to the host system. - - If ``check_gpg`` is true the GPG key must either be installed on the host system, or ``gpgkey_urls`` - should point to it. - - You can edit an existing source (other than system sources), by doing a POST - of the new version of the source. It will overwrite the previous one. - - Example:: - - { - "id": "custom-source-1", - "name": "Custom Package Source #1", - "url": "https://url/path/to/repository/", - "type": "yum-baseurl", - "check_ssl": true, - "check_gpg": true, - "gpgkey_urls": [ - "https://url/path/to/gpg-key" - ] - } - - In v0 the ``name`` field was used for the id (a short name for the repo). In v1 ``name`` changed - to ``id`` and ``name`` is now used for the longer descriptive name of the repository. - """ - if request.headers['Content-Type'] == "text/x-toml": - source = toml.loads(request.data) - else: - source = request.get_json(cache=False) - - # Check for id in source, return error if not - if "id" not in source: - return jsonify(status=False, errors=[{"id": UNKNOWN_SOURCE, "msg": "'id' field is missing from API v1 request."}]), 400 - - system_sources = get_repo_sources("/etc/yum.repos.d/*.repo") - if source["id"] in system_sources: - return jsonify(status=False, errors=[{"id": SYSTEM_SOURCE, "msg": "%s is a system source, it cannot be changed." 
% source["id"]}]), 400 - - try: - # Remove it from the RepoDict (NOTE that this isn't explicitly supported by the DNF API) - with api.config["DNFLOCK"].lock: - repo_dir = api.config["COMPOSER_CFG"].get("composer", "repo_dir") - new_repo_source(api.config["DNFLOCK"].dbo, source["id"], source, repo_dir) - except Exception as e: - return jsonify(status=False, errors=[{"id": PROJECTS_ERROR, "msg": str(e)}]), 400 - - return jsonify(status=True) - -@v1_api.route("/compose", methods=["POST"]) -def v1_compose_start(): - """Start a compose - - The body of the post should have these fields: - blueprint_name - The blueprint name from /blueprints/list/ - compose_type - The type of output to create, from /compose/types - branch - Optional, defaults to master, selects the git branch to use for the blueprint. - - **POST /api/v1/compose** - - Start a compose. The content type should be 'application/json' and the body of the POST - should look like this. The "upload" object is optional. - - The upload object can specify either a pre-existing profile to use (as returned by - `/uploads/providers`) or one-time use settings for the provider. - - Example with upload profile:: - - { - "blueprint_name": "http-server", - "compose_type": "tar", - "branch": "master", - "upload": { - "image_name": "My Image", - "provider": "azure", - "profile": "production-azure-settings" - } - } - - Example with upload settings:: - - { - "blueprint_name": "http-server", - "compose_type": "tar", - "branch": "master", - "upload": { - "image_name": "My Image", - "provider": "azure", - "settings": { - "resource_group": "SOMEBODY", - "storage_account_name": "ONCE", - "storage_container": "TOLD", - "location": "ME", - "subscription_id": "THE", - "client_id": "WORLD", - "secret": "IS", - "tenant": "GONNA" - } - } - } - - Pass it the name of the blueprint, the type of output (from - '/api/v1/compose/types'), and the blueprint branch to use. 'branch' is - optional and will default to master. It will create a new build and add - it to the queue. It returns the build uuid and a status if it succeeds. - If an "upload" is given, it will schedule an upload to run when the build - finishes. - - Example response:: - - { - "build_id": "e6fa6db4-9c81-4b70-870f-a697ca405cdf", - "upload_id": "572eb0d0-5348-4600-9666-14526ba628bb", - "status": true - } - """ - # Passing ?test=1 will generate a fake FAILED compose. - # Passing ?test=2 will generate a fake FINISHED compose. 
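    # Editor's aside (a hedged sketch, not part of the original source): a
    # body that also schedules an upload once the compose finishes. The
    # profile name is illustrative; "settings": {...} may be sent instead.
    example_body = {
        "blueprint_name": "http-server",
        "compose_type": "vhd",
        "branch": "master",
        "upload": {
            "image_name": "My Image",
            "provider": "azure",
            "profile": "production-azure-settings",
        },
    }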
- try: - test_mode = int(request.args.get("test", "0")) - except ValueError: - test_mode = 0 - - compose = request.get_json(cache=False) - - errors = [] - if not compose: - return jsonify(status=False, errors=[{"id": MISSING_POST, "msg": "Missing POST body"}]), 400 - - if "blueprint_name" not in compose: - errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "No 'blueprint_name' in the JSON request"}) - else: - blueprint_name = compose["blueprint_name"] - - if "branch" not in compose or not compose["branch"]: - branch = "master" - else: - branch = compose["branch"] - - if "compose_type" not in compose: - errors.append({"id": BAD_COMPOSE_TYPE, "msg": "No 'compose_type' in the JSON request"}) - else: - compose_type = compose["compose_type"] - - if VALID_BLUEPRINT_NAME.match(blueprint_name) is None: - errors.append({"id": INVALID_CHARS, "msg": "Invalid characters in API path"}) - - if not blueprint_exists(api, branch, blueprint_name): - errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "Unknown blueprint name: %s" % blueprint_name}) - - if "upload" in compose: - try: - image_name = compose["upload"]["image_name"] - - if "profile" in compose["upload"]: - # Load a specific profile for this provider - profile = compose["upload"]["profile"] - provider_name = compose["upload"]["provider"] - settings = load_settings(api.config["COMPOSER_CFG"]["upload"], provider_name, profile) - else: - provider_name = compose["upload"]["provider"] - settings = compose["upload"]["settings"] - except KeyError as e: - errors.append({"id": UPLOAD_ERROR, "msg": f'Missing parameter {str(e)}!'}) - try: - provider = resolve_provider(api.config["COMPOSER_CFG"]["upload"], provider_name) - if "supported_types" in provider and compose_type not in provider["supported_types"]: - raise RuntimeError(f'Type "{compose_type}" is not supported by provider "{provider_name}"!') - validate_settings(api.config["COMPOSER_CFG"]["upload"], provider_name, settings, image_name) - except Exception as e: - errors.append({"id": UPLOAD_ERROR, "msg": str(e)}) - - if errors: - return jsonify(status=False, errors=errors), 400 - - try: - build_id = start_build(api.config["COMPOSER_CFG"], api.config["DNFLOCK"], api.config["GITLOCK"], - branch, blueprint_name, compose_type, test_mode) - except Exception as e: - if "Invalid compose type" in str(e): - return jsonify(status=False, errors=[{"id": BAD_COMPOSE_TYPE, "msg": str(e)}]), 400 - else: - return jsonify(status=False, errors=[{"id": BUILD_FAILED, "msg": str(e)}]), 400 - - if "upload" in compose: - upload_id = uuid_schedule_upload( - api.config["COMPOSER_CFG"], - build_id, - provider_name, - image_name, - settings - ) - else: - upload_id = "" - - return jsonify(status=True, build_id=build_id, upload_id=upload_id) - -@v1_api.route("/compose/queue") -def v1_compose_queue(): - """Return the status of the new and running queues - - **/api/v1/compose/queue** - - Return the status of the build queue. It includes information about the builds waiting, - and the build that is running. 
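    A hedged sketch (editor's addition) of summarizing a parsed response from
    this route; ``queue`` is assumed to be the decoded JSON::

        def summarize_queue(queue):
            """Print one line per waiting/running build and its uploads."""
            for build in queue["new"]:
                print("WAITING", build["id"], build["blueprint"])
            for build in queue["run"]:
                print("RUNNING", build["id"], build["blueprint"])
                for upload in build.get("uploads", []):
                    print("    upload:", upload["uuid"], upload["status"])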
- - Example:: - - { - "new": [ - { - "id": "45502a6d-06e8-48a5-a215-2b4174b3614b", - "blueprint": "glusterfs", - "queue_status": "WAITING", - "job_created": 1517362647.4570868, - "version": "0.0.6" - }, - { - "id": "6d292bd0-bec7-4825-8d7d-41ef9c3e4b73", - "blueprint": "kubernetes", - "queue_status": "WAITING", - "job_created": 1517362659.0034983, - "version": "0.0.1" - } - ], - "run": [ - { - "id": "745712b2-96db-44c0-8014-fe925c35e795", - "blueprint": "glusterfs", - "queue_status": "RUNNING", - "job_created": 1517362633.7965999, - "job_started": 1517362633.8001345, - "version": "0.0.6", - "uploads": [ - { - "creation_time": 1568150660.524401, - "image_name": "glusterfs server", - "image_path": null, - "provider_name": "azure", - "settings": { - "client_id": "need", - "location": "need", - "resource_group": "group", - "secret": "need", - "storage_account_name": "need", - "storage_container": "need", - "subscription_id": "need", - "tenant": "need" - }, - "status": "WAITING", - "uuid": "21898dfd-9ac9-4e22-bb1d-7f12d0129e65" - } - ] - } - ] - } - """ - return jsonify(queue_status(api.config["COMPOSER_CFG"], api=1)) - -@v1_api.route("/compose/finished") -def v1_compose_finished(): - """Return the list of finished composes - - **/api/v1/compose/finished** - - Return the details on all of the finished composes on the system. - - Example:: - - { - "finished": [ - { - "id": "70b84195-9817-4b8a-af92-45e380f39894", - "blueprint": "glusterfs", - "queue_status": "FINISHED", - "job_created": 1517351003.8210032, - "job_started": 1517351003.8230415, - "job_finished": 1517359234.1003145, - "version": "0.0.6" - }, - { - "id": "e695affd-397f-4af9-9022-add2636e7459", - "blueprint": "glusterfs", - "queue_status": "FINISHED", - "job_created": 1517362289.7193348, - "job_started": 1517362289.9751132, - "job_finished": 1517363500.1234567, - "version": "0.0.6", - "uploads": [ - { - "creation_time": 1568150660.524401, - "image_name": "glusterfs server", - "image_path": "/var/lib/lorax/composer/results/e695affd-397f-4af9-9022-add2636e7459/disk.vhd", - "provider_name": "azure", - "settings": { - "client_id": "need", - "location": "need", - "resource_group": "group", - "secret": "need", - "storage_account_name": "need", - "storage_container": "need", - "subscription_id": "need", - "tenant": "need" - }, - "status": "WAITING", - "uuid": "21898dfd-9ac9-4e22-bb1d-7f12d0129e65" - } - ] - } - ] - } - """ - return jsonify(finished=build_status(api.config["COMPOSER_CFG"], "FINISHED", api=1)) - -@v1_api.route("/compose/failed") -def v1_compose_failed(): - """Return the list of failed composes - - **/api/v1/compose/failed** - - Return the details on all of the failed composes on the system. 
- - Example:: - - { - "failed": [ - { - "id": "8c8435ef-d6bd-4c68-9bf1-a2ef832e6b1a", - "blueprint": "http-server", - "queue_status": "FAILED", - "job_created": 1517523249.9301329, - "job_started": 1517523249.9314211, - "job_finished": 1517523255.5623411, - "version": "0.0.2", - "uploads": [ - { - "creation_time": 1568150660.524401, - "image_name": "http-server", - "image_path": null, - "provider_name": "azure", - "settings": { - "client_id": "need", - "location": "need", - "resource_group": "group", - "secret": "need", - "storage_account_name": "need", - "storage_container": "need", - "subscription_id": "need", - "tenant": "need" - }, - "status": "WAITING", - "uuid": "21898dfd-9ac9-4e22-bb1d-7f12d0129e65" - } - ] - } - ] - } - """ - return jsonify(failed=build_status(api.config["COMPOSER_CFG"], "FAILED", api=1)) - -@v1_api.route("/compose/status", defaults={'uuids': ""}) -@v1_api.route("/compose/status/") -@checkparams([("uuids", "", "no UUIDs given")]) -def v1_compose_status(uuids): - """Return the status of the listed uuids - - **/api/v1/compose/status/[?blueprint=&status=&type=]** - - Return the details for each of the comma-separated list of uuids. A uuid of '*' will return - details for all composes. - - Example:: - - { - "uuids": [ - { - "id": "8c8435ef-d6bd-4c68-9bf1-a2ef832e6b1a", - "blueprint": "http-server", - "queue_status": "FINISHED", - "job_created": 1517523644.2384307, - "job_started": 1517523644.2551234, - "job_finished": 1517523689.9864314, - "version": "0.0.2" - }, - { - "id": "45502a6d-06e8-48a5-a215-2b4174b3614b", - "blueprint": "glusterfs", - "queue_status": "FINISHED", - "job_created": 1517363442.188399, - "job_started": 1517363442.325324, - "job_finished": 1517363451.653621, - "version": "0.0.6", - "uploads": [ - { - "creation_time": 1568150660.524401, - "image_name": "glusterfs server", - "image_path": null, - "provider_name": "azure", - "settings": { - "client_id": "need", - "location": "need", - "resource_group": "group", - "secret": "need", - "storage_account_name": "need", - "storage_container": "need", - "subscription_id": "need", - "tenant": "need" - }, - "status": "WAITING", - "uuid": "21898dfd-9ac9-4e22-bb1d-7f12d0129e65" - } - ] - } - ] - } - """ - if VALID_API_STRING.match(uuids) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - blueprint = request.args.get("blueprint", None) - status = request.args.get("status", None) - compose_type = request.args.get("type", None) - - results = [] - errors = [] - - if uuids.strip() == '*': - queue_status_dict = queue_status(api.config["COMPOSER_CFG"], api=1) - queue_new = queue_status_dict["new"] - queue_running = queue_status_dict["run"] - candidates = queue_new + queue_running + build_status(api.config["COMPOSER_CFG"], api=1) - else: - candidates = [] - for uuid in [n.strip().lower() for n in uuids.split(",")]: - details = uuid_status(api.config["COMPOSER_CFG"], uuid, api=1) - if details is None: - errors.append({"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}) - else: - candidates.append(details) - - for details in candidates: - if blueprint is not None and details['blueprint'] != blueprint: - continue - - if status is not None and details['queue_status'] != status: - continue - - if compose_type is not None and details['compose_type'] != compose_type: - continue - - results.append(details) - - return jsonify(uuids=results, errors=errors) - -@v1_api.route("/compose/info", defaults={'uuid': ""}) -@v1_api.route("/compose/info/") 
-@checkparams([("uuid", "", "no UUID given")])
-def v1_compose_info(uuid):
-    """Return detailed info about a compose
-
-    **/api/v1/compose/info/<uuid>**
-
-    Get detailed information about the compose. The returned JSON string will
-    contain the following information:
-
-    * id - The uuid of the composition
-    * config - The configuration settings used to run Anaconda
-    * blueprint - The depsolved blueprint used to generate the kickstart
-    * commit - The (local) git commit hash for the blueprint used
-    * deps - The NEVRA of all of the dependencies used in the composition
-    * compose_type - The type of output generated (tar, iso, etc.)
-    * queue_status - The final status of the composition (FINISHED or FAILED)
-
-    Example::
-
-        {
-            "commit": "7078e521a54b12eae31c3fd028680da7a0815a4d",
-            "compose_type": "tar",
-            "config": {
-                "anaconda_args": "",
-                "armplatform": "",
-                "compress_args": [],
-                "compression": "xz",
-                "image_name": "root.tar.xz",
-                ...
-            },
-            "deps": {
-                "packages": [
-                    {
-                        "arch": "x86_64",
-                        "epoch": "0",
-                        "name": "acl",
-                        "release": "14.el7",
-                        "version": "2.2.51"
-                    }
-                ]
-            },
-            "id": "c30b7d80-523b-4a23-ad52-61b799739ce8",
-            "queue_status": "FINISHED",
-            "blueprint": {
-                "description": "An example kubernetes master",
-                ...
-            },
-            "uploads": [
-                {
-                    "creation_time": 1568150660.524401,
-                    "image_name": "glusterfs server",
-                    "image_path": "/var/lib/lorax/composer/results/c30b7d80-523b-4a23-ad52-61b799739ce8/disk.vhd",
-                    "provider_name": "azure",
-                    "settings": {
-                        "client_id": "need",
-                        "location": "need",
-                        "resource_group": "group",
-                        "secret": "need",
-                        "storage_account_name": "need",
-                        "storage_container": "need",
-                        "subscription_id": "need",
-                        "tenant": "need"
-                    },
-                    "status": "FAILED",
-                    "uuid": "21898dfd-9ac9-4e22-bb1d-7f12d0129e65"
-                }
-            ]
-        }
-    """
-    if VALID_API_STRING.match(uuid) is None:
-        return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400
-
-    try:
-        info = uuid_info(api.config["COMPOSER_CFG"], uuid, api=1)
-    except Exception as e:
-        return jsonify(status=False, errors=[{"id": COMPOSE_ERROR, "msg": str(e)}]), 400
-
-    if info is None:
-        return jsonify(status=False, errors=[{"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % uuid}]), 400
-    else:
-        return jsonify(**info)
-
-@v1_api.route("/compose/uploads/schedule", defaults={'compose_uuid': ""}, methods=["POST"])
-@v1_api.route("/compose/uploads/schedule/<compose_uuid>", methods=["POST"])
-@checkparams([("compose_uuid", "", "no compose UUID given")])
-def v1_compose_uploads_schedule(compose_uuid):
-    """Schedule an upload of a compose to a given cloud provider
-
-    **POST /api/v1/compose/uploads/schedule/<compose_uuid>**
-
-    The body can specify either a pre-existing profile to use (as returned by
-    `/uploads/providers`) or one-time use settings for the provider.
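    A hedged helper sketch (editor's addition) that builds either form of the
    body shown in the examples below; when both are given, ``profile`` wins,
    matching the precedence in the handler::

        def upload_body(image_name, provider, profile=None, settings=None):
            """Build the POST body for scheduling an upload."""
            body = {"image_name": image_name, "provider": provider}
            if profile is not None:
                body["profile"] = profile        # saved provider profile
            elif settings is not None:
                body["settings"] = settings      # one-time use settings
            else:
                raise ValueError("need a profile or settings")
            return body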
- - Example with upload profile:: - - { - "image_name": "My Image", - "provider": "azure", - "profile": "production-azure-settings" - } - - Example with upload settings:: - - { - "image_name": "My Image", - "provider": "azure", - "settings": { - "resource_group": "SOMEBODY", - "storage_account_name": "ONCE", - "storage_container": "TOLD", - "location": "ME", - "subscription_id": "THE", - "client_id": "WORLD", - "secret": "IS", - "tenant": "GONNA" - } - } - - Example response:: - - { - "status": true, - "upload_id": "572eb0d0-5348-4600-9666-14526ba628bb" - } - """ - if VALID_API_STRING.match(compose_uuid) is None: - error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"} - return jsonify(status=False, errors=[error]), 400 - - parsed = request.get_json(cache=False) - if not parsed: - return jsonify(status=False, errors=[{"id": MISSING_POST, "msg": "Missing POST body"}]), 400 - - try: - image_name = parsed["image_name"] - provider_name = parsed["provider"] - if "profile" in parsed: - # Load a specific profile for this provider - profile = parsed["profile"] - settings = load_settings(api.config["COMPOSER_CFG"]["upload"], provider_name, profile) - else: - settings = parsed["settings"] - except KeyError as e: - error = {"id": UPLOAD_ERROR, "msg": f'Missing parameter {str(e)}!'} - return jsonify(status=False, errors=[error]), 400 - try: - compose_type = uuid_status(api.config["COMPOSER_CFG"], compose_uuid)["compose_type"] - provider = resolve_provider(api.config["COMPOSER_CFG"]["upload"], provider_name) - if "supported_types" in provider and compose_type not in provider["supported_types"]: - raise RuntimeError( - f'Type "{compose_type}" is not supported by provider "{provider_name}"!' - ) - except Exception as e: - return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(e)}]), 400 - - try: - upload_id = uuid_schedule_upload( - api.config["COMPOSER_CFG"], - compose_uuid, - provider_name, - image_name, - settings - ) - except RuntimeError as e: - return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(e)}]), 400 - return jsonify(status=True, upload_id=upload_id) - -@v1_api.route("/upload/delete", defaults={"upload_uuid": ""}, methods=["DELETE"]) -@v1_api.route("/upload/delete/", methods=["DELETE"]) -@checkparams([("upload_uuid", "", "no upload UUID given")]) -def v1_compose_uploads_delete(upload_uuid): - """Delete an upload and disassociate it from its compose - - **DELETE /api/v1/upload/delete/** - - Example response:: - - { - "status": true, - "upload_id": "572eb0d0-5348-4600-9666-14526ba628bb" - } - """ - if VALID_API_STRING.match(upload_uuid) is None: - error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"} - return jsonify(status=False, errors=[error]), 400 - - try: - uuid_remove_upload(api.config["COMPOSER_CFG"], upload_uuid) - delete_upload(api.config["COMPOSER_CFG"]["upload"], upload_uuid) - except RuntimeError as error: - return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}]) - return jsonify(status=True, upload_id=upload_uuid) - -@v1_api.route("/upload/info", defaults={"upload_uuid": ""}) -@v1_api.route("/upload/info/") -@checkparams([("upload_uuid", "", "no UUID given")]) -def v1_upload_info(upload_uuid): - """Returns information about a given upload - - **GET /api/v1/upload/info/** - - Example response:: - - { - "status": true, - "upload": { - "creation_time": 1565620940.069004, - "image_name": "My Image", - "image_path": "/var/lib/lorax/composer/results/b6218e8f-0fa2-48ec-9394-f5c2918544c4/disk.vhd", - 
"provider_name": "azure", - "settings": { - "resource_group": "SOMEBODY", - "storage_account_name": "ONCE", - "storage_container": "TOLD", - "location": "ME", - "subscription_id": "THE", - "client_id": "WORLD", - "secret": "IS", - "tenant": "GONNA" - }, - "status": "FAILED", - "uuid": "b637c411-9d9d-4279-b067-6c8d38e3b211" - } - } - """ - if VALID_API_STRING.match(upload_uuid) is None: - return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400 - - try: - upload = get_upload(api.config["COMPOSER_CFG"]["upload"], upload_uuid).summary() - except RuntimeError as error: - return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}]) - return jsonify(status=True, upload=upload) - -@v1_api.route("/upload/log", defaults={"upload_uuid": ""}) -@v1_api.route("/upload/log/") -@checkparams([("upload_uuid", "", "no UUID given")]) -def v1_upload_log(upload_uuid): - """Returns an upload's log - - **GET /api/v1/upload/log/** - - Example response:: - - { - "status": true, - "upload_id": "b637c411-9d9d-4279-b067-6c8d38e3b211", - "log": "< PLAY [localhost] >..." - } - """ - if VALID_API_STRING.match(upload_uuid) is None: - error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"} - return jsonify(status=False, errors=[error]), 400 - - try: - upload = get_upload(api.config["COMPOSER_CFG"]["upload"], upload_uuid) - except RuntimeError as error: - return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}]) - return jsonify(status=True, upload_id=upload_uuid, log=upload.upload_log) - -@v1_api.route("/upload/reset", defaults={"upload_uuid": ""}, methods=["POST"]) -@v1_api.route("/upload/reset/", methods=["POST"]) -@checkparams([("upload_uuid", "", "no UUID given")]) -def v1_upload_reset(upload_uuid): - """Reset an upload so it can be attempted again - - **POST /api/v1/upload/reset/** - - Optionally pass in a new image name and/or new settings. 
- - Example request:: - - { - "image_name": "My renamed image", - "settings": { - "resource_group": "ROLL", - "storage_account_name": "ME", - "storage_container": "I", - "location": "AIN'T", - "subscription_id": "THE", - "client_id": "SHARPEST", - "secret": "TOOL", - "tenant": "IN" - } - } - - Example response:: - - { - "status": true, - "upload_id": "c75d5d62-9d26-42fc-a8ef-18bb14679fc7" - } - """ - if VALID_API_STRING.match(upload_uuid) is None: - error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"} - return jsonify(status=False, errors=[error]), 400 - - parsed = request.get_json(cache=False) - image_name = parsed.get("image_name") if parsed else None - settings = parsed.get("settings") if parsed else None - - try: - reset_upload(api.config["COMPOSER_CFG"]["upload"], upload_uuid, image_name, settings) - except RuntimeError as error: - return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}]) - return jsonify(status=True, upload_id=upload_uuid) - -@v1_api.route("/upload/cancel", defaults={"upload_uuid": ""}, methods=["DELETE"]) -@v1_api.route("/upload/cancel/", methods=["DELETE"]) -@checkparams([("upload_uuid", "", "no UUID given")]) -def v1_upload_cancel(upload_uuid): - """Cancel an upload that is either queued or in progress - - **DELETE /api/v1/upload/cancel/** - - Example response:: - - { - "status": true, - "upload_id": "037a3d56-b421-43e9-9935-c98350c89996" - } - """ - if VALID_API_STRING.match(upload_uuid) is None: - error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"} - return jsonify(status=False, errors=[error]), 400 - - try: - cancel_upload(api.config["COMPOSER_CFG"]["upload"], upload_uuid) - except RuntimeError as error: - return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}]) - return jsonify(status=True, upload_id=upload_uuid) - -@v1_api.route("/upload/providers") -def v1_upload_providers(): - """Return the information about all upload providers, including their - display names, expected settings, and saved profiles. Refer to the - `resolve_provider` function. - - **GET /api/v1/upload/providers** - - Example response:: - - { - "providers": { - "azure": { - "display": "Azure", - "profiles": { - "default": { - "client_id": "example", - ... - } - }, - "settings-info": { - "client_id": { - "display": "Client ID", - "placeholder": "", - "regex": "", - "type": "string" - }, - ... - }, - "supported_types": ["vhd"] - }, - ... - } - } - """ - - ucfg = api.config["COMPOSER_CFG"]["upload"] - - provider_names = list_providers(ucfg) - - def get_provider_info(provider_name): - provider = resolve_provider(ucfg, provider_name) - provider["profiles"] = load_profiles(ucfg, provider_name) - return provider - - providers = {provider_name: get_provider_info(provider_name) - for provider_name in provider_names} - return jsonify(status=True, providers=providers) - -@v1_api.route("/upload/providers/save", methods=["POST"]) -def v1_providers_save(): - """Save provider settings as a profile for later use - - **POST /api/v1/upload/providers/save** - - Example request:: - - { - "provider": "azure", - "profile": "my-profile", - "settings": { - "resource_group": "SOMEBODY", - "storage_account_name": "ONCE", - "storage_container": "TOLD", - "location": "ME", - "subscription_id": "THE", - "client_id": "WORLD", - "secret": "IS", - "tenant": "GONNA" - } - } - - Saving to an existing profile will overwrite it. 
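    A hedged sketch (editor's addition) of the save-then-reference flow from a
    client's point of view; the profile name and settings are placeholders::

        save_body = {
            "provider": "azure",
            "profile": "my-profile",   # an existing profile is overwritten
            "settings": {"resource_group": "RG", "secret": "S3CR3T"},
        }
        # A later compose or schedule request can then use the saved profile:
        upload_ref = {"image_name": "My Image", "provider": "azure",
                      "profile": "my-profile"}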
- - Example response:: - - { - "status": true - } - """ - parsed = request.get_json(cache=False) - - if parsed is None: - return jsonify(status=False, errors=[{"id": MISSING_POST, "msg": "Missing POST body"}]), 400 - - try: - provider_name = parsed["provider"] - profile = parsed["profile"] - settings = parsed["settings"] - except KeyError as e: - error = {"id": UPLOAD_ERROR, "msg": f'Missing parameter {str(e)}!'} - return jsonify(status=False, errors=[error]), 400 - try: - save_settings(api.config["COMPOSER_CFG"]["upload"], provider_name, profile, settings) - except Exception as e: - error = {"id": UPLOAD_ERROR, "msg": str(e)} - return jsonify(status=False, errors=[error]) - return jsonify(status=True) - -@v1_api.route("/upload/providers/delete", defaults={"provider_name": "", "profile": ""}, methods=["DELETE"]) -@v1_api.route("/upload/providers/delete//", methods=["DELETE"]) -@checkparams([("provider_name", "", "no provider name given"), ("profile", "", "no profile given")]) -def v1_providers_delete(provider_name, profile): - """Delete a provider's profile settings - - **DELETE /api/v1/upload/providers/delete//** - - Example response:: - - { - "status": true - } - """ - if None in (VALID_API_STRING.match(provider_name), VALID_API_STRING.match(profile)): - error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"} - return jsonify(status=False, errors=[error]), 400 - - try: - delete_profile(api.config["COMPOSER_CFG"]["upload"], provider_name, profile) - except Exception as e: - error = {"id": UPLOAD_ERROR, "msg": str(e)} - return jsonify(status=False, errors=[error]) - return jsonify(status=True) diff --git a/src/pylorax/api/workspace.py b/src/pylorax/api/workspace.py deleted file mode 100644 index 02b3e56c..00000000 --- a/src/pylorax/api/workspace.py +++ /dev/null @@ -1,129 +0,0 @@ -# -# Copyright (C) 2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-#
-import os
-
-from pylorax.api.recipes import recipe_filename, recipe_from_toml, RecipeFileError
-from pylorax.sysutils import joinpaths
-
-
-def workspace_dir(repo, branch):
-    """Create the workspace's path from a Repository and branch
-
-    :param repo: Open repository
-    :type repo: Git.Repository
-    :param branch: Branch name
-    :type branch: str
-    :returns: The path to the branch's workspace directory
-    :rtype: str
-
-    """
-    repo_path = repo.get_location().get_path()
-    return joinpaths(repo_path, "workspace", branch)
-
-
-def workspace_read(repo, branch, recipe_name):
-    """Read a Recipe from the branch's workspace
-
-    :param repo: Open repository
-    :type repo: Git.Repository
-    :param branch: Branch name
-    :type branch: str
-    :param recipe_name: The name of the recipe
-    :type recipe_name: str
-    :returns: The workspace copy of the recipe, or None if it doesn't exist
-    :rtype: Recipe or None
-    :raises: RecipeFileError
-    """
-    ws_dir = workspace_dir(repo, branch)
-    if not os.path.isdir(ws_dir):
-        os.makedirs(ws_dir)
-    filename = joinpaths(ws_dir, recipe_filename(recipe_name))
-    if not os.path.exists(filename):
-        return None
-    try:
-        with open(filename, 'rb') as f:
-            recipe = recipe_from_toml(f.read().decode("UTF-8"))
-    except IOError:
-        raise RecipeFileError
-    return recipe
-
-
-def workspace_write(repo, branch, recipe):
-    """Write a recipe to the workspace
-
-    :param repo: Open repository
-    :type repo: Git.Repository
-    :param branch: Branch name
-    :type branch: str
-    :param recipe: The recipe to write to the workspace
-    :type recipe: Recipe
-    :returns: None
-    :raises: IO related errors
-    """
-    ws_dir = workspace_dir(repo, branch)
-    if not os.path.isdir(ws_dir):
-        os.makedirs(ws_dir)
-    filename = joinpaths(ws_dir, recipe.filename)
-    with open(filename, 'wb') as f:
-        f.write(recipe.toml().encode("UTF-8"))
-
-
-def workspace_filename(repo, branch, recipe_name):
-    """Return the path and filename of the workspace recipe
-
-    :param repo: Open repository
-    :type repo: Git.Repository
-    :param branch: Branch name
-    :type branch: str
-    :param recipe_name: The name of the recipe
-    :type recipe_name: str
-    :returns: workspace recipe path and filename
-    :rtype: str
-    """
-    ws_dir = workspace_dir(repo, branch)
-    return joinpaths(ws_dir, recipe_filename(recipe_name))
-
-
-def workspace_exists(repo, branch, recipe_name):
-    """Return True if the workspace recipe exists
-
-    :param repo: Open repository
-    :type repo: Git.Repository
-    :param branch: Branch name
-    :type branch: str
-    :param recipe_name: The name of the recipe
-    :type recipe_name: str
-    :returns: True if the file exists
-    :rtype: bool
-    """
-    return os.path.exists(workspace_filename(repo, branch, recipe_name))
-
-
-def workspace_delete(repo, branch, recipe_name):
-    """Delete the recipe from the workspace
-
-    :param repo: Open repository
-    :type repo: Git.Repository
-    :param branch: Branch name
-    :type branch: str
-    :param recipe_name: The name of the recipe
-    :type recipe_name: str
-    :returns: None
-    :raises: IO related errors
-    """
-    filename = workspace_filename(repo, branch, recipe_name)
-    if os.path.exists(filename):
-        os.unlink(filename)
diff --git a/src/sbin/lorax-composer b/src/sbin/lorax-composer
deleted file mode 100755
index f3ecb6cd..00000000
--- a/src/sbin/lorax-composer
+++ /dev/null
@@ -1,289 +0,0 @@
-#!/usr/bin/python3
-#
-# lorax-composer
-#
-# Copyright (C) 2017-2018 Red Hat, Inc.
-# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -import logging -log = logging.getLogger("lorax-composer") -program_log = logging.getLogger("program") -pylorax_log = logging.getLogger("pylorax") -server_log = logging.getLogger("server") -dnf_log = logging.getLogger("dnf") -lifted_log = logging.getLogger("lifted") - -import grp -import os -import pwd -import sys -import subprocess -import tempfile -from threading import Lock -from gevent import socket -from gevent.pywsgi import WSGIServer - -from pylorax import vernum, log_selinux_state -from pylorax.api.cmdline import lorax_composer_parser -from pylorax.api.config import configure, make_dnf_dirs, make_queue_dirs, make_owned_dir -from pylorax.api.compose import test_templates -from pylorax.api.dnfbase import DNFLock -from pylorax.api.queue import start_queue_monitor -from pylorax.api.recipes import open_or_create_repo, commit_recipe_directory -from pylorax.api.server import server, GitLock - -import lifted.config -from lifted.queue import start_upload_monitor - -VERSION = "{0}-{1}".format(os.path.basename(sys.argv[0]), vernum) - -def setup_logging(logfile): - # Setup logging to console and to logfile - log.setLevel(logging.DEBUG) - pylorax_log.setLevel(logging.DEBUG) - lifted_log.setLevel(logging.DEBUG) - - sh = logging.StreamHandler() - sh.setLevel(logging.INFO) - fmt = logging.Formatter("%(asctime)s: %(message)s") - sh.setFormatter(fmt) - log.addHandler(sh) - pylorax_log.addHandler(sh) - lifted_log.addHandler(sh) - - fh = logging.FileHandler(filename=logfile) - fh.setLevel(logging.DEBUG) - fmt = logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s") - fh.setFormatter(fmt) - log.addHandler(fh) - pylorax_log.addHandler(fh) - lifted_log.addHandler(fh) - - # External program output log - program_log.setLevel(logging.DEBUG) - logfile = os.path.abspath(os.path.dirname(logfile))+"/program.log" - fh = logging.FileHandler(filename=logfile) - fh.setLevel(logging.DEBUG) - fmt = logging.Formatter("%(asctime)s %(levelname)s: %(message)s") - fh.setFormatter(fmt) - program_log.addHandler(fh) - - # Server request logging - server_log.setLevel(logging.DEBUG) - logfile = os.path.abspath(os.path.dirname(logfile))+"/server.log" - fh = logging.FileHandler(filename=logfile) - fh.setLevel(logging.DEBUG) - server_log.addHandler(fh) - - # DNF logging - dnf_log.setLevel(logging.DEBUG) - logfile = os.path.abspath(os.path.dirname(logfile))+"/dnf.log" - fh = logging.FileHandler(filename=logfile) - fh.setLevel(logging.DEBUG) - fmt = logging.Formatter("%(asctime)s %(levelname)s: %(message)s") - fh.setFormatter(fmt) - dnf_log.addHandler(fh) - - -class LogWrapper(object): - """Wrapper for the WSGIServer which only calls write()""" - def __init__(self, log_obj): - self.log = log_obj - - def write(self, msg): - """Log everything as INFO""" - self.log.info(msg.strip()) - -def make_pidfile(pid_path="/run/lorax-composer.pid"): - """Check for a running instance of lorax-composer - - :param pid_path: 
Path to the pid file - :type pid_path: str - :returns: False if there is already a running lorax-composer, True otherwise - :rtype: bool - - This will look for an existing pid file, and if found read the PID and check to - see if it is really lorax-composer running, or if it is a stale pid. - It will create a new pid file if there isn't already one, or if the PID is stale. - """ - if os.path.exists(pid_path): - try: - pid = int(open(pid_path, "r").read()) - cmdline = open("/proc/%s/cmdline" % pid, "r").read() - if "lorax-composer" in cmdline: - return False - except (IOError, ValueError): - pass - - open(pid_path, "w").write(str(os.getpid())) - return True - -if __name__ == '__main__': - # parse the arguments - opts = lorax_composer_parser().parse_args() - - if opts.showver: - print(VERSION) - sys.exit(0) - - tempfile.tempdir = opts.tmp - logpath = os.path.abspath(os.path.dirname(opts.logfile)) - if not os.path.isdir(logpath): - os.makedirs(logpath) - setup_logging(opts.logfile) - log.debug("opts=%s", opts) - log_selinux_state() - - if not make_pidfile(): - log.error("PID file exists, lorax-composer already running. Quitting.") - sys.exit(1) - - errors = [] - # Check to make sure the user exists and get its uid - try: - uid = pwd.getpwnam(opts.user).pw_uid - except KeyError: - errors.append("Missing user '%s'" % opts.user) - - # Check to make sure the group exists and get its gid - try: - gid = grp.getgrnam(opts.group).gr_gid - except KeyError: - errors.append("Missing group '%s'" % opts.group) - - # No point in continuing if there are uid or gid errors - if errors: - for e in errors: - log.error(e) - sys.exit(1) - - errors = [] - # Check the socket path to make sure it exists, and that ownership and permissions are correct. - socket_dir = os.path.dirname(opts.socket) - if not os.path.exists(socket_dir): - # Create the directory and set permissions and ownership - os.makedirs(socket_dir, 0o750) - os.chown(socket_dir, 0, gid) - - sockdir_stat = os.stat(socket_dir) - if sockdir_stat.st_mode & 0o007 != 0: - errors.append("Incorrect permissions on %s, no 'other' permissions are allowed." 
% socket_dir) - - if sockdir_stat.st_gid != gid or sockdir_stat.st_uid != 0: - errors.append("%s should be owned by root:%s" % (socket_dir, opts.group)) - - # No point in continuing if there are ownership or permission errors - if errors: - for e in errors: - log.error(e) - sys.exit(1) - - server.config["COMPOSER_CFG"] = configure(conf_file=opts.config) - server.config["COMPOSER_CFG"].set("composer", "tmp", opts.tmp) - - # If the user passed in a releasever set it in the configuration - if opts.releasever: - server.config["COMPOSER_CFG"].set("composer", "releasever", opts.releasever) - - # Override the default sharedir - if opts.sharedir: - server.config["COMPOSER_CFG"].set("composer", "share_dir", opts.sharedir) - - # Override the config file's DNF proxy setting - if opts.proxy: - server.config["COMPOSER_CFG"].set("dnf", "proxy", opts.proxy) - - # Override using system repos - if opts.no_system_repos: - server.config["COMPOSER_CFG"].set("repos", "use_system_repos", "0") - - # Setup the lifted configuration settings - lifted.config.configure(server.config["COMPOSER_CFG"]) - - # Make sure the queue paths are setup correctly, exit on errors - errors = make_queue_dirs(server.config["COMPOSER_CFG"], gid) - if errors: - for e in errors: - log.error(e) - sys.exit(1) - - # Make sure dnf directories are created (owned by user:group) - make_dnf_dirs(server.config["COMPOSER_CFG"], uid, gid) - - # Make sure the git repo can be accessed by the API uid/gid - if os.path.exists(opts.BLUEPRINTS): - repodir_stat = os.stat(opts.BLUEPRINTS) - if repodir_stat.st_gid != gid or repodir_stat.st_uid != uid: - subprocess.call(["chown", "-R", "%s:%s" % (opts.user, opts.group), opts.BLUEPRINTS]) - else: - make_owned_dir(opts.BLUEPRINTS, uid, gid) - - # Did systemd pass any extra fds (for socket activation)? - try: - fds = int(os.environ['LISTEN_FDS']) - except (ValueError, KeyError): - fds = 0 - - if fds == 1: - # Inherit the fd passed by systemd - listener = socket.fromfd(3, socket.AF_UNIX, socket.SOCK_STREAM) - elif fds > 1: - log.error("lorax-composer only supports inheriting 1 fd from systemd.") - sys.exit(1) - else: - # Setup the Unix Domain Socket, remove old one, set ownership and permissions - if os.path.exists(opts.socket): - os.unlink(opts.socket) - listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - listener.bind(opts.socket) - os.chmod(opts.socket, 0o660) - os.chown(opts.socket, 0, gid) - listener.listen(socket.SOMAXCONN) - - start_queue_monitor(server.config["COMPOSER_CFG"], uid, gid) - - start_upload_monitor(server.config["COMPOSER_CFG"]["upload"]) - - # Change user and group on the main process. Note that this still happens even if - # --user and --group were passed in, but changing to the same user should be fine. - os.setgid(gid) - os.setuid(uid) - log.debug("user is now %s:%s", os.getresuid(), os.getresgid()) - # Switch to a home directory we can access (libgit2 uses this to look for .gitconfig) - os.environ["HOME"] = server.config["COMPOSER_CFG"].get("composer", "lib_dir") - - # Setup access to the git repo - server.config["REPO_DIR"] = opts.BLUEPRINTS - repo = open_or_create_repo(server.config["REPO_DIR"]) - server.config["GITLOCK"] = GitLock(repo=repo, lock=Lock(), dir=opts.BLUEPRINTS) - - # Import example blueprints - commit_recipe_directory(server.config["GITLOCK"].repo, "master", opts.BLUEPRINTS) - - # Get a dnf.Base to share with the requests - try: - server.config["DNFLOCK"] = DNFLock(server.config["COMPOSER_CFG"]) - except RuntimeError: - # Error has already been logged. 
Just exit cleanly. - sys.exit(1) - - # Depsolve the templates and make a note of the failures for /api/status to report - with server.config["DNFLOCK"].lock: - server.config["TEMPLATE_ERRORS"] = test_templates(server.config["DNFLOCK"].dbo, server.config["COMPOSER_CFG"].get("composer", "share_dir")) - - log.info("Starting %s on %s with blueprints from %s", VERSION, opts.socket, opts.BLUEPRINTS) - http_server = WSGIServer(listener, server, log=LogWrapper(server_log)) - # The server writes directly to a file object, so point to our log directory - http_server.serve_forever() diff --git a/systemd/lorax-composer.conf b/systemd/lorax-composer.conf deleted file mode 100644 index a2107d9c..00000000 --- a/systemd/lorax-composer.conf +++ /dev/null @@ -1 +0,0 @@ -d /run/weldr 750 root weldr diff --git a/systemd/lorax-composer.service b/systemd/lorax-composer.service deleted file mode 100644 index 1ec3cc93..00000000 --- a/systemd/lorax-composer.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=Lorax Image Composer API Server -After=network-online.target -Wants=network-online.target -Documentation=man:lorax-composer(1),https://weldr.io/lorax/lorax-composer.html - -[Service] -User=root -Type=simple -PIDFile=/run/lorax-composer.pid -ExecStartPre=/usr/bin/systemd-tmpfiles --create /usr/lib/tmpfiles.d/lorax-composer.conf -ExecStart=/usr/sbin/lorax-composer /var/lib/lorax/composer/blueprints/ - -[Install] -WantedBy=multi-user.target diff --git a/systemd/lorax-composer.socket b/systemd/lorax-composer.socket deleted file mode 100644 index 2fb58d51..00000000 --- a/systemd/lorax-composer.socket +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=lorax-composer socket activation -Documentation=man:lorax-composer(1),https://weldr.io/lorax/lorax-composer.html - -[Socket] -ListenStream=/run/weldr/api.socket -SocketUser=root -SocketGroup=weldr -SocketMode=0660 -DirectoryMode=0750 - -[Install] -WantedBy=sockets.target diff --git a/test-packages b/test-packages index 9c1b2bdc..bc5bf20f 100644 --- a/test-packages +++ b/test-packages @@ -2,16 +2,12 @@ anaconda-tui beakerlib e2fsprogs git -libgit2-glib libselinux-python3 make pbzip2 pykickstart -python3-ansible-runner python3-coverage python3-coveralls -python3-flask -python3-gevent python3-librepo python3-magic python3-mako @@ -34,4 +30,3 @@ squashfs-tools sudo which xz-lzma-compat -yamllint diff --git a/tests/composer/test_utilities.py b/tests/composer/test_utilities.py index d2a9add8..6fc54498 100644 --- a/tests/composer/test_utilities.py +++ b/tests/composer/test_utilities.py @@ -16,10 +16,11 @@ # import unittest -from pylorax.api.errors import INVALID_CHARS from composer.cli.utilities import argify, toml_filename, frozen_toml_filename, packageNEVRA from composer.cli.utilities import handle_api_result, get_arg +INVALID_CHARS = "InvalidChars" + class CliUtilitiesTest(unittest.TestCase): def test_argify(self): """Convert an optionally comma-separated cmdline into a list of args""" diff --git a/tests/lifted/__init__.py b/tests/lifted/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/lifted/profiles.py b/tests/lifted/profiles.py deleted file mode 100644 index 75b1d4c1..00000000 --- a/tests/lifted/profiles.py +++ /dev/null @@ -1,54 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# - -# test profile settings for each provider -test_profiles = { - "aws": ["aws-profile", { - "aws_access_key": "theaccesskey", - "aws_secret_key": "thesecretkey", - "aws_region": "us-east-1", - "aws_bucket": "composer-mops" - }], - "azure": ["azure-profile", { - "resource_group": "production", - "storage_account_name": "HomerSimpson", - "storage_container": "plastic", - "subscription_id": "SpringfieldNuclear", - "client_id": "DonutGuy", - "secret": "I Like sprinkles", - "tenant": "Bart", - "location": "Springfield" - }], - "dummy": ["dummy-profile", {}], - "openstack": ["openstack-profile", { - "auth_url": "https://localhost/auth/url", - "username": "ChuckBurns", - "password": "Excellent!", - "project_name": "Springfield Nuclear", - "user_domain_name": "chuck.burns.localhost", - "project_domain_name": "springfield.burns.localhost", - "is_public": True - }], - "vsphere": ["vsphere-profile", { - "datacenter": "Lisa's Closet", - "datastore": "storage-crate-alpha", - "host": "marge", - "folder": "the.green.one", - "username": "LisaSimpson", - "password": "EmbraceNothingnes" - }] -} diff --git a/tests/lifted/test_config.py b/tests/lifted/test_config.py deleted file mode 100644 index c509a8f6..00000000 --- a/tests/lifted/test_config.py +++ /dev/null @@ -1,52 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -import unittest - -import lifted.config -import pylorax.api.config - -class ConfigTestCase(unittest.TestCase): - def test_lifted_config(self): - """Test lifted config setup""" - config = pylorax.api.config.configure(test_config=True) - lifted.config.configure(config) - - self.assertTrue(config.get("upload", "providers_dir").startswith(config.get("composer", "share_dir"))) - self.assertTrue(config.get("upload", "queue_dir").startswith(config.get("composer", "lib_dir"))) - self.assertTrue(config.get("upload", "settings_dir").startswith(config.get("composer", "lib_dir"))) - - def test_lifted_sharedir_config(self): - """Test lifted config setup with custom share_dir""" - config = pylorax.api.config.configure(test_config=True) - config.set("composer", "share_dir", "/custom/share/path") - lifted.config.configure(config) - - self.assertEqual(config.get("composer", "share_dir"), "/custom/share/path") - self.assertTrue(config.get("upload", "providers_dir").startswith(config.get("composer", "share_dir"))) - self.assertTrue(config.get("upload", "queue_dir").startswith(config.get("composer", "lib_dir"))) - self.assertTrue(config.get("upload", "settings_dir").startswith(config.get("composer", "lib_dir"))) - - def test_lifted_libdir_config(self): - """Test lifted config setup with custom lib_dir""" - config = pylorax.api.config.configure(test_config=True) - config.set("composer", "lib_dir", "/custom/lib/path") - lifted.config.configure(config) - - self.assertEqual(config.get("composer", "lib_dir"), "/custom/lib/path") - self.assertTrue(config.get("upload", "providers_dir").startswith(config.get("composer", "share_dir"))) - self.assertTrue(config.get("upload", "queue_dir").startswith(config.get("composer", "lib_dir"))) - self.assertTrue(config.get("upload", "settings_dir").startswith(config.get("composer", "lib_dir"))) diff --git a/tests/lifted/test_providers.py b/tests/lifted/test_providers.py deleted file mode 100644 index d87cf6f4..00000000 --- a/tests/lifted/test_providers.py +++ /dev/null @@ -1,157 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -import os -import shutil -import tempfile -import unittest - -import lifted.config -from lifted.providers import list_providers, resolve_provider, resolve_playbook_path, save_settings -from lifted.providers import load_profiles, validate_settings, load_settings, delete_profile -from lifted.providers import _get_profile_path -import pylorax.api.config -from pylorax.sysutils import joinpaths - -from tests.lifted.profiles import test_profiles - -class ProvidersTestCase(unittest.TestCase): - @classmethod - def setUpClass(self): - self.root_dir = tempfile.mkdtemp(prefix="lifted.test.") - self.config = pylorax.api.config.configure(root_dir=self.root_dir, test_config=True) - self.config.set("composer", "share_dir", os.path.realpath("./share/")) - lifted.config.configure(self.config) - - @classmethod - def tearDownClass(self): - shutil.rmtree(self.root_dir) - - def test_get_profile_path(self): - """Make sure that _get_profile_path strips path elements from the input""" - path = _get_profile_path(self.config["upload"], "aws", "staging-settings", exists=False) - self.assertEqual(path, os.path.abspath(joinpaths(self.config["upload"]["settings_dir"], "aws/staging-settings.toml"))) - - path = _get_profile_path(self.config["upload"], "../../../../foo/bar/aws", "/not/my/path/staging-settings", exists=False) - self.assertEqual(path, os.path.abspath(joinpaths(self.config["upload"]["settings_dir"], "aws/staging-settings.toml"))) - - def test_list_providers(self): - p = list_providers(self.config["upload"]) - self.assertEqual(p, ['aws', 'dummy', 'openstack', 'vsphere']) - - def test_resolve_provider(self): - for p in list_providers(self.config["upload"]): - print(p) - info = resolve_provider(self.config["upload"], p) - self.assertTrue("display" in info) - self.assertTrue("supported_types" in info) - self.assertTrue("settings-info" in info) - - def test_resolve_playbook_path(self): - for p in list_providers(self.config["upload"]): - print(p) - self.assertTrue(len(resolve_playbook_path(self.config["upload"], p)) > 0) - - def test_resolve_playbook_path_error(self): - with self.assertRaises(RuntimeError): - resolve_playbook_path(self.config["upload"], "foobar") - - def test_validate_settings(self): - for p in list_providers(self.config["upload"]): - print(p) - validate_settings(self.config["upload"], p, test_profiles[p][1]) - - def test_validate_settings_errors(self): - with self.assertRaises(ValueError): - validate_settings(self.config["upload"], "dummy", test_profiles["dummy"][1], image_name="") - - with self.assertRaises(ValueError): - validate_settings(self.config["upload"], "aws", {"wrong-key": "wrong value"}) - - with self.assertRaises(ValueError): - validate_settings(self.config["upload"], "aws", {"secret": False}) - - # TODO - test regex, needs a provider with a regex - - def test_save_settings(self): - """Test saving profiles""" - for p in list_providers(self.config["upload"]): - print(p) - save_settings(self.config["upload"], p, test_profiles[p][0], test_profiles[p][1]) - - profile_dir = joinpaths(self.config.get("upload", "settings_dir"), p, test_profiles[p][0]+".toml") - self.assertTrue(os.path.exists(profile_dir)) - - # This *must* run after test_save_settings, _zz_ ensures that happens - def test_zz_load_profiles(self): - """Test loading profiles""" - for p in list_providers(self.config["upload"]): - print(p) - profile = load_profiles(self.config["upload"], p) - self.assertTrue(test_profiles[p][0] in profile) - - # This *must* run after test_save_settings, _zz_ ensures that happens - def 
test_zz_load_settings_errors(self):
-        """Test returning the correct errors for missing profiles and providers"""
-        with self.assertRaises(ValueError):
-            load_settings(self.config["upload"], "", "")
-
-        with self.assertRaises(ValueError):
-            load_settings(self.config["upload"], "", "default")
-
-        with self.assertRaises(ValueError):
-            load_settings(self.config["upload"], "aws", "")
-
-        with self.assertRaises(RuntimeError):
-            load_settings(self.config["upload"], "foo", "default")
-
-        with self.assertRaises(RuntimeError):
-            load_settings(self.config["upload"], "aws", "missing-test")
-
-    # This *must* run after test_save_settings, _zz_ ensures that happens
-    def test_zz_load_settings(self):
-        """Test loading settings"""
-        for p in list_providers(self.config["upload"]):
-            settings = load_settings(self.config["upload"], p, test_profiles[p][0])
-            self.assertEqual(settings, test_profiles[p][1])
-
-    # This *must* run after all the save and load tests, but *before* the actual delete test
-    # _zz_ ensures this happens
-    def test_zz_delete_settings_errors(self):
-        """Test raising the correct errors when deleting"""
-        with self.assertRaises(ValueError):
-            delete_profile(self.config["upload"], "", "")
-
-        with self.assertRaises(ValueError):
-            delete_profile(self.config["upload"], "", "default")
-
-        with self.assertRaises(ValueError):
-            delete_profile(self.config["upload"], "aws", "")
-
-        with self.assertRaises(RuntimeError):
-            delete_profile(self.config["upload"], "aws", "missing-test")
-
-    # This *must* run after all the save and load tests, _zzz_ ensures this happens
-    def test_zzz_delete_settings(self):
-        """Test deleting a saved profile"""
-        # Ensure the profile is really there
-        settings = load_settings(self.config["upload"], "aws", test_profiles["aws"][0])
-        self.assertEqual(settings, test_profiles["aws"][1])
-
-        delete_profile(self.config["upload"], "aws", test_profiles["aws"][0])
-
-        with self.assertRaises(RuntimeError):
-            load_settings(self.config["upload"], "aws", test_profiles["aws"][0])
diff --git a/tests/lifted/test_queue.py b/tests/lifted/test_queue.py
deleted file mode 100644
index 7491cb06..00000000
--- a/tests/lifted/test_queue.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#
-# Copyright (C) 2019 Red Hat, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-import os
-import shutil
-import tempfile
-import unittest
-
-import lifted.config
-from lifted.providers import list_providers
-from lifted.queue import _write_callback, create_upload, get_all_uploads, get_upload, get_uploads
-from lifted.queue import ready_upload, reset_upload, cancel_upload
-import pylorax.api.config
-
-from tests.lifted.profiles import test_profiles
-
-class QueueTestCase(unittest.TestCase):
-    @classmethod
-    def setUpClass(self):
-        self.root_dir = tempfile.mkdtemp(prefix="lifted.test.")
-        self.config = pylorax.api.config.configure(root_dir=self.root_dir, test_config=True)
-        self.config.set("composer", "share_dir", os.path.realpath("./share/"))
-        lifted.config.configure(self.config)
-
-        self.upload_uuids = []
-
-    @classmethod
-    def tearDownClass(self):
-        shutil.rmtree(self.root_dir)
-
-    # This should run first, it writes uploads to the queue directory
-    def test_00_create_upload(self):
-        """Test creating an upload for each provider"""
-        for p in list_providers(self.config["upload"]):
-            print(p)
-            upload = create_upload(self.config["upload"], p, "test-image", test_profiles[p][1])
-            summary = upload.summary()
-            self.assertEqual(summary["provider_name"], p)
-            self.assertEqual(summary["image_name"], "test-image")
-            self.assertEqual(summary["status"], "WAITING")
-
-            self.upload_uuids.append(summary["uuid"])
-        self.assertTrue(len(self.upload_uuids) > 0)
-        self.assertEqual(len(self.upload_uuids), len(list_providers(self.config["upload"])))
-
-    def test_01_get_all_uploads(self):
-        """Test listing all the uploads"""
-        uploads = get_all_uploads(self.config["upload"])
-        # Should be one upload per provider
-        providers = sorted([u.provider_name for u in uploads])
-        self.assertEqual(providers, list_providers(self.config["upload"]))
-
-    def test_02_get_upload(self):
-        """Test listing specific uploads by uuid"""
-        for uuid in self.upload_uuids:
-            upload = get_upload(self.config["upload"], uuid)
-            self.assertEqual(upload.uuid, uuid)
-
-    def test_02_get_upload_error(self):
-        """Test listing an unknown upload uuid"""
-        with self.assertRaises(RuntimeError):
-            get_upload(self.config["upload"], "not-a-valid-uuid")
-
-    def test_03_get_uploads(self):
-        """Test listing multiple uploads by uuid"""
-        uploads = get_uploads(self.config["upload"], self.upload_uuids)
-        uuids = sorted([u.uuid for u in uploads])
-        self.assertEqual(uuids, sorted(self.upload_uuids))
-
-    def test_04_ready_upload(self):
-        """Test ready_upload"""
-        ready_upload(self.config["upload"], self.upload_uuids[0], "image-test-path")
-        upload = get_upload(self.config["upload"], self.upload_uuids[0])
-        self.assertEqual(upload.image_path, "image-test-path")
-
-    def test_05_reset_upload(self):
-        """Test reset_upload"""
-        # Set the status to FAILED so it can be reset
-        upload = get_upload(self.config["upload"], self.upload_uuids[0])
-        upload.set_status("FAILED", _write_callback(self.config["upload"]))
-
-        reset_upload(self.config["upload"], self.upload_uuids[0])
-        upload = get_upload(self.config["upload"], self.upload_uuids[0])
-        self.assertEqual(upload.status, "READY")
-
-    def test_06_reset_upload_error(self):
-        """Test reset_upload raising an error"""
-        with self.assertRaises(RuntimeError):
-            reset_upload(self.config["upload"], self.upload_uuids[0])
-
-    def test_07_cancel_upload(self):
-        """Test cancel_upload"""
-        cancel_upload(self.config["upload"], self.upload_uuids[0])
-        upload = get_upload(self.config["upload"], self.upload_uuids[0])
-        self.assertEqual(upload.status, "CANCELLED")
-
-    def test_08_cancel_upload_error(self):
-        """Test cancel_upload raises an error"""
-        # Set the status to CANCELLED to make sure the cancel will fail
-        upload = get_upload(self.config["upload"], self.upload_uuids[0])
-        upload.set_status("CANCELLED", _write_callback(self.config["upload"]))
-
-        with self.assertRaises(RuntimeError):
-            cancel_upload(self.config["upload"], self.upload_uuids[0])
-
-    # TODO test execute
diff --git a/tests/lifted/test_upload.py b/tests/lifted/test_upload.py
deleted file mode 100644
index a7ed59bb..00000000
--- a/tests/lifted/test_upload.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#
-# Copyright (C) 2019 Red Hat, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-import os
-import shutil
-import tempfile
-import unittest
-
-import lifted.config
-from lifted.providers import list_providers, resolve_playbook_path, validate_settings
-from lifted.upload import Upload
-import pylorax.api.config
-
-from tests.lifted.profiles import test_profiles
-
-# Helper function for creating Upload object
-def create_upload(ucfg, provider_name, image_name, settings, status=None, callback=None):
-    validate_settings(ucfg, provider_name, settings, image_name)
-    return Upload(
-        provider_name=provider_name,
-        playbook_path=resolve_playbook_path(ucfg, provider_name),
-        image_name=image_name,
-        settings=settings,
-        status=status,
-        status_callback=callback,
-    )
-
-
-class UploadTestCase(unittest.TestCase):
-    @classmethod
-    def setUpClass(self):
-        self.root_dir = tempfile.mkdtemp(prefix="lifted.test.")
-        self.config = pylorax.api.config.configure(root_dir=self.root_dir, test_config=True)
-        self.config.set("composer", "share_dir", os.path.realpath("./share/"))
-        lifted.config.configure(self.config)
-
-    @classmethod
-    def tearDownClass(self):
-        shutil.rmtree(self.root_dir)
-
-    def test_new_upload(self):
-        for p in list_providers(self.config["upload"]):
-            print(p)
-            upload = create_upload(self.config["upload"], p, "test-image", test_profiles[p][1], status="READY")
-            summary = upload.summary()
-            self.assertEqual(summary["provider_name"], p)
-            self.assertEqual(summary["image_name"], "test-image")
-            self.assertEqual(summary["status"], "READY")
-
-    def test_serializable(self):
-        for p in list_providers(self.config["upload"]):
-            print(p)
-            upload = create_upload(self.config["upload"], p, "test-image", test_profiles[p][1], status="READY")
-            self.assertEqual(upload.serializable()["settings"], test_profiles[p][1])
-            self.assertEqual(upload.serializable()["status"], "READY")
-
-    def test_summary(self):
-        for p in list_providers(self.config["upload"]):
-            print(p)
-            upload = create_upload(self.config["upload"], p, "test-image", test_profiles[p][1], status="READY")
-            self.assertEqual(upload.summary()["settings"], test_profiles[p][1])
-            self.assertEqual(upload.summary()["status"], "READY")
-
-    def test_set_status(self):
-        for p in list_providers(self.config["upload"]):
-            print(p)
-            upload = create_upload(self.config["upload"], p, "test-image", test_profiles[p][1], status="READY")
-            
self.assertEqual(upload.summary()["status"], "READY") - upload.set_status("WAITING") - self.assertEqual(upload.summary()["status"], "WAITING") - - def test_ready(self): - for p in list_providers(self.config["upload"]): - print(p) - upload = create_upload(self.config["upload"], p, "test-image", test_profiles[p][1], status="WAITING") - self.assertEqual(upload.summary()["status"], "WAITING") - upload.ready("test-image-path", status_callback=None) - summary = upload.summary() - self.assertEqual(summary["status"], "READY") - self.assertEqual(summary["image_path"], "test-image-path") - - def test_reset(self): - for p in list_providers(self.config["upload"]): - print(p) - upload = create_upload(self.config["upload"], p, "test-image", test_profiles[p][1], status="CANCELLED") - upload.ready("test-image-path", status_callback=None) - upload.reset(status_callback=None) - self.assertEqual(upload.status, "READY") - - def test_reset_errors(self): - for p in list_providers(self.config["upload"]): - print(p) - upload = create_upload(self.config["upload"], p, "test-image", test_profiles[p][1], status="WAITING") - with self.assertRaises(RuntimeError): - upload.reset(status_callback=None) - - upload = create_upload(self.config["upload"], p, "test-image", test_profiles[p][1], status="CANCELLED") - with self.assertRaises(RuntimeError): - upload.reset(status_callback=None) - - def test_cancel(self): - for p in list_providers(self.config["upload"]): - print(p) - upload = create_upload(self.config["upload"], p, "test-image", test_profiles[p][1], status="WAITING") - upload.cancel() - self.assertEqual(upload.status, "CANCELLED") - - def test_cancel_error(self): - for p in list_providers(self.config["upload"]): - print(p) - upload = create_upload(self.config["upload"], p, "test-image", test_profiles[p][1], status="CANCELLED") - with self.assertRaises(RuntimeError): - upload.cancel() diff --git a/tests/lint-playbooks.sh b/tests/lint-playbooks.sh deleted file mode 100755 index d69db828..00000000 --- a/tests/lint-playbooks.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/sh -for f in ./share/lifted/providers/*/playbook.yaml; do - echo "linting $f" - yamllint -c ./tests/yamllint.conf "$f" -done diff --git a/tests/pylorax/blueprints/example-append.toml b/tests/pylorax/blueprints/example-append.toml deleted file mode 100644 index 279ad0a1..00000000 --- a/tests/pylorax/blueprints/example-append.toml +++ /dev/null @@ -1,18 +0,0 @@ -name = "example-append" -description = "An example using kernel append customization" -version = "0.0.1" - -[[packages]] -name = "tmux" -version = "*" - -[[packages]] -name = "openssh-server" -version = "*" - -[[packages]] -name = "rsync" -version = "*" - -[customizations.kernel] -append = "nosmt=force" diff --git a/tests/pylorax/blueprints/example-atlas.toml b/tests/pylorax/blueprints/example-atlas.toml deleted file mode 100644 index 749961fb..00000000 --- a/tests/pylorax/blueprints/example-atlas.toml +++ /dev/null @@ -1,11 +0,0 @@ -name = "example-atlas" -description = "Automatically Tuned Linear Algebra Software" -version = "0.0.1" - -[[modules]] -name = "atlas" -version = "*" - -[[modules]] -name = "python3-numpy" -version = "*" diff --git a/tests/pylorax/blueprints/example-custom-base.toml b/tests/pylorax/blueprints/example-custom-base.toml deleted file mode 100644 index 14e1fae3..00000000 --- a/tests/pylorax/blueprints/example-custom-base.toml +++ /dev/null @@ -1,45 +0,0 @@ -name = "example-custom-base" -description = "A base system with customizations" -version = "0.0.1" - -[[packages]] -name 
= "bash" -version = "*" - -[customizations] -hostname = "custombase" - -[[customizations.sshkey]] -user = "root" -key = "A SSH KEY FOR ROOT" - -[[customizations.user]] -name = "widget" -description = "Widget process user account" -home = "/srv/widget/" -shell = "/usr/bin/false" -groups = ["dialout", "users"] - -[[customizations.user]] -name = "admin" -description = "Widget admin account" -password = "$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31LeOUleVK/R/aeWVHVZDi26zAH.o0ywBKH9Tc0/wm7sW/q39uyd1" -home = "/srv/widget/" -shell = "/usr/bin/bash" -groups = ["widget", "users", "students"] -uid = 1200 - -[[customizations.user]] -name = "plain" -password = "simple plain password" - -[[customizations.user]] -name = "bart" -key = "SSH KEY FOR BART" -groups = ["students"] - -[[customizations.group]] -name = "widget" - -[[customizations.group]] -name = "students" diff --git a/tests/pylorax/blueprints/example-development.toml b/tests/pylorax/blueprints/example-development.toml deleted file mode 100644 index bf61e0c6..00000000 --- a/tests/pylorax/blueprints/example-development.toml +++ /dev/null @@ -1,82 +0,0 @@ -name = "example-development" -description = "A general purpose development image" - -[[packages]] -name = "cmake" -version = "*" - -[[packages]] -name = "curl" -version = "*" - -[[packages]] -name = "file" -version = "*" - -[[packages]] -name = "gcc" -version = "*" - -[[packages]] -name = "gcc-c++" -version = "*" - -[[packages]] -name = "gdb" -version = "*" - -[[packages]] -name = "git" -version = "*" - -[[packages]] -name = "glibc-devel" -version = "*" - -[[packages]] -name = "gnupg2" -version = "*" - -[[packages]] -name = "libcurl-devel" -version = "*" - -[[packages]] -name = "make" -version = "*" - -[[packages]] -name = "openssl-devel" -version = "*" - -[[packages]] -name = "openssl-devel" -version = "*" - -[[packages]] -name = "sqlite" -version = "*" - -[[packages]] -name = "sqlite-devel" -version = "*" - -[[packages]] -name = "sudo" -version = "*" - -[[packages]] -name = "tar" -version = "*" - -[[packages]] -name = "xz" -version = "*" - -[[packages]] -name = "xz-devel" -version = "*" - -[[packages]] -name = "zlib-devel" -version = "*" diff --git a/tests/pylorax/blueprints/example-glusterfs.toml b/tests/pylorax/blueprints/example-glusterfs.toml deleted file mode 100644 index 00f9912e..00000000 --- a/tests/pylorax/blueprints/example-glusterfs.toml +++ /dev/null @@ -1,14 +0,0 @@ -name = "example-glusterfs" -description = "An example GlusterFS server with samba" - -[[modules]] -name = "glusterfs" -version = "*" - -[[modules]] -name = "glusterfs-cli" -version = "*" - -[[packages]] -name = "samba" -version = "*" diff --git a/tests/pylorax/blueprints/example-http-server.toml b/tests/pylorax/blueprints/example-http-server.toml deleted file mode 100644 index 5a19ad79..00000000 --- a/tests/pylorax/blueprints/example-http-server.toml +++ /dev/null @@ -1,35 +0,0 @@ -name = "example-http-server" -description = "An example http server with PHP and MySQL support." 
-version = "0.0.1" - -[[modules]] -name = "httpd" -version = "*" - -[[modules]] -name = "mod_auth_openid" -version = "*" - -[[modules]] -name = "mod_ssl" -version = "*" - -[[modules]] -name = "php" -version = "*" - -[[modules]] -name = "php-mysqlnd" -version = "*" - -[[packages]] -name = "tmux" -version = "*" - -[[packages]] -name = "openssh-server" -version = "*" - -[[packages]] -name = "rsync" -version = "*" diff --git a/tests/pylorax/blueprints/example-jboss.toml b/tests/pylorax/blueprints/example-jboss.toml deleted file mode 100644 index 580a5277..00000000 --- a/tests/pylorax/blueprints/example-jboss.toml +++ /dev/null @@ -1,15 +0,0 @@ -name = "example-jboss" -description = "An example jboss server" -version = "0.0.1" - -[[modules]] -name = "jboss-servlet-3.1-api" -version = "*" - -[[modules]] -name = "jboss-interceptors-1.2-api" -version = "*" - -[[modules]] -name = "java-1.8.0-openjdk" -version = "*" diff --git a/tests/pylorax/blueprints/example-kubernetes.toml b/tests/pylorax/blueprints/example-kubernetes.toml deleted file mode 100644 index c2e0e429..00000000 --- a/tests/pylorax/blueprints/example-kubernetes.toml +++ /dev/null @@ -1,27 +0,0 @@ -name = "example-kubernetes" -description = "An example kubernetes master" -version = "0.0.1" - -[[modules]] -name = "kubernetes" -version = "*" - -[[modules]] -name = "docker" -version = "*" - -[[modules]] -name = "docker-lvm-plugin" -version = "*" - -[[modules]] -name = "etcd" -version = "*" - -[[modules]] -name = "flannel" -version = "*" - -[[packages]] -name = "oci-systemd-hook" -version = "*" diff --git a/tests/pylorax/repos/baseurl-test.repo b/tests/pylorax/repos/baseurl-test.repo deleted file mode 100644 index c2f50374..00000000 --- a/tests/pylorax/repos/baseurl-test.repo +++ /dev/null @@ -1,6 +0,0 @@ -[fake-repo-baseurl] -name = A fake repo with a baseurl -baseurl = https://fake-repo.base.url -sslverify = True -gpgcheck = True -skip_if_unavailable=1 diff --git a/tests/pylorax/repos/gpgkey-test.repo b/tests/pylorax/repos/gpgkey-test.repo deleted file mode 100644 index 383904dc..00000000 --- a/tests/pylorax/repos/gpgkey-test.repo +++ /dev/null @@ -1,6 +0,0 @@ -[fake-repo-gpgkey] -name = A fake repo with a gpgkey -baseurl = https://fake-repo.base.url -sslverify = True -gpgcheck = True -gpgkey = https://fake-repo.gpgkey diff --git a/tests/pylorax/repos/metalink-test.repo b/tests/pylorax/repos/metalink-test.repo deleted file mode 100644 index c0e0223d..00000000 --- a/tests/pylorax/repos/metalink-test.repo +++ /dev/null @@ -1,6 +0,0 @@ -[fake-repo-metalink] -name = A fake repo with a metalink -metalink = https://fake-repo.metalink -sslverify = True -gpgcheck = True -skip_if_unavailable=1 diff --git a/tests/pylorax/repos/mirrorlist-test.repo b/tests/pylorax/repos/mirrorlist-test.repo deleted file mode 100644 index 0157aadb..00000000 --- a/tests/pylorax/repos/mirrorlist-test.repo +++ /dev/null @@ -1,6 +0,0 @@ -[fake-repo-mirrorlist] -name = A fake repo with a mirrorlist -mirrorlist = https://fake-repo.mirrorlist -sslverify = True -gpgcheck = True -skip_if_unavailable=1 diff --git a/tests/pylorax/repos/proxy-test.repo b/tests/pylorax/repos/proxy-test.repo deleted file mode 100644 index deb17014..00000000 --- a/tests/pylorax/repos/proxy-test.repo +++ /dev/null @@ -1,6 +0,0 @@ -[fake-repo-proxy] -name = A fake repo with a proxy -baseurl = https://fake-repo.base.url -proxy = https://fake-repo.proxy -sslverify = True -gpgcheck = True diff --git a/tests/pylorax/repos/server-1.repo b/tests/pylorax/repos/server-1.repo deleted file mode 100644 index 
29c24d5e..00000000 --- a/tests/pylorax/repos/server-1.repo +++ /dev/null @@ -1,47 +0,0 @@ -[lorax-1] -name=Lorax test repo 1 -failovermethod=priority -baseurl=file:///tmp/lorax-empty-repo-1/ -enabled=1 -metadata_expire=7d -repo_gpgcheck=0 -type=rpm -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch -skip_if_unavailable=False - -[lorax-2] -name=Lorax test repo 2 -failovermethod=priority -baseurl=file:///tmp/lorax-empty-repo-2/ -enabled=1 -metadata_expire=7d -repo_gpgcheck=0 -type=rpm -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch -skip_if_unavailable=False - -[lorax-3] -name=Lorax test repo 3 -failovermethod=priority -baseurl=file:///tmp/lorax-empty-repo-3/ -enabled=1 -metadata_expire=7d -repo_gpgcheck=0 -type=rpm -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch -skip_if_unavailable=False - -[lorax-4] -name=Lorax test repo 4 -failovermethod=priority -baseurl=file:///tmp/lorax-empty-repo-4/ -enabled=1 -metadata_expire=7d -repo_gpgcheck=0 -type=rpm -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch -skip_if_unavailable=False diff --git a/tests/pylorax/repos/server-2.repo b/tests/pylorax/repos/server-2.repo deleted file mode 100644 index c4518a01..00000000 --- a/tests/pylorax/repos/server-2.repo +++ /dev/null @@ -1,11 +0,0 @@ -[single-repo] -name=One repo in the file -failovermethod=priority -baseurl=file:///tmp/lorax-empty-repo/ -enabled=1 -metadata_expire=7d -repo_gpgcheck=0 -type=rpm -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch -skip_if_unavailable=False diff --git a/tests/pylorax/repos/server-3.repo b/tests/pylorax/repos/server-3.repo deleted file mode 100644 index e23d8722..00000000 --- a/tests/pylorax/repos/server-3.repo +++ /dev/null @@ -1,11 +0,0 @@ -[other-repo] -name=Other repo -failovermethod=priority -baseurl=file:///tmp/lorax-other-empty-repo/ -enabled=1 -metadata_expire=7d -repo_gpgcheck=0 -type=rpm -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch -skip_if_unavailable=False diff --git a/tests/pylorax/repos/single-dupe.repo b/tests/pylorax/repos/single-dupe.repo deleted file mode 100644 index 551b0861..00000000 --- a/tests/pylorax/repos/single-dupe.repo +++ /dev/null @@ -1,11 +0,0 @@ -[single-repo-duplicate] -name=single-repo-duplicate -failovermethod=priority -baseurl=file:///tmp/lorax-empty-repo/ -enabled=1 -metadata_expire=7d -repo_gpgcheck=0 -type=rpm -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch -skip_if_unavailable=False diff --git a/tests/pylorax/results/custom-base.dict b/tests/pylorax/results/custom-base.dict deleted file mode 100644 index fb955a0e..00000000 --- a/tests/pylorax/results/custom-base.dict +++ /dev/null @@ -1 +0,0 @@ -{'name': 'custom-base', 'description': 'A base system with customizations', 'version': '0.0.1', 'modules': [], 'packages': [{'name': 'bash', 'version': '5.0.*'}], 'groups': [], 'customizations': {'hostname': 'custombase', 'sshkey': [{'user': 'root', 'key': 'A SSH KEY FOR ROOT'}], 'kernel': {'append': 'nosmt=force'}, 'user': [{'name': 'admin', 'description': 'Administrator account', 'password': '$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31L...', 'key': 'PUBLIC SSH KEY', 'home': '/srv/widget/', 'shell': '/usr/bin/bash', 'groups': ['widget', 'users', 'wheel'], 'uid': 1200, 'gid': 1200}], 'group': [{'name': 'widget', 'gid': 1130}], 'timezone': {'timezone': 'US/Eastern', 'ntpservers': 
['0.north-america.pool.ntp.org', '1.north-america.pool.ntp.org']}, 'locale': {'languages': ['en_US.UTF-8'], 'keyboard': 'us'}, 'firewall': {'ports': ['22:tcp', '80:tcp', 'imap:tcp', '53:tcp', '53:udp'], 'services': {'enabled': ['ftp', 'ntp', 'dhcp'], 'disabled': ['telnet']}}, 'services': {'enabled': ['sshd', 'cockpit.socket', 'httpd'], 'disabled': ['postfix', 'telnetd']}}} diff --git a/tests/pylorax/results/custom-base.toml b/tests/pylorax/results/custom-base.toml deleted file mode 100644 index a44f796a..00000000 --- a/tests/pylorax/results/custom-base.toml +++ /dev/null @@ -1,51 +0,0 @@ -name = "custom-base" -description = "A base system with customizations" -version = "0.0.1" - -[[packages]] -name = "bash" -version = "5.0.*" - -[customizations] -hostname = "custombase" - -[[customizations.sshkey]] -user = "root" -key = "A SSH KEY FOR ROOT" - -[customizations.kernel] -append = "nosmt=force" - -[[customizations.user]] -name = "admin" -description = "Administrator account" -password = "$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31L..." -key = "PUBLIC SSH KEY" -home = "/srv/widget/" -shell = "/usr/bin/bash" -groups = ["widget", "users", "wheel"] -uid = 1200 -gid = 1200 - -[[customizations.group]] -name = "widget" -gid = 1130 - -[customizations.timezone] -timezone = "US/Eastern" -ntpservers = ["0.north-america.pool.ntp.org", "1.north-america.pool.ntp.org"] - -[customizations.locale] -languages = ["en_US.UTF-8"] -keyboard = "us" - -[customizations.firewall] -ports = ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"] - -[customizations.firewall.services] -enabled = ["ftp", "ntp", "dhcp"] -disabled = ["telnet"] - -[customizations.services] -enabled = ["sshd", "cockpit.socket", "httpd"] -disabled = ["postfix", "telnetd"] diff --git a/tests/pylorax/results/full-recipe.dict b/tests/pylorax/results/full-recipe.dict deleted file mode 100644 index 23a0ee4f..00000000 --- a/tests/pylorax/results/full-recipe.dict +++ /dev/null @@ -1 +0,0 @@ -{'description': u'An example http server with PHP and MySQL support.', 'packages': [{'version': u'6.6.*', 'name': u'openssh-server'}, {'version': u'3.0.*', 'name': u'rsync'}, {'version': u'2.2', 'name': u'tmux'}], 'groups': [], 'modules': [{'version': u'2.4.*', 'name': u'httpd'}, {'version': u'5.4', 'name': u'mod_auth_kerb'}, {'version': u'2.4.*', 'name': u'mod_ssl'}, {'version': u'5.4.*', 'name': u'php'}, {'version': u'5.4.*', 'name': u'php-mysql'}], 'version': u'0.0.1', 'name': u'http-server'} diff --git a/tests/pylorax/results/full-recipe.toml b/tests/pylorax/results/full-recipe.toml deleted file mode 100644 index c6d74b1c..00000000 --- a/tests/pylorax/results/full-recipe.toml +++ /dev/null @@ -1,35 +0,0 @@ -name = "http-server" -description = "An example http server with PHP and MySQL support." 
-version = "0.0.1" - -[[modules]] -name = "httpd" -version = "2.4.*" - -[[modules]] -name = "mod_auth_kerb" -version = "5.4" - -[[modules]] -name = "mod_ssl" -version = "2.4.*" - -[[modules]] -name = "php" -version = "5.4.*" - -[[modules]] -name = "php-mysql" -version = "5.4.*" - -[[packages]] -name = "tmux" -version = "2.2" - -[[packages]] -name = "openssh-server" -version = "6.6.*" - -[[packages]] -name = "rsync" -version = "3.0.*" diff --git a/tests/pylorax/results/groups-only.dict b/tests/pylorax/results/groups-only.dict deleted file mode 100644 index 099ea24a..00000000 --- a/tests/pylorax/results/groups-only.dict +++ /dev/null @@ -1 +0,0 @@ -{'description': u'An example e-mail server.', 'packages': [], 'groups': [{'name': u'mail-server'}], 'modules': [], 'version': u'0.0.1', 'name': u'mail-server'} diff --git a/tests/pylorax/results/groups-only.toml b/tests/pylorax/results/groups-only.toml deleted file mode 100644 index b9235832..00000000 --- a/tests/pylorax/results/groups-only.toml +++ /dev/null @@ -1,6 +0,0 @@ -name = "mail-server" -description = "An example e-mail server." -version = "0.0.1" - -[[groups]] -name = "mail-server" diff --git a/tests/pylorax/results/minimal.dict b/tests/pylorax/results/minimal.dict deleted file mode 100644 index 81178d27..00000000 --- a/tests/pylorax/results/minimal.dict +++ /dev/null @@ -1 +0,0 @@ -{'description': u'An example http server with PHP and MySQL support.', 'packages': [], 'groups': [], 'modules': [], 'version': u'0.0.1', 'name': u'http-server'} diff --git a/tests/pylorax/results/minimal.toml b/tests/pylorax/results/minimal.toml deleted file mode 100644 index bb71b650..00000000 --- a/tests/pylorax/results/minimal.toml +++ /dev/null @@ -1,3 +0,0 @@ -name = "http-server" -description = "An example http server with PHP and MySQL support." -version = "0.0.1" diff --git a/tests/pylorax/results/modules-only.dict b/tests/pylorax/results/modules-only.dict deleted file mode 100644 index 81185b8c..00000000 --- a/tests/pylorax/results/modules-only.dict +++ /dev/null @@ -1 +0,0 @@ -{'description': u'An example http server with PHP and MySQL support.', 'packages': [], 'groups': [], 'modules': [{'version': u'2.4.*', 'name': u'httpd'}, {'version': u'5.4', 'name': u'mod_auth_kerb'}, {'version': u'2.4.*', 'name': u'mod_ssl'}, {'version': u'5.4.*', 'name': u'php'}, {'version': u'5.4.*', 'name': u'php-mysql'}], 'version': u'0.0.1', 'name': u'http-server'} diff --git a/tests/pylorax/results/modules-only.toml b/tests/pylorax/results/modules-only.toml deleted file mode 100644 index 3457797d..00000000 --- a/tests/pylorax/results/modules-only.toml +++ /dev/null @@ -1,23 +0,0 @@ -name = "http-server" -description = "An example http server with PHP and MySQL support." 
-version = "0.0.1" - -[[modules]] -name = "httpd" -version = "2.4.*" - -[[modules]] -name = "mod_auth_kerb" -version = "5.4" - -[[modules]] -name = "mod_ssl" -version = "2.4.*" - -[[modules]] -name = "php" -version = "5.4.*" - -[[modules]] -name = "php-mysql" -version = "5.4.*" diff --git a/tests/pylorax/results/packages-only.dict b/tests/pylorax/results/packages-only.dict deleted file mode 100644 index 9c542a53..00000000 --- a/tests/pylorax/results/packages-only.dict +++ /dev/null @@ -1 +0,0 @@ -{'description': u'An example http server with PHP and MySQL support.', 'packages': [{'version': u'6.6.*', 'name': u'openssh-server'}, {'version': u'3.0.*', 'name': u'rsync'}, {'version': u'2.2', 'name': u'tmux'}], 'groups': [], 'modules': [], 'version': u'0.0.1', 'name': u'http-server'} diff --git a/tests/pylorax/results/packages-only.toml b/tests/pylorax/results/packages-only.toml deleted file mode 100644 index 2aa507f3..00000000 --- a/tests/pylorax/results/packages-only.toml +++ /dev/null @@ -1,15 +0,0 @@ -name = "http-server" -description = "An example http server with PHP and MySQL support." -version = "0.0.1" - -[[packages]] -name = "tmux" -version = "2.2" - -[[packages]] -name = "openssh-server" -version = "6.6.*" - -[[packages]] -name = "rsync" -version = "3.0.*" diff --git a/tests/pylorax/results/repos-git.dict b/tests/pylorax/results/repos-git.dict deleted file mode 100644 index cf180a6c..00000000 --- a/tests/pylorax/results/repos-git.dict +++ /dev/null @@ -1 +0,0 @@ -{'description': u'An example http server with PHP and MySQL support.', 'packages': [], 'groups': [], 'modules': [{'version': u'2.4.*', 'name': u'httpd'}, {'version': u'5.4.*', 'name': u'php'}], 'version': u'0.0.1', 'name': u'http-server', 'repos': {'git': [{"rpmname": "server-config-files", "rpmversion": "1.0", "rpmrelease": "1", "summary": "Setup files for server deployment", "repo": "https://github.com/bcl/server-config-files", "ref": "v3.0", "destination": "/srv/config/"}]}} diff --git a/tests/pylorax/results/repos-git.toml b/tests/pylorax/results/repos-git.toml deleted file mode 100644 index f6f9634e..00000000 --- a/tests/pylorax/results/repos-git.toml +++ /dev/null @@ -1,20 +0,0 @@ -name = "http-server" -description = "An example http server with PHP and MySQL support." 
-version = "0.0.1" - -[[modules]] -name = "httpd" -version = "2.4.*" - -[[modules]] -name = "php" -version = "5.4.*" - -[[repos.git]] -rpmname="server-config-files" -rpmversion="1.0" -rpmrelease="1" -summary="Setup files for server deployment" -repo="https://github.com/bcl/server-config-files" -ref="v3.0" -destination="/srv/config/" diff --git a/tests/pylorax/source/bad-repo.toml b/tests/pylorax/source/bad-repo.toml deleted file mode 100644 index 95f4355f..00000000 --- a/tests/pylorax/source/bad-repo.toml +++ /dev/null @@ -1,6 +0,0 @@ -name = "bad-repo-1" -url = "file:///tmp/not-a-repo/" -type = "yum-baseurl" -check_ssl = true -check_gpg = true -gpgkey_urls = ["file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch"] diff --git a/tests/pylorax/source/replace-fedora.toml b/tests/pylorax/source/replace-fedora.toml deleted file mode 100644 index ab0a795a..00000000 --- a/tests/pylorax/source/replace-fedora.toml +++ /dev/null @@ -1,7 +0,0 @@ -name = "fedora" -id = "fedora" -url = "file:///tmp/lorax-empty-repo/" -type = "yum-baseurl" -check_ssl = false -check_gpg = true -gpgkey_urls = [] diff --git a/tests/pylorax/source/replace-rawhide.toml b/tests/pylorax/source/replace-rawhide.toml deleted file mode 100644 index 50802fad..00000000 --- a/tests/pylorax/source/replace-rawhide.toml +++ /dev/null @@ -1,7 +0,0 @@ -name = "rawhide" -id = "rawhide" -url = "file:///tmp/lorax-empty-repo/" -type = "yum-baseurl" -check_ssl = false -check_gpg = true -gpgkey_urls = [] diff --git a/tests/pylorax/source/replace-repo.toml b/tests/pylorax/source/replace-repo.toml deleted file mode 100644 index a8dc5883..00000000 --- a/tests/pylorax/source/replace-repo.toml +++ /dev/null @@ -1,7 +0,0 @@ -name = "single-repo" -id = "single-repo" -url = "file:///tmp/lorax-empty-repo/" -type = "yum-baseurl" -check_ssl = false -check_gpg = true -gpgkey_urls = [] diff --git a/tests/pylorax/source/test-repo-v1-vars.toml b/tests/pylorax/source/test-repo-v1-vars.toml deleted file mode 100644 index a8ddac2a..00000000 --- a/tests/pylorax/source/test-repo-v1-vars.toml +++ /dev/null @@ -1,7 +0,0 @@ -id = "new-repo-2-v1-vars" -name = "API v1 toml new repo with vars" -url = "file:///tmp/lorax-empty-repo-v1-$releasever-$basearch/" -type = "yum-baseurl" -check_ssl = true -check_gpg = true -gpgkey_urls = ["file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch"] diff --git a/tests/pylorax/source/test-repo-v1.json b/tests/pylorax/source/test-repo-v1.json deleted file mode 100644 index 1def3b65..00000000 --- a/tests/pylorax/source/test-repo-v1.json +++ /dev/null @@ -1 +0,0 @@ -{"id": "new-repo-1-v1", "name": "API v1 json new repo", "url": "file:///tmp/lorax-empty-repo/", "type": "yum-baseurl", "check_ssl": true, "check_gpg": true, "gpgkey_urls": ["file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch"]} diff --git a/tests/pylorax/source/test-repo-v1.toml b/tests/pylorax/source/test-repo-v1.toml deleted file mode 100644 index bfeef2ea..00000000 --- a/tests/pylorax/source/test-repo-v1.toml +++ /dev/null @@ -1,7 +0,0 @@ -id = "new-repo-2-v1" -name = "API v1 toml new repo" -url = "file:///tmp/lorax-empty-repo/" -type = "yum-baseurl" -check_ssl = true -check_gpg = true -gpgkey_urls = ["file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch"] diff --git a/tests/pylorax/source/test-repo-vars.toml b/tests/pylorax/source/test-repo-vars.toml deleted file mode 100644 index 676940fd..00000000 --- a/tests/pylorax/source/test-repo-vars.toml +++ /dev/null @@ -1,6 +0,0 @@ -name = "new-repo-2-vars" -url = 
"file:///tmp/lorax-empty-repo-$releasever-$basearch/" -type = "yum-baseurl" -check_ssl = true -check_gpg = true -gpgkey_urls = ["file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch"] diff --git a/tests/pylorax/source/test-repo.json b/tests/pylorax/source/test-repo.json deleted file mode 100644 index 579c87a3..00000000 --- a/tests/pylorax/source/test-repo.json +++ /dev/null @@ -1 +0,0 @@ -{"name": "new-repo-1", "url": "file:///tmp/lorax-empty-repo/", "type": "yum-baseurl", "check_ssl": true, "check_gpg": true, "gpgkey_urls": ["file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch"]} diff --git a/tests/pylorax/source/test-repo.toml b/tests/pylorax/source/test-repo.toml deleted file mode 100644 index 2cc5e262..00000000 --- a/tests/pylorax/source/test-repo.toml +++ /dev/null @@ -1,6 +0,0 @@ -name = "new-repo-2" -url = "file:///tmp/lorax-empty-repo/" -type = "yum-baseurl" -check_ssl = true -check_gpg = true -gpgkey_urls = ["file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch"] diff --git a/tests/pylorax/test_bisect.py b/tests/pylorax/test_bisect.py deleted file mode 100644 index 8e5a101a..00000000 --- a/tests/pylorax/test_bisect.py +++ /dev/null @@ -1,41 +0,0 @@ -# -# Copyright (C) 2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -import unittest - -from pylorax.api.bisect import insort_left - - -class BisectTest(unittest.TestCase): - def test_insort_left_nokey(self): - results = [] - for x in range(0, 10): - insort_left(results, x) - self.assertEqual(results, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - def test_insort_left_key_strings(self): - unsorted = ["Maggie", "Homer", "Bart", "Marge"] - results = [] - for x in unsorted: - insort_left(results, x, key=lambda p: p.lower()) - self.assertEqual(results, ["Bart", "Homer", "Maggie", "Marge"]) - - def test_insort_left_key_dict(self): - unsorted = [{"name":"Maggie"}, {"name":"Homer"}, {"name":"Bart"}, {"name":"Marge"}] - results = [] - for x in unsorted: - insort_left(results, x, key=lambda p: p["name"].lower()) - self.assertEqual(results, [{"name":"Bart"}, {"name":"Homer"}, {"name":"Maggie"}, {"name":"Marge"}]) diff --git a/tests/pylorax/test_compose.py b/tests/pylorax/test_compose.py deleted file mode 100644 index de697ed7..00000000 --- a/tests/pylorax/test_compose.py +++ /dev/null @@ -1,847 +0,0 @@ -# -# Copyright (C) 2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. 
If not, see . -# -from io import StringIO -import os -import shutil -import tempfile -import unittest - -import lifted.config -from pylorax import get_buildarch -from pylorax.api.compose import add_customizations, get_extra_pkgs, compose_types -from pylorax.api.compose import timezone_cmd, get_timezone_settings -from pylorax.api.compose import lang_cmd, get_languages, keyboard_cmd, get_keyboard_layout -from pylorax.api.compose import firewall_cmd, get_firewall_settings -from pylorax.api.compose import services_cmd, get_services, get_default_services -from pylorax.api.compose import get_kernel_append, bootloader_append, customize_ks_template -from pylorax.api.config import configure, make_dnf_dirs -from pylorax.api.dnfbase import get_base_object -from pylorax.api.recipes import recipe_from_toml, RecipeError -from pylorax.sysutils import joinpaths - -BASE_RECIPE = """name = "test-cases" -description = "Used for testing" -version = "0.0.1" - -""" - -HOSTNAME = BASE_RECIPE + """[customizations] -hostname = "testhostname" -""" - -TIMEZONE = BASE_RECIPE + """[customizations] -timezone = "US/Samoa" -""" - -SSHKEY = BASE_RECIPE + """[[customizations.sshkey]] -user = "root" -key = "ROOT SSH KEY" -""" - -USER = BASE_RECIPE + """[[customizations.user]] -name = "tester" -""" - -ROOT_USER = BASE_RECIPE + """[[customizations.user]] -name = "root" -""" - -USER_KEY = """ -key = "A SSH KEY FOR THE USER" -""" - -USER_DESC = """ -description = "a test user account" -""" - -USER_CRYPT = """ -password = "$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31LeOUleVK/R/aeWVHVZDi26zAH.o0ywBKH9Tc0/wm7sW/q39uyd1" -""" - -USER_PLAIN = """ -password = "plainpassword" -""" - -USER_HOME = """ -home = "/opt/users/tester/" -""" - -USER_SHELL = """ -shell = "/usr/bin/zsh" -""" - -USER_UID = """ -uid = 1013 -""" - -USER_GID = """ -gid = 4242 -""" - -USER_GROUPS = """ -groups = ["wheel", "users"] -""" - -USER_ALL = USER + USER_KEY + USER_DESC + USER_CRYPT + USER_HOME + USER_SHELL + USER_UID + USER_GID - -GROUP = BASE_RECIPE + """[[customizations.group]] -name = "testgroup" -""" - -GROUP_GID = GROUP + """ -gid = 1011 -""" - -USER_GROUP = USER + """[[customizations.group]] -name = "tester" -""" - -KS_USER_ALL = '''sshkey --user tester "A SSH KEY FOR THE USER" -user --name tester --homedir /opt/users/tester/ --iscrypted --password "$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31LeOUleVK/R/aeWVHVZDi26zAH.o0ywBKH9Tc0/wm7sW/q39uyd1" --shell /usr/bin/zsh --uid 1013 --gid 4242 --gecos "a test user account" -rootpw --lock -''' - -# ROOT TESTS -ROOT_CRYPT = ROOT_USER + USER_CRYPT -ROOT_PLAIN = ROOT_USER + USER_PLAIN -ROOT_CRYPT_KEY = ROOT_USER + USER_CRYPT + USER_KEY -ROOT_PLAIN_KEY = ROOT_USER + USER_PLAIN + USER_KEY -ROOT_KEY = ROOT_USER + USER_KEY - -class CustomizationsTestCase(unittest.TestCase): - def assertCustomization(self, test, result): - r = recipe_from_toml(test) - f = StringIO() - add_customizations(f, r) - self.assertTrue(result in f.getvalue(), f.getvalue()) - - def assertNotCustomization(self, test, result): - r = recipe_from_toml(test) - f = StringIO() - add_customizations(f, r) - self.assertTrue(result not in f.getvalue(), f.getvalue()) - - def test_no_customizations(self): - """Test not setting any customizations""" - self.assertCustomization(BASE_RECIPE, "rootpw --lock") - - def test_set_hostname(self): - """Test setting the hostname""" - self.assertCustomization(HOSTNAME, "network --hostname=testhostname") - self.assertCustomization(HOSTNAME, "rootpw --lock") - - def test_set_sshkey(self): - """Test setting sshkey without 
user""" - self.assertCustomization(SSHKEY, 'sshkey --user root "ROOT SSH KEY"') - - def test_sshkey_only(self): - """Test adding a sshkey to an existing user account""" - self.assertCustomization(USER + USER_KEY, 'sshkey --user tester "A SSH KEY FOR THE USER"') - self.assertCustomization(USER + USER_KEY, "rootpw --lock") - - def test_create_user(self): - """Test creating a user with no options""" - self.assertCustomization(USER, "user --name tester") - self.assertCustomization(USER, "rootpw --lock") - - def test_create_user_desc(self): - """Test creating a user with a description""" - self.assertCustomization(USER + USER_DESC, '--gecos "a test user account"') - self.assertCustomization(USER + USER_DESC, "rootpw --lock") - - def test_create_user_crypt(self): - """Test creating a user with a pre-crypted password""" - self.assertCustomization(USER + USER_CRYPT, '--password "$6$CHO2$3r') - self.assertCustomization(USER + USER_CRYPT, "rootpw --lock") - - def test_create_user_plain(self): - """Test creating a user with a plaintext password""" - self.assertCustomization(USER + USER_PLAIN, '--password "plainpassword"') - self.assertCustomization(USER + USER_PLAIN, "rootpw --lock") - - def test_create_user_home(self): - """Test creating user with a home directory""" - self.assertCustomization(USER + USER_HOME, "--homedir /opt/users/tester/") - self.assertCustomization(USER + USER_HOME, "rootpw --lock") - - def test_create_user_shell(self): - """Test creating user with shell set""" - self.assertCustomization(USER + USER_SHELL, "--shell /usr/bin/zsh") - self.assertCustomization(USER + USER_SHELL, "rootpw --lock") - - def test_create_user_uid(self): - """Test creating user with uid set""" - self.assertCustomization(USER + USER_UID, "--uid 1013") - self.assertCustomization(USER + USER_UID, "rootpw --lock") - - def test_create_user_gid(self): - """Test creating user with gid set""" - self.assertCustomization(USER + USER_GID, "--gid 4242") - self.assertCustomization(USER + USER_GID, "rootpw --lock") - - def test_create_user_groups(self): - """Test creating user with group membership""" - self.assertCustomization(USER + USER_GROUPS, "--groups wheel,users") - self.assertCustomization(USER + USER_GROUPS, "rootpw --lock") - - def test_user_same_group(self): - """Test creating a group with the same name as a user""" - - # Creating a group with the same name should skip the group creation - self.assertCustomization(USER_GROUP, "user --name tester") - self.assertNotCustomization(USER_GROUP, "group --name tester") - self.assertCustomization(USER_GROUP, "rootpw --lock") - - def test_create_user_all(self): - """Test creating user with all settings""" - r = recipe_from_toml(USER_ALL) - f = StringIO() - add_customizations(f, r) - self.assertEqual(KS_USER_ALL, f.getvalue()) - - def test_create_group(self): - """Test creating group without gid set""" - self.assertCustomization(GROUP, "group --name testgroup") - self.assertCustomization(GROUP, "rootpw --lock") - - def test_create_group_gid(self): - """Test creating group with gid set""" - self.assertCustomization(GROUP_GID, "group --name testgroup --gid 1011") - self.assertCustomization(GROUP_GID, "rootpw --lock") - - def test_root_crypt(self): - self.assertCustomization(ROOT_CRYPT, 'rootpw --iscrypted "$6$CHO2$3r') - self.assertNotCustomization(ROOT_CRYPT, "rootpw --lock") - - def test_root_plain(self): - self.assertCustomization(ROOT_PLAIN, 'rootpw --plaintext "plainpassword"') - self.assertNotCustomization(ROOT_PLAIN, "rootpw --lock") - - def 
test_root_crypt_key(self): - self.assertCustomization(ROOT_CRYPT_KEY, 'rootpw --iscrypted "$6$CHO2$3r') - self.assertCustomization(ROOT_CRYPT_KEY, 'sshkey --user root "A SSH KEY FOR THE USER"') - self.assertNotCustomization(ROOT_CRYPT_KEY, "rootpw --lock") - - def test_root_plain_key(self): - self.assertCustomization(ROOT_PLAIN_KEY, 'rootpw --plaintext "plainpassword"') - self.assertCustomization(ROOT_PLAIN_KEY, 'sshkey --user root "A SSH KEY FOR THE USER"') - self.assertNotCustomization(ROOT_PLAIN_KEY, "rootpw --lock") - - def test_get_kernel_append(self): - """Test get_kernel_append function""" - blueprint_data = """name = "test-kernel" -description = "test recipe" -version = "0.0.1" -""" - blueprint2_data = blueprint_data + """ -[customizations.kernel] -append="nosmt=force" -""" - recipe = recipe_from_toml(blueprint_data) - self.assertEqual(get_kernel_append(recipe), "") - - recipe = recipe_from_toml(blueprint2_data) - self.assertEqual(get_kernel_append(recipe), "nosmt=force") - - def test_bootloader_append(self): - """Test bootloader_append function""" - - self.assertEqual(bootloader_append("", "nosmt=force"), 'bootloader --append="nosmt=force" --location=none') - self.assertEqual(bootloader_append("", "nosmt=force console=ttyS0,115200n8"), - 'bootloader --append="nosmt=force console=ttyS0,115200n8" --location=none') - self.assertEqual(bootloader_append("bootloader --location=none", "nosmt=force"), - 'bootloader --append="nosmt=force" --location=none') - self.assertEqual(bootloader_append("bootloader --location=none", "console=ttyS0,115200n8 nosmt=force"), - 'bootloader --append="console=ttyS0,115200n8 nosmt=force" --location=none') - self.assertEqual(bootloader_append('bootloader --append="no_timer_check console=ttyS0,115200n8" --location=mbr', "nosmt=force"), - 'bootloader --append="no_timer_check console=ttyS0,115200n8 nosmt=force" --location=mbr') - self.assertEqual(bootloader_append('bootloader --append="console=tty1" --location=mbr --password="BADPASSWORD"', "nosmt=force"), - 'bootloader --append="console=tty1 nosmt=force" --location=mbr --password="BADPASSWORD"') - - def test_get_timezone_settings(self): - """Test get_timezone_settings function""" - blueprint_data = """name = "test-kernel" -description = "test recipe" -version = "0.0.1" -""" - blueprint2_data = blueprint_data + """ -[customizations.timezone] -timezone = "US/Samoa" -""" - blueprint3_data = blueprint_data + """ -[customizations.timezone] -ntpservers = ["0.north-america.pool.ntp.org", "1.north-america.pool.ntp.org"] -""" - blueprint4_data = blueprint_data + """ -[customizations.timezone] -timezone = "US/Samoa" -ntpservers = ["0.north-america.pool.ntp.org", "1.north-america.pool.ntp.org"] -""" - recipe = recipe_from_toml(blueprint_data) - self.assertEqual(get_timezone_settings(recipe), {}) - - recipe = recipe_from_toml(blueprint2_data) - self.assertEqual(get_timezone_settings(recipe), {"timezone": "US/Samoa"}) - - recipe = recipe_from_toml(blueprint3_data) - self.assertEqual(get_timezone_settings(recipe), - {"ntpservers": ["0.north-america.pool.ntp.org", "1.north-america.pool.ntp.org"]}) - - recipe = recipe_from_toml(blueprint4_data) - self.assertEqual(get_timezone_settings(recipe), - {"timezone": "US/Samoa", - "ntpservers": ["0.north-america.pool.ntp.org", "1.north-america.pool.ntp.org"]}) - - def test_timezone_cmd(self): - """Test timezone_cmd function""" - - self.assertEqual(timezone_cmd("timezone UTC", {}), 'timezone UTC') - self.assertEqual(timezone_cmd("timezone FOO", {"timezone": "US/Samoa"}), - 'timezone 
US/Samoa') - self.assertEqual(timezone_cmd("timezone FOO", - {"timezone": "US/Samoa", "ntpservers": ["0.ntp.org", "1.ntp.org"]}), - 'timezone US/Samoa --ntpservers=0.ntp.org,1.ntp.org') - - self.assertEqual(timezone_cmd("timezone --ntpservers=a,b,c FOO", - {"timezone": "US/Samoa", "ntpservers": ["0.pool.ntp.org", "1.pool.ntp.org"]}), - 'timezone US/Samoa --ntpservers=0.pool.ntp.org,1.pool.ntp.org') - - def test_get_languages(self): - """Test get_languages function""" - blueprint_data = """name = "test-locale" -description = "test recipe" -version = "0.0.1" - """ - blueprint2_data = blueprint_data + """ -[customizations.locale] -languages = ["en_CA.utf8", "en_HK.utf8"] -""" - blueprint3_data = blueprint_data + """ -[customizations.locale] -keyboard = "de (dvorak)" -languages = ["en_CA.utf8", "en_HK.utf8"] -""" - recipe = recipe_from_toml(blueprint_data) - self.assertEqual(get_languages(recipe), []) - - recipe = recipe_from_toml(blueprint2_data) - self.assertEqual(get_languages(recipe), ["en_CA.utf8", "en_HK.utf8"]) - - recipe = recipe_from_toml(blueprint3_data) - self.assertEqual(get_languages(recipe), ["en_CA.utf8", "en_HK.utf8"]) - - def test_lang_cmd(self): - """Test lang_cmd function""" - - self.assertEqual(lang_cmd("lang en_CA.utf8", {}), 'lang en_CA.utf8') - self.assertEqual(lang_cmd("lang en_US.utf8", ["en_HK.utf8"]), - 'lang en_HK.utf8') - self.assertEqual(lang_cmd("lang en_US.utf8", ["en_CA.utf8", "en_HK.utf8"]), - 'lang en_CA.utf8 --addsupport=en_HK.utf8') - - self.assertEqual(lang_cmd("lang --addsupport en_US.utf8 en_CA.utf8", - ["en_CA.utf8", "en_HK.utf8", "en_GB.utf8"]), - 'lang en_CA.utf8 --addsupport=en_HK.utf8,en_GB.utf8') - - def test_get_keyboard_layout(self): - """Test get_keyboard_layout function""" - blueprint_data = """name = "test-locale" -description = "test recipe" -version = "0.0.1" - """ - blueprint2_data = blueprint_data + """ -[customizations.locale] -keyboard = "de (dvorak)" -""" - blueprint3_data = blueprint_data + """ -[customizations.locale] -keyboard = "de (dvorak)" -languages = ["en_CA.utf8", "en_HK.utf8"] -""" - recipe = recipe_from_toml(blueprint_data) - self.assertEqual(get_keyboard_layout(recipe), []) - - recipe = recipe_from_toml(blueprint2_data) - self.assertEqual(get_keyboard_layout(recipe), "de (dvorak)") - - recipe = recipe_from_toml(blueprint3_data) - self.assertEqual(get_keyboard_layout(recipe), "de (dvorak)") - - def test_keyboard_cmd(self): - """Test lang_cmd function""" - - self.assertEqual(keyboard_cmd("keyboard us", {}), "keyboard 'us'") - self.assertEqual(keyboard_cmd("keyboard us", "de (dvorak)"), - "keyboard 'de (dvorak)'") - - self.assertEqual(keyboard_cmd("keyboard --vckeymap=us --xlayouts=us,gb", - "de (dvorak)"), - "keyboard 'de (dvorak)'") - - def test_get_firewall_settings(self): - """Test get_firewall_settings function""" - blueprint_data = """name = "test-firewall" -description = "test recipe" -version = "0.0.1" - """ - firewall_ports = """ -[customizations.firewall] -ports = ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"] -""" - firewall_services = """ -[customizations.firewall.services] -enabled = ["ftp", "ntp", "dhcp"] -disabled = ["telnet"] -""" - blueprint2_data = blueprint_data + firewall_ports - blueprint3_data = blueprint_data + firewall_services - blueprint4_data = blueprint_data + firewall_ports + firewall_services - - recipe = recipe_from_toml(blueprint_data) - self.assertEqual(get_firewall_settings(recipe), {'ports': [], 'enabled': [], 'disabled': []}) - - recipe = recipe_from_toml(blueprint2_data) - 
self.assertEqual(get_firewall_settings(recipe), - {"ports": ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"], - "enabled": [], "disabled": []}) - - recipe = recipe_from_toml(blueprint3_data) - self.assertEqual(get_firewall_settings(recipe), - {"ports": [], - "enabled": ["ftp", "ntp", "dhcp"], "disabled": ["telnet"]}) - - recipe = recipe_from_toml(blueprint4_data) - self.assertEqual(get_firewall_settings(recipe), - {"ports": ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"], - "enabled": ["ftp", "ntp", "dhcp"], "disabled": ["telnet"]}) - - def test_firewall_cmd(self): - """Test firewall_cmd function""" - - self.assertEqual(firewall_cmd("firewall --enabled", {}), "firewall --enabled") - self.assertEqual(firewall_cmd("firewall --enabled", - {"ports": ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"], - "enabled": [], "disabled": []}), - "firewall --enabled --port=22:tcp,53:tcp,53:udp,80:tcp,imap:tcp") - self.assertEqual(firewall_cmd("firewall --enabled", - {"ports": ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"], - "enabled": ["ftp", "ntp", "dhcp"], "disabled": []}), - "firewall --enabled --port=22:tcp,53:tcp,53:udp,80:tcp,imap:tcp --service=dhcp,ftp,ntp") - self.assertEqual(firewall_cmd("firewall --enabled", - {"ports": ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"], - "enabled": ["ftp", "ntp", "dhcp"], "disabled": ["telnet"]}), - "firewall --enabled --port=22:tcp,53:tcp,53:udp,80:tcp,imap:tcp --service=dhcp,ftp,ntp --remove-service=telnet") - # Make sure that --disabled overrides setting ports and services - self.assertEqual(firewall_cmd("firewall --disabled", - {"ports": ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"], - "enabled": ["ftp", "ntp", "dhcp"], "disabled": ["telnet"]}), - "firewall --disabled") - # Make sure that ports includes any existing settings from the firewall command - self.assertEqual(firewall_cmd("firewall --enabled --port=8080:tcp --service=dns --remove-service=ftp", - {"ports": ["80:tcp"], - "enabled": ["ntp"], "disabled": ["telnet"]}), - "firewall --enabled --port=8080:tcp,80:tcp --service=dns,ntp --remove-service=ftp,telnet") - - def test_get_services(self): - """Test get_services function""" - blueprint_data = """name = "test-services" -description = "test recipe" -version = "0.0.1" -[customizations.services] - """ - enable_services = """ -enabled = ["sshd", "cockpit.socket", "httpd"] - """ - disable_services = """ -disabled = ["postfix", "telnetd"] - """ - blueprint2_data = blueprint_data + enable_services - blueprint3_data = blueprint_data + disable_services - blueprint4_data = blueprint_data + enable_services + disable_services - - with self.assertRaises(RecipeError): - recipe = recipe_from_toml(blueprint_data) - - recipe = recipe_from_toml(blueprint2_data) - self.assertEqual(get_services(recipe), - {"enabled": ["cockpit.socket", "httpd", "sshd"], "disabled": []}) - - recipe = recipe_from_toml(blueprint3_data) - self.assertEqual(get_services(recipe), - {"enabled": [], "disabled": ["postfix", "telnetd"]}) - - recipe = recipe_from_toml(blueprint4_data) - self.assertEqual(get_services(recipe), - {"enabled": ["cockpit.socket", "httpd", "sshd"], "disabled": ["postfix", "telnetd"]}) - - def test_services_cmd(self): - """Test services_cmd function""" - - self.assertEqual(services_cmd("", {"enabled": [], "disabled": []}), "") - self.assertEqual(services_cmd("", {"enabled": ["cockpit.socket", "httpd", "sshd"], "disabled": []}), - 'services --enabled="cockpit.socket,httpd,sshd"') - self.assertEqual(services_cmd("", {"enabled": [], "disabled": 
["postfix", "telnetd"]}), - 'services --disabled="postfix,telnetd"') - self.assertEqual(services_cmd("", {"enabled": ["cockpit.socket", "httpd", "sshd"], - "disabled": ["postfix", "telnetd"]}), - 'services --disabled="postfix,telnetd" --enabled="cockpit.socket,httpd,sshd"') - self.assertEqual(services_cmd("services --enabled=pop3", {"enabled": ["cockpit.socket", "httpd", "sshd"], - "disabled": ["postfix", "telnetd"]}), - 'services --disabled="postfix,telnetd" --enabled="cockpit.socket,httpd,pop3,sshd"') - self.assertEqual(services_cmd("services --disabled=imapd", {"enabled": ["cockpit.socket", "httpd", "sshd"], - "disabled": ["postfix", "telnetd"]}), - 'services --disabled="imapd,postfix,telnetd" --enabled="cockpit.socket,httpd,sshd"') - self.assertEqual(services_cmd("services --enabled=pop3 --disabled=imapd", {"enabled": ["cockpit.socket", "httpd", "sshd"], - "disabled": ["postfix", "telnetd"]}), - 'services --disabled="imapd,postfix,telnetd" --enabled="cockpit.socket,httpd,pop3,sshd"') - - def test_get_default_services(self): - """Test get_default_services function""" - blueprint_data = """name = "test-services" -description = "test recipe" -version = "0.0.1" - -[customizations.services] - """ - enable_services = """ -enabled = ["sshd", "cockpit.socket", "httpd"] - """ - disable_services = """ -disabled = ["postfix", "telnetd"] - """ - blueprint2_data = blueprint_data + enable_services - blueprint3_data = blueprint_data + disable_services - blueprint4_data = blueprint_data + enable_services + disable_services - - with self.assertRaises(RecipeError): - recipe = recipe_from_toml(blueprint_data) - - recipe = recipe_from_toml(blueprint2_data) - self.assertEqual(get_default_services(recipe), "services") - - recipe = recipe_from_toml(blueprint3_data) - self.assertEqual(get_default_services(recipe), "services") - - recipe = recipe_from_toml(blueprint4_data) - self.assertEqual(get_default_services(recipe), "services") - - def _checkBootloader(self, result, append_str, line_limit=0): - """Find the bootloader line and make sure append_str is in it""" - # Optionally check to make sure the change is at the top of the template - line_num = 0 - for line in result.splitlines(): - if line.startswith("bootloader") and append_str in line: - if line_limit == 0 or line_num < line_limit: - return True - else: - print("FAILED: bootloader not in the first %d lines of the output" % line_limit) - return False - line_num += 1 - return False - - def _checkTimezone(self, result, settings, line_limit=0): - """Find the timezone line and make sure it is as expected""" - # Optionally check to make sure the change is at the top of the template - line_num = 0 - for line in result.splitlines(): - if line.startswith("timezone"): - if settings["timezone"] in line and all([True for n in settings["ntpservers"] if n in line]): - if line_limit == 0 or line_num < line_limit: - return True - else: - print("FAILED: timezone not in the first %d lines of the output" % line_limit) - return False - else: - print("FAILED: %s not matching %s" % (settings, line)) - line_num += 1 - return False - - def _checkLang(self, result, locales, line_limit=0): - """Find the lang line and make sure it is as expected""" - # Optionally check to make sure the change is at the top of the template - line_num = 0 - for line in result.splitlines(): - if line.startswith("lang"): - if all([True for n in locales if n in line]): - if line_limit == 0 or line_num < line_limit: - return True - else: - print("FAILED: lang not in the first %d lines of the output" % 
line_limit) - return False - else: - print("FAILED: %s not matching %s" % (locales, line)) - line_num += 1 - return False - - def _checkKeyboard(self, result, layout, line_limit=0): - """Find the keyboard line and make sure it is as expected""" - # Optionally check to make sure the change is at the top of the template - line_num = 0 - for line in result.splitlines(): - if line.startswith("keyboard"): - if layout in line: - if line_limit == 0 or line_num < line_limit: - return True - else: - print("FAILED: keyboard not in the first %d lines of the output" % line_limit) - return False - else: - print("FAILED: %s not matching %s" % (layout, line)) - line_num += 1 - return False - - def _checkFirewall(self, result, settings, line_limit=0): - """Find the firewall line and make sure it is as expected""" - # Optionally check to make sure the change is at the top of the template - line_num = 0 - for line in result.splitlines(): - if line.startswith("firewall"): - # First layout is used twice, so total count should be n+1 - ports = all([bool(p in line) for p in settings["ports"]]) - enabled = all([bool(e in line) for e in settings["enabled"]]) - disabled = all([bool(d in line) for d in settings["disabled"]]) - - if ports and enabled and disabled: - if line_limit == 0 or line_num < line_limit: - return True - else: - print("FAILED: firewall not in the first %d lines of the output" % line_limit) - return False - else: - print("FAILED: %s not matching %s" % (settings, line)) - line_num += 1 - return False - - def _checkServices(self, result, settings, line_limit=0): - """Find the services line and make sure it is as expected""" - # Optionally check to make sure the change is at the top of the template - line_num = 0 - for line in result.splitlines(): - if line.startswith("services"): - # First layout is used twice, so total count should be n+1 - enabled = all([bool(e in line) for e in settings["enabled"]]) - disabled = all([bool(d in line) for d in settings["disabled"]]) - - if enabled and disabled: - if line_limit == 0 or line_num < line_limit: - return True - else: - print("FAILED: services not in the first %d lines of the output" % line_limit) - return False - else: - print("FAILED: %s not matching %s" % (settings, line)) - line_num += 1 - return False - - def test_template_defaults(self): - """Test that customize_ks_template includes defaults correctly""" - blueprint_data = """name = "test-kernel" -description = "test recipe" -version = "0.0.1" - -[[packages]] -name = "lorax" -version = "*" -""" - recipe = recipe_from_toml(blueprint_data) - - # Make sure that a kickstart with no bootloader and no timezone has them added - result = customize_ks_template("firewall --enabled\n", recipe) - print(result) - self.assertEqual(sum([1 for l in result.splitlines() if l.startswith("bootloader")]), 1) - self.assertTrue(self._checkBootloader(result, "none", line_limit=2)) - self.assertEqual(sum([1 for l in result.splitlines() if l.startswith("timezone")]), 1) - self.assertTrue(self._checkTimezone(result, {"timezone": "UTC", "ntpservers": []}, line_limit=2)) - self.assertTrue("services" not in result) - - # Make sure that a kickstart with a bootloader, and no timezone has timezone added to the top - result = customize_ks_template("firewall --enabled\nbootloader --location=mbr\n", recipe) - print(result) - self.assertEqual(sum([1 for l in result.splitlines() if l.startswith("bootloader")]), 1) - self.assertTrue(self._checkBootloader(result, "mbr")) - self.assertEqual(sum([1 for l in result.splitlines() if 
l.startswith("timezone")]), 1) - self.assertTrue(self._checkTimezone(result, {"timezone": "UTC", "ntpservers": []}, line_limit=1)) - self.assertTrue("services" not in result) - - # Make sure that a kickstart with a bootloader and timezone has neither added - result = customize_ks_template("firewall --enabled\nbootloader --location=mbr\ntimezone US/Samoa\n", recipe) - print(result) - self.assertEqual(sum([1 for l in result.splitlines() if l.startswith("bootloader")]), 1) - self.assertTrue(self._checkBootloader(result, "mbr")) - self.assertEqual(sum([1 for l in result.splitlines() if l.startswith("timezone")]), 1) - self.assertTrue(self._checkTimezone(result, {"timezone": "US/Samoa", "ntpservers": []})) - self.assertTrue("services" not in result) - - def test_customize_ks_template(self): - """Test that customize_ks_template works correctly""" - blueprint_data = """name = "test-kernel" -description = "test recipe" -version = "0.0.1" - -[customizations.kernel] -append="nosmt=force" - -[customizations.timezone] -timezone = "US/Samoa" -ntpservers = ["0.north-america.pool.ntp.org", "1.north-america.pool.ntp.org"] - -[customizations.locale] -keyboard = "de (dvorak)" -languages = ["en_CA.utf8", "en_HK.utf8"] - -[customizations.firewall] -ports = ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"] - -[customizations.firewall.services] -enabled = ["ftp", "ntp", "dhcp"] -disabled = ["telnet"] - -[customizations.services] -enabled = ["sshd", "cockpit.socket", "httpd"] -disabled = ["postfix", "telnetd"] -""" - tz_dict = {"timezone": "US/Samoa", "ntpservers": ["0.north-america.pool.ntp.org", "1.north-america.pool.ntp.org"]} - recipe = recipe_from_toml(blueprint_data) - - # Test against a kickstart without bootloader - result = customize_ks_template("firewall --enabled\n", recipe) - self.assertTrue(self._checkBootloader(result, "nosmt=force", line_limit=2)) - self.assertEqual(sum([1 for l in result.splitlines() if l.startswith("bootloader")]), 1) - self.assertTrue(self._checkTimezone(result, tz_dict, line_limit=2)) - self.assertEqual(sum([1 for l in result.splitlines() if l.startswith("timezone")]), 1) - self.assertTrue(self._checkLang(result, ["en_CA.utf8", "en_HK.utf8"], line_limit=4)) - self.assertEqual(sum([1 for l in result.splitlines() if l.startswith("lang")]), 1) - self.assertTrue(self._checkKeyboard(result, "de (dvorak)", line_limit=4)) - self.assertEqual(sum([1 for l in result.splitlines() if l.startswith("keyboard")]), 1) - self.assertTrue(self._checkFirewall(result, - {"ports": ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"], - "enabled": ["ftp", "ntp", "dhcp"], "disabled": ["telnet"]}, line_limit=6)) - self.assertEqual(sum([1 for l in result.splitlines() if l.startswith("firewall")]), 1) - self.assertTrue(self._checkServices(result, - {"enabled": ["cockpit.socket", "httpd", "sshd"], "disabled": ["postfix", "telnetd"]}, - line_limit=8)) - self.assertEqual(sum([1 for l in result.splitlines() if l.startswith("services")]), 1) - - # Test against a kickstart with a bootloader line - result = customize_ks_template("firewall --enabled\nbootloader --location=mbr\n", recipe) - self.assertTrue(self._checkBootloader(result, "nosmt=force")) - self.assertTrue(self._checkTimezone(result, tz_dict, line_limit=2)) - - # Test against all of the available templates - share_dir = "./share/" - errors = [] - for compose_type, _enabled in compose_types(share_dir): - # Read the kickstart template for this type - ks_template_path = joinpaths(share_dir, "composer", compose_type) + ".ks" - ks_template = 
open(ks_template_path, "r").read() - result = customize_ks_template(ks_template, recipe) - if not self._checkBootloader(result, "nosmt=force"): - errors.append(("bootloader for compose_type %s failed" % compose_type, result)) - if sum([1 for l in result.splitlines() if l.startswith("bootloader")]) != 1: - errors.append(("bootloader for compose_type %s failed: More than 1 entry" % compose_type, result)) - - - # google images should retain their timezone settings - if compose_type == "google": - if self._checkTimezone(result, tz_dict): - errors.append(("timezone for compose_type %s failed" % compose_type, result)) - elif not self._checkTimezone(result, tz_dict, line_limit=2): - # None of the templates have a timezone to modify, it should be placed at the top - errors.append(("timezone for compose_type %s failed" % compose_type, result)) - if sum([1 for l in result.splitlines() if l.startswith("timezone")]) != 1: - errors.append(("timezone for compose_type %s failed: More than 1 entry" % compose_type, result)) - - if not self._checkLang(result, ["en_CA.utf8", "en_HK.utf8"]): - errors.append(("lang for compose_type %s failed" % compose_type, result)) - if sum([1 for l in result.splitlines() if l.startswith("lang")]) != 1: - errors.append(("lang for compose_type %s failed: More than 1 entry" % compose_type, result)) - - if not self._checkKeyboard(result, "de (dvorak)"): - errors.append(("keyboard for compose_type %s failed" % compose_type, result)) - if sum([1 for l in result.splitlines() if l.startswith("keyboard")]) != 1: - errors.append(("keyboard for compose_type %s failed: More than 1 entry" % compose_type, result)) - - # google and openstack templates requires the firewall to be disabled - if compose_type == "google" or compose_type == "openstack": - if not self._checkFirewall(result, {'ports': [], 'enabled': [], 'disabled': []}): - errors.append(("firewall for compose_type %s failed" % compose_type, result)) - else: - if not self._checkFirewall(result, - {"ports": ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"], - "enabled": ["ftp", "ntp", "dhcp"], "disabled": ["telnet"]}): - errors.append(("firewall for compose_type %s failed" % compose_type, result)) - if sum([1 for l in result.splitlines() if l.startswith("firewall")]) != 1: - errors.append(("firewall for compose_type %s failed: More than 1 entry" % compose_type, result)) - - if not self._checkServices(result, - {"enabled": ["cockpit.socket", "httpd", "sshd"], - "disabled": ["postfix", "telnetd"]}): - errors.append(("services for compose_type %s failed" % compose_type, result)) - if sum([1 for l in result.splitlines() if l.startswith("services")]) != 1: - errors.append(("services for compose_type %s failed: More than 1 entry" % compose_type, result)) - - # Print the bad results - for e, r in errors: - print("%s:\n%s\n\n" % (e, r)) - - self.assertEqual(errors, []) - -class ExtraPkgsTest(unittest.TestCase): - @classmethod - def setUpClass(self): - self.tmp_dir = tempfile.mkdtemp(prefix="lorax.test.repo.") - self.config = configure(root_dir=self.tmp_dir, test_config=True) - lifted.config.configure(self.config) - make_dnf_dirs(self.config, os.getuid(), os.getgid()) - self.dbo = get_base_object(self.config) - - @classmethod - def tearDownClass(self): - shutil.rmtree(self.tmp_dir) - - def test_live_install(self): - """Check that live-install.tmpl is parsed correctly""" - # A package for each arch to test for - arch_pkg = { - "aarch64": "shim-aa64", - "arm": "grub2-efi-arm-cdboot", - "armhfp": "grub2-efi-arm-cdboot", - "x86_64": 
"shim-x64", - "i386": "memtest86+", - "ppc64le": "powerpc-utils", - "s390x": "s390utils-base" - } - - extra_pkgs = get_extra_pkgs(self.dbo, "./share/", "live-iso") - self.assertTrue(len(extra_pkgs) > 0) - - # Results depend on arch - arch = get_buildarch(self.dbo) - self.assertTrue(arch_pkg[arch] in extra_pkgs) - - def test_other_install(self): - """Test that non-live doesn't parse live-install.tmpl""" - extra_pkgs = get_extra_pkgs(self.dbo, "./share/", "qcow2") - self.assertEqual(extra_pkgs, []) - -class ComposeTypesTest(unittest.TestCase): - def test_compose_types(self): - types = compose_types("./share/") - self.assertTrue(("qcow2", True) in types) - - if os.uname().machine != 'x86_64': - self.assertTrue(("alibaba", False) in types) diff --git a/tests/pylorax/test_dnfbase.py b/tests/pylorax/test_dnfbase.py deleted file mode 100644 index 560fd019..00000000 --- a/tests/pylorax/test_dnfbase.py +++ /dev/null @@ -1,116 +0,0 @@ -# -# Copyright (C) 2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -import os -import shutil -import tempfile -import unittest - -import configparser - -import lifted.config -from pylorax.api.config import configure, make_dnf_dirs -from pylorax.api.dnfbase import get_base_object - - -class DnfbaseNoSystemReposTest(unittest.TestCase): - @classmethod - def setUpClass(self): - self.tmp_dir = tempfile.mkdtemp(prefix="lorax.test.dnfbase.") - conf_file = os.path.join(self.tmp_dir, 'test.conf') - open(conf_file, 'w').write("""[composer] -# releasever different from the current default -releasever = 6 -[dnf] -proxy = https://proxy.example.com -sslverify = False -[repos] -use_system_repos = False -""") - - # will read the above configuration - config = configure(conf_file=conf_file, root_dir=self.tmp_dir) - lifted.config.configure(config) - make_dnf_dirs(config, os.getuid(), os.getgid()) - - # will read composer config and store a dnf config file - self.dbo = get_base_object(config) - - # will read the stored dnf config file - self.dnfconf = configparser.ConfigParser() - self.dnfconf.read([config.get("composer", "dnf_conf")]) - - @classmethod - def tearDownClass(self): - shutil.rmtree(self.tmp_dir) - - def test_stores_dnf_proxy_from_composer_config(self): - self.assertEqual('https://proxy.example.com', self.dnfconf.get('main', 'proxy')) - - def test_disables_sslverify_if_composer_disables_it(self): - self.assertEqual(False, self.dnfconf.getboolean('main', 'sslverify')) - - def test_sets_releasever_from_composer(self): - self.assertEqual('6', self.dbo.conf.releasever) - - def test_doesnt_use_system_repos(self): - # no other repos defined for this test - self.assertEqual({}, self.dbo.repos) - - -class DnfbaseSystemReposTest(unittest.TestCase): - @classmethod - def setUpClass(self): - self.tmp_dir = tempfile.mkdtemp(prefix="lorax.test.dnfbase.") - - # will read the above configuration - config = configure(root_dir=self.tmp_dir) - lifted.config.configure(config) - make_dnf_dirs(config, os.getuid(), 
os.getgid()) - - # will read composer config and store a dnf config file - self.dbo = get_base_object(config) - - # will read the stored dnf config file - self.dnfconf = configparser.ConfigParser() - self.dnfconf.read([config.get("composer", "dnf_conf")]) - - @classmethod - def tearDownClass(self): - shutil.rmtree(self.tmp_dir) - - def test_uses_system_repos(self): - # no other repos defined for this test - self.assertTrue(len(self.dbo.repos) > 0) - - -class CreateDnfDirsTest(unittest.TestCase): - @classmethod - def setUpClass(self): - self.tmp_dir = tempfile.mkdtemp(prefix="lorax.test.dnfbase.") - - @classmethod - def tearDownClass(self): - shutil.rmtree(self.tmp_dir) - - def test_creates_missing_dnf_root_directory(self): - config = configure(test_config=True, root_dir=self.tmp_dir) - lifted.config.configure(config) - - # will create the above directory if missing - make_dnf_dirs(config, os.getuid(), os.getgid()) - - self.assertTrue(os.path.exists(self.tmp_dir + '/var/tmp/composer/dnf/root')) diff --git a/tests/pylorax/test_gitrpm.py b/tests/pylorax/test_gitrpm.py deleted file mode 100644 index 1d01d25f..00000000 --- a/tests/pylorax/test_gitrpm.py +++ /dev/null @@ -1,287 +0,0 @@ -# -# Copyright (C) 2019 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -import os -import rpm -import shutil -import stat -import tarfile -import tempfile -import unittest - -from ..lib import create_git_repo -from pylorax.api.gitrpm import GitArchiveTarball, GitRpmBuild, make_git_rpm, create_gitrpm_repo -import pylorax.api.toml as toml -from pylorax.sysutils import joinpaths - -class GitArchiveTest(unittest.TestCase): - @classmethod - def setUpClass(self): - (self.repodir, self.test_results, self.first_commit) = create_git_repo() - - @classmethod - def tearDownClass(self): - shutil.rmtree(self.repodir) - - def _check_tar(self, archive, prefix, test_name): - """Check the file list of the created archive against the expected list in self.test_results""" - try: - tardir = tempfile.mkdtemp(prefix="git-rpm-test.") - archive.write_file(tardir) - tarpath = os.path.join(tardir, archive.sourceName) - - # Archive is in rpmdir + archive.sourceName - self.assertTrue(os.path.exists(tarpath)) - - # Examine contents of the tarfile - tar = tarfile.open(tarpath, "r") - files = sorted(i.name for i in tar if i.isreg()) - self.assertEqual(files, [os.path.join(prefix, f) for f in self.test_results[test_name]]) - tar.close() - finally: - shutil.rmtree(tardir) - - def test_git_branch(self): - """Test creating an archive from a git branch""" - git_repo = toml.loads(""" - [[repos.git]] - rpmname="git-rpm-test" - rpmversion="1.0.0" - rpmrelease="1" - summary="Testing the git rpm code" - repo="file://%s" - ref="origin/custom-branch" - destination="/srv/testing-rpm/" - """ % self.repodir) - archive = GitArchiveTarball(git_repo["repos"]["git"][0]) - self._check_tar(archive, "git-rpm-test/", "branch") - - def test_git_commit(self): - """Test creating an archive from a git commit hash""" - git_repo = toml.loads(""" - [[repos.git]] - rpmname="git-rpm-test" - rpmversion="1.0.0" - rpmrelease="1" - summary="Testing the git rpm code" - repo="file://%s" - ref="%s" - destination="/srv/testing-rpm/" - """ % (self.repodir, self.first_commit)) - archive = GitArchiveTarball(git_repo["repos"]["git"][0]) - self._check_tar(archive, "git-rpm-test/", "first") - - def test_git_tag(self): - """Test creating an archive from a git tag""" - git_repo = toml.loads(""" - [[repos.git]] - rpmname="git-rpm-test" - rpmversion="1.0.0" - rpmrelease="1" - summary="Testing the git rpm code" - repo="file://%s" - ref="v1.1.0" - destination="/srv/testing-rpm/" - """ % (self.repodir)) - archive = GitArchiveTarball(git_repo["repos"]["git"][0]) - self._check_tar(archive, "git-rpm-test/", "second") - - def test_git_fail_repo(self): - """Test creating an archive from a bad url""" - git_repo = toml.loads(""" - [[repos.git]] - rpmname="git-rpm-test" - rpmversion="1.0.0" - rpmrelease="1" - summary="Testing the git rpm code" - repo="file://%s" - ref="v1.1.0" - destination="/srv/testing-rpm/" - """ % ("/tmp/no-repo-here/")) - with self.assertRaises(RuntimeError) as e: - archive = GitArchiveTarball(git_repo["repos"]["git"][0]) - self._check_tar(archive, "git-rpm-test/", None) - self.assertIn("Failed to clone", str(e.exception)) - - def test_git_fail_ref(self): - """Test creating an archive from a bad ref""" - git_repo = toml.loads(""" - [[repos.git]] - rpmname="git-rpm-test" - rpmversion="1.0.0" - rpmrelease="1" - summary="Testing the git rpm code" - repo="file://%s" - ref="0297617d7b8baa263a69ae7dc901bbbcefd0eaa4" - destination="/srv/testing-rpm/" - """ % (self.repodir)) - with self.assertRaises(RuntimeError) as e: - archive = GitArchiveTarball(git_repo["repos"]["git"][0]) - self._check_tar(archive, "git-rpm-test/", None) - 
self.assertIn("Failed to archive", str(e.exception)) - - -class GitRpmTest(unittest.TestCase): - @classmethod - def setUpClass(self): - (self.repodir, self.test_results, self.first_commit) = create_git_repo() - - @classmethod - def tearDownClass(self): - shutil.rmtree(self.repodir) - - def _check_rpm(self, repo, rpm_dir, rpm_file, test_name): - """Check the contents of the rpm against the expected test results - """ - ts = rpm.TransactionSet() - fd = os.open(os.path.join(rpm_dir, rpm_file), os.O_RDONLY) - hdr = ts.hdrFromFdno(fd) - os.close(fd) - - self.assertEqual(hdr[rpm.RPMTAG_NAME], repo["rpmname"]) - self.assertEqual(hdr[rpm.RPMTAG_VERSION], repo["rpmversion"]) - self.assertEqual(hdr[rpm.RPMTAG_RELEASE], repo["rpmrelease"]) - self.assertEqual(hdr[rpm.RPMTAG_URL], repo["repo"]) - - files = sorted(f.name for f in rpm.files(hdr) if stat.S_ISREG(f.mode)) - self.assertEqual(files, [os.path.join(repo["destination"], f) for f in self.test_results[test_name]]) - - # / should never be included in the rpm, doing so conflicts with the filesystem package - self.assertFalse(any(True for f in files if f == "/")) - - def test_git_branch(self): - """Test creating an rpm from a git branch""" - git_repo = toml.loads(""" - [[repos.git]] - rpmname="git-rpm-test" - rpmversion="1.0.0" - rpmrelease="1" - summary="Testing the git rpm code" - repo="file://%s" - ref="origin/custom-branch" - destination="/srv/testing-rpm/" - """ % self.repodir) - try: - rpm_dir = tempfile.mkdtemp(prefix="git-rpm-test.") - rpm_file = make_git_rpm(git_repo["repos"]["git"][0], rpm_dir) - self._check_rpm(git_repo["repos"]["git"][0], rpm_dir, rpm_file, "branch") - finally: - shutil.rmtree(rpm_dir) - - def test_git_commit(self): - """Test creating an rpm from a git commit hash""" - git_repo = toml.loads(""" - [[repos.git]] - rpmname="git-rpm-test" - rpmversion="1.0.0" - rpmrelease="1" - summary="Testing the git rpm code" - repo="file://%s" - ref="%s" - destination="/srv/testing-rpm/" - """ % (self.repodir, self.first_commit)) - try: - rpm_dir = tempfile.mkdtemp(prefix="git-rpm-test.") - rpm_file = make_git_rpm(git_repo["repos"]["git"][0], rpm_dir) - self._check_rpm(git_repo["repos"]["git"][0], rpm_dir, rpm_file, "first") - finally: - shutil.rmtree(rpm_dir) - - def test_git_tag(self): - """Test creating an rpm from a git tag""" - git_repo = toml.loads(""" - [[repos.git]] - rpmname="git-rpm-test" - rpmversion="1.0.0" - rpmrelease="1" - summary="Testing the git rpm code" - repo="file://%s" - ref="v1.1.0" - destination="/srv/testing-rpm/" - """ % (self.repodir)) - try: - rpm_dir = tempfile.mkdtemp(prefix="git-rpm-test.") - rpm_file = make_git_rpm(git_repo["repos"]["git"][0], rpm_dir) - self._check_rpm(git_repo["repos"]["git"][0], rpm_dir, rpm_file, "second") - finally: - shutil.rmtree(rpm_dir) - - def test_gitrpm_repo(self): - """Test creating a dnf repo of the git rpms""" - recipe = toml.loads(""" - [[repos.git]] - rpmname="repo-test-alpha" - rpmversion="1.1.0" - rpmrelease="1" - summary="Testing the git rpm code" - repo="file://%s" - ref="v1.1.0" - destination="/srv/testing-alpha/" - - [[repos.git]] - rpmname="repo-test-beta" - rpmversion="1.0.0" - rpmrelease="1" - summary="Testing the git rpm code" - repo="file://%s" - ref="v1.0.0" - destination="/srv/testing-beta/" - """ % (self.repodir, self.repodir)) - try: - temp_dir = tempfile.mkdtemp(prefix="git-rpm-test.") - repo_dir = create_gitrpm_repo(temp_dir, recipe) - - self.assertTrue(len(repo_dir) > 0) - self.assertTrue(os.path.exists(joinpaths(repo_dir, 
"repo-test-alpha-1.1.0-1.noarch.rpm"))) - self.assertTrue(os.path.exists(joinpaths(repo_dir, "repo-test-beta-1.0.0-1.noarch.rpm"))) - - finally: - shutil.rmtree(temp_dir) - - def test_git_root(self): - """Test creating an rpm with / as the destination """ - git_repo = toml.loads(""" - [[repos.git]] - rpmname="git-rpm-test" - rpmversion="1.0.0" - rpmrelease="1" - summary="Testing the git rpm code" - repo="file://%s" - ref="v1.1.0" - destination="/" - """ % (self.repodir)) - try: - rpm_dir = tempfile.mkdtemp(prefix="git-rpm-test.") - rpm_file = make_git_rpm(git_repo["repos"]["git"][0], rpm_dir) - self._check_rpm(git_repo["repos"]["git"][0], rpm_dir, rpm_file, "second") - finally: - shutil.rmtree(rpm_dir) - - -class GitRpmBuildTest(unittest.TestCase): - def test_get_base_dir(self): - """Make sure base_dir is created""" - gitRpm = GitRpmBuild("rpmtest", "1.0.0", "1", ["noarch"]) - base_dir = gitRpm.get_base_dir() - self.assertTrue("lorax-git-rpm" in base_dir) - gitRpm.cleanup_tmpdir() - - def test_short_base_dir(self): - """Make sure cleanup of an unusually short base_dir fails""" - gitRpm = GitRpmBuild("rpmtest", "1.0.0", "1", ["noarch"]) - gitRpm._base_dir = "/aa/" - with self.assertRaises(RuntimeError): - gitRpm.cleanup_tmpdir() diff --git a/tests/pylorax/test_imgutils.py b/tests/pylorax/test_imgutils.py index 56d2dcad..954b620a 100644 --- a/tests/pylorax/test_imgutils.py +++ b/tests/pylorax/test_imgutils.py @@ -265,7 +265,7 @@ class ImgUtilsTest(unittest.TestCase): with tempfile.TemporaryDirectory(prefix="lorax.test.") as work_dir: with tempfile.NamedTemporaryFile(prefix="lorax.test.disk.") as disk_img: mkfakerootdir(work_dir) - graft = {work_dir+"/etc/yum.repos.d/": "./tests/pylorax/repos/server-2.repo"} + graft = {work_dir+"/ext4test/": "./tests/pylorax/templates/install-cmd.tmpl"} mkext4img(work_dir, disk_img.name, graft=graft) self.assertTrue(os.path.exists(disk_img.name)) file_details = get_file_magic(disk_img.name) @@ -281,7 +281,7 @@ class ImgUtilsTest(unittest.TestCase): with open(joinpaths(work_dir, "large-file"), "w") as f: for _ in range(5): f.write("A" * 1024**2) - graft = {work_dir+"/etc/yum.repos.d/": "./tests/pylorax/repos/server-2.repo"} + graft = {work_dir+"/ext4test/": "./tests/pylorax/templates/install-cmd.tmpl"} try: mkext4img(work_dir, disk_img.name, graft=graft, size=5*1024**2) except CalledProcessError as e: diff --git a/tests/pylorax/test_ltmpl.py b/tests/pylorax/test_ltmpl.py index 6afc0a46..86bebf2b 100644 --- a/tests/pylorax/test_ltmpl.py +++ b/tests/pylorax/test_ltmpl.py @@ -43,15 +43,15 @@ class TemplateFunctionsTestCase(unittest.TestCase): def test_rglob(self): """Test rglob function""" - self.assertEqual(list(rglob("*http*toml", "./tests/pylorax/blueprints", fatal=False)), ["example-http-server.toml"]) + self.assertEqual(list(rglob("chmod*tmpl", "./tests/pylorax/templates", fatal=False)), ["chmod-cmd.tmpl"]) self.assertEqual(list(rglob("einstein", "./tests/pylorax/blueprints", fatal=False)), []) with self.assertRaises(IOError): list(rglob("einstein", "./tests/pylorax/blueprints", fatal=True)) def test_rexists(self): """Test rexists function""" - self.assertTrue(rexists("*http*toml", "./tests/pylorax/blueprints")) - self.assertFalse(rexists("einstein", "./tests/pylorax/blueprints")) + self.assertTrue(rexists("chmod*tmpl", "./tests/pylorax/templates")) + self.assertFalse(rexists("einstein", "./tests/pylorax/templates")) class LoraxTemplateTestCase(unittest.TestCase): @classmethod diff --git a/tests/pylorax/test_projects.py b/tests/pylorax/test_projects.py 
deleted file mode 100644
index fc96290b..00000000
--- a/tests/pylorax/test_projects.py
+++ /dev/null
@@ -1,606 +0,0 @@
-#
-# Copyright (C) 2017 Red Hat, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-import dnf
-from glob import glob
-import os
-import shutil
-import tempfile
-import time
-import unittest
-
-import lifted.config
-from pylorax.sysutils import joinpaths
-from pylorax.api.config import configure, make_dnf_dirs
-from pylorax.api.projects import api_time, api_changelog, pkg_to_project, pkg_to_project_info, pkg_to_dep
-from pylorax.api.projects import proj_to_module, projects_list, projects_info, projects_depsolve
-from pylorax.api.projects import modules_list, modules_info, ProjectsError, dep_evra, dep_nevra
-from pylorax.api.projects import repo_to_source, get_repo_sources, delete_repo_source, source_to_repo
-from pylorax.api.projects import source_to_repodict, dnf_repo_to_file_repo
-from pylorax.api.dnfbase import get_base_object
-
-class Package(object):
-    """Test class for hawkey.Package tests"""
-    name = "name"
-    summary = "summary"
-    description = "description"
-    url = "url"
-    epoch = 1
-    release = "release"
-    arch = "arch"
-    buildtime = 499222800
-    license = "license"
-    version = "version"
-
-class ProjectsTest(unittest.TestCase):
-    @classmethod
-    def setUpClass(self):
-        self.tmp_dir = tempfile.mkdtemp(prefix="lorax.test.repo.")
-        self.config = configure(root_dir=self.tmp_dir, test_config=True)
-        lifted.config.configure(self.config)
-        make_dnf_dirs(self.config, os.getuid(), os.getgid())
-        self.dbo = get_base_object(self.config)
-        os.environ["TZ"] = "UTC"
-        time.tzset()
-
-    @classmethod
-    def tearDownClass(self):
-        shutil.rmtree(self.tmp_dir)
-
-    def test_api_time(self):
-        self.assertEqual(api_time(499222800), "1985-10-27T01:00:00")
-
-    def test_api_changelog(self):
-        self.assertEqual(api_changelog([[0, 1, "Heavy!"], [0, 1, "Light!"]]), "Heavy!")
-
-    def test_api_changelog_empty_list(self):
-        self.assertEqual(api_changelog([]), '')
-
-    def test_api_changelog_missing_text_entry(self):
-        self.assertEqual(api_changelog([('now', 'atodorov')]), '')
-
-    def test_pkg_to_project(self):
-        result = {"name":"name",
-                  "summary":"summary",
-                  "description":"description",
-                  "homepage":"url",
-                  "upstream_vcs":"UPSTREAM_VCS"}
-
-        pkg = Package()
-        self.assertEqual(pkg_to_project(pkg), result)
-
-    def test_pkg_to_project_info(self):
-        build = {"epoch":1,
-                 "release":"release",
-                 "arch":"arch",
-                 "build_time":"1985-10-27T01:00:00",
-                 "changelog":"CHANGELOG_NEEDED",
-                 "build_config_ref": "BUILD_CONFIG_REF",
-                 "build_env_ref": "BUILD_ENV_REF",
-                 "metadata": {},
-                 "source": {"license":"license",
-                            "version":"version",
-                            "source_ref": "SOURCE_REF",
-                            "metadata": {}}}
-
-        result = {"name":"name",
-                  "summary":"summary",
-                  "description":"description",
-                  "homepage":"url",
-                  "upstream_vcs":"UPSTREAM_VCS",
-                  "builds": [build]}
-
-        pkg = Package()
-        self.assertEqual(pkg_to_project_info(pkg), result)
-
-    def test_pkg_to_dep(self):
-
result = {"name":"name", - "epoch":1, - "version":"version", - "release":"release", - "arch":"arch"} - - pkg = Package() - self.assertEqual(pkg_to_dep(pkg), result) - - def test_proj_to_module(self): - result = {"name":"name", - "group_type":"rpm"} - - proj = pkg_to_project(Package()) - self.assertEqual(proj_to_module(proj), result) - - def test_dep_evra(self): - dep = {"arch": "noarch", - "epoch": 0, - "name": "basesystem", - "release": "7.el7", - "version": "10.0"} - self.assertEqual(dep_evra(dep), "10.0-7.el7.noarch") - - def test_dep_evra_with_epoch_not_zero(self): - dep = {"arch": "x86_64", - "epoch": 2, - "name": "tog-pegasus-libs", - "release": "3.el7", - "version": "2.14.1"} - self.assertEqual(dep_evra(dep), "2:2.14.1-3.el7.x86_64") - - def test_dep_nevra(self): - dep = {"arch": "noarch", - "epoch": 0, - "name": "basesystem", - "release": "7.el7", - "version": "10.0"} - self.assertEqual(dep_nevra(dep), "basesystem-10.0-7.el7.noarch") - - def test_projects_list(self): - projects = projects_list(self.dbo) - self.assertEqual(len(projects) > 10, True) - - def test_projects_info(self): - projects = projects_info(self.dbo, ["bash"]) - - self.assertEqual(projects[0]["name"], "bash") - self.assertEqual(projects[0]["builds"][0]["source"]["license"], "GPLv3+") - - def test_projects_depsolve(self): - deps = projects_depsolve(self.dbo, [("bash", "*.*")], []) - self.assertTrue(len(deps) > 3) - self.assertTrue("basesystem" in [dep["name"] for dep in deps]) - - def test_projects_depsolve_version(self): - """Test that depsolving with a partial wildcard version works""" - deps = projects_depsolve(self.dbo, [("bash", "5.*")], []) - self.assertEqual(deps[1]["name"], "bash") - - deps = projects_depsolve(self.dbo, [("bash", "5.0.*")], []) - self.assertEqual(deps[1]["name"], "bash") - - def test_projects_depsolve_oldversion(self): - """Test that depsolving a specific non-existant version fails""" - with self.assertRaises(ProjectsError): - deps = projects_depsolve(self.dbo, [("bash", "1.0.0")], []) - self.assertEqual(deps[1]["name"], "bash") - - def test_projects_depsolve_fail(self): - with self.assertRaises(ProjectsError): - projects_depsolve(self.dbo, [("nada-package", "*.*")], []) - - def test_shim_depsolve(self): - """Test that requesting shim pulls in shim-*""" - deps = projects_depsolve(self.dbo, [("shim", "*")], []) - self.assertTrue(len(deps) > 0) - self.assertTrue(any(True for dep in deps if "shim-" in dep["name"])) - - def test_cdbootglob_depsolve(self): - """Test that requesting grub2-efi-*-cdboot pulls in a cdboot package""" - deps = projects_depsolve(self.dbo, [("grub2-efi-*-cdboot", "*")], []) - self.assertTrue(len(deps) > 0) - self.assertTrue(any(True for dep in deps if "-cdboot" in dep["name"])) - - def test_modules_list_all(self): - modules = modules_list(self.dbo, None) - - self.assertEqual(len(modules) > 10, True) - self.assertEqual(modules[0]["group_type"], "rpm") - - def test_modules_list_glob(self): - modules = modules_list(self.dbo, ["g*"]) - self.assertEqual(modules[0]["name"].startswith("g"), True) - - def test_modules_info(self): - modules = modules_info(self.dbo, ["bash"]) - - print(modules) - self.assertEqual(modules[0]["name"], "bash") - self.assertEqual(modules[0]["dependencies"][0]["name"], "basesystem") - - def test_groups_depsolve(self): - deps = projects_depsolve(self.dbo, [], ["c-development"]) - names = [grp["name"] for grp in deps] - self.assertTrue("autoconf" in names) # mandatory package - self.assertTrue("ctags" in names) # default package - 
self.assertFalse("cmake" in names) # optional package - -class ConfigureTest(unittest.TestCase): - @classmethod - def setUpClass(self): - self.tmp_dir = tempfile.mkdtemp(prefix="lorax.test.configure.") - self.conf_file = os.path.join(self.tmp_dir, 'test.conf') - open(self.conf_file, 'w').write("[composer]\ncache_dir = /tmp/cache-test") - - @classmethod - def tearDownClass(self): - shutil.rmtree(self.tmp_dir) - - def test_configure_reads_existing_file(self): - config = configure(conf_file=self.conf_file) - lifted.config.configure(config) - self.assertEqual(config.get('composer', 'cache_dir'), '/tmp/cache-test') - - def test_configure_reads_non_existing_file(self): - config = configure(conf_file=self.conf_file + '.non-existing') - lifted.config.configure(config) - self.assertEqual(config.get('composer', 'cache_dir'), '/var/tmp/composer/cache') - -def fakerepo_baseurl_v0(): - return { - "check_gpg": True, - "check_ssl": True, - "name": "fake-repo-baseurl", - "system": False, - "type": "yum-baseurl", - "url": "https://fake-repo.base.url" - } - -def fakerepo_baseurl_v1(): - d = fakerepo_baseurl_v0() - d["id"] = "fake-repo-baseurl" - d["name"] = "A fake repo with a baseurl" - return d - -def fakesystem_repo_v0(): - return { - "check_gpg": True, - "check_ssl": True, - "name": "fake-repo-baseurl", - "system": True, - "type": "yum-baseurl", - "url": "https://fake-repo.base.url" - } - -def fakesystem_repo_v1(): - d = fakesystem_repo_v0() - d["id"] = "fake-repo-baseurl" - d["name"] = "A fake repo with a baseurl" - return d - -def fakerepo_metalink_v0(): - return { - "check_gpg": True, - "check_ssl": True, - "name": "fake-repo-metalink", - "system": False, - "type": "yum-metalink", - "url": "https://fake-repo.metalink" - } - -def fakerepo_metalink_v1(): - d = fakerepo_metalink_v0() - d["id"] = "fake-repo-metalink" - d["name"] = "A fake repo with a metalink" - return d - -def fakerepo_mirrorlist_v0(): - return { - "check_gpg": True, - "check_ssl": True, - "name": "fake-repo-mirrorlist", - "system": False, - "type": "yum-mirrorlist", - "url": "https://fake-repo.mirrorlist" - } - -def fakerepo_mirrorlist_v1(): - d = fakerepo_mirrorlist_v0() - d["id"] = "fake-repo-mirrorlist" - d["name"] = "A fake repo with a mirrorlist" - return d - -def fakerepo_proxy_v0(): - return { - "check_gpg": True, - "check_ssl": True, - "name": "fake-repo-proxy", - "proxy": "https://fake-repo.proxy", - "system": False, - "type": "yum-baseurl", - "url": "https://fake-repo.base.url" - } - -def fakerepo_proxy_v1(): - d = fakerepo_proxy_v0() - d["id"] = "fake-repo-proxy" - d["name"] = "A fake repo with a proxy" - return d - -def fakerepo_gpgkey_v0(): - return { - "check_gpg": True, - "check_ssl": True, - "gpgkey_urls": [ - "https://fake-repo.gpgkey" - ], - "name": "fake-repo-gpgkey", - "system": False, - "type": "yum-baseurl", - "url": "https://fake-repo.base.url" - } - -def fakerepo_gpgkey_v1(): - d = fakerepo_gpgkey_v0() - d["id"] = "fake-repo-gpgkey" - d["name"] = "A fake repo with a gpgkey" - return d - -def singlerepo_v0(): - return { - "check_gpg": True, - "check_ssl": True, - "gpgkey_urls": [ - "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-" + os.environ['TEST_OS'] + "-x86_64" - ], - "name": "single-repo", - "system": False, - "type": "yum-baseurl", - "url": "file:///tmp/lorax-empty-repo/" - } - -def singlerepo_v1(): - d = singlerepo_v0() - d["id"] = "single-repo" - d["name"] = "One repo in the file" - return d - -def singlerepo_vars_v0(): - return { - "check_gpg": True, - "check_ssl": True, - "gpgkey_urls": [ - 
"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch" - ], - "name": "single-repo", - "system": False, - "type": "yum-baseurl", - "url": "file:///tmp/lorax-empty-repo-$releasever-$basearch/" - } - -def singlerepo_vars_v1(): - d = singlerepo_v0() - d["id"] = "single-repo" - d["name"] = "One repo in the file" - return d - -class SourceTest(unittest.TestCase): - @classmethod - def setUpClass(self): - self.tmp_dir = tempfile.mkdtemp(prefix="lorax.test.repo.") - for f in glob("./tests/pylorax/repos/*.repo"): - shutil.copy2(f, self.tmp_dir) - - self.dbo = dnf.Base() - - # Load all the test repos - self.dbo.conf.reposdir = [self.tmp_dir] - self.dbo.read_all_repos() - - @classmethod - def tearDownClass(self): - shutil.rmtree(self.tmp_dir) - - def _read(self, repo_file): - return open(joinpaths(self.tmp_dir, repo_file), "r").read() - - def test_repo_to_source_baseurl(self): - """Test a repo with a baseurl API v0""" - self.assertEqual(repo_to_source(self.dbo.repos.get("fake-repo-baseurl"), False, 0), fakerepo_baseurl_v0()) - - def test_repo_to_source_baseurl_v1(self): - """Test a repo with a baseurl API v1""" - self.assertEqual(repo_to_source(self.dbo.repos.get("fake-repo-baseurl"), False, 1), fakerepo_baseurl_v1()) - - def test_system_repo(self): - """Test a system repo with a baseurl API v0""" - self.assertEqual(repo_to_source(self.dbo.repos.get("fake-repo-baseurl"), True, 0), fakesystem_repo_v0()) - - def test_system_repo_v1(self): - """Test a system repo with a baseurl API v1""" - self.assertEqual(repo_to_source(self.dbo.repos.get("fake-repo-baseurl"), True, 1), fakesystem_repo_v1()) - - def test_repo_to_source_metalink(self): - """Test a repo with a metalink API v0""" - self.assertEqual(repo_to_source(self.dbo.repos.get("fake-repo-metalink"), False, 0), fakerepo_metalink_v0()) - - def test_repo_to_source_metalink_v1(self): - """Test a repo with a metalink API v1""" - self.assertEqual(repo_to_source(self.dbo.repos.get("fake-repo-metalink"), False, 1), fakerepo_metalink_v1()) - - def test_repo_to_source_mirrorlist(self): - """Test a repo with a mirrorlist API v0""" - self.assertEqual(repo_to_source(self.dbo.repos.get("fake-repo-mirrorlist"), False, 0), fakerepo_mirrorlist_v0()) - - def test_repo_to_source_mirrorlist_v1(self): - """Test a repo with a mirrorlist API v1""" - self.assertEqual(repo_to_source(self.dbo.repos.get("fake-repo-mirrorlist"), False, 1), fakerepo_mirrorlist_v1()) - - def test_repo_to_source_proxy(self): - """Test a repo with a proxy API v0""" - self.assertEqual(repo_to_source(self.dbo.repos.get("fake-repo-proxy"), False, 0), fakerepo_proxy_v0()) - - def test_repo_to_source_proxy_v1(self): - """Test a repo with a proxy API v1""" - self.assertEqual(repo_to_source(self.dbo.repos.get("fake-repo-proxy"), False, 1), fakerepo_proxy_v1()) - - def test_repo_to_source_gpgkey(self): - """Test a repo with a GPG key API v0""" - self.assertEqual(repo_to_source(self.dbo.repos.get("fake-repo-gpgkey"), False, 0), fakerepo_gpgkey_v0()) - - def test_repo_to_source_gpgkey_v1(self): - """Test a repo with a GPG key API v1""" - self.assertEqual(repo_to_source(self.dbo.repos.get("fake-repo-gpgkey"), False, 1), fakerepo_gpgkey_v1()) - - def test_get_repo_sources(self): - """Test getting a list of sources from a repo directory""" - sources = get_repo_sources(joinpaths(self.tmp_dir, "*.repo")) - self.assertTrue("lorax-1" in sources) - self.assertTrue("lorax-2" in sources) - - def test_delete_source_multiple(self): - """Test deleting a source from a repo file with multiple entries""" - 
delete_repo_source(joinpaths(self.tmp_dir, "*.repo"), "lorax-3") - sources = get_repo_sources(joinpaths(self.tmp_dir, "*.repo")) - self.assertTrue("lorax-3" not in sources) - - def test_delete_source_single(self): - """Test deleting a source from a repo with only 1 entry""" - delete_repo_source(joinpaths(self.tmp_dir, "*.repo"), "single-repo") - sources = get_repo_sources(joinpaths(self.tmp_dir, "*.repo")) - self.assertTrue("single-repo" not in sources) - self.assertTrue(not os.path.exists(joinpaths(self.tmp_dir, "single.repo"))) - - def test_delete_source_other(self): - """Test deleting a source from a repo that doesn't match the source name""" - with self.assertRaises(ProjectsError): - delete_repo_source(joinpaths(self.tmp_dir, "*.repo"), "unknown-source") - sources = get_repo_sources(joinpaths(self.tmp_dir, "*.repo")) - self.assertTrue("lorax-1" in sources) - self.assertTrue("lorax-2" in sources) - self.assertTrue("lorax-4" in sources) - self.assertTrue("other-repo" in sources) - - def test_source_to_repo_baseurl(self): - """Test creating a dnf.Repo with a baseurl API v0""" - repo = source_to_repo(fakerepo_baseurl_v0(), self.dbo.conf) - self.assertEqual(repo.baseurl[0], fakerepo_baseurl_v0()["url"]) - - def test_source_to_repodict_baseurl(self): - """Test creating a repodict with a baseurl API v0""" - repo = source_to_repodict(fakerepo_baseurl_v0()) - self.assertEqual(repo[1][0], fakerepo_baseurl_v0()["url"]) - - def test_source_to_repo_baseurl_v1(self): - """Test creating a dnf.Repo with a baseurl API v1""" - repo = source_to_repo(fakerepo_baseurl_v1(), self.dbo.conf) - self.assertEqual(repo.baseurl[0], fakerepo_baseurl_v1()["url"]) - - def test_source_to_repodict_baseurl_v1(self): - """Test creating a repodict with a baseurl API v1""" - repo = source_to_repodict(fakerepo_baseurl_v1()) - self.assertEqual(repo[1][0], fakerepo_baseurl_v1()["url"]) - - def test_source_to_repo_metalink(self): - """Test creating a dnf.Repo with a metalink API v0""" - repo = source_to_repo(fakerepo_metalink_v0(), self.dbo.conf) - self.assertEqual(repo.metalink, fakerepo_metalink_v0()["url"]) - - def test_source_to_repodict_metalink(self): - """Test creating a repodict with a metalink API v0""" - repo = source_to_repodict(fakerepo_metalink_v0()) - self.assertEqual(repo[2]["metalink"], fakerepo_metalink_v0()["url"]) - - def test_source_to_repo_metalink_v1(self): - """Test creating a dnf.Repo with a metalink API v1""" - repo = source_to_repo(fakerepo_metalink_v1(), self.dbo.conf) - self.assertEqual(repo.metalink, fakerepo_metalink_v1()["url"]) - - def test_source_to_repodict_metalink_v1(self): - """Test creating a repodict with a metalink API v1""" - repo = source_to_repodict(fakerepo_metalink_v1()) - self.assertEqual(repo[2]["metalink"], fakerepo_metalink_v1()["url"]) - - def test_source_to_repo_mirrorlist(self): - """Test creating a dnf.Repo with a mirrorlist API v0""" - repo = source_to_repo(fakerepo_mirrorlist_v0(), self.dbo.conf) - self.assertEqual(repo.mirrorlist, fakerepo_mirrorlist_v0()["url"]) - - def test_source_to_repodict_mirrorlist(self): - """Test creating a repodict with a mirrorlist API v0""" - repo = source_to_repodict(fakerepo_mirrorlist_v0()) - self.assertEqual(repo[2]["mirrorlist"], fakerepo_mirrorlist_v0()["url"]) - - def test_source_to_repo_mirrorlist_v1(self): - """Test creating a dnf.Repo with a mirrorlist""" - repo = source_to_repo(fakerepo_mirrorlist_v1(), self.dbo.conf) - self.assertEqual(repo.mirrorlist, fakerepo_mirrorlist_v1()["url"]) - - def 
test_source_to_repodict_mirrorlist_v1(self): - """Test creating a repodict with a mirrorlist""" - repo = source_to_repodict(fakerepo_mirrorlist_v1()) - self.assertEqual(repo[2]["mirrorlist"], fakerepo_mirrorlist_v1()["url"]) - - def test_source_to_repo_proxy(self): - """Test creating a dnf.Repo with a proxy API v0""" - repo = source_to_repo(fakerepo_proxy_v0(), self.dbo.conf) - self.assertEqual(repo.proxy, fakerepo_proxy_v0()["proxy"]) - - def test_source_to_repodict_proxy(self): - """Test creating a repodict with a proxy API v0""" - repo = source_to_repodict(fakerepo_proxy_v0()) - self.assertEqual(repo[2]["proxy"], fakerepo_proxy_v0()["proxy"]) - - def test_source_to_repo_proxy_v1(self): - """Test creating a dnf.Repo with a proxy API v1""" - repo = source_to_repo(fakerepo_proxy_v1(), self.dbo.conf) - self.assertEqual(repo.proxy, fakerepo_proxy_v1()["proxy"]) - - def test_source_to_repodict_proxy_v1(self): - """Test creating a repodict with a proxy API v1""" - repo = source_to_repodict(fakerepo_proxy_v1()) - self.assertEqual(repo[2]["proxy"], fakerepo_proxy_v1()["proxy"]) - - def test_source_to_repo_gpgkey(self): - """Test creating a dnf.Repo with a gpgkey API v0""" - repo = source_to_repo(fakerepo_gpgkey_v0(), self.dbo.conf) - self.assertEqual(repo.gpgkey[0], fakerepo_gpgkey_v0()["gpgkey_urls"][0]) - - def test_source_to_repodict_gpgkey(self): - """Test creating a repodict with a gpgkey API v0""" - repo = source_to_repodict(fakerepo_gpgkey_v0()) - self.assertEqual(repo[2]["gpgkey"][0], fakerepo_gpgkey_v0()["gpgkey_urls"][0]) - - def test_source_to_repo_gpgkey_v1(self): - """Test creating a dnf.Repo with a gpgkey API v1""" - repo = source_to_repo(fakerepo_gpgkey_v1(), self.dbo.conf) - self.assertEqual(repo.gpgkey[0], fakerepo_gpgkey_v1()["gpgkey_urls"][0]) - - def test_source_to_repodict_gpgkey_v1(self): - """Test creating a repodict with a gpgkey API v1""" - repo = source_to_repodict(fakerepo_gpgkey_v1()) - self.assertEqual(repo[2]["gpgkey"][0], fakerepo_gpgkey_v1()["gpgkey_urls"][0]) - - def test_drtfr_baseurl(self): - """Test creating a dnf .repo file from a baseurl Repo object""" - self.assertEqual(dnf_repo_to_file_repo(self.dbo.repos.get("fake-repo-baseurl")), - self._read("baseurl-test.repo")) - - def test_drtfr_metalink(self): - """Test creating a dnf .repo file from a metalink Repo object""" - self.assertEqual(dnf_repo_to_file_repo(self.dbo.repos.get("fake-repo-metalink")), - self._read("metalink-test.repo")) - - def test_drtfr_mirrorlist(self): - """Test creating a dnf .repo file from a mirrorlist Repo object""" - self.assertEqual(dnf_repo_to_file_repo(self.dbo.repos.get("fake-repo-mirrorlist")), - self._read("mirrorlist-test.repo")) - - def test_drtfr_proxy(self): - """Test creating a dnf .repo file from a baseurl Repo object with proxy""" - self.assertEqual(dnf_repo_to_file_repo(self.dbo.repos.get("fake-repo-proxy")), - self._read("proxy-test.repo")) - - def test_drtfr_gpgkey(self): - """Test creating a dnf .repo file from a baseurl Repo object with gpgkey""" - self.assertEqual(dnf_repo_to_file_repo(self.dbo.repos.get("fake-repo-gpgkey")), - self._read("gpgkey-test.repo")) - - def test_repo_to_source_json(self): - """Test serializing repo_to_source results API v0""" - self.assertEqual(repo_to_source(self.dbo.repos.get("single-repo"), False, 0), singlerepo_v0()) - - def test_repo_to_source_json_v1(self): - """Test serializing repo_to_source results API v1""" - self.assertEqual(repo_to_source(self.dbo.repos.get("single-repo"), False, 1), singlerepo_v1()) diff --git
a/tests/pylorax/test_queue.py b/tests/pylorax/test_queue.py deleted file mode 100644 index a6c4a236..00000000 --- a/tests/pylorax/test_queue.py +++ /dev/null @@ -1,110 +0,0 @@ -# -# Copyright (C) 2018 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -# -import os -import shutil -import tempfile -import unittest -from uuid import uuid4 - -import lifted.config -from pylorax.api.config import configure, make_queue_dirs -from pylorax.api.queue import check_queues -from pylorax.base import DataHolder -from pylorax.sysutils import joinpaths - - -class QueueTestCase(unittest.TestCase): - @classmethod - def setUpClass(self): - self.maxDiff = None - self.config = dict() - - repo_dir = tempfile.mkdtemp(prefix="lorax.test.repo.") - self.config["REPO_DIR"] = repo_dir - - self.config["COMPOSER_CFG"] = configure(root_dir=repo_dir, test_config=True) - lifted.config.configure(self.config["COMPOSER_CFG"]) - os.makedirs(joinpaths(self.config["COMPOSER_CFG"].get("composer", "share_dir"), "composer")) - errors = make_queue_dirs(self.config["COMPOSER_CFG"], os.getgid()) - if errors: - raise RuntimeError("\n".join(errors)) - - lib_dir = self.config["COMPOSER_CFG"].get("composer", "lib_dir") - share_dir = self.config["COMPOSER_CFG"].get("composer", "share_dir") - tmp = self.config["COMPOSER_CFG"].get("composer", "tmp") - self.monitor_cfg = DataHolder(composer_dir=lib_dir, share_dir=share_dir, uid=0, gid=0, tmp=tmp) - - @classmethod - def tearDownClass(self): - shutil.rmtree(self.config["REPO_DIR"]) - - def test_broken_run_symlinks(self): - """Put a broken symlink into queue/run and make sure it is removed""" - uuid = str(uuid4()) - os.symlink(joinpaths(self.monitor_cfg.composer_dir, "results", uuid), - joinpaths(self.monitor_cfg.composer_dir, "queue/run", uuid)) - self.assertTrue(os.path.islink(joinpaths(self.monitor_cfg.composer_dir, "queue/run", uuid))) - check_queues(self.monitor_cfg) - self.assertFalse(os.path.islink(joinpaths(self.monitor_cfg.composer_dir, "queue/run", uuid))) - - def test_broken_new_symlinks(self): - """Put a broken symlink into queue/new and make sure it is removed""" - uuid = str(uuid4()) - os.symlink(joinpaths(self.monitor_cfg.composer_dir, "results", uuid), - joinpaths(self.monitor_cfg.composer_dir, "queue/new", uuid)) - self.assertTrue(os.path.islink(joinpaths(self.monitor_cfg.composer_dir, "queue/new", uuid))) - check_queues(self.monitor_cfg) - self.assertFalse(os.path.islink(joinpaths(self.monitor_cfg.composer_dir, "queue/new", uuid))) - - def test_stale_run_symlink(self): - """Put a valid symlink in run, make sure it is set to FAILED and removed""" - uuid = str(uuid4()) - os.makedirs(joinpaths(self.monitor_cfg.composer_dir, "results", uuid)) - os.symlink(joinpaths(self.monitor_cfg.composer_dir, "results", uuid), - joinpaths(self.monitor_cfg.composer_dir, "queue/run", uuid)) - self.assertTrue(os.path.islink(joinpaths(self.monitor_cfg.composer_dir, "queue/run", uuid))) - check_queues(self.monitor_cfg) -
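# A sketch of the contract check_queues() is being held to at this point,
# inferred only from the tests in this file: broken queue symlinks are
# dropped, and a compose still linked in queue/run from a previous start is
# marked FAILED before its link is removed. The helper name and directory
# layout below are illustrative, not the deleted implementation.
import os

def sweep_stale_run_links(composer_dir):
    run_dir = os.path.join(composer_dir, "queue", "run")
    for uuid in os.listdir(run_dir):
        link = os.path.join(run_dir, uuid)
        results = os.path.join(composer_dir, "results", uuid)
        if not os.path.islink(link):
            continue
        if os.path.isdir(results):
            # Nothing can still be running across a restart; record the
            # failure in the results directory before cleaning up.
            with open(os.path.join(results, "STATUS"), "w") as f:
                f.write("FAILED\n")
        # Broken links (no results directory behind them) are simply removed.
        os.unlink(link)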
self.assertFalse(os.path.islink(joinpaths(self.monitor_cfg.composer_dir, "queue/run", uuid))) - status = open(joinpaths(self.monitor_cfg.composer_dir, "results", uuid, "STATUS")).read().strip() - self.assertEqual(status, "FAILED") - - def test_missing_status(self): - """Create a results dir w/o STATUS and confirm it is set to FAILED""" - uuid = str(uuid4()) - os.makedirs(joinpaths(self.monitor_cfg.composer_dir, "results", uuid)) - check_queues(self.monitor_cfg) - status = open(joinpaths(self.monitor_cfg.composer_dir, "results", uuid, "STATUS")).read().strip() - self.assertEqual(status, "FAILED") - - def test_running_status(self): - """Create a results dir with STATUS set to RUNNING and confirm it is set to FAILED""" - uuid = str(uuid4()) - os.makedirs(joinpaths(self.monitor_cfg.composer_dir, "results", uuid)) - open(joinpaths(self.monitor_cfg.composer_dir, "results", uuid, "STATUS"), "w").write("RUNNING\n") - check_queues(self.monitor_cfg) - status = open(joinpaths(self.monitor_cfg.composer_dir, "results", uuid, "STATUS")).read().strip() - self.assertEqual(status, "FAILED") - - def test_missing_new_symlink(self): - """Create a results dir with STATUS set to WAITING and confirm a symlink is created in queue/new""" - uuid = str(uuid4()) - os.makedirs(joinpaths(self.monitor_cfg.composer_dir, "results", uuid)) - open(joinpaths(self.monitor_cfg.composer_dir, "results", uuid, "STATUS"), "w").write("WAITING\n") - check_queues(self.monitor_cfg) - status = open(joinpaths(self.monitor_cfg.composer_dir, "results", uuid, "STATUS")).read().strip() - self.assertEqual(status, "WAITING") - self.assertTrue(os.path.islink(joinpaths(self.monitor_cfg.composer_dir, "queue/new", uuid))) diff --git a/tests/pylorax/test_recipes.py b/tests/pylorax/test_recipes.py deleted file mode 100644 index 24c190d1..00000000 --- a/tests/pylorax/test_recipes.py +++ /dev/null @@ -1,1318 +0,0 @@ -# -# Copyright (C) 2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>.
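# What follows is the deleted test suite for pylorax.api.recipes, the
# git-backed blueprint store behind the WELDR API. A minimal usage sketch,
# inferred from the tests below; the storage path and the blueprint TOML are
# illustrative stand-ins, not values shipped with lorax.
import pylorax.api.recipes as recipes

repo = recipes.open_or_create_repo("/tmp/blueprints")
blueprint = recipes.recipe_from_toml(
    'name = "demo"\ndescription = "A demo blueprint"\nversion = "0.0.1"\n')
recipes.commit_recipe(repo, "master", blueprint)  # one git commit per save
for c in recipes.list_commits(repo, "master", "demo.toml"):
    # Per the tests below, each commit carries a 40-character hash and a
    # message such as "Recipe demo, version 0.0.1 saved."
    print(c.commit, c.message)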
-# -import os -import shutil -import tempfile -import unittest -from unittest import mock - -import pylorax.api.recipes as recipes -from pylorax.api.compose import add_customizations, customize_ks_template -from pylorax.api.toml import TomlError -from pylorax.sysutils import joinpaths - -from pykickstart.parser import KickstartParser -from pykickstart.version import makeVersion - -class BasicRecipeTest(unittest.TestCase): - @classmethod - def setUpClass(self): - # Input toml is in .toml and python dict string is in .dict - input_recipes = [("full-recipe.toml", "full-recipe.dict"), - ("minimal.toml", "minimal.dict"), - ("modules-only.toml", "modules-only.dict"), - ("packages-only.toml", "packages-only.dict"), - ("groups-only.toml", "groups-only.dict"), - ("custom-base.toml", "custom-base.dict"), - ("repos-git.toml", "repos-git.dict")] - results_path = "./tests/pylorax/results/" - self.input_toml = {} - for (recipe_toml, recipe_dict) in input_recipes: - with open(joinpaths(results_path, recipe_toml)) as f_toml: - with open(joinpaths(results_path, recipe_dict)) as f_dict: - # XXX Warning, can run arbitrary code - result_dict = eval(f_dict.read()) - self.input_toml[recipe_toml] = (f_toml.read(), result_dict) - - # Used by diff tests - self.old_modules = [recipes.RecipeModule("toml", "2.1"), - recipes.RecipeModule("bash", "5.*"), - recipes.RecipeModule("httpd", "3.7.*")] - self.new_modules = [recipes.RecipeModule("toml", "2.1"), - recipes.RecipeModule("httpd", "3.8.*"), - recipes.RecipeModule("openssh", "2.8.1")] - self.modules_result = [{"new": {"Modules": {"version": "2.8.1", "name": "openssh"}}, - "old": None}, - {"new": None, - "old": {"Modules": {"name": "bash", "version": "5.*"}}}, - {"new": {"Modules": {"version": "3.8.*", "name": "httpd"}}, - "old": {"Modules": {"version": "3.7.*", "name": "httpd"}}}] - - self.old_packages = [recipes.RecipePackage("python", "2.7.*"), - recipes.RecipePackage("parted", "3.2")] - self.new_packages = [recipes.RecipePackage("python", "2.7.*"), - recipes.RecipePackage("parted", "3.2"), - recipes.RecipePackage("git", "2.13.*")] - self.packages_result = [{"new": {"Packages": {"name": "git", "version": "2.13.*"}}, "old": None}] - - self.old_groups = [recipes.RecipeGroup("backup-client"), - recipes.RecipeGroup("standard")] - self.new_groups = [recipes.RecipeGroup("console-internet"), - recipes.RecipeGroup("standard")] - self.groups_result = [{'new': {'Groups': {'name': 'console-internet'}}, 'old': None}, - {'new': None, 'old': {'Groups': {'name': 'backup-client'}}}] - - # customizations test data and results. 
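# (These data sets drive the diff tests below. Each entry produced by
# diff_lists(), customizations_diff(), and recipe_diff() has the shape
# {"old": <previous value or None>, "new": <new value or None>}, keyed by the
# element type, e.g. "Modules", "Customizations.kernel", or "Repos.git".)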
- self.old_custom = {'hostname': 'custombase'} - self.custom_sshkey1 = {'sshkey': [{'user': 'root', 'key': 'A SSH KEY FOR ROOT'}]} - self.custom_sshkey2 = {'sshkey': [{'user': 'root', 'key': 'A DIFFERENT SSH KEY FOR ROOT'}]} - self.custom_sshkey3 = {'sshkey': [{'user': 'root', 'key': 'A SSH KEY FOR ROOT'}, {'user': 'cliff', 'key': 'A SSH KEY FOR CLIFF'}]} - self.custom_kernel = {'kernel': {'append': 'nosmt=force'}} - self.custom_user1 = {'user': [{'name': 'admin', 'description': 'Administrator account', 'password': '$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31L...', 'key': 'PUBLIC SSH KEY', 'home': '/srv/widget/', 'shell': '/usr/bin/bash', 'groups': ['widget', 'users', 'wheel'], 'uid': 1200, 'gid': 1200}]} - self.custom_user2 = {'user': [{'name': 'admin', 'description': 'Administrator account', 'password': '$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31L...', 'key': 'PUBLIC SSH KEY', 'home': '/root/', 'shell': '/usr/bin/bash', 'groups': ['widget', 'users', 'wheel'], 'uid': 1200, 'gid': 1200}]} - self.custom_user3 = {'user': [{'name': 'admin', 'description': 'Administrator account', 'password': '$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31L...', 'key': 'PUBLIC SSH KEY', 'home': '/srv/widget/', 'shell': '/usr/bin/bash', 'groups': ['widget', 'users', 'wheel'], 'uid': 1200, 'gid': 1200}, {'name': 'norman', 'key': 'PUBLIC SSH KEY'}]} - self.custom_group = {'group': [{'name': 'widget', 'gid': 1130}]} - self.custom_timezone1 = {'timezone': {'timezone': 'US/Eastern', 'ntpservers': ['0.north-america.pool.ntp.org', '1.north-america.pool.ntp.org']}} - self.custom_timezone2 = {'timezone': {'timezone': 'US/Eastern'}} - self.custom_timezone3 = {'timezone': {'ntpservers': ['0.north-america.pool.ntp.org', '1.north-america.pool.ntp.org']}} - self.custom_locale1 = {'locale': {'languages': ['en_US.UTF-8'], 'keyboard': 'us'}} - self.custom_locale2 = {'locale': {'languages': ['en_US.UTF-8']}} - self.custom_locale3 = {'locale': {'keyboard': 'us'}} - self.custom_firewall1 = {'firewall': {'ports': ['22:tcp', '80:tcp', 'imap:tcp', '53:tcp', '53:udp'], 'services': {'enabled': ['ftp', 'ntp', 'dhcp'], 'disabled': ['telnet']}}} - self.custom_firewall2 = {'firewall': {'ports': ['22:tcp', '80:tcp', 'imap:tcp', '53:tcp', '53:udp']}} - self.custom_firewall3 = {'firewall': {'services': {'enabled': ['ftp', 'ntp', 'dhcp'], 'disabled': ['telnet']}}} - self.custom_firewall4 = {'firewall': {'services': {'enabled': ['ftp', 'ntp', 'dhcp']}}} - self.custom_firewall5 = {'firewall': {'services': {'disabled': ['telnet']}}} - self.custom_services1 = {'services': {'enabled': ['sshd', 'cockpit.socket', 'httpd'], 'disabled': ['postfix', 'telnetd']}} - self.custom_services2 = {'services': {'enabled': ['sshd', 'cockpit.socket', 'httpd']}} - self.custom_services3 = {'services': {'disabled': ['postfix', 'telnetd']}} - - self.old_custom.update(self.custom_sshkey1) - # Build the new custom from these pieces - self.new_custom = self.old_custom.copy() - for d in [self.custom_kernel, self.custom_user1, self.custom_group, self.custom_timezone1, - self.custom_locale1, self.custom_firewall1, self.custom_services1]: - self.new_custom.update(d) - self.custom_result = [{'new': {'Customizations.firewall': {'ports': ['22:tcp', '80:tcp', 'imap:tcp', '53:tcp', '53:udp'], - 'services': {'disabled': ['telnet'], 'enabled': ['ftp', 'ntp', 'dhcp']}}}, - 'old': None}, - {'new': {'Customizations.group': [{'gid': 1130, 'name': 'widget'}]}, - 'old': None}, - {'new': {'Customizations.kernel': {'append': 'nosmt=force'}}, - 'old': None}, - {'new': {'Customizations.locale': 
{'keyboard': 'us', 'languages': ['en_US.UTF-8']}}, - 'old': None}, - {'new': {'Customizations.services': {'disabled': ['postfix', 'telnetd'], 'enabled': ['sshd', 'cockpit.socket', 'httpd']}}, - 'old': None}, - {'new': {'Customizations.timezone': {'ntpservers': ['0.north-america.pool.ntp.org', '1.north-america.pool.ntp.org'], - 'timezone': 'US/Eastern'}}, - 'old': None}, - {'new': {'Customizations.user': [{'description': 'Administrator account', 'gid': 1200, - 'groups': ['widget', 'users', 'wheel'], 'home': '/srv/widget/', - 'key': 'PUBLIC SSH KEY', 'name': 'admin', - 'password': '$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31L...', 'shell': '/usr/bin/bash', 'uid': 1200}]}, - 'old': None}] - - # repos.git test data and results - self.old_git = [{'rpmname': 'server-config-files', - 'rpmversion': '1.0', - 'rpmrelease': '1', - 'summary': 'Setup files for server deployment', - 'repo': 'https://github.com/weldr/server-config-files', - 'ref': 'v3.0', - 'destination': '/srv/config/'}] - self.new_git = [{'rpmname': 'bart-files', - 'rpmversion': '1.1', - 'rpmrelease': '1', - 'summary': 'Files needed for Bart', - 'repo': 'https://github.com/weldr/not-a-real-repo', - 'ref': 'v1.0', - 'destination': '/home/bart/Documents/'}, - {'rpmname': 'server-config-files', - 'rpmversion': '1.0', - 'rpmrelease': '1', - 'summary': 'Setup files for server deployment', - 'repo': 'https://github.com/weldr/server-config-files', - 'ref': 'v3.0', - 'destination': '/srv/config/'}] - self.git_result = [{'old': None, - 'new': {'Repos.git': {'rpmname': 'bart-files', - 'rpmversion': '1.1', - 'rpmrelease': '1', - 'summary': 'Files needed for Bart', - 'repo': 'https://github.com/weldr/not-a-real-repo', - 'ref': 'v1.0', - 'destination': '/home/bart/Documents/'}}}] - self.maxDiff = None - - @classmethod - def tearDownClass(self): - pass - - def test_toml_to_recipe(self): - """Test converting the TOML string to a Recipe object""" - for (toml_str, recipe_dict) in self.input_toml.values(): - result = recipes.recipe_from_toml(toml_str) - self.assertEqual(result, recipe_dict) - - def test_toml_to_recipe_fail(self): - """Test trying to convert a non-TOML string to a Recipe""" - with self.assertRaises(TomlError): - recipes.recipe_from_toml("This is not a TOML string\n") - - with self.assertRaises(recipes.RecipeError): - recipes.recipe_from_toml('name = "a failed toml string"\n') - - def test_recipe_to_toml(self): - """Test converting a Recipe object to a TOML string""" - # In order to avoid problems from matching strings we convert to TOML and - # then back so compare the Recipes. 
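# (Restating that round trip outside the loop: parse, serialize, re-parse,
# and the two Recipe objects should compare equal.
#
#     recipe_1 = recipes.recipe_from_toml(toml_str)
#     recipe_2 = recipes.recipe_from_toml(recipe_1.toml())
#     assert recipe_1 == recipe_2
# )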
- for (toml_str, _recipe_dict) in self.input_toml.values(): - # This is tested in toml_to_recipe - recipe_1 = recipes.recipe_from_toml(toml_str) - # Convert the Recipe to TOML and then back to a Recipe - toml_2 = recipe_1.toml() - recipe_2 = recipes.recipe_from_toml(toml_2) - self.assertEqual(recipe_1, recipe_2) - - def test_recipe_bump_version(self): - """Test the Recipe's version bump function""" - - # Neither have a version - recipe = recipes.Recipe("test-recipe", "A recipe used for testing", None, None, None, None) - new_version = recipe.bump_version(None) - self.assertEqual(new_version, "0.0.1") - - # Original has a version, new does not - recipe = recipes.Recipe("test-recipe", "A recipe used for testing", None, None, None, None) - new_version = recipe.bump_version("0.0.1") - self.assertEqual(new_version, "0.0.2") - - # Original has no version, new does - recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.0", None, None, None) - new_version = recipe.bump_version(None) - self.assertEqual(new_version, "0.1.0") - - # New and Original are the same - recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.0.1", None, None, None) - new_version = recipe.bump_version("0.0.1") - self.assertEqual(new_version, "0.0.2") - - # New is different from Original - recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", None, None, None) - new_version = recipe.bump_version("0.0.1") - self.assertEqual(new_version, "0.1.1") - - def test_find_field(self): - """Test the find_field_value function""" - test_list = [{"name":"dog"}, {"name":"cat"}, {"name":"squirrel"}] - - self.assertEqual(recipes.find_field_value("name", "cat", test_list), {"name":"cat"}) - self.assertIsNone(recipes.find_field_value("name", "alien", test_list)) - self.assertIsNone(recipes.find_field_value("color", "green", test_list)) - self.assertIsNone(recipes.find_field_value("color", "green", [])) - - def test_find_name(self): - """Test the find_name function""" - test_list = [{"name":"dog"}, {"name":"cat"}, {"name":"squirrel"}] - - self.assertEqual(recipes.find_name("cat", test_list), {"name":"cat"}) - self.assertIsNone(recipes.find_name("alien", test_list)) - self.assertIsNone(recipes.find_name("alien", [])) - - def test_find_obj(self): - """Test the find_recipe_obj function""" - test_recipe = {"customizations": {"hostname": "foo", "users": ["root"]}, "repos": {"git": ["git-repos"]}} - - self.assertEqual(recipes.find_recipe_obj(["customizations", "hostname"], test_recipe, ""), "foo") - self.assertEqual(recipes.find_recipe_obj(["customizations", "locale"], test_recipe, {}), {}) - self.assertEqual(recipes.find_recipe_obj(["repos", "git"], test_recipe, ""), ["git-repos"]) - self.assertEqual(recipes.find_recipe_obj(["repos", "git", "oak"], test_recipe, ""), "") - self.assertIsNone(recipes.find_recipe_obj(["pine"], test_recipe)) - - def test_diff_lists(self): - """Test the diff_lists function""" - self.assertEqual(recipes.diff_lists("Modules", "name", self.old_modules, self.old_modules), []) - self.assertEqual(recipes.diff_lists("Modules", "name", self.old_modules, self.new_modules), self.modules_result) - self.assertEqual(recipes.diff_lists("Packages", "name", self.old_packages, self.new_packages), self.packages_result) - self.assertEqual(recipes.diff_lists("Groups", "name", self.old_groups, self.new_groups), self.groups_result) - self.assertEqual(recipes.diff_lists("Repos.git", "rpmname", self.old_git, self.new_git), self.git_result) - self.assertEqual(recipes.diff_lists("Repos.git", 
"rpmname", self.old_git, sorted(self.new_git, reverse=True, key=lambda o: o["rpmname"].lower())), self.git_result) - - def test_customizations_diff(self): - """Test the customizations_diff function""" - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=self.old_custom) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=self.new_custom) - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), self.custom_result) - - def test_customizations_diff_services(self): - """Test the customizations_diff function with services variations""" - # Test adding the services customization - old_custom = self.old_custom.copy() - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = old_custom.copy() - new_custom.update(self.custom_services1) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'new': {'Customizations.services': {'disabled': ['postfix', 'telnetd'], 'enabled': ['sshd', 'cockpit.socket', 'httpd']}}, - 'old': None}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test removing disabled - old_custom = self.old_custom.copy() - old_custom.update(self.custom_services1) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = self.old_custom.copy() - new_custom.update(self.custom_services2) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'old': {'Customizations.services': {'disabled': ['postfix', 'telnetd'], 'enabled': ['sshd', 'cockpit.socket', 'httpd']}}, - 'new': {'Customizations.services': {'enabled': ['sshd', 'cockpit.socket', 'httpd']}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test removing enabled - old_custom = self.old_custom.copy() - old_custom.update(self.custom_services1) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = self.old_custom.copy() - new_custom.update(self.custom_services3) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'old': {'Customizations.services': {'disabled': ['postfix', 'telnetd'], 'enabled': ['sshd', 'cockpit.socket', 'httpd']}}, - 'new': {'Customizations.services': {'disabled': ['postfix', 'telnetd']}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - def test_customizations_diff_firewall(self): - """Test the customizations_diff function with firewall variations""" - # Test adding the firewall customization - old_custom = self.old_custom.copy() - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = old_custom.copy() - new_custom.update(self.custom_firewall1) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'new': {'Customizations.firewall': {'ports': ['22:tcp', '80:tcp', 'imap:tcp', '53:tcp', '53:udp'], - 'services': {'disabled': ['telnet'], 'enabled': ['ftp', 'ntp', 'dhcp']}}}, - 'old': None}] - 
self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test removing services - old_custom = self.old_custom.copy() - old_custom.update(self.custom_firewall1) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = self.old_custom.copy() - new_custom.update(self.custom_firewall2) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'old': {'Customizations.firewall': {'ports': ['22:tcp', '80:tcp', 'imap:tcp', '53:tcp', '53:udp'], - 'services': {'disabled': ['telnet'], 'enabled': ['ftp', 'ntp', 'dhcp']}}}, - 'new': {'Customizations.firewall': {'ports': ['22:tcp', '80:tcp', 'imap:tcp', '53:tcp', '53:udp']}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test removing ports - old_custom = self.old_custom.copy() - old_custom.update(self.custom_firewall1) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = self.old_custom.copy() - new_custom.update(self.custom_firewall3) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'old': {'Customizations.firewall': {'ports': ['22:tcp', '80:tcp', 'imap:tcp', '53:tcp', '53:udp'], - 'services': {'disabled': ['telnet'], 'enabled': ['ftp', 'ntp', 'dhcp']}}}, - 'new': {'Customizations.firewall': {'services': {'disabled': ['telnet'], 'enabled': ['ftp', 'ntp', 'dhcp']}}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test removing disabled services - old_custom = self.old_custom.copy() - old_custom.update(self.custom_firewall3) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = self.old_custom.copy() - new_custom.update(self.custom_firewall4) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'old': {'Customizations.firewall': {'services': {'disabled': ['telnet'], 'enabled': ['ftp', 'ntp', 'dhcp']}}}, - 'new': {'Customizations.firewall': {'services': {'enabled': ['ftp', 'ntp', 'dhcp']}}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test removing enabled services - old_custom = self.old_custom.copy() - old_custom.update(self.custom_firewall3) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = self.old_custom.copy() - new_custom.update(self.custom_firewall5) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'old': {'Customizations.firewall': {'services': {'disabled': ['telnet'], 'enabled': ['ftp', 'ntp', 'dhcp']}}}, - 'new': {'Customizations.firewall': {'services': {'disabled': ['telnet']}}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - def test_customizations_diff_locale(self): - """Test the customizations_diff function with locale variations""" - # Test adding the locale customization - old_custom = self.old_custom.copy() - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = old_custom.copy() - 
new_custom.update(self.custom_locale1) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'new': {'Customizations.locale': {'keyboard': 'us', 'languages': ['en_US.UTF-8']}}, - 'old': None}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test removing keyboard - old_custom = self.old_custom.copy() - old_custom.update(self.custom_locale1) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = self.old_custom.copy() - new_custom.update(self.custom_locale2) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'old': {'Customizations.locale': {'keyboard': 'us', 'languages': ['en_US.UTF-8']}}, - 'new': {'Customizations.locale': {'languages': ['en_US.UTF-8']}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test removing languages - old_custom = self.old_custom.copy() - old_custom.update(self.custom_locale1) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = self.old_custom.copy() - new_custom.update(self.custom_locale3) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'old': {'Customizations.locale': {'keyboard': 'us', 'languages': ['en_US.UTF-8']}}, - 'new': {'Customizations.locale': {'keyboard': 'us'}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - def test_customizations_diff_timezone(self): - """Test the customizations_diff function with timezone variations""" - # Test adding the timezone customization - old_custom = self.old_custom.copy() - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = old_custom.copy() - new_custom.update(self.custom_timezone1) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'new': {'Customizations.timezone': {'ntpservers': ['0.north-america.pool.ntp.org', '1.north-america.pool.ntp.org'], 'timezone': 'US/Eastern'}}, - 'old': None}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test removing ntpservers - old_custom = self.old_custom.copy() - old_custom.update(self.custom_timezone1) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = self.old_custom.copy() - new_custom.update(self.custom_timezone2) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'old': {'Customizations.timezone': {'ntpservers': ['0.north-america.pool.ntp.org', '1.north-america.pool.ntp.org'], 'timezone': 'US/Eastern'}}, - 'new': {'Customizations.timezone': {'timezone': 'US/Eastern'}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test removing timezone - old_custom = self.old_custom.copy() - old_custom.update(self.custom_timezone1) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = self.old_custom.copy() - new_custom.update(self.custom_timezone3) - new_recipe = 
recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'old': {'Customizations.timezone': {'ntpservers': ['0.north-america.pool.ntp.org', '1.north-america.pool.ntp.org'], 'timezone': 'US/Eastern'}}, - 'new': {'Customizations.timezone': {'ntpservers': ['0.north-america.pool.ntp.org', '1.north-america.pool.ntp.org']}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - - def test_customizations_diff_sshkey(self): - """Test the customizations_diff function with sshkey variations""" - # Test changed root ssh key - old_custom = self.old_custom.copy() - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = old_custom.copy() - new_custom.update(self.custom_sshkey2) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'new': {'Customizations.sshkey': {'key': 'A DIFFERENT SSH KEY FOR ROOT', 'user': 'root'}}, - 'old': {'Customizations.sshkey': {'key': 'A SSH KEY FOR ROOT', 'user': 'root'}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test adding a user's ssh key - old_custom = self.old_custom.copy() - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = old_custom.copy() - new_custom.update(self.custom_sshkey3) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'new': {'Customizations.sshkey': {'key': 'A SSH KEY FOR CLIFF', 'user': 'cliff'}}, - 'old': None}] - - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test removing a user's ssh key - old_custom = old_custom.copy() - old_custom.update(self.custom_sshkey3) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = self.old_custom.copy() - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'old': {'Customizations.sshkey': {'key': 'A SSH KEY FOR CLIFF', 'user': 'cliff'}}, - 'new': None}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - def test_customizations_diff_user(self): - """Test the customizations_diff function with user variations""" - # Test changed admin user - old_custom = self.old_custom.copy() - old_custom.update(self.custom_user1) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = old_custom.copy() - new_custom.update(self.custom_user2) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'new': {'Customizations.user': {'description': 'Administrator account', - 'gid': 1200, - 'groups': ['widget', 'users', 'wheel'], - 'home': '/root/', - 'key': 'PUBLIC SSH KEY', - 'name': 'admin', - 'password': '$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31L...', - 'shell': '/usr/bin/bash', - 'uid': 1200}}, - 'old': {'Customizations.user': {'description': 'Administrator account', - 'gid': 1200, - 'groups': ['widget', 'users', 'wheel'], - 'home': '/srv/widget/', - 'key': 'PUBLIC SSH KEY', - 'name': 'admin', - 'password': '$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31L...', - 'shell': 
'/usr/bin/bash', - 'uid': 1200}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test adding a user - old_custom = self.old_custom.copy() - old_custom.update(self.custom_user1) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = old_custom.copy() - new_custom.update(self.custom_user3) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'new': {'Customizations.user': {'key': 'PUBLIC SSH KEY', 'name': 'norman'}}, 'old': None}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - # Test removing a user - old_custom = self.old_custom.copy() - old_custom.update(self.custom_user3) - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], [], [], customizations=old_custom) - - new_custom = old_custom.copy() - new_custom.update(self.custom_user1) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", [], [], [], customizations=new_custom) - result = [{'new': None, 'old': {'Customizations.user': {'key': 'PUBLIC SSH KEY', 'name': 'norman'}}}] - self.assertEqual(recipes.customizations_diff(old_recipe, new_recipe), result) - - - - def test_recipe_diff(self): - """Test the recipe_diff function""" - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", self.old_modules, self.old_packages, [], gitrepos=self.old_git) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", self.new_modules, self.new_packages, [], gitrepos=self.new_git) - result = [{'new': {'Version': '0.3.1'}, 'old': {'Version': '0.1.1'}}, - {'new': {'Module': {'name': 'openssh', 'version': '2.8.1'}}, 'old': None}, - {'new': None, 'old': {'Module': {'name': 'bash', 'version': '5.*'}}}, - {'new': {'Module': {'name': 'httpd', 'version': '3.8.*'}}, - 'old': {'Module': {'name': 'httpd', 'version': '3.7.*'}}}, - {'new': {'Package': {'name': 'git', 'version': '2.13.*'}}, 'old': None}, - {'new': {'Repos.git': {'destination': '/home/bart/Documents/', - 'ref': 'v1.0', - 'repo': 'https://github.com/weldr/not-a-real-repo', - 'rpmname': 'bart-files', - 'rpmrelease': '1', - 'rpmversion': '1.1', - 'summary': 'Files needed for Bart'}}, - 'old': None}] - self.assertEqual(recipes.recipe_diff(old_recipe, new_recipe), result) - - # Empty starting recipe - old_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.1.1", [], self.old_packages, [], gitrepos=self.old_git) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", self.new_modules, self.new_packages, [], gitrepos=self.new_git) - result = [{'new': {'Version': '0.3.1'}, 'old': {'Version': '0.1.1'}}, - {'new': {'Module': {'name': 'httpd', 'version': '3.8.*'}}, 'old': None}, - {'new': {'Module': {'name': 'openssh', 'version': '2.8.1'}}, 'old': None}, - {'new': {'Module': {'name': 'toml', 'version': '2.1'}}, 'old': None}, - {'new': {'Package': {'name': 'git', 'version': '2.13.*'}}, 'old': None}, - {'new': {'Repos.git': {'destination': '/home/bart/Documents/', - 'ref': 'v1.0', - 'repo': 'https://github.com/weldr/not-a-real-repo', - 'rpmname': 'bart-files', - 'rpmrelease': '1', - 'rpmversion': '1.1', - 'summary': 'Files needed for Bart'}}, - 'old': None}] - self.assertEqual(recipes.recipe_diff(old_recipe, new_recipe), result) - - # All new git repos - old_recipe = recipes.Recipe("test-recipe", "A recipe used for 
testing", "0.1.1", self.old_modules, self.old_packages, []) - new_recipe = recipes.Recipe("test-recipe", "A recipe used for testing", "0.3.1", self.new_modules, self.new_packages, [], gitrepos=self.new_git) - result = [{'new': {'Version': '0.3.1'}, 'old': {'Version': '0.1.1'}}, - {'new': {'Module': {'name': 'openssh', 'version': '2.8.1'}}, 'old': None}, - {'new': None, 'old': {'Module': {'name': 'bash', 'version': '5.*'}}}, - {'new': {'Module': {'name': 'httpd', 'version': '3.8.*'}}, - 'old': {'Module': {'name': 'httpd', 'version': '3.7.*'}}}, - {'new': {'Package': {'name': 'git', 'version': '2.13.*'}}, 'old': None}, - {'new': {'Repos.git': {'destination': '/home/bart/Documents/', - 'ref': 'v1.0', - 'repo': 'https://github.com/weldr/not-a-real-repo', - 'rpmname': 'bart-files', - 'rpmrelease': '1', - 'rpmversion': '1.1', - 'summary': 'Files needed for Bart'}}, - 'old': None}, - {'new': {'Repos.git': {'destination': '/srv/config/', - 'ref': 'v3.0', - 'repo': 'https://github.com/weldr/server-config-files', - 'rpmname': 'server-config-files', - 'rpmrelease': '1', - 'rpmversion': '1.0', - 'summary': 'Setup files for server deployment'}}, - 'old': None}] - - - self.assertEqual(recipes.recipe_diff(old_recipe, new_recipe), result) - - def test_recipe_freeze(self): - """Test the recipe freeze() function""" - # Use the repos-git.toml test, it only has http and php in it - deps = [{"arch": "x86_64", - "epoch": 0, - "name": "httpd", - "release": "1.el7", - "version": "2.4.11"}, - {"arch": "x86_64", - "epoch": 0, - "name": "php", - "release": "1.el7", - "version": "5.4.2"}] - result = recipes.recipe_from_toml(self.input_toml["repos-git.toml"][0]) - self.assertEqual(result, self.input_toml["repos-git.toml"][1]) - - # Freeze the recipe with our fake deps - frozen = result.freeze(deps) - self.assertTrue(frozen is not None) - http_module = recipes.find_name("httpd", frozen["modules"]) - self.assertTrue(http_module is not None) - self.assertEqual(http_module["version"], "2.4.11-1.el7.x86_64") - - php_module = recipes.find_name("php", frozen["modules"]) - self.assertTrue(php_module is not None) - self.assertEqual(php_module["version"], "5.4.2-1.el7.x86_64") - - -class GitRecipesTest(unittest.TestCase): - @classmethod - def setUpClass(self): - self.repo_dir = tempfile.mkdtemp(prefix="lorax.test.repo.") - self.repo = recipes.open_or_create_repo(self.repo_dir) - - self.results_path = "./tests/pylorax/results/" - self.examples_path = "./tests/pylorax/blueprints/" - self.new_recipe = os.path.join(self.examples_path, 'python-testing.toml') - - @classmethod - def tearDownClass(self): - if self.repo is not None: - del self.repo - shutil.rmtree(self.repo_dir) - - def tearDown(self): - if os.path.exists(self.new_recipe): - os.remove(self.new_recipe) - - def _create_another_recipe(self): - open(self.new_recipe, 'w').write("""name = "python-testing" -description = "A recipe used during testing." 
-version = "0.0.1" - -[[packages]] -name = "python" -version = "2.7.*" -""") - - def _create_bad_toml_file(self): - open(self.new_recipe, 'w').write("""customizations] -hostname = "testing-bad-recipe" -""") - - def _create_bad_recipe(self): - open(self.new_recipe, 'w').write("""[customizations] -hostname = "testing-bad-recipe" -""") - - def test_01_repo_creation(self): - """Test that creating the repository succeeded""" - self.assertNotEqual(self.repo, None) - - def test_02_commit_recipe(self): - """Test committing a Recipe object""" - recipe = recipes.Recipe("test-recipe", "A recipe used for testing", None, None, None, None) - oid = recipes.commit_recipe(self.repo, "master", recipe) - self.assertNotEqual(oid, None) - - def test_03_list_recipe(self): - """Test listing recipe commits""" - commits = recipes.list_commits(self.repo, "master", "test-recipe.toml") - self.assertEqual(len(commits), 1, "Wrong number of commits.") - self.assertEqual(commits[0].message, "Recipe test-recipe, version 0.0.1 saved.") - self.assertNotEqual(commits[0].timestamp, None, "Timestamp is None") - self.assertEqual(len(commits[0].commit), 40, "Commit hash isn't 40 characters") - self.assertEqual(commits[0].revision, None, "revision is not None") - - def test_03_list_commits_commit_time_val_error(self): - """Test listing recipe commits which raise CommitTimeValError""" - with mock.patch('pylorax.api.recipes.GLib.DateTime.format_iso8601', return_value=False): - commits = recipes.list_commits(self.repo, "master", "test-recipe.toml") - self.assertEqual(len(commits), 0, "Wrong number of commits.") - - def test_04_commit_recipe_file(self): - """Test committing a TOML file""" - recipe_path = joinpaths(self.results_path, "full-recipe.toml") - oid = recipes.commit_recipe_file(self.repo, "master", recipe_path) - self.assertNotEqual(oid, None) - - commits = recipes.list_commits(self.repo, "master", "http-server.toml") - self.assertEqual(len(commits), 1, "Wrong number of commits: %s" % commits) - - def test_04_commit_recipe_file_handles_internal_ioerror(self): - """Test committing a TOML raises RecipeFileError on internal IOError""" - recipe_path = joinpaths(self.results_path, "non-existing-file.toml") - with self.assertRaises(recipes.RecipeFileError): - recipes.commit_recipe_file(self.repo, "master", recipe_path) - - def test_04_commit_recipe_file_bad_toml(self): - """Test committing an invalid TOML file""" - self._create_bad_toml_file() - with self.assertRaises(TomlError): - recipes.commit_recipe_file(self.repo, "master", self.new_recipe) - - def test_04_commit_recipe_file_bad_recipe(self): - """Test committing an invalid recipe file""" - self._create_bad_recipe() - with self.assertRaises(recipes.RecipeError): - recipes.commit_recipe_file(self.repo, "master", self.new_recipe) - - def test_05_commit_toml_dir(self): - """Test committing a directory of TOML files""" - # first verify that the newly created file isn't present - old_commits = recipes.list_commits(self.repo, "master", "python-testing.toml") - self.assertEqual(len(old_commits), 0, "Wrong number of commits: %s" % old_commits) - - # then create it and commit the entire directory - self._create_another_recipe() - recipes.commit_recipe_directory(self.repo, "master", self.examples_path) - - # verify that the newly created file is already in the repository - new_commits = recipes.list_commits(self.repo, "master", "python-testing.toml") - self.assertEqual(len(new_commits), 1, "Wrong number of commits: %s" % new_commits) - # again make sure new_commits != old_commits - 
self.assertGreater(len(new_commits), len(old_commits), - "New commits should differ from old commits") - - def test_05_commit_recipe_directory_handling_internal_exceptions(self): - """Test committing a directory of TOML files while handling internal exceptions""" - # first verify that the newly created file isn't present - old_commits = recipes.list_commits(self.repo, "master", "python-testing.toml") - self.assertEqual(len(old_commits), 0, "Wrong number of commits: %s" % old_commits) - - # then create it and commit the entire directory - self._create_another_recipe() - - # try to commit while raising RecipeError - with mock.patch('pylorax.api.recipes.commit_recipe_file', side_effect=recipes.RecipeError('TESTING')): - recipes.commit_recipe_directory(self.repo, "master", self.examples_path) - - # try to commit while raising RecipeFileError - with mock.patch('pylorax.api.recipes.commit_recipe_file', side_effect=recipes.RecipeFileError('TESTING')): - recipes.commit_recipe_directory(self.repo, "master", self.examples_path) - - # try to commit while raising TomlError - with mock.patch('pylorax.api.recipes.commit_recipe_file', side_effect=TomlError('TESTING', "", 0)): - recipes.commit_recipe_directory(self.repo, "master", self.examples_path) - - # verify again that the newly created file isn't present b/c we raised an exception - new_commits = recipes.list_commits(self.repo, "master", "python-testing.toml") - self.assertEqual(len(new_commits), 0, "Wrong number of commits: %s" % new_commits) - - def test_06_read_recipe(self): - """Test reading a recipe from a commit""" - commits = recipes.list_commits(self.repo, "master", "example-http-server.toml") - self.assertEqual(len(commits), 1, "Wrong number of commits: %s" % commits) - - recipe = recipes.read_recipe_commit(self.repo, "master", "example-http-server") - self.assertNotEqual(recipe, None) - self.assertEqual(recipe["name"], "example-http-server") - - # Read by commit id - recipe = recipes.read_recipe_commit(self.repo, "master", "example-http-server", commits[0].commit) - self.assertNotEqual(recipe, None) - self.assertEqual(recipe["name"], "example-http-server") - - # Read the recipe and its commit id - (commit_id, recipe) = recipes.read_recipe_and_id(self.repo, "master", "example-http-server", commits[0].commit) - self.assertEqual(commit_id, commits[0].commit) - - def test_07_tag_commit(self): - """Test tagging the most recent commit of a recipe""" - result = recipes.tag_file_commit(self.repo, "master", "not-a-file") - self.assertEqual(result, None) - - result = recipes.tag_recipe_commit(self.repo, "master", "example-http-server") - self.assertNotEqual(result, None) - - commits = recipes.list_commits(self.repo, "master", "example-http-server.toml") - self.assertEqual(len(commits), 1, "Wrong number of commits: %s" % commits) - self.assertEqual(commits[0].revision, 1) - - def test_08_delete_recipe(self): - """Test deleting a file from a branch""" - oid = recipes.delete_recipe(self.repo, "master", "example-http-server") - self.assertNotEqual(oid, None) - - master_files = recipes.list_branch_files(self.repo, "master") - self.assertEqual("example-http-server.toml" in master_files, False) - - def test_09_revert_commit(self): - """Test reverting a file on a branch""" - commits = recipes.list_commits(self.repo, "master", "example-http-server.toml") - revert_to = commits[0].commit - oid = recipes.revert_recipe(self.repo, "master", "example-http-server", revert_to) - self.assertNotEqual(oid, None) - - commits = recipes.list_commits(self.repo, "master",
"example-http-server.toml") - self.assertEqual(len(commits), 2, "Wrong number of commits: %s" % commits) - self.assertEqual(commits[0].message, "example-http-server.toml reverted to commit %s" % revert_to) - - def test_10_tag_new_commit(self): - """Test tagging a newer commit of a recipe""" - recipe = recipes.read_recipe_commit(self.repo, "master", "example-http-server") - recipe["description"] = "A modified description" - oid = recipes.commit_recipe(self.repo, "master", recipe) - self.assertNotEqual(oid, None) - - # Tag the new commit - result = recipes.tag_recipe_commit(self.repo, "master", "example-http-server") - self.assertNotEqual(result, None) - - commits = recipes.list_commits(self.repo, "master", "example-http-server.toml") - self.assertEqual(len(commits), 3, "Wrong number of commits: %s" % commits) - self.assertEqual(commits[0].revision, 2) - - -class ExistingGitRepoRecipesTest(GitRecipesTest): - @classmethod - def setUpClass(self): - # will initialize the git repository in the parent class - super(ExistingGitRepoRecipesTest, self).setUpClass() - - # reopen the repository again so that tests are executed - # against the existing repo one more time. - self.repo = recipes.open_or_create_repo(self.repo_dir) - - -class GetRevisionFromTagTests(unittest.TestCase): - def test_01_valid_tag(self): - revision = recipes.get_revision_from_tag('branch/filename/r123') - self.assertEqual(123, revision) - - def test_02_invalid_tag_not_a_number(self): - revision = recipes.get_revision_from_tag('branch/filename/rABC') - self.assertIsNone(revision) - - def test_02_invalid_tag_missing_revision_string(self): - revision = recipes.get_revision_from_tag('branch/filename/mybranch') - self.assertIsNone(revision) - -class CustomizationsTests(unittest.TestCase): - @staticmethod - def _blueprint_to_ks(blueprint_data): - recipe_obj = recipes.recipe_from_toml(blueprint_data) - ks = KickstartParser(makeVersion()) - - # write out the customization data, and parse the resulting kickstart - with tempfile.NamedTemporaryFile(prefix="lorax.test.customizations", mode="w") as f: - f.write(customize_ks_template("", recipe_obj)) - add_customizations(f, recipe_obj) - f.flush() - ks.readKickstart(f.name) - - return ks - - @staticmethod - def _find_user(ks, username): - for user in ks.handler.user.userList: - if user.name == username: - return user - return None - - @staticmethod - def _find_sshkey(ks, username): - for key in ks.handler.sshkey.sshUserList: - if key.username == username: - return key - return None - - @staticmethod - def _find_group(ks, groupname): - for group in ks.handler.group.groupList: - if group.name == groupname: - return group - return None - - def test_hostname(self): - blueprint_data = """name = "test-hostname" -description = "test recipe" -version = "0.0.1" - -[customizations] -hostname = "testy.example.com" -""" - ks = self._blueprint_to_ks(blueprint_data) - self.assertEqual(ks.handler.network.hostname, "testy.example.com") - - def test_hostname_list(self): - """Test that the hostname still works when using [[customizations]] instead of [customizations]""" - - blueprint_data = """name = "test-hostname-list" -description = "test recipe" -version = "0.0.1" - -[[customizations]] -hostname = "testy.example.com" -""" - ks = self._blueprint_to_ks(blueprint_data) - self.assertEqual(ks.handler.network.hostname, "testy.example.com") - - def test_timezone(self): - blueprint_data = """name = "test-timezone" -description = "test recipe" -version = "0.0.1" - -[customizations.timezone] -timezone = "US/Samoa" 
-""" - ks = self._blueprint_to_ks(blueprint_data) - self.assertEqual(ks.handler.timezone.timezone, "US/Samoa") - - def test_timezone_ntpservers(self): - blueprint_data = """name = "test-ntpservers" -description = "test recipe" -version = "0.0.1" - -[customizations.timezone] -timezone = "US/Samoa" -ntpservers = ["1.north-america.pool.ntp.org"] -""" - ks = self._blueprint_to_ks(blueprint_data) - self.assertEqual(ks.handler.timezone.timezone, "US/Samoa") - self.assertEqual(ks.handler.timezone.ntpservers, ["1.north-america.pool.ntp.org"]) - - def test_locale_languages(self): - blueprint_data = """name = "test-locale" -description = "test recipe" -version = "0.0.1" -""" - blueprint2_data = blueprint_data + """ -[customizations.locale] -languages = ["en_CA.utf8"] -""" - blueprint3_data = blueprint_data + """ -[customizations.locale] -languages = ["en_CA.utf8", "en_HK.utf8"] -""" - ks = self._blueprint_to_ks(blueprint2_data) - self.assertEqual(ks.handler.lang.lang, "en_CA.utf8") - self.assertEqual(ks.handler.lang.addsupport, []) - - ks = self._blueprint_to_ks(blueprint3_data) - self.assertEqual(ks.handler.lang.lang, "en_CA.utf8") - self.assertEqual(ks.handler.lang.addsupport, ["en_HK.utf8"]) - - def test_locale_keyboard(self): - blueprint_data = """name = "test-locale" -description = "test recipe" -version = "0.0.1" -""" - blueprint2_data = blueprint_data + """ -[customizations.locale] -keyboard = "us" -""" - blueprint3_data = blueprint_data + """ -[customizations.locale] -keyboard = "de (dvorak)" -""" - ks = self._blueprint_to_ks(blueprint2_data) - self.assertEqual(ks.handler.keyboard.keyboard, "us") - - ks = self._blueprint_to_ks(blueprint3_data) - self.assertEqual(ks.handler.keyboard.keyboard, "de (dvorak)") - - def test_locale(self): - blueprint_data = """name = "test-locale" -description = "test recipe" -version = "0.0.1" - -[customizations.locale] -keyboard = "de (dvorak)" -languages = ["en_CA.utf8", "en_HK.utf8"] -""" - ks = self._blueprint_to_ks(blueprint_data) - self.assertEqual(ks.handler.keyboard.keyboard, "de (dvorak)") - self.assertEqual(ks.handler.lang.lang, "en_CA.utf8") - self.assertEqual(ks.handler.lang.addsupport, ["en_HK.utf8"]) - - def test_firewall_ports(self): - blueprint_data = """name = "test-firewall" -description = "test recipe" -version = "0.0.1" -""" - blueprint2_data = blueprint_data + """ -[customizations.firewall] -ports = ["22:tcp", "80:tcp", "imap:tcp", "53:tcp", "53:udp"] -""" - ks = self._blueprint_to_ks(blueprint_data) - self.assertEqual(ks.handler.firewall.ports, []) - self.assertEqual(ks.handler.firewall.services, []) - self.assertEqual(ks.handler.firewall.remove_services, []) - - ks = self._blueprint_to_ks(blueprint2_data) - self.assertEqual(ks.handler.firewall.ports, ["22:tcp", "53:tcp", "53:udp", "80:tcp", "imap:tcp"]) - self.assertEqual(ks.handler.firewall.services, []) - self.assertEqual(ks.handler.firewall.remove_services, []) - - def test_firewall_services(self): - blueprint_data = """name = "test-firewall" -description = "test recipe" -version = "0.0.1" - -[customizations.firewall.services] -enabled = ["ftp", "ntp", "dhcp"] -disabled = ["telnet"] -""" - ks = self._blueprint_to_ks(blueprint_data) - self.assertEqual(ks.handler.firewall.ports, []) - self.assertEqual(ks.handler.firewall.services, ["dhcp", "ftp", "ntp"]) - self.assertEqual(ks.handler.firewall.remove_services, ["telnet"]) - - def test_firewall(self): - blueprint_data = """name = "test-firewall" -description = "test recipe" -version = "0.0.1" - -[customizations.firewall] -ports = ["22:tcp", 
"80:tcp", "imap:tcp", "53:tcp", "53:udp"] - -[customizations.firewall.services] -enabled = ["ftp", "ntp", "dhcp"] -disabled = ["telnet"] -""" - ks = self._blueprint_to_ks(blueprint_data) - self.assertEqual(ks.handler.firewall.ports, ["22:tcp", "53:tcp", "53:udp", "80:tcp", "imap:tcp"]) - self.assertEqual(ks.handler.firewall.services, ["dhcp", "ftp", "ntp"]) - self.assertEqual(ks.handler.firewall.remove_services, ["telnet"]) - - def test_services(self): - blueprint_data = """name = "test-services" -description = "test recipe" -version = "0.0.1" - -[customizations.services] -enabled = ["sshd", "cockpit.socket", "httpd"] -disabled = ["postfix", "telnetd"] -""" - ks = self._blueprint_to_ks(blueprint_data) - self.assertEqual(sorted(ks.handler.services.enabled), ["cockpit.socket", "httpd", "sshd"]) - self.assertEqual(sorted(ks.handler.services.disabled), ["postfix", "telnetd"]) - - def test_user(self): - blueprint_data = """name = "test-user" -description = "test recipe" -version = "0.0.1" - -[[customizations.user]] -name = "admin" -description = "Widget admin account" -password = "$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31LeOUleVK/R/aeWVHVZDi26zAH.o0ywBKH9Tc0/wm7sW/q39uyd1" -home = "/srv/widget/" -shell = "/usr/bin/bash" -groups = ["widget", "users", "students"] -uid = 1200 - -[[customizations.user]] -name = "bart" -key = "SSH KEY FOR BART" -groups = ["students"] -""" - - ks = self._blueprint_to_ks(blueprint_data) - - admin = self._find_user(ks, "admin") - self.assertIsNotNone(admin) - self.assertEqual(admin.name, "admin") - self.assertEqual(admin.password, "$6$CHO2$3rN8eviE2t50lmVyBYihTgVRHcaecmeCk31LeOUleVK/R/aeWVHVZDi26zAH.o0ywBKH9Tc0/wm7sW/q39uyd1") - self.assertEqual(admin.homedir, "/srv/widget/") - self.assertEqual(admin.shell, "/usr/bin/bash") - # order is unimportant, so use a set instead of comparing lists directly - self.assertEqual(set(admin.groups), {"widget", "users", "students"}) - self.assertEqual(admin.uid, 1200) - - bart = self._find_user(ks, "bart") - self.assertIsNotNone(bart) - self.assertEqual(bart.name, "bart") - self.assertEqual(bart.groups, ["students"]) - - bartkey = self._find_sshkey(ks, "bart") - self.assertIsNotNone(bartkey) - self.assertEqual(bartkey.username, "bart") - self.assertEqual(bartkey.key, "SSH KEY FOR BART") - - def test_group(self): - blueprint_data = """name = "test-group" -description = "test recipe" -version = "0.0.1" - -[[customizations.group]] -name = "widget" - -[[customizations.group]] -name = "students" -""" - - ks = self._blueprint_to_ks(blueprint_data) - - widget = self._find_group(ks, "widget") - self.assertIsNotNone(widget) - - students = self._find_group(ks, "students") - self.assertIsNotNone(students) - - def test_full(self): - blueprint_data = """name = "custom-base" -description = "A base system with customizations" -version = "0.0.1" -modules = [] -groups = [] - -[[packages]] -name = "bash" -version = "5.0.*" - -[[customizations]] -hostname = "custom-base" - -[[customizations.sshkey]] -user = "root" -key = "ssh-rsa" - -[[customizations.user]] -name = "widget" -description = "Widget process user account" -home = "/srv/widget/" -shell = "/usr/bin/false" -groups = ["dialout", "users"] - -[[customizations.user]] -name = "admin" -description = "Widget admin account" -password = "" -home = "/srv/widget/" -shell = "/usr/bin/bash" -groups = ["widget", "users", "students"] -uid = 1200 - -[[customizations.user]] -name = "plain" -password = "password" - -[[customizations.user]] -name = "bart" -key = "" -groups = ["students"] - 
-[[customizations.group]] -name = "widget" - -[[customizations.group]] -name = "students" - -[customizations.timezone] -timezone = "US/Samoa" -ntpservers = ["0.north-america.pool.ntp.org", "1.north-america.pool.ntp.org"] -""" - ks = self._blueprint_to_ks(blueprint_data) - - self.assertEqual(ks.handler.network.hostname, "custom-base") - - rootkey = self._find_sshkey(ks, "root") - self.assertIsNotNone(rootkey) - self.assertEqual(rootkey.username, "root") - self.assertEqual(rootkey.key, "ssh-rsa") - - widget = self._find_user(ks, "widget") - self.assertIsNotNone(widget) - self.assertEqual(widget.name, "widget") - self.assertEqual(widget.homedir, "/srv/widget/") - self.assertEqual(widget.shell, "/usr/bin/false") - self.assertEqual(set(widget.groups), {"dialout", "users"}) - - admin = self._find_user(ks, "admin") - self.assertIsNotNone(admin) - self.assertEqual(admin.name, "admin") - self.assertEqual(admin.password, "") - self.assertEqual(admin.homedir, "/srv/widget/") - self.assertEqual(admin.shell, "/usr/bin/bash") - self.assertEqual(set(admin.groups), {"widget", "users", "students"}) - self.assertEqual(admin.uid, 1200) - - plain = self._find_user(ks, "plain") - self.assertIsNotNone(plain) - self.assertEqual(plain.name, "plain") - self.assertEqual(plain.password, "password") - - # widget does not appear as a separate group line, since a widget - # group is created for the widget user - widgetGroup = self._find_group(ks, "widget") - self.assertIsNone(widgetGroup) - - studentsGroup = self._find_group(ks, "students") - self.assertIsNotNone(studentsGroup) - self.assertEqual(studentsGroup.name, "students") - - self.assertEqual(ks.handler.timezone.timezone, "US/Samoa") - self.assertEqual(ks.handler.timezone.ntpservers, ["0.north-america.pool.ntp.org", "1.north-america.pool.ntp.org"]) - -class RecipeDictTest(unittest.TestCase): - def test_check_list_case(self): - """Test the list case checker function""" - self.assertEqual(recipes.check_list_case([], []), []) - self.assertEqual(recipes.check_list_case(["name", "description", "version"], []), []) - self.assertEqual(recipes.check_list_case(["name", "description", "version"], - ["name", "description", "version"]), []) - self.assertEqual(recipes.check_list_case(["name", "description", "version"], - ["name", "Description", "VERSION"]), - ["Description should be description", "VERSION should be version"]) - self.assertEqual(recipes.check_list_case(["append"], ["appEnD"], prefix="kernel "), - ["kernel appEnD should be append"]) - - def test_check_required_list(self): - """Test the required list function""" - self.assertEqual(recipes.check_required_list([{}], ["name", "version"]), - ["1 is missing 'name', 'version'"]) - self.assertEqual(recipes.check_required_list([{"name": "foo", "version": "1.0.0"}], ["name", "version"]), - []) - self.assertEqual(recipes.check_required_list([{"Name": "foo", "Version": "1.0.0"}], ["name", "version"]), - ['1 Name should be name', '1 Version should be version', "1 is missing 'name', 'version'"]) - - def test_check_recipe_dict(self): - """Test the recipe dict checker function""" - r = {} - self.assertEqual(recipes.check_recipe_dict(r), ["Missing 'name'", "Missing 'description'"]) - r["name"] = "recipe name" - r["description"] = "recipe description" - r["version"] = "92ee0ad691" - self.assertEqual(recipes.check_recipe_dict(r), ["Invalid 'version', must use Semantic Versioning"]) - r["version"] = "0.0.1" - self.assertEqual(recipes.check_recipe_dict(r), []) - - r["modules"] = [{"name": "mod1"}] - 
self.assertEqual(recipes.check_recipe_dict(r), ["'modules' errors:\n1 is missing 'version'"]) - r["modules"] = [{"name": "mod1", "version": "*"}, {"Name": "mod2", "Version": "1.0"}] - self.assertEqual(recipes.check_recipe_dict(r), ["'modules' errors:\n2 Name should be name\n2 Version should be version\n2 is missing 'name', 'version'"]) - r["modules"] = [{"name": "mod1", "version": "*"}] - self.assertEqual(recipes.check_recipe_dict(r), []) - - r["packages"] = [{"name": "pkg1"}] - self.assertEqual(recipes.check_recipe_dict(r), ["'packages' errors:\n1 is missing 'version'"]) - r["packages"] = [{"name": "pkg1", "version": "*"}, {"Name": "pkg2", "Version": "1.0"}] - self.assertEqual(recipes.check_recipe_dict(r), ["'packages' errors:\n2 Name should be name\n2 Version should be version\n2 is missing 'name', 'version'"]) - r["packages"] = [{"name": "pkg1", "version": "*"}] - self.assertEqual(recipes.check_recipe_dict(r), []) - - r["groups"] = [{}] - self.assertEqual(recipes.check_recipe_dict(r), ["'groups' errors:\n1 is missing 'name'"]) - r["groups"] = [{"Name": "grp1"}] - self.assertEqual(recipes.check_recipe_dict(r), ["'groups' errors:\n1 Name should be name\n1 is missing 'name'"]) - r["groups"] = [{"name": "grp1"}] - self.assertEqual(recipes.check_recipe_dict(r), []) - - r["customizations"] = {"kernel": {}} - self.assertEqual(recipes.check_recipe_dict(r), ["'customizations.kernel': missing append field."]) - r["customizations"] = {"kernel": {"Append": "cmdline-arg"}} - self.assertEqual(recipes.check_recipe_dict(r), ['kernel Append should be append', "'customizations.kernel': missing append field."]) - r["customizations"] = {"kernel": {"append": "cmdline-arg"}} - self.assertEqual(recipes.check_recipe_dict(r), []) - - r["customizations"]["sshkey"] = [{"key": "KEY"}] - self.assertEqual(recipes.check_recipe_dict(r), ["'customizations.sshkey' errors:\n1 is missing 'user'"]) - r["customizations"]["sshkey"] = [{"user": "username", "KEY": "KEY"}] - self.assertEqual(recipes.check_recipe_dict(r), ["'customizations.sshkey' errors:\n1 KEY should be key\n1 is missing 'key'"]) - r["customizations"]["sshkey"] = [{"user": "username", "key": "KEY"}] - self.assertEqual(recipes.check_recipe_dict(r), []) - - r["customizations"]["user"] = [{"password": "FOOBAR"}] - self.assertEqual(recipes.check_recipe_dict(r), ["'customizations.user' errors:\n1 is missing 'name'"]) - r["customizations"]["user"] = [{"naMe": "admin", "key": "KEY"}] - self.assertEqual(recipes.check_recipe_dict(r), ["'customizations.user' errors:\n1 naMe should be name\n1 is missing 'name'"]) - r["customizations"]["user"] = [{"name": "admin", "key": "KEY"}] - self.assertEqual(recipes.check_recipe_dict(r), []) - - r["customizations"]["group"] = [{"id": "2001"}] - self.assertEqual(recipes.check_recipe_dict(r), ["'customizations.group' errors:\n1 is missing 'name'"]) - r["customizations"]["group"] = [{"Name": "admins", "id": "2001"}] - self.assertEqual(recipes.check_recipe_dict(r), ["'customizations.group' errors:\n1 Name should be name\n1 is missing 'name'"]) - r["customizations"]["group"] = [{"name": "admins", "id": "2001"}] - self.assertEqual(recipes.check_recipe_dict(r), []) - - r["customizations"]["timezone"] = {} - self.assertEqual(recipes.check_recipe_dict(r), ["'customizations.timezone': missing timezone or ntpservers fields."]) - r["customizations"]["timezone"] = {"Timezone": "PST8PDT"} - self.assertEqual(recipes.check_recipe_dict(r), ['timezone Timezone should be timezone']) - r["customizations"]["timezone"] = {"timezone": "PST8PDT"} - 
self.assertEqual(recipes.check_recipe_dict(r), []) - - r["customizations"]["locale"] = {} - self.assertEqual(recipes.check_recipe_dict(r), ["'customizations.locale': missing languages or keyboard fields."]) - r["customizations"]["locale"] = {"Keyboard": "dvorak"} - self.assertEqual(recipes.check_recipe_dict(r), ['locale Keyboard should be keyboard']) - r["customizations"]["locale"] = {"keyboard": "dvorak"} - self.assertEqual(recipes.check_recipe_dict(r), []) - - r["customizations"]["firewall"] = {} - self.assertEqual(recipes.check_recipe_dict(r), ["'customizations.firewall': missing ports field or services section."]) - r["customizations"]["firewall"] = {"Ports": "8080:tcp"} - self.assertEqual(recipes.check_recipe_dict(r), ['firewall Ports should be ports']) - r["customizations"]["firewall"] = {"ports": "8080:tcp"} - self.assertEqual(recipes.check_recipe_dict(r), []) - - r["customizations"]["firewall"]["services"] = {} - self.assertEqual(recipes.check_recipe_dict(r), ["'customizations.firewall.services': missing enabled or disabled fields."]) - r["customizations"]["firewall"]["services"] = {"enabled": "sshd"} - self.assertEqual(recipes.check_recipe_dict(r), []) - - r["customizations"]["services"] = {} - self.assertEqual(recipes.check_recipe_dict(r), ["'customizations.services': missing enabled or disabled fields."]) - r["customizations"]["services"] = {"DISABLED": "telnetd"} - self.assertEqual(recipes.check_recipe_dict(r), ['services DISABLED should be disabled']) - r["customizations"]["services"] = {"disabled": "telnetd"} - self.assertEqual(recipes.check_recipe_dict(r), []) - diff --git a/tests/pylorax/test_server.py b/tests/pylorax/test_server.py deleted file mode 100644 index ecc7ebc9..00000000 --- a/tests/pylorax/test_server.py +++ /dev/null @@ -1,4269 +0,0 @@ -# -*- coding: UTF-8 -*- -# -# Copyright (C) 2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
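The 4,269 deleted lines that follow are the WELDR API server tests. They never needed the Unix socket: they configured the Flask app directly and drove it through its test client. Below is a condensed sketch of that harness pattern, using only names that appear in the deleted code (server, GitLock, configure, open_or_create_repo); it deliberately omits the queue, DNF, and repository setup that the real setUpClass performs, so it is illustrative rather than a drop-in replacement.

    # Minimal sketch of the deleted suite's harness (illustrative only).
    import tempfile
    from threading import Lock

    from flask import json

    from pylorax.api.config import configure
    from pylorax.api.recipes import open_or_create_repo
    from pylorax.api.server import server, GitLock

    repo_dir = tempfile.mkdtemp(prefix="lorax.test.repo.")
    server.config["REPO_DIR"] = repo_dir
    server.config["GITLOCK"] = GitLock(repo=open_or_create_repo(repo_dir),
                                       lock=Lock(), dir=repo_dir)
    server.config["COMPOSER_CFG"] = configure(root_dir=repo_dir, test_config=True)
    server.config["TESTING"] = True

    client = server.test_client()        # exercises routes without a socket
    resp = client.get("/api/status")
    data = json.loads(resp.data)
    assert "backend" in data             # one of the fields test_01_status checks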
-# -import os -from configparser import ConfigParser, NoOptionError -from contextlib import contextmanager -import dnf -from glob import glob -from rpmfluff import SimpleRpmBuild, expectedArch -import shutil -import tempfile -import time -from threading import Lock -import unittest - -from flask import json -from ..lib import create_git_repo -import lifted.config -from pylorax.api.config import configure, make_dnf_dirs, make_queue_dirs -from pylorax.api.errors import * # pylint: disable=wildcard-import -from pylorax.api.queue import start_queue_monitor -from pylorax.api.recipes import open_or_create_repo, commit_recipe_directory -from pylorax.api.server import server, GitLock -import pylorax.api.toml as toml -from pylorax.api.dnfbase import DNFLock -from pylorax.sysutils import joinpaths - -from tests.lifted.profiles import test_profiles - -# Used for testing UTF-8 input support -UTF8_TEST_STRING = "I w𝒊ll 𝟉ο𝘁 𝛠a𝔰ꜱ 𝘁𝒉𝝸𝚜" - - -# HELPER CONSTANTS -HTTP_GLOB = {"name":"httpd", "version":"*"} -OPENID_GLOB = {"name":"mod_auth_openid", "version":"*"} -MODSSL_GLOB = {"name":"mod_ssl", "version":"*"} -PHP_GLOB = {"name":"php", "version":"*"} -PHPMYSQL_GLOB = {"name": "php-mysqlnd", "version":"*"} -OPENSSH_GLOB = {"name":"openssh-server", "version": "*"} -RSYNC_GLOB = {"name": "rsync", "version": "*"} -SAMBA_GLOB = {"name": "samba", "version": "*"} -TMUX_GLOB = {"name": "tmux", "version": "*"} -GLUSTERFS_GLOB = {"name": "glusterfs", "version": "*"} -GLUSTERFSCLI_GLOB = {"name": "glusterfs-cli", "version": "*"} - - -def get_system_repo(): - """Get an enabled system repo from /etc/yum.repos.d/*repo - - This will be used for test_projects_source_01_delete_system() - """ - # The sources delete test needs the name of a system repo, get it from /etc/yum.repos.d/ - for sys_repo in sorted(glob("/etc/yum.repos.d/*repo")): - cfg = ConfigParser() - cfg.read(sys_repo) - for section in cfg.sections(): - try: - if cfg.get(section, "enabled") == "1": - return section - except NoOptionError: - pass - - # Failed to find one, fall back to using base - return "base" - -def _wait_for_status(self, uuid, wait_status, api=0): - """Helper function that waits for a status - - :param uuid: UUID of the build to check - :type uuid: str - :param wait_status: List of statuses to exit on - :type wait_status: list of str - :returns: True if status was found, False if it timed out - :rtype: bool - - This will time out after 60 seconds - """ - start = time.time() - while True: - resp = self.server.get("/api/v%d/compose/info/%s" % (api, uuid)) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - queue_status = data.get("queue_status") - if queue_status in wait_status: - return True - if time.time() > start + 60: - return False - time.sleep(1) - - -class ServerAPIV0TestCase(unittest.TestCase): - @classmethod - def setUpClass(self): - self.rawhide = False - self.maxDiff = None - - repo_dir = tempfile.mkdtemp(prefix="lorax.test.repo.") - server.config["REPO_DIR"] = repo_dir - repo = open_or_create_repo(server.config["REPO_DIR"]) - server.config["GITLOCK"] = GitLock(repo=repo, lock=Lock(), dir=repo_dir) - - server.config["COMPOSER_CFG"] = configure(root_dir=repo_dir, test_config=True) - lifted.config.configure(server.config["COMPOSER_CFG"]) - - # Copy the shared files over to the directory tree we are using - for d in ["composer", "lifted"]: - shutil.copytree(joinpaths("./share", d), joinpaths(server.config["COMPOSER_CFG"].get("composer", "share_dir"), d)) - - errors = make_queue_dirs(server.config["COMPOSER_CFG"], 
os.getgid()) - if errors: - raise RuntimeError("\n".join(errors)) - - make_dnf_dirs(server.config["COMPOSER_CFG"], os.getuid(), os.getgid()) - - # copy over the test_server dnf repositories - dnf_repo_dir = server.config["COMPOSER_CFG"].get("composer", "repo_dir") - for f in glob("./tests/pylorax/repos/server-*.repo"): - shutil.copy2(f, dnf_repo_dir) - - # Modify fedora vs. rawhide tests when running on rawhide - if os.path.exists("/etc/yum.repos.d/fedora-rawhide.repo"): - self.rawhide = True - - # Need the substitution values to create the directories before we can create the dnf.Base for real - dbo = dnf.Base() - repo_dirs = ["/tmp/lorax-empty-repo-%s-%s" % (dbo.conf.substitutions["releasever"], dbo.conf.substitutions["basearch"]), - "/tmp/lorax-empty-repo-v1-%s-%s" % (dbo.conf.substitutions["releasever"], dbo.conf.substitutions["basearch"])] - # dnf repo baseurl has to point to an absolute directory, so we use /tmp/lorax-empty-repo/ in the files - # and create an empty repository. We now remove duplicate repo entries so we need a number of them. - for d in repo_dirs + ["/tmp/lorax-empty-repo/", "/tmp/lorax-other-empty-repo/", "/tmp/lorax-empty-repo-1/", - "/tmp/lorax-empty-repo-2/", "/tmp/lorax-empty-repo-3/", "/tmp/lorax-empty-repo-4/"]: - os.makedirs(d) - rc = os.system("createrepo_c %s" % d) - if rc != 0: - shutil.rmtree(d) - raise RuntimeError("Problem running createrepo_c, is it installed") - - server.config["DNFLOCK"] = DNFLock(server.config["COMPOSER_CFG"]) - - # Grab the substitution values for later - with server.config["DNFLOCK"].lock: - self.substitutions = server.config["DNFLOCK"].dbo.conf.substitutions - - if "releasever" not in self.substitutions or "basearch" not in self.substitutions: - raise RuntimeError("DNF is missing the releasever and basearch substitutions") - - # Include a message in /api/status output - server.config["TEMPLATE_ERRORS"] = ["Test message"] - - server.config['TESTING'] = True - self.server = server.test_client() - self.repo_dir = repo_dir - - self.examples_path = "./tests/pylorax/blueprints/" - - # Import the example blueprints - commit_recipe_directory(server.config["GITLOCK"].repo, "master", self.examples_path) - - # The sources delete test needs the name of a system repo, get it from /etc/yum.repos.d/ - self.system_repo = get_system_repo() - - start_queue_monitor(server.config["COMPOSER_CFG"], 0, 0) - - @classmethod - def tearDownClass(self): - shutil.rmtree(server.config["REPO_DIR"]) - # Clean up the empty repos - for repo_dir in glob("/tmp/lorax-*empty-repo*"): - shutil.rmtree(repo_dir) - - def test_01_status(self): - """Test the /api/status route""" - status_fields = ["build", "api", "db_version", "schema_version", "db_supported", "backend", "msgs"] - resp = self.server.get("/api/status") - data = json.loads(resp.data) - # Make sure the fields are present - self.assertEqual(sorted(data.keys()), sorted(status_fields)) - - # Check for test message - self.assertEqual(data["msgs"], ["Test message"]) - - - def test_02_blueprints_list(self): - """Test the /api/v0/blueprints/list route""" - list_dict = {"blueprints":["example-append", "example-atlas", "example-custom-base", "example-development", - "example-glusterfs", "example-http-server", "example-jboss", - "example-kubernetes"], "limit":20, "offset":0, "total":8} - resp = self.server.get("/api/v0/blueprints/list") - data = json.loads(resp.data) - self.assertEqual(data, list_dict) - - # Make sure limit=0 still returns the correct total - resp = self.server.get("/api/v0/blueprints/list?limit=0") - 
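A contract that recurs through the list routes tested here: `limit=0` returns no entries but still reports the true `total`, so a client can learn how many items exist and then request exactly that many. A sketch with the same test client (the total of 8 matches the example blueprints imported in setUpClass):

    # limit=0: empty page, but a correct total
    resp = self.server.get("/api/v0/blueprints/list?limit=0")
    data = json.loads(resp.data)
    assert data["limit"] == 0 and data["offset"] == 0
    assert data["total"] == 8

    # then fetch everything in one request
    resp = self.server.get("/api/v0/blueprints/list?limit=%d" % data["total"])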
data = json.loads(resp.data) - self.assertEqual(data["limit"], 0) - self.assertEqual(data["offset"], 0) - self.assertEqual(data["total"], list_dict["total"]) - - def test_03_blueprints_info_1(self): - """Test the /api/v0/blueprints/info route with one blueprint""" - info_dict_1 = {"changes":[{"changed":False, "name":"example-http-server"}], - "errors":[], - "blueprints":[{"description":"An example http server with PHP and MySQL support.", - "modules":[HTTP_GLOB, - OPENID_GLOB, - MODSSL_GLOB, - PHP_GLOB, - PHPMYSQL_GLOB], - "name":"example-http-server", - "packages": [OPENSSH_GLOB, - RSYNC_GLOB, - TMUX_GLOB], - "groups": [], - "version": "0.0.1"}]} - resp = self.server.get("/api/v0/blueprints/info/example-http-server") - data = json.loads(resp.data) - self.assertEqual(data, info_dict_1) - - def test_03_blueprints_info_2(self): - """Test the /api/v0/blueprints/info route with 2 blueprints""" - info_dict_2 = {"changes":[{"changed":False, "name":"example-glusterfs"}, - {"changed":False, "name":"example-http-server"}], - "errors":[], - "blueprints":[{"description": "An example GlusterFS server with samba", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "name":"example-glusterfs", - "packages":[SAMBA_GLOB], - "groups": [], - "version": "0.0.1"}, - {"description":"An example http server with PHP and MySQL support.", - "modules":[HTTP_GLOB, - OPENID_GLOB, - MODSSL_GLOB, - PHP_GLOB, - PHPMYSQL_GLOB], - "name":"example-http-server", - "packages": [OPENSSH_GLOB, - RSYNC_GLOB, - TMUX_GLOB], - "groups": [], - "version": "0.0.1"}, - ]} - resp = self.server.get("/api/v0/blueprints/info/example-http-server,example-glusterfs") - data = json.loads(resp.data) - self.assertEqual(data, info_dict_2) - - def test_03_blueprints_info_none(self): - """Test the /api/v0/blueprints/info route with an unknown blueprint""" - resp = self.server.get("/api/v0/blueprints/info/missing-blueprint") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertEqual(data["errors"][0]["id"], "UnknownBlueprint") - - def test_04_blueprints_changes(self): - """Test the /api/v0/blueprints/changes route""" - resp = self.server.get("/api/v0/blueprints/changes/example-http-server") - data = json.loads(resp.data) - - # Can't compare a whole dict since commit hash and timestamps will change. - # Should have 1 commit (for now), with a matching message. 
- self.assertEqual(data["limit"], 20) - self.assertEqual(data["offset"], 0) - self.assertEqual(len(data["errors"]), 0) - self.assertEqual(len(data["blueprints"]), 1) - self.assertEqual(data["blueprints"][0]["name"], "example-http-server") - self.assertEqual(len(data["blueprints"][0]["changes"]), 1) - - # Make sure limit=0 still returns the correct total - resp = self.server.get("/api/v0/blueprints/changes/example-http-server?limit=0") - data = json.loads(resp.data) - self.assertEqual(data["limit"], 0) - self.assertEqual(data["offset"], 0) - self.assertEqual(data["blueprints"][0]["total"], 1) - - def test_04a_blueprints_diff_empty_ws(self): - """Test the /api/v0/diff/NEWEST/WORKSPACE with empty workspace""" - resp = self.server.get("/api/v0/blueprints/diff/example-glusterfs/NEWEST/WORKSPACE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data, {"diff": []}) - - def test_05_blueprints_new_json(self): - """Test the /api/v0/blueprints/new route with json blueprint""" - test_blueprint = {"description": "An example GlusterFS server with samba", - "name":"example-glusterfs", - "version": "0.2.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v0/blueprints/new", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v0/blueprints/info/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0], test_blueprint) - - def test_06_blueprints_new_toml(self): - """Test the /api/v0/blueprints/new route with toml blueprint""" - test_blueprint = open(joinpaths(self.examples_path, "example-glusterfs.toml"), "rb").read() - resp = self.server.post("/api/v0/blueprints/new", - data=test_blueprint, - content_type="text/x-toml") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v0/blueprints/info/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual(len(blueprints), 1) - - # Returned blueprint has had its version bumped - test_blueprint = toml.loads(test_blueprint) - test_blueprint["version"] = "0.2.1" - - # The test_blueprint generated by toml.loads will not have any groups property - # defined, since there are no groups listed. However, /api/v0/blueprints/new will - # return an object with groups=[]. So, add that here to keep the equality test - # working. 
- test_blueprint["groups"] = [] - - self.assertEqual(blueprints[0], test_blueprint) - - def test_07_blueprints_ws_json(self): - """Test the /api/v0/blueprints/workspace route with json blueprint""" - test_blueprint = {"description": "An example GlusterFS server with samba, ws version", - "name":"example-glusterfs", - "version": "0.3.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v0/blueprints/workspace", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v0/blueprints/info/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0], test_blueprint) - changes = data.get("changes") - self.assertEqual(len(changes), 1) - self.assertEqual(changes[0], {"name":"example-glusterfs", "changed":True}) - - def test_08_blueprints_ws_toml(self): - """Test the /api/v0/blueprints/workspace route with toml blueprint""" - test_blueprint = {"description": "An example GlusterFS server with samba, ws version", - "name":"example-glusterfs", - "version": "0.4.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v0/blueprints/workspace", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v0/blueprints/info/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0], test_blueprint) - changes = data.get("changes") - self.assertEqual(len(changes), 1) - self.assertEqual(changes[0], {"name":"example-glusterfs", "changed":True}) - - def test_09_blueprints_unknown_ws_delete(self): - """Test DELETE /api/v0/blueprints/workspace/missing-blueprint""" - resp = self.server.delete("/api/v0/blueprints/workspace/missing-blueprint") - self.assertEqual(resp.status_code, 400) - data = json.loads(resp.data) - self.assertEqual(data["status"], False) - - def test_09_blueprints_ws_delete(self): - """Test DELETE /api/v0/blueprints/workspace/""" - # Write to the workspace first, just use the test_blueprints_ws_json test for this - self.test_07_blueprints_ws_json() - - # Delete it - resp = self.server.delete("/api/v0/blueprints/workspace/example-glusterfs") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - # Make sure it isn't the workspace copy and that changed is False - resp = self.server.get("/api/v0/blueprints/info/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0]["version"], "0.2.1") - changes = data.get("changes") - self.assertEqual(len(changes), 1) - self.assertEqual(changes[0], {"name":"example-glusterfs", "changed":False}) - - def test_10_blueprints_delete(self): - """Test DELETE /api/v0/blueprints/delete/""" - - # Push a new workspace blueprint first - test_blueprint = {"description": "An example GlusterFS server with samba, ws version", - "name":"example-glusterfs", - "version": "1.4.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - 
"packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - resp = self.server.post("/api/v0/blueprints/workspace", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - # Make sure the workspace file is present - self.assertEqual(os.path.exists(joinpaths(self.repo_dir, "git/workspace/master/example-glusterfs.toml")), True) - - # This should delete the git blueprint and the workspace copy - resp = self.server.delete("/api/v0/blueprints/delete/example-glusterfs") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - # Make sure example-glusterfs is no longer in the list of blueprints - resp = self.server.get("/api/v0/blueprints/list") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual("example-glusterfs" in blueprints, False) - - # Make sure the workspace file is gone - self.assertEqual(os.path.exists(joinpaths(self.repo_dir, "git/workspace/master/example-glusterfs.toml")), False) - - # This has to run after the above test - def test_10_blueprints_delete_2(self): - """Test running a compose with the deleted blueprint""" - # Trying to start a compose with a deleted blueprint should fail - test_compose = {"blueprint_name": "example-glusterfs", - "compose_type": "tar", - "branch": "master"} - - resp = self.server.post("/api/v0/compose?test=2", - data=json.dumps(test_compose), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Compose of deleted blueprint did not fail: %s" % data) - - def test_11_blueprints_undo(self): - """Test POST /api/v0/blueprints/undo//""" - resp = self.server.get("/api/v0/blueprints/changes/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - - # Revert it to the first commit - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - changes = blueprints[0].get("changes") - self.assertEqual(len(changes) > 1, True) - - # Revert it to the first commit - commit = changes[-1]["commit"] - resp = self.server.post("/api/v0/blueprints/undo/example-glusterfs/%s" % commit) - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v0/blueprints/changes/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - changes = blueprints[0].get("changes") - self.assertEqual(len(changes) > 1, True) - - expected_msg = "example-glusterfs.toml reverted to commit %s" % commit - self.assertEqual(changes[0]["message"], expected_msg) - - def test_12_blueprints_tag(self): - """Test POST /api/v0/blueprints/tag/""" - resp = self.server.post("/api/v0/blueprints/tag/example-glusterfs") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v0/blueprints/changes/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - - # Revert it to the first commit - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - changes = blueprints[0].get("changes") - self.assertEqual(len(changes) > 1, True) - self.assertEqual(changes[0]["revision"], 1) - - def test_13_blueprints_diff(self): - """Test /api/v0/blueprints/diff///""" - resp = self.server.get("/api/v0/blueprints/changes/example-glusterfs") - data = json.loads(resp.data) - 
self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - changes = blueprints[0].get("changes") - self.assertEqual(len(changes) >= 2, True) - - from_commit = changes[1].get("commit") - self.assertNotEqual(from_commit, None) - to_commit = changes[0].get("commit") - self.assertNotEqual(to_commit, None) - - print("from: %s" % from_commit) - print("to: %s" % to_commit) - print(changes) - - # Get the differences between the two commits - resp = self.server.get("/api/v0/blueprints/diff/example-glusterfs/%s/%s" % (from_commit, to_commit)) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data, {"diff": [{"new": {"Version": "0.0.1"}, "old": {"Version": "0.2.1"}}]}) - - # Write to the workspace and check the diff - test_blueprint = {"description": "An example GlusterFS server with samba, ws version", - "name":"example-glusterfs", - "version": "0.3.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB]} - - resp = self.server.post("/api/v0/blueprints/workspace", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - # Get the differences between the newest commit and the workspace - resp = self.server.get("/api/v0/blueprints/diff/example-glusterfs/NEWEST/WORKSPACE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - result = {"diff": [{"new": {"Description": "An example GlusterFS server with samba, ws version"}, - "old": {"Description": "An example GlusterFS server with samba"}}, - {"new": {"Version": "0.3.0"}, - "old": {"Version": "0.0.1"}}, - {"new": {"Package": TMUX_GLOB}, - "old": None}]} - self.assertEqual(data, result) - - def test_14_blueprints_depsolve(self): - """Test /api/v0/blueprints/depsolve/""" - resp = self.server.get("/api/v0/blueprints/depsolve/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0]["blueprint"]["name"], "example-glusterfs") - self.assertEqual(len(blueprints[0]["dependencies"]) > 10, True) - self.assertFalse(data.get("errors")) - - def test_14_blueprints_depsolve_empty(self): - """Test /api/v0/blueprints/depsolve/ on empty blueprint""" - test_blueprint = {"description": "An empty blueprint", - "name":"void", - "version": "0.1.0"} - resp = self.server.post("/api/v0/blueprints/new", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v0/blueprints/depsolve/void") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0]["blueprint"]["name"], "void") - self.assertEqual(blueprints[0]["blueprint"]["packages"], []) - self.assertEqual(blueprints[0]["blueprint"]["modules"], []) - self.assertEqual(blueprints[0]["dependencies"], []) - self.assertFalse(data.get("errors")) - - def test_15_blueprints_freeze(self): - """Test /api/v0/blueprints/freeze/""" - resp = self.server.get("/api/v0/blueprints/freeze/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - 
self.assertEqual(len(blueprints), 1) - self.assertTrue(len(blueprints[0]["blueprint"]["modules"]) > 0) - self.assertEqual(blueprints[0]["blueprint"]["name"], "example-glusterfs") - evra = blueprints[0]["blueprint"]["modules"][0]["version"] - self.assertEqual(len(evra) > 10, True) - - def test_projects_list(self): - """Test /api/v0/projects/list""" - resp = self.server.get("/api/v0/projects/list") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - projects = data.get("projects") - self.assertEqual(len(projects) > 10, True) - - expected_total = data["total"] - - # Make sure limit=0 still returns the correct total - resp = self.server.get("/api/v0/projects/list?limit=0") - data = json.loads(resp.data) - self.assertEqual(data["total"], expected_total) - - def test_projects_info(self): - """Test /api/v0/projects/info/""" - resp = self.server.get("/api/v0/projects/info/bash") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - projects = data.get("projects") - self.assertEqual(len(projects) > 0, True) - self.assertEqual(projects[0]["name"], "bash") - self.assertEqual(projects[0]["builds"][0]["source"]["license"], "GPLv3+") - - def test_projects_depsolve(self): - """Test /api/v0/projects/depsolve/""" - resp = self.server.get("/api/v0/projects/depsolve/bash") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - deps = data.get("projects") - self.assertEqual(len(deps) > 10, True) - self.assertTrue("basesystem" in [dep["name"] for dep in deps]) - - def test_projects_source_00_list(self): - """Test /api/v0/projects/source/list""" - resp = self.server.get("/api/v0/projects/source/list") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data["sources"]) - # Make sure it lists some common sources - for r in ["lorax-1", "lorax-2", "lorax-3", "lorax-4", "other-repo", "single-repo"]: - self.assertTrue(r in data["sources"] ) - - # Make sure the duplicate repo is not listed - self.assertFalse("single-repo-duplicate" in data["sources"]) - - def test_projects_source_00_info(self): - """Test /api/v0/projects/source/info""" - resp = self.server.get("/api/v0/projects/source/info/lorax-3") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data["sources"]) - sources = data["sources"] - self.assertTrue("lorax-3" in sources) - self.assertTrue("id" not in sources["lorax-3"]) - self.assertTrue("name" in sources["lorax-3"]) - self.assertEqual(sources["lorax-3"]["name"], "lorax-3") - - def test_projects_source_00_info_comma(self): - """Test /api/v0/projects/source/info/lorax-3,lorax-2""" - resp = self.server.get("/api/v0/projects/source/info/lorax-3,lorax-2") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data["sources"]) - sources = data["sources"] - self.assertEqual(len(sources), 2) - self.assertTrue("lorax-3" in sources) - self.assertTrue("id" not in sources["lorax-3"]) - self.assertTrue("name" in sources["lorax-3"]) - self.assertEqual(sources["lorax-3"]["name"], "lorax-3") - - self.assertTrue("lorax-2" in sources) - self.assertTrue("id" not in sources["lorax-2"]) - self.assertTrue("name" in sources["lorax-2"]) - self.assertEqual(sources["lorax-2"]["name"], "lorax-2") - - def test_projects_source_00_info_toml(self): - """Test /api/v0/projects/source/info TOML output""" - resp = self.server.get("/api/v0/projects/source/info/lorax-3?format=toml") - data = toml.loads(resp.data) - self.assertNotEqual(data, None) - print(data) - sources = data - self.assertTrue("lorax-3" in sources) - self.assertTrue("id" 
not in sources["lorax-3"])
-        self.assertTrue("name" in sources["lorax-3"])
-        self.assertEqual(sources["lorax-3"]["name"], "lorax-3")
-
-    def test_projects_source_00_info_wild(self):
-        """Test /api/v0/projects/source/info/* wildcard"""
-        resp = self.server.get("/api/v0/projects/source/info/*")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        print(data["sources"])
-        sources = data["sources"]
-        self.assertTrue(len(sources) > 1)
-        self.assertTrue("lorax-3" in sources)
-        self.assertTrue("id" not in sources["lorax-3"])
-        self.assertTrue("name" in sources["lorax-3"])
-        self.assertEqual(sources["lorax-3"]["name"], "lorax-3")
-
-    def test_projects_source_00_new_json(self):
-        """Test /api/v0/projects/source/new with a new json source"""
-        json_source = open("./tests/pylorax/source/test-repo.json").read()
-        self.assertTrue(len(json_source) > 0)
-        resp = self.server.post("/api/v0/projects/source/new",
-                                data=json_source,
-                                content_type="application/json")
-        data = json.loads(resp.data)
-        self.assertEqual(data, {"status":True})
-
-        # Is it listed?
-        resp = self.server.get("/api/v0/projects/source/list")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        sources = data["sources"]
-        self.assertTrue("new-repo-1" in sources)
-
-    def test_projects_source_00_new_toml(self):
-        """Test /api/v0/projects/source/new with a new toml source"""
-        toml_source = open("./tests/pylorax/source/test-repo.toml").read()
-        self.assertTrue(len(toml_source) > 0)
-        resp = self.server.post("/api/v0/projects/source/new",
-                                data=toml_source,
-                                content_type="text/x-toml")
-        data = json.loads(resp.data)
-        self.assertEqual(data, {"status":True})
-
-        # Is it listed?
-        resp = self.server.get("/api/v0/projects/source/list")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        sources = data["sources"]
-        self.assertTrue("new-repo-2" in sources)
-
-    def test_projects_source_00_new_toml_vars(self):
-        """Test /api/v0/projects/source/new with a new toml source using vars"""
-        toml_source = open("./tests/pylorax/source/test-repo-vars.toml").read()
-        self.assertTrue(len(toml_source) > 0)
-        resp = self.server.post("/api/v0/projects/source/new",
-                                data=toml_source,
-                                content_type="text/x-toml")
-        data = json.loads(resp.data)
-        self.assertEqual(data, {"status":True})
-
-        # Was it added, and is it correct?
- resp = self.server.get("/api/v0/projects/source/info/new-repo-2-vars") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - sources = data["sources"] - print(sources) - self.assertTrue("new-repo-2-vars" in sources) - - self.assertTrue(self.substitutions["releasever"] in sources["new-repo-2-vars"]["url"]) - self.assertTrue(self.substitutions["basearch"] in sources["new-repo-2-vars"]["url"]) - self.assertTrue(self.substitutions["releasever"] in sources["new-repo-2-vars"]["gpgkey_urls"][0]) - self.assertTrue(self.substitutions["basearch"] in sources["new-repo-2-vars"]["gpgkey_urls"][0]) - - def test_projects_source_00_replace(self): - """Test /api/v0/projects/source/new with a replacement source""" - toml_source = open("./tests/pylorax/source/replace-repo.toml").read() - self.assertTrue(len(toml_source) > 0) - resp = self.server.post("/api/v0/projects/source/new", - data=toml_source, - content_type="text/x-toml") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - # Check to see if it was really changed - resp = self.server.get("/api/v0/projects/source/info/single-repo") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - sources = data["sources"] - self.assertTrue("single-repo" in sources) - repo = sources["single-repo"] - self.assertEqual(repo["check_ssl"], False) - self.assertTrue("gpgkey_urls" not in repo) - - def test_projects_source_00_replace_system(self): - """Test /api/v0/projects/source/new with a replacement system source""" - if self.rawhide: - toml_source = open("./tests/pylorax/source/replace-rawhide.toml").read() - else: - toml_source = open("./tests/pylorax/source/replace-fedora.toml").read() - self.assertTrue(len(toml_source) > 0) - resp = self.server.post("/api/v0/projects/source/new", - data=toml_source, - content_type="text/x-toml") - self.assertEqual(resp.status_code, 400) - data = json.loads(resp.data) - self.assertEqual(data["status"], False) - - def test_projects_source_00_bad_url(self): - """Test /api/v0/projects/source/new with a new source that has an invalid url""" - toml_source = open("./tests/pylorax/source/bad-repo.toml").read() - self.assertTrue(len(toml_source) > 0) - resp = self.server.post("/api/v0/projects/source/new", - data=toml_source, - content_type="text/x-toml") - self.assertEqual(resp.status_code, 400) - data = json.loads(resp.data) - self.assertEqual(data["status"], False) - - def test_projects_source_01_delete_system(self): - """Test /api/v0/projects/source/delete a system source""" - if self.rawhide: - resp = self.server.delete("/api/v0/projects/source/delete/rawhide") - else: - resp = self.server.delete("/api/v0/projects/source/delete/fedora") - self.assertEqual(resp.status_code, 400) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False) - - # Make sure fedora/rawhide is still listed - resp = self.server.get("/api/v0/projects/source/list") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(self.system_repo in data["sources"], "%s not in %s" % (self.system_repo, data["sources"])) - - def test_projects_source_02_delete_single(self): - """Test /api/v0/projects/source/delete a single source""" - resp = self.server.delete("/api/v0/projects/source/delete/single-repo") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data, {"status":True}) - - # Make sure single-repo isn't listed - resp = self.server.get("/api/v0/projects/source/list") - data = json.loads(resp.data) - 
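The source tests in this stretch amount to one round trip: add, inspect, delete. A compressed version follows; the TOML body is a hypothetical scratch repo written from the v0 source fields the assertions here check (name, url, type, check_ssl), not one of the checked-in fixtures:

    # Round trip: new -> info -> delete ("scratch-repo" is a placeholder name)
    toml_source = ('name = "scratch-repo"\n'
                   'url = "file:///tmp/lorax-empty-repo/"\n'
                   'type = "yum-baseurl"\n'
                   'check_ssl = false\n')
    resp = self.server.post("/api/v0/projects/source/new",
                            data=toml_source, content_type="text/x-toml")
    assert json.loads(resp.data) == {"status": True}

    resp = self.server.get("/api/v0/projects/source/info/scratch-repo")
    assert "scratch-repo" in json.loads(resp.data)["sources"]

    resp = self.server.delete("/api/v0/projects/source/delete/scratch-repo")
    assert json.loads(resp.data) == {"status": True}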
self.assertNotEqual(data, None) - self.assertTrue("single-repo" not in data["sources"]) - - def test_projects_source_03_delete_unknown(self): - """Test /api/v0/projects/source/delete an unknown source""" - resp = self.server.delete("/api/v0/projects/source/delete/unknown-repo") - self.assertEqual(resp.status_code, 400) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False) - - def test_projects_source_04_delete_multi(self): - """Test /api/v0/projects/source/delete a source from a file with multiple sources""" - resp = self.server.delete("/api/v0/projects/source/delete/lorax-3") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data, {"status":True}) - - # Make sure single-repo isn't listed - resp = self.server.get("/api/v0/projects/source/list") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue("lorax-3" not in data["sources"]) - - def test_modules_list(self): - """Test /api/v0/modules/list""" - resp = self.server.get("/api/v0/modules/list") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - modules = data.get("modules") - self.assertEqual(len(modules) > 10, True) - self.assertEqual(modules[0]["group_type"], "rpm") - - expected_total = data["total"] - - resp = self.server.get("/api/v0/modules/list/d*") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - modules = data.get("modules") - self.assertEqual(len(modules) > 0, True) - self.assertEqual(modules[0]["name"].startswith("d"), True) - - # Make sure limit=0 still returns the correct total - resp = self.server.get("/api/v0/modules/list?limit=0") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["total"], expected_total) - - def test_modules_info(self): - """Test /api/v0/modules/info""" - resp = self.server.get("/api/v0/modules/info/bash") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - modules = data.get("modules") - self.assertEqual(len(modules) > 0, True) - self.assertEqual(modules[0]["name"], "bash") - self.assertTrue("basesystem" in [dep["name"] for dep in modules[0]["dependencies"]]) - - def test_blueprint_new_branch(self): - """Test the /api/v0/blueprints/new route with a new branch""" - test_blueprint = {"description": "An example GlusterFS server with samba", - "name":"example-glusterfs", - "version": "0.2.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v0/blueprints/new?branch=test", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v0/blueprints/info/example-glusterfs?branch=test") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0], test_blueprint) - - def assert_documentation(self, response): - """ - Assert response containing documentation from /api/doc/ is - valid *without* comparing to the actual file on disk. 
-        """
-        self.assertEqual(200, response.status_code)
-        self.assertTrue(len(response.data) > 1024)
-        # look for some well known strings inside the documentation
-        self.assertRegex(response.data.decode("utf-8"), r"Lorax [\d.]+ documentation")
-        self.assertRegex(response.data.decode("utf-8"), r"Copyright \d+, Red Hat, Inc.")
-
-    def test_api_docs(self):
-        """Test the /api/docs/"""
-        resp = self.server.get("/api/docs/")
-        self.assert_documentation(resp)
-
-    def test_api_docs_with_existing_path(self):
-        """Test the /api/docs/modules.html"""
-        resp = self.server.get("/api/docs/modules.html")
-        self.assert_documentation(resp)
-
-    def test_compose_01_types(self):
-        """Test the /api/v0/compose/types route"""
-        resp = self.server.get("/api/v0/compose/types")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual({"name": "tar", "enabled": True} in data["types"], True)
-
-        # All of the non-x86 compose types disable alibaba
-        if os.uname().machine != 'x86_64':
-            self.assertEqual({"name": "alibaba", "enabled": False} in data["types"], True)
-
-    def test_compose_02_bad_type(self):
-        """Test that using an unsupported image type fails"""
-        test_compose = {"blueprint_name": "example-glusterfs",
-                        "compose_type": "snakes",
-                        "branch": "master"}
-
-        resp = self.server.post("/api/v0/compose?test=1",
-                                data=json.dumps(test_compose),
-                                content_type="application/json")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["status"], False, "Failed to fail to start test compose: %s" % data)
-        self.assertEqual(data["errors"], [{"id": BAD_COMPOSE_TYPE, "msg": "Invalid compose type (snakes), must be one of ['alibaba', 'ami', 'ext4-filesystem', 'google', 'hyper-v', 'live-iso', 'liveimg-tar', 'openstack', 'partitioned-disk', 'qcow2', 'tar', 'vhd', 'vmdk']"}],
-                         "Failed to get errors: %s" % data)
-
-    def test_compose_03_status_fail(self):
-        """Test that requesting a status for a bad uuid is empty"""
-        resp = self.server.get("/api/v0/compose/status/NO-UUID-TO-SEE-HERE")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["uuids"], [], "Failed to get empty result bad uuid: %s" % data)
-
-    def test_compose_04_cancel_fail(self):
-        """Test that requesting a cancel for a bad uuid fails."""
-        resp = self.server.delete("/api/v0/compose/cancel/NO-UUID-TO-SEE-HERE")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["status"], False, "Failed to get an error for a bad uuid: %s" % data)
-        self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "NO-UUID-TO-SEE-HERE is not a valid build uuid"}],
-                         "Failed to get errors: %s" % data)
-
-    def test_compose_05_delete_fail(self):
-        """Test that requesting a delete for a bad uuid fails."""
-        resp = self.server.delete("/api/v0/compose/delete/NO-UUID-TO-SEE-HERE")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "no-uuid-to-see-here is not a valid build uuid"}],
-                         "Failed to get an error for a bad uuid: %s" % data)
-
-    def test_compose_06_info_fail(self):
-        """Test that requesting info for a bad uuid fails."""
-        resp = self.server.get("/api/v0/compose/info/NO-UUID-TO-SEE-HERE")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["status"], False, "Failed to get an error for a bad uuid: %s" % data)
-        self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "NO-UUID-TO-SEE-HERE is not a valid build uuid"}],
-                         "Failed to get errors:
%s" % data) - - def test_compose_07_metadata_fail(self): - """Test that requesting metadata for a bad uuid fails.""" - resp = self.server.get("/api/v0/compose/metadata/NO-UUID-TO-SEE-HERE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Failed to get an error for a bad uuid: %s" % data) - self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "NO-UUID-TO-SEE-HERE is not a valid build uuid"}], - "Failed to get errors: %s" % data) - - def test_compose_08_results_fail(self): - """Test that requesting results for a bad uuid fails.""" - resp = self.server.get("/api/v0/compose/results/NO-UUID-TO-SEE-HERE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Failed to get an error for a bad uuid: %s" % data) - self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "NO-UUID-TO-SEE-HERE is not a valid build uuid"}], - "Failed to get errors: %s" % data) - - def test_compose_09_logs_fail(self): - """Test that requesting logs for a bad uuid fails.""" - resp = self.server.get("/api/v0/compose/logs/NO-UUID-TO-SEE-HERE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Failed to get an error for a bad uuid: %s" % data) - self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "NO-UUID-TO-SEE-HERE is not a valid build uuid"}], - "Failed to get errors: %s" % data) - - def test_compose_10_log_fail(self): - """Test that requesting log for a bad uuid fails.""" - resp = self.server.get("/api/v0/compose/log/NO-UUID-TO-SEE-HERE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Failed to get an error for a bad uuid: %s" % data) - self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "NO-UUID-TO-SEE-HERE is not a valid build uuid"}], - "Failed to get errors: %s" % data) - - def test_compose_11_create_failed(self): - """Test the /api/v0/compose routes with a failed test compose""" - test_compose = {"blueprint_name": "example-glusterfs", - "compose_type": "tar", - "branch": "master"} - - resp = self.server.post("/api/v0/compose?test=1", - data=json.dumps(test_compose), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data) - - build_id = data["build_id"] - - # Is it in the queue list (either new or run is fine, based on timing) - resp = self.server.get("/api/v0/compose/queue") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["new"] + data["run"]] - self.assertEqual(build_id in ids, True, "Failed to add build to the queue") - - # V0 API should *not* have the uploads details in the results - uploads = any("uploads" in e for e in data["new"] + data["run"]) - self.assertFalse(uploads, "V0 API should not include 'uploads' field") - - # Wait for it to start - self.assertEqual(_wait_for_status(self, build_id, ["RUNNING"]), True, "Failed to start test compose") - - # Wait for it to finish - self.assertEqual(_wait_for_status(self, build_id, ["FAILED"]), True, "Failed to finish test compose") - - resp = self.server.get("/api/v0/compose/info/%s" % build_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["queue_status"], "FAILED", "Build not in FAILED state") - - # Test the /api/v0/compose/failed route - resp = self.server.get("/api/v0/compose/failed") - 
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        ids = [e["id"] for e in data["failed"]]
-        self.assertEqual(build_id in ids, True, "Failed build not listed by /compose/failed")
-
-        # V0 API should *not* have the uploads details in the results
-        print(data)
-        uploads = any("uploads" in e for e in data["failed"])
-        self.assertFalse(uploads, "V0 API should not include 'uploads' field")
-
-        # Test the /api/v0/compose/finished route
-        resp = self.server.get("/api/v0/compose/finished")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["finished"], [], "Finished build not listed by /compose/finished")
-
-        # Test the /api/v0/compose/status/ route
-        resp = self.server.get("/api/v0/compose/status/%s" % build_id)
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        ids = [(e["id"], e["queue_status"]) for e in data["uuids"]]
-        self.assertEqual((build_id, "FAILED") in ids, True, "Failed build not listed by /compose/status")
-
-        # V0 API should *not* have the uploads details in the results
-        print(data)
-        uploads = any("uploads" in e for e in data["uuids"])
-        self.assertFalse(uploads, "V0 API should not include 'uploads' field")
-
-        # Test the /api/v0/compose/cancel/ route
-        resp = self.server.post("/api/v0/compose?test=1",
-                                data=json.dumps(test_compose),
-                                content_type="application/json")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data)
-
-        cancel_id = data["build_id"]
-
-        # Wait for it to start
-        self.assertEqual(_wait_for_status(self, cancel_id, ["RUNNING"]), True, "Failed to start test compose")
-
-        # Cancel the build
-        resp = self.server.delete("/api/v0/compose/cancel/%s" % cancel_id)
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["status"], True, "Failed to cancel test compose: %s" % data)
-
-        # Delete the failed build
-        # Test the /api/v0/compose/delete/ route
-        resp = self.server.delete("/api/v0/compose/delete/%s" % build_id)
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        ids = [(e["uuid"], e["status"]) for e in data["uuids"]]
-        self.assertEqual((build_id, True) in ids, True, "Failed to delete test compose: %s" % data)
-
-        # Make sure the failed list is empty
-        resp = self.server.get("/api/v0/compose/failed")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["failed"], [], "Failed to delete the failed build: %s" % data)
-
-    def test_compose_12_create_finished(self):
-        """Test the /api/v0/compose routes with a finished test compose"""
-        test_compose = {"blueprint_name": "example-custom-base",
-                        "compose_type": "tar",
-                        "branch": "master"}
-
-        resp = self.server.post("/api/v0/compose?test=2",
-                                data=json.dumps(test_compose),
-                                content_type="application/json")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data)
-
-        build_id = data["build_id"]
-
-        # Is it in the queue list (either new or run is fine, based on timing)
-        resp = self.server.get("/api/v0/compose/queue")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        ids = [e["id"] for e in data["new"] + data["run"]]
-        self.assertEqual(build_id in ids, True, "Failed to add build to the queue")
-
-        # V0 API should *not* have the uploads details in the results
-        uploads = any("uploads" in e for e in data["new"] + data["run"])
-        self.assertFalse(uploads, "V0 API should not include 'uploads' field")
-
-        # Wait for it to start
-        self.assertEqual(_wait_for_status(self, build_id, ["RUNNING"]), True, "Failed to start test compose")
-
-        # Wait for it to finish
-        self.assertEqual(_wait_for_status(self, build_id, ["FINISHED"]), True, "Failed to finish test compose")
-
-        resp = self.server.get("/api/v0/compose/info/%s" % build_id)
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["queue_status"], "FINISHED", "Build not in FINISHED state")
-
-        # Test the /api/v0/compose/finished route
-        resp = self.server.get("/api/v0/compose/finished")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        ids = [e["id"] for e in data["finished"]]
-        self.assertEqual(build_id in ids, True, "Finished build not listed by /compose/finished")
-
-        # V0 API should *not* have the uploads details in the results
-        uploads = any("uploads" in e for e in data["finished"])
-        self.assertFalse(uploads, "V0 API should not include 'uploads' field")
-
-        # Test the /api/v0/compose/failed route
-        resp = self.server.get("/api/v0/compose/failed")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["failed"], [], "Failed build not listed by /compose/failed")
-
-        # Test the /api/v0/compose/status/ route
-        resp = self.server.get("/api/v0/compose/status/%s" % build_id)
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        ids = [(e["id"], e["queue_status"]) for e in data["uuids"]]
-        self.assertEqual((build_id, "FINISHED") in ids, True, "Finished build not listed by /compose/status")
-
-        # V0 API should *not* have the uploads details in the results
-        uploads = any("uploads" in e for e in data["uuids"])
-        self.assertFalse(uploads, "V0 API should not include 'uploads' field")
-
-        # Test the /api/v0/compose/metadata/ route
-        resp = self.server.get("/api/v0/compose/metadata/%s" % build_id)
-        self.assertEqual(resp.status_code, 200)
-        self.assertEqual(len(resp.data) > 1024, True)
-
-        # Test the /api/v0/compose/results/ route
-        resp = self.server.get("/api/v0/compose/results/%s" % build_id)
-        self.assertEqual(resp.status_code, 200)
-        self.assertEqual(len(resp.data) > 1024, True)
-
-        # Test the /api/v0/compose/image/ route
-        resp = self.server.get("/api/v0/compose/image/%s" % build_id)
-        self.assertEqual(resp.status_code, 200)
-        self.assertEqual(len(resp.data) > 0, True)
-        self.assertEqual(resp.data, b"TEST IMAGE")
-
-        # Examine the final-kickstart.ks for the customizations
-        # A bit kludgy since it examines the filesystem directly, but that's better than unpacking the metadata
-        final_ks = open(joinpaths(self.repo_dir, "var/lib/lorax/composer/results/", build_id, "final-kickstart.ks")).read()
-
-        # Check for the expected customizations in the kickstart
-        self.assertTrue("network --hostname=" in final_ks)
-        self.assertTrue("sshkey --user root" in final_ks)
-
-        # Examine the config.toml to make sure it has an empty extra_boot_args
-        cfg_path = joinpaths(self.repo_dir, "var/lib/lorax/composer/results/", build_id, "config.toml")
-        cfg_dict = toml.loads(open(cfg_path, "r").read())
-        self.assertTrue("extra_boot_args" in cfg_dict)
-        self.assertEqual(cfg_dict["extra_boot_args"], "")
-
-        # Delete the finished build
-        # Test the /api/v0/compose/delete/ route
-        resp = self.server.delete("/api/v0/compose/delete/%s" % build_id)
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        ids = [(e["uuid"], e["status"]) for e in data["uuids"]]
-        self.assertEqual((build_id, True) in ids, True, "Failed to delete test compose: 
%s" % data) - - # Make sure the finished list is empty - resp = self.server.get("/api/v0/compose/finished") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["finished"], [], "Failed to delete the failed build: %s" % data) - - def test_compose_13_status_filter(self): - """Test filter arguments on the /api/v0/compose/status route""" - # Get a couple compose results going so we have something to filter - test_compose_fail = {"blueprint_name": "example-glusterfs", - "compose_type": "tar", - "branch": "master"} - - test_compose_success = {"blueprint_name": "example-custom-base", - "compose_type": "tar", - "branch": "master"} - - resp = self.server.post("/api/v0/compose?test=1", - data=json.dumps(test_compose_fail), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data) - - build_id_fail = data["build_id"] - - resp = self.server.get("/api/v0/compose/queue") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["new"] + data["run"]] - self.assertEqual(build_id_fail in ids, True, "Failed to add build to the queue") - - # V0 API should *not* have the uploads details in the results - uploads = any("uploads" in e for e in data["new"] + data["run"]) - self.assertFalse(uploads, "V0 API should not include 'uploads' field") - - # Wait for it to start - self.assertEqual(_wait_for_status(self, build_id_fail, ["RUNNING"]), True, "Failed to start test compose") - - # Wait for it to finish - self.assertEqual(_wait_for_status(self, build_id_fail, ["FAILED"]), True, "Failed to finish test compose") - - # Fire up the other one - resp = self.server.post("/api/v0/compose?test=2", - data=json.dumps(test_compose_success), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data) - - build_id_success = data["build_id"] - - resp = self.server.get("/api/v0/compose/queue") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["new"] + data["run"]] - self.assertEqual(build_id_success in ids, True, "Failed to add build to the queue") - - # V0 API should *not* have the uploads details in the results - uploads = any("uploads" in e for e in data["new"] + data["run"]) - self.assertFalse(uploads, "V0 API should not include 'uploads' field") - - # Wait for it to start - self.assertEqual(_wait_for_status(self, build_id_success, ["RUNNING"]), True, "Failed to start test compose") - - # Wait for it to finish - self.assertEqual(_wait_for_status(self, build_id_success, ["FINISHED"]), True, "Failed to finish test compose") - - # Test that both composes appear in /api/v0/compose/status/* - resp = self.server.get("/api/v0/compose/status/*") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["uuids"]] - self.assertIn(build_id_success, ids, "Finished build not listed by /compose/status/*") - self.assertIn(build_id_fail, ids, "Failed build not listed by /compose/status/*") - - # V0 API should *not* have the uploads details in the results - uploads = any("uploads" in e for e in data["uuids"]) - self.assertFalse(uploads, "V0 API should not include 'uploads' field") - - # Filter by name - resp = self.server.get("/api/v0/compose/status/*?blueprint=%s" % test_compose_fail["blueprint_name"]) - data = json.loads(resp.data) - 
-        self.assertNotEqual(data, None)
-        ids = [e["id"] for e in data["uuids"]]
-        self.assertIn(build_id_fail, ids, "Failed build not listed by /compose/status blueprint filter")
-        self.assertNotIn(build_id_success, ids, "Finished build listed by /compose/status blueprint filter")
-
-        # V0 API should *not* have the uploads details in the results
-        uploads = any("uploads" in e for e in data["uuids"])
-        self.assertFalse(uploads, "V0 API should not include 'uploads' field")
-
-        # Filter by type
-        resp = self.server.get("/api/v0/compose/status/*?type=tar")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        ids = [e["id"] for e in data["uuids"]]
-        self.assertIn(build_id_fail, ids, "Failed build not listed by /compose/status type filter")
-        self.assertIn(build_id_success, ids, "Finished build not listed by /compose/status type filter")
-
-        # V0 API should *not* have the uploads details in the results
-        uploads = any("uploads" in e for e in data["uuids"])
-        self.assertFalse(uploads, "V0 API should not include 'uploads' field")
-
-        resp = self.server.get("/api/v0/compose/status/*?type=snakes")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        ids = [e["id"] for e in data["uuids"]]
-        self.assertEqual(ids, [], "Invalid type not filtered by /compose/status type filter")
-
-        # Filter by status
-        resp = self.server.get("/api/v0/compose/status/*?status=FAILED")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        ids = [e["id"] for e in data["uuids"]]
-        self.assertIn(build_id_fail, ids, "Failed build not listed by /compose/status status filter")
-        self.assertNotIn(build_id_success, ids, "Finished build listed by /compose/status status filter")
-
-        # V0 API should *not* have the uploads details in the results
-        uploads = any("uploads" in e for e in data["uuids"])
-        self.assertFalse(uploads, "V0 API should not include 'uploads' field")
-
-    def test_compose_14_kernel_append(self):
-        """Test the /api/v0/compose with kernel append customization"""
-        test_compose = {"blueprint_name": "example-append",
-                        "compose_type": "tar",
-                        "branch": "master"}
-
-        resp = self.server.post("/api/v0/compose?test=2",
-                                data=json.dumps(test_compose),
-                                content_type="application/json")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data)
-
-        build_id = data["build_id"]
-
-        # Is it in the queue list (either new or run is fine, based on timing)
-        resp = self.server.get("/api/v0/compose/queue")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        ids = [e["id"] for e in data["new"] + data["run"]]
-        self.assertEqual(build_id in ids, True, "Failed to add build to the queue")
-
-        # V0 API should *not* have the uploads details in the results
-        uploads = any("uploads" in e for e in data["new"] + data["run"])
-        self.assertFalse(uploads, "V0 API should not include 'uploads' field")
-
-        # Wait for it to start
-        self.assertEqual(_wait_for_status(self, build_id, ["RUNNING"]), True, "Failed to start test compose")
-
-        # Wait for it to finish
-        self.assertEqual(_wait_for_status(self, build_id, ["FINISHED"]), True, "Failed to finish test compose")
-
-        resp = self.server.get("/api/v0/compose/info/%s" % build_id)
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        self.assertEqual(data["queue_status"], "FINISHED", "Build not in FINISHED state")
-
-        # Examine the final-kickstart.ks for the customizations
-        # A bit kludgy since it examines the filesystem directly, but that's better than
unpacking the metadata - final_ks = open(joinpaths(self.repo_dir, "var/lib/lorax/composer/results/", build_id, "final-kickstart.ks")).read() - - # Check for the expected customizations in the kickstart - # nosmt=force should be in the bootloader line, find it and check it - bootloader_line = "" - for line in final_ks.splitlines(): - if line.startswith("bootloader"): - bootloader_line = line - break - self.assertNotEqual(bootloader_line, "", "No bootloader line found") - self.assertTrue("nosmt=force" in bootloader_line) - - # Examine the config.toml to make sure it was written there as well - cfg_path = joinpaths(self.repo_dir, "var/lib/lorax/composer/results/", build_id, "config.toml") - cfg_dict = toml.loads(open(cfg_path, "r").read()) - self.assertTrue("extra_boot_args" in cfg_dict) - self.assertEqual(cfg_dict["extra_boot_args"], "nosmt=force") - - def assertInputError(self, resp): - """Check all the conditions for a successful input check error result""" - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(resp.status_code, 400) - self.assertEqual(data["status"], False) - self.assertTrue(len(data["errors"]) > 0) - self.assertTrue("Invalid characters in" in data["errors"][0]["msg"]) - - def test_blueprints_list_branch(self): - resp = self.server.get("/api/v0/blueprints/list?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_info_input(self): - """Test the blueprints/info input character checking""" - # /api/v0/blueprints/info/ - resp = self.server.get("/api/v0/blueprints/info/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v0/blueprints/info/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v0/blueprints/info/example-http-server?format=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_changes_input(self): - """Test the blueprints/changes input character checking""" - # /api/v0/blueprints/changes/ - resp = self.server.get("/api/v0/blueprints/changes/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v0/blueprints/changes/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_new_input(self): - """Test the blueprints/new input character checking""" - # /api/v0/blueprints/new - test_blueprint = {"description": "An example GlusterFS server with samba", - "name":UTF8_TEST_STRING, - "version": "0.2.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v0/blueprints/new", - data=json.dumps(test_blueprint), - content_type="application/json") - self.assertInputError(resp) - - test_blueprint["name"] = "example-glusterfs" - resp = self.server.post("/api/v0/blueprints/new?branch=" + UTF8_TEST_STRING, - data=json.dumps(test_blueprint), - content_type="application/json") - self.assertInputError(resp) - - def test_blueprints_delete_input(self): - """Test the blueprints/delete input character checking""" - resp = self.server.delete("/api/v0/blueprints/delete/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.delete("/api/v0/blueprints/delete/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_workspace_input(self): - """Test the blueprints/workspace input character checking""" - test_blueprint = {"description": "An example GlusterFS server with samba, ws version", - 
"name":UTF8_TEST_STRING, - "version": "0.3.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v0/blueprints/workspace", - data=json.dumps(test_blueprint), - content_type="application/json") - self.assertInputError(resp) - - test_blueprint["name"] = "example-glusterfs" - resp = self.server.post("/api/v0/blueprints/workspace?branch=" + UTF8_TEST_STRING, - data=json.dumps(test_blueprint), - content_type="application/json") - self.assertInputError(resp) - - def test_blueprints_workspace_delete_input(self): - """Test the DELETE blueprints/workspace input character checking""" - resp = self.server.delete("/api/v0/blueprints/workspace/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.delete("/api/v0/blueprints/workspace/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_undo_input(self): - """Test the blueprints/undo/... input character checking""" - resp = self.server.post("/api/v0/blueprints/undo/" + UTF8_TEST_STRING + "/deadbeef") - self.assertInputError(resp) - - resp = self.server.post("/api/v0/blueprints/undo/example-http-server/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.post("/api/v0/blueprints/undo/example-http-server/deadbeef?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_tag_input(self): - """Test the blueprints/tag input character checking""" - resp = self.server.post("/api/v0/blueprints/tag/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.post("/api/v0/blueprints/tag/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_diff_input(self): - """Test the blueprints/diff input character checking""" - # /api/v0/blueprints/diff/// - resp = self.server.get("/api/v0/blueprints/diff/" + UTF8_TEST_STRING + "/NEWEST/WORKSPACE") - self.assertInputError(resp) - - resp = self.server.get("/api/v0/blueprints/diff/example-http-server/NEWEST/WORKSPACE?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_freeze_input(self): - """Test the blueprints/freeze input character checking""" - resp = self.server.get("/api/v0/blueprints/freeze/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v0/blueprints/freeze/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v0/blueprints/freeze/example-http-server?format=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_depsolve_input(self): - """Test the blueprints/depsolve input character checking""" - resp = self.server.get("/api/v0/blueprints/depsolve/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v0/blueprints/depsolve/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_projects_info_input(self): - """Test the projects/info input character checking""" - resp = self.server.get("/api/v0/projects/info/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_projects_depsolve_input(self): - """Test the projects/depsolve input character checking""" - resp = self.server.get("/api/v0/projects/depsolve/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_projects_source_info_input(self): - """Test the /api/v0/projects/source/info input character checking""" - resp = self.server.get("/api/v0/projects/source/info/" + 
UTF8_TEST_STRING) - self.assertInputError(resp) - - # Test failure for bad format characters - resp = self.server.get("/api/v0/projects/source/info/lorax-3?format=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_projects_source_info_unknown(self): - """Test the /api/v0/projects/source/info unknown source""" - resp = self.server.get("/api/v0/projects/source/info/notasource") - data = json.loads(resp.data) - print(data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertTrue("is not a valid source" in data["errors"][0]["msg"]) - - def test_projects_source_info_unknown_toml(self): - """Test the /api/v0/projects/source/info unknown source TOML output""" - resp = self.server.get("/api/v0/projects/source/info/notasource?format=toml") - data = json.loads(resp.data) - print(data) - self.assertNotEqual(data, None) - self.assertEqual(resp.status_code, 400) - self.assertEqual(data["status"], False) - self.assertTrue(len(data["errors"]) > 0) - self.assertTrue("is not a valid source" in data["errors"][0]["msg"]) - - def test_projects_source_delete_input(self): - """Test the projects/source/delete input character checking""" - resp = self.server.delete("/api/v0/projects/source/delete/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_modules_list_input(self): - """Test the modules/list input character checking""" - resp = self.server.get("/api/v0/modules/list/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_modules_info_input(self): - """Test the modules/info input character checking""" - resp = self.server.get("/api/v0/modules/info/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_status_input(self): - """Test the compose/status input character checking""" - resp = self.server.get("/api/v0/compose/status/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v0/compose/status/*?blueprint=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v0/compose/status/*?status=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v0/compose/status/*?type=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_cancel_input(self): - """Test the compose/cancel input character checking""" - resp = self.server.delete("/api/v0/compose/cancel/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_delete_input(self): - """Test the compose/delete input character checking""" - resp = self.server.delete("/api/v0/compose/delete/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_info_input(self): - """Test the compose/info input character checking""" - resp = self.server.get("/api/v0/compose/info/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_metadata_input(self): - """Test the compose/metadata input character checking""" - resp = self.server.get("/api/v0/compose/metadata/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_results_input(self): - """Test the compose/results input character checking""" - resp = self.server.get("/api/v0/compose/results/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_logs_input(self): - """Test the compose/logs input character checking""" - resp = self.server.get("/api/v0/compose/logs/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_image_input(self): - """Test the compose/image input character checking""" - resp = 
self.server.get("/api/v0/compose/image/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_log_input(self): - """Test the compose/log input character checking""" - resp = self.server.get("/api/v0/compose/log/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - # A series of tests for dealing with deleted blueprints - def test_deleted_bp_00_setup(self): - """Setup a deleted blueprint for use in the tests""" - # Start by creating a new blueprint for this series of tests and then - # deleting it. - test_blueprint = {"description": "A blueprint that has been deleted", - "name":"deleted-blueprint", - "version": "0.0.1", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v0/blueprints/new", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.delete("/api/v0/blueprints/delete/deleted-blueprint") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - def test_deleted_bp_01_show(self): - """Test blueprint show with deleted blueprint""" - resp = self.server.get("/api/v0/blueprints/info/deleted-blueprint") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertEqual(data["errors"][0]["id"], "UnknownBlueprint") - - def test_deleted_bp_02_depsolve(self): - """Test blueprint depsolve with deleted blueprint""" - resp = self.server.get("/api/v0/blueprints/depsolve/deleted-blueprint") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertEqual(data["errors"][0]["id"], "UnknownBlueprint") - - def test_deleted_bp_03_diff(self): - """Test blueprint diff with deleted blueprint""" - resp = self.server.get("/api/v0/blueprints/diff/deleted-blueprint/NEWEST/WORKSPACE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertEqual(data["status"], False) - self.assertEqual(data["errors"][0]["id"], "UnknownBlueprint") - - def test_deleted_bp_04_freeze(self): - """Test blueprint freeze with deleted blueprint""" - resp = self.server.get("/api/v0/blueprints/freeze/deleted-blueprint") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertEqual(data["errors"][0]["id"], "UnknownBlueprint") - - def test_deleted_bp_05_tag(self): - """Test blueprint tag with deleted blueprint""" - resp = self.server.post("/api/v0/blueprints/tag/deleted-blueprint") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertEqual(data["errors"][0]["id"], "UnknownBlueprint") - - def test_404(self): - """Test that a 404 returns JSON""" - resp = self.server.get("/marmalade") - print(resp) - print(resp.data) - self.assertEqual(resp.status_code, 404) - self.assertEqual(json.loads(resp.data), { - "status": False, - "errors": [{ "id": "HTTPError", "code": 404, "msg": "Not Found" }] - }) - - def test_405(self): - """Test that a 405 returns JSON""" - resp = self.server.post("/api/status") - self.assertEqual(resp.status_code, 405) - self.assertEqual(json.loads(resp.data), { - "status": False, - "errors": [{ "id": "HTTPError", "code": 405, "msg": "Method Not Allowed" }] - }) - -class ServerAPIV1TestCase(unittest.TestCase): - @classmethod - def setUpClass(self): - self.rawhide = False - 
self.maxDiff = None - - repo_dir = tempfile.mkdtemp(prefix="lorax.test.repo.") - server.config["REPO_DIR"] = repo_dir - repo = open_or_create_repo(server.config["REPO_DIR"]) - server.config["GITLOCK"] = GitLock(repo=repo, lock=Lock(), dir=repo_dir) - - server.config["COMPOSER_CFG"] = configure(root_dir=repo_dir, test_config=True) - lifted.config.configure(server.config["COMPOSER_CFG"]) - - # Copy the shared files over to the directory tree we are using - for d in ["composer", "lifted"]: - shutil.copytree(joinpaths("./share", d), joinpaths(server.config["COMPOSER_CFG"].get("composer", "share_dir"), d)) - - errors = make_queue_dirs(server.config["COMPOSER_CFG"], os.getgid()) - if errors: - raise RuntimeError("\n".join(errors)) - - make_dnf_dirs(server.config["COMPOSER_CFG"], os.getuid(), os.getgid()) - - # copy over the test_server dnf repositories - dnf_repo_dir = server.config["COMPOSER_CFG"].get("composer", "repo_dir") - for f in glob("./tests/pylorax/repos/server-*.repo"): - shutil.copy2(f, dnf_repo_dir) - - # Modify fedora vs. rawhide tests when running on rawhide - if os.path.exists("/etc/yum.repos.d/fedora-rawhide.repo"): - self.rawhide = True - - # Need the substitution values to create the directories before we can create the dnf.Base for real - dbo = dnf.Base() - repo_dirs = ["/tmp/lorax-empty-repo-%s-%s" % (dbo.conf.substitutions["releasever"], dbo.conf.substitutions["basearch"]), - "/tmp/lorax-empty-repo-v1-%s-%s" % (dbo.conf.substitutions["releasever"], dbo.conf.substitutions["basearch"])] - # dnf repo baseurl has to point to an absolute directory, so we use /tmp/lorax-empty-repo/ in the files - # and create an empty repository. We now remove duplicate repo entries so we need a number of them. - for d in repo_dirs + ["/tmp/lorax-empty-repo/", "/tmp/lorax-other-empty-repo/", "/tmp/lorax-empty-repo-1/", - "/tmp/lorax-empty-repo-2/", "/tmp/lorax-empty-repo-3/", "/tmp/lorax-empty-repo-4/"]: - os.makedirs(d) - rc = os.system("createrepo_c %s" % d) - if rc != 0: - shutil.rmtree(d) - raise RuntimeError("Problem running createrepo_c, is it installed") - - server.config["DNFLOCK"] = DNFLock(server.config["COMPOSER_CFG"]) - - # Grab the substitution values for later - with server.config["DNFLOCK"].lock: - self.substitutions = server.config["DNFLOCK"].dbo.conf.substitutions - - if "releasever" not in self.substitutions or "basearch" not in self.substitutions: - raise RuntimeError("DNF is missing the releasever and basearch substitutions") - - # Include a message in /api/status output - server.config["TEMPLATE_ERRORS"] = ["Test message"] - - server.config['TESTING'] = True - self.server = server.test_client() - self.repo_dir = repo_dir - - self.examples_path = "./tests/pylorax/blueprints/" - - # Import the example blueprints - commit_recipe_directory(server.config["GITLOCK"].repo, "master", self.examples_path) - - # The sources delete test needs the name of a system repo, get it from /etc/yum.repos.d/ - self.system_repo = get_system_repo() - - start_queue_monitor(server.config["COMPOSER_CFG"], 0, 0) - - @classmethod - def tearDownClass(self): - shutil.rmtree(server.config["REPO_DIR"]) - # Clean up the empty repos - for repo_dir in glob("/tmp/lorax-*empty-repo*"): - shutil.rmtree(repo_dir) - - def test_02_blueprints_list(self): - """Test the /api/v1/blueprints/list route""" - list_dict = {"blueprints":["example-append", "example-atlas", "example-custom-base", "example-development", - "example-glusterfs", "example-http-server", "example-jboss", - "example-kubernetes"], "limit":20, "offset":0, 
"total":8} - resp = self.server.get("/api/v1/blueprints/list") - data = json.loads(resp.data) - self.assertEqual(data, list_dict) - - # Make sure limit=0 still returns the correct total - resp = self.server.get("/api/v1/blueprints/list?limit=0") - data = json.loads(resp.data) - self.assertEqual(data["limit"], 0) - self.assertEqual(data["offset"], 0) - self.assertEqual(data["total"], list_dict["total"]) - - def test_03_blueprints_info_1(self): - """Test the /api/v1/blueprints/info route with one blueprint""" - info_dict_1 = {"changes":[{"changed":False, "name":"example-http-server"}], - "errors":[], - "blueprints":[{"description":"An example http server with PHP and MySQL support.", - "modules":[HTTP_GLOB, - OPENID_GLOB, - MODSSL_GLOB, - PHP_GLOB, - PHPMYSQL_GLOB], - "name":"example-http-server", - "packages": [OPENSSH_GLOB, - RSYNC_GLOB, - TMUX_GLOB], - "groups": [], - "version": "0.0.1"}]} - resp = self.server.get("/api/v1/blueprints/info/example-http-server") - data = json.loads(resp.data) - self.assertEqual(data, info_dict_1) - - def test_03_blueprints_info_2(self): - """Test the /api/v1/blueprints/info route with 2 blueprints""" - info_dict_2 = {"changes":[{"changed":False, "name":"example-glusterfs"}, - {"changed":False, "name":"example-http-server"}], - "errors":[], - "blueprints":[{"description": "An example GlusterFS server with samba", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "name":"example-glusterfs", - "packages":[SAMBA_GLOB], - "groups": [], - "version": "0.0.1"}, - {"description":"An example http server with PHP and MySQL support.", - "modules":[HTTP_GLOB, - OPENID_GLOB, - MODSSL_GLOB, - PHP_GLOB, - PHPMYSQL_GLOB], - "name":"example-http-server", - "packages": [OPENSSH_GLOB, - RSYNC_GLOB, - TMUX_GLOB], - "groups": [], - "version": "0.0.1"}, - ]} - resp = self.server.get("/api/v1/blueprints/info/example-http-server,example-glusterfs") - data = json.loads(resp.data) - self.assertEqual(data, info_dict_2) - - def test_03_blueprints_info_none(self): - """Test the /api/v1/blueprints/info route with an unknown blueprint""" - resp = self.server.get("/api/v1/blueprints/info/missing-blueprint") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertEqual(data["errors"][0]["id"], "UnknownBlueprint") - - def test_04_blueprints_changes(self): - """Test the /api/v1/blueprints/changes route""" - resp = self.server.get("/api/v1/blueprints/changes/example-http-server") - data = json.loads(resp.data) - - # Can't compare a whole dict since commit hash and timestamps will change. - # Should have 1 commit (for now), with a matching message. 
- self.assertEqual(data["limit"], 20) - self.assertEqual(data["offset"], 0) - self.assertEqual(len(data["errors"]), 0) - self.assertEqual(len(data["blueprints"]), 1) - self.assertEqual(data["blueprints"][0]["name"], "example-http-server") - self.assertEqual(len(data["blueprints"][0]["changes"]), 1) - - # Make sure limit=0 still returns the correct total - resp = self.server.get("/api/v1/blueprints/changes/example-http-server?limit=0") - data = json.loads(resp.data) - self.assertEqual(data["limit"], 0) - self.assertEqual(data["offset"], 0) - self.assertEqual(data["blueprints"][0]["total"], 1) - - def test_04a_blueprints_diff_empty_ws(self): - """Test the /api/v1/diff/NEWEST/WORKSPACE with empty workspace""" - resp = self.server.get("/api/v1/blueprints/diff/example-glusterfs/NEWEST/WORKSPACE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data, {"diff": []}) - - def test_05_blueprints_new_json(self): - """Test the /api/v1/blueprints/new route with json blueprint""" - test_blueprint = {"description": "An example GlusterFS server with samba", - "name":"example-glusterfs", - "version": "0.2.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v1/blueprints/new", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v1/blueprints/info/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0], test_blueprint) - - def test_06_blueprints_new_toml(self): - """Test the /api/v1/blueprints/new route with toml blueprint""" - test_blueprint = open(joinpaths(self.examples_path, "example-glusterfs.toml"), "rb").read() - resp = self.server.post("/api/v1/blueprints/new", - data=test_blueprint, - content_type="text/x-toml") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v1/blueprints/info/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual(len(blueprints), 1) - - # Returned blueprint has had its version bumped - test_blueprint = toml.loads(test_blueprint) - test_blueprint["version"] = "0.2.1" - - # The test_blueprint generated by toml.loads will not have any groups property - # defined, since there are no groups listed. However, /api/v0/blueprints/new will - # return an object with groups=[]. So, add that here to keep the equality test - # working. 
- test_blueprint["groups"] = [] - - self.assertEqual(blueprints[0], test_blueprint) - - def test_07_blueprints_ws_json(self): - """Test the /api/v1/blueprints/workspace route with json blueprint""" - test_blueprint = {"description": "An example GlusterFS server with samba, ws version", - "name":"example-glusterfs", - "version": "0.3.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v1/blueprints/workspace", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v1/blueprints/info/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0], test_blueprint) - changes = data.get("changes") - self.assertEqual(len(changes), 1) - self.assertEqual(changes[0], {"name":"example-glusterfs", "changed":True}) - - def test_08_blueprints_ws_toml(self): - """Test the /api/v1/blueprints/workspace route with toml blueprint""" - test_blueprint = {"description": "An example GlusterFS server with samba, ws version", - "name":"example-glusterfs", - "version": "0.4.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v1/blueprints/workspace", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v1/blueprints/info/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0], test_blueprint) - changes = data.get("changes") - self.assertEqual(len(changes), 1) - self.assertEqual(changes[0], {"name":"example-glusterfs", "changed":True}) - - def test_09_blueprints_unknown_ws_delete(self): - """Test DELETE /api/v1/blueprints/workspace/missing-blueprint""" - resp = self.server.delete("/api/v1/blueprints/workspace/missing-blueprint") - self.assertEqual(resp.status_code, 400) - data = json.loads(resp.data) - self.assertEqual(data["status"], False) - - def test_09_blueprints_ws_delete(self): - """Test DELETE /api/v1/blueprints/workspace/""" - # Write to the workspace first, just use the test_blueprints_ws_json test for this - self.test_07_blueprints_ws_json() - - # Delete it - resp = self.server.delete("/api/v1/blueprints/workspace/example-glusterfs") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - # Make sure it isn't the workspace copy and that changed is False - resp = self.server.get("/api/v1/blueprints/info/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0]["version"], "0.2.1") - changes = data.get("changes") - self.assertEqual(len(changes), 1) - self.assertEqual(changes[0], {"name":"example-glusterfs", "changed":False}) - - def test_10_blueprints_delete(self): - """Test DELETE /api/v1/blueprints/delete/""" - - # Push a new workspace blueprint first - test_blueprint = {"description": "An example GlusterFS server with samba, ws version", - "name":"example-glusterfs", - "version": "1.4.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - 
"packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - resp = self.server.post("/api/v1/blueprints/workspace", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - # Make sure the workspace file is present - self.assertEqual(os.path.exists(joinpaths(self.repo_dir, "git/workspace/master/example-glusterfs.toml")), True) - - # This should delete the git blueprint and the workspace copy - resp = self.server.delete("/api/v1/blueprints/delete/example-glusterfs") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - # Make sure example-glusterfs is no longer in the list of blueprints - resp = self.server.get("/api/v1/blueprints/list") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertEqual("example-glusterfs" in blueprints, False) - - # Make sure the workspace file is gone - self.assertEqual(os.path.exists(joinpaths(self.repo_dir, "git/workspace/master/example-glusterfs.toml")), False) - - # This has to run after the above test - def test_10_blueprints_delete_2(self): - """Test running a compose with the deleted blueprint""" - # Trying to start a compose with a deleted blueprint should fail - test_compose = {"blueprint_name": "example-glusterfs", - "compose_type": "tar", - "branch": "master"} - - resp = self.server.post("/api/v1/compose?test=2", - data=json.dumps(test_compose), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Compose of deleted blueprint did not fail: %s" % data) - - def test_11_blueprints_undo(self): - """Test POST /api/v1/blueprints/undo//""" - resp = self.server.get("/api/v1/blueprints/changes/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - - # Revert it to the first commit - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - changes = blueprints[0].get("changes") - self.assertEqual(len(changes) > 1, True) - - # Revert it to the first commit - commit = changes[-1]["commit"] - resp = self.server.post("/api/v1/blueprints/undo/example-glusterfs/%s" % commit) - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v1/blueprints/changes/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - changes = blueprints[0].get("changes") - self.assertEqual(len(changes) > 1, True) - - expected_msg = "example-glusterfs.toml reverted to commit %s" % commit - self.assertEqual(changes[0]["message"], expected_msg) - - def test_12_blueprints_tag(self): - """Test POST /api/v1/blueprints/tag/""" - resp = self.server.post("/api/v1/blueprints/tag/example-glusterfs") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v1/blueprints/changes/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - - # Revert it to the first commit - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - changes = blueprints[0].get("changes") - self.assertEqual(len(changes) > 1, True) - self.assertEqual(changes[0]["revision"], 1) - - def test_13_blueprints_diff(self): - """Test /api/v1/blueprints/diff///""" - resp = self.server.get("/api/v1/blueprints/changes/example-glusterfs") - data = json.loads(resp.data) - 
self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - changes = blueprints[0].get("changes") - self.assertEqual(len(changes) >= 2, True) - - from_commit = changes[1].get("commit") - self.assertNotEqual(from_commit, None) - to_commit = changes[0].get("commit") - self.assertNotEqual(to_commit, None) - - print("from: %s" % from_commit) - print("to: %s" % to_commit) - print(changes) - - # Get the differences between the two commits - resp = self.server.get("/api/v1/blueprints/diff/example-glusterfs/%s/%s" % (from_commit, to_commit)) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data, {"diff": [{"new": {"Version": "0.0.1"}, "old": {"Version": "0.2.1"}}]}) - - # Write to the workspace and check the diff - test_blueprint = {"description": "An example GlusterFS server with samba, ws version", - "name":"example-glusterfs", - "version": "0.3.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB]} - - resp = self.server.post("/api/v1/blueprints/workspace", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - # Get the differences between the newest commit and the workspace - resp = self.server.get("/api/v1/blueprints/diff/example-glusterfs/NEWEST/WORKSPACE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - result = {"diff": [{"new": {"Description": "An example GlusterFS server with samba, ws version"}, - "old": {"Description": "An example GlusterFS server with samba"}}, - {"new": {"Version": "0.3.0"}, - "old": {"Version": "0.0.1"}}, - {"new": {"Package": TMUX_GLOB}, - "old": None}]} - self.assertEqual(data, result) - - def test_14_blueprints_depsolve(self): - """Test /api/v1/blueprints/depsolve/""" - resp = self.server.get("/api/v1/blueprints/depsolve/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0]["blueprint"]["name"], "example-glusterfs") - self.assertEqual(len(blueprints[0]["dependencies"]) > 10, True) - self.assertFalse(data.get("errors")) - - def test_14_blueprints_depsolve_empty(self): - """Test /api/v1/blueprints/depsolve/ on empty blueprint""" - test_blueprint = {"description": "An empty blueprint", - "name":"void", - "version": "0.1.0"} - resp = self.server.post("/api/v1/blueprints/new", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.get("/api/v1/blueprints/depsolve/void") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0]["blueprint"]["name"], "void") - self.assertEqual(blueprints[0]["blueprint"]["packages"], []) - self.assertEqual(blueprints[0]["blueprint"]["modules"], []) - self.assertEqual(blueprints[0]["dependencies"], []) - self.assertFalse(data.get("errors")) - - def test_15_blueprints_freeze(self): - """Test /api/v1/blueprints/freeze/""" - resp = self.server.get("/api/v1/blueprints/freeze/example-glusterfs") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - 
self.assertEqual(len(blueprints), 1) - self.assertTrue(len(blueprints[0]["blueprint"]["modules"]) > 0) - self.assertEqual(blueprints[0]["blueprint"]["name"], "example-glusterfs") - evra = blueprints[0]["blueprint"]["modules"][0]["version"] - self.assertEqual(len(evra) > 10, True) - - def test_projects_list(self): - """Test /api/v1/projects/list""" - resp = self.server.get("/api/v1/projects/list") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - projects = data.get("projects") - self.assertEqual(len(projects) > 10, True) - - expected_total = data["total"] - - # Make sure limit=0 still returns the correct total - resp = self.server.get("/api/v1/projects/list?limit=0") - data = json.loads(resp.data) - self.assertEqual(data["total"], expected_total) - - def test_projects_info(self): - """Test /api/v1/projects/info/""" - resp = self.server.get("/api/v1/projects/info/bash") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - projects = data.get("projects") - self.assertEqual(len(projects) > 0, True) - self.assertEqual(projects[0]["name"], "bash") - self.assertEqual(projects[0]["builds"][0]["source"]["license"], "GPLv3+") - - def test_projects_depsolve(self): - """Test /api/v1/projects/depsolve/""" - resp = self.server.get("/api/v1/projects/depsolve/bash") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - deps = data.get("projects") - self.assertEqual(len(deps) > 10, True) - self.assertTrue("basesystem" in [dep["name"] for dep in deps]) - - def test_projects_source_00_list(self): - """Test /api/v1/projects/source/list""" - resp = self.server.get("/api/v1/projects/source/list") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data["sources"]) - # Make sure it lists some common sources - for r in ["lorax-1", "lorax-2", "lorax-3", "lorax-4", "other-repo", "single-repo"]: - self.assertTrue(r in data["sources"] ) - - # Make sure the duplicate repo is not listed - self.assertFalse("single-repo-duplicate" in data["sources"]) - - def test_projects_source_01_info(self): - """Test /api/v1/projects/source/info""" - resp = self.server.get("/api/v1/projects/source/info/lorax-3") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - sources = data["sources"] - self.assertTrue("lorax-3" in sources) - self.assertTrue("id" in sources["lorax-3"]) - self.assertEqual(sources["lorax-3"]["id"], "lorax-3") - self.assertTrue("name" in sources["lorax-3"]) - self.assertEqual(sources["lorax-3"]["name"], "Lorax test repo 3") - - def test_projects_source_01_info_comma(self): - """Test /api/v1/projects/source/info/lorax-3,lorax-2""" - resp = self.server.get("/api/v1/projects/source/info/lorax-3,lorax-2") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data["sources"]) - sources = data["sources"] - self.assertEqual(len(sources), 2) - self.assertTrue("lorax-3" in sources) - self.assertTrue("id" in sources["lorax-3"]) - self.assertEqual(sources["lorax-3"]["id"], "lorax-3") - self.assertTrue("name" in sources["lorax-3"]) - self.assertEqual(sources["lorax-3"]["name"], "Lorax test repo 3") - - self.assertTrue("lorax-2" in sources) - self.assertTrue("id" in sources["lorax-2"]) - self.assertEqual(sources["lorax-2"]["id"], "lorax-2") - self.assertTrue("name" in sources["lorax-2"]) - self.assertEqual(sources["lorax-2"]["name"], "Lorax test repo 2") - - def test_projects_source_01_info_toml(self): - """Test /api/v1/projects/source/info TOML output""" - resp = self.server.get("/api/v1/projects/source/info/lorax-3?format=toml") 
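# /api/v1/projects/list honors limit/offset and reports the real total even
# when limit=0, which makes a zero-length request a cheap way to size the
# result set. A sketch of paging through the whole list on that basis; the
# helper name and page size are assumptions:
import json

def all_projects(test, page_size=100):
    """Collect every project by walking limit/offset windows."""
    resp = test.server.get("/api/v1/projects/list?limit=0")
    total = json.loads(resp.data)["total"]
    projects = []
    for offset in range(0, total, page_size):
        resp = test.server.get("/api/v1/projects/list?limit=%d&offset=%d"
                               % (page_size, offset))
        projects.extend(json.loads(resp.data)["projects"])
    return projects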
-        data = toml.loads(resp.data)
-        self.assertNotEqual(data, None)
-        print(data)
-        sources = data
-        self.assertTrue("lorax-3" in sources)
-        self.assertTrue("id" in sources["lorax-3"])
-        self.assertEqual(sources["lorax-3"]["id"], "lorax-3")
-        self.assertTrue("name" in sources["lorax-3"])
-        self.assertEqual(sources["lorax-3"]["name"], "Lorax test repo 3")
-
-    def test_projects_source_01_info_wild(self):
-        """Test /api/v1/projects/source/info/* wildcard"""
-        resp = self.server.get("/api/v1/projects/source/info/*")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        print(data["sources"])
-        sources = data["sources"]
-        self.assertTrue(len(sources) > 1)
-        self.assertTrue("lorax-3" in sources)
-        self.assertTrue("id" in sources["lorax-3"])
-        self.assertEqual(sources["lorax-3"]["id"], "lorax-3")
-        self.assertTrue("name" in sources["lorax-3"])
-        self.assertEqual(sources["lorax-3"]["name"], "Lorax test repo 3")
-
-    def test_projects_source_01_new_json(self):
-        """Test /api/v1/projects/source/new with a new json source"""
-        json_source = open("./tests/pylorax/source/test-repo-v1.json").read()
-        self.assertTrue(len(json_source) > 0)
-        resp = self.server.post("/api/v1/projects/source/new",
-                                data=json_source,
-                                content_type="application/json")
-        data = json.loads(resp.data)
-        self.assertEqual(data, {"status":True})
-
-        # Was it added, and is it correct?
-        resp = self.server.get("/api/v1/projects/source/info/new-repo-1-v1")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        sources = data["sources"]
-        self.assertTrue("new-repo-1-v1" in sources)
-        self.assertTrue("id" in sources["new-repo-1-v1"])
-        self.assertEqual(sources["new-repo-1-v1"]["id"], "new-repo-1-v1")
-        self.assertTrue("name" in sources["new-repo-1-v1"])
-        self.assertEqual(sources["new-repo-1-v1"]["name"], "API v1 json new repo")
-
-    def test_projects_source_02_new_json(self):
-        """Test /api/v1/projects/source/new with a new json source missing id field"""
-        json_source = open("./tests/pylorax/source/test-repo.json").read()
-        self.assertTrue(len(json_source) > 0)
-        resp = self.server.post("/api/v1/projects/source/new",
-                                data=json_source,
-                                content_type="application/json")
-        self.assertEqual(resp.status_code, 400)
-        data = json.loads(resp.data)
-        self.assertEqual(data["status"], False)
-
-    def test_projects_source_01_new_toml(self):
-        """Test /api/v1/projects/source/new with a new toml source"""
-        toml_source = open("./tests/pylorax/source/test-repo-v1.toml").read()
-        self.assertTrue(len(toml_source) > 0)
-        resp = self.server.post("/api/v1/projects/source/new",
-                                data=toml_source,
-                                content_type="text/x-toml")
-        data = json.loads(resp.data)
-        self.assertEqual(data, {"status":True})
-
-        # Was it added, and is it correct?
-        resp = self.server.get("/api/v1/projects/source/info/new-repo-2-v1")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        sources = data["sources"]
-        self.assertTrue("new-repo-2-v1" in sources)
-        self.assertTrue("id" in sources["new-repo-2-v1"])
-        self.assertEqual(sources["new-repo-2-v1"]["id"], "new-repo-2-v1")
-        self.assertTrue("name" in sources["new-repo-2-v1"])
-        self.assertEqual(sources["new-repo-2-v1"]["name"], "API v1 toml new repo")
-
-    def test_projects_source_01_new_toml_vars(self):
-        """Test /api/v1/projects/source/new with a new toml source using vars"""
-        toml_source = open("./tests/pylorax/source/test-repo-v1-vars.toml").read()
-        self.assertTrue(len(toml_source) > 0)
-        resp = self.server.post("/api/v1/projects/source/new",
-                                data=toml_source,
-                                content_type="text/x-toml")
-        data = json.loads(resp.data)
-        self.assertEqual(data, {"status":True})
-
-        # Was it added, and is it correct?
-        resp = self.server.get("/api/v1/projects/source/info/new-repo-2-v1-vars")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        sources = data["sources"]
-        self.assertTrue("new-repo-2-v1-vars" in sources)
-        self.assertTrue(self.substitutions["releasever"] in sources["new-repo-2-v1-vars"]["url"])
-        self.assertTrue(self.substitutions["basearch"] in sources["new-repo-2-v1-vars"]["url"])
-        self.assertTrue(self.substitutions["releasever"] in sources["new-repo-2-v1-vars"]["gpgkey_urls"][0])
-        self.assertTrue(self.substitutions["basearch"] in sources["new-repo-2-v1-vars"]["gpgkey_urls"][0])
-
-    def test_projects_source_02_new_toml(self):
-        """Test /api/v1/projects/source/new with a new toml source w/o id field"""
-        toml_source = open("./tests/pylorax/source/test-repo.toml").read()
-        self.assertTrue(len(toml_source) > 0)
-        resp = self.server.post("/api/v1/projects/source/new",
-                                data=toml_source,
-                                content_type="text/x-toml")
-        self.assertEqual(resp.status_code, 400)
-        data = json.loads(resp.data)
-        self.assertEqual(data["status"], False)
-
-    def test_projects_source_00_replace(self):
-        """Test /api/v1/projects/source/new with a replacement source"""
-        toml_source = open("./tests/pylorax/source/replace-repo.toml").read()
-        self.assertTrue(len(toml_source) > 0)
-        resp = self.server.post("/api/v1/projects/source/new",
-                                data=toml_source,
-                                content_type="text/x-toml")
-        data = json.loads(resp.data)
-        self.assertEqual(data, {"status":True})
-
-        # Check to see if it was really changed
-        resp = self.server.get("/api/v1/projects/source/info/single-repo")
-        data = json.loads(resp.data)
-        self.assertNotEqual(data, None)
-        sources = data["sources"]
-        self.assertTrue("single-repo" in sources)
-        repo = sources["single-repo"]
-        self.assertEqual(repo["check_ssl"], False)
-        self.assertTrue("gpgkey_urls" not in repo)
-
-    def test_projects_source_00_replace_system(self):
-        """Test /api/v1/projects/source/new with a replacement system source"""
-        if self.rawhide:
-            toml_source = open("./tests/pylorax/source/replace-rawhide.toml").read()
-        else:
-            toml_source = open("./tests/pylorax/source/replace-fedora.toml").read()
-        self.assertTrue(len(toml_source) > 0)
-        resp = self.server.post("/api/v1/projects/source/new",
-                                data=toml_source,
-                                content_type="text/x-toml")
-        self.assertEqual(resp.status_code, 400)
-        data = json.loads(resp.data)
-        self.assertEqual(data["status"], False)
-
-    def test_projects_source_01_replace(self):
-        """Test /api/v1/projects/source/new with a replacement source"""
-        toml_source = open("./tests/pylorax/source/replace-repo.toml").read()
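# The replacement tests post a TOML body to /api/v1/projects/source/new to
# overwrite an existing source such as "single-repo". A hedged sketch of what
# a v1 source definition can look like; the field values are illustrative,
# not the contents of replace-repo.toml:
replacement_source = """
id = "single-repo"
name = "Replacement single repo"
type = "yum-baseurl"
url = "file:///tmp/lorax-empty-repo/"
check_ssl = false
check_gpg = false
"""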
self.assertTrue(len(toml_source) > 0) - resp = self.server.post("/api/v1/projects/source/new", - data=toml_source, - content_type="text/x-toml") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - # Check to see if it was really changed - resp = self.server.get("/api/v1/projects/source/info/single-repo") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - sources = data["sources"] - self.assertTrue("single-repo" in sources) - repo = sources["single-repo"] - self.assertEqual(repo["check_ssl"], False) - self.assertTrue("gpgkey_urls" not in repo) - - def test_projects_source_01_replace_system(self): - """Test /api/v1/projects/source/new with a replacement system source""" - if self.rawhide: - toml_source = open("./tests/pylorax/source/replace-rawhide.toml").read() - else: - toml_source = open("./tests/pylorax/source/replace-fedora.toml").read() - self.assertTrue(len(toml_source) > 0) - resp = self.server.post("/api/v1/projects/source/new", - data=toml_source, - content_type="text/x-toml") - self.assertEqual(resp.status_code, 400) - data = json.loads(resp.data) - self.assertEqual(data["status"], False) - - def test_projects_source_00_bad_url(self): - """Test /api/v1/projects/source/new with a new source that has an invalid url""" - toml_source = open("./tests/pylorax/source/bad-repo.toml").read() - self.assertTrue(len(toml_source) > 0) - resp = self.server.post("/api/v1/projects/source/new", - data=toml_source, - content_type="text/x-toml") - self.assertEqual(resp.status_code, 400) - data = json.loads(resp.data) - self.assertEqual(data["status"], False) - - def test_projects_source_01_bad_url(self): - """Test /api/v1/projects/source/new with a new source that has an invalid url""" - toml_source = open("./tests/pylorax/source/bad-repo.toml").read() - self.assertTrue(len(toml_source) > 0) - resp = self.server.post("/api/v1/projects/source/new", - data=toml_source, - content_type="text/x-toml") - self.assertEqual(resp.status_code, 400) - data = json.loads(resp.data) - self.assertEqual(data["status"], False) - - def test_projects_source_01_delete_system(self): - """Test /api/v1/projects/source/delete a system source""" - if self.rawhide: - resp = self.server.delete("/api/v1/projects/source/delete/rawhide") - else: - resp = self.server.delete("/api/v1/projects/source/delete/fedora") - self.assertEqual(resp.status_code, 400) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False) - - # Make sure fedora/rawhide is still listed - resp = self.server.get("/api/v1/projects/source/list") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(self.system_repo in data["sources"], "%s not in %s" % (self.system_repo, data["sources"])) - - def test_projects_source_02_delete_single(self): - """Test /api/v1/projects/source/delete a single source""" - resp = self.server.delete("/api/v1/projects/source/delete/single-repo") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data, {"status":True}) - - # Make sure single-repo isn't listed - resp = self.server.get("/api/v1/projects/source/list") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue("single-repo" not in data["sources"]) - - def test_projects_source_03_delete_unknown(self): - """Test /api/v1/projects/source/delete an unknown source""" - resp = self.server.delete("/api/v1/projects/source/delete/unknown-repo") - self.assertEqual(resp.status_code, 400) - data = 
json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["status"], False)
-
- def test_projects_source_04_delete_multi(self):
- """Test /api/v1/projects/source/delete a source from a file with multiple sources"""
- resp = self.server.delete("/api/v1/projects/source/delete/lorax-3")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data, {"status":True})
-
- # Make sure lorax-3 isn't listed
- resp = self.server.get("/api/v1/projects/source/list")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertTrue("lorax-3" not in data["sources"])
-
- def test_modules_list(self):
- """Test /api/v1/modules/list"""
- resp = self.server.get("/api/v1/modules/list")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- modules = data.get("modules")
- self.assertEqual(len(modules) > 10, True)
- self.assertEqual(modules[0]["group_type"], "rpm")
-
- expected_total = data["total"]
-
- resp = self.server.get("/api/v1/modules/list/d*")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- modules = data.get("modules")
- self.assertEqual(len(modules) > 0, True)
- self.assertEqual(modules[0]["name"].startswith("d"), True)
-
- # Make sure limit=0 still returns the correct total
- resp = self.server.get("/api/v1/modules/list?limit=0")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["total"], expected_total)
-
- def test_modules_info(self):
- """Test /api/v1/modules/info"""
- resp = self.server.get("/api/v1/modules/info/bash")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- modules = data.get("modules")
- self.assertEqual(len(modules) > 0, True)
- self.assertEqual(modules[0]["name"], "bash")
- self.assertTrue("basesystem" in [dep["name"] for dep in modules[0]["dependencies"]])
-
- def test_blueprint_new_branch(self):
- """Test the /api/v1/blueprints/new route with a new branch"""
- test_blueprint = {"description": "An example GlusterFS server with samba",
- "name":"example-glusterfs",
- "version": "0.2.0",
- "modules":[GLUSTERFS_GLOB,
- GLUSTERFSCLI_GLOB],
- "packages":[SAMBA_GLOB,
- TMUX_GLOB],
- "groups": []}
-
- resp = self.server.post("/api/v1/blueprints/new?branch=test",
- data=json.dumps(test_blueprint),
- content_type="application/json")
- data = json.loads(resp.data)
- self.assertEqual(data, {"status":True})
-
- resp = self.server.get("/api/v1/blueprints/info/example-glusterfs?branch=test")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- blueprints = data.get("blueprints")
- self.assertEqual(len(blueprints), 1)
- self.assertEqual(blueprints[0], test_blueprint)
-
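# "total" is independent of "limit", so the limit=0 request asserted above is a
# cheap way to ask how many results exist before paging through them. A
# client-side sketch of that paging pattern -- not part of this test class;
# "offset" is the WELDR API's companion paging parameter, the page size is
# arbitrary, and json is already imported at the top of this module:
def _list_all_modules_sketch(client, page_size=100):
    """Page through /api/v1/modules/list using limit/offset until total is reached."""
    resp = client.get("/api/v1/modules/list?limit=0")
    total = json.loads(resp.data)["total"]        # full count, no results returned
    modules = []
    while len(modules) < total:
        resp = client.get("/api/v1/modules/list?limit=%d&offset=%d" % (page_size, len(modules)))
        modules.extend(json.loads(resp.data)["modules"])
    return modules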
- """ - self.assertEqual(200, response.status_code) - self.assertTrue(len(response.data) > 1024) - # look for some well known strings inside the documentation - self.assertRegex(response.data.decode("utf-8"), r"Lorax [\d.]+ documentation") - self.assertRegex(response.data.decode("utf-8"), r"Copyright \d+, Red Hat, Inc.") - - def test_api_docs(self): - """Test the /api/docs/""" - resp = self.server.get("/api/docs/") - self.assert_documentation(resp) - - def test_api_docs_with_existing_path(self): - """Test the /api/docs/modules.html""" - resp = self.server.get("/api/docs/modules.html") - self.assert_documentation(resp) - - def test_compose_01_types(self): - """Test the /api/v1/compose/types route""" - resp = self.server.get("/api/v1/compose/types") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual({"name": "tar", "enabled": True} in data["types"], True) - - def test_compose_02_bad_type(self): - """Test that using an unsupported image type failes""" - test_compose = {"blueprint_name": "example-glusterfs", - "compose_type": "snakes", - "branch": "master"} - - resp = self.server.post("/api/v1/compose?test=1", - data=json.dumps(test_compose), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Failed to fail to start test compose: %s" % data) - self.assertEqual(data["errors"], [{"id": BAD_COMPOSE_TYPE, "msg": "Invalid compose type (snakes), must be one of ['alibaba', 'ami', 'ext4-filesystem', 'google', 'hyper-v', 'live-iso', 'liveimg-tar', 'openstack', 'partitioned-disk', 'qcow2', 'tar', 'vhd', 'vmdk']"}], - "Failed to get errors: %s" % data) - - def test_compose_03_status_fail(self): - """Test that requesting a status for a bad uuid is empty""" - resp = self.server.get("/api/v1/compose/status/NO-UUID-TO-SEE-HERE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["uuids"], [], "Failed to get empty result bad uuid: %s" % data) - - def test_compose_04_cancel_fail(self): - """Test that requesting a cancel for a bad uuid fails.""" - resp = self.server.delete("/api/v1/compose/cancel/NO-UUID-TO-SEE-HERE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Failed to get an error for a bad uuid: %s" % data) - self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "NO-UUID-TO-SEE-HERE is not a valid build uuid"}], - "Failed to get errors: %s" % data) - - def test_compose_05_delete_fail(self): - """Test that requesting a delete for a bad uuid fails.""" - resp = self.server.delete("/api/v1/compose/delete/NO-UUID-TO-SEE-HERE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "no-uuid-to-see-here is not a valid build uuid"}], - "Failed to get an error for a bad uuid: %s" % data) - - def test_compose_06_info_fail(self): - """Test that requesting info for a bad uuid fails.""" - resp = self.server.get("/api/v1/compose/info/NO-UUID-TO-SEE-HERE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Failed to get an error for a bad uuid: %s" % data) - self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "NO-UUID-TO-SEE-HERE is not a valid build uuid"}], - "Failed to get errors: %s" % data) - - def test_compose_07_metadata_fail(self): - """Test that requesting metadata for a bad uuid fails.""" - resp = 
self.server.get("/api/v1/compose/metadata/NO-UUID-TO-SEE-HERE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Failed to get an error for a bad uuid: %s" % data) - self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "NO-UUID-TO-SEE-HERE is not a valid build uuid"}], - "Failed to get errors: %s" % data) - - def test_compose_08_results_fail(self): - """Test that requesting results for a bad uuid fails.""" - resp = self.server.get("/api/v1/compose/results/NO-UUID-TO-SEE-HERE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Failed to get an error for a bad uuid: %s" % data) - self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "NO-UUID-TO-SEE-HERE is not a valid build uuid"}], - "Failed to get errors: %s" % data) - - def test_compose_09_logs_fail(self): - """Test that requesting logs for a bad uuid fails.""" - resp = self.server.get("/api/v1/compose/logs/NO-UUID-TO-SEE-HERE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Failed to get an error for a bad uuid: %s" % data) - self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "NO-UUID-TO-SEE-HERE is not a valid build uuid"}], - "Failed to get errors: %s" % data) - - def test_compose_10_log_fail(self): - """Test that requesting log for a bad uuid fails.""" - resp = self.server.get("/api/v1/compose/log/NO-UUID-TO-SEE-HERE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], False, "Failed to get an error for a bad uuid: %s" % data) - self.assertEqual(data["errors"], [{"id": UNKNOWN_UUID, "msg": "NO-UUID-TO-SEE-HERE is not a valid build uuid"}], - "Failed to get errors: %s" % data) - - def test_compose_11_create_failed(self): - """Test the /api/v1/compose routes with a failed test compose""" - test_compose = {"blueprint_name": "example-glusterfs", - "compose_type": "tar", - "branch": "master"} - - resp = self.server.post("/api/v1/compose?test=1", - data=json.dumps(test_compose), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data) - - build_id = data["build_id"] - - # Is it in the queue list (either new or run is fine, based on timing) - resp = self.server.get("/api/v1/compose/queue") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["new"] + data["run"]] - self.assertEqual(build_id in ids, True, "Failed to add build to the queue") - - # V1 API should have the uploads details in the results - uploads = all("uploads" in e for e in data["new"] + data["run"]) - self.assertTrue(uploads, "V1 API should include 'uploads' field") - - # Wait for it to start - self.assertEqual(_wait_for_status(self, build_id, ["RUNNING"], api=1), True, "Failed to start test compose") - - # Wait for it to finish - self.assertEqual(_wait_for_status(self, build_id, ["FAILED"], api=1), True, "Failed to finish test compose") - - resp = self.server.get("/api/v1/compose/info/%s" % build_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["queue_status"], "FAILED", "Build not in FAILED state") - - # Test the /api/v1/compose/failed route - resp = self.server.get("/api/v1/compose/failed") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["failed"]] - 
self.assertEqual(build_id in ids, True, "Failed build not listed by /compose/failed") - - # V1 API should have the uploads details in the results - uploads = all("uploads" in e for e in data["failed"]) - self.assertTrue(uploads, "V1 API should include 'uploads' field") - - # Test the /api/v1/compose/finished route - resp = self.server.get("/api/v1/compose/finished") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["finished"], [], "Finished build not listed by /compose/finished") - - # Test the /api/v1/compose/status/ route - resp = self.server.get("/api/v1/compose/status/%s" % build_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [(e["id"], e["queue_status"]) for e in data["uuids"]] - self.assertEqual((build_id, "FAILED") in ids, True, "Failed build not listed by /compose/status") - - # V1 API should have the uploads details in the results - uploads = all("uploads" in e for e in data["uuids"]) - self.assertTrue(uploads, "V1 API should include 'uploads' field") - - # Test the /api/v1/compose/cancel/ route - resp = self.server.post("/api/v1/compose?test=1", - data=json.dumps(test_compose), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data) - - cancel_id = data["build_id"] - - # Wait for it to start - self.assertEqual(_wait_for_status(self, cancel_id, ["RUNNING"], api=1), True, "Failed to start test compose") - - # Cancel the build - resp = self.server.delete("/api/v1/compose/cancel/%s" % cancel_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to cancel test compose: %s" % data) - - # Delete the failed build - # Test the /api/v1/compose/delete/ route - resp = self.server.delete("/api/v1/compose/delete/%s" % build_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [(e["uuid"], e["status"]) for e in data["uuids"]] - self.assertEqual((build_id, True) in ids, True, "Failed to delete test compose: %s" % data) - - # Make sure the failed list is empty - resp = self.server.get("/api/v1/compose/failed") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["failed"], [], "Failed to delete the failed build: %s" % data) - - def test_compose_12_create_finished(self): - """Test the /api/v1/compose routes with a finished test compose""" - test_compose = {"blueprint_name": "example-custom-base", - "compose_type": "tar", - "branch": "master"} - - resp = self.server.post("/api/v1/compose?test=2", - data=json.dumps(test_compose), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data) - - build_id = data["build_id"] - - # Is it in the queue list (either new or run is fine, based on timing) - resp = self.server.get("/api/v1/compose/queue") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["new"] + data["run"]] - self.assertEqual(build_id in ids, True, "Failed to add build to the queue") - - # V1 API should have the uploads details in the results - uploads = all("uploads" in e for e in data["new"] + data["run"]) - self.assertTrue(uploads, "V1 API should include 'uploads' field") - - # Wait for it to start - self.assertEqual(_wait_for_status(self, build_id, ["RUNNING"], api=1), True, "Failed to start test compose") 
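# _wait_for_status() is defined earlier in this module and its body is not part
# of this hunk. A rough sketch of the polling it performs, inferred only from
# how it is called here (test case, build uuid, list of acceptable states, API
# version) and from the /compose/status responses used above; the 1-second
# interval and the timeout are assumptions:
#
#   def _wait_for_status(test, build_id, wait_status, api=0, timeout=60):
#       start = time.time()
#       while time.time() - start < timeout:
#           resp = test.server.get("/api/v%d/compose/status/%s" % (api, build_id))
#           states = [e["queue_status"] for e in json.loads(resp.data)["uuids"]]
#           if any(s in wait_status for s in states):
#               return True
#           time.sleep(1)
#       return False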
- - # Wait for it to finish - self.assertEqual(_wait_for_status(self, build_id, ["FINISHED"], api=1), True, "Failed to finish test compose") - - resp = self.server.get("/api/v1/compose/info/%s" % build_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["queue_status"], "FINISHED", "Build not in FINISHED state") - - # Test the /api/v1/compose/finished route - resp = self.server.get("/api/v1/compose/finished") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["finished"]] - self.assertEqual(build_id in ids, True, "Finished build not listed by /compose/finished") - - # V1 API should have the uploads details in the results - uploads = all("uploads" in e for e in data["finished"]) - self.assertTrue(uploads, "V1 API should include 'uploads' field") - - # Test the /api/v1/compose/failed route - resp = self.server.get("/api/v1/compose/failed") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["failed"], [], "Failed build not listed by /compose/failed") - - # Test the /api/v1/compose/status/ route - resp = self.server.get("/api/v1/compose/status/%s" % build_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [(e["id"], e["queue_status"]) for e in data["uuids"]] - self.assertEqual((build_id, "FINISHED") in ids, True, "Finished build not listed by /compose/status") - - # V1 API should have the uploads details in the results - uploads = all("uploads" in e for e in data["uuids"]) - self.assertTrue(uploads, "V1 API should include 'uploads' field") - - # Test the /api/v1/compose/metadata/ route - resp = self.server.get("/api/v1/compose/metadata/%s" % build_id) - self.assertEqual(resp.status_code, 200) - self.assertEqual(len(resp.data) > 1024, True) - - # Test the /api/v1/compose/results/ route - resp = self.server.get("/api/v1/compose/results/%s" % build_id) - self.assertEqual(resp.status_code, 200) - self.assertEqual(len(resp.data) > 1024, True) - - # Test the /api/v1/compose/image/ route - resp = self.server.get("/api/v1/compose/image/%s" % build_id) - self.assertEqual(resp.status_code, 200) - self.assertEqual(len(resp.data) > 0, True) - self.assertEqual(resp.data, b"TEST IMAGE") - - # Examine the final-kickstart.ks for the customizations - # A bit kludgy since it examines the filesystem directly, but that's better than unpacking the metadata - final_ks = open(joinpaths(self.repo_dir, "var/lib/lorax/composer/results/", build_id, "final-kickstart.ks")).read() - - # Check for the expected customizations in the kickstart - self.assertTrue("network --hostname=" in final_ks) - self.assertTrue("sshkey --user root" in final_ks) - - # Examine the config.toml to make sure it has an empty extra_boot_args - cfg_path = joinpaths(self.repo_dir, "var/lib/lorax/composer/results/", build_id, "config.toml") - cfg_dict = toml.loads(open(cfg_path, "r").read()) - self.assertTrue("extra_boot_args" in cfg_dict) - self.assertEqual(cfg_dict["extra_boot_args"], "") - - # Delete the finished build - # Test the /api/v1/compose/delete/ route - resp = self.server.delete("/api/v1/compose/delete/%s" % build_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [(e["uuid"], e["status"]) for e in data["uuids"]] - self.assertEqual((build_id, True) in ids, True, "Failed to delete test compose: %s" % data) - - # Make sure the finished list is empty - resp = self.server.get("/api/v1/compose/finished") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - 
self.assertEqual(data["finished"], [], "Failed to delete the failed build: %s" % data) - - def test_compose_13_status_filter(self): - """Test filter arguments on the /api/v1/compose/status route""" - # Get a couple compose results going so we have something to filter - test_compose_fail = {"blueprint_name": "example-glusterfs", - "compose_type": "tar", - "branch": "master"} - - test_compose_success = {"blueprint_name": "example-custom-base", - "compose_type": "tar", - "branch": "master"} - - resp = self.server.post("/api/v1/compose?test=1", - data=json.dumps(test_compose_fail), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data) - - build_id_fail = data["build_id"] - - resp = self.server.get("/api/v1/compose/queue") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["new"] + data["run"]] - self.assertEqual(build_id_fail in ids, True, "Failed to add build to the queue") - - # V1 API should have the uploads details in the results - uploads = all("uploads" in e for e in data["new"] + data["run"]) - self.assertTrue(uploads, "V1 API should include 'uploads' field") - - # Wait for it to start - self.assertEqual(_wait_for_status(self, build_id_fail, ["RUNNING"], api=1), True, "Failed to start test compose") - - # Wait for it to finish - self.assertEqual(_wait_for_status(self, build_id_fail, ["FAILED"], api=1), True, "Failed to finish test compose") - - # Fire up the other one - resp = self.server.post("/api/v1/compose?test=2", - data=json.dumps(test_compose_success), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data) - - build_id_success = data["build_id"] - - resp = self.server.get("/api/v1/compose/queue") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["new"] + data["run"]] - self.assertEqual(build_id_success in ids, True, "Failed to add build to the queue") - - # V1 API should have the uploads details in the results - uploads = all("uploads" in e for e in data["new"] + data["run"]) - self.assertTrue(uploads, "V1 API should include 'uploads' field") - - # Wait for it to start - self.assertEqual(_wait_for_status(self, build_id_success, ["RUNNING"], api=1), True, "Failed to start test compose") - - # Wait for it to finish - self.assertEqual(_wait_for_status(self, build_id_success, ["FINISHED"], api=1), True, "Failed to finish test compose") - - # Test that both composes appear in /api/v1/compose/status/* - resp = self.server.get("/api/v1/compose/status/*") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["uuids"]] - self.assertIn(build_id_success, ids, "Finished build not listed by /compose/status/*") - self.assertIn(build_id_fail, ids, "Failed build not listed by /compose/status/*") - - # V1 API should have the uploads details in the results - uploads = all("uploads" in e for e in data["uuids"]) - self.assertTrue(uploads, "V1 API should include 'uploads' field") - - # Filter by name - resp = self.server.get("/api/v1/compose/status/*?blueprint=%s" % test_compose_fail["blueprint_name"]) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["uuids"]] - self.assertIn(build_id_fail, ids, "Failed build not listed by /compose/status blueprint filter") - 
self.assertNotIn(build_id_success, ids, "Finished build listed by /compose/status blueprint filter")
-
- # V1 API should have the uploads details in the results
- uploads = all("uploads" in e for e in data["uuids"])
- self.assertTrue(uploads, "V1 API should include 'uploads' field")
-
- # Filter by type
- resp = self.server.get("/api/v1/compose/status/*?type=tar")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- ids = [e["id"] for e in data["uuids"]]
- self.assertIn(build_id_fail, ids, "Failed build not listed by /compose/status type filter")
- self.assertIn(build_id_success, ids, "Finished build not listed by /compose/status type filter")
-
- # V1 API should have the uploads details in the results
- uploads = all("uploads" in e for e in data["uuids"])
- self.assertTrue(uploads, "V1 API should include 'uploads' field")
-
- resp = self.server.get("/api/v1/compose/status/*?type=snakes")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- ids = [e["id"] for e in data["uuids"]]
- self.assertEqual(ids, [], "Invalid type not filtered by /compose/status type filter")
-
- # Filter by status
- resp = self.server.get("/api/v1/compose/status/*?status=FAILED")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- ids = [e["id"] for e in data["uuids"]]
- self.assertIn(build_id_fail, ids, "Failed build not listed by /compose/status status filter")
- self.assertNotIn(build_id_success, ids, "Finished build listed by /compose/status status filter")
-
- # V1 API should have the uploads details in the results
- uploads = all("uploads" in e for e in data["uuids"])
- self.assertTrue(uploads, "V1 API should include 'uploads' field")
-
- def test_compose_14_kernel_append(self):
- """Test the /api/v1/compose with kernel append customization"""
- test_compose = {"blueprint_name": "example-append",
- "compose_type": "tar",
- "branch": "master"}
-
- resp = self.server.post("/api/v1/compose?test=2",
- data=json.dumps(test_compose),
- content_type="application/json")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data)
-
- build_id = data["build_id"]
-
- # Is it in the queue list (either new or run is fine, based on timing)
- resp = self.server.get("/api/v1/compose/queue")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- ids = [e["id"] for e in data["new"] + data["run"]]
- self.assertEqual(build_id in ids, True, "Failed to add build to the queue")
-
- # V1 API should have the uploads details in the results
- uploads = all("uploads" in e for e in data["new"] + data["run"])
- self.assertTrue(uploads, "V1 API should include 'uploads' field")
-
- # Wait for it to start
- self.assertEqual(_wait_for_status(self, build_id, ["RUNNING"], api=1), True, "Failed to start test compose")
-
- # Wait for it to finish
- self.assertEqual(_wait_for_status(self, build_id, ["FINISHED"], api=1), True, "Failed to finish test compose")
-
- resp = self.server.get("/api/v1/compose/info/%s" % build_id)
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["queue_status"], "FINISHED", "Build not in FINISHED state")
-
- # Examine the final-kickstart.ks for the customizations
- # A bit kludgy since it examines the filesystem directly, but that's better than unpacking the metadata
- final_ks = open(joinpaths(self.repo_dir, "var/lib/lorax/composer/results/", build_id, "final-kickstart.ks")).read()
-
- # Check for the expected customizations in the kickstart
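# The example-append blueprint fixture is not shown in this patch. The kernel
# customization it carries would look like the fragment below, using the
# [customizations.kernel] table from the lorax-composer blueprint format (the
# name and version lines are illustrative):
#
#   name = "example-append"
#   version = "0.0.1"
#
#   [customizations.kernel]
#   append = "nosmt=force"
- #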
nosmt=force should be in the bootloader line, find it and check it - bootloader_line = "" - for line in final_ks.splitlines(): - if line.startswith("bootloader"): - bootloader_line = line - break - self.assertNotEqual(bootloader_line, "", "No bootloader line found") - self.assertTrue("nosmt=force" in bootloader_line) - - # Examine the config.toml to make sure it was written there as well - cfg_path = joinpaths(self.repo_dir, "var/lib/lorax/composer/results/", build_id, "config.toml") - cfg_dict = toml.loads(open(cfg_path, "r").read()) - self.assertTrue("extra_boot_args" in cfg_dict) - self.assertEqual(cfg_dict["extra_boot_args"], "nosmt=force") - - def assertInputError(self, resp): - """Check all the conditions for a successful input check error result""" - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(resp.status_code, 400) - self.assertEqual(data["status"], False) - self.assertTrue(len(data["errors"]) > 0) - self.assertTrue("Invalid characters in" in data["errors"][0]["msg"]) - - def test_blueprints_list_branch(self): - resp = self.server.get("/api/v1/blueprints/list?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_info_input(self): - """Test the blueprints/info input character checking""" - # /api/v1/blueprints/info/ - resp = self.server.get("/api/v1/blueprints/info/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v1/blueprints/info/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v1/blueprints/info/example-http-server?format=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_changes_input(self): - """Test the blueprints/changes input character checking""" - # /api/v1/blueprints/changes/ - resp = self.server.get("/api/v1/blueprints/changes/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v1/blueprints/changes/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_new_input(self): - """Test the blueprints/new input character checking""" - # /api/v1/blueprints/new - test_blueprint = {"description": "An example GlusterFS server with samba", - "name":UTF8_TEST_STRING, - "version": "0.2.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v1/blueprints/new", - data=json.dumps(test_blueprint), - content_type="application/json") - self.assertInputError(resp) - - test_blueprint["name"] = "example-glusterfs" - resp = self.server.post("/api/v1/blueprints/new?branch=" + UTF8_TEST_STRING, - data=json.dumps(test_blueprint), - content_type="application/json") - self.assertInputError(resp) - - def test_blueprints_delete_input(self): - """Test the blueprints/delete input character checking""" - resp = self.server.delete("/api/v1/blueprints/delete/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.delete("/api/v1/blueprints/delete/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_workspace_input(self): - """Test the blueprints/workspace input character checking""" - test_blueprint = {"description": "An example GlusterFS server with samba, ws version", - "name":UTF8_TEST_STRING, - "version": "0.3.0", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v1/blueprints/workspace", - 
data=json.dumps(test_blueprint), - content_type="application/json") - self.assertInputError(resp) - - test_blueprint["name"] = "example-glusterfs" - resp = self.server.post("/api/v1/blueprints/workspace?branch=" + UTF8_TEST_STRING, - data=json.dumps(test_blueprint), - content_type="application/json") - self.assertInputError(resp) - - def test_blueprints_workspace_delete_input(self): - """Test the DELETE blueprints/workspace input character checking""" - resp = self.server.delete("/api/v1/blueprints/workspace/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.delete("/api/v1/blueprints/workspace/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_undo_input(self): - """Test the blueprints/undo/... input character checking""" - resp = self.server.post("/api/v1/blueprints/undo/" + UTF8_TEST_STRING + "/deadbeef") - self.assertInputError(resp) - - resp = self.server.post("/api/v1/blueprints/undo/example-http-server/deadbeef?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_tag_input(self): - """Test the blueprints/tag input character checking""" - resp = self.server.post("/api/v1/blueprints/tag/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.post("/api/v1/blueprints/tag/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_diff_input(self): - """Test the blueprints/diff input character checking""" - # /api/v1/blueprints/diff/// - resp = self.server.get("/api/v1/blueprints/diff/" + UTF8_TEST_STRING + "/NEWEST/WORKSPACE") - self.assertInputError(resp) - - resp = self.server.get("/api/v1/blueprints/diff/example-http-server/NEWEST/WORKSPACE?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_freeze_input(self): - """Test the blueprints/freeze input character checking""" - resp = self.server.get("/api/v1/blueprints/freeze/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v1/blueprints/freeze/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v1/blueprints/freeze/example-http-server?format=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_blueprints_depsolve_input(self): - """Test the blueprints/depsolve input character checking""" - resp = self.server.get("/api/v1/blueprints/depsolve/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - resp = self.server.get("/api/v1/blueprints/depsolve/example-http-server?branch=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_projects_info_input(self): - """Test the projects/info input character checking""" - resp = self.server.get("/api/v1/projects/info/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_projects_depsolve_input(self): - """Test the projects/depsolve input character checking""" - resp = self.server.get("/api/v1/projects/depsolve/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_projects_source_info_input(self): - """Test the /api/v1/projects/source/info input character checking""" - resp = self.server.get("/api/v1/projects/source/info/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - # Test failure for bad format characters - resp = self.server.get("/api/v1/projects/source/info/lorax-3?format=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_projects_source_info_unknown(self): - """Test the /api/v1/projects/source/info unknown source""" - resp = 
self.server.get("/api/v1/projects/source/info/notasource") - data = json.loads(resp.data) - print(data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertTrue("is not a valid source" in data["errors"][0]["msg"]) - - def test_projects_source_info_unknown_toml(self): - """Test the /api/v1/projects/source/info unknown source TOML output""" - resp = self.server.get("/api/v1/projects/source/info/notasource?format=toml") - data = json.loads(resp.data) - print(data) - self.assertNotEqual(data, None) - self.assertEqual(resp.status_code, 400) - self.assertEqual(data["status"], False) - self.assertTrue(len(data["errors"]) > 0) - self.assertTrue("is not a valid source" in data["errors"][0]["msg"]) - - def test_projects_source_info_v1_input(self): - """Test the /api/v1/projects/source/info input character checking""" - resp = self.server.get("/api/v1/projects/source/info/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - # Test failure for bad format characters - resp = self.server.get("/api/v1/projects/source/info/lorax-3?format=" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_projects_source_info_v1_unknown(self): - """Test the /api/v1/projects/source/info unknown source""" - resp = self.server.get("/api/v1/projects/source/info/notasource") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data) - self.assertTrue(len(data["errors"]) > 0) - self.assertTrue("is not a valid source" in data["errors"][0]["msg"]) - - def test_projects_source_info_v1_unknown_toml(self): - """Test the /api/v1/projects/source/info unknown source TOML output""" - resp = self.server.get("/api/v1/projects/source/info/notasource?format=toml") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data) - self.assertEqual(resp.status_code, 400) - self.assertEqual(data["status"], False) - self.assertTrue(len(data["errors"]) > 0) - self.assertTrue("is not a valid source" in data["errors"][0]["msg"]) - - def test_projects_source_delete_input(self): - """Test the projects/source/delete input character checking""" - resp = self.server.delete("/api/v1/projects/source/delete/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_modules_list_input(self): - """Test the modules/list input character checking""" - resp = self.server.get("/api/v1/modules/list/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_modules_info_input(self): - """Test the modules/info input character checking""" - resp = self.server.get("/api/v1/modules/info/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_status_input(self): - """Test the compose/status input character checking""" - resp = self.server.get("/api/v1/compose/status/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_cancel_input(self): - """Test the compose/cancel input character checking""" - resp = self.server.delete("/api/v1/compose/cancel/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_delete_input(self): - """Test the compose/delete input character checking""" - resp = self.server.delete("/api/v1/compose/delete/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_info_input(self): - """Test the compose/info input character checking""" - resp = self.server.get("/api/v1/compose/info/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_metadata_input(self): - """Test the compose/metadata input character checking""" - resp = self.server.get("/api/v1/compose/metadata/" 
+ UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_results_input(self): - """Test the compose/results input character checking""" - resp = self.server.get("/api/v1/compose/results/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_logs_input(self): - """Test the compose/logs input character checking""" - resp = self.server.get("/api/v1/compose/logs/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_image_input(self): - """Test the compose/image input character checking""" - resp = self.server.get("/api/v1/compose/image/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - def test_compose_log_input(self): - """Test the compose/log input character checking""" - resp = self.server.get("/api/v1/compose/log/" + UTF8_TEST_STRING) - self.assertInputError(resp) - - # A series of tests for dealing with deleted blueprints - def test_deleted_bp_00_setup(self): - """Setup a deleted blueprint for use in the tests""" - # Start by creating a new blueprint for this series of tests and then - # deleting it. - test_blueprint = {"description": "A blueprint that has been deleted", - "name":"deleted-blueprint", - "version": "0.0.1", - "modules":[GLUSTERFS_GLOB, - GLUSTERFSCLI_GLOB], - "packages":[SAMBA_GLOB, - TMUX_GLOB], - "groups": []} - - resp = self.server.post("/api/v1/blueprints/new", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - resp = self.server.delete("/api/v1/blueprints/delete/deleted-blueprint") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - def test_deleted_bp_01_show(self): - """Test blueprint show with deleted blueprint""" - resp = self.server.get("/api/v1/blueprints/info/deleted-blueprint") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertEqual(data["errors"][0]["id"], "UnknownBlueprint") - - def test_deleted_bp_02_depsolve(self): - """Test blueprint depsolve with deleted blueprint""" - resp = self.server.get("/api/v1/blueprints/depsolve/deleted-blueprint") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertEqual(data["errors"][0]["id"], "UnknownBlueprint") - - def test_deleted_bp_03_diff(self): - """Test blueprint diff with deleted blueprint""" - resp = self.server.get("/api/v1/blueprints/diff/deleted-blueprint/NEWEST/WORKSPACE") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertEqual(data["status"], False) - self.assertEqual(data["errors"][0]["id"], "UnknownBlueprint") - - def test_deleted_bp_04_freeze(self): - """Test blueprint freeze with deleted blueprint""" - resp = self.server.get("/api/v1/blueprints/freeze/deleted-blueprint") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertEqual(data["errors"][0]["id"], "UnknownBlueprint") - - def test_deleted_bp_05_tag(self): - """Test blueprint tag with deleted blueprint""" - resp = self.server.post("/api/v1/blueprints/tag/deleted-blueprint") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue(len(data["errors"]) > 0) - self.assertEqual(data["errors"][0]["id"], "UnknownBlueprint") - - def test_404(self): - """Test that a 404 returns JSON""" - resp = self.server.get("/marmalade") - print(resp) - print(resp.data) - 
self.assertEqual(resp.status_code, 404) - self.assertEqual(json.loads(resp.data), { - "status": False, - "errors": [{ "id": "HTTPError", "code": 404, "msg": "Not Found" }] - }) - - def test_405(self): - """Test that a 405 returns JSON""" - resp = self.server.post("/api/status") - self.assertEqual(resp.status_code, 405) - self.assertEqual(json.loads(resp.data), { - "status": False, - "errors": [{ "id": "HTTPError", "code": 405, "msg": "Method Not Allowed" }] - }) - - # upload route tests need to be run in order - def test_upload_00_providers(self): - """List upload providers without profile settings""" - # list of providers, before saving and settings - resp = self.server.get("/api/v1/upload/providers") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue("providers" in data) - providers = sorted(data["providers"].keys()) - self.assertEqual(providers, ["aws", "dummy", "openstack", "vsphere"]) - - def test_upload_01_providers_save(self): - """Save settings for a provider""" - # list of providers, and their settings - test_settings = { - "provider": "aws", - "profile": test_profiles["aws"][0], - "settings": test_profiles["aws"][1] - } - - resp = self.server.post("/api/v1/upload/providers/save", - data=json.dumps(test_settings), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - # Check that the new profile is listed - resp = self.server.get("/api/v1/upload/providers") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertTrue("providers" in data) - self.assertTrue("aws" in data["providers"]) - self.assertTrue(test_profiles["aws"][0] in data["providers"]["aws"]["profiles"]) - - def test_upload_02_compose_profile(self): - """Test starting a compose with upload profile""" - test_compose = { - "blueprint_name": "example-custom-base", - "compose_type": "ami", - "branch": "master", - "upload": { - "image_name": "AWS custom-base", - "provider": "aws", - "settings": test_profiles["aws"][1] - } - } - resp = self.server.post("/api/v1/compose?test=2", - data=json.dumps(test_compose), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data) - self.assertTrue("build_id" in data) - self.assertTrue(len(data["build_id"]) > 0) - self.assertTrue("upload_id" in data) - self.assertTrue(len(data["upload_id"]) > 0) - - upload_id = data["upload_id"] - - # Get info about the upload - resp = self.server.get("/api/v1/upload/info/%s" % upload_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data) - self.assertEqual(data["status"], True) - self.assertTrue("upload" in data) - self.assertEqual(data["upload"]["provider_name"], "aws") - self.assertEqual(data["upload"]["uuid"], upload_id) - self.assertEqual(data["upload"]["image_name"], "AWS custom-base") - - # Get the upload log - resp = self.server.get("/api/v1/upload/log/%s" % upload_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data) - self.assertEqual(data["status"], True) - # NOTE: log is empty - - # Cancel the upload - resp = self.server.delete("/api/v1/upload/cancel/%s" % upload_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data) - self.assertEqual(data["status"], True) - self.assertEqual(data["upload_id"], upload_id) - - def test_upload_03_compose_settings(self): - """Test starting a compose with upload settings""" - test_compose = { - 
"blueprint_name": "example-custom-base", - "compose_type": "ami", - "branch": "master", - "upload": { - "image_name": "AWS custom-base", - "provider": "aws", - "profile": test_profiles["aws"][0] - } - } - resp = self.server.post("/api/v1/compose?test=2", - data=json.dumps(test_compose), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data) - self.assertTrue("build_id" in data) - self.assertTrue(len(data["build_id"]) > 0) - self.assertTrue("upload_id" in data) - self.assertTrue(len(data["upload_id"]) > 0) - - upload_id = data["upload_id"] - - # Get info about the upload - resp = self.server.get("/api/v1/upload/info/%s" % upload_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data) - self.assertEqual(data["status"], True) - self.assertTrue("upload" in data) - self.assertEqual(data["upload"]["provider_name"], "aws") - self.assertEqual(data["upload"]["uuid"], upload_id) - self.assertEqual(data["upload"]["image_name"], "AWS custom-base") - - # Get the upload log - resp = self.server.get("/api/v1/upload/log/%s" % upload_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data) - self.assertEqual(data["status"], True) - # NOTE: log is empty - - # Cancel the upload - resp = self.server.delete("/api/v1/upload/cancel/%s" % upload_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - print(data) - self.assertEqual(data["status"], True) - self.assertEqual(data["upload_id"], upload_id) - - def test_upload_05_uploads_schedule(self): - """Test schedule upload and upload delete""" - - # Create a test compose - test_compose = {"blueprint_name": "example-custom-base", - "compose_type": "ami", - "branch": "master"} - - resp = self.server.post("/api/v1/compose?test=2", - data=json.dumps(test_compose), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data) - - build_id = data["build_id"] - - # Is it in the queue list (either new or run is fine, based on timing) - resp = self.server.get("/api/v1/compose/queue") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - ids = [e["id"] for e in data["new"] + data["run"]] - self.assertEqual(build_id in ids, True, "Failed to add build to the queue") - - # V1 API should have the uploads details in the results - uploads = all("uploads" in e for e in data["new"] + data["run"]) - self.assertTrue(uploads, "V1 API should include 'uploads' field") - - # Wait for it to start - self.assertEqual(_wait_for_status(self, build_id, ["RUNNING"], api=1), True, "Failed to start test compose") - - # Wait for it to finish - self.assertEqual(_wait_for_status(self, build_id, ["FINISHED"], api=1), True, "Failed to finish test compose") - - resp = self.server.get("/api/v1/compose/info/%s" % build_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["queue_status"], "FINISHED", "Build not in FINISHED state") - - # Schedule an upload of this image using settings - upload = { - "image_name": "AWS custom-base", - "provider": "aws", - "settings": test_profiles["aws"][1] - } - resp = self.server.post("/api/v1/compose/uploads/schedule/%s" % build_id, - data=json.dumps(upload), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed 
to schedule upload: %s" % data)
- self.assertTrue(len(data["upload_id"]) > 0)
-
- # Schedule an upload of this image using a profile
- upload = {
- "image_name": "AWS custom-base",
- "provider": "aws",
- "profile": test_profiles["aws"][0]
- }
- resp = self.server.post("/api/v1/compose/uploads/schedule/%s" % build_id,
- data=json.dumps(upload),
- content_type="application/json")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["status"], True, "Failed to schedule upload: %s" % data)
- self.assertTrue(len(data["upload_id"]) > 0)
-
- # Delete this upload from this compose
- resp = self.server.delete("/api/v1/upload/delete/%s" % data["upload_id"])
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["status"], True, "Failed to delete upload: %s" % data)
- self.assertTrue(len(data["upload_id"]) > 0)
-
- def test_upload_06_providers_delete(self):
- """Delete a profile from a provider"""
- # /api/v1/upload/providers/delete/provider/profile
- resp = self.server.delete("/api/v1/upload/providers/delete/aws/%s" % test_profiles["aws"][0])
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data, {"status":True})
-
- # Check that the profile has been deleted
- resp = self.server.get("/api/v1/upload/providers")
- data = json.loads(resp.data)
- self.assertTrue("providers" in data)
- self.assertTrue("aws" in data["providers"])
- self.assertEqual(data["providers"]["aws"]["profiles"], {})
-
- def test_upload_07_delete_unknown_profile(self):
- """Delete an unknown profile"""
- resp = self.server.delete("/api/v1/upload/providers/delete/aws/unknown")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["status"], False, "Failed to delete profile: %s" % data)
-
- def test_upload_08_delete_unknown_upload(self):
- """Delete an unknown upload uuid"""
- resp = self.server.delete("/api/v1/upload/delete/4b15fd5a-0d5a-42c5-8534-95d831328803")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["status"], False, "Failed to delete upload: %s" % data)
-
-@contextmanager
-def in_tempdir(prefix='tmp'):
- """Execute a block of code with chdir in a temporary location"""
- oldcwd = os.getcwd()
- tmpdir = tempfile.mkdtemp(prefix=prefix)
- os.chdir(tmpdir)
- try:
- yield
- finally:
- os.chdir(oldcwd)
- shutil.rmtree(tmpdir)
-
-def makeFakeRPM(repo_dir, name, epoch, version, release):
- """Make a fake rpm file in repo_dir"""
- p = SimpleRpmBuild(name, version, release)
- if epoch:
- p.epoch = epoch
- p.add_simple_payload_file_random()
- with in_tempdir("lorax-test-rpms."):
- p.make()
- rpmfile = p.get_built_rpm(expectedArch)
- shutil.move(rpmfile, repo_dir)
-
-class RepoCacheAPIV0TestCase(unittest.TestCase):
- """Test to make sure that changes to the repository are picked up immediately."""
- @classmethod
- def setUpClass(self):
- repo_dir = tempfile.mkdtemp(prefix="lorax.test.repo.")
- server.config["REPO_DIR"] = repo_dir
- repo = open_or_create_repo(server.config["REPO_DIR"])
- server.config["GITLOCK"] = GitLock(repo=repo, lock=Lock(), dir=repo_dir)
-
- server.config["COMPOSER_CFG"] = configure(root_dir=repo_dir, test_config=True)
- lifted.config.configure(server.config["COMPOSER_CFG"])
-
- # Copy the shared files over to the directory tree we are using
- for d in ["composer", "lifted"]:
- shutil.copytree(joinpaths("./share", d), joinpaths(server.config["COMPOSER_CFG"].get("composer", "share_dir"), d))
-
- errors =
make_queue_dirs(server.config["COMPOSER_CFG"], os.getgid()) - if errors: - raise RuntimeError("\n".join(errors)) - - make_dnf_dirs(server.config["COMPOSER_CFG"], os.getuid(), os.getgid()) - - # Modify fedora vs. rawhide tests when running on rawhide - if os.path.exists("/etc/yum.repos.d/fedora-rawhide.repo"): - self.rawhide = True - - # Create an extra repo to use for checking the metadata expire handling - os.makedirs("/tmp/lorax-test-repo/") - makeFakeRPM("/tmp/lorax-test-repo/", "fake-milhouse", 0, "1.0.0", "1") - os.system("createrepo_c /tmp/lorax-test-repo/") - - server.config["DNFLOCK"] = DNFLock(server.config["COMPOSER_CFG"], expire_secs=10) - - # Include a message in /api/status output - server.config["TEMPLATE_ERRORS"] = ["Test message"] - - server.config['TESTING'] = True - self.server = server.test_client() - self.repo_dir = repo_dir - - start_queue_monitor(server.config["COMPOSER_CFG"], 0, 0) - - @classmethod - def tearDownClass(self): - shutil.rmtree(server.config["REPO_DIR"]) - shutil.rmtree("/tmp/lorax-test-repo/") - - def add_new_source(self, repo_dir): - json_source = """{"name": "new-repo-1", "url": "file:///tmp/lorax-test-repo/", "type": "yum-baseurl", - "check_ssl": false, "check_gpg": false}""" - self.assertTrue(len(json_source) > 0) - resp = self.server.post("/api/v0/projects/source/new", - data=json_source, - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - def add_blueprint(self): - test_blueprint = {"description": "Metadata expire test blueprint", - "name":"milhouse-test", - "version": "0.0.1", - "modules":[], - "packages":[{"name":"fake-milhouse", "version":"1.*.*"}], - "groups": []} - - resp = self.server.post("/api/v0/blueprints/new", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - def test_metadata_expires(self): - """Ensure that metadata expire settings pick up changes to the repo immediately""" - - # Metadata can change at any time, but checking for that is expensive. So we only want - # to check when the timeout has expired, OR when starting a new compose - # Add a new repository at /tmp/lorax-test-repo/ - self.add_new_source("/tmp/lorax-test-repo") - - # Add a new blueprint with fake-milhouse in it - self.add_blueprint() - - # Depsolve the blueprint - resp = self.server.get("/api/v0/blueprints/depsolve/milhouse-test") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0]["blueprint"]["name"], "milhouse-test") - deps = blueprints[0]["dependencies"] - print(deps) - self.assertTrue(any([True for d in deps if d["name"] == "fake-milhouse" and d["version"] == "1.0.0"])) - self.assertFalse(data.get("errors")) - - # Make a new version of fake-milhouse - makeFakeRPM("/tmp/lorax-test-repo/", "fake-milhouse", 0, "1.0.1", "1") - os.system("createrepo_c /tmp/lorax-test-repo/") - - # Expire time has been set to 10 seconds, so wait 11 and try it. 
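# For reference, the 10-second window comes straight from DNF's own
# metadata_expire setting (wired through DNFLock(..., expire_secs=10) in
# setUpClass above). A minimal standalone sketch of that mechanism, using
# nothing but stock dnf:
#
#   import dnf
#   base = dnf.Base()
#   base.conf.metadata_expire = 10   # seconds before cached repodata is stale
#   base.read_all_repos()
#   base.fill_sack(load_system_repo=False)   # repodata is re-read once expired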
- time.sleep(11) - - resp = self.server.get("/api/v0/blueprints/depsolve/milhouse-test") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0]["blueprint"]["name"], "milhouse-test") - deps = blueprints[0]["dependencies"] - print(deps) - self.assertTrue(any([True for d in deps if d["name"] == "fake-milhouse" and d["version"] == "1.0.1"])) - self.assertFalse(data.get("errors")) - - # Make a new version of fake-milhouse - makeFakeRPM("/tmp/lorax-test-repo/", "fake-milhouse", 0, "1.0.2", "1") - os.system("createrepo_c /tmp/lorax-test-repo/") - - test_compose = {"blueprint_name": "milhouse-test", - "compose_type": "tar", - "branch": "master"} - - resp = self.server.post("/api/v0/compose?test=2", - data=json.dumps(test_compose), - content_type="application/json") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data) - - build_id = data["build_id"] - - # Check to see which version was used for the compose, should be 1.0.2 - resp = self.server.get("/api/v0/compose/info/%s" % build_id) - data = json.loads(resp.data) - self.assertNotEqual(data, None) - pkg_deps = data["deps"]["packages"] - print(pkg_deps) - self.assertTrue(any([True for d in pkg_deps if d["name"] == "fake-milhouse" and d["version"] == "1.0.2"])) - -class RepoCacheAPIV1TestCase(unittest.TestCase): - """Test to make sure that changes to the repository are picked up immediately.""" - @classmethod - def setUpClass(self): - repo_dir = tempfile.mkdtemp(prefix="lorax.test.repo.") - server.config["REPO_DIR"] = repo_dir - repo = open_or_create_repo(server.config["REPO_DIR"]) - server.config["GITLOCK"] = GitLock(repo=repo, lock=Lock(), dir=repo_dir) - - server.config["COMPOSER_CFG"] = configure(root_dir=repo_dir, test_config=True) - lifted.config.configure(server.config["COMPOSER_CFG"]) - - # Copy the shared files over to the directory tree we are using - for d in ["composer", "lifted"]: - shutil.copytree(joinpaths("./share", d), joinpaths(server.config["COMPOSER_CFG"].get("composer", "share_dir"), d)) - - errors = make_queue_dirs(server.config["COMPOSER_CFG"], os.getgid()) - if errors: - raise RuntimeError("\n".join(errors)) - - make_dnf_dirs(server.config["COMPOSER_CFG"], os.getuid(), os.getgid()) - - # Modify fedora vs. 
rawhide tests when running on rawhide - if os.path.exists("/etc/yum.repos.d/fedora-rawhide.repo"): - self.rawhide = True - - # Create an extra repo to use for checking the metadata expire handling - os.makedirs("/tmp/lorax-test-repo/") - makeFakeRPM("/tmp/lorax-test-repo/", "fake-milhouse", 0, "1.0.0", "1") - os.system("createrepo_c /tmp/lorax-test-repo/") - - server.config["DNFLOCK"] = DNFLock(server.config["COMPOSER_CFG"], expire_secs=10) - - # Include a message in /api/status output - server.config["TEMPLATE_ERRORS"] = ["Test message"] - - server.config['TESTING'] = True - self.server = server.test_client() - self.repo_dir = repo_dir - - start_queue_monitor(server.config["COMPOSER_CFG"], 0, 0) - - @classmethod - def tearDownClass(self): - shutil.rmtree(server.config["REPO_DIR"]) - shutil.rmtree("/tmp/lorax-test-repo/") - - def add_new_source(self, repo_dir): - json_source = """{"id": "new-repo-1", "name": "New repo 1", "url": "file:///tmp/lorax-test-repo/", - "type": "yum-baseurl", "check_ssl": false, "check_gpg": false}""" - self.assertTrue(len(json_source) > 0) - resp = self.server.post("/api/v1/projects/source/new", - data=json_source, - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - def add_blueprint(self): - test_blueprint = {"description": "Metadata expire test blueprint", - "name":"milhouse-test", - "version": "0.0.1", - "modules":[], - "packages":[{"name":"fake-milhouse", "version":"1.*.*"}], - "groups": []} - - resp = self.server.post("/api/v1/blueprints/new", - data=json.dumps(test_blueprint), - content_type="application/json") - data = json.loads(resp.data) - self.assertEqual(data, {"status":True}) - - def test_metadata_expires(self): - """Ensure that metadata expire settings pick up changes to the repo immediately""" - - # Metadata can change at any time, but checking for that is expensive. So we only want - # to check when the timeout has expired, OR when starting a new compose - # Add a new repository at /tmp/lorax-test-repo/ - self.add_new_source("/tmp/lorax-test-repo") - - # Add a new blueprint with fake-milhouse in it - self.add_blueprint() - - # Depsolve the blueprint - resp = self.server.get("/api/v1/blueprints/depsolve/milhouse-test") - data = json.loads(resp.data) - self.assertNotEqual(data, None) - blueprints = data.get("blueprints") - self.assertNotEqual(blueprints, None) - self.assertEqual(len(blueprints), 1) - self.assertEqual(blueprints[0]["blueprint"]["name"], "milhouse-test") - deps = blueprints[0]["dependencies"] - print(deps) - self.assertTrue(any([True for d in deps if d["name"] == "fake-milhouse" and d["version"] == "1.0.0"])) - self.assertFalse(data.get("errors")) - - # Make a new version of fake-milhouse - makeFakeRPM("/tmp/lorax-test-repo/", "fake-milhouse", 0, "1.0.1", "1") - os.system("createrepo_c /tmp/lorax-test-repo/") - - # Expire time has been set to 10 seconds, so wait 11 and try it. 
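# Aside: the only schema difference this V1 class exercises is in
# add_new_source() above. Compare the two payloads used by these classes
# (both quoted from this file):
#
#   v0: {"name": "new-repo-1", "url": "file:///tmp/lorax-test-repo/",
#        "type": "yum-baseurl", "check_ssl": false, "check_gpg": false}
#   v1: {"id": "new-repo-1", "name": "New repo 1", "url": "file:///tmp/lorax-test-repo/",
#        "type": "yum-baseurl", "check_ssl": false, "check_gpg": false}
#
# In v0 "name" is the identifier; v1 moves the identifier to "id" and keeps
# "name" as a human-readable label.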
- time.sleep(11)
-
- resp = self.server.get("/api/v1/blueprints/depsolve/milhouse-test")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- blueprints = data.get("blueprints")
- self.assertNotEqual(blueprints, None)
- self.assertEqual(len(blueprints), 1)
- self.assertEqual(blueprints[0]["blueprint"]["name"], "milhouse-test")
- deps = blueprints[0]["dependencies"]
- print(deps)
- self.assertTrue(any([True for d in deps if d["name"] == "fake-milhouse" and d["version"] == "1.0.1"]))
- self.assertFalse(data.get("errors"))
-
- # Make a new version of fake-milhouse
- makeFakeRPM("/tmp/lorax-test-repo/", "fake-milhouse", 0, "1.0.2", "1")
- os.system("createrepo_c /tmp/lorax-test-repo/")
-
- test_compose = {"blueprint_name": "milhouse-test",
- "compose_type": "tar",
- "branch": "master"}
-
- resp = self.server.post("/api/v1/compose?test=2",
- data=json.dumps(test_compose),
- content_type="application/json")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data)
-
- build_id = data["build_id"]
-
- # Check to see which version was used for the compose, should be 1.0.2
- resp = self.server.get("/api/v1/compose/info/%s" % build_id)
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- pkg_deps = data["deps"]["packages"]
- print(pkg_deps)
- self.assertTrue(any([True for d in pkg_deps if d["name"] == "fake-milhouse" and d["version"] == "1.0.2"]))
-
-class GitRPMBlueprintTestCase(unittest.TestCase):
- """Test to make sure that a blueprint with repos.git entry works."""
- @classmethod
- def setUpClass(self):
- (self.gitrpm_repo, self.test_results, self.first_commit) = create_git_repo()
-
- repo_dir = tempfile.mkdtemp(prefix="lorax.test.repo.")
- server.config["REPO_DIR"] = repo_dir
- repo = open_or_create_repo(server.config["REPO_DIR"])
- server.config["GITLOCK"] = GitLock(repo=repo, lock=Lock(), dir=repo_dir)
-
- server.config["COMPOSER_CFG"] = configure(root_dir=repo_dir, test_config=True)
- lifted.config.configure(server.config["COMPOSER_CFG"])
-
- # Copy the shared files over to the directory tree we are using
- for d in ["composer", "lifted"]:
- shutil.copytree(joinpaths("./share", d), joinpaths(server.config["COMPOSER_CFG"].get("composer", "share_dir"), d))
-
- errors = make_queue_dirs(server.config["COMPOSER_CFG"], os.getgid())
- if errors:
- raise RuntimeError("\n".join(errors))
-
- make_dnf_dirs(server.config["COMPOSER_CFG"], os.getuid(), os.getgid())
-
- # Modify fedora vs. rawhide tests when running on rawhide
- if os.path.exists("/etc/yum.repos.d/fedora-rawhide.repo"):
- self.rawhide = True
-
- server.config["DNFLOCK"] = DNFLock(server.config["COMPOSER_CFG"], expire_secs=10)
-
- # Include a message in /api/status output
- server.config["TEMPLATE_ERRORS"] = ["Test message"]
-
- server.config['TESTING'] = True
- self.server = server.test_client()
- self.repo_dir = repo_dir
-
- start_queue_monitor(server.config["COMPOSER_CFG"], 0, 0)
-
- @classmethod
- def tearDownClass(self):
- shutil.rmtree(server.config["REPO_DIR"])
- shutil.rmtree(self.gitrpm_repo)
-
- def test_01_depsolve_gitrpm(self):
- """Make sure that depsolve works with repos.git"""
- # Note that the git rpm isn't built and added until a compose, so it won't be listed
- test_blueprint = """
- name = "git-rpm-blueprint-test"
- description = "A test blueprint including a rpm created from git"
- version = "0.0.1"
-
- [[repos.git]]
- rpmname="git-rpm-test"
- rpmversion="1.0.0"
- rpmrelease="1"
- summary="Testing the git rpm code"
- repo="file://%s"
- ref="%s"
- destination="/srv/testing-rpm/"
-
- [[packages]]
- name="openssh-server"
- version="*"
- """ % (self.gitrpm_repo, self.first_commit)
- resp = self.server.post("/api/v0/blueprints/new",
- data=test_blueprint,
- content_type="text/x-toml")
- data = json.loads(resp.data)
- self.assertEqual(data, {"status":True})
-
- resp = self.server.get("/api/v0/blueprints/depsolve/git-rpm-blueprint-test")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- blueprints = data.get("blueprints")
- self.assertNotEqual(blueprints, None)
- self.assertEqual(len(blueprints), 1)
- self.assertEqual(blueprints[0]["blueprint"]["name"], "git-rpm-blueprint-test")
- self.assertFalse(data.get("errors"))
- deps = blueprints[0]["dependencies"]
- print(deps)
- self.assertEqual(len(blueprints[0]["dependencies"]) > 10, True)
-
- def test_02_compose_gitrpm(self):
- """Test that the compose includes the git rpm repo and rpm"""
- test_compose = {"blueprint_name": "git-rpm-blueprint-test",
- "compose_type": "tar",
- "branch": "master"}
-
- resp = self.server.post("/api/v0/compose?test=2",
- data=json.dumps(test_compose),
- content_type="application/json")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["status"], True, "Failed to start test compose: %s" % data)
-
- build_id = data["build_id"]
-
- # Is it in the queue list (either new or run is fine, based on timing)
- resp = self.server.get("/api/v0/compose/queue")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- ids = [e["id"] for e in data["new"] + data["run"]]
- self.assertEqual(build_id in ids, True, "Failed to add build to the queue")
-
- # V0 API should *not* have the uploads details in the results
- print(data)
- uploads = any("uploads" in e for e in data["new"] + data["run"])
- self.assertFalse(uploads, "V0 API should not include 'uploads' field")
-
- # Wait for it to start
- self.assertEqual(_wait_for_status(self, build_id, ["RUNNING"], api=1), True, "Failed to start test compose")
-
- # Wait for it to finish
- self.assertEqual(_wait_for_status(self, build_id, ["FINISHED"], api=1), True, "Failed to finish test compose")
-
- resp = self.server.get("/api/v0/compose/info/%s" % build_id)
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["queue_status"], "FINISHED", "Build not in FINISHED state")
-
- # Examine the final-kickstart.ks for the customizations
- # A bit kludgy since it examines the filesystem directly, but that's better than unpacking the metadata
- final_ks = open(joinpaths(self.repo_dir, "var/lib/lorax/composer/results/", build_id, "final-kickstart.ks")).read()
-
- # Is the source in the kickstart?
- self.assertTrue('repo --name="gitrpms"' in final_ks)
-
- # Is the rpm in the kickstart?
- self.assertTrue("git-rpm-test-1.0.0-1" in final_ks)
-
- def test_03_compose_badref_gitrpm(self):
- """Make sure that compose with a bad reference returns an error"""
- test_blueprint = """
- name = "git-rpm-blueprint-test"
- description = "A test blueprint including a rpm created from git"
- version = "0.0.2"
-
- [[repos.git]]
- rpmname="git-rpm-test"
- rpmversion="1.0.0"
- rpmrelease="1"
- summary="Testing the git rpm code"
- repo="file://%s"
- ref="nobody-saw-me-do-it"
- destination="/srv/testing-rpm/"
-
- [[packages]]
- name="openssh-server"
- version="*"
- """ % self.gitrpm_repo
- resp = self.server.post("/api/v0/blueprints/new",
- data=test_blueprint,
- content_type="text/x-toml")
- data = json.loads(resp.data)
- self.assertEqual(data, {"status":True})
-
- test_compose = {"blueprint_name": "git-rpm-blueprint-test",
- "compose_type": "tar",
- "branch": "master"}
-
- resp = self.server.post("/api/v0/compose?test=2",
- data=json.dumps(test_compose),
- content_type="application/json")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["status"], False)
- self.assertTrue("errors" in data and len(data["errors"]) > 0)
- self.assertEqual(data["errors"][0]["id"], "BuildFailed")
-
- def test_04_compose_badrepo_gitrpm(self):
- """Make sure that compose with a bad repo returns an error"""
- test_blueprint = """
- name = "git-rpm-blueprint-test"
- description = "A test blueprint including a rpm created from git"
- version = "0.0.3"
-
- [[repos.git]]
- rpmname="git-rpm-test"
- rpmversion="1.0.0"
- rpmrelease="1"
- summary="Testing the git rpm code"
- repo="file:///not/a/repo/path/"
- ref="origin/master"
- destination="/srv/testing-rpm/"
-
- [[packages]]
- name="openssh-server"
- version="*"
- """
- resp = self.server.post("/api/v0/blueprints/new",
- data=test_blueprint,
- content_type="text/x-toml")
- data = json.loads(resp.data)
- self.assertEqual(data, {"status":True})
-
- test_compose = {"blueprint_name": "git-rpm-blueprint-test",
- "compose_type": "tar",
- "branch": "master"}
-
- resp = self.server.post("/api/v0/compose?test=2",
- data=json.dumps(test_compose),
- content_type="application/json")
- data = json.loads(resp.data)
- self.assertNotEqual(data, None)
- self.assertEqual(data["status"], False)
- self.assertTrue("errors" in data and len(data["errors"]) > 0)
- self.assertEqual(data["errors"][0]["id"], "BuildFailed")
diff --git a/tests/pylorax/test_timestamp.py b/tests/pylorax/test_timestamp.py
deleted file mode 100644
index e9a0452b..00000000
--- a/tests/pylorax/test_timestamp.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-# Copyright (C) 2018 Red Hat, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-import shutil
-import tempfile
-import unittest
-
-from pylorax.api.timestamp import write_timestamp, timestamp_dict
-from pylorax.api.timestamp import TS_CREATED, TS_STARTED, TS_FINISHED
-
-class TimestampTest(unittest.TestCase):
- @classmethod
- def setUpClass(self):
- self.test_dir = tempfile.mkdtemp(prefix="lorax.timestamp.")
-
- @classmethod
- def tearDownClass(self):
- shutil.rmtree(self.test_dir)
-
- def test_timestamp(self):
- """Test writing and reading compose timestamps"""
- write_timestamp(self.test_dir, TS_CREATED)
- ts = timestamp_dict(self.test_dir)
- self.assertTrue(TS_CREATED in ts)
- self.assertTrue(TS_STARTED not in ts)
- self.assertTrue(TS_FINISHED not in ts)
-
- write_timestamp(self.test_dir, TS_STARTED)
- ts = timestamp_dict(self.test_dir)
- self.assertTrue(TS_CREATED in ts)
- self.assertTrue(TS_STARTED in ts)
- self.assertTrue(TS_FINISHED not in ts)
-
- write_timestamp(self.test_dir, TS_FINISHED)
- ts = timestamp_dict(self.test_dir)
- self.assertTrue(TS_CREATED in ts)
- self.assertTrue(TS_STARTED in ts)
- self.assertTrue(TS_FINISHED in ts)
diff --git a/tests/pylorax/test_workspace.py b/tests/pylorax/test_workspace.py
deleted file mode 100644
index 7f8517fe..00000000
--- a/tests/pylorax/test_workspace.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Copyright (C) 2017 Red Hat, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-# -import os -import shutil -import tempfile -import unittest -from unittest import mock - -import pylorax.api.recipes as recipes -from pylorax.api.workspace import workspace_dir, workspace_read, workspace_write, workspace_delete -from pylorax.sysutils import joinpaths - -class WorkspaceTest(unittest.TestCase): - @classmethod - def setUpClass(self): - self.repo_dir = tempfile.mkdtemp(prefix="lorax.test.repo.") - self.repo = recipes.open_or_create_repo(self.repo_dir) - - self.results_path = "./tests/pylorax/results/" - self.examples_path = "./tests/pylorax/blueprints/" - - recipe_path = joinpaths(self.examples_path, "example-http-server.toml") - f = open(recipe_path, 'rb') - self.example_recipe = recipes.recipe_from_toml(f.read()) - - @classmethod - def tearDownClass(self): - if self.repo is not None: - del self.repo - shutil.rmtree(self.repo_dir) - - def test_01_repo_creation(self): - """Test that creating the repository succeeded""" - self.assertNotEqual(self.repo, None) - - def test_02_workspace_dir(self): - """Test the workspace_dir function""" - ws_dir = workspace_dir(self.repo, "master") - self.assertEqual(ws_dir, joinpaths(self.repo_dir, "git", "workspace", "master")) - - def test_03_workspace_write(self): - """Test the workspace_write function""" - # Use an example recipe - workspace_write(self.repo, "master", self.example_recipe) - - # The file should have ended up here - ws_recipe_path = joinpaths(self.repo_dir, "git", "workspace", "master", "example-http-server.toml") - self.assertEqual(os.path.exists(ws_recipe_path), True) - - def test_04_workspace_read(self): - """Test the workspace_read function""" - # The recipe was written by the workspace_write test. Read it and compare with the source recipe. - recipe = workspace_read(self.repo, "master", "example-http-server") - self.assertEqual(self.example_recipe, recipe) - - def test_04_workspace_read_ioerror(self): - """Test the workspace_read function dealing with internal IOError""" - # The recipe was written by the workspace_write test. - with self.assertRaises(recipes.RecipeFileError): - with mock.patch('pylorax.api.workspace.recipe_from_toml', side_effect=IOError('TESTING')): - workspace_read(self.repo, "master", "example-http-server") - - def test_05_workspace_delete(self): - """Test the workspace_delete function""" - ws_recipe_path = joinpaths(self.repo_dir, "git", "workspace", "master", "example-http-server.toml") - - self.assertEqual(os.path.exists(ws_recipe_path), True) - workspace_delete(self.repo, "master", "example-http-server") - self.assertEqual(os.path.exists(ws_recipe_path), False) - - def test_05_workspace_delete_non_existing(self): - """Test the workspace_delete function""" - ws_recipe_path = joinpaths(self.repo_dir, "git", "workspace", "master", "non-existing.toml") - - workspace_delete(self.repo, "master", "non-existing") - self.assertFalse(os.path.exists(ws_recipe_path))