# Copyright (C) 2014 SUSE Linux GmbH
# Copyright Red Hat
#
# This file is part of os-autoinst-distri-fedora.
#
# os-autoinst-distri-fedora is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

use strict;
use testapi;
use autotest;
use needle;
use File::Basename;

# distribution-specific implementations of expected methods
my $distri = testapi::get_var("CASEDIR") . '/lib/fedoradistribution.pm';
require $distri;
testapi::set_distribution(fedoradistribution->new());

## UTILITY SUBROUTINES

# Stolen from openSUSE.
sub unregister_needle_tags($) {
    my $tag = shift;
    my @a = @{ needle::tags($tag) };
    for my $n (@a) { $n->unregister(); }
}

# The purpose of this function is to un-register all needles which have
# at least one tag that starts with a given string (the 'prefix'), if
# it does not have any tag that matches the pattern 'prefix-value', for
# any of the values given in an array. The first argument passed must
# be the prefix; the second must be a reference to the array of values.
# For instance, if the 'prefix' is LANGUAGE and the 'values' are
# ENGLISH and FRENCH, this function would un-register a needle which
# had only the tag 'LANGUAGE-DUTCH', but it would keep a needle which
# had the tag 'LANGUAGE-ENGLISH', or a needle with no tag starting in
# 'LANGUAGE-' at all.
sub unregister_prefix_tags {
    my ($prefix, $valueref) = @_;
    NEEDLE: for my $needle ( needle::all() ) {
        my $unregister = 0;
        for my $tag ( @{$needle->{'tags'}} ) {
            if ($tag =~ /^\Q$prefix/) {
                # We have at least one tag matching the prefix, so we
                # *MAY* want to un-register the needle
                $unregister = 1;
                for my $value ( @{$valueref} ) {
                    # At any point if we hit a prefix-value match, we
                    # know we need to keep this needle and can skip
                    # to the next
                    next NEEDLE if ($tag eq "$prefix-$value");
                }
            }
        }
        # We get here if we hit no prefix-value match, but we only want
        # to unregister the needle if we hit any prefix match, i.e. if
        # 'unregister' is 1.
        $needle->unregister() if ($unregister);
    }
}
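
# A quick illustration (comment only, not executed), assuming needles tagged
# LANGUAGE-english, LANGUAGE-french and LANGUAGE-dutch are registered:
#   unregister_prefix_tags('LANGUAGE', ['english', 'french']);
# would keep the needles tagged LANGUAGE-english or LANGUAGE-french (and any
# needle with no LANGUAGE- tag at all), and unregister the LANGUAGE-dutch one.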
sub cleanup_needles() {
    if (!get_var('LIVE') and !get_var('CANNED')) {
        ## Unregister smaller hub needles. Live and 'canned' installers have
        ## a smaller hub with no repository spokes. On other images we want
        ## to wait for repository setup to complete, but if we match that
        ## spoke's "ready" icon, it breaks live and canned because they
        ## don't have that spoke. So we have a needle which doesn't match
        ## on that icon, but we unregister it for other installs so they
        ## don't match on it too soon.
        unregister_needle_tags("INSTALLER-smallhub");
    }

    # Unregister desktop needles of other desktops when DESKTOP is specified
    if (get_var('DESKTOP')) {
        unregister_prefix_tags('DESKTOP', [ get_var('DESKTOP') ]);
    }

    # Unregister non-language-appropriate needles. See unregister_prefix_tags
    # for details; basically all needles with at least one LANGUAGE- tag will
    # be unregistered unless they match the current language.
    my $langref = [ get_var('LANGUAGE') || 'english' ];
    unregister_prefix_tags('LANGUAGE', $langref);
}
$needle::cleanuphandler = \&cleanup_needles;
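# os-autoinst invokes this handler when it loads the needles, so the
# filtering above should take effect before any needle matching happens.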

## TEST LOADING SUBROUTINES

sub load_upgrade_tests() {
    # all upgrade tests include: boot phase (where bootloader and
    # encryption are handled if necessary), preinstall phase (where
    # packages are upgraded and dnf-plugin-system-upgrade installed),
    # run phase (where the upgrade is run) and postinstall phase (where
    # we check that Fedora was upgraded successfully). The PREUPGRADE
    # variable can be used to specify additional test modules to run
    # after the preinstall phase but before the run phase, and the
    # POSTINSTALL variable can be used to specify additional test
# modules to run after the upgrade postinstall phase.
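    # For example (hypothetical variable values): PREUPGRADE="podman" and
    # POSTINSTALL="base_service_manipulation" would load tests/podman.pm
    # between upgrade_preinstall and upgrade_run, and
    # tests/base_service_manipulation.pm after upgrade_postinstall.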
    autotest::loadtest "tests/upgrade_boot.pm";
    # if static networking config is needed we must do it at this point
    if (get_var("POST_STATIC")) {
        autotest::loadtest "tests/_post_network_static.pm";
    }
    autotest::loadtest "tests/upgrade_preinstall.pm";
    # generic pre-upgrade test load
    if (get_var("PREUPGRADE")) {
        my @pus = split(/ /, get_var("PREUPGRADE"));
        foreach my $pu (@pus) {
            autotest::loadtest "tests/${pu}.pm";
        }
    }
autotest::loadtest "tests/upgrade_run.pm";
|
Test upgrade of FreeIPA server and client deployment
Summary:
This adds an upgrade variant of the FreeIPA tests, with only
the simplest client enrolment (sssd) for now. The server test
starts from the N-1 release and deploys the domain controller
role. The client test similarly starts from the N-1 release
and, when the server is deployed, enrols as a domain client.
Then the server upgrades itself, while the client waits (as the
server is its name server). Then the client upgrades itself,
while the server does some self-checks. The server then waits
for the client to do its checks before decommissioning itself,
as usual. So, summary: *deployment* of both server and client
occurs on N-1, then both are upgraded, then the actual *checks*
occur on N.
In my testing, this all more or less works, except the role
decommission step fails. This failure seems to be a genuine one
so far as I can tell; I intend to file a bug for it soon.
Test Plan:
Run the new tests, check they work. Run the existing
FreeIPA tests (both the compose and the update variants), check
they both behave the same.
Reviewers: jsedlak, jskladan
Reviewed By: jsedlak
Subscribers: tflink
Differential Revision: https://phab.qa.fedoraproject.org/D1204
2017-05-17 19:39:45 +00:00
|
|
|
# handle additional postinstall tests
|
|
|
|
if (get_var("POSTINSTALL")) {
|
|
|
|
set_var('POSTINSTALL', "upgrade_postinstall " . get_var("POSTINSTALL"));
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
set_var('POSTINSTALL', "upgrade_postinstall");
|
|
|
|
}
|
2015-05-13 11:00:46 +00:00
|
|
|
}
sub load_install_tests() {
    # CoreOS is special, so we handle that here
    if (get_var("SUBVARIANT") eq "CoreOS") {
        autotest::loadtest "tests/_coreos_install.pm";
        return;
    }

    # a normal installation test consists of several phases, some of which are
    # loaded automatically and others of which are loaded based on which
    # environment variables are set

    # generally speaking, an install test consists of: boot phase, customization
    # phase, installation and reboot phase, postinstall phase

    # the boot phase is loaded automatically every time
autotest::loadtest "tests/_boot_to_anaconda.pm";
|
2015-01-30 09:35:13 +00:00
|
|
|
|
2019-11-05 20:13:33 +00:00
|
|
|
# if this is a kickstart or VNC install, that's all folks
|
|
|
|
return if (get_var("KICKSTART") || get_var("VNC_SERVER"));
|
2015-02-04 12:02:54 +00:00
|
|
|
|
2019-06-28 17:03:03 +00:00
|
|
|
    # Root password and user creation spokes are suppressed on
    # Workstation live install and Silverblue DVD install, so we do
    # not want to try and use them. Setting this in the templates is
    # tricky as it gets set for post-install tests too that way, and
    # we don't want that
    if ((get_var('LIVE') || get_var('CANNED')) && get_var('DESKTOP') eq 'gnome') {
        set_var('INSTALLER_NO_ROOT', '1');
        # this is effectively a forced install_no_user
        set_var('INSTALL_NO_USER', '1');
    }

    if (get_var('ANACONDA_TEXT')) {
        # since it differs so much, handle text installation separately
        autotest::loadtest "tests/install_text.pm";
        return;
    }

    ## Networking
    if (get_var('ANACONDA_STATIC')) {
        autotest::loadtest "tests/_anaconda_network_static.pm";
    }

    ## Installation source
    if (get_var('MIRRORLIST_GRAPHICAL') || get_var("REPOSITORY_GRAPHICAL") || get_var("ADD_REPOSITORY_GRAPHICAL")) {
        autotest::loadtest "tests/install_source_graphical.pm";
        autotest::loadtest "tests/_check_install_source.pm";
    }
    if (get_var("REPOSITORY_VARIATION") || get_var("ADD_REPOSITORY_VARIATION")) {
        autotest::loadtest "tests/_check_install_source.pm";
    }

    ## Select package set. Minimal is the default, if 'default' is specified, skip selection.
    autotest::loadtest "tests/_software_selection.pm";

    ## Disk partitioning.
    # If PARTITIONING is set, we pick the storage test
    # to run based on the value (usually we run the test with the name
    # that matches the value, except for a couple of commented cases).
    my $storage = '';
    my $partitioning = get_var('PARTITIONING');
    # if PARTITIONING is unset, or one of the values listed below, use
    # disk_guided_empty, which is the simplest / 'default' case.
    if (! $partitioning || $partitioning ~~ ['guided_empty', 'guided_free_space']) {
        $storage = "tests/disk_guided_empty.pm";
    }
    else {
        $storage = "tests/disk_".$partitioning.".pm";
    }
autotest::loadtest $storage;
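    # Illustrative mapping (hypothetical value): PARTITIONING=custom_with_swap
    # would load tests/disk_custom_with_swap.pm here, while an unset
    # PARTITIONING falls back to tests/disk_guided_empty.pm.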

    if (get_var("ENCRYPT_PASSWORD")) {
        autotest::loadtest "tests/disk_guided_encrypted.pm";
    }

    # Start installation, set user & root passwords, reboot
    # install and reboot phase is loaded automatically every time (except when KICKSTART is set)
    autotest::loadtest "tests/_do_install_and_reboot.pm";
}

sub _load_instance {
    # loads a specific 'instance' of a given test. See next function
    # for more details.
    my ($test, $instance) = @_;
    $test .= "_${instance}" if $instance;
    autotest::loadtest "${test}.pm";
}
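
# For example (comment only): _load_instance("tests/_console_wait_login", 2)
# loads "tests/_console_wait_login_2.pm", while an instance of 0 or undef
# loads plain "tests/_console_wait_login.pm".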
sub _load_early_postinstall_tests {
    # Early post-install test loading. Split out as a separate sub
    # because we do this all twice on update tests.

    # openQA isn't very good at handling jobs where the same module
    # is loaded more than once, and fixing that will be a bit complex
    # and no-one got around to it yet. So for now, we use a bit of a
    # hack: for modules we know may get loaded multiple times, we have
    # symlinks named _2, _3 etc. This function can be passed an arg
    # specifying which 'instance' of the tests to use.
    my ($instance) = @_;
    $instance //= 0;

    # Unlock encrypted storage volumes, if necessary. The test name here
    # follows the 'storage post-install' convention, but must be run earlier.
    if (get_var("ENCRYPT_PASSWORD")) {
        _load_instance("tests/disk_guided_encrypted_postinstall", $instance);
    }

    # For now, there's no possibility to get a graphical desktop on
    # Modular composes, so short-circuit here for those
    if (get_var("MODULAR")) {
        _load_instance("tests/_console_wait_login", $instance);
        return;
    }

    # Appropriate login method for install type
    if (get_var("DESKTOP")) {
        _load_instance("tests/_graphical_wait_login", $instance);
    }
    # Test non-US input at this point, on language tests
    if (get_var("SWITCHED_LAYOUT") || get_var("INPUT_METHOD")) {
        _load_instance("tests/_graphical_input", $instance);
    }

if (get_var("LANGUAGE") eq "japanese" || get_var("LANGUAGE") eq "arabic") {
|
|
|
|
_load_instance("tests/check_default_fonts", $instance);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-04-27 11:02:53 +00:00
|
|
|
# We do not want to run this on Desktop installations or when
|
|
|
|
# the installation is interrupted on purpose.
|
|
|
|
unless (get_var("DESKTOP") || get_var("CRASH_REPORT")) {
|
2017-04-13 21:47:17 +00:00
|
|
|
_load_instance("tests/_console_wait_login", $instance);
|
2015-03-18 21:28:03 +00:00
|
|
|
}
|
Add support for testing updates
Summary:
This adds an entirely new workflow for testing distribution
updates. The `ADVISORY` variable is introduced: when set,
`main.pm` will load an early post-install test that sets up
a repository containing the packages from the specified update,
runs `dnf -y update`, and reboots. A new templates file is
added, `templates-updates`, which adds two new flavors called
`updates-server` and `updates-workstation`, each containing
job templates for appropriate post-install tests. Scheduler is
expected to post `ADVISORY=(update ID) HDD_1=(base image)
FLAVOR=updates-(server|workstation)`, where (base image) is one
of the stable release base disk images produced by `createhdds`
and usually used for upgrade testing. This will result in the
appropriate job templates being loaded.
We rejig postinstall test loading and static network config a
bit so that this works for both the 'compose' and 'updates' test
flows: we have to ensure we bring up networking for the tap
tests before we try and install the updates, but still allow
later adjustment of the configuration. We take advantage of the
openQA feature that was added a few months back to run the same
module multiple times, so the `_advisory_update` module can
reboot after installing the updates and the modules that take
care of bootloader, encryption and login get run again. This
looks slightly wacky in the web UI, though - it doesn't show the
later runs of each module.
We also use the recently added feature to specify `+HDD_1` in
the test suites which use a disk image uploaded by an earlier
post-install test, so the test suite value will take priority
over the value POSTed by the scheduler for those tests, and we
will use the uploaded disk image (and not the clean base image
POSTed by the scheduler) for those tests.
My intent here is to enhance the scheduler, adding a consumer
which listens out for critpath updates, and runs this test flow
for each one, then reports the results to ResultsDB where Bodhi
could query and display them. We could also add a list of other
packages to have one or both sets of update tests run on it, I
guess.
Test Plan:
Try a post something like:
HDD_1=disk_f25_server_3_x86_64.img DISTRI=fedora VERSION=25
FLAVOR=updates-server ARCH=x86_64 BUILD=FEDORA-2017-376ae2b92c
ADVISORY=FEDORA-2017-376ae2b92c CURRREL=25 PREVREL=24
Pick an appropriate `ADVISORY` (ideally, one containing some
packages which might actually be involved in the tests), and
matching `FLAVOR` and `HDD_1`. The appropriate tests should run,
a repo with the update packages should be created and enabled
(and dnf update run), and the tests should work properly. Also
test a regular compose run to make sure I didn't break anything.
Reviewers: jskladan, jsedlak
Reviewed By: jsedlak
Subscribers: tflink
Differential Revision: https://phab.qa.fedoraproject.org/D1143
2017-01-25 16:16:12 +00:00
|
|
|
}
sub load_postinstall_tests() {
    # special case for the memory check test, as it doesn't need to boot
    # the installed system: just load its test and return
    if (get_var("MEMCHECK")) {
        autotest::loadtest "tests/_memcheck.pm";
        return;
    }
    # VNC client test's work is done once install is complete
    if (get_var("VNC_CLIENT")) {
        return;
    }

    # load the early tests
    _load_early_postinstall_tests();

    # do standard post-install static network config if the var is set
    # and this is not an upgrade test (this is done elsewhere in the
    # upgrade workflow)
    # this is here, not in _load_early_postinstall_tests, as there's no
    # need to do it twice
if (get_var("POST_STATIC") && !get_var("UPGRADE")) {
|
Add support for testing updates
Summary:
This adds an entirely new workflow for testing distribution
updates. The `ADVISORY` variable is introduced: when set,
`main.pm` will load an early post-install test that sets up
a repository containing the packages from the specified update,
runs `dnf -y update`, and reboots. A new templates file is
added, `templates-updates`, which adds two new flavors called
`updates-server` and `updates-workstation`, each containing
job templates for appropriate post-install tests. Scheduler is
expected to post `ADVISORY=(update ID) HDD_1=(base image)
FLAVOR=updates-(server|workstation)`, where (base image) is one
of the stable release base disk images produced by `createhdds`
and usually used for upgrade testing. This will result in the
appropriate job templates being loaded.
We rejig postinstall test loading and static network config a
bit so that this works for both the 'compose' and 'updates' test
flows: we have to ensure we bring up networking for the tap
tests before we try and install the updates, but still allow
later adjustment of the configuration. We take advantage of the
openQA feature that was added a few months back to run the same
module multiple times, so the `_advisory_update` module can
reboot after installing the updates and the modules that take
care of bootloader, encryption and login get run again. This
looks slightly wacky in the web UI, though - it doesn't show the
later runs of each module.
We also use the recently added feature to specify `+HDD_1` in
the test suites which use a disk image uploaded by an earlier
post-install test, so the test suite value will take priority
over the value POSTed by the scheduler for those tests, and we
will use the uploaded disk image (and not the clean base image
POSTed by the scheduler) for those tests.
My intent here is to enhance the scheduler, adding a consumer
which listens out for critpath updates, and runs this test flow
for each one, then reports the results to ResultsDB where Bodhi
could query and display them. We could also add a list of other
packages to have one or both sets of update tests run on it, I
guess.
Test Plan:
Try a post something like:
HDD_1=disk_f25_server_3_x86_64.img DISTRI=fedora VERSION=25
FLAVOR=updates-server ARCH=x86_64 BUILD=FEDORA-2017-376ae2b92c
ADVISORY=FEDORA-2017-376ae2b92c CURRREL=25 PREVREL=24
Pick an appropriate `ADVISORY` (ideally, one containing some
packages which might actually be involved in the tests), and
matching `FLAVOR` and `HDD_1`. The appropriate tests should run,
a repo with the update packages should be created and enabled
(and dnf update run), and the tests should work properly. Also
test a regular compose run to make sure I didn't break anything.
Reviewers: jskladan, jsedlak
Reviewed By: jsedlak
Subscribers: tflink
Differential Revision: https://phab.qa.fedoraproject.org/D1143
2017-01-25 16:16:12 +00:00
|
|
|
autotest::loadtest "tests/_post_network_static.pm";
|
|
|
|
}
|
|
|
|
|
2019-01-29 09:06:16 +00:00
|
|
|
    # if scheduler passed an advisory or task ID, update packages from that
    # advisory or task ID (intended for the updates testing workflow, so we
    # install the updates to be tested). Don't do this for UPGRADE tests, as
    # the update gets installed as part of the upgrade in that case and we
    # don't need the extra reboot. Don't do this for INSTALL test(s); these
    # are checking that an installer image built from the update works and do
# not install the update themselves in this manner
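    # (ADVISORY_OR_TASK would typically hold a Bodhi update ID such as
    # FEDORA-2017-376ae2b92c, or a Koji task ID; example value for
    # illustration only.)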
    if (get_var("ADVISORY_OR_TASK") && !get_var("UPGRADE") && !get_var("INSTALL")) {
        autotest::loadtest "tests/_advisory_update.pm";
        # now load the early boot tests again, as _advisory_update reboots
        _load_early_postinstall_tests(2);
    }
    # from now on, we have a fully installed and booted system with the root/specified user logged in

    # If there is a post-install test to verify storage configuration worked
    # correctly, run it. Again we determine the test name based on the value
    # of PARTITIONING
    my $storagepost = '';
    if (get_var('PARTITIONING')) {
        my $casedir = get_var("CASEDIR");
        my $loc = "tests/disk_" . get_var('PARTITIONING') . "_postinstall.pm";
        $storagepost = $loc if (-e "$casedir/$loc");
    }
    autotest::loadtest $storagepost if ($storagepost);

    if (get_var("UEFI") && !get_var("NO_UEFI_POST") && !get_var("START_AFTER_TEST")) {
        autotest::loadtest "tests/uefi_postinstall.pm";
    }

    # console avc / crash check
    # it makes no sense to run this after logging in on most post-
    # install tests (hence !BOOTFROM) and we do not want it
    # on crashed installations (hence !CRASH_REPORT), but we *do* want
    # to run it on upgrade tests after upgrading (hence UPGRADE).
    # Desktops have specific tests for this (hence !DESKTOP). For
    # desktop upgrades we should really upload a disk image at the end
    # of upgrade and run all the desktop post-install tests on that
if (!get_var("DESKTOP") && !get_var("CRASH_REPORT") && (!get_var("BOOTFROM") || get_var("UPGRADE"))) {
|
2016-12-08 16:58:29 +00:00
|
|
|
autotest::loadtest "tests/_console_avc_crash.pm";
|
|
|
|
}
|
|
|
|
|
2021-07-28 07:02:02 +00:00
|
|
|
# generic post-install test load
|
|
|
|
if (get_var("POSTINSTALL")) {
|
|
|
|
my @pis = split(/ /, get_var("POSTINSTALL"));
|
|
|
|
# For each test in POSTINSTALL, load the test
|
|
|
|
foreach my $pi (@pis) {
|
|
|
|
autotest::loadtest "tests/${pi}.pm";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
    # If POSTINSTALL_PATH is set, we will load all available test files from that location
    # as postinstall tests.
    elsif (get_var("POSTINSTALL_PATH")) {
        my $casedir = get_var("CASEDIR");
        my $path = get_var("POSTINSTALL_PATH");
        # Read the list of files on that path,
        my @pis = glob "${casedir}/${path}/*.pm";
        # and load each of them.
        foreach my $pi (@pis) {
            $pi = basename($pi);
            autotest::loadtest "$path/$pi";
        }
}
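    # Example (hypothetical value): POSTINSTALL_PATH="tests/my_postinstall_dir"
    # would load every .pm file found under that directory in CASEDIR, in the
    # order glob() returns them.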

    # load the ADVISORY / KOJITASK post-install test - this records which
    # update or task packages were actually installed during the test. Don't
    # do this for INSTALL test(s); these are checking that an installer image
    # built from the update works and do not install the update themselves.
    if (get_var("ADVISORY_OR_TASK") && !get_var("INSTALL")) {
        # don't do this for support server unless the update is for the same
        # release as the support server disk image, as we don't install the
        # updates on support server when they differ
        unless (get_var("TEST") eq "support_server" && get_var("VERSION") ne get_var("CURRREL")) {
            autotest::loadtest "tests/_advisory_post.pm";
        }
    }

    # we should shut down before uploading disk images
    if (get_var("STORE_HDD_1") || get_var("STORE_HDD_2") || get_var("PUBLISH_HDD_1")) {
        autotest::loadtest "tests/_console_shutdown.pm";
    }
}

## LOADING STARTS HERE

# if the user set ENTRYPOINT, run the required test(s) directly
# (good for tests where it doesn't make sense to use _boot_to_anaconda, _software_selection etc.)
# if you want to run more than one test via ENTRYPOINT, separate them with space
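# e.g. (illustrative names only) ENTRYPOINT="my_first_test my_second_test"
# would load tests/my_first_test.pm and then tests/my_second_test.pm.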
if (get_var("ENTRYPOINT")) {
    my @entrs = split(/ /, get_var("ENTRYPOINT"));
    foreach my $entr (@entrs) {
        autotest::loadtest "tests/${entr}.pm";
    }
}
elsif (get_var("UPGRADE")) {
    load_upgrade_tests;
}
elsif ((!get_var("START_AFTER_TEST") && !get_var("BOOTFROM") && !get_var("IMAGE_DEPLOY")) || get_var("INSTALL")) {
    # for now we can assume START_AFTER_TEST and BOOTFROM mean the
    # test picks up after an install, and IMAGE_DEPLOY means we're
    # deploying a disk image (no installer) so in those cases we skip
    # to post-install, unless the override INSTALL var is set

    if (get_var("PREINSTALL")) {
        # the specified module(s) are supposed to first boot to rescue mode and
        # do any required actions before exiting rescue mode (triggering a reboot).
        # the reboot will then run through the normal install steps of load_install_tests.
        my @pis = split(/ /, get_var("PREINSTALL"));
        foreach my $pi (@pis) {
            autotest::loadtest "tests/${pi}.pm";
        }
    }

    load_install_tests;
}

if (!get_var("ENTRYPOINT")) {
    load_postinstall_tests;
}

# load application start-stop tests
if (get_var("STARTSTOP")) {
    my $desktop = get_var('DESKTOP');
    my $casedir = get_var('CASEDIR');

    if ($desktop eq 'gnome') {
        # Run this test to preset the environment
        autotest::loadtest "tests/apps_gnome_preset.pm";
    }

    # Find all tests from a directory defined by the DESKTOP variable
    my @apptests = glob "${casedir}/tests/apps_startstop/${desktop}/*.pm";
    # Now load them
    foreach my $filepath (@apptests) {
        my $file = basename($filepath);
        autotest::loadtest "tests/apps_startstop/${desktop}/${file}";
    }
    if ($desktop eq 'gnome') {
        # Run this test to check that the required applications have registered.
        autotest::loadtest "tests/workstation_core_applications.pm";
    }
}
1;
# vim: set sw=4 et: