2016-06-09 15:43:46 +00:00
|
|
|
use base "installedtest";
|
|
|
|
use strict;
|
2019-08-16 00:51:57 +00:00
|
|
|
use anaconda;
|
2016-06-09 15:43:46 +00:00
|
|
|
use testapi;
|
|
|
|
use lockapi;
|
|
|
|
use mmapi;
|
2017-01-18 07:15:44 +00:00
|
|
|
use tapnet;
|
2019-01-25 13:41:32 +00:00
|
|
|
use utils;
|
2016-06-09 15:43:46 +00:00
|
|
|
|
2019-08-16 00:51:57 +00:00
|
|
|
sub _pxe_setup {
    # Configure this VM as a PXE boot server for child tests: TFTP via
    # dnsmasq, plus the appropriate bootloader and config for the test
    # arch, a kernel/initramfs pair pulled from the compose, and a
    # kickstart embedded in the initramfs. Not used for update tests.
    # don't get hung up on slow mirrors when DNFing...
    repos_mirrorlist;
    # create necessary dirs
    assert_script_run "mkdir -p /var/lib/tftpboot/fedora";
    # basic tftp config
    assert_script_run "printf 'enable-tftp\ntftp-root=/var/lib/tftpboot\ntftp-secure\n' >> /etc/dnsmasq.conf";
    # pxe boot config: match client arch and hand out the right loader.
    # we boot grub directly not shim on aarch64 as shim fails to boot
    # with 'Synchronous Exception'
    # https://bugzilla.redhat.com/show_bug.cgi?id=1592148
    assert_script_run "printf 'dhcp-match=set:efi-x86_64,option:client-arch,7\ndhcp-match=set:efi-x86_64,option:client-arch,9\ndhcp-match=set:bios,option:client-arch,0\ndhcp-match=set:efi-aarch64,option:client-arch,11\ndhcp-match=set:ppc64,option:client-arch,12\ndhcp-match=set:ppc64,option:client-arch,13\ndhcp-boot=tag:efi-x86_64,\"shim.efi\"\ndhcp-boot=tag:bios,\"pxelinux.0\"\ndhcp-boot=tag:efi-aarch64,\"grubaa64.efi\"\ndhcp-boot=tag:ppc64,\"boot/grub2/powerpc-ieee1275/core.elf\"\n' >> /etc/dnsmasq.conf";
    # install and configure bootloaders.
    # $ourversion is the release this support server runs, used as
    # --releasever when installing bootloader packages below.
    my $ourversion = get_var("CURRREL");
    my $arch = get_var("ARCH");

    if ($arch eq 'x86_64') {
        # x86_64: use syslinux for BIOS, grub2 with 'linuxefi' for UEFI
        assert_script_run "mkdir -p /var/tmp/fedora";
        assert_script_run "mkdir -p /var/lib/tftpboot/pxelinux.cfg";
        # install bootloader packages: shim/grub2 go into a throwaway
        # installroot so we can copy the EFI binaries out without
        # touching this system's own bootloader
        assert_script_run "dnf -y install syslinux", 120;
        assert_script_run "dnf -y --releasever=$ourversion --installroot=/var/tmp/fedora install shim-x64 grub2-efi-x64", 300;
        # copy bootloader files to tftp root
        assert_script_run "cp /usr/share/syslinux/{pxelinux.0,vesamenu.c32,ldlinux.c32,libcom32.c32,libutil.c32} /var/lib/tftpboot";
        assert_script_run "cp /var/tmp/fedora/boot/efi/EFI/fedora/{shim.efi,grubx64.efi} /var/lib/tftpboot";
        # bootloader configs
        # BIOS
        assert_script_run "printf 'default vesamenu.c32\nprompt 1\ntimeout 600\n\nlabel linux\n menu label ^Install Fedora 64-bit\n menu default\n kernel fedora/vmlinuz\n append initrd=fedora/initrd.img inst.ks=file:///ks.cfg ip=dhcp\nlabel local\n menu label Boot from ^local drive\n localboot 0xffff\n' >> /var/lib/tftpboot/pxelinux.cfg/default";
        # UEFI
        assert_script_run "printf 'function load_video {\n insmod efi_gop\n insmod efi_uga\n insmod ieee1275_fb\n insmod vbe\n insmod vga\n insmod video_bochs\n insmod video_cirrus\n}\n\nload_video\nset gfxpayload=keep\ninsmod gzio\n\nmenuentry \"Install Fedora 64-bit\" --class fedora --class gnu-linux --class gnu --class os {\n linuxefi fedora/vmlinuz ip=dhcp inst.ks=file:///ks.cfg\n initrdefi fedora/initrd.img\n}' >> /var/lib/tftpboot/grub.cfg";
        # debugging leftovers: upload the generated configs so a failed
        # child install can be diagnosed from the web UI
        upload_logs "/etc/dnsmasq.conf";
        upload_logs "/var/lib/tftpboot/grub.cfg";
        upload_logs "/var/lib/tftpboot/pxelinux.cfg/default";
    }

    elsif ($arch eq 'ppc64le') {
        # ppc64le: use grub2 for OFW
        # install bootloader tools package
        assert_script_run "dnf -y install grub2-tools-extra", 180;
        # install a network bootloader to tftp root
        assert_script_run "grub2-mknetdir --net-directory=/var/lib/tftpboot";
        # bootloader config
        assert_script_run "printf 'set default=0\nset timeout=5\n\nmenuentry \"Install Fedora 64-bit\" --class fedora --class gnu-linux --class gnu --class os {\n linux fedora/vmlinuz ip=dhcp inst.ks=file:///ks.cfg\n initrd fedora/initrd.img\n}' >> /var/lib/tftpboot/boot/grub2/grub.cfg";
        # debugging leftovers: upload the generated configs so a failed
        # child install can be diagnosed from the web UI
        upload_logs "/etc/dnsmasq.conf";
        upload_logs "/var/lib/tftpboot/boot/grub2/grub.cfg";
    }

    elsif ($arch eq 'aarch64') {
        # aarch64: use grub2 with 'linux' for UEFI
        # copy bootloader files to tftp root (we just use the system
        # bootloader, no need to install packages)
        assert_script_run "cp /boot/efi/EFI/fedora/{shim.efi,grubaa64.efi} /var/lib/tftpboot";
        # bootloader config
        assert_script_run "printf 'function load_video {\n insmod efi_gop\n insmod efi_uga\n insmod ieee1275_fb\n insmod vbe\n insmod vga\n insmod video_bochs\n insmod video_cirrus\n}\n\nload_video\nset gfxpayload=keep\ninsmod gzio\n\nmenuentry \"Install Fedora 64-bit\" --class fedora --class gnu-linux --class gnu --class os {\n linux fedora/vmlinuz ip=dhcp inst.ks=file:///ks.cfg\n initrd fedora/initrd.img\n}' >> /var/lib/tftpboot/grub.cfg";
        # debugging leftovers: upload the generated configs so a failed
        # child install can be diagnosed from the web UI
        upload_logs "/etc/dnsmasq.conf";
        upload_logs "/var/lib/tftpboot/grub.cfg";
    }

    # download kernel and initramfs
    my $location = get_var("LOCATION");
    my $kernpath = "images/pxeboot";
    # for some crazy reason these are in a different place for ppc64
    $kernpath = "ppc/ppc64" if ($arch eq 'ppc64le');
    assert_script_run "curl -o /var/lib/tftpboot/fedora/vmlinuz $location/Everything/${arch}/os/${kernpath}/vmlinuz";
    assert_script_run "curl -o /var/lib/tftpboot/fedora/initrd.img $location/Everything/${arch}/os/${kernpath}/initrd.img";
    # extract our IP from POST_STATIC (everything before the first space)
    my $poststatic = get_var("POST_STATIC");
    my $ip = substr($poststatic, 0, index($poststatic, " "));
    # get a kickstart to embed in the initramfs, for testing:
    # https://fedoraproject.org/wiki/QA:Testcase_Kickstart_File_Path_Ks_Cfg
    assert_script_run "curl -o ks.cfg https://fedorapeople.org/groups/qa/kickstarts/root-user-crypted-net.ks";
    # tweak the repo config in it: point it at our own NFS export
    assert_script_run "sed -i -e 's,^url.*,nfs --server $ip --dir /repo --opts nfsvers=4,g' ks.cfg";
    # embed it
    assert_script_run "echo ks.cfg | cpio -c -o >> /var/lib/tftpboot/fedora/initrd.img";
    # chown to dnsmasq (tftp-secure requires the tftp root to be owned
    # by the user dnsmasq runs as) and restore SELinux contexts
    assert_script_run "chown -R dnsmasq /var/lib/tftpboot";
    assert_script_run "restorecon -vr /var/lib/tftpboot";
    # open firewall ports
    assert_script_run "firewall-cmd --add-service=tftp";
}
|
|
|
|
|
2016-06-09 15:43:46 +00:00
|
|
|
sub run {
    # Main test body: turn this VM into the cluster support server.
    # Provides DHCP/DNS (dnsmasq), optionally PXE and iSCSI, and NFS
    # exports (kickstart, install repo, ISO), then waits for the child
    # tests to finish.
    my $self = shift;
    # systemd-resolved conflicts with dnsmasq; if it's active, shut it
    # down and let NetworkManager regenerate resolv.conf
    unless (script_run "systemctl is-active systemd-resolved.service") {
        script_run "systemctl stop systemd-resolved.service";
        script_run "systemctl disable systemd-resolved.service";
        script_run "rm -f /etc/resolv.conf";
        script_run "systemctl restart NetworkManager";
    }

    ## DNS / DHCP (dnsmasq)

    # write the base config (domain, DHCP pool, router option)
    assert_script_run "printf 'domain=test.openqa.fedoraproject.org\ndhcp-range=172.16.2.150,172.16.2.199\ndhcp-option=option:router,172.16.2.2\n' > /etc/dnsmasq.conf";
    # do PXE setup if this is not an update test
    _pxe_setup() if (get_var("SUPPORT_PXE"));
    # open firewall ports
    assert_script_run "firewall-cmd --add-service=dhcp";
    assert_script_run "firewall-cmd --add-service=dns";
    # start server and confirm it came up
    assert_script_run "systemctl restart dnsmasq.service";
    assert_script_run "systemctl is-active dnsmasq.service";

    ## ISCSI

    if (get_var("SUPPORT_ISCSI")) {
        # configure and start up an iscsi target backed by /dev/vdb
        assert_script_run "printf '<target iqn.2016-06.local.domain:support.target1>\n backing-store /dev/vdb\n incominguser test weakpassword\n</target>' > /etc/tgt/conf.d/openqa.conf";
        # open firewall port
        assert_script_run "firewall-cmd --add-service=iscsi-target";
        assert_script_run "systemctl restart tgtd.service";
        assert_script_run "systemctl is-active tgtd.service";
    }

    ## NFS

    # create the file share
    assert_script_run "mkdir -p /export";
    # get the kickstart
    assert_script_run "curl -o /export/root-user-crypted-net.ks https://fedorapeople.org/groups/qa/kickstarts/root-user-crypted-net.ks";
    if (get_var("ADVISORY_OR_TASK")) {
        # update tests: just export the pre-built update repository
        assert_script_run "echo '/mnt/updateiso/update_repo 172.16.2.0/24(ro)' >> /etc/exports";
    }
    else {
        # compose tests: build repo and ISO shares from the attached DVD
        # create the repo share
        assert_script_run "mkdir -p /repo";
        # create a mount point for the ISO and mount it
        assert_script_run "mkdir -p /mnt/iso";
        assert_script_run "mount /dev/cdrom /mnt/iso";
        # copy the contents of the ISO to the repo share
        assert_script_run "dnf -y install rsync", 180;
        assert_script_run "rsync -av /mnt/iso/ /repo", 180;
        # put the updates image in the NFS repo (for testing this update
        # image delivery method)
        assert_script_run "curl -o /repo/images/updates.img https://fedorapeople.org/groups/qa/updates/updates-openqa.img";
        # create the iso share and recreate an iso file in it
        assert_script_run "mkdir -p /iso";
        copy_devcdrom_as_isofile('/iso/image.iso');
        # set up the exports
        assert_script_run "printf '/export 172.16.2.0/24(ro)\n/repo 172.16.2.0/24(ro)\n/iso 172.16.2.0/24(ro)' > /etc/exports";
    }

    # open firewall port
    assert_script_run "firewall-cmd --add-service=nfs";
    # start the server and confirm it came up
    assert_script_run "systemctl restart nfs-server.service";
    assert_script_run "systemctl is-active nfs-server.service";

    # report ready, wait for children
    mutex_create('support_ready');
    wait_for_children;
    # upload logs in case of child failures
    $self->post_fail_hook();
}
|
|
|
|
|
|
|
|
sub test_flags {
    # fatal: if the support server fails, every child test in the
    # cluster is doomed, so there is no point continuing
    my %flags = (fatal => 1);
    return \%flags;
}
|
|
|
|
|
|
|
|
1;    # modules must return a true value
|
|
|
|
|
|
|
|
# vim: set sw=4 et:
|