Backport cloud image tests to RHEL 8
Related: rhbz#1653934

Note: for now use Fedora 28 for the Docker container until CentOS 8 is released or we figure out how to use subscriptions for official RHEL 8 images.
parent
7dad328424
commit
55bdb8a27a
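The backported tests are driven through the Makefile targets touched below; a minimal usage sketch (the cloud targets assume the matching provider credentials are already exported, and test_images needs a privileged environment because it uses losetup):

    make test-in-docker     # build the welder/lorax-tests image and run the unit tests inside it
    make test_images        # ext4-filesystem and partitioned-disk composes (needs losetup)
    make test_aws           # build an AMI and boot it in EC2
    make test_azure         # build a VHD and boot it in Azure
    make test_openstack     # build a QCOW2 image and boot it in OpenStack
    make test_vmware        # build a VMDK image and boot it in vSphere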
@@ -8,10 +8,7 @@ script:
after_success:
  - |
    sudo docker create --name results-cont welder/lorax /bin/echo
    sudo docker cp results-cont:/lorax/.coverage .coverage.docker
    sudo docker rm results-cont
    cp .test-results/.coverage ./.coverage.docker
    pip install coverage coveralls
    coverage combine
@@ -1,18 +1,37 @@
FROM fedora:28

RUN dnf -y install make libgit2-glib tito python3-pylint \
    python3-nose python3-mako python3-flask \
    python3-coverage libselinux-python3 sudo \
    pykickstart python3-pytoml python3-sphinx \
    python3-semantic_version python3-rpmfluff \
    anaconda-tui python3-gevent beakerlib && \
    useradd weldr

RUN mkdir /lorax
COPY . /lorax
# remove byte-compiled files to avoid issues between Python 2/3
# this can happen when you switch between rhel7 and master branches
RUN find /lorax -name "*.pyc" -exec rm -f {} \;

WORKDIR /lorax
RUN make test

FROM registry.fedoraproject.org/fedora:28
RUN dnf -y install \
    anaconda-tui \
    libgit2-glib \
    libselinux-python3 \
    make \
    pykickstart \
    python3-coverage \
    python3-coveralls \
    python3-flask \
    python3-gevent \
    python3-magic \
    python3-mako \
    python3-nose \
    python3-pocketlint \
    python3-pylint \
    python3-pyparted \
    python3-pytoml \
    python3-semantic_version \
    python3-sphinx \
    python3-rpmfluff \
    python3-librepo \
    beakerlib \
    sudo \
    tito \
    rsync \
    e2fsprogs \
    xz-lzma-compat \
    pbzip2 \
    squashfs-tools \
    qemu-img \
    which && \
    touch /.in-container
RUN useradd weldr
VOLUME /lorax-ro
VOLUME /test-results
WORKDIR /lorax-ro
33 Makefile
@@ -1,11 +1,13 @@
PYTHON ?= /usr/bin/python3
DESTDIR ?= /
DOCKER ?= docker

PKGNAME = lorax
VERSION = $(shell awk '/Version:/ { print $$2 }' $(PKGNAME).spec)
RELEASE = $(shell awk '/Release:/ { print $$2 }' $(PKGNAME).spec | sed -e 's|%.*$$||g')
TAG = lorax-$(VERSION)-$(RELEASE)

IMAGE_RELEASE = rhel8-latest

default: all

@@ -43,7 +45,23 @@ test:
	./tests/test_cli.sh

# need `losetup`, which needs Docker to be in privileged mode (--privileged)
# but even so fails in Travis CI
test_images:
	sudo -E ./tests/test_cli.sh tests/cli/test_compose_ext4-filesystem.sh \
	                            tests/cli/test_compose_partitioned-disk.sh

test_aws:
	sudo -E ./tests/test_cli.sh tests/cli/test_build_and_deploy_aws.sh

test_azure:
	sudo -E ./tests/test_cli.sh tests/cli/test_build_and_deploy_azure.sh

test_openstack:
	sudo -E ./tests/test_cli.sh tests/cli/test_build_and_deploy_openstack.sh

test_vmware:
	sudo -E ./tests/test_cli.sh tests/cli/test_build_and_deploy_vmware.sh

clean:
	-rm -rf build src/pylorax/version.py

@@ -72,9 +90,22 @@ local:
	@rm -rf /var/tmp/$(PKGNAME)-$(VERSION)
	@echo "The archive is in $(PKGNAME)-$(VERSION).tar.gz"

test-in-copy:
	rsync -aP --exclude=.git /lorax-ro/ /lorax/
	make -C /lorax/ check test
	cp /lorax/.coverage /test-results/

test-in-docker:
	sudo docker build -t welder/lorax:latest -f Dockerfile.test .
	sudo $(DOCKER) build -t welder/lorax-tests:$(IMAGE_RELEASE) -f Dockerfile.test .
	sudo $(DOCKER) run --rm -it -v `pwd`/.test-results/:/test-results -v `pwd`:/lorax-ro:ro --security-opt label=disable welder/lorax-tests:$(IMAGE_RELEASE) make test-in-copy

docs-in-docker:
	sudo $(DOCKER) run -it --rm -v `pwd`/docs/html/:/lorax/docs/html/ --security-opt label=disable welder/lorax-tests:$(IMAGE_RELEASE) make docs

ci: check test

.PHONY: ci_after_success
ci_after_success:
# nothing to do here, but Jenkins expects this to be present, otherwise fails

.PHONY: docs
@@ -21,9 +21,8 @@ rlJournalStart
        rlAssertGrep "httpd" "example-http-server.toml"

        # non-existing blueprint
        # enable test for https://github.com/weldr/lorax/issues/460
        # rlRun -t -c "$CLI blueprints save non-existing-bp" 1
        # rlAssertNotExists "non-existing-bp.toml"
        rlRun -t -c "$CLI blueprints save non-existing-bp" 1
        rlAssertNotExists "non-existing-bp.toml"
    rlPhaseEnd

    rlPhaseStartTest "blueprints push"
202 tests/cli/test_build_and_deploy_aws.sh Normal file
@@ -0,0 +1,202 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure we can build an image and deploy it inside AWS!
#
#####

. /usr/share/beakerlib/beakerlib.sh

CLI="./src/bin/composer-cli"


rlJournalStart
    rlPhaseStartSetup
        if [ -z "$AWS_ACCESS_KEY_ID" ]; then
            rlFail "AWS_ACCESS_KEY_ID is empty!"
        else
            rlLogInfo "AWS_ACCESS_KEY_ID is configured"
        fi

        if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
            rlFail "AWS_SECRET_ACCESS_KEY is empty!"
        else
            rlLogInfo "AWS_SECRET_ACCESS_KEY is configured"
        fi

        AWS_BUCKET="${AWS_BUCKET:-composerredhat}"
        AWS_REGION="${AWS_REGION:-us-east-1}"

        rlLogInfo "AWS_BUCKET=$AWS_BUCKET"
        rlLogInfo "AWS_REGION=$AWS_REGION"

        if ! rlCheckRpm "python3-pip"; then
            rlRun -t -c "dnf -y install python3-pip"
            rlAssertRpm python3-pip
        fi

        rlRun -t -c "pip3 install awscli"

        # aws configure
        [ -d ~/.aws/ ] || mkdir ~/.aws/

        if [ -f ~/.aws/config ]; then
            rlLogInfo "Reusing existing ~/.aws/config"
        else
            rlLogInfo "Creating ~/.aws/config"
            cat > ~/.aws/config << __EOF__
[default]
region = $AWS_REGION
__EOF__
        fi

        if [ -f ~/.aws/credentials ]; then
            rlLogInfo "Reusing existing ~/.aws/credentials"
        else
            rlLogInfo "Creating ~/.aws/credentials"
            cat > ~/.aws/credentials << __EOF__
[default]
aws_access_key_id = $AWS_ACCESS_KEY_ID
aws_secret_access_key = $AWS_SECRET_ACCESS_KEY
__EOF__
        fi

        # make sure the bucket exists
        rlRun -t -c "aws s3 mb s3://$AWS_BUCKET"

        # make sure the vmimport role exists
        rlRun -t -c "aws iam get-role --role-name vmimport"
    rlPhaseEnd

    rlPhaseStartTest "compose start"
        UUID=`$CLI compose start example-http-server ami`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        if [ -n "$UUID" ]; then
            until $CLI compose info $UUID | grep FINISHED; do
                rlLogInfo "Waiting for compose to finish ..."
                sleep 30
            done;
        else
            rlFail "Compose UUID is empty!"
        fi
    rlPhaseEnd

    rlPhaseStartTest "Import AMI image in AWS"
        rlRun -t -c "$CLI compose image $UUID"

        AMI="$UUID-disk.ami"

        # upload to S3
        rlRun -t -c "aws s3 cp $AMI s3://$AWS_BUCKET"

        # import image as snapshot into EC2
        cat > containers.json << __EOF__
{
    "Description": "Composer image",
    "Format": "raw",
    "UserBucket": {
        "S3Bucket": "$AWS_BUCKET",
        "S3Key": "$AMI"
    }
}
__EOF__

        IMPORT_TASK_ID=`aws ec2 import-snapshot --disk-container file://containers.json | grep ImportTaskId | cut -f4 -d'"'`

        if [ -z "$IMPORT_TASK_ID" ]; then
            rlFail "IMPORT_TASK_ID is empty!"
        fi

        # wait for the import to complete
        while aws ec2 describe-import-snapshot-tasks --filters Name=task-state,Values=active | grep $IMPORT_TASK_ID; do
            rlLogInfo "Waiting for $IMPORT_TASK_ID to complete ..."
            sleep 60
        done

        DESCRIPTION="Created by AWS-VMImport service for $IMPORT_TASK_ID"
        rlRun -t -c "aws ec2 describe-snapshots --filters Name=description,Values='$DESCRIPTION'"
        SNAPSHOT_ID=`aws ec2 describe-snapshots --filters Name=description,Values="$DESCRIPTION" | grep SnapshotId | cut -f4 -d'"'`

        if [ -z "$SNAPSHOT_ID" ]; then
            rlFail "SNAPSHOT_ID is empty!"
        else
            rlLogInfo "SNAPSHOT_ID=$SNAPSHOT_ID"
        fi

        # create an image from the imported snapshot
        AMI_ID=`aws ec2 register-image --name "Composer-Test-$UUID" --virtualization-type hvm --root-device-name /dev/sda1 \
                --block-device-mappings "[{\"DeviceName\": \"/dev/sda1\", \"Ebs\": {\"SnapshotId\": \"$SNAPSHOT_ID\"}}]" | \
                grep ImageId | cut -f4 -d'"'`

        if [ -z "$AMI_ID" ]; then
            rlFail "AMI_ID is empty!"
        else
            rlLogInfo "AMI_ID=$AMI_ID"
        fi
    rlPhaseEnd

    rlPhaseStartTest "Start EC2 instance"
        # generate a new ssh key and import it into EC2
        KEY_NAME=composer-$UUID
        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
        rlRun -t -c "aws ec2 import-key-pair --key-name $KEY_NAME --public-key-material file://$SSH_KEY_DIR/id_rsa.pub"

        # start a new instance with the selected ssh key, enable ssh
        INSTANCE_ID=`aws ec2 run-instances --image-id $AMI_ID --instance-type t2.small --key-name $KEY_NAME \
                --security-groups allow-ssh --instance-initiated-shutdown-behavior terminate --enable-api-termination \
                --count 1 | grep InstanceId | cut -f4 -d'"'`

        if [ -z "$INSTANCE_ID" ]; then
            rlFail "INSTANCE_ID is empty!"
        else
            rlLogInfo "INSTANCE_ID=$INSTANCE_ID"
        fi

        # wait for the instance to become running and to have a public IP assigned
        IP_ADDRESS=""
        while [ -z "$IP_ADDRESS" ]; do
            rlLogInfo "IP_ADDRESS is not assigned yet ..."
            sleep 10
            IP_ADDRESS=`aws ec2 describe-instances --instance-ids $INSTANCE_ID --filters=Name=instance-state-name,Values=running | grep PublicIpAddress | cut -f4 -d'"'`
        done

        rlLogInfo "Running instance IP_ADDRESS=$IP_ADDRESS"

        until aws ec2 describe-instance-status --instance-ids $INSTANCE_ID --filter Name=instance-status.status,Values=ok | grep ok; do
            rlLogInfo "Waiting for instance to initialize ..."
            sleep 60
        done
    rlPhaseEnd

    rlPhaseStartTest "Verify EC2 instance"
        # cloud-init default config differs between RHEL and Fedora
        # and ami.ks will create ec2-user only on RHEL
        CLOUD_USER="ec2-user"
        if [ -f "/etc/fedora-release" ]; then
            CLOUD_USER="fedora"
        fi

        # verify we can log in to that instance and maybe check some other details
        rlRun -t -c "ssh -oStrictHostKeyChecking=no -i $SSH_KEY_DIR/id_rsa $CLOUD_USER@$IP_ADDRESS 'cat /etc/redhat-release'"
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun -t -c "aws ec2 terminate-instances --instance-ids $INSTANCE_ID"
        rlRun -t -c "aws ec2 delete-key-pair --key-name $KEY_NAME"
        rlRun -t -c "aws ec2 deregister-image --image-id $AMI_ID"
        rlRun -t -c "aws ec2 delete-snapshot --snapshot-id $SNAPSHOT_ID"
        rlRun -t -c "aws s3 rm s3://$AWS_BUCKET/$AMI"
        rlRun -t -c "$CLI compose delete $UUID"
        rlRun -t -c "rm -rf $AMI $SSH_KEY_DIR containers.json"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
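The AWS test above only checks that its credentials are present, so a run is typically prefixed with something like the following (placeholder values, not real credentials; AWS_BUCKET and AWS_REGION fall back to the defaults shown in the script):

    export AWS_ACCESS_KEY_ID=<your access key id>
    export AWS_SECRET_ACCESS_KEY=<your secret access key>
    export AWS_BUCKET=composerredhat    # optional, script default
    export AWS_REGION=us-east-1         # optional, script default
    make test_aws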
149 tests/cli/test_build_and_deploy_azure.sh Normal file
@@ -0,0 +1,149 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure we can build an image and deploy it inside Azure!
#
#####

. /usr/share/beakerlib/beakerlib.sh

CLI="./src/bin/composer-cli"


rlJournalStart
    rlPhaseStartSetup
        if [ -z "$AZURE_SUBSCRIPTION_ID" ]; then
            rlFail "AZURE_SUBSCRIPTION_ID is empty!"
        else
            rlLogInfo "AZURE_SUBSCRIPTION_ID is configured"
        fi

        if [ -z "$AZURE_TENANT" ]; then
            rlFail "AZURE_TENANT is empty!"
        else
            rlLogInfo "AZURE_TENANT is configured"
        fi

        if [ -z "$AZURE_CLIENT_ID" ]; then
            rlFail "AZURE_CLIENT_ID is empty!"
        else
            rlLogInfo "AZURE_CLIENT_ID is configured"
        fi

        if [ -z "$AZURE_SECRET" ]; then
            rlFail "AZURE_SECRET is empty!"
        else
            rlLogInfo "AZURE_SECRET is configured"
        fi

        export AZURE_RESOURCE_GROUP="${AZURE_RESOURCE_GROUP:-composer}"
        rlLogInfo "AZURE_RESOURCE_GROUP=$AZURE_RESOURCE_GROUP"

        export AZURE_STORAGE_ACCOUNT="${AZURE_STORAGE_ACCOUNT:-composerredhat}"
        rlLogInfo "AZURE_STORAGE_ACCOUNT=$AZURE_STORAGE_ACCOUNT"

        export AZURE_STORAGE_CONTAINER="${AZURE_STORAGE_CONTAINER:-composerredhat}"
        rlLogInfo "AZURE_STORAGE_CONTAINER=$AZURE_STORAGE_CONTAINER"

        if ! rlCheckRpm "python3-pip"; then
            rlRun -t -c "dnf -y install python3-pip"
            rlAssertRpm python3-pip
        fi

        rlRun -t -c "pip3 install ansible[azure]"
    rlPhaseEnd

    rlPhaseStartTest "compose start"
        UUID=`$CLI compose start example-http-server vhd`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        if [ -n "$UUID" ]; then
            until $CLI compose info $UUID | grep FINISHED; do
                rlLogInfo "Waiting for compose to finish ..."
                sleep 30
            done;
        else
            rlFail "Compose UUID is empty!"
        fi
    rlPhaseEnd

    rlPhaseStartTest "Upload image to Azure"
        rlRun -t -c "$CLI compose image $UUID"
        IMAGE="$UUID-disk.vhd"
        OS_IMAGE_NAME="Composer-$UUID-Automated-Import"

        rlRun -t -c "ansible localhost -m azure_rm_storageblob -a \
                'resource_group=$AZURE_RESOURCE_GROUP \
                storage_account_name=$AZURE_STORAGE_ACCOUNT \
                container=$AZURE_STORAGE_CONTAINER \
                blob=$IMAGE src=$IMAGE blob_type=page'"

        # create image from blob
        rlRun -t -c "ansible localhost -m azure_rm_image -a \
                'resource_group=$AZURE_RESOURCE_GROUP name=$OS_IMAGE_NAME os_type=Linux location=eastus \
                source=https://$AZURE_STORAGE_ACCOUNT.blob.core.windows.net/$AZURE_STORAGE_CONTAINER/$IMAGE'"
    rlPhaseEnd

    rlPhaseStartTest "Start VM instance"
        VM_NAME="Composer-Auto-VM-$UUID"

        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
        SSH_PUB_KEY=`cat $SSH_KEY_DIR/id_rsa.pub`

        TMP_DIR=`mktemp -d /tmp/composer-azure.XXXXX`
        cat > $TMP_DIR/azure-playbook.yaml << __EOF__
---
- hosts: localhost
  connection: local
  tasks:
    - name: Create a VM
      azure_rm_virtualmachine:
        resource_group: $AZURE_RESOURCE_GROUP
        name: $VM_NAME
        vm_size: Standard_B2s
        location: eastus
        admin_username: azure-user
        ssh_password_enabled: false
        ssh_public_keys:
          - path: /home/azure-user/.ssh/authorized_keys
            key_data: "$SSH_PUB_KEY"
        image:
          name: $OS_IMAGE_NAME
          resource_group: $AZURE_RESOURCE_GROUP
__EOF__

        rlRun -t -c "ansible-playbook $TMP_DIR/azure-playbook.yaml"

        response=`ansible localhost -m azure_rm_virtualmachine -a "resource_group=$AZURE_RESOURCE_GROUP name=$VM_NAME"`
        rlAssert0 "Received VM info successfully" $?
        rlLogInfo "$response"

        IP_ADDRESS=`echo "$response" | grep '"ipAddress":' | cut -f4 -d'"'`
        rlLogInfo "Running instance IP_ADDRESS=$IP_ADDRESS"

        rlLogInfo "Waiting 60sec for instance to initialize ..."
        sleep 60
    rlPhaseEnd

    rlPhaseStartTest "Verify VM instance"
        # verify we can log in to that instance
        rlRun -t -c "ssh -oStrictHostKeyChecking=no -i $SSH_KEY_DIR/id_rsa azure-user@$IP_ADDRESS 'cat /etc/redhat-release'"
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun -t -c "ansible localhost -m azure_rm_virtualmachine -a 'resource_group=$AZURE_RESOURCE_GROUP name=$VM_NAME location=eastus state=absent'"
        rlRun -t -c "ansible localhost -m azure_rm_image -a 'resource_group=$AZURE_RESOURCE_GROUP name=$OS_IMAGE_NAME state=absent'"
        rlRun -t -c "ansible localhost -m azure_rm_storageblob -a 'resource_group=$AZURE_RESOURCE_GROUP storage_account_name=$AZURE_STORAGE_ACCOUNT container=$AZURE_STORAGE_CONTAINER blob=$IMAGE state=absent'"
        rlRun -t -c "$CLI compose delete $UUID"
        rlRun -t -c "rm -rf $IMAGE $SSH_KEY_DIR $TMP_DIR"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
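Similarly, the Azure test expects a service principal in the environment before make test_azure is invoked (placeholder values shown; the resource group, storage account and container fall back to the defaults in the script):

    export AZURE_SUBSCRIPTION_ID=<subscription id>
    export AZURE_TENANT=<tenant id>
    export AZURE_CLIENT_ID=<client id>
    export AZURE_SECRET=<client secret>
    make test_azure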
132 tests/cli/test_build_and_deploy_openstack.sh Normal file
@@ -0,0 +1,132 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure we can build an image and deploy it inside OpenStack!
#
#####

. /usr/share/beakerlib/beakerlib.sh

CLI="./src/bin/composer-cli"


rlJournalStart
    rlPhaseStartSetup
        if [ -z "$OS_AUTH_URL" ]; then
            rlFail "OS_AUTH_URL is empty!"
        else
            rlLogInfo "OS_AUTH_URL=$OS_AUTH_URL"
        fi

        if [ -z "$OS_USERNAME" ]; then
            rlFail "OS_USERNAME is empty!"
        else
            rlLogInfo "OS_USERNAME=$OS_USERNAME"
        fi

        export OS_TENANT_NAME="${OS_TENANT_NAME:-$OS_USERNAME}"
        rlLogInfo "OS_TENANT_NAME=$OS_TENANT_NAME"

        if [ -z "$OS_PASSWORD" ]; then
            rlFail "OS_PASSWORD is empty!"
        else
            rlLogInfo "OS_PASSWORD is configured"
        fi

        if ! rlCheckRpm "python3-pip"; then
            rlRun -t -c "dnf -y install python3-pip"
            rlAssertRpm python3-pip
        fi

        rlRun -t -c "pip3 install ansible openstacksdk"
    rlPhaseEnd

    rlPhaseStartTest "compose start"
        # workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1639326
        # temporary directory for the generated blueprint
        TMP_DIR=`mktemp -d /tmp/composer-openstack.XXXXX`
        cat > $TMP_DIR/http-with-rng.toml << __EOF__
name = "http-with-rng"
description = "HTTP image for OpenStack with rng-tools"
version = "0.0.1"

[[modules]]
name = "httpd"
version = "*"

[[modules]]
name = "rng-tools"
version = "*"
__EOF__

        rlRun -t -c "$CLI blueprints push $TMP_DIR/http-with-rng.toml"

        UUID=`$CLI compose start http-with-rng openstack`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        if [ -n "$UUID" ]; then
            until $CLI compose info $UUID | grep FINISHED; do
                rlLogInfo "Waiting for compose to finish ..."
                sleep 30
            done;
        else
            rlFail "Compose UUID is empty!"
        fi
    rlPhaseEnd

    rlPhaseStartTest "Upload QCOW2 image to OpenStack"
        rlRun -t -c "$CLI compose image $UUID"
        IMAGE="$UUID-disk.qcow2"
        OS_IMAGE_NAME="Composer-$UUID-Automated-Import"

        response=`ansible localhost -m os_image -a "name=$OS_IMAGE_NAME filename=$IMAGE is_public=no"`
        rlAssert0 "Image upload successful" $?
        rlLogInfo "$response"

        OS_IMAGE_UUID=`echo "$response" | grep '"changed": true' -A1 | grep '"id":' | cut -d'"' -f4`
        rlLogInfo "OS_IMAGE_UUID=$OS_IMAGE_UUID"
    rlPhaseEnd

    rlPhaseStartTest "Start VM instance"
        VM_NAME="Composer-Auto-VM-$UUID"

        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
        rlRun -t -c "ansible localhost -m os_keypair -a 'name=$VM_NAME-key public_key_file=$SSH_KEY_DIR/id_rsa.pub'"

        response=`ansible localhost -m os_server -a "name=$VM_NAME image=$OS_IMAGE_UUID flavor=t2.medium key_name=$VM_NAME-key auto_ip=yes"`
        rlAssert0 "VM started successfully" $?
        rlLogInfo "$response"

        IP_ADDRESS=`echo "$response" | grep '"OS-EXT-IPS:type": "floating"' -A1 | grep '"addr":' | cut -f4 -d'"' | head -n 1`
        rlLogInfo "Running instance IP_ADDRESS=$IP_ADDRESS"

        rlLogInfo "Waiting 60sec for instance to initialize ..."
        sleep 60
    rlPhaseEnd

    rlPhaseStartTest "Verify VM instance"
        # cloud-init default config differs between RHEL and Fedora
        CLOUD_USER="cloud-user"
        if [ -f "/etc/fedora-release" ]; then
            CLOUD_USER="fedora"
        fi

        # verify we can log in to that instance
        rlRun -t -c "ssh -oStrictHostKeyChecking=no -i $SSH_KEY_DIR/id_rsa $CLOUD_USER@$IP_ADDRESS 'cat /etc/redhat-release'"
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun -t -c "ansible localhost -m os_keypair -a 'name=$VM_NAME-key state=absent'"
        rlRun -t -c "ansible localhost -m os_server -a 'name=$VM_NAME state=absent'"
        rlRun -t -c "ansible localhost -m os_image -a 'name=$OS_IMAGE_NAME state=absent'"
        rlRun -t -c "$CLI compose delete $UUID"
        rlRun -t -c "rm -rf $IMAGE $SSH_KEY_DIR $TMP_DIR"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
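The OpenStack test relies on the usual OS_* variables, as sourced from an openrc file; a hypothetical setup with placeholder values:

    export OS_AUTH_URL=<keystone auth url>
    export OS_USERNAME=<user name>
    export OS_PASSWORD=<password>
    export OS_TENANT_NAME=<tenant>      # optional, defaults to $OS_USERNAME in the script
    make test_openstack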
155 tests/cli/test_build_and_deploy_vmware.sh Normal file
@@ -0,0 +1,155 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure we can build an image and deploy it inside vSphere!
#
#####

. /usr/share/beakerlib/beakerlib.sh

CLI="./src/bin/composer-cli"


rlJournalStart
    rlPhaseStartSetup
        if [ -z "$V_HOST" ]; then
            rlFail "V_HOST is empty!"
        else
            rlLogInfo "V_HOST=$V_HOST"
        fi

        if [ -z "$V_USERNAME" ]; then
            rlFail "V_USERNAME is empty!"
        else
            rlLogInfo "V_USERNAME=$V_USERNAME"
        fi

        if [ -z "$V_PASSWORD" ]; then
            rlFail "V_PASSWORD is empty!"
        else
            rlLogInfo "V_PASSWORD is configured"
        fi

        V_DATACENTER="${V_DATACENTER:-RH_Engineering}"
        rlLogInfo "V_DATACENTER=$V_DATACENTER"

        V_CLUSTER="${V_CLUSTER:-SysMgmt_vMotion}"
        rlLogInfo "V_CLUSTER=$V_CLUSTER"

        V_NETWORK="${V_NETWORK:-CEE_VM_Network}"
        rlLogInfo "V_NETWORK=$V_NETWORK"

        V_DATASTORE="${V_DATASTORE:-NFS-Synology-1}"
        rlLogInfo "V_DATASTORE=$V_DATASTORE"

        V_FOLDER="${V_FOLDER:-Composer}"
        rlLogInfo "V_FOLDER=$V_FOLDER"

        if ! rlCheckRpm "python3-pip"; then
            rlRun -t -c "dnf -y install python3-pip"
            rlAssertRpm python3-pip
        fi

        rlRun -t -c "pip3 install pyvmomi"

        TMP_DIR=`mktemp -d /tmp/composer-vmware.XXXXX`
        SAMPLES="$TMP_DIR/pyvmomi-community-samples"
        if [ ! -d "$SAMPLES" ]; then
            rlRun -t -c "git clone https://github.com/weldr/pyvmomi-community-samples $SAMPLES"
            pushd $SAMPLES && git checkout composer_testing && popd
        fi
        SAMPLES="$SAMPLES/samples"
    rlPhaseEnd

    rlPhaseStartTest "compose start"
        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
        PUB_KEY=`cat $SSH_KEY_DIR/id_rsa.pub`

        cat > $TMP_DIR/vmware.toml << __EOF__
name = "vmware"
description = "HTTP image for vmware"
version = "0.0.1"

[[modules]]
name = "httpd"
version = "*"

[[customizations.user]]
name = "root"
key = "$PUB_KEY"
__EOF__

        rlRun -t -c "$CLI blueprints push $TMP_DIR/vmware.toml"

        UUID=`$CLI compose start vmware vmdk`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        if [ -n "$UUID" ]; then
            until $CLI compose info $UUID | grep FINISHED; do
                rlLogInfo "Waiting for compose to finish ..."
                sleep 30
            done;
        else
            rlFail "Compose UUID is empty!"
        fi
    rlPhaseEnd

    rlPhaseStartTest "Upload vmdk image in vCenter"
        rlRun -t -c "$CLI compose image $UUID"
        IMAGE="$UUID-disk.vmdk"

        python3 $SAMPLES/upload_file_to_datastore.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD \
                -d $V_DATASTORE -l `readlink -f $IMAGE` -r $IMAGE
        rlAssert0 "Image upload successful" $?
    rlPhaseEnd

    rlPhaseStartTest "Start VM instance"
        VM_NAME="Composer-Auto-VM-$UUID"
        INSTANCE_UUID=`python3 $SAMPLES/create_vm.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD \
                --datacenter $V_DATACENTER -c $V_CLUSTER -f $V_FOLDER -d $V_DATASTORE \
                --portgroup $V_NETWORK -v $IMAGE -m 2048 -g rhel7_64Guest -n $VM_NAME \
                --power-on`

        if [ -z "$INSTANCE_UUID" ]; then
            rlFail "INSTANCE_UUID is empty!"
        else
            rlLogInfo "INSTANCE_UUID=$INSTANCE_UUID"
        fi

        # wait for the instance to become running and to have an IP address assigned
        IP_ADDRESS="None"
        while [ "$IP_ADDRESS" == "None" ]; do
            rlLogInfo "IP_ADDRESS is not assigned yet ..."
            sleep 30
            IP_ADDRESS=`python3 $SAMPLES/find_by_uuid.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD \
                    --uuid $INSTANCE_UUID | grep 'ip address' | tr -d ' ' | cut -f2 -d:`
        done

        rlLogInfo "Running instance IP_ADDRESS=$IP_ADDRESS"

        rlLogInfo "Waiting 30sec for instance to initialize ..."
        sleep 30
    rlPhaseEnd

    rlPhaseStartTest "Verify VM instance"
        # verify we can log in to that instance
        rlRun -t -c "ssh -oStrictHostKeyChecking=no -i $SSH_KEY_DIR/id_rsa root@$IP_ADDRESS 'cat /etc/redhat-release'"
    rlPhaseEnd

    rlPhaseStartCleanup
        # note: the vmdk disk is removed when the VM is destroyed
        python3 $SAMPLES/destroy_vm.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD --uuid $INSTANCE_UUID
        rlAssert0 "VM destroyed" $?
        rlRun -t -c "$CLI compose delete $UUID"
        rlRun -t -c "rm -rf $IMAGE $TMP_DIR $SSH_KEY_DIR"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
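The vSphere test needs vCenter connection details; the datacenter, cluster, network, datastore and folder fall back to the internal defaults shown in the script, so only the first three variables are strictly required (placeholder values):

    export V_HOST=<vcenter hostname>
    export V_USERNAME=<vcenter user>
    export V_PASSWORD=<vcenter password>
    make test_vmware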
37 tests/cli/test_compose_ext4-filesystem.sh Normal file
@@ -0,0 +1,37 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure an ext4-filesystem compose can be built without errors!
# Note: according to the existing test plan we're not going to validate
# direct usage scenarios for this image type!
#
#####

. /usr/share/beakerlib/beakerlib.sh

CLI="./src/bin/composer-cli"


rlJournalStart
    rlPhaseStartTest "compose start"
        UUID=`$CLI compose start example-http-server ext4-filesystem`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        if [ -n "$UUID" ]; then
            until $CLI compose info $UUID | grep FINISHED; do
                sleep 10
                rlLogInfo "Waiting for compose to finish ..."
            done;
        else
            rlFail "Compose UUID is empty!"
        fi
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
37 tests/cli/test_compose_partitioned-disk.sh Normal file
@@ -0,0 +1,37 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure a partitioned-disk compose can be built without errors!
# Note: according to the existing test plan we're not going to validate
# direct usage scenarios for this image type!
#
#####

. /usr/share/beakerlib/beakerlib.sh

CLI="./src/bin/composer-cli"


rlJournalStart
    rlPhaseStartTest "compose start"
        UUID=`$CLI compose start example-http-server partitioned-disk`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        if [ -n "$UUID" ]; then
            until $CLI compose info $UUID | grep FINISHED; do
                sleep 10
                rlLogInfo "Waiting for compose to finish ..."
            done;
        else
            rlFail "Compose UUID is empty!"
        fi
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@@ -6,18 +6,40 @@ rm -rf /var/tmp/beakerlib-*/
export top_srcdir=`pwd`
. ./tests/testenv.sh

BLUEPRINTS_DIR=`mktemp -d '/tmp/composer-blueprints.XXXXX'`
cp ./tests/pylorax/blueprints/*.toml $BLUEPRINTS_DIR

SHARE_DIR=`mktemp -d '/tmp/composer-share.XXXXX'`
cp -R ./share/* $SHARE_DIR
chmod a+rx -R $SHARE_DIR

# start the lorax-composer daemon
./src/sbin/lorax-composer --sharedir ./share/ ./tests/pylorax/blueprints/ &
./src/sbin/lorax-composer --sharedir $SHARE_DIR $BLUEPRINTS_DIR &

# wait for the backend to become ready
until curl --unix-socket /run/weldr/api.socket http://localhost:4000/api/status | grep '"db_supported": true'; do
tries=0
until curl -m 15 --unix-socket /run/weldr/api.socket http://localhost:4000/api/status | grep 'db_supported.*true'; do
    tries=$((tries + 1))
    if [ $tries -gt 20 ]; then
        exit 1
    fi
    sleep 2
    echo "DEBUG: Waiting for backend API to become ready before testing ..."
done;

# invoke cli/ tests
./tests/cli/test_blueprints_sanity.sh
./tests/cli/test_compose_sanity.sh

if [ -z "$*" ]; then
    # invoke cli/ tests which can be executed without special preparation
    ./tests/cli/test_blueprints_sanity.sh
    ./tests/cli/test_compose_sanity.sh
else
    # execute other cli tests which need more adjustments in the calling environment
    # or can't be executed inside Travis CI
    for TEST in "$@"; do
        ./$TEST
    done
fi


# Stop lorax-composer and remove /run/weldr/api.socket
pkill -9 lorax-composer