From f8137b94dbd6fa09f64a73bae2de1d811aa877b3 Mon Sep 17 00:00:00 2001
From: Alexander Todorov
Date: Wed, 12 Dec 2018 13:38:09 +0200
Subject: [PATCH] Backport cloud image tests from master

---
 Dockerfile.test                              |  51 +++--
 Makefile                                     |  51 ++++-
 tests/cli/test_blueprints_sanity.sh          |  45 +++++
 tests/cli/test_build_and_deploy_aws.sh       | 202 +++++++++++++++++++
 tests/cli/test_build_and_deploy_azure.sh     | 149 ++++++++++++++
 tests/cli/test_build_and_deploy_openstack.sh | 133 ++++++++++++
 tests/cli/test_build_and_deploy_vmware.sh    | 155 ++++++++++++++
 tests/cli/test_compose_ext4-filesystem.sh    |  37 ++++
 tests/cli/test_compose_partitioned-disk.sh   |  37 ++++
 tests/cli/test_compose_sanity.sh             |  45 +++++
 tests/test_cli.sh                            |  52 +++++
 tests/testenv.sh                             |  19 ++
 tests/usercustomize.py                       |   8 -
 13 files changed, 951 insertions(+), 33 deletions(-)
 create mode 100755 tests/cli/test_blueprints_sanity.sh
 create mode 100755 tests/cli/test_build_and_deploy_aws.sh
 create mode 100755 tests/cli/test_build_and_deploy_azure.sh
 create mode 100755 tests/cli/test_build_and_deploy_openstack.sh
 create mode 100755 tests/cli/test_build_and_deploy_vmware.sh
 create mode 100755 tests/cli/test_compose_ext4-filesystem.sh
 create mode 100755 tests/cli/test_compose_partitioned-disk.sh
 create mode 100755 tests/cli/test_compose_sanity.sh
 create mode 100755 tests/test_cli.sh
 create mode 100644 tests/testenv.sh
 delete mode 100644 tests/usercustomize.py

diff --git a/Dockerfile.test b/Dockerfile.test
index 002cb64a..61ecd8fe 100644
--- a/Dockerfile.test
+++ b/Dockerfile.test
@@ -3,17 +3,40 @@ FROM centos:7
 COPY epel.repo /etc/yum.repos.d/
 RUN yum -y install --nogpgcheck epel-release && \
     rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-* && \
-    yum -y install make libgit2-glib tito python-pylint \
-                   python-nose python-mako python-flask \
-                   python-coverage libselinux-python sudo \
-                   pykickstart python2-pytoml python-sphinx \
-                   python2-mock python-semantic_version \
-                   anaconda-tui python-rpmfluff && \
-    yum clean all && \
-    rm -rf /var/cache/yum
-
-RUN mkdir /lorax
-COPY . /lorax
-
-WORKDIR /lorax
-RUN make test
+    yum -y install \
+    anaconda-tui \
+    libgit2-glib \
+    libselinux-python \
+    make \
+    pykickstart \
+    python-coverage \
+    python-coveralls \
+    python-flask \
+    python-gevent \
+    python-magic \
+    python-mako \
+    python2-mock \
+    python-nose \
+    python-pocketlint \
+    python-pylint \
+    pyparted \
+    python2-pytoml \
+    python-semantic_version \
+    python-sphinx \
+    python-rpmfluff \
+    python-librepo \
+    beakerlib \
+    sudo \
+    tito \
+    rsync \
+    e2fsprogs \
+    xz-lzma-compat \
+    pbzip2 \
+    squashfs-tools \
+    qemu-img \
+    which && \
+    touch /.in-container
+RUN useradd weldr
+VOLUME /lorax-ro
+VOLUME /test-results
+WORKDIR /lorax-ro
diff --git a/Makefile b/Makefile
index bf26f3c8..c8b35e8f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,13 +1,13 @@
 PYTHON ?= /usr/bin/python
 DESTDIR ?= /
+DOCKER ?= docker
 
 PKGNAME = lorax-composer
 VERSION = $(shell awk '/Version:/ { print $$2 }' $(PKGNAME).spec)
 RELEASE = $(shell awk '/Release:/ { print $$2 }' $(PKGNAME).spec | sed -e 's|%.*$$||g')
 TAG = lorax-$(VERSION)-$(RELEASE)
 
-PW_DIR ?= $(shell pwd)
-USER_SITE_PACKAGES ?= $(shell sudo $(PYTHON) -m site --user-site)
+IMAGE_RELEASE = $(shell awk -F: '/FROM/ { print $$2}' Dockerfile.test)
 
 default: all
 
@@ -35,18 +35,34 @@ check:
         @echo "*** Running pylint ***"
         PYTHONPATH=$(PYTHONPATH):./src/ ./tests/pylint/runpylint.py
 
-# /api/docs/ tests require we have the documentation already built
-test: docs
+test:
         @echo "*** Running tests ***"
-        sudo mkdir -p $(USER_SITE_PACKAGES)
-        sudo cp ./tests/usercustomize.py $(USER_SITE_PACKAGES)
-        sudo COVERAGE_PROCESS_START=$(PW_DIR)/.coveragerc PYTHONPATH=$(PYTHONPATH):./src/ \
-                $(PYTHON) -m nose -v ./src/pylorax/ ./src/composer/ ./tests/pylorax/ ./tests/composer/
-        sudo rm -rf $(USER_SITE_PACKAGES)
+        PYTHONPATH=$(PYTHONPATH):./src/ $(PYTHON) -m nose -v --with-coverage --cover-erase --cover-branches \
+                --cover-package=pylorax --cover-inclusive \
+                ./tests/pylorax/ ./tests/composer/
 
-        coverage combine
         coverage report -m
         [ -f "/usr/bin/coveralls" ] && [ -n "$(COVERALLS_REPO_TOKEN)" ] && coveralls || echo
+
+        ./tests/test_cli.sh
+
+# need `losetup`, which needs Docker to be in privileged mode (--privileged)
+# but even so fails in Travis CI
+test_images:
+        sudo -E ./tests/test_cli.sh tests/cli/test_compose_ext4-filesystem.sh \
+                tests/cli/test_compose_partitioned-disk.sh
+
+test_aws:
+        sudo -E ./tests/test_cli.sh tests/cli/test_build_and_deploy_aws.sh
+
+test_azure:
+        sudo -E ./tests/test_cli.sh tests/cli/test_build_and_deploy_azure.sh
+
+test_openstack:
+        sudo -E ./tests/test_cli.sh tests/cli/test_build_and_deploy_openstack.sh
+
+test_vmware:
+        sudo -E ./tests/test_cli.sh tests/cli/test_build_and_deploy_vmware.sh
 
 clean:
         -rm -rf build src/pylorax/version.py
@@ -73,9 +89,22 @@ local:
         @rm -rf /var/tmp/$(PKGNAME)-$(VERSION)
         @echo "The archive is in $(PKGNAME)-$(VERSION).tar.gz"
 
+test-in-copy:
+        rsync -aP --exclude=.git /lorax-ro/ /lorax/
+        make -C /lorax/ check test
+        cp /lorax/.coverage /test-results/
+
 test-in-docker:
-        sudo docker build -t welder/lorax-composer:latest -f Dockerfile.test .
+        sudo $(DOCKER) build -t welder/lorax-tests:$(IMAGE_RELEASE) -f Dockerfile.test .
+        sudo $(DOCKER) run --rm -it -v `pwd`/.test-results/:/test-results -v `pwd`:/lorax-ro:ro --security-opt label=disable welder/lorax-tests:$(IMAGE_RELEASE) make test-in-copy
+
+docs-in-docker:
+        sudo $(DOCKER) run -it --rm -v `pwd`/docs/html/:/lorax/docs/html/ --security-opt label=disable welder/lorax-tests:$(IMAGE_RELEASE) make docs
 
 ci: check test
 
 .PHONY: all install check test clean tag docs archive local
+
+.PHONY: ci_after_success
+ci_after_success:
+# nothing to do here, but Jenkins expects this target to be present, otherwise it fails
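A sketch of the intended workflow for the targets above (the credential values shown are illustrative placeholders, not values from this patch):

    # unit tests plus the CLI sanity tests, inside the test container
    make test-in-docker

    # cloud tests run on the host and read provider credentials from the
    # environment, e.g. for the AWS test:
    export AWS_ACCESS_KEY_ID=<key-id>
    export AWS_SECRET_ACCESS_KEY=<secret>
    make test_aws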
diff --git a/tests/cli/test_blueprints_sanity.sh b/tests/cli/test_blueprints_sanity.sh
new file mode 100755
index 00000000..90e1d23d
--- /dev/null
+++ b/tests/cli/test_blueprints_sanity.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# Note: execute this file from the project root directory
+
+. /usr/share/beakerlib/beakerlib.sh
+
+CLI="./src/bin/composer-cli"
+
+
+rlJournalStart
+    rlPhaseStartTest "blueprints list"
+        for bp in example-http-server example-development example-atlas; do
+            rlAssertEquals "blueprint list finds $bp" \
+                "`$CLI blueprints list | grep $bp`" "$bp"
+        done
+    rlPhaseEnd
+
+    rlPhaseStartTest "blueprints save"
+        rlRun -t -c "$CLI blueprints save example-http-server"
+        rlAssertExists "example-http-server.toml"
+        rlAssertGrep "example-http-server" "example-http-server.toml"
+        rlAssertGrep "httpd" "example-http-server.toml"
+
+        # non-existing blueprint
+        rlRun -t -c "$CLI blueprints save non-existing-bp" 1
+        rlAssertNotExists "non-existing-bp.toml"
+    rlPhaseEnd
+
+    rlPhaseStartTest "blueprints push"
+
+        cat > beakerlib.toml << __EOF__
+name = "beakerlib"
+description = "Start building tests with beakerlib."
+version = "0.0.1"
+
+[[modules]]
+name = "beakerlib"
+version = "*"
+__EOF__
+
+        rlRun -t -c "$CLI blueprints push beakerlib.toml"
+        rlAssertEquals "pushed bp is found via list" "`$CLI blueprints list | grep beakerlib`" "beakerlib"
+    rlPhaseEnd
+
+rlJournalEnd
+rlJournalPrintText
diff --git a/tests/cli/test_build_and_deploy_aws.sh b/tests/cli/test_build_and_deploy_aws.sh
new file mode 100755
index 00000000..fc648ae1
--- /dev/null
+++ b/tests/cli/test_build_and_deploy_aws.sh
@@ -0,0 +1,202 @@
+#!/bin/bash
+# Note: execute this file from the project root directory
+
+#####
+#
+# Make sure we can build an image and deploy it inside AWS!
+#
+#####
+
+. /usr/share/beakerlib/beakerlib.sh
+
+CLI="./src/bin/composer-cli"
+
+
+rlJournalStart
+    rlPhaseStartSetup
+        if [ -z "$AWS_ACCESS_KEY_ID" ]; then
+            rlFail "AWS_ACCESS_KEY_ID is empty!"
+        else
+            rlLogInfo "AWS_ACCESS_KEY_ID is configured"
+        fi
+
+        if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
+            rlFail "AWS_SECRET_ACCESS_KEY is empty!"
+        else
+            rlLogInfo "AWS_SECRET_ACCESS_KEY is configured"
+        fi
+
+        AWS_BUCKET="${AWS_BUCKET:-composerredhat}"
+        AWS_REGION="${AWS_REGION:-us-east-1}"
+
+        rlLogInfo "AWS_BUCKET=$AWS_BUCKET"
+        rlLogInfo "AWS_REGION=$AWS_REGION"
+
+        if ! rlCheckRpm "python3-pip"; then
+            rlRun -t -c "dnf -y install python3-pip"
+            rlAssertRpm python3-pip
+        fi
+
+        rlRun -t -c "pip3 install awscli"
+
+        # aws configure
+        [ -d ~/.aws/ ] || mkdir ~/.aws/
+
+        if [ -f ~/.aws/config ]; then
+            rlLogInfo "Reusing existing ~/.aws/config"
+        else
+            rlLogInfo "Creating ~/.aws/config"
+            cat > ~/.aws/config << __EOF__
+[default]
+region = $AWS_REGION
+__EOF__
+        fi
+
+        if [ -f ~/.aws/credentials ]; then
+            rlLogInfo "Reusing existing ~/.aws/credentials"
+        else
+            rlLogInfo "Creating ~/.aws/credentials"
+            cat > ~/.aws/credentials << __EOF__
+[default]
+aws_access_key_id = $AWS_ACCESS_KEY_ID
+aws_secret_access_key = $AWS_SECRET_ACCESS_KEY
+__EOF__
+        fi
+
+        # make sure bucket exists
+        rlRun -t -c "aws s3 mb s3://$AWS_BUCKET"
+
+        # make sure vmimport role exists
+        rlRun -t -c "aws iam get-role --role-name vmimport"
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose start"
+        UUID=`$CLI compose start example-http-server ami`
+        rlAssertEquals "exit code should be zero" $? 0
+
+        UUID=`echo $UUID | cut -f 2 -d' '`
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose finished"
+        if [ -n "$UUID" ]; then
+            until $CLI compose info $UUID | grep FINISHED; do
+                rlLogInfo "Waiting for compose to finish ..."
+                sleep 30
+            done;
+        else
+            rlFail "Compose UUID is empty!"
+        fi
+    rlPhaseEnd
+
+    rlPhaseStartTest "Import AMI image in AWS"
+        rlRun -t -c "$CLI compose image $UUID"
+
+        AMI="$UUID-disk.ami"
+
+        # upload to S3
+        rlRun -t -c "aws s3 cp $AMI s3://$AWS_BUCKET"
+
+        # import image as snapshot into EC2
+        cat > containers.json << __EOF__
+{
+    "Description": "Composer image",
+    "Format": "raw",
+    "UserBucket": {
+        "S3Bucket": "$AWS_BUCKET",
+        "S3Key": "$AMI"
+    }
+}
+__EOF__
+
+        IMPORT_TASK_ID=`aws ec2 import-snapshot --disk-container file://containers.json | grep ImportTaskId | cut -f4 -d'"'`
+
+        if [ -z "$IMPORT_TASK_ID" ]; then
+            rlFail "IMPORT_TASK_ID is empty!"
+        fi
+
+        # wait for the import to complete
+        while aws ec2 describe-import-snapshot-tasks --filters Name=task-state,Values=active | grep $IMPORT_TASK_ID; do
+            rlLogInfo "Waiting for $IMPORT_TASK_ID to complete ..."
+            sleep 60
+        done
+
+        DESCRIPTION="Created by AWS-VMImport service for $IMPORT_TASK_ID"
+        rlRun -t -c "aws ec2 describe-snapshots --filters Name=description,Values='$DESCRIPTION'"
+        SNAPSHOT_ID=`aws ec2 describe-snapshots --filters Name=description,Values="$DESCRIPTION" | grep SnapshotId | cut -f4 -d'"'`
+
+        if [ -z "$SNAPSHOT_ID" ]; then
+            rlFail "SNAPSHOT_ID is empty!"
+        else
+            rlLogInfo "SNAPSHOT_ID=$SNAPSHOT_ID"
+        fi
+
+        # create an image from the imported snapshot
+        AMI_ID=`aws ec2 register-image --name "Composer-Test-$UUID" --virtualization-type hvm --root-device-name /dev/sda1 \
+                --block-device-mappings "[{\"DeviceName\": \"/dev/sda1\", \"Ebs\": {\"SnapshotId\": \"$SNAPSHOT_ID\"}}]" | \
+                grep ImageId | cut -f4 -d'"'`
+
+        if [ -z "$AMI_ID" ]; then
+            rlFail "AMI_ID is empty!"
+        else
+            rlLogInfo "AMI_ID=$AMI_ID"
+        fi
+    rlPhaseEnd
+
+    rlPhaseStartTest "Start EC2 instance"
+        # generate new ssh key and import it into EC2
+        KEY_NAME=composer-$UUID
+        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
+        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
+        rlRun -t -c "aws ec2 import-key-pair --key-name $KEY_NAME --public-key-material file://$SSH_KEY_DIR/id_rsa.pub"
+
+        # start a new instance with selected ssh key, enable ssh
+        INSTANCE_ID=`aws ec2 run-instances --image-id $AMI_ID --instance-type t2.small --key-name $KEY_NAME \
+                --security-groups allow-ssh --instance-initiated-shutdown-behavior terminate --enable-api-termination \
+                --count 1 | grep InstanceId | cut -f4 -d'"'`
+
+        if [ -z "$INSTANCE_ID" ]; then
+            rlFail "INSTANCE_ID is empty!"
+        else
+            rlLogInfo "INSTANCE_ID=$INSTANCE_ID"
+        fi
+
+        # wait for the instance to enter the running state and have a public IP assigned
+        IP_ADDRESS=""
+        while [ -z "$IP_ADDRESS" ]; do
+            rlLogInfo "IP_ADDRESS is not assigned yet ..."
+            sleep 10
+            IP_ADDRESS=`aws ec2 describe-instances --instance-ids $INSTANCE_ID --filters=Name=instance-state-name,Values=running | grep PublicIpAddress | cut -f4 -d'"'`
+        done
+
+        rlLogInfo "Running instance IP_ADDRESS=$IP_ADDRESS"
+
+        until aws ec2 describe-instance-status --instance-ids $INSTANCE_ID --filter Name=instance-status.status,Values=ok | grep ok; do
+            rlLogInfo "Waiting for instance to initialize ..."
+            sleep 60
+        done
+    rlPhaseEnd
+
+    rlPhaseStartTest "Verify EC2 instance"
+        # cloud-init default config differs between RHEL and Fedora
+        # and ami.ks will create ec2-user only on RHEL
+        CLOUD_USER="ec2-user"
+        if [ -f "/etc/fedora-release" ]; then
+            CLOUD_USER="fedora"
+        fi
+
+        # verify we can log into that instance and maybe some other details
+        rlRun -t -c "ssh -oStrictHostKeyChecking=no -i $SSH_KEY_DIR/id_rsa $CLOUD_USER@$IP_ADDRESS 'cat /etc/redhat-release'"
+    rlPhaseEnd
+
+    rlPhaseStartCleanup
+        rlRun -t -c "aws ec2 terminate-instances --instance-ids $INSTANCE_ID"
+        rlRun -t -c "aws ec2 delete-key-pair --key-name $KEY_NAME"
+        rlRun -t -c "aws ec2 deregister-image --image-id $AMI_ID"
+        rlRun -t -c "aws ec2 delete-snapshot --snapshot-id $SNAPSHOT_ID"
+        rlRun -t -c "aws s3 rm s3://$AWS_BUCKET/$AMI"
+        rlRun -t -c "$CLI compose delete $UUID"
+        rlRun -t -c "rm -rf $AMI $SSH_KEY_DIR containers.json"
+    rlPhaseEnd
+
+rlJournalEnd
+rlJournalPrintText
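In outline, the import flow above stages the raw image in S3, converts it into an EBS snapshot, and registers that snapshot as a bootable AMI; the commands below repeat the script's own steps with their variables, with elided flags marked by "...":

    aws s3 cp $AMI s3://$AWS_BUCKET                                  # stage the raw image in S3
    aws ec2 import-snapshot --disk-container file://containers.json  # S3 object -> EBS snapshot
    aws ec2 register-image --name "Composer-Test-$UUID" ...          # snapshot -> bootable AMI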
diff --git a/tests/cli/test_build_and_deploy_azure.sh b/tests/cli/test_build_and_deploy_azure.sh
new file mode 100755
index 00000000..56dd75c7
--- /dev/null
+++ b/tests/cli/test_build_and_deploy_azure.sh
@@ -0,0 +1,149 @@
+#!/bin/bash
+# Note: execute this file from the project root directory
+
+#####
+#
+# Make sure we can build an image and deploy it inside Azure!
+#
+#####
+
+. /usr/share/beakerlib/beakerlib.sh
+
+CLI="./src/bin/composer-cli"
+
+
+rlJournalStart
+    rlPhaseStartSetup
+        if [ -z "$AZURE_SUBSCRIPTION_ID" ]; then
+            rlFail "AZURE_SUBSCRIPTION_ID is empty!"
+        else
+            rlLogInfo "AZURE_SUBSCRIPTION_ID is configured"
+        fi
+
+        if [ -z "$AZURE_TENANT" ]; then
+            rlFail "AZURE_TENANT is empty!"
+        else
+            rlLogInfo "AZURE_TENANT is configured"
+        fi
+
+        if [ -z "$AZURE_CLIENT_ID" ]; then
+            rlFail "AZURE_CLIENT_ID is empty!"
+        else
+            rlLogInfo "AZURE_CLIENT_ID is configured"
+        fi
+
+        if [ -z "$AZURE_SECRET" ]; then
+            rlFail "AZURE_SECRET is empty!"
+        else
+            rlLogInfo "AZURE_SECRET is configured"
+        fi
+
+        export AZURE_RESOURCE_GROUP="${AZURE_RESOURCE_GROUP:-composer}"
+        rlLogInfo "AZURE_RESOURCE_GROUP=$AZURE_RESOURCE_GROUP"
+
+        export AZURE_STORAGE_ACCOUNT="${AZURE_STORAGE_ACCOUNT:-composerredhat}"
+        rlLogInfo "AZURE_STORAGE_ACCOUNT=$AZURE_STORAGE_ACCOUNT"
+
+        export AZURE_STORAGE_CONTAINER="${AZURE_STORAGE_CONTAINER:-composerredhat}"
+        rlLogInfo "AZURE_STORAGE_CONTAINER=$AZURE_STORAGE_CONTAINER"
+
+        if ! rlCheckRpm "python3-pip"; then
+            rlRun -t -c "dnf -y install python3-pip"
+            rlAssertRpm python3-pip
+        fi
+
+        rlRun -t -c "pip3 install ansible[azure]"
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose start"
+        UUID=`$CLI compose start example-http-server vhd`
+        rlAssertEquals "exit code should be zero" $? 0
+
+        UUID=`echo $UUID | cut -f 2 -d' '`
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose finished"
+        if [ -n "$UUID" ]; then
+            until $CLI compose info $UUID | grep FINISHED; do
+                rlLogInfo "Waiting for compose to finish ..."
+                sleep 30
+            done;
+        else
+            rlFail "Compose UUID is empty!"
+        fi
+    rlPhaseEnd
+
+    rlPhaseStartTest "Upload image to Azure"
+        rlRun -t -c "$CLI compose image $UUID"
+        IMAGE="$UUID-disk.vhd"
+        OS_IMAGE_NAME="Composer-$UUID-Automated-Import"
+
+        rlRun -t -c "ansible localhost -m azure_rm_storageblob -a \
+            'resource_group=$AZURE_RESOURCE_GROUP \
+            storage_account_name=$AZURE_STORAGE_ACCOUNT \
+            container=$AZURE_STORAGE_CONTAINER \
+            blob=$IMAGE src=$IMAGE blob_type=page'"
+
+        # create image from blob
+        rlRun -t -c "ansible localhost -m azure_rm_image -a \
+            'resource_group=$AZURE_RESOURCE_GROUP name=$OS_IMAGE_NAME os_type=Linux location=eastus \
+            source=https://$AZURE_STORAGE_ACCOUNT.blob.core.windows.net/$AZURE_STORAGE_CONTAINER/$IMAGE'"
+    rlPhaseEnd
+
+    rlPhaseStartTest "Start VM instance"
+        VM_NAME="Composer-Auto-VM-$UUID"
+
+        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
+        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
+        SSH_PUB_KEY=`cat $SSH_KEY_DIR/id_rsa.pub`
+
+        TMP_DIR=`mktemp -d /tmp/composer-azure.XXXXX`
+        cat > $TMP_DIR/azure-playbook.yaml << __EOF__
+---
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Create a VM
+      azure_rm_virtualmachine:
+        resource_group: $AZURE_RESOURCE_GROUP
+        name: $VM_NAME
+        vm_size: Standard_B2s
+        location: eastus
+        admin_username: azure-user
+        ssh_password_enabled: false
+        ssh_public_keys:
+          - path: /home/azure-user/.ssh/authorized_keys
+            key_data: "$SSH_PUB_KEY"
+        image:
+          name: $OS_IMAGE_NAME
+          resource_group: $AZURE_RESOURCE_GROUP
+__EOF__
+
+        rlRun -t -c "ansible-playbook $TMP_DIR/azure-playbook.yaml"
+
+        response=`ansible localhost -m azure_rm_virtualmachine -a "resource_group=$AZURE_RESOURCE_GROUP name=$VM_NAME"`
+        rlAssert0 "Received VM info successfully" $?
+        rlLogInfo "$response"
+
+        IP_ADDRESS=`echo "$response" | grep '"ipAddress":' | cut -f4 -d'"'`
+        rlLogInfo "Running instance IP_ADDRESS=$IP_ADDRESS"
+
+        rlLogInfo "Waiting 60sec for instance to initialize ..."
+        sleep 60
+    rlPhaseEnd
+
+    rlPhaseStartTest "Verify VM instance"
+        # verify we can log into that instance
+        rlRun -t -c "ssh -oStrictHostKeyChecking=no -i $SSH_KEY_DIR/id_rsa azure-user@$IP_ADDRESS 'cat /etc/redhat-release'"
+    rlPhaseEnd
+
+    rlPhaseStartCleanup
+        rlRun -t -c "ansible localhost -m azure_rm_virtualmachine -a 'resource_group=$AZURE_RESOURCE_GROUP name=$VM_NAME location=eastus state=absent'"
+        rlRun -t -c "ansible localhost -m azure_rm_image -a 'resource_group=$AZURE_RESOURCE_GROUP name=$OS_IMAGE_NAME state=absent'"
+        rlRun -t -c "ansible localhost -m azure_rm_storageblob -a 'resource_group=$AZURE_RESOURCE_GROUP storage_account_name=$AZURE_STORAGE_ACCOUNT container=$AZURE_STORAGE_CONTAINER blob=$IMAGE state=absent'"
+        rlRun -t -c "$CLI compose delete $UUID"
+        rlRun -t -c "rm -rf $IMAGE $SSH_KEY_DIR $TMP_DIR"
+    rlPhaseEnd
+
+rlJournalEnd
+rlJournalPrintText
diff --git a/tests/cli/test_build_and_deploy_openstack.sh b/tests/cli/test_build_and_deploy_openstack.sh
new file mode 100755
index 00000000..d09b8c84
--- /dev/null
+++ b/tests/cli/test_build_and_deploy_openstack.sh
@@ -0,0 +1,133 @@
+#!/bin/bash
+# Note: execute this file from the project root directory
+
+#####
+#
+# Make sure we can build an image and deploy it inside OpenStack!
+#
+#####
+
+. /usr/share/beakerlib/beakerlib.sh
+
+CLI="./src/bin/composer-cli"
+
+
+rlJournalStart
+    rlPhaseStartSetup
+        if [ -z "$OS_AUTH_URL" ]; then
+            rlFail "OS_AUTH_URL is empty!"
+        else
+            rlLogInfo "OS_AUTH_URL=$OS_AUTH_URL"
+        fi
+
+        if [ -z "$OS_USERNAME" ]; then
+            rlFail "OS_USERNAME is empty!"
+        else
+            rlLogInfo "OS_USERNAME=$OS_USERNAME"
+        fi
+
+        export OS_TENANT_NAME="${OS_TENANT_NAME:-$OS_USERNAME}"
+        rlLogInfo "OS_TENANT_NAME=$OS_TENANT_NAME"
+
+        if [ -z "$OS_PASSWORD" ]; then
+            rlFail "OS_PASSWORD is empty!"
+        else
+            rlLogInfo "OS_PASSWORD is configured"
+        fi
+
+        if ! rlCheckRpm "python3-pip"; then
+            rlRun -t -c "dnf -y install python3-pip"
+            rlAssertRpm python3-pip
+        fi
+
+        rlRun -t -c "pip3 install ansible openstacksdk"
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose start"
+        TMP_DIR=`mktemp -d /tmp/composer-openstack.XXXXX`
+        # workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1639326
+        cat > $TMP_DIR/http-with-rng.toml << __EOF__
+name = "http-with-rng"
+description = "HTTP image for OpenStack with rng-tools"
+version = "0.0.1"
+
+[[modules]]
+name = "httpd"
+version = "*"
+
+[[modules]]
+name = "rng-tools"
+version = "*"
+__EOF__
+
+        rlRun -t -c "$CLI blueprints push $TMP_DIR/http-with-rng.toml"
+
+        UUID=`$CLI compose start http-with-rng openstack`
+        rlAssertEquals "exit code should be zero" $? 0
+
+        UUID=`echo $UUID | cut -f 2 -d' '`
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose finished"
+        if [ -n "$UUID" ]; then
+            until $CLI compose info $UUID | grep FINISHED; do
+                rlLogInfo "Waiting for compose to finish ..."
+                sleep 30
+            done;
+        else
+            rlFail "Compose UUID is empty!"
+        fi
+    rlPhaseEnd
+
+    rlPhaseStartTest "Upload QCOW2 image to OpenStack"
+        rlRun -t -c "$CLI compose image $UUID"
+        IMAGE="$UUID-disk.qcow2"
+        OS_IMAGE_NAME="Composer-$UUID-Automated-Import"
+
+        response=`ansible localhost -m os_image -a "name=$OS_IMAGE_NAME filename=$IMAGE is_public=no"`
+        rlAssert0 "Image upload successful" $?
+        rlLogInfo "$response"
+
+        OS_IMAGE_UUID=`echo "$response" | grep '"changed": true' -A1 | grep '"id":' | cut -d'"' -f4`
+        rlLogInfo "OS_IMAGE_UUID=$OS_IMAGE_UUID"
+    rlPhaseEnd
+
+    rlPhaseStartTest "Start VM instance"
+        VM_NAME="Composer-Auto-VM-$UUID"
+
+        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
+        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
+        rlRun -t -c "ansible localhost -m os_keypair -a 'name=$VM_NAME-key public_key_file=$SSH_KEY_DIR/id_rsa.pub'"
+
+        response=`ansible localhost -m os_server -a "name=$VM_NAME image=$OS_IMAGE_UUID flavor=t2.medium key_name=$VM_NAME-key auto_ip=yes"`
+        rlAssert0 "VM started successfully" $?
+        rlLogInfo "$response"
+
+        IP_ADDRESS=`echo "$response" | grep '"OS-EXT-IPS:type": "floating"' -A1 | grep '"addr":' | cut -f4 -d'"' | head -n 1`
+        rlLogInfo "Running instance IP_ADDRESS=$IP_ADDRESS"
+
+        rlLogInfo "Waiting 60sec for instance to initialize ..."
+        sleep 60
+    rlPhaseEnd
+
+    rlPhaseStartTest "Verify VM instance"
+        # cloud-init default config differs between RHEL and Fedora
+        CLOUD_USER="cloud-user"
+        if [ -f "/etc/fedora-release" ]; then
+            CLOUD_USER="fedora"
+        fi
+
+        # verify we can log into that instance
+        rlRun -t -c "ssh -oStrictHostKeyChecking=no -i $SSH_KEY_DIR/id_rsa $CLOUD_USER@$IP_ADDRESS 'cat /etc/redhat-release'"
+    rlPhaseEnd
+
+    rlPhaseStartCleanup
+        rlRun -t -c "ansible localhost -m os_keypair -a 'name=$VM_NAME-key state=absent'"
+        rlRun -t -c "ansible localhost -m os_server -a 'name=$VM_NAME state=absent'"
+        rlRun -t -c "ansible localhost -m os_image -a 'name=$OS_IMAGE_NAME state=absent'"
+        rlRun -t -c "$CLI compose delete $UUID"
+        rlRun -t -c "rm -rf $IMAGE $SSH_KEY_DIR $TMP_DIR"
+    rlPhaseEnd
+
+rlJournalEnd
+rlJournalPrintText
diff --git a/tests/cli/test_build_and_deploy_vmware.sh b/tests/cli/test_build_and_deploy_vmware.sh
new file mode 100755
index 00000000..e5f47111
--- /dev/null
+++ b/tests/cli/test_build_and_deploy_vmware.sh
@@ -0,0 +1,155 @@
+#!/bin/bash
+# Note: execute this file from the project root directory
+
+#####
+#
+# Make sure we can build an image and deploy it inside vSphere!
+#
+#####
+
+. /usr/share/beakerlib/beakerlib.sh
+
+CLI="./src/bin/composer-cli"
+
+
+rlJournalStart
+    rlPhaseStartSetup
+        if [ -z "$V_HOST" ]; then
+            rlFail "V_HOST is empty!"
+        else
+            rlLogInfo "V_HOST=$V_HOST"
+        fi
+
+        if [ -z "$V_USERNAME" ]; then
+            rlFail "V_USERNAME is empty!"
+        else
+            rlLogInfo "V_USERNAME=$V_USERNAME"
+        fi
+
+        if [ -z "$V_PASSWORD" ]; then
+            rlFail "V_PASSWORD is empty!"
+        else
+            rlLogInfo "V_PASSWORD is configured"
+        fi
+
+        V_DATACENTER="${V_DATACENTER:-RH_Engineering}"
+        rlLogInfo "V_DATACENTER=$V_DATACENTER"
+
+        V_CLUSTER="${V_CLUSTER:-SysMgmt_vMotion}"
+        rlLogInfo "V_CLUSTER=$V_CLUSTER"
+
+        V_NETWORK="${V_NETWORK:-CEE_VM_Network}"
+        rlLogInfo "V_NETWORK=$V_NETWORK"
+
+        V_DATASTORE="${V_DATASTORE:-NFS-Synology-1}"
+        rlLogInfo "V_DATASTORE=$V_DATASTORE"
+
+        V_FOLDER="${V_FOLDER:-Composer}"
+        rlLogInfo "V_FOLDER=$V_FOLDER"
+
+        if ! rlCheckRpm "python3-pip"; then
+            rlRun -t -c "dnf -y install python3-pip"
+            rlAssertRpm python3-pip
+        fi
+
+        rlRun -t -c "pip3 install pyvmomi"
+
+        TMP_DIR=`mktemp -d /tmp/composer-vmware.XXXXX`
+        SAMPLES="$TMP_DIR/pyvmomi-community-samples"
+        if [ ! -d "$SAMPLES" ]; then
+            rlRun -t -c "git clone https://github.com/weldr/pyvmomi-community-samples $SAMPLES"
+            pushd $SAMPLES && git checkout composer_testing && popd
+        fi
+        SAMPLES="$SAMPLES/samples"
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose start"
+        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
+        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
+        PUB_KEY=`cat $SSH_KEY_DIR/id_rsa.pub`
+
+        cat > $TMP_DIR/vmware.toml << __EOF__
+name = "vmware"
+description = "HTTP image for vmware"
+version = "0.0.1"
+
+[[modules]]
+name = "httpd"
+version = "*"
+
+[[customizations.user]]
+name = "root"
+key = "$PUB_KEY"
+__EOF__
+
+        rlRun -t -c "$CLI blueprints push $TMP_DIR/vmware.toml"
+
+        UUID=`$CLI compose start vmware vmdk`
+        rlAssertEquals "exit code should be zero" $? 0
+
+        UUID=`echo $UUID | cut -f 2 -d' '`
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose finished"
+        if [ -n "$UUID" ]; then
+            until $CLI compose info $UUID | grep FINISHED; do
+                rlLogInfo "Waiting for compose to finish ..."
+                sleep 30
+            done;
+        else
+            rlFail "Compose UUID is empty!"
+        fi
+    rlPhaseEnd
+
+    rlPhaseStartTest "Upload vmdk image in vCenter"
+        rlRun -t -c "$CLI compose image $UUID"
+        IMAGE="$UUID-disk.vmdk"
+
+        python3 $SAMPLES/upload_file_to_datastore.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD \
+            -d $V_DATASTORE -l `readlink -f $IMAGE` -r $IMAGE
+        rlAssert0 "Image upload successful" $?
+    rlPhaseEnd
+
+    rlPhaseStartTest "Start VM instance"
+        VM_NAME="Composer-Auto-VM-$UUID"
+        INSTANCE_UUID=`python3 $SAMPLES/create_vm.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD \
+            --datacenter $V_DATACENTER -c $V_CLUSTER -f $V_FOLDER -d $V_DATASTORE \
+            --portgroup $V_NETWORK -v $IMAGE -m 2048 -g rhel7_64Guest -n $VM_NAME \
+            --power-on`
+
+        if [ -z "$INSTANCE_UUID" ]; then
+            rlFail "INSTANCE_UUID is empty!"
+        else
+            rlLogInfo "INSTANCE_UUID=$INSTANCE_UUID"
+        fi
+
+        # wait for the instance to power on and have an IP address assigned
+        IP_ADDRESS="None"
+        while [ "$IP_ADDRESS" == "None" ]; do
+            rlLogInfo "IP_ADDRESS is not assigned yet ..."
+            sleep 30
+            IP_ADDRESS=`python3 $SAMPLES/find_by_uuid.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD \
+                --uuid $INSTANCE_UUID | grep 'ip address' | tr -d ' ' | cut -f2 -d:`
+        done
+
+        rlLogInfo "Running instance IP_ADDRESS=$IP_ADDRESS"
+
+        rlLogInfo "Waiting 30sec for instance to initialize ..."
+        sleep 30
+    rlPhaseEnd
+
+    rlPhaseStartTest "Verify VM instance"
+        # verify we can log into that instance
+        rlRun -t -c "ssh -oStrictHostKeyChecking=no -i $SSH_KEY_DIR/id_rsa root@$IP_ADDRESS 'cat /etc/redhat-release'"
+    rlPhaseEnd
+
+    rlPhaseStartCleanup
+        # note: vmdk disk is removed when destroying the VM
+        python3 $SAMPLES/destroy_vm.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD --uuid $INSTANCE_UUID
+        rlAssert0 "VM destroyed" $?
+        rlRun -t -c "$CLI compose delete $UUID"
+        rlRun -t -c "rm -rf $IMAGE $TMP_DIR $SSH_KEY_DIR"
+    rlPhaseEnd
+
+rlJournalEnd
+rlJournalPrintText
diff --git a/tests/cli/test_compose_ext4-filesystem.sh b/tests/cli/test_compose_ext4-filesystem.sh
new file mode 100755
index 00000000..e0170476
--- /dev/null
+++ b/tests/cli/test_compose_ext4-filesystem.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Note: execute this file from the project root directory
+
+#####
+#
+# Make sure an ext4-filesystem compose can be built without errors!
+# Note: according to the existing test plan we're not going to validate
+# direct usage scenarios for this image type!
+#
+#####
+
+. /usr/share/beakerlib/beakerlib.sh
+
+CLI="./src/bin/composer-cli"
+
+
+rlJournalStart
+    rlPhaseStartTest "compose start"
+        UUID=`$CLI compose start example-http-server ext4-filesystem`
+        rlAssertEquals "exit code should be zero" $? 0
+
+        UUID=`echo $UUID | cut -f 2 -d' '`
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose finished"
+        if [ -n "$UUID" ]; then
+            until $CLI compose info $UUID | grep FINISHED; do
+                sleep 60
+                rlLogInfo "Waiting for compose to finish ..."
+            done;
+        else
+            rlFail "Compose UUID is empty!"
+        fi
+    rlPhaseEnd
+
+rlJournalEnd
+rlJournalPrintText
diff --git a/tests/cli/test_compose_partitioned-disk.sh b/tests/cli/test_compose_partitioned-disk.sh
new file mode 100755
index 00000000..165eca57
--- /dev/null
+++ b/tests/cli/test_compose_partitioned-disk.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Note: execute this file from the project root directory
+
+#####
+#
+# Make sure a partitioned-disk compose can be built without errors!
+# Note: according to the existing test plan we're not going to validate
+# direct usage scenarios for this image type!
+#
+#####
+
+. /usr/share/beakerlib/beakerlib.sh
+
+CLI="./src/bin/composer-cli"
+
+
+rlJournalStart
+    rlPhaseStartTest "compose start"
+        UUID=`$CLI compose start example-http-server partitioned-disk`
+        rlAssertEquals "exit code should be zero" $? 0
+
+        UUID=`echo $UUID | cut -f 2 -d' '`
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose finished"
+        if [ -n "$UUID" ]; then
+            until $CLI compose info $UUID | grep FINISHED; do
+                sleep 60
+                rlLogInfo "Waiting for compose to finish ..."
+            done;
+        else
+            rlFail "Compose UUID is empty!"
+        fi
+    rlPhaseEnd
+
+rlJournalEnd
+rlJournalPrintText
diff --git a/tests/cli/test_compose_sanity.sh b/tests/cli/test_compose_sanity.sh
new file mode 100755
index 00000000..f82c7632
--- /dev/null
+++ b/tests/cli/test_compose_sanity.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# Note: execute this file from the project root directory
+
+. /usr/share/beakerlib/beakerlib.sh
+
+CLI="./src/bin/composer-cli"
+
+
+rlJournalStart
+    rlPhaseStartTest "compose types"
+        rlAssertEquals "lists all supported types" \
+            "`$CLI compose types | sort | xargs`" "ami ext4-filesystem live-iso openstack partitioned-disk qcow2 tar vhd vmdk"
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose start"
+        UUID=`$CLI --test=2 compose start example-http-server tar`
+        rlAssertEquals "exit code should be zero" $? 0
+
+        UUID=`echo $UUID | cut -f 2 -d' '`
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose info"
+        if [ -n "$UUID" ]; then
+            rlRun -t -c "$CLI compose info $UUID | egrep 'RUNNING|WAITING'"
+        else
+            rlFail "Compose UUID is empty!"
+        fi
+    rlPhaseEnd
+
+    rlPhaseStartTest "compose image"
+        if [ -n "$UUID" ]; then
+            until $CLI compose info $UUID | grep FINISHED; do
+                sleep 60
+                rlLogInfo "Waiting for compose to finish ..."
+            done;
+
+            rlRun -t -c "$CLI compose image $UUID"
+            rlAssertExists "$UUID-root.tar.xz"
+        else
+            rlFail "Compose UUID is empty!"
+        fi
+    rlPhaseEnd
+
+rlJournalEnd
+rlJournalPrintText
diff --git a/tests/test_cli.sh b/tests/test_cli.sh
new file mode 100755
index 00000000..9bb4ac12
--- /dev/null
+++ b/tests/test_cli.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Note: execute this file from the project root directory
+
+# setup
+rm -rf /var/tmp/beakerlib-*/
+export top_srcdir=`pwd`
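Because test_cli.sh (below) is the common entry point -- no arguments runs the sanity tests, otherwise it runs exactly the scripts passed to it -- an individual cloud test can also be invoked directly, assuming its credentials are already exported in the environment:

    sudo -E ./tests/test_cli.sh tests/cli/test_build_and_deploy_openstack.sh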
+. ./tests/testenv.sh
+
+BLUEPRINTS_DIR=`mktemp -d '/tmp/composer-blueprints.XXXXX'`
+cp ./tests/pylorax/blueprints/*.toml $BLUEPRINTS_DIR
+
+SHARE_DIR=`mktemp -d '/tmp/composer-share.XXXXX'`
+cp -R ./share/* $SHARE_DIR
+chmod a+rx -R $SHARE_DIR
+
+# start the lorax-composer daemon
+./src/sbin/lorax-composer --sharedir $SHARE_DIR $BLUEPRINTS_DIR &
+
+# wait for the backend to become ready
+tries=0
+until curl -m 15 --unix-socket /run/weldr/api.socket http://localhost:4000/api/status | grep 'db_supported.*true'; do
+    tries=$((tries + 1))
+    if [ $tries -gt 20 ]; then
+        exit 1
+    fi
+    sleep 2
+    echo "DEBUG: Waiting for backend API to become ready before testing ..."
+done;
+
+
+if [ -z "$*" ]; then
+    # invoke cli/ tests which can be executed without special preparation
+    ./tests/cli/test_blueprints_sanity.sh
+    ./tests/cli/test_compose_sanity.sh
+else
+    # execute other cli tests which need more adjustments in the calling environment
+    # or can't be executed inside Travis CI
+    for TEST in "$@"; do
+        ./$TEST
+    done
+fi
+
+
+# Stop lorax-composer and remove /run/weldr/api.socket
+pkill -9 lorax-composer
+rm -f /run/weldr/api.socket
+
+# look for failures
+grep RESULT_STRING /var/tmp/beakerlib-*/TestResults | grep -v PASS && exit 1
+
+# explicit return code for Makefile
+exit 0
diff --git a/tests/testenv.sh b/tests/testenv.sh
new file mode 100644
index 00000000..48074422
--- /dev/null
+++ b/tests/testenv.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+if [ -z "$top_srcdir" ]; then
+    echo "*** top_srcdir must be set"
+    exit 1
+fi
+
+# If no top_builddir is set, use top_srcdir
+: "${top_builddir:=$top_srcdir}"
+
+if [ -z "$PYTHONPATH" ]; then
+    PYTHONPATH="${top_builddir}/src/:${top_srcdir}/tests/lib"
+else
+    PYTHONPATH="${PYTHONPATH}:${top_srcdir}/src/:${top_srcdir}:${top_srcdir}/tests/lib"
+fi
+
+export PYTHONPATH
+export top_srcdir
+export top_builddir
diff --git a/tests/usercustomize.py b/tests/usercustomize.py
deleted file mode 100644
index e0563b72..00000000
--- a/tests/usercustomize.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Enable Python coverage for subprocesses. See:
-# http://nedbatchelder.com/code/coverage/subprocess.html
-
-try:
-    import coverage
-    coverage.process_startup()
-except ImportError:
-    pass