tests: Remove unused lorax-composer tests
This leaves the composer-cli related tests in place and switches their default backend to osbuild-composer.
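The tests pick the backend from the BACKEND environment variable, so both backends remain reachable after this change. A minimal sketch of running the CLI suite either way (invocation taken from the Makefile hunk below; `sudo -E` is what carries BACKEND through, assuming test_cli.sh consults BACKEND the same way the VM runner does):

    # default after this commit: osbuild-composer
    sudo -E ./tests/test_cli.sh

    # explicitly test the old backend
    BACKEND=lorax-composer sudo -E ./tests/test_cli.sh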
parent 7616a10373
commit 0eda7829a8
.tasks | 18 -
@@ -1,18 +0,0 @@
-#!/bin/sh
-
-# When run automated, randomize to minimize stampeding herd
-if [ -t 0 ]; then
-    chance=10
-else
-    chance=$(shuf -i 0-10 -n 1)
-fi
-
-if [ $chance -gt 9 ]; then
-    # command lines representing pending work. one will be executed
-    # randomly by the calling environment
-    echo "./tests/cleanup/remove_old_objects_alibaba.sh"
-    echo "./tests/cleanup/remove_old_objects_aws.sh"
-    echo "./tests/cleanup/remove_old_objects_azure.sh"
-    echo "./tests/cleanup/remove_old_objects_openstack.sh"
-    echo "./tests/cleanup/remove_old_objects_vmware.sh"
-fi
Makefile | 10 -
@@ -57,16 +57,6 @@ test:
 	coverage3 report -m
 	[ -f "/usr/bin/coveralls" ] && [ -n "$(COVERALLS_REPO_TOKEN)" ] && coveralls || echo
 
-# need `losetup`, which needs Docker to be in privileged mode (--privileged)
-# but even so fails in Travis CI
-test_images:
-	sudo -E ./tests/test_cli.sh tests/cli/test_compose_ext4-filesystem.sh \
-		tests/cli/test_compose_partitioned-disk.sh \
-		tests/cli/test_compose_tar.sh \
-		tests/cli/test_compose_tar_kickstart.sh \
-		tests/cli/test_compose_qcow2.sh \
-		tests/cli/test_compose_live-iso.sh
-
 test_cli:
 	sudo -E ./tests/test_cli.sh
 
@@ -1,92 +0,0 @@
-#!/usr/bin/python3
-
-import unittest
-
-import composertest
-import requests
-import subprocess
-
-
-@unittest.skip("API is now tested by osbuild-composer test suite")
-class TestApi(composertest.ComposerTestCase):
-    """Test Composer HTTP API"""
-
-    def setUp(self):
-        super().setUp()
-
-        # Forward /run/weldr/api.socket to a port on the host
-        # Set ExitOnForwardFailure so that ssh blocks until the forward is set
-        # up before going to the background (-f), which it closes stdout. We
-        # wait for that by calling read() on it.
-        self.composer_port = self.network._lock(8080)
-        forwarder_command = [*self.ssh_command, "-fNT",
-                             "-o", "ExitOnForwardFailure=yes",
-                             "-L", f"localhost:{self.composer_port}:/run/weldr/api.socket"]
-        self.forwarder_proc = subprocess.Popen(forwarder_command, stdout=subprocess.PIPE)
-        self.forwarder_proc.stdout.read()
-
-    def tearDown(self):
-        self.forwarder_proc.terminate()
-        try:
-            self.forwarder_proc.wait(timeout=1)
-        except TimeoutError:
-            self.forwarder_proc.kill()
-        super().tearDown()
-
-    def request(self, method, path, json=None, check=True):
-        self.assertEqual(path[0], "/")
-        r = requests.request(method, f"http://localhost:{self.composer_port}{path}", json=json, timeout=30)
-        if check:
-            r.raise_for_status()
-        return r
-
-    def test_basic(self):
-        """Basic checks for the API"""
-
-        #
-        # API status without depsolve errors
-        #
-        r = self.request("GET", "/api/status")
-        self.assertEqual(r.status_code, 200)
-        status = r.json()
-        self.assertEqual(status.keys(), { "build", "api", "db_version", "schema_version", "db_supported", "backend", "msgs" })
-        self.assertEqual(status["msgs"], [])
-        self.assertEqual(r.headers.keys(), { "Content-Type", "Content-Length", "Date" })
-
-        #
-        # HTTP errors should return json responses
-        #
-        r = self.request("GET", "/marmalade", check=False)
-        self.assertEqual(r.status_code, 404)
-        self.assertEqual(r.json(), {
-            "status": False,
-            "errors": [{ "id": "HTTPError", "code": 404, "msg": "Not Found" }]
-        })
-
-        r = self.request("POST", "/api/status", check=False)
-        self.assertEqual(r.status_code, 405)
-        self.assertEqual(r.json(), {
-            "status": False,
-            "errors": [{ "id": "HTTPError", "code": 405, "msg": "Method Not Allowed" }]
-        })
-
-        #
-        # API create blueprint with InvalidChars
-        #
-        invalid_blueprint = {
-            "name": "Name,With,Commas",
-            "description": "",
-            "version": "0.0.1",
-            "modules": [],
-            "groups": []
-        }
-        r = self.request("POST", "/api/v0/blueprints/new", json=invalid_blueprint, check=False)
-        self.assertEqual(r.status_code, 400)
-        self.assertEqual(r.json(), {
-            "status": False,
-            "errors": [{ "id": "InvalidChars", "msg": "Invalid characters in API path" }]
-        })
-
-
-if __name__ == '__main__':
-    composertest.main()
@@ -1,8 +1,5 @@
 #!/usr/bin/python3
 
-import unittest
-
-import tempfile
 import composertest
 
 
@@ -22,94 +19,6 @@ class TestImages(composertest.ComposerTestCase):
     def test_compose_sanity(self):
         self.runCliTest("/tests/cli/test_compose_sanity.sh")
 
-    @unittest.skip("Image building is now tested by osbuild-composer test suite")
-    def test_ext4_filesystem(self):
-        self.runCliTest("/tests/cli/test_compose_ext4-filesystem.sh")
-
-    @unittest.skip("Image building is now tested by osbuild-composer test suite")
-    def test_partitioned_disk(self):
-        self.runCliTest("/tests/cli/test_compose_partitioned-disk.sh")
-
-
-@unittest.skip("Image building is now tested by osbuild-composer test suite")
-class TestQcow2(composertest.ComposerTestCase):
-    def tearDown(self):
-        super().tearDownTestMachine()
-
-    def test_qcow2(self):
-        self.runCliTest("/tests/cli/test_compose_qcow2.sh")
-
-        with tempfile.TemporaryDirectory(prefix="/var/tmp/lorax-test.") as tmpdir:
-            # Copy the resulting qcow2 image and shut down the VM
-            self.tearDownVirt(virt_dir="/var/tmp/test-results/*", local_dir=tmpdir)
-
-            # Boot the image
-            self.setUpTestMachine(tmpdir + "/disk.qcow2", tmpdir + "/id_rsa")
-
-            # Upload SSH keys to the machine so we can use the existing assertions
-            # by ssh'ing to localhost
-            self.machine.upload([tmpdir + "/id_rsa", tmpdir + "/id_rsa.pub"], "/root/.ssh")
-
-            # Upload the contents of the ./tests/ directory to the machine
-            self.machine.upload(["../tests"], "/")
-
-            # Run the test, on the booted image
-            self.runImageTest("/tests/cli/test_boot_qcow2.sh")
-
-
-@unittest.skip("Image building is now tested by osbuild-composer test suite")
-class TestLiveIso(composertest.ComposerTestCase):
-    def tearDown(self):
-        super().tearDownTestMachine()
-
-    def test_live_iso(self):
-        self.runCliTest("/tests/cli/test_compose_live-iso.sh")
-
-        with tempfile.TemporaryDirectory(prefix="/var/tmp/lorax-test.") as tmpdir:
-            # Copy the resulting iso and shut down the VM
-            self.tearDownVirt(virt_dir="/var/tmp/test-results/*", local_dir=tmpdir)
-
-            # Boot the image, login using the ssh key
-            self.setUpTestMachine(tmpdir + "/live.iso", tmpdir + "/id_rsa")
-
-            # Upload the contents of the ./tests/ directory to the machine
-            self.machine.upload(["../tests"], "/")
-
-            # Run the test, on the booted image
-            self.runImageTest("/tests/cli/test_boot_live-iso.sh")
-
-
-@unittest.skip("Image building is now tested by osbuild-composer test suite")
-class TestRepos(composertest.ComposerTestCase):
-    def test_repos_sanity(self):
-        self.runCliTest("/tests/cli/test_repos_sanity.sh")
-
-
-@unittest.skip("Image building is now tested by osbuild-composer test suite")
-class TestTar(composertest.ComposerTestCase):
-    def test_tar(self):
-        self.runCliTest("/tests/cli/test_compose_tar.sh")
-
-    def test_tar_kickstart(self):
-        self.runCliTest("/tests/cli/test_compose_tar_kickstart.sh")
-
-        with tempfile.TemporaryDirectory(prefix="/var/tmp/lorax-test.") as tmpdir:
-            # Copy the resulting disk image and shut down the VM
-            self.tearDownVirt(virt_dir="/var/tmp/test-results/*", local_dir=tmpdir)
-
-            # Boot the image, login using the ssh key
-            self.setUpTestMachine(tmpdir + "/disk.img", tmpdir + "/id_rsa")
-
-            # Upload SSH keys to the machine so we can use the existing assertions
-            # by ssh'ing to localhost
-            self.machine.upload([tmpdir + "/id_rsa", tmpdir + "/id_rsa.pub"], "/root/.ssh")
-
-            # Upload the contents of the ./tests/ directory to the machine
-            self.machine.upload(["../tests"], "/")
-
-            # Run the test, on the booted image
-            self.runImageTest("/tests/cli/test_boot_tar_kickstart.sh")
-
-
 if __name__ == '__main__':
     composertest.main()
@@ -1,27 +0,0 @@
-#!/usr/bin/python3
-
-import unittest
-
-import composertest
-
-
-@unittest.skip("Image building is now tested by osbuild-composer test suite")
-class TestCloud(composertest.ComposerTestCase):
-    def test_alibaba(self):
-        self.runCliTest("/tests/cli/test_compose_alibaba.sh")
-
-    def test_aws(self):
-        self.runCliTest("/tests/cli/test_build_and_deploy_aws.sh")
-
-    def test_azure(self):
-        self.runCliTest("/tests/cli/test_build_and_deploy_azure.sh")
-
-    def test_openstack(self):
-        self.runCliTest("/tests/cli/test_build_and_deploy_openstack.sh")
-
-    def test_vmware(self):
-        self.runCliTest("/tests/cli/test_build_and_deploy_vmware.sh")
-
-
-if __name__ == '__main__':
-    composertest.main()
@@ -149,7 +149,7 @@ class ComposerTestCase(VirtMachineTestCase):
         return local_dir
 
     def runCliTest(self, script):
-        extra_env = ["BACKEND=%s" % os.getenv('BACKEND', 'lorax-composer')]
+        extra_env = ["BACKEND=%s" % os.getenv('BACKEND', 'osbuild-composer')]
         if self.sit:
            extra_env.append("COMPOSER_TEST_FAIL_FAST=1")
 
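The new Python default mirrors the shell idiom the repository already uses for this variable; a run without BACKEND exported now behaves as if the environment contained (sketch only):

    BACKEND="${BACKEND:-osbuild-composer}"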
@@ -160,16 +160,6 @@ class ComposerTestCase(VirtMachineTestCase):
                         "/tests/test_cli.sh", script])
         self.assertEqual(r.returncode, 0)
 
-    def runImageTest(self, script):
-        extra_env = []
-        if self.sit:
-            extra_env.append("COMPOSER_TEST_FAIL_FAST=1")
-
-        r = self.execute(["TEST=" + self.id(),
-                          *extra_env,
-                          "/tests/test_image.sh", script])
-        self.assertEqual(r.returncode, 0)
-
 
 class ComposerTestResult(unittest.TestResult):
     def name(self, test):
test/run | 4 +-
@@ -2,9 +2,7 @@
 # This is the expected entry point for Cockpit CI; will be called without
 # arguments but with an appropriate $TEST_OS, and optionally $TEST_SCENARIO
 
-export BACKEND="${BACKEND:-lorax-composer}"
-
-if [ "$BACKEND" == "osbuild-composer" ] || [ "$TEST_SCENARIO" == "osbuild-composer" ]; then
+if [ "$TEST_SCENARIO" == "osbuild-composer" ]; then
     rm -rf ./test/images/*
     export BACKEND="osbuild-composer"
     make BACKEND=osbuild-composer vm
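With the unconditional export gone, test/run only forces the backend for the dedicated CI scenario. Condensed, a sketch of the post-commit behaviour (the closing fi is outside the hunk shown above):

    if [ "$TEST_SCENARIO" == "osbuild-composer" ]; then
        rm -rf ./test/images/*
        export BACKEND="osbuild-composer"
        make BACKEND=osbuild-composer vm
    fi

Everything else falls through to the osbuild-composer default now baked into composertest.py.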
@@ -1,124 +0,0 @@
-#!/bin/bash
-# Script removes ECS Instances, Custom Images and OSS files older than
-# HOURS_LIMIT (24 hours by default) from Alibaba cloud
-#
-. /usr/share/beakerlib/beakerlib.sh
-
-
-rlJournalStart
-    rlPhaseStartSetup
-        ALI_DIR=`mktemp -d /tmp/alicloud.XXXXX`
-
-        if [ -z "$ALICLOUD_ACCESS_KEY" ]; then
-            rlFail "ALICLOUD_ACCESS_KEY is empty!"
-        else
-            rlLogInfo "ALICLOUD_ACCESS_KEY is configured"
-        fi
-
-        if [ -z "$ALICLOUD_SECRET_KEY" ]; then
-            rlFail "ALICLOUD_SECRET_KEY is empty!"
-        else
-            rlLogInfo "ALICLOUD_SECRET_KEY is configured"
-        fi
-
-        ALICLOUD_BUCKET="${ALICLOUD_BUCKET:-composer-test}"
-        rlLogInfo "ALICLOUD_BUCKET=$ALICLOUD_BUCKET"
-
-        # VMs older than HOURS_LIMIT will be deleted
-        HOURS_LIMIT="${HOURS_LIMIT:-24}"
-        TIMESTAMP=`date -u -d "$HOURS_LIMIT hours ago" '+%FT%T'`
-        rlLogInfo "HOURS_LIMIT=$HOURS_LIMIT"
-        rlLogInfo "TIMESTAMP=$TIMESTAMP"
-
-        for package in jq; do
-            if ! rlCheckRpm "$package"; then
-                rlRun -t -c "dnf -y install $package"
-                rlAssertRpm "$package"
-            fi
-        done
-
-        # use the CLI b/c Ansible modules are not yet upstream and are unreliable
-        TAR_FILE="aliyun-cli-linux-3.0.32-amd64.tgz"
-        curl -L https://github.com/aliyun/aliyun-cli/releases/download/v3.0.32/$TAR_FILE > $ALI_DIR/$TAR_FILE
-        tar -C $ALI_DIR/ -xzvf $ALI_DIR/$TAR_FILE
-        chmod a+x $ALI_DIR/aliyun
-
-        # configure
-        [ -d ~/.aliyun/ ] || mkdir ~/.aliyun/
-
-        if [ -f ~/.aliyun/config.json ]; then
-            rlLogInfo "Reusing existing ~/.aliyun/config.json"
-        else
-            rlLogInfo "Creating ~/.aliyun/config.json"
-            cat > ~/.aliyun/config.json << __EOF__
-{
-    "current": "",
-    "profiles": [
-        {
-            "mode": "AK",
-            "access_key_id": "$ALICLOUD_ACCESS_KEY",
-            "access_key_secret": "$ALICLOUD_SECRET_KEY",
-            "region_id": "$ALICLOUD_REGION",
-            "output_format": "json",
-            "language": "en"
-        }
-    ],
-    "meta_path": ""
-}
-__EOF__
-        fi
-
-    rlPhaseEnd
-
-    # Check all regions
-    for REGION_ID in `$ALI_DIR/aliyun Ecs DescribeRegions | jq -r '.Regions.Region[] | .RegionId'`; do
-        rlPhaseStartTest "Delete old VMs in region $REGION_ID"
-            for INSTANCE_ID in `$ALI_DIR/aliyun ecs DescribeInstances --RegionId $REGION_ID --InstanceName "Composer-Test*" | jq -r '.Instances.Instance[] | .InstanceId'`; do
-                CREATION_TIME=`$ALI_DIR/aliyun ecs DescribeInstanceAttribute --InstanceId $INSTANCE_ID | jq -r .CreationTime`
-                if [[ "$CREATION_TIME" < "$TIMESTAMP" ]]; then
-                    rlLogInfo "Removing instance $REGION_ID/$INSTANCE_ID created at $CREATION_TIME < $TIMESTAMP"
-                    rlRun -t -c "$ALI_DIR/aliyun ecs DeleteInstance --Force True --InstanceId $INSTANCE_ID"
-                else
-                    rlLogInfo "Skipping instance $REGION_ID/$INSTANCE_ID created at $CREATION_TIME >= $TIMESTAMP"
-                fi
-            done
-        rlPhaseEnd
-
-        rlPhaseStartTest "Delete old Images in region $REGION_ID"
-            for IMAGE_ID in `$ALI_DIR/aliyun ecs DescribeImages --RegionId $REGION_ID --ImageName "Composer-Test*" | jq -r '.Images.Image[] | .ImageId'`; do
-                CREATION_TIME=`$ALI_DIR/aliyun ecs DescribeImages --ImageId $IMAGE_ID | jq -r '.Images.Image[] | .CreationTime'`
-                if [[ "$CREATION_TIME" < "$TIMESTAMP" ]]; then
-                    rlLogInfo "Removing image $REGION_ID/$IMAGE_ID created at $CREATION_TIME < $TIMESTAMP"
-                    rlRun -t -c "$ALI_DIR/aliyun ecs DeleteImage --Force True --ImageId $IMAGE_ID"
-                else
-                    rlLogInfo "Skipping image $REGION_ID/$IMAGE_ID created at $CREATION_TIME >= $TIMESTAMP"
-                fi
-            done
-        rlPhaseEnd
-
-        rlPhaseStartTest "Delete composer key pairs in region $REGION_ID"
-            for KEY_NAME in `$ALI_DIR/aliyun ecs DescribeKeyPairs --KeyPairName "Composer-Test*" | jq -r '.KeyPairs.KeyPair[] | .KeyPairName'`; do
-                rlRun -t -c "$ALI_DIR/aliyun ecs DeleteKeyPairs --KeyPairNames '[\"$KEY_NAME\"]'"
-            done
-        rlPhaseEnd
-    done
-
-    rlPhaseStartTest "Delete old OSS objects"
-        all_objects=`$ALI_DIR/aliyun oss ls oss://$ALICLOUD_BUCKET/Composer-Test | awk 'NR > 1' | head -n -2`
-        while read date_f time_f tz_offset tz_name size_bytes storage_class etag filename_f; do
-            creation_date=`date -u -d "$date_f $time_f$tz_offset" '+%FT%T'`
-            if [[ "$creation_date" < "$TIMESTAMP" ]]; then
-                rlLogInfo "Removing old file $filename_f created at $creation_date < $TIMESTAMP"
-                rlRun -t -c "$ALI_DIR/aliyun oss rm $filename_f"
-            else
-                rlLogInfo "Skipping file $filename_f created at $creation_date >= $TIMESTAMP"
-            fi
-        done <<< "$all_objects"
-    rlPhaseEnd
-
-    rlPhaseStartCleanup
-        rlRun -t -c "rm -rf $ALI_DIR"
-    rlPhaseEnd
-
-rlJournalEnd
-rlJournalPrintText
@@ -1,186 +0,0 @@
-#!/bin/bash
-# Script removes virtual machines, AMIs, volumes, snapshots, key pairs and S3 objects older than HOURS_LIMIT (24 hours by default) from Amazon EC2/S3
-# Instances, Volumes, Snapshots, AMIs and s3 objects with the "keep_me" tag will not be affected
-
-. /usr/share/beakerlib/beakerlib.sh
-
-
-rlJournalStart
-    rlPhaseStartSetup
-        if [ -z "$AWS_ACCESS_KEY_ID" ]; then
-            rlFail "AWS_ACCESS_KEY_ID is empty!"
-        else
-            rlLogInfo "AWS_ACCESS_KEY_ID is configured"
-        fi
-
-        if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
-            rlFail "AWS_SECRET_ACCESS_KEY is empty!"
-        else
-            rlLogInfo "AWS_SECRET_ACCESS_KEY is configured"
-        fi
-
-        # VMs older than HOURS_LIMIT will be deleted
-        HOURS_LIMIT="${HOURS_LIMIT:-24}"
-        export TIMESTAMP=`date -u -d "$HOURS_LIMIT hours ago" '+%FT%T'`
-        export AWS_REGION="${AWS_REGION:-us-east-1}"
-        AWS_BUCKET="${AWS_BUCKET:-composerredhat}"
-
-        rlLogInfo "HOURS_LIMIT=$HOURS_LIMIT"
-        rlLogInfo "AWS_REGION=$AWS_REGION"
-        rlLogInfo "TIMESTAMP=$TIMESTAMP"
-
-        for package in ansible python3-boto3 awscli; do
-            if ! rlCheckRpm "$package"; then
-                rlRun -t -c "dnf -y install $package"
-                rlAssertRpm "$package"
-            fi
-        done
-
-        # Get a list of EC2 regions
-        regions=`aws ec2 describe-regions --region="$AWS_REGION" --query "Regions[].{Name:RegionName}" --output text | tr '\n' ' '`
-        if [ -z "$regions" ]; then
-            rlFail "No EC2 regions returned."
-        else
-            rlLogInfo "EC2 regions to be checked: $regions"
-        fi
-
-        # Get the account ID
-        account_id=`aws sts get-caller-identity --output text --query 'Account'`
-        if [ -z "$account_id" ]; then
-            rlFail "No account ID returned."
-        else
-            rlLogInfo "Account ID: $account_id"
-        fi
-
-        PLAYBOOK_DELETE_VMS=`mktemp`
-        PLAYBOOK_DELETE_AMIS=`mktemp`
-    rlPhaseEnd
-
-    # Check all EC2 regions
-    for region in $regions; do
-        rlPhaseStartTest "Delete old VMs in region $region"
-            cat > $PLAYBOOK_DELETE_VMS << __EOF__
-- name: Delete old VMs
-  hosts: localhost
-  gather_facts: False
-  tasks:
-  - name: Get VMs
-    ec2_instance_facts:
-      region: "$region"
-    register: vms_facts
-
-  - name: List all VMs
-    debug:
-      var: vms_facts
-
-  - name: Delete old VMs
-    ec2_instance:
-      instance_ids: "{{item.instance_id}}"
-      region: "$region"
-      state: absent
-    loop: "{{vms_facts.instances}}"
-    when: (item.launch_time < lookup('env','TIMESTAMP')) and (item.tags['composer-test'] is defined)
-    loop_control:
-      label: "{{item.instance_id}}"
-__EOF__
-
-            rlLogInfo "Removing VMs in region $region created before $TIMESTAMP"
-            rlRun -t -c "ansible-playbook $PLAYBOOK_DELETE_VMS"
-        rlPhaseEnd
-
-        rlPhaseStartTest "Delete old AMIs in region $region"
-            cat > $PLAYBOOK_DELETE_AMIS << __EOF__
-- name: Delete old AMIs
-  hosts: localhost
-  gather_facts: False
-  tasks:
-  - name: Get AMIs
-    ec2_ami_facts:
-      region: "$region"
-      owners: "$account_id"
-    register: ami_facts
-
-  - name: List all AMIs
-    debug:
-      var: ami_facts
-
-  - name: Delete old AMIs
-    ec2_ami:
-      image_id: "{{item.image_id}}"
-      region: "$region"
-      state: absent
-      delete_snapshot: True
-    loop: "{{ami_facts.images}}"
-    when: (item.creation_date < lookup('env','TIMESTAMP')) and (item.tags['composer-test'] is defined)
-    loop_control:
-      label: "{{item.image_id}}"
-__EOF__
-
-            rlLogInfo "Removing AMIs in region $region created before $TIMESTAMP"
-            rlRun -t -c "ansible-playbook $PLAYBOOK_DELETE_AMIS"
-        rlPhaseEnd
-
-        rlPhaseStartTest "Delete unused composer key pairs in region $region"
-            # list all key pairs starting with "Composer-Test"
-            keys=`aws ec2 describe-key-pairs --region="$region" --query 'KeyPairs[?starts_with(KeyName, \`Composer-Test\`) == \`true\`].KeyName' --output text`
-            rlLogInfo "Found existing composer keys: $keys"
-
-            for key in $keys; do
-                # list all instances, which use $key
-                instances=`aws ec2 describe-instances --region="$region" --filters Name=key-name,Values="$key" --query 'Reservations[*].Instances[*].InstanceId' --output text`
-                # remove the key pair if it's not used
-                if [ -z "$instances" ]; then
-                    rlLogInfo "Removing unused key pair $key"
-                    rlRun -t -c "aws ec2 delete-key-pair --region='$region' --key-name='$key'"
-                else
-                    rlLogInfo "Keeping key pair $key used by instance $instances"
-                fi
-            done
-        rlPhaseEnd
-
-        rlPhaseStartTest "Delete old volumes in region $region"
-            # get a list of unused ("available") volumes older than $TIMESTAMP having tag "composer-test"
-            # note: volume tags inherit from instance tags when instances are created
-            volumes_to_delete=$(aws ec2 describe-volumes --region="$region" --query "Volumes[?CreateTime<\`$TIMESTAMP\`] | [?(Tags[?Key==\`composer-test\`])] | [?State==\`available\`].[VolumeId,CreateTime]" --output text)
-
-            while read volume_id creation_time; do
-                if [ -n "$volume_id" ]; then
-                    rlLogInfo "Removing volume $volume_id created $creation_time"
-                    rlRun -t -c "aws ec2 delete-volume --region='$region' --volume-id '$volume_id'"
-                fi
-            done <<< "$volumes_to_delete"
-        rlPhaseEnd
-
-        rlPhaseStartTest "Delete old snapshots in region $region"
-            # get a list of snapshots older than $TIMESTAMP and owned by our account and having the tag "composer-test"
-            snapshots_to_delete=$(aws ec2 describe-snapshots --region="$region" --owner-ids "$account_id" --query "Snapshots[?StartTime<\`$TIMESTAMP\`] |[?(Tags[?Key==\`composer-test\`])].[SnapshotId,StartTime]" --output text)
-
-            while read snapshot_id start_time; do
-                if [ -n "$snapshot_id" ]; then
-                    rlLogInfo "Removing snapshot $snapshot_id started $start_time"
-                    rlRun -t -c "aws ec2 delete-snapshot --region='$region' --snapshot-id '$snapshot_id'"
-                fi
-            done <<< "$snapshots_to_delete"
-        rlPhaseEnd
-
-    # Check all EC2 regions
-    done
-
-    rlPhaseStartTest "Delete old Amazon S3 objects"
-        all_objects=`aws s3 ls s3://${AWS_BUCKET} --recursive`
-        while read date_f time_f size_f filename_f; do
-            creation_date=`date -u -d "$date_f $time_f" '+%FT%T'`
-            if [ "$filename_f" =~ ^Composer-Test ] && [ "$creation_date" \< "$TIMESTAMP" ]; then
-                rlLogInfo "Removing old file $filename_f created $date_f $time_f"
-                rlRun -t -c "aws s3 rm s3://${AWS_BUCKET}/${filename_f}"
-            fi
-        done <<< "$all_objects"
-    rlPhaseEnd
-
-    rlPhaseStartCleanup
-        rlRun -t -c "rm -f $PLAYBOOK_DELETE_VMS"
-        rlRun -t -c "rm -f $PLAYBOOK_DELETE_AMIS"
-    rlPhaseEnd
-
-rlJournalEnd
-rlJournalPrintText
@@ -1,149 +0,0 @@
-#!/bin/bash
-# Script removes virtual machines and other artifacts older than HOURS_LIMIT (24 hours by default) from Azure
-
-. /usr/share/beakerlib/beakerlib.sh
-
-
-# Delete old objects based on first_seen tag value
-delete_old_resources() {
-    local resource_type="$1"
-
-    # list composer-test resources older than $TIMESTAMP based on the first_seen tag
-    # timestamp tagging is done either when resources are created or by the function below
-    # Note: the query language here is called JMESPath
-    rlRun -c -s 'az resource list --resource-type $resource_type --query "[?tags.$TAG_NAME < \`$TIMESTAMP\` && tags.composer_test == \`true\`].name" --output tsv' 0 "Get a list of $resource_type older than $TIMESTAMP"
-    resources_to_delete=$(cat $rlRun_LOG)
-
-    if [ -n "$resources_to_delete" ]; then
-        for object in $resources_to_delete; do
-            rlRun -t -c "az resource delete --resource-type=$resource_type --name $object --resource-group $AZURE_RESOURCE_GROUP"
-        done
-    else
-        rlLogInfo "No $resource_type older than $TIMESTAMP was found."
-    fi
-}
-
-# Find objects without the first_seen tag and create the tag with the current date/time value
-tag_new_resources() {
-    local resource_type="$1"
-
-    # list composer-test resources without the first_seen tag
-    rlRun -c -s 'az resource list --resource-type $resource_type --query "[?tags.$TAG_NAME == null && tags.composer_test == \`true\`].name" --output tsv' 0 "Get a list of $resource_type without the $TAG_NAME tag."
-    resources_without_tag=$(cat $rlRun_LOG)
-
-    if [ -n "$resources_without_tag" ]; then
-        now=$(date -u '+%FT%T')
-        for object in $resources_without_tag; do
-            rlRun -t -c 'az resource update --resource-type $resource_type --name $object --resource-group $AZURE_RESOURCE_GROUP --set "tags.$TAG_NAME=$now"' 0 "Add tag $TAG_NAME:$now to $resource_type: $object"
-        done
-    else
-        rlLogInfo "No $resource_type without the $TAG_NAME tag was found."
-    fi
-}
-
-rlJournalStart
-    rlPhaseStartSetup
-        if [ -z "$AZURE_SUBSCRIPTION_ID" ]; then
-            rlFail "AZURE_SUBSCRIPTION_ID is empty!"
-        else
-            rlLogInfo "AZURE_SUBSCRIPTION_ID is configured"
-        fi
-
-        if [ -z "$AZURE_TENANT" ]; then
-            rlFail "AZURE_TENANT is empty!"
-        else
-            rlLogInfo "AZURE_TENANT is configured"
-        fi
-
-        if [ -z "$AZURE_CLIENT_ID" ]; then
-            rlFail "AZURE_CLIENT_ID is empty!"
-        else
-            rlLogInfo "AZURE_CLIENT_ID is configured"
-        fi
-
-        if [ -z "$AZURE_SECRET" ]; then
-            rlFail "AZURE_SECRET is empty!"
-        else
-            rlLogInfo "AZURE_SECRET is configured"
-        fi
-
-        export AZURE_RESOURCE_GROUP="${AZURE_RESOURCE_GROUP:-composer}"
-        rlLogInfo "AZURE_RESOURCE_GROUP=$AZURE_RESOURCE_GROUP"
-
-        export AZURE_STORAGE_ACCOUNT="${AZURE_STORAGE_ACCOUNT:-composerredhat}"
-        rlLogInfo "AZURE_STORAGE_ACCOUNT=$AZURE_STORAGE_ACCOUNT"
-
-        export AZURE_STORAGE_CONTAINER="${AZURE_STORAGE_CONTAINER:-composerredhat}"
-        rlLogInfo "AZURE_STORAGE_CONTAINER=$AZURE_STORAGE_CONTAINER"
-
-        # VMs older than HOURS_LIMIT will be deleted
-        HOURS_LIMIT="${HOURS_LIMIT:-24}"
-        export TIMESTAMP=`date -u -d "$HOURS_LIMIT hours ago" '+%FT%T'`
-
-        rlLogInfo "HOURS_LIMIT=$HOURS_LIMIT"
-        rlLogInfo "TIMESTAMP=$TIMESTAMP"
-
-        # It's not easily possible to get creation date/time of Azure objects.
-        # Use a tag to record when the object was seen for the first time
-        # and remove objects based on the value of the tag. The value is UTC
-        # date/time, format: 2019-01-29T15:16:40
-        TAG_NAME="first_seen"
-
-        # Use Microsoft repository to install azure-cli
-        rlRun -t -c "rpm --import https://packages.microsoft.com/keys/microsoft.asc"
-        cat > /etc/yum.repos.d/azure-cli.repo << __EOF__
-[azure-cli]
-name=Azure CLI
-baseurl=https://packages.microsoft.com/yumrepos/azure-cli
-enabled=1
-gpgcheck=1
-gpgkey=https://packages.microsoft.com/keys/microsoft.asc
-__EOF__
-        rlRun -c -t "dnf install -y azure-cli"
-
-        # sign in
-        rlRun -c -t 'az login --service-principal --username "$AZURE_CLIENT_ID" --password "$AZURE_SECRET" --tenant "$AZURE_TENANT"'
-    rlPhaseEnd
-
-    # A list of Azure resources we want to clean
-    resource_types="
-        Microsoft.Compute/virtualMachines
-        Microsoft.Network/networkInterfaces
-        Microsoft.Network/publicIPAddresses
-        Microsoft.Network/networkSecurityGroups
-        Microsoft.Compute/disks
-        Microsoft.Compute/images
-    "
-
-    # Remove old resources and tag new resources
-    for resource_type in $resource_types; do
-        rlPhaseStartTest "Delete old $resource_type"
-            delete_old_resources $resource_type
-        rlPhaseEnd
-
-        rlPhaseStartTest "Tag new $resource_type"
-            tag_new_resources $resource_type
-        rlPhaseEnd
-    done
-
-    rlPhaseStartTest "Delete old blobs"
-        # get a list of blobs older than $TIMESTAMP
-        rlRun -c -s 'az storage blob list --container-name $AZURE_STORAGE_CONTAINER --query "[?properties.creationTime < \`$TIMESTAMP\` && tags.composer_test == \`true\`].[name,properties.creationTime]" --output tsv'
-        blobs_to_delete=$(cat $rlRun_LOG)
-
-        if [ -n "$blobs_to_delete" ]; then
-            while read name creation_time; do
-                rlLogInfo "Removing blob $name created $creation_time"
-                rlRun -t -c "az storage blob delete --container-name $AZURE_STORAGE_CONTAINER --name $name"
-            done <<< "$blobs_to_delete"
-        else
-            rlLogInfo "No blob older than $TIMESTAMP was found."
-        fi
-    rlPhaseEnd
-
-    rlPhaseStartCleanup
-        rlRun -c -t "rm -f /etc/yum.repos.d/azure-cli.repo"
-    rlPhaseEnd
-
-rlJournalEnd
-rlJournalPrintText
@@ -1,142 +0,0 @@
-#!/bin/bash
-# Script removes virtual machines and other artifacts older than HOURS_LIMIT (24 hours by default) from OpenStack
-
-. /usr/share/beakerlib/beakerlib.sh
-
-
-rlJournalStart
-    rlPhaseStartSetup
-        if [ -z "$OS_AUTH_URL" ]; then
-            rlFail "OS_AUTH_URL is empty!"
-        else
-            rlLogInfo "OS_AUTH_URL=$OS_AUTH_URL"
-        fi
-
-        if [ -z "$OS_USERNAME" ]; then
-            rlFail "OS_USERNAME is empty!"
-        else
-            rlLogInfo "OS_USERNAME is configured"
-        fi
-
-        if [ -z "$OS_PASSWORD" ]; then
-            rlFail "OS_PASSWORD is empty!"
-        else
-            rlLogInfo "OS_PASSWORD is configured"
-        fi
-
-        export OS_PROJECT_NAME="${OS_PROJECT_NAME:-$OS_USERNAME}"
-        rlLogInfo "OS_PROJECT_NAME=$OS_PROJECT_NAME"
-
-        # VMs older than HOURS_LIMIT will be deleted
-        HOURS_LIMIT="${HOURS_LIMIT:-24}"
-        export TIMESTAMP=`date -u -d "$HOURS_LIMIT hours ago" '+%FT%T'`
-
-        rlLogInfo "HOURS_LIMIT=$HOURS_LIMIT"
-
-        for package in ansible python3-openstacksdk python3-openstackclient; do
-            if ! rlCheckRpm "$package"; then
-                rlRun -t -c "dnf -y install $package"
-                rlAssertRpm "$package"
-            fi
-        done
-
-        PLAYBOOK_DELETE_VMS=`mktemp`
-        PLAYBOOK_DELETE_IMAGES=`mktemp`
-    rlPhaseEnd
-
-    rlPhaseStartTest "Delete old VMs"
-        # The openstack_servers variable used in the playbook bellow is set by the os_server_facts ansible module.
-        # The variable contains details about all discovered virtual machines.
-        # See https://docs.ansible.com/ansible/latest/modules/os_server_facts_module.html
-        cat > $PLAYBOOK_DELETE_VMS << __EOF__
-- name: Delete old VMs
-  hosts: localhost
-  gather_facts: False
-  tasks:
-  - name: Get VMs
-    os_server_facts:
-
-  - name: List all VMs
-    debug:
-      var: openstack_servers
-
-  - name: Delete old VMs by tag
-    os_server:
-      name: "{{item.id}}"
-      state: absent
-    loop: "{{openstack_servers}}"
-    when: item.created < lookup('env','TIMESTAMP') and (item.metadata.Tag is defined and item.metadata.Tag == "composer-test")
-    loop_control:
-      label: "{{item.name}} (id: {{item.id}} created: {{item.created}} metadata: {{item.metadata}})"
-
-  - name: Delete old VMs by name
-    os_server:
-      name: "{{item.id}}"
-      state: absent
-    loop: "{{openstack_servers}}"
-    when: item.created < lookup('env','TIMESTAMP') and (item.name | regex_search('Composer-Test'))
-    loop_control:
-      label: "{{item.name}} (id: {{item.id}} created: {{item.created}} metadata: {{item.metadata}})"
-__EOF__
-
-        rlLogInfo "Removing VMs created before $TIMESTAMP"
-        rlRun -t -c "ansible-playbook $PLAYBOOK_DELETE_VMS"
-    rlPhaseEnd
-
-    rlPhaseStartTest "Delete old images"
-        # The openstack_image variable used in the playbook bellow is set by the os_image_facts ansible module.
-        # The variable contains details about all discovered images.
-        # See https://docs.ansible.com/ansible/latest/modules/os_image_facts_module.html
-        cat > $PLAYBOOK_DELETE_IMAGES << __EOF__
-- name: Delete old images
-  hosts: localhost
-  gather_facts: False
-  tasks:
-  - name: Get images
-    os_image_facts:
-
-  - name: Delete old images
-    os_image:
-      name: "{{item.name}}"
-      id: "{{item.id}}"
-      state: absent
-    loop: "{{openstack_image}}"
-    when: (item.created_at < lookup('env','TIMESTAMP')) and (item.name | regex_search('Composer-Test'))
-    loop_control:
-      label: "{{item.name}} (id: {{item.id}} created: {{item.created_at}})"
-__EOF__
-
-        rlLogInfo "Removing images created before $TIMESTAMP"
-        rlRun -t -c "ansible-playbook $PLAYBOOK_DELETE_IMAGES"
-    rlPhaseEnd
-
-    rlPhaseStartTest "Delete old volumes"
-        volume_list=`openstack-3 volume list --format value --column ID`
-        for volume in $volume_list; do
-            creation_date=`openstack-3 volume show $volume --column created_at --format value`
-            if [ $? -ne 0 ]; then
-                rlLogWarning "Failed to get the creation date of volume $volume"
-                continue
-            fi
-
-            # The format of the date/time returned by openstack-3 looks like this:
-            # 2019-01-22T18:50:14.000000
-            # The TIMESTAMP variable format is:
-            # 2019-01-21T18:45:36
-            # "<" does a lexicographic comparison using the character collating sequence
-            # specified by the 'LC_COLLATE' locale. "<" needs to be escaped, otherwise
-            # it's a symbol for redirection.
-            if [ "$creation_date" \< "$TIMESTAMP" ]; then
-                rlLogInfo "Removing old volume $volume created $creation_date"
-                rlRun -t -c "openstack-3 volume delete $volume"
-            fi
-        done
-    rlPhaseEnd
-
-    rlPhaseStartCleanup
-        rlRun -t -c "rm -f $PLAYBOOK_DELETE_VMS"
-        rlRun -t -c "rm -f $PLAYBOOK_DELETE_IMAGES"
-    rlPhaseEnd
-
-rlJournalEnd
-rlJournalPrintText
@@ -1,75 +0,0 @@
-#!/bin/bash
-# Script removes virtual machines and other artifacts older than HOURS_LIMIT (24 hours by default) from VMware vShere
-
-. /usr/share/beakerlib/beakerlib.sh
-
-
-rlJournalStart
-    rlPhaseStartSetup
-        if [ -z "$V_HOST" ]; then
-            rlFail "V_HOST is empty!"
-        else
-            rlLogInfo "V_HOST=$V_HOST"
-        fi
-
-        if [ -z "$V_USERNAME" ]; then
-            rlFail "V_USERNAME is empty!"
-        else
-            rlLogInfo "V_USERNAME=$V_USERNAME"
-        fi
-
-        if [ -z "$V_PASSWORD" ]; then
-            rlFail "V_PASSWORD is empty!"
-        else
-            rlLogInfo "V_PASSWORD is configured"
-        fi
-
-        # VMs older than HOURS_LIMIT will be deleted
-        HOURS_LIMIT="${HOURS_LIMIT:-24}"
-        export TIMESTAMP=`date -u -d "$HOURS_LIMIT hours ago" '+%FT%T'`
-
-        rlLogInfo "HOURS_LIMIT=$HOURS_LIMIT"
-        rlLogInfo "TIMESTAMP=$TIMESTAMP"
-
-        for package in python3-pip git; do
-            if ! rlCheckRpm "$package"; then
-                rlRun -t -c "dnf -y install $package"
-                rlAssertRpm "$package"
-            fi
-        done
-
-        rlRun -t -c "pip3 install pyvmomi"
-
-        TMP_DIR=`mktemp -d /tmp/composer-vmware.XXXXX`
-        SAMPLES="$TMP_DIR/pyvmomi-community-samples"
-        if [ ! -d "$SAMPLES" ]; then
-            rlRun -t -c "git clone https://github.com/weldr/pyvmomi-community-samples $SAMPLES"
-            pushd $SAMPLES && git checkout composer_testing && popd
-        fi
-        SAMPLES="$SAMPLES/samples"
-        SCRIPT_DIR=$(dirname "$0")
-    rlPhaseEnd
-
-    rlPhaseStartTest "Delete old VMs"
-        # list all VMs
-        rlRun -t -c 'python3 $SCRIPT_DIR/vmware_list_vms.py --host $V_HOST --user $V_USERNAME --password $V_PASSWORD --disable_ssl_verification > $TMP_DIR/vmware_vms' 0 'Getting a list of VMs'
-
-        while read name uuid creation_date; do
-            # remove VMs with name starting "Composer-Test" and older than $TIMESTAMP
-            echo $name | grep ^Composer-Test > /dev/null
-            if [ $? -eq 0 -a "$creation_date" \< "$TIMESTAMP" ]; then
-                # note: vmdk disk is removed when destroying the VM
-                rlRun 'python3 $SAMPLES/destroy_vm.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD --uuid $uuid' 0 "Delete VM: $name UUID: $uuid"
-                rlAssert0 "VM destroyed" $?
-            fi
-        done < $TMP_DIR/vmware_vms
-
-    rlPhaseEnd
-
-    rlPhaseStartCleanup
-        rlRun -t -c "rm -rf $TMP_DIR"
-    rlPhaseEnd
-
-rlJournalEnd
-rlJournalPrintText
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-# list all VMs in vSphere and print their name, UUID and date/time of creation
-
-import atexit
-import argparse
-import getpass
-import ssl
-
-from pyVim import connect
-from pyVmomi import vim  # pylint: disable=no-name-in-module
-
-
-def setup_args():
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument('--host', required=True, help='vSphere service to connect to')
-    parser.add_argument('--port', type=int, default=443, help="Port number (default is 443)")
-    parser.add_argument('--username', required=True, help='User name')
-    parser.add_argument('--password', help='User password')
-    parser.add_argument('--disable_ssl_verification', action='store_true', help='Disable ssl host certificate verification')
-
-    args = parser.parse_args()
-    if not args.password:
-        args.password = getpass.getpass()
-    return args
-
-def print_vm_datetime(vm):
-    create_date = vm.config.createDate
-    # spaces are used as field separators, remove them from VM names
-    name = vm.config.name.replace(' ', '')
-    uuid = vm.config.instanceUuid
-    if create_date:
-        print(name, uuid, create_date.isoformat())
-
-def main():
-    args = setup_args()
-
-    sslContext = None
-    if args.disable_ssl_verification:
-        sslContext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
-        sslContext.verify_mode = ssl.CERT_NONE
-
-    try:
-        service_instance = connect.SmartConnect(host=args.host,
-                                                port=args.port,
-                                                user=args.username,
-                                                pwd=args.password,
-                                                sslContext=sslContext)
-    except Exception:
-        print("Unable to connect to %s" % args.host)
-        return 1
-
-    atexit.register(connect.Disconnect, service_instance)
-
-    content = service_instance.RetrieveContent()
-    viewType = [vim.VirtualMachine]
-    container = content.viewManager.CreateContainerView(content.rootFolder, viewType, recursive=True)
-
-    for child in container.view:
-        print_vm_datetime(child)
-
-    return 0
-
-if __name__ == "__main__":
-    exit(main())
@@ -1,37 +0,0 @@
-- hosts: localhost
-  tasks:
-  - name: Import SSH key pair
-    ec2_key:
-      name: "{{ key_name }}"
-      key_material: "{{ lookup('file', ssh_key_dir + '/id_rsa.pub') }}"
-
-  - name: Create instance
-    ec2_instance:
-      name: "{{ vm_name }}"
-      tags:
-        composer-test: true
-      image_id: "{{ ami_id }}"
-      key_name: "{{ key_name }}"
-      instance_type: "{{ instance_type }}"
-      security_group: allow-ssh
-      instance_initiated_shutdown_behavior: terminate
-      state: present
-    register: ec2
-
-  - name: Wait for SSH to come up
-    wait_for:
-      host: "{{ item.public_ip_address }}"
-      port: 22
-      state: started
-    with_items: "{{ ec2.instances }}"
-    when: item.image_id == ami_id
-
-  - name: Save instance ID
-    local_action: copy content={{ item.instance_id }} dest={{ tmp_dir }}/instance_id
-    with_items: "{{ ec2.instances }}"
-    when: item.image_id == ami_id
-
-  - name: Save public IP
-    local_action: copy content={{ item.public_ip_address }} dest={{ tmp_dir }}/public_ip
-    with_items: "{{ ec2.instances }}"
-    when: item.image_id == ami_id
@@ -1,14 +0,0 @@
-- hosts: localhost
-  tasks:
-  - name: Make sure bucket exists
-    aws_s3:
-      bucket: "{{ aws_bucket }}"
-      mode: create
-
-  - name: Make sure vmimport role exists
-    iam_role_facts:
-      name: vmimport
-    register: role_facts
-  - fail:
-      msg: "Role vmimport doesn't exist"
-    when: role_facts.iam_roles | length < 1
@@ -1,23 +0,0 @@
-#!/bin/bash
-# Note: execute this file from the project root directory
-
-#####
-#
-# Test the live-iso image
-#
-#####
-
-set -e
-
-. /usr/share/beakerlib/beakerlib.sh
-. $(dirname $0)/lib/lib.sh
-
-rlJournalStart
-    rlPhaseStartTest "Verify live iso"
-        # Just the fact that this is running means the image can boot and ssh is working
-        ROOT_ACCOUNT_LOCKED=0 verify_image liveuser localhost "-p 22"
-        rlAssertGrep "liveuser" /etc/passwd
-        rlAssertGrep "custom_cmdline_arg" /proc/cmdline
-    rlPhaseEnd
-rlJournalEnd
-rlJournalPrintText
@@ -1,22 +0,0 @@
-#!/bin/bash
-# Note: execute this file from the project root directory
-
-#####
-#
-# Test the qcow2 image
-#
-#####
-
-set -e
-
-. /usr/share/beakerlib/beakerlib.sh
-. $(dirname $0)/lib/lib.sh
-
-rlJournalStart
-    rlPhaseStartTest "Verify VM instance"
-        # Just the fact that this is running means the image can boot and ssh is working
-        verify_image root localhost "-p 22"
-        rlAssertExists "/root/.ssh/authorized_keys"
-    rlPhaseEnd
-rlJournalEnd
-rlJournalPrintText
@@ -1,22 +0,0 @@
-#!/bin/bash
-# Note: execute this file from the project root directory
-
-#####
-#
-# Test the liveimg installed tar disk image
-#
-#####
-
-set -e
-
-. /usr/share/beakerlib/beakerlib.sh
-. $(dirname $0)/lib/lib.sh
-
-rlJournalStart
-    rlPhaseStartTest "Verify VM instance"
-        # Just the fact that this is running means the image can boot and ssh is working
-        CHECK_CMDLINE=0 verify_image root localhost "-p 22"
-        rlAssertExists "/root/.ssh/authorized_keys"
-    rlPhaseEnd
-rlJournalEnd
-rlJournalPrintText
@@ -1,207 +0,0 @@
-#!/bin/bash
-# Note: execute this file from the project root directory
-
-#####
-#
-# Make sure we can build an image and deploy it inside AWS!
-#
-#####
-
-set -e
-
-. /usr/share/beakerlib/beakerlib.sh
-. $(dirname $0)/lib/lib.sh
-
-CLI="${CLI:-./src/bin/composer-cli}"
-
-
-rlJournalStart
-    rlPhaseStartSetup
-        if [ -z "$AWS_ACCESS_KEY_ID" ]; then
-            rlFail "AWS_ACCESS_KEY_ID is empty!"
-        else
-            rlLogInfo "AWS_ACCESS_KEY_ID is configured"
-        fi
-
-        if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
-            rlFail "AWS_SECRET_ACCESS_KEY is empty!"
-        else
-            rlLogInfo "AWS_SECRET_ACCESS_KEY is configured"
-        fi
-
-        AWS_BUCKET="${AWS_BUCKET:-composerredhat}"
-        AWS_REGION="${AWS_REGION:-us-east-1}"
-
-        rlLogInfo "AWS_BUCKET=$AWS_BUCKET"
-        rlLogInfo "AWS_REGION=$AWS_REGION"
-
-        for package in python3-pip python3-boto3; do
-            if ! rlCheckRpm "$package"; then
-                rlRun -t -c "dnf -y install $package"
-                rlAssertRpm "$package"
-            fi
-        done
-
-        rlRun -t -c "pip3 install awscli ansible[aws]"
-
-        # aws configure
-        [ -d ~/.aws/ ] || mkdir ~/.aws/
-
-        if [ -f ~/.aws/config ]; then
-            rlLogInfo "Reusing existing ~/.aws/config"
-        else
-            rlLogInfo "Creating ~/.aws/config"
-            cat > ~/.aws/config << __EOF__
-[default]
-region = $AWS_REGION
-__EOF__
-        fi
-
-        if [ -f ~/.aws/credentials ]; then
-            rlLogInfo "Reusing existing ~/.aws/credentials"
-        else
-            rlLogInfo "Creating ~/.aws/credentials"
-            cat > ~/.aws/credentials << __EOF__
-[default]
-aws_access_key_id = $AWS_ACCESS_KEY_ID
-aws_secret_access_key = $AWS_SECRET_ACCESS_KEY
-__EOF__
-        fi
-
-        TMP_DIR=$(mktemp -d)
-        PLAYBOOKS_DIR=$(dirname "$0")/playbooks/aws
-
-        # make sure bucket and vmimport role exist
-        rlRun -t -c "ansible-playbook --extra-vars 'aws_bucket=$AWS_BUCKET' $PLAYBOOKS_DIR/setup.yml"
-    rlPhaseEnd
-
-    rlPhaseStartTest "compose start"
-        rlAssertEquals "SELinux operates in enforcing mode" "$(getenforce)" "Enforcing"
-        rlRun -t -c "$CLI blueprints push $(dirname $0)/lib/test-http-server.toml"
-        UUID=`$CLI compose start test-http-server ami`
-        rlAssertEquals "exit code should be zero" $? 0
-
-        UUID=`echo $UUID | cut -f 2 -d' '`
-    rlPhaseEnd
-
-    rlPhaseStartTest "compose finished"
-        wait_for_compose $UUID
-    rlPhaseEnd
-
-    rlPhaseStartTest "Import AMI image in AWS"
-        rlRun -t -c "$CLI compose image $UUID"
-        rlRun -t -c "mv $UUID-disk.ami Composer-Test-$UUID-disk.ami"
-
-        AMI="Composer-Test-$UUID-disk.ami"
-
-        # upload to S3
-        rlRun -t -c "ansible localhost -m aws_s3 -a \
-            'bucket=$AWS_BUCKET \
-            src=$AMI \
-            object=$AMI \
-            mode=put'"
-
-        # import image as snapshot into EC2
-        cat > containers.json << __EOF__
-{
-    "Description": "Composer image",
-    "Format": "raw",
-    "UserBucket": {
-        "S3Bucket": "$AWS_BUCKET",
-        "S3Key": "$AMI"
-    }
-}
-__EOF__
-
-        IMPORT_TASK_ID=`aws ec2 import-snapshot --disk-container file://containers.json | grep ImportTaskId | cut -f4 -d'"'`
-
-        if [ -z "$IMPORT_TASK_ID" ]; then
-            rlFail "IMPORT_TASK_ID is empty!"
-        fi
-
-        # wait for the import to complete
-        while aws ec2 describe-import-snapshot-tasks --filters Name=task-state,Values=active | grep $IMPORT_TASK_ID; do
-            rlLogInfo "Waiting for $IMPORT_TASK_ID to complete ..."
-            sleep 60
-        done
-
-        DESCRIPTION="Created by AWS-VMImport service for $IMPORT_TASK_ID"
-        rlRun -t -c "aws ec2 describe-snapshots --filters Name=description,Values='$DESCRIPTION'"
-        SNAPSHOT_ID=`aws ec2 describe-snapshots --filters Name=description,Values="$DESCRIPTION" | grep SnapshotId | cut -f4 -d'"'`
-
-        if [ -z "$SNAPSHOT_ID" ]; then
-            rlFail "SNAPSHOT_ID is empty!"
-        else
-            rlLogInfo "SNAPSHOT_ID=$SNAPSHOT_ID"
-        fi
-        # tag snapshot
-        aws ec2 create-tags --resources $SNAPSHOT_ID --tags Key=composer-test,Value=true
-
-        # create an image from the imported selected snapshot
-        AMI_ARCH="$(uname -m)"
-        if [ "$AMI_ARCH" == "aarch64" ]; then
-            AMI_ARCH="arm64"
-        fi
-        AMI_ID=`aws ec2 register-image --name "Composer-Test-$UUID" --virtualization-type hvm --root-device-name /dev/sda1 \
-                --ena-support --architecture $AMI_ARCH \
-                --block-device-mappings "[{\"DeviceName\": \"/dev/sda1\", \"Ebs\": {\"SnapshotId\": \"$SNAPSHOT_ID\"}}]" | \
-                grep ImageId | cut -f4 -d'"'`
-
-        if [ -z "$AMI_ID" ]; then
-            rlFail "AMI_ID is empty!"
-        else
-            rlLogInfo "AMI_ID=$AMI_ID"
-        fi
-
-        # tag AMI
-        aws ec2 create-tags --resources $AMI_ID --tags Key=composer-test,Value=true
-    rlPhaseEnd
-
-    rlPhaseStartTest "Start EC2 instance"
-        INSTANCE_TYPE="t2.small"
-        if [ "$(uname -m)" == "aarch64" ]; then
-            INSTANCE_TYPE="a1.medium"
-        fi
-        # generate new ssh key
-        KEY_NAME="Composer-Test-Key-$UUID"
-        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
-        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
-
-        rlRun -t -c "ansible-playbook --extra-vars \
-            'key_name=$KEY_NAME \
-            ssh_key_dir=$SSH_KEY_DIR \
-            vm_name=Composer-Test-VM-$AMI_ID \
-            ami_id=$AMI_ID \
-            instance_type=$INSTANCE_TYPE \
-            tmp_dir=$TMP_DIR' \
-            $PLAYBOOKS_DIR/instance.yml"
-
-        INSTANCE_ID=$(cat $TMP_DIR/instance_id)
-        IP_ADDRESS=$(cat $TMP_DIR/public_ip)
-
-        rlLogInfo "Running INSTANCE_ID=$INSTANCE_ID with IP_ADDRESS=$IP_ADDRESS"
-    rlPhaseEnd
-
-    rlPhaseStartTest "Verify EC2 instance"
-        # cloud-init default config differs between RHEL and Fedora
-        # and ami.ks will create ec2-user only on RHEL
-        CLOUD_USER="ec2-user"
-        if [ -f "/etc/fedora-release" ]; then
-            CLOUD_USER="fedora"
-        fi
-
-        # run generic tests to verify the instance
-        verify_image "$CLOUD_USER" "$IP_ADDRESS" "-i $SSH_KEY_DIR/id_rsa"
-    rlPhaseEnd
-
-    rlPhaseStartCleanup
-        rlRun -t -c "ansible localhost -m ec2_instance -a 'state=terminated instance_ids=$INSTANCE_ID'"
-        rlRun -t -c "ansible localhost -m ec2_key -a 'state=absent name=$KEY_NAME'"
-        rlRun -t -c "ansible localhost -m ec2_ami -a 'state=absent image_id=$AMI_ID delete_snapshot=True'"
-        rlRun -t -c "ansible localhost -m aws_s3 -a 'mode=delobj bucket=$AWS_BUCKET object=$AMI'"
-        rlRun -t -c "$CLI compose delete $UUID"
-        rlRun -t -c "rm -rf $AMI $SSH_KEY_DIR containers.json $TMP_DIR"
-    rlPhaseEnd
-
-rlJournalEnd
-rlJournalPrintText
@ -1,162 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure we can build an image and deploy it inside Azure!
#
#####

set -e

. /usr/share/beakerlib/beakerlib.sh
. $(dirname $0)/lib/lib.sh

CLI="${CLI:-./src/bin/composer-cli}"


rlJournalStart
    rlPhaseStartSetup
        # NOTE: see test/README.md for information on how to obtain these
        # UUIDs and what configuration is expected on the Azure side
        if [ -z "$AZURE_SUBSCRIPTION_ID" ]; then
            rlFail "AZURE_SUBSCRIPTION_ID is empty!"
        else
            rlLogInfo "AZURE_SUBSCRIPTION_ID is configured"
        fi

        if [ -z "$AZURE_TENANT" ]; then
            rlFail "AZURE_TENANT is empty!"
        else
            rlLogInfo "AZURE_TENANT is configured"
        fi

        if [ -z "$AZURE_CLIENT_ID" ]; then
            rlFail "AZURE_CLIENT_ID is empty!"
        else
            rlLogInfo "AZURE_CLIENT_ID is configured"
        fi

        if [ -z "$AZURE_SECRET" ]; then
            rlFail "AZURE_SECRET is empty!"
        else
            rlLogInfo "AZURE_SECRET is configured"
        fi

        export AZURE_RESOURCE_GROUP="${AZURE_RESOURCE_GROUP:-composer}"
        rlLogInfo "AZURE_RESOURCE_GROUP=$AZURE_RESOURCE_GROUP"

        export AZURE_STORAGE_ACCOUNT="${AZURE_STORAGE_ACCOUNT:-composerredhat}"
        rlLogInfo "AZURE_STORAGE_ACCOUNT=$AZURE_STORAGE_ACCOUNT"

        export AZURE_STORAGE_CONTAINER="${AZURE_STORAGE_CONTAINER:-composerredhat}"
        rlLogInfo "AZURE_STORAGE_CONTAINER=$AZURE_STORAGE_CONTAINER"

        if ! rlCheckRpm "python3-pip"; then
            rlRun -t -c "dnf -y install python3-pip"
            rlAssertRpm python3-pip
        fi

        rlRun -t -c "pip3 install ansible[azure]"
    rlPhaseEnd

    rlPhaseStartTest "compose start"
        rlAssertEquals "SELinux operates in enforcing mode" "$(getenforce)" "Enforcing"
        rlRun -t -c "$CLI blueprints push $(dirname $0)/lib/test-http-server.toml"
        UUID=`$CLI compose start test-http-server vhd`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        wait_for_compose $UUID
    rlPhaseEnd

    rlPhaseStartTest "Upload image to Azure"
        rlRun -t -c "$CLI compose image $UUID"
        rlRun -t -c "mv $UUID-disk.vhd Composer-Test-$UUID-disk.vhd"
        IMAGE="Composer-Test-$UUID-disk.vhd"
        OS_IMAGE_NAME="Composer-Test-$UUID"

        rlRun -t -c "ansible localhost -m azure_rm_storageblob -a \
            'resource_group=$AZURE_RESOURCE_GROUP \
            storage_account_name=$AZURE_STORAGE_ACCOUNT \
            container=$AZURE_STORAGE_CONTAINER \
            blob=$IMAGE src=$IMAGE blob_type=page \
            tags={\"composer_test\":\"true\"}'"
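        # blob_type=page above matters: Azure can only boot OS disks stored as
        # page blobs, which is why the VHD is not uploaded as a block blob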

        # create image from blob
        now=$(date -u '+%FT%T')
        rlRun -t -c "ansible localhost -m azure_rm_image -a \
            'resource_group=$AZURE_RESOURCE_GROUP name=$OS_IMAGE_NAME os_type=Linux location=eastus \
            source=https://$AZURE_STORAGE_ACCOUNT.blob.core.windows.net/$AZURE_STORAGE_CONTAINER/$IMAGE \
            tags={\"composer_test\":\"true\",\"first_seen\":\"$now\"}'"
    rlPhaseEnd

    rlPhaseStartTest "Start VM instance"
        VM_NAME="Composer-Test-VM-$UUID"

        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
        SSH_PUB_KEY=`cat $SSH_KEY_DIR/id_rsa.pub`

        now=$(date -u '+%FT%T')

        TMP_DIR=`mktemp -d /tmp/composer-azure.XXXXX`
        cat > $TMP_DIR/azure-playbook.yaml << __EOF__
---
- hosts: localhost
  connection: local
  tasks:
    - name: Create a VM
      azure_rm_virtualmachine:
        resource_group: $AZURE_RESOURCE_GROUP
        name: $VM_NAME
        vm_size: Standard_B2s
        location: eastus
        admin_username: azure-user
        ssh_password_enabled: false
        ssh_public_keys:
          - path: /home/azure-user/.ssh/authorized_keys
            key_data: "$SSH_PUB_KEY"
        image:
          name: $OS_IMAGE_NAME
          resource_group: $AZURE_RESOURCE_GROUP
        tags:
          "first_seen": "$now"
          "composer_test": "true"
        storage_account_name: $AZURE_STORAGE_ACCOUNT
__EOF__
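        # Note: azure_rm_virtualmachine implicitly creates the supporting pieces
        # it needs (NIC, public IP, virtual network) when they are not specified,
        # so the playbook above only has to name the image, size and credentials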

        rlRun -t -c "ansible-playbook $TMP_DIR/azure-playbook.yaml"

        response=`ansible localhost -m azure_rm_virtualmachine -a "resource_group=$AZURE_RESOURCE_GROUP name=$VM_NAME"`
        rlAssert0 "Received VM info successfully" $?
        rlLogInfo "$response"

        IP_ADDRESS=`echo "$response" | grep '"ipAddress":' | cut -f4 -d'"'`
        rlLogInfo "Running instance IP_ADDRESS=$IP_ADDRESS"

        rlLogInfo "Waiting 60sec for instance to initialize ..."
        sleep 60
    rlPhaseEnd

    rlPhaseStartTest "Verify VM instance"
        # run generic tests to verify the instance and check if cloud-init is installed and running
        verify_image azure-user "$IP_ADDRESS" "-i $SSH_KEY_DIR/id_rsa"
        rlRun -t -c "ssh -o StrictHostKeyChecking=no -o BatchMode=yes -i $SSH_KEY_DIR/id_rsa azure-user@$IP_ADDRESS 'rpm -q cloud-init'"
        # disabled cloud-init check, see https://github.com/osbuild/osbuild-composer/issues/698
        # rlRun -t -c "ssh -o StrictHostKeyChecking=no -o BatchMode=yes -i $SSH_KEY_DIR/id_rsa azure-user@$IP_ADDRESS 'systemctl status cloud-init'"
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun -t -c "ansible localhost -m azure_rm_virtualmachine -a 'resource_group=$AZURE_RESOURCE_GROUP name=$VM_NAME location=eastus state=absent'"
        rlRun -t -c "ansible localhost -m azure_rm_image -a 'resource_group=$AZURE_RESOURCE_GROUP name=$OS_IMAGE_NAME state=absent'"
        rlRun -t -c "ansible localhost -m azure_rm_storageblob -a 'resource_group=$AZURE_RESOURCE_GROUP storage_account_name=$AZURE_STORAGE_ACCOUNT container=$AZURE_STORAGE_CONTAINER blob=$IMAGE state=absent'"
        rlRun -t -c "$CLI compose delete $UUID"
        rlRun -t -c "rm -rf $IMAGE $SSH_KEY_DIR $TMP_DIR"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@ -1,135 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure we can build an image and deploy it inside OpenStack!
#
#####

set -e

. /usr/share/beakerlib/beakerlib.sh
. $(dirname $0)/lib/lib.sh

CLI="${CLI:-./src/bin/composer-cli}"


rlJournalStart
    rlPhaseStartSetup
        if [ -z "$OS_AUTH_URL" ]; then
            rlFail "OS_AUTH_URL is empty!"
        else
            rlLogInfo "OS_AUTH_URL=$OS_AUTH_URL"
        fi

        if [ -z "$OS_USERNAME" ]; then
            rlFail "OS_USERNAME is empty!"
        else
            rlLogInfo "OS_USERNAME=$OS_USERNAME"
        fi

        export OS_PROJECT_NAME="${OS_PROJECT_NAME:-$OS_USERNAME}"
        rlLogInfo "OS_PROJECT_NAME=$OS_PROJECT_NAME"

        if [ -z "$OS_PASSWORD" ]; then
            rlFail "OS_PASSWORD is empty!"
        else
            rlLogInfo "OS_PASSWORD is configured"
        fi

        if ! rlCheckRpm "python3-pip"; then
            rlRun -t -c "dnf -y install python3-pip"
            rlAssertRpm python3-pip
        fi

        rlRun -t -c "pip3 install ansible openstacksdk"

        # working directory for the generated blueprint
        TMP_DIR=`mktemp -d /tmp/composer-openstack.XXXXX`
    rlPhaseEnd

    rlPhaseStartTest "compose start"
        rlAssertEquals "SELinux operates in enforcing mode" "$(getenforce)" "Enforcing"

        # workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1639326
        cat > $TMP_DIR/http-with-rng.toml << __EOF__
name = "http-with-rng"
description = "HTTP image for OpenStack with rng-tools"
version = "0.0.1"

[[packages]]
name = "httpd"
version = "*"

[[packages]]
name = "rng-tools"
version = "*"

[customizations.kernel]
append = "custom_cmdline_arg"

__EOF__
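        # rng-tools keeps the guest entropy pool fed; without it, instances on
        # entropy-starved hosts can hang at first boot (the bug linked above)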

        rlRun -t -c "$CLI blueprints push $TMP_DIR/http-with-rng.toml"

        UUID=`$CLI compose start http-with-rng openstack`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        wait_for_compose $UUID
    rlPhaseEnd

    rlPhaseStartTest "Upload QCOW2 image to OpenStack"
        rlRun -t -c "$CLI compose image $UUID"
        rlRun -t -c "mv $UUID-disk.qcow2 Composer-Test-$UUID-disk.qcow2"
        IMAGE="Composer-Test-$UUID-disk.qcow2"
        OS_IMAGE_NAME="Composer-Test-$UUID"

        response=`ansible localhost -m os_image -a "name=$OS_IMAGE_NAME filename=$IMAGE is_public=no"`
        rlAssert0 "Image upload successful" $?
        rlLogInfo "$response"

        OS_IMAGE_UUID=`echo "$response" | grep '"changed": true' -A1 | grep '"id":' | cut -d'"' -f4`
        rlLogInfo "OS_IMAGE_UUID=$OS_IMAGE_UUID"
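        # Illustrative alternative, not part of the original test: the image id
        # can also be read back with the openstack CLI, e.g.
        #   openstack image show -f value -c id "$OS_IMAGE_NAME"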
    rlPhaseEnd

    rlPhaseStartTest "Start VM instance"
        VM_NAME="Composer-Test-VM-$UUID"

        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
        rlRun -t -c "ansible localhost -m os_keypair -a \"name=Composer-Test-Key-$UUID public_key_file=$SSH_KEY_DIR/id_rsa.pub\""

        response=`ansible localhost -m os_server -a "name=$VM_NAME image=$OS_IMAGE_UUID network=provider_net_cci_2 flavor=ci.m1.medium.ephemeral key_name=Composer-Test-Key-$UUID auto_ip=yes meta='composer-test=true'"`
        rlAssert0 "VM started successfully" $?
        rlLogInfo "$response"

        IP_ADDRESS=`echo "$response" | grep '"OS-EXT-IPS:type": "fixed"' -A1 | grep '"addr":' | cut -f4 -d'"' | head -n 1`
        rlLogInfo "Running instance IP_ADDRESS=$IP_ADDRESS"

        rlLogInfo "Waiting 60sec for instance to initialize ..."
        sleep 60
    rlPhaseEnd

    rlPhaseStartTest "Verify VM instance"
        # cloud-init default config differs between RHEL and Fedora
        CLOUD_USER="cloud-user"
        if [ -f "/etc/fedora-release" ]; then
            CLOUD_USER="fedora"
        fi

        # run generic tests to verify the instance
        verify_image "$CLOUD_USER" "$IP_ADDRESS" "-i $SSH_KEY_DIR/id_rsa"
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun -t -c "ansible localhost -m os_keypair -a 'name=Composer-Test-Key-$UUID state=absent'"
        rlRun -t -c "ansible localhost -m os_server -a 'name=$VM_NAME state=absent'"
        rlRun -t -c "ansible localhost -m os_image -a 'name=$OS_IMAGE_NAME state=absent'"
        rlRun -t -c "$CLI compose delete $UUID"
        rlRun -t -c "rm -rf $IMAGE $SSH_KEY_DIR $TMP_DIR"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@ -1,156 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure we can build an image and deploy it inside vSphere!
#
#####

set -e

. /usr/share/beakerlib/beakerlib.sh
. $(dirname $0)/lib/lib.sh

CLI="${CLI:-./src/bin/composer-cli}"


rlJournalStart
    rlPhaseStartSetup
        if [ -z "$V_HOST" ]; then
            rlFail "V_HOST is empty!"
        else
            rlLogInfo "V_HOST=$V_HOST"
        fi

        if [ -z "$V_USERNAME" ]; then
            rlFail "V_USERNAME is empty!"
        else
            rlLogInfo "V_USERNAME=$V_USERNAME"
        fi

        if [ -z "$V_PASSWORD" ]; then
            rlFail "V_PASSWORD is empty!"
        else
            rlLogInfo "V_PASSWORD is configured"
        fi

        V_DATACENTER="${V_DATACENTER:-RH_Engineering}"
        rlLogInfo "V_DATACENTER=$V_DATACENTER"

        V_CLUSTER="${V_CLUSTER:-Satellite-Engineering}"
        rlLogInfo "V_CLUSTER=$V_CLUSTER"

        V_NETWORK="${V_NETWORK:-VLAN-204}"
        rlLogInfo "V_NETWORK=$V_NETWORK"

        V_DATASTORE="${V_DATASTORE:-NFS-Node1}"
        rlLogInfo "V_DATASTORE=$V_DATASTORE"

        V_FOLDER="${V_FOLDER:-Composer}"
        rlLogInfo "V_FOLDER=$V_FOLDER"

        if ! rlCheckRpm "python3-pip"; then
            rlRun -t -c "dnf -y install python3-pip"
            rlAssertRpm python3-pip
        fi

        rlRun -t -c "pip3 install pyvmomi"

        TMP_DIR=`mktemp -d /tmp/composer-vmware.XXXXX`
        SAMPLES="$TMP_DIR/pyvmomi-community-samples"
        if [ ! -d "$SAMPLES" ]; then
            rlRun -t -c "git clone https://github.com/weldr/pyvmomi-community-samples $SAMPLES"
            pushd $SAMPLES && git checkout composer_testing && popd
        fi
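        # the composer_testing branch of the weldr fork carries the helper
        # scripts used below (upload_file_to_datastore.py, create_vm.py,
        # find_by_uuid.py, destroy_vm.py)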
        SAMPLES="$SAMPLES/samples"
    rlPhaseEnd

    rlPhaseStartTest "compose start"
        rlAssertEquals "SELinux operates in enforcing mode" "$(getenforce)" "Enforcing"
        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
        PUB_KEY=`cat $SSH_KEY_DIR/id_rsa.pub`

        cat > $TMP_DIR/vmware.toml << __EOF__
name = "vmware"
description = "HTTP image for vmware"
version = "0.0.1"

[[packages]]
name = "httpd"
version = "*"

[[customizations.user]]
name = "root"
key = "$PUB_KEY"

[customizations.kernel]
append = "custom_cmdline_arg"
__EOF__

        rlRun -t -c "$CLI blueprints push $TMP_DIR/vmware.toml"

        UUID=`$CLI compose start vmware vmdk`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        wait_for_compose $UUID
    rlPhaseEnd

    rlPhaseStartTest "Upload VMDK image in vCenter"
        rlRun -t -c "$CLI compose image $UUID"
        rlRun -t -c "mv $UUID-disk.vmdk Composer-Test-$UUID-disk.vmdk"
        IMAGE="Composer-Test-$UUID-disk.vmdk"

        python3 $SAMPLES/upload_file_to_datastore.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD \
            -d $V_DATASTORE -l `readlink -f $IMAGE` -r $IMAGE
        rlAssert0 "Image upload successful" $?
    rlPhaseEnd

    rlPhaseStartTest "Start VM instance"
        VM_NAME="Composer-Test-VM-$UUID"
        INSTANCE_UUID=`python3 $SAMPLES/create_vm.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD \
            --datacenter $V_DATACENTER -c $V_CLUSTER -f $V_FOLDER -d $V_DATASTORE \
            --portgroup $V_NETWORK -v $IMAGE -m 2048 -g rhel7_64Guest -n $VM_NAME \
            --power-on`

        if [ -z "$INSTANCE_UUID" ]; then
            rlFail "INSTANCE_UUID is empty!"
        else
            rlLogInfo "INSTANCE_UUID=$INSTANCE_UUID"
        fi

        # wait for the instance to start running and get an IP address assigned
        IP_ADDRESS="None"
        while [ "$IP_ADDRESS" == "None" ]; do
            rlLogInfo "IP_ADDRESS is not assigned yet ..."
            sleep 30
            IP_ADDRESS=`python3 $SAMPLES/find_by_uuid.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD \
                --uuid $INSTANCE_UUID | grep 'ip address' | tr -d ' ' | cut -f2- -d:`
        done

        rlLogInfo "Running instance IP_ADDRESS=$IP_ADDRESS"

        rlLogInfo "Waiting 30sec for instance to initialize ..."
        sleep 30
    rlPhaseEnd

    rlPhaseStartTest "Verify VM instance"
        # run generic tests to verify the instance
        verify_image root "$IP_ADDRESS" "-i $SSH_KEY_DIR/id_rsa"
    rlPhaseEnd

    rlPhaseStartCleanup
        # note: the vmdk disk is removed when destroying the VM
        python3 $SAMPLES/destroy_vm.py -S -s $V_HOST -u $V_USERNAME -p $V_PASSWORD --uuid $INSTANCE_UUID
        rlAssert0 "VM destroyed" $?
        rlRun -t -c "$CLI compose delete $UUID"
        rlRun -t -c "rm -rf $IMAGE $TMP_DIR $SSH_KEY_DIR"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@ -1,197 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure we can build an image and deploy it inside Alibaba cloud!
#
#####

set -e

. /usr/share/beakerlib/beakerlib.sh
. $(dirname $0)/lib/lib.sh

CLI="${CLI:-./src/bin/composer-cli}"


rlJournalStart
    rlPhaseStartSetup
        if [ -z "$ALICLOUD_ACCESS_KEY" ]; then
            rlFail "ALICLOUD_ACCESS_KEY is empty!"
        else
            rlLogInfo "ALICLOUD_ACCESS_KEY is configured"
        fi

        if [ -z "$ALICLOUD_SECRET_KEY" ]; then
            rlFail "ALICLOUD_SECRET_KEY is empty!"
        else
            rlLogInfo "ALICLOUD_SECRET_KEY is configured"
        fi

        ALICLOUD_BUCKET="${ALICLOUD_BUCKET:-composer-test}"
        ALICLOUD_REGION="${ALICLOUD_REGION:-us-east-1}"

        rlLogInfo "ALICLOUD_BUCKET=$ALICLOUD_BUCKET"
        rlLogInfo "ALICLOUD_REGION=$ALICLOUD_REGION"

        for package in jq; do
            if ! rlCheckRpm "$package"; then
                rlRun -t -c "dnf -y install $package"
                rlAssertRpm "$package"
            fi
        done

        ALI_DIR=`mktemp -d /tmp/alicloud.XXXXX`
        # use the CLI because the Ansible modules are not yet upstream and are unreliable
        TAR_FILE="aliyun-cli-linux-3.0.32-amd64.tgz"
        curl -L https://github.com/aliyun/aliyun-cli/releases/download/v3.0.32/$TAR_FILE > $ALI_DIR/$TAR_FILE
        tar -C $ALI_DIR/ -xzvf $ALI_DIR/$TAR_FILE
        chmod a+x $ALI_DIR/aliyun

        # configure
        [ -d ~/.aliyun/ ] || mkdir ~/.aliyun/

        if [ -f ~/.aliyun/config.json ]; then
            rlLogInfo "Reusing existing ~/.aliyun/config.json"
        else
            rlLogInfo "Creating ~/.aliyun/config.json"
            cat > ~/.aliyun/config.json << __EOF__
{
    "current": "",
    "profiles": [
        {
            "mode": "AK",
            "access_key_id": "$ALICLOUD_ACCESS_KEY",
            "access_key_secret": "$ALICLOUD_SECRET_KEY",
            "region_id": "$ALICLOUD_REGION",
            "output_format": "json",
            "language": "en"
        }
    ],
    "meta_path": ""
}
__EOF__
        fi
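        # Illustrative, not part of the original test: the credentials can be
        # sanity-checked early with a cheap read-only call, e.g.
        #   $ALI_DIR/aliyun sts GetCallerIdentity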

    rlPhaseEnd

    rlPhaseStartTest "compose start"
        rlAssertEquals "SELinux operates in enforcing mode" "$(getenforce)" "Enforcing"
        UUID=`$CLI compose start example-http-server alibaba`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        wait_for_compose $UUID
    rlPhaseEnd

    rlPhaseStartTest "Import image in Alibaba cloud"
        rlRun -t -c "$CLI compose image $UUID"

        rlRun -t -c "mv $UUID-disk.qcow2 Composer-Test-$UUID-disk.qcow2"
        IMAGE="Composer-Test-$UUID-disk.qcow2"

        # upload to OSS
        rlRun -t -c "$ALI_DIR/aliyun oss cp --retry-count 20 $IMAGE oss://$ALICLOUD_BUCKET/$IMAGE"

        # now import as machine image
        # WARNING: DiskImageSize *MUST BE* 40 GiB. We don't need all of that,
        # but VMs fail to boot otherwise (reason unknown).
        rlRun -t -c "$ALI_DIR/aliyun ecs ImportImage \
            --OSType linux --Platform RedHat \
            --Architecture x86_64 \
            --DiskDeviceMapping.1.DiskImageSize 40 \
            --DiskDeviceMapping.1.Format qcow2 \
            --DiskDeviceMapping.1.OSSBucket $ALICLOUD_BUCKET \
            --DiskDeviceMapping.1.OSSObject $IMAGE \
            --ImageName $IMAGE"

        # wait for status to become available
        while [ `$ALI_DIR/aliyun ecs DescribeImages --ImageName $IMAGE --Status Available | jq .Images.Image | jq -r '.[0].ImageName'` == "null" ]; do
            rlLogInfo "Waiting for import to complete ..."
            sleep 60
        done

        rlRun -t -c "$ALI_DIR/aliyun ecs DescribeImages --ImageName $IMAGE"
        IMAGE_ID=`$ALI_DIR/aliyun ecs DescribeImages --ImageName $IMAGE | jq .Images.Image | jq -r '.[0].ImageId'`

        if [ "$IMAGE_ID" == "null" ]; then
            rlFail "IMAGE_ID is empty!"
        else
            rlLogInfo "IMAGE_ID=$IMAGE_ID"
        fi
    rlPhaseEnd

    rlPhaseStartTest "Start ECS instance"
        INSTANCE_TYPE="ecs.n1.medium"

        # generate & import new ssh key
        KEY_NAME=Composer-Test-$UUID
        SSH_KEY_DIR=`mktemp -d /tmp/composer-ssh-keys.XXXXXX`
        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
        SSH_PUB_KEY=$(cat $SSH_KEY_DIR/id_rsa.pub)
        rlRun -t -c "$ALI_DIR/aliyun ecs ImportKeyPair --KeyPairName $KEY_NAME --PublicKeyBody '$SSH_PUB_KEY'"

        RELEASE_TIME=$(date -u -d "24 hours" '+%FT%TZ')

        # SecurityGroup is composer-allow-ssh
        # VPC is composer-vpc
        response=$($ALI_DIR/aliyun ecs RunInstances --Amount 1 --ImageId $IMAGE_ID \
            --InstanceType=$INSTANCE_TYPE --InstanceName Composer-Test-VM-$UUID \
            --SecurityGroupId sg-0xi4w9isg0p1ytj1qbhf \
            --VSwitchId vsw-0xi36w0a9l894vf2momfb \
            --KeyPairName $KEY_NAME \
            --InternetMaxBandwidthIn 5 --InternetMaxBandwidthOut 5 \
            --AutoReleaseTime $RELEASE_TIME)
        rlAssert0 "VM started successfully" $?
        rlLogInfo "$response"
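        # --AutoReleaseTime set 24 hours ahead acts as a safety net: ECS releases
        # the instance automatically even if the cleanup phase below never runs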

        INSTANCE_ID=`echo "$response" | jq .InstanceIdSets.InstanceIdSet | jq -r '.[0]'`

        until $ALI_DIR/aliyun ecs DescribeInstanceAttribute --InstanceId $INSTANCE_ID | jq -r .Status | grep -q "Running\|Stopped"; do
            sleep 30
            rlLogInfo "Waiting for instance to start ..."
        done

        rlAssertEquals "Instance $INSTANCE_ID is Running" \
            "$($ALI_DIR/aliyun ecs DescribeInstanceAttribute --InstanceId $INSTANCE_ID | jq -r .Status)" "Running"
        rlRun -t -c "$ALI_DIR/aliyun ecs DescribeInstanceAttribute --InstanceId $INSTANCE_ID"

        IP_ADDRESS="null"
        while [ "$IP_ADDRESS" == "null" ]; do
            rlLogInfo "IP_ADDRESS is not assigned yet ..."
            sleep 30
            IP_ADDRESS=`$ALI_DIR/aliyun ecs DescribeInstanceAttribute --InstanceId $INSTANCE_ID | jq -r .PublicIpAddress.IpAddress | jq -r '.[0]'`
        done

        rlLogInfo "Running INSTANCE_ID=$INSTANCE_ID with IP_ADDRESS=$IP_ADDRESS"
    rlPhaseEnd

    rlPhaseStartTest "Verify ECS instance"
        # cloud-init default config differs between RHEL and Fedora
        CLOUD_USER="cloud-user"
        if [ -f "/etc/fedora-release" ]; then
            CLOUD_USER="fedora"
        fi

        # run generic tests to verify the instance
        verify_image "$CLOUD_USER" "$IP_ADDRESS" "-i $SSH_KEY_DIR/id_rsa"
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun -t -c "$ALI_DIR/aliyun ecs DeleteInstance --Force True --InstanceId $INSTANCE_ID"
        rlRun -t -c "$ALI_DIR/aliyun ecs DeleteImage --Force True --ImageId $IMAGE_ID"
        rlRun -t -c "$ALI_DIR/aliyun oss rm oss://$ALICLOUD_BUCKET/$IMAGE --force"
        rlRun -t -c "$CLI compose delete $UUID"
        # do this here to give time for the VM instance to be removed properly;
        # also don't fail if the key is still attached to an instance which is
        # waiting to be destroyed. We're going to remove these keys in cleanup afterwards.
        $ALI_DIR/aliyun ecs DeleteKeyPairs --KeyPairNames "['$KEY_NAME']" || echo
        rlRun -t -c "rm -rf $IMAGE $SSH_KEY_DIR $ALI_DIR"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@ -1,39 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure an ext4-filesystem compose can be built without errors!
# Note: per the existing test plan we don't validate direct
# usage scenarios for this image type!
#
#####

set -e

. /usr/share/beakerlib/beakerlib.sh
. $(dirname $0)/lib/lib.sh

CLI="${CLI:-./src/bin/composer-cli}"


rlJournalStart
    rlPhaseStartTest "compose start"
        rlAssertEquals "SELinux operates in enforcing mode" "$(getenforce)" "Enforcing"
        rlRun -t -c "$CLI blueprints push $(dirname $0)/lib/test-http-server.toml"
        UUID=`$CLI compose start test-http-server ext4-filesystem`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        wait_for_compose $UUID
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun -t -c "$CLI compose delete $UUID"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@ -1,44 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure a google compose can be built without errors
#
#####

set -e

. /usr/share/beakerlib/beakerlib.sh
. $(dirname $0)/lib/lib.sh

CLI="${CLI:-./src/bin/composer-cli}"

rlJournalStart
    rlPhaseStartTest "compose start"
        rlAssertEquals "SELinux operates in enforcing mode" "$(getenforce)" "Enforcing"
        rlRun -t -c "$CLI blueprints push $(dirname $0)/lib/test-http-server.toml"
        UUID=`$CLI compose start test-http-server google`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d ' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        wait_for_compose $UUID
    rlPhaseEnd

    rlPhaseStartTest "compose check"
        $CLI compose image $UUID
        rlAssertEquals "exit code should be zero" $? 0

        fileList=$(gzip -cd "$UUID-disk.tar.gz" | tar tf -)
        rlAssertEquals "archive should contain disk.raw" "$fileList" "disk.raw"
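        # GCE expects exactly this layout: a gzipped tarball whose only member is
        # a raw disk image named disk.raw, so the assert checks the full file list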
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun "rm -rf $UUID-disk.tar.gz"
        rlRun -t -c "$CLI compose delete $UUID"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@ -1,81 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Builds a live-iso image and tests it with QEMU-KVM
#
#####

set -e

. /usr/share/beakerlib/beakerlib.sh
. $(dirname $0)/lib/lib.sh

CLI="${CLI:-./src/bin/composer-cli}"

rlJournalStart
    rlPhaseStartTest "compose start"
        rlAssertEquals "SELinux operates in enforcing mode" "$(getenforce)" "Enforcing"

        TMP_DIR=$(mktemp -d /tmp/composer.XXXXX)
        SSH_KEY_DIR=$(mktemp -d /tmp/composer-ssh-keys.XXXXXX)

        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
        PUB_KEY=$(cat "$SSH_KEY_DIR/id_rsa.pub")

        cat > "$TMP_DIR/with-ssh.toml" << __EOF__
name = "with-ssh"
description = "HTTP image with SSH"
version = "0.0.1"

[[packages]]
name = "httpd"
version = "*"

[[packages]]
name = "openssh-server"
version = "*"

[[packages]]
name = "beakerlib"
version = "*"

[customizations.services]
enabled = ["sshd"]

[[customizations.user]]
name = "root"
key = "$PUB_KEY"

[customizations.kernel]
append = "custom_cmdline_arg"
__EOF__
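        # sshd is explicitly enabled in the blueprint because the live-iso
        # kickstart turns it off by default (see the NOTE below)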

        rlRun -t -c "$CLI blueprints push $TMP_DIR/with-ssh.toml"

        # NOTE: live-iso.ks explicitly disables sshd but test_cli.sh enables it
        UUID=$($CLI compose start with-ssh live-iso)
        rlAssertEquals "exit code should be zero" $? 0

        UUID=$(echo "$UUID" | cut -f 2 -d' ')
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        wait_for_compose "$UUID"

        # Save the results for the boot test
        rlAssertExists "/var/lib/lorax/composer/results/$UUID/live.iso"
        rlRun -t -c "mkdir -p /var/tmp/test-results/"
        rlRun -t -c "cp /var/lib/lorax/composer/results/$UUID/live.iso /var/tmp/test-results/"
        # Include the ssh key needed to log into the image
        rlRun -t -c "cp $SSH_KEY_DIR/* /var/tmp/test-results"
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun -t -c "$CLI compose delete $UUID"
        rlRun -t -c "rm -rf $TMP_DIR $SSH_KEY_DIR"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@ -1,39 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Make sure a partitioned-disk compose can be built without errors!
# Note: per the existing test plan we don't validate direct
# usage scenarios for this image type!
#
#####

set -e

. /usr/share/beakerlib/beakerlib.sh
. $(dirname $0)/lib/lib.sh

CLI="${CLI:-./src/bin/composer-cli}"


rlJournalStart
    rlPhaseStartTest "compose start"
        rlAssertEquals "SELinux operates in enforcing mode" "$(getenforce)" "Enforcing"
        rlRun -t -c "$CLI blueprints push $(dirname $0)/lib/test-http-server.toml"
        UUID=`$CLI compose start test-http-server partitioned-disk`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        wait_for_compose $UUID
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun -t -c "$CLI compose delete $UUID"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@ -1,77 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Builds qcow2 images
#
#####

set -e

. /usr/share/beakerlib/beakerlib.sh
. $(dirname $0)/lib/lib.sh

CLI="${CLI:-./src/bin/composer-cli}"

rlJournalStart
    rlPhaseStartTest "compose start"
        rlAssertEquals "SELinux operates in enforcing mode" "$(getenforce)" "Enforcing"

        TMP_DIR=$(mktemp -d /tmp/composer.XXXXX)
        SSH_KEY_DIR=$(mktemp -d /tmp/composer-ssh-keys.XXXXXX)

        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
        PUB_KEY=$(cat "$SSH_KEY_DIR/id_rsa.pub")

        cat > "$TMP_DIR/with-ssh.toml" << __EOF__
name = "with-ssh"
description = "HTTP image with SSH"
version = "0.0.1"

[[packages]]
name = "httpd"
version = "*"

[[packages]]
name = "openssh-server"
version = "*"

[[packages]]
name = "beakerlib"
version = "*"

[[customizations.user]]
name = "root"
key = "$PUB_KEY"

[customizations.kernel]
append = "custom_cmdline_arg"
__EOF__

        rlRun -t -c "$CLI blueprints push $TMP_DIR/with-ssh.toml"

        UUID=$($CLI compose start with-ssh qcow2)
        rlAssertEquals "exit code should be zero" $? 0

        UUID=$(echo "$UUID" | cut -f 2 -d' ')
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        wait_for_compose "$UUID"

        # Save the results for the boot test
        rlAssertExists "/var/lib/lorax/composer/results/$UUID/disk.qcow2"
        rlRun -t -c "mkdir -p /var/tmp/test-results/"
        rlRun -t -c "cp /var/lib/lorax/composer/results/$UUID/disk.qcow2 /var/tmp/test-results/"
        # Include the ssh key needed to log into the image
        rlRun -t -c "cp $SSH_KEY_DIR/* /var/tmp/test-results"
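        # Illustrative, not part of this test (the boot test runs in a separate
        # stage): the saved image can be booted locally with e.g.
        #   qemu-system-x86_64 -enable-kvm -m 2048 -snapshot /var/tmp/test-results/disk.qcow2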
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun -t -c "$CLI compose delete $UUID"
        rlRun -t -c "rm -rf $TMP_DIR $SSH_KEY_DIR"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@ -1,64 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Builds tar images and tests them with Docker and systemd-nspawn
#
#####

set -e

. /usr/share/beakerlib/beakerlib.sh
. $(dirname $0)/lib/lib.sh

CLI="${CLI:-./src/bin/composer-cli}"


rlJournalStart
    rlPhaseStartSetup
        rlAssertExists /usr/bin/docker
    rlPhaseEnd

    rlPhaseStartTest "compose start"
        rlAssertEquals "SELinux operates in enforcing mode" "$(getenforce)" "Enforcing"
        rlRun -t -c "$CLI blueprints push $(dirname $0)/lib/test-http-server.toml"
        UUID=`$CLI compose start test-http-server tar`
        rlAssertEquals "exit code should be zero" $? 0

        UUID=`echo $UUID | cut -f 2 -d' '`
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        wait_for_compose $UUID
        rlRun -t -c "$CLI compose image $UUID"
        IMAGE="$UUID-root.tar.xz"
    rlPhaseEnd

    rlPhaseStartTest "Verify tar image with Docker"
        rlRun -t -c "docker import $IMAGE composer/$UUID:latest"

        # verify we can run a container with this image
        rlRun -t -c "docker run --rm --entrypoint /usr/bin/cat composer/$UUID /etc/redhat-release"
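        # docker import creates a bare filesystem image with no CMD/ENTRYPOINT
        # metadata, hence the explicit --entrypoint above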
    rlPhaseEnd

    rlPhaseStartTest "Verify tar image with systemd-nspawn"
        if [ -f /usr/bin/systemd-nspawn ]; then
            NSPAWN_DIR=`mktemp -d /var/tmp/nspawn.XXXX`
            rlRun -t -c "tar -xJf $IMAGE -C $NSPAWN_DIR"

            # verify we can run a container with this image
            rlRun -t -c "systemd-nspawn -D $NSPAWN_DIR cat /etc/redhat-release"
        else
            rlLogInfo "systemd-nspawn not found!"
        fi
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun -t -c "rm -rf $IMAGE $NSPAWN_DIR"
        rlRun -t -c "$CLI compose delete $UUID"
        rlRun -t -c "docker rmi composer/$UUID"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@ -1,111 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory

#####
#
# Build a tar image and install it using the liveimg kickstart command
#
#####

set -e

. /usr/share/beakerlib/beakerlib.sh
. $(dirname $0)/lib/lib.sh

CLI="${CLI:-./src/bin/composer-cli}"

rlJournalStart
    rlPhaseStartSetup
        TMP_DIR=$(mktemp -d /tmp/composer.XXXXX)
        SSH_KEY_DIR=$(mktemp -d /tmp/composer-ssh-keys.XXXXXX)

        rlRun -t -c "ssh-keygen -t rsa -N '' -f $SSH_KEY_DIR/id_rsa"
        PUB_KEY=$(cat "$SSH_KEY_DIR/id_rsa.pub")

        cat > "$TMP_DIR/test-tar.toml" << __EOF__
name = "test-tar"
description = "tar image test"
version = "0.0.1"
modules = []

[[groups]]
name = "anaconda-tools"

[[packages]]
name = "kernel"
version = "*"

[[packages]]
name = "beakerlib"
version = "*"

[[packages]]
name = "openssh-server"
version = "*"

[[packages]]
name = "openssh-clients"
version = "*"

[[packages]]
name = "passwd"
version = "*"

[[customizations.user]]
name = "root"
key = "$PUB_KEY"

__EOF__
        rlRun -t -c "$CLI blueprints push $TMP_DIR/test-tar.toml"
    rlPhaseEnd

    rlPhaseStartTest "compose start"
        rlAssertEquals "SELinux operates in enforcing mode" "$(getenforce)" "Enforcing"
        UUID=$($CLI compose start test-tar liveimg-tar)
        rlAssertEquals "exit code should be zero" $? 0

        UUID=$(echo "$UUID" | cut -f 2 -d' ')
    rlPhaseEnd

    rlPhaseStartTest "compose finished"
        wait_for_compose "$UUID"
    rlPhaseEnd

    rlPhaseStartTest "Install tar image using the kickstart liveimg command"
        cat > "$TMP_DIR/test-liveimg.ks" << __EOF__
cmdline
lang en_US.UTF-8
timezone America/New_York
keyboard us
rootpw --lock
sshkey --username root "$PUB_KEY"
bootloader --location=mbr
zerombr
clearpart --initlabel --all
autopart
# reboot is used together with the --no-reboot qemu-kvm parameter, which makes the qemu-kvm
# process exit after the installation is complete and anaconda reboots the system
# (using the 'poweroff' ks command just halted the machine without powering it off)
reboot

liveimg --url file:///var/lib/lorax/composer/results/$UUID/root.tar.xz

__EOF__
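        # liveimg installs by unpacking the tarball onto the prepared partitions
        # instead of running a package transaction, which is exactly the path
        # this test exercises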
        # Build the disk image directly in the results directory
        rlRun -t -c "mkdir -p /var/tmp/test-results/"
        rlRun -t -c "fallocate -l 5G /var/tmp/test-results/disk.img"

        rlLogInfo "Starting installation from tar image using anaconda"
        rlRun -t -c "anaconda --image=/var/tmp/test-results/disk.img --kickstart=$TMP_DIR/test-liveimg.ks"
        rlLogInfo "Installation of the image finished."

        # Include the ssh key needed to log into the image
        rlRun -t -c "cp $SSH_KEY_DIR/* /var/tmp/test-results"
    rlPhaseEnd

    rlPhaseStartCleanup
        rlRun -t -c "$CLI compose delete $UUID"
        rlRun -t -c "rm -rf $TMP_DIR $SSH_KEY_DIR"
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@ -1,105 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory

set -e

. /usr/share/beakerlib/beakerlib.sh
. $(dirname $0)/lib/lib.sh

CLI="${CLI:-./src/bin/composer-cli}"

rlJournalStart
    rlPhaseStartSetup
        repodir_backup=$(mktemp -d composerrepos-XXXXX)
        composer_stop
        rlRun -t -c "mv /var/lib/lorax/composer/repos.d/* $repodir_backup"
    rlPhaseEnd

    rlPhaseStartTest "Run lorax-composer with --no-system-repos option and empty repos.d"
        composer_start --no-system-repos
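        # with --no-system-repos lorax-composer does not copy the repo files
        # from /etc/yum.repos.d into its own repos.d, so only repos placed
        # there manually are available for depsolving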

        # check that there are no composer repos available
        rlRun -t -c "$CLI sources list | grep -v '^$' | wc -l | grep '^0$'"
        present_repos=$(ls /var/lib/lorax/composer/repos.d)
        if [ -z "$present_repos" ]; then
            rlPass "No repos found in repos.d"
        else
            rlFail "The following repos were found in repos.d: $present_repos"
        fi

        # starting a compose without available repos should fail due to a depsolving error
        rlRun -t -c "tmp_output='$($CLI compose start example-http-server partitioned-disk 2>&1)'"
        rlRun -t -c "echo '$tmp_output' | grep -q 'Problem depsolving example-http-server:'"
        MANUAL=1 composer_stop
    rlPhaseEnd

    rlPhaseStartTest "Run lorax-composer with --no-system-repos and manually created content in repos.d"
        echo '[fedora]
name=Fedora $releasever - $basearch
failovermethod=priority
metalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-$releasever&arch=$basearch
enabled=1
metadata_expire=7d
repo_gpgcheck=0
type=rpm
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch
skip_if_unavailable=False

[fedora-modular]
name=Fedora Modular $releasever - $basearch
failovermethod=priority
metalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-modular-$releasever&arch=$basearch
enabled=1
metadata_expire=7d
repo_gpgcheck=0
type=rpm
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch
skip_if_unavailable=False

[updates]
name=Fedora $releasever - $basearch - Updates
failovermethod=priority
metalink=https://mirrors.fedoraproject.org/metalink?repo=updates-released-f$releasever&arch=$basearch
enabled=1
repo_gpgcheck=0
type=rpm
gpgcheck=1
metadata_expire=6h
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch
skip_if_unavailable=False

[updates-modular]
name=Fedora Modular $releasever - $basearch - Updates
failovermethod=priority
metalink=https://mirrors.fedoraproject.org/metalink?repo=updates-released-modular-f$releasever&arch=$basearch
enabled=1
repo_gpgcheck=0
type=rpm
gpgcheck=1
metadata_expire=6h
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch
skip_if_unavailable=False
' > /var/lib/lorax/composer/repos.d/test.repo

        composer_start --no-system-repos
        present_repos=$(ls /var/lib/lorax/composer/repos.d/)
        rlAssertEquals "Only test.repo found in repos.d" "$present_repos" "test.repo"

        UUID=$($CLI compose start example-http-server partitioned-disk)
        rlAssertEquals "exit code should be zero" $? 0
        UUID=$(echo $UUID | cut -f 2 -d' ')

        wait_for_compose $UUID
    rlPhaseEnd

    rlPhaseStartCleanup
        $CLI compose delete $UUID
        MANUAL=1 composer_stop
        rlRun -t -c "rm -rf /var/lib/lorax/composer/repos.d"
        rlRun -t -c "mv $repodir_backup /var/lib/lorax/composer/repos.d"
        composer_start
    rlPhaseEnd

rlJournalEnd
rlJournalPrintText
@ -1,14 +0,0 @@
#!/bin/bash
# Note: execute this file from the project root directory
# Used for running a beakerlib test script inside a running VM
# without setting up composer first!

set -eu

. $(dirname $0)/cli/lib/lib.sh

setup_beakerlib_env

run_beakerlib_tests "$@"

parse_beakerlib_results
@ -1 +0,0 @@
line-length: {max: 120, allow-non-breakable-words: true}