lifted: Add support for AWS upload

This uses a new Ansible module, ec2_snapshot_import, which is included
here until it is available from upstream.

It will upload the .ami disk image to S3, import it as an EBS snapshot, and
then register the snapshot as an AMI. The S3 object is deleted after the
snapshot has been successfully imported and the AMI registered.
Brian C. Lane 2019-09-19 14:04:44 -07:00
parent a59c0241c4
commit c2620b0c85
7 changed files with 393 additions and 2 deletions
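
For orientation, the flow described in the commit message — upload to S3, import as a snapshot, register as an AMI, then clean up the S3 object — corresponds roughly to the boto3 sketch below. It is not part of this commit; the bucket, key, image name, and file path are placeholders, and the actual playbook further down adds existence checks, tagging, and error handling.

import time
import boto3

s3 = boto3.client("s3")
ec2 = boto3.client("ec2")

# 1. Upload the raw .ami disk image to S3 (placeholder path and names)
s3.upload_file("/path/to/disk.ami", "mybucket", "server-image.ami")

# 2. Import the S3 object as an EBS snapshot (what ec2_snapshot_import wraps)
task = ec2.import_snapshot(
    Description="simple-http-server",
    DiskContainer={"Format": "raw",
                   "UserBucket": {"S3Bucket": "mybucket", "S3Key": "server-image.ami"}})

# 3. Poll the import task until it reports "completed"
while True:
    detail = ec2.describe_import_snapshot_tasks(
        ImportTaskIds=[task["ImportTaskId"]])["ImportSnapshotTasks"][0]["SnapshotTaskDetail"]
    if detail["Status"] == "completed":
        break
    time.sleep(5)

# 4. Register the snapshot as an AMI
ec2.register_image(
    Name="simple-http-server",
    VirtualizationType="hvm",
    RootDeviceName="/dev/sda1",
    BlockDeviceMappings=[{"DeviceName": "/dev/sda1",
                          "Ebs": {"SnapshotId": detail["SnapshotId"]}}])

# 5. Delete the S3 object now that the snapshot and AMI exist
s3.delete_object(Bucket="mybucket", Key="server-image.ami")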


@@ -153,6 +153,8 @@ Requires: git
Requires: xz
Requires: createrepo_c
Requires: python3-ansible-runner
# For AWS playbook support
Requires: python3-boto3
%{?systemd_requires}
BuildRequires: systemd
@@ -237,11 +239,13 @@ getent passwd weldr >/dev/null 2>&1 || useradd -r -g weldr -d / -s /sbin/nologin
%files composer
%config(noreplace) %{_sysconfdir}/lorax/composer.conf
%{python3_sitelib}/pylorax/api/*
%{python3_sitelib}/lifted/*
%{_sbindir}/lorax-composer
%{_unitdir}/lorax-composer.service
%{_unitdir}/lorax-composer.socket
%dir %{_datadir}/lorax/composer
%{_datadir}/lorax/composer/*
%{_datadir}/lorax/lifted/*
%{_tmpfilesdir}/lorax-composer.conf
%dir %attr(0771, root, weldr) %{_sharedstatedir}/lorax/composer/
%dir %attr(0771, root, weldr) %{_sharedstatedir}/lorax/composer/blueprints/


@@ -0,0 +1,258 @@
#!/usr/bin/python
# Copyright (C) 2019 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: ec2_snapshot_import
short_description: Imports a disk into an EBS snapshot
description:
    - Imports a disk into an EBS snapshot
version_added: "2.10"
options:
  description:
    description:
      - description of the import snapshot task
    required: false
    type: str
  format:
    description:
      - The format of the disk image being imported.
    required: true
    type: str
  url:
    description:
      - The URL to the Amazon S3-based disk image being imported. It can either be a https URL (https://..) or an Amazon S3 URL (s3://..).
        Either C(url) or C(s3_bucket) and C(s3_key) are required.
    required: false
    type: str
  s3_bucket:
    description:
      - The name of the S3 bucket where the disk image is located.
      - C(s3_bucket) and C(s3_key) are required together if C(url) is not used.
    required: false
    type: str
  s3_key:
    description:
      - The file name of the disk image.
      - C(s3_bucket) and C(s3_key) are required together if C(url) is not used.
    required: false
    type: str
  encrypted:
    description:
      - Whether or not the destination Snapshot should be encrypted.
    type: bool
    default: 'no'
  kms_key_id:
    description:
      - KMS key id used to encrypt snapshot. If not specified, defaults to EBS Customer Master Key (CMK) for that account.
    required: false
    type: str
  role_name:
    description:
      - The name of the role to use when not using the default role, 'vmimport'.
    required: false
    type: str
  wait:
    description:
      - wait for the snapshot to be ready
    type: bool
    required: false
    default: yes
  wait_timeout:
    description:
      - how long before wait gives up, in seconds
      - specify 0 to wait forever
    required: false
    type: int
    default: 900
  tags:
    description:
      - A hash/dictionary of tags to add to the new Snapshot; '{"key":"value"}' and '{"key":"value","key":"value"}'
    required: false
    type: dict
author: "Brian C. Lane (@bcl)"
extends_documentation_fragment:
    - aws
    - ec2
'''

EXAMPLES = '''
# Import an S3 object as a snapshot
ec2_snapshot_import:
  description: simple-http-server
  format: raw
  s3_bucket: mybucket
  s3_key: server-image.ami
  wait: yes
  tags:
    Name: Snapshot-Name
'''

RETURN = '''
snapshot_id:
    description: id of the created snapshot
    returned: when snapshot is created
    type: str
    sample: "snap-1234abcd"
description:
    description: description of snapshot
    returned: when snapshot is created
    type: str
    sample: "simple-http-server"
format:
    description: format of the disk image being imported
    returned: when snapshot is created
    type: str
    sample: "raw"
disk_image_size:
    description: size of the disk image being imported, in bytes.
    returned: when snapshot is created
    type: float
    sample: 3836739584.0
user_bucket:
    description: S3 bucket with the image to import
    returned: when snapshot is created
    type: dict
    sample: {
        "s3_bucket": "mybucket",
        "s3_key": "server-image.ami"
    }
status:
    description: status of the import operation
    returned: when snapshot is created
    type: str
    sample: "completed"
'''

import time

from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict

try:
    import botocore
except ImportError:
    pass


def wait_for_import_snapshot(connection, wait_timeout, import_task_id):
    params = {
        'ImportTaskIds': [import_task_id]
    }
    start_time = time.time()
    while True:
        status = connection.describe_import_snapshot_tasks(**params)

        # What are the valid status values?
        if len(status['ImportSnapshotTasks']) > 1:
            raise RuntimeError("Should only be 1 Import Snapshot Task with this id.")

        task = status['ImportSnapshotTasks'][0]
        if task['SnapshotTaskDetail']['Status'] in ['completed']:
            return status

        if time.time() - start_time > wait_timeout:
            raise RuntimeError('Wait timeout exceeded (%s sec)' % wait_timeout)

        time.sleep(5)


def import_snapshot(module, connection):
    description = module.params.get('description')
    image_format = module.params.get('format')
    url = module.params.get('url')
    s3_bucket = module.params.get('s3_bucket')
    s3_key = module.params.get('s3_key')
    encrypted = module.params.get('encrypted')
    kms_key_id = module.params.get('kms_key_id')
    role_name = module.params.get('role_name')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    tags = module.params.get('tags')

    if module.check_mode:
        module.exit_json(changed=True, msg="IMPORT operation skipped - running in check mode")

    try:
        params = {
            'Description': description,
            'DiskContainer': {
                'Description': description,
                'Format': image_format,
            },
            'Encrypted': encrypted
        }
        if url:
            params['DiskContainer']['Url'] = url
        else:
            params['DiskContainer']['UserBucket'] = {
                'S3Bucket': s3_bucket,
                'S3Key': s3_key
            }
        if kms_key_id:
            params['KmsKeyId'] = kms_key_id
        if role_name:
            params['RoleName'] = role_name

        task = connection.import_snapshot(**params)
        import_task_id = task['ImportTaskId']
        detail = task['SnapshotTaskDetail']

        if wait:
            status = wait_for_import_snapshot(connection, wait_timeout, import_task_id)
            detail = status['ImportSnapshotTasks'][0]['SnapshotTaskDetail']

        if tags:
            connection.create_tags(
                Resources=[detail["SnapshotId"]],
                Tags=[{'Key': k, 'Value': v} for k, v in tags.items()]
            )
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, RuntimeError) as e:
        module.fail_json_aws(e, msg="Error importing image")

    module.exit_json(changed=True, **camel_dict_to_snake_dict(detail))


def snapshot_import_ansible_module():
    argument_spec = dict(
        description=dict(default=''),
        wait=dict(type='bool', default=True),
        wait_timeout=dict(type='int', default=900),
        format=dict(required=True),
        url=dict(),
        s3_bucket=dict(),
        s3_key=dict(),
        encrypted=dict(type='bool', default=False),
        kms_key_id=dict(),
        role_name=dict(),
        tags=dict(type='dict')
    )

    return AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['s3_bucket', 'url']],
        required_one_of=[['s3_bucket', 'url']],
        required_together=[['s3_bucket', 's3_key']]
    )


def main():
    module = snapshot_import_ansible_module()
    connection = module.client('ec2')
    import_snapshot(module, connection)


if __name__ == '__main__':
    main()


@@ -0,0 +1,94 @@
- hosts: localhost
  tasks:
    - name: Make sure bucket exists
      aws_s3:
        bucket: "{{ aws_bucket }}"
        mode: create
        aws_access_key: "{{ aws_access_key }}"
        aws_secret_key: "{{ aws_secret_key }}"
        region: "{{ aws_region }}"
      register: bucket_facts
    - fail:
        msg: "Bucket creation failed"
      when:
        - bucket_facts.msg != "Bucket created successfully"
        - bucket_facts.msg != "Bucket already exists."
    - name: Make sure vmimport role exists
      iam_role_facts:
        name: vmimport
        aws_access_key: "{{ aws_access_key }}"
        aws_secret_key: "{{ aws_secret_key }}"
        region: "{{ aws_region }}"
      register: role_facts
    - fail:
        msg: "Role vmimport doesn't exist"
      when: role_facts.iam_roles | length < 1
    - name: Make sure the AMI name isn't already in use
      ec2_ami_facts:
        filters:
          name: "{{ image_name }}"
        aws_access_key: "{{ aws_access_key }}"
        aws_secret_key: "{{ aws_secret_key }}"
        region: "{{ aws_region }}"
      register: ami_facts
    - fail:
        msg: "An AMI named {{ image_name }} already exists"
      when: ami_facts.images | length > 0
    - stat:
        path: "{{ image_path }}"
      register: image_stat
    - set_fact:
        image_id: "{{ image_name }}-{{ image_stat['stat']['checksum'] }}.ami"
    - name: Upload the .ami image to an s3 bucket
      aws_s3:
        bucket: "{{ aws_bucket }}"
        src: "{{ image_path }}"
        object: "{{ image_id }}"
        mode: put
        overwrite: different
        aws_access_key: "{{ aws_access_key }}"
        aws_secret_key: "{{ aws_secret_key }}"
        region: "{{ aws_region }}"
    - name: Import a snapshot from an AMI stored as an s3 object
      ec2_snapshot_import:
        description: "{{ image_name }}"
        format: raw
        s3_bucket: "{{ aws_bucket }}"
        s3_key: "{{ image_id }}"
        aws_access_key: "{{ aws_access_key }}"
        aws_secret_key: "{{ aws_secret_key }}"
        region: "{{ aws_region }}"
        wait: yes
        tags:
          Name: "{{ image_name }}"
      register: import_facts
    - fail:
        msg: "Import of image from s3 failed"
      when:
        - import_facts.status != "completed"
    - name: Register the snapshot as an AMI
      ec2_ami:
        name: "{{ image_name }}"
        state: present
        virtualization_type: hvm
        root_device_name: /dev/sda1
        device_mapping:
          - device_name: /dev/sda1
            snapshot_id: "{{ import_facts.snapshot_id }}"
        aws_access_key: "{{ aws_access_key }}"
        aws_secret_key: "{{ aws_secret_key }}"
        region: "{{ aws_region }}"
        wait: yes
      register: register_facts
    - fail:
        msg: "Registering snapshot as an AMI failed"
      when:
        - register_facts.msg != "AMI creation operation complete."
    - name: Delete the s3 object used for the snapshot/AMI
      aws_s3:
        bucket: "{{ aws_bucket }}"
        object: "{{ image_id }}"
        mode: delobj
        aws_access_key: "{{ aws_access_key }}"
        aws_secret_key: "{{ aws_secret_key }}"
        region: "{{ aws_region }}"
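
lorax-composer runs playbooks like this one through ansible-runner, which is why the spec change above pulls in python3-ansible-runner. A minimal sketch of driving the playbook from Python follows; the playbook path, temporary directory handling, and variable values are assumptions for illustration, not lifted's actual call site.

import tempfile
import ansible_runner

# Hypothetical variable values; lifted fills these in from the provider
# profile settings and the compose results.
extra_vars = {
    "aws_access_key": "theaccesskey",
    "aws_secret_key": "thesecretkey",
    "aws_region": "us-east-1",
    "aws_bucket": "composer-mops",
    "image_name": "simple-http-server",
    "image_path": "/path/to/disk.ami",
}

runner = ansible_runner.run(
    private_data_dir=tempfile.mkdtemp(prefix="lifted-"),
    playbook="/usr/share/lorax/lifted/providers/aws/playbook.yaml",  # assumed path
    extravars=extra_vars,
)
print(runner.status, runner.rc)  # e.g. "successful", 0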


@@ -0,0 +1,29 @@
display = "AWS"
supported_types = [
    "ami",
]

[settings-info.aws_access_key]
display = "AWS Access Key"
type = "string"
placeholder = ""
regex = ''

[settings-info.aws_secret_key]
display = "AWS Secret Key"
type = "string"
placeholder = ""
regex = ''

[settings-info.aws_region]
display = "AWS Region"
type = "string"
placeholder = ""
regex = ''

[settings-info.aws_bucket]
display = "AWS Bucket"
type = "string"
placeholder = ""
regex = ''
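
Each provider's TOML describes its settings with a display name, type, placeholder, and validation regex. As an illustration only (not lifted's actual code), user-supplied settings could be checked against this schema roughly as below; the file name aws.toml and the helper function are hypothetical.

import re
import toml

def check_settings(provider_toml_path, settings):
    """Validate user-supplied settings against a provider's settings-info block."""
    info = toml.load(provider_toml_path)["settings-info"]
    for key, value in settings.items():
        if key not in info:
            raise ValueError("Unknown setting: %s" % key)
        if info[key]["type"] == "string" and not isinstance(value, str):
            raise ValueError("%s must be a string" % key)
        regex = info[key].get("regex", "")
        if regex and not re.match(regex, value):
            raise ValueError("%s has an invalid value" % key)

# Hypothetical usage, with the profile values used by the tests below
check_settings("aws.toml", {
    "aws_access_key": "theaccesskey",
    "aws_secret_key": "thesecretkey",
    "aws_region": "us-east-1",
    "aws_bucket": "composer-mops",
})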


@@ -17,6 +17,12 @@
# test profile settings for each provider
test_profiles = {
    "aws": ["aws-profile", {
        "aws_access_key": "theaccesskey",
        "aws_secret_key": "thesecretkey",
        "aws_region": "us-east-1",
        "aws_bucket": "composer-mops"
    }],
    "azure": ["azure-profile", {
        "resource_group": "production",
        "storage_account_name": "HomerSimpson",


@@ -50,7 +50,7 @@ class ProvidersTestCase(unittest.TestCase):
    def test_list_providers(self):
        p = list_providers(self.config["upload"])
-       self.assertEqual(p, ['azure', 'dummy', 'openstack', 'vsphere'])
+       self.assertEqual(p, ['aws', 'azure', 'dummy', 'openstack', 'vsphere'])

    def test_resolve_provider(self):
        for p in list_providers(self.config["upload"]):


@@ -3496,7 +3496,7 @@ class ServerAPIV1TestCase(unittest.TestCase):
        self.assertNotEqual(data, None)
        self.assertTrue("providers" in data)
        providers = sorted(data["providers"].keys())
-       self.assertEqual(providers, ["azure", "dummy", "openstack", "vsphere"])
+       self.assertEqual(providers, ["aws", "azure", "dummy", "openstack", "vsphere"])

    def test_upload_01_providers_save(self):
        """Save settings for a provider"""