Automatically upload composed images to the cloud

Currently, Azure, vSphere, and OpenStack are supported. See
https://github.com/weldr/lorax/pull/826 for more details about this new
feature.

I've called the upload library "lifted" as a reference to Seuss'
The Lorax -- in the book, the Lorax lifts himself up by the seat of his
pants through a hole in the smog clouds, and they start calling him the
"Lifted Lorax."

This adds new upload-related features to the /compose route under API v1.
Evan Goode 2019-07-30 09:47:59 -04:00 committed by Brian C. Lane
parent 584a2d2076
commit 2692e8138c
20 changed files with 1443 additions and 16 deletions


@@ -47,7 +47,7 @@ setup(name="lorax",
url="http://www.github.com/weldr/lorax/",
download_url="http://www.github.com/weldr/lorax/releases/",
license="GPLv2+",
packages=["pylorax", "pylorax.api", "composer", "composer.cli"],
packages=["pylorax", "pylorax.api", "composer", "composer.cli", "lifted"],
package_dir={"" : "src"},
data_files=data_files
)


@@ -0,0 +1,47 @@
- hosts: localhost
connection: local
tasks:
- name: Make sure provided credentials work and the storage account exists
azure_rm_storageaccount_facts:
subscription_id: "{{ subscription_id }}"
client_id: "{{ client_id }}"
secret: "{{ secret }}"
tenant: "{{ tenant }}"
resource_group: "{{ resource_group }}"
name: "{{ storage_account_name }}"
register: storageaccount_facts
- name: Fail if we couldn't log in or the storage account was not found
fail:
msg: "Invalid credentials or storage account not found!"
when: storageaccount_facts.ansible_facts.azure_storageaccounts | length < 1
- stat:
path: "{{ image_path }}"
register: image_stat
- set_fact:
image_id: "{{ image_name }}-{{ image_stat['stat']['checksum'] }}.vhd"
- name: Upload image to Azure
azure_rm_storageblob:
subscription_id: "{{ subscription_id }}"
client_id: "{{ client_id }}"
secret: "{{ secret }}"
tenant: "{{ tenant }}"
resource_group: "{{ resource_group }}"
storage_account_name: "{{ storage_account_name }}"
container: "{{ storage_container }}"
src: "{{ image_path }}"
blob: "{{ image_id }}"
blob_type: page
force: no
- set_fact:
host: "{{ storage_account_name }}.blob.core.windows.net"
- name: Import image
azure_rm_image:
subscription_id: "{{ subscription_id }}"
client_id: "{{ client_id }}"
secret: "{{ secret }}"
tenant: "{{ tenant }}"
resource_group: "{{ resource_group }}"
name: "{{ image_name }}"
os_type: Linux
location: "{{ location }}"
source: "https://{{ host }}/{{ storage_container }}/{{ image_id }}"


@@ -0,0 +1,53 @@
display = "Azure"
supported_types = [
"vhd",
]
[settings-info.resource_group]
display = "Resource group"
type = "string"
placeholder = ""
regex = ''
[settings-info.storage_account_name]
display = "Storage account name"
type = "string"
placeholder = ""
regex = ''
[settings-info.storage_container]
display = "Storage container"
type = "string"
placeholder = ""
regex = ''
[settings-info.subscription_id]
display = "Subscription ID"
type = "string"
placeholder = ""
regex = ''
[settings-info.client_id]
display = "Client ID"
type = "string"
placeholder = ""
regex = ''
[settings-info.secret]
display = "Secret"
type = "string"
placeholder = ""
regex = ''
[settings-info.tenant]
display = "Tenant"
type = "string"
placeholder = ""
regex = ''
[settings-info.location]
display = "Location"
type = "string"
placeholder = ""
regex = ''


@@ -0,0 +1,4 @@
- hosts: localhost
connection: local
tasks:
- pause: seconds=30


@@ -0,0 +1,4 @@
display = "Dummy"
[settings-info]
# This provider has no settings.


@@ -0,0 +1,20 @@
- hosts: localhost
connection: local
tasks:
- stat:
path: "{{ image_path }}"
register: image_stat
- set_fact:
image_id: "{{ image_name }}-{{ image_stat['stat']['checksum'] }}.qcow2"
- name: Upload image to OpenStack
os_image:
auth:
auth_url: "{{ auth_url }}"
username: "{{ username }}"
password: "{{ password }}"
project_name: "{{ project_name }}"
os_user_domain_name: "{{ user_domain_name }}"
os_project_domain_name: "{{ project_domain_name }}"
name: "{{ image_id }}"
filename: "{{ image_path }}"
is_public: "{{ is_public }}"


@@ -0,0 +1,45 @@
display = "OpenStack"
supported_types = [
"qcow2",
]
[settings-info.auth_url]
display = "Authentication URL"
type = "string"
placeholder = ""
regex = ''
[settings-info.username]
display = "Username"
type = "string"
placeholder = ""
regex = ''
[settings-info.password]
display = "Password"
type = "string"
placeholder = ""
regex = ''
[settings-info.project_name]
display = "Project name"
type = "string"
placeholder = ""
regex = ''
[settings-info.user_domain_name]
display = "User domain name"
type = "string"
placeholder = ""
regex = ''
[settings-info.project_domain_name]
display = "Project domain name"
type = "string"
placeholder = ""
regex = ''
[settings-info.is_public]
display = "Allow public access"
type = "boolean"


@@ -0,0 +1,17 @@
- hosts: localhost
connection: local
tasks:
- stat:
path: "{{ image_path }}"
register: image_stat
- set_fact:
image_id: "{{ image_name }}-{{ image_stat['stat']['checksum'] }}.vmdk"
- name: Upload image to vSphere
vsphere_copy:
login: "{{ username }}"
password: "{{ password }}"
host: "{{ host }}"
datacenter: "{{ datacenter }}"
datastore: "{{ datastore }}"
src: "{{ image_path }}"
path: "{{ folder }}/{{ image_id }}"


@@ -0,0 +1,42 @@
display = "vSphere"
supported_types = [
"vmdk",
]
[settings-info.datacenter]
display = "Datacenter"
type = "string"
placeholder = ""
regex = ''
[settings-info.datastore]
display = "Datastore"
type = "string"
placeholder = ""
regex = ''
[settings-info.host]
display = "Host"
type = "string"
placeholder = ""
regex = ''
[settings-info.folder]
display = "Folder"
type = "string"
placeholder = ""
regex = ''
[settings-info.username]
display = "Username"
type = "string"
placeholder = ""
regex = ''
[settings-info.password]
display = "Password"
type = "string"
placeholder = ""
regex = ''

src/lifted/__init__.py (new file, 16 lines)

@@ -0,0 +1,16 @@
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

src/lifted/providers.py (new file, 172 lines)

@@ -0,0 +1,172 @@
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from glob import glob
import os
import re
import stat
import pylorax.api.toml as toml
def resolve_provider(ucfg, provider_name):
"""Get information about the specified provider as defined in that
provider's `provider.toml`, including the provider's display name and expected
settings.
At a minimum, each setting has a display name (that likely differs from its
snake_case name) and a type. Currently, there are two types of settings:
string and boolean. String settings can optionally have a "placeholder"
value for use on the front end and a "regex" for making sure that a value
follows an expected pattern.
:param ucfg: upload config
:type ucfg: object
:param provider_name: the name of the provider to look for
:type provider_name: str
:raises: RuntimeError when the provider couldn't be found
:returns: the provider
:rtype: dict
"""
path = os.path.join(ucfg["providers_dir"], provider_name, "provider.toml")
try:
with open(path) as provider_file:
provider = toml.load(provider_file)
except OSError as error:
raise RuntimeError(f'Couldn\'t find provider "{provider_name}"!') from error
return provider
def load_profiles(ucfg, provider_name):
"""Return all settings profiles associated with a provider
:param ucfg: upload config
:type ucfg: object
:param provider_name: name a provider to find profiles for
:type provider_name: str
:returns: a dict of settings dicts, keyed by profile name
:rtype: dict
"""
def load_path(path):
with open(path) as file:
return toml.load(file)
def get_name(path):
return os.path.splitext(os.path.basename(path))[0]
paths = glob(os.path.join(ucfg["settings_dir"], provider_name, "*"))
return {get_name(path): load_path(path) for path in paths}
def resolve_playbook_path(ucfg, provider_name):
"""Given a provider's name, return the path to its playbook
:param ucfg: upload config
:type ucfg: object
:param provider_name: the name of the provider to find the playbook for
:type provider_name: str
:raises: RuntimeError when the provider couldn't be found
:returns: the path to the playbook
:rtype: str
"""
path = os.path.join(ucfg["providers_dir"], provider_name, "playbook.yaml")
if not os.path.isfile(path):
raise RuntimeError(f'Couldn\'t find playbook for "{provider_name}"!')
return path
def list_providers(ucfg):
"""List the names of the available upload providers
:param ucfg: upload config
:type ucfg: object
:returns: a list of all available provider_names
:rtype: list of str
"""
paths = glob(os.path.join(ucfg["providers_dir"], "*"))
return [os.path.basename(path) for path in paths]
def validate_settings(ucfg, provider_name, settings, image_name=None):
"""Raise a ValueError if any settings are invalid
:param ucfg: upload config
:type ucfg: object
:param provider_name: the name of the provider to validate the settings
against
:type provider_name: str
:param settings: the settings to validate
:type settings: dict
:param image_name: optionally check whether an image_name is valid
:type image_name: str
:raises: ValueError when the passed settings are invalid
:raises: RuntimeError when provider_name can't be found
"""
if image_name == "":
raise ValueError("Image name cannot be empty!")
type_map = {"string": str, "boolean": bool}
settings_info = resolve_provider(ucfg, provider_name)["settings-info"]
for key, value in settings.items():
if key not in settings_info:
raise ValueError(f'Received unexpected setting: "{key}"!')
setting_type = settings_info[key]["type"]
correct_type = type_map[setting_type]
if not isinstance(value, correct_type):
raise ValueError(
f'Expected a {correct_type} for "{key}", received a {type(value)}!'
)
if setting_type == "string" and "regex" in settings_info[key]:
if not re.match(settings_info[key]["regex"], value):
raise ValueError(f'Value "{value}" is invalid for setting "{key}"!')
def save_settings(ucfg, provider_name, profile, settings):
"""Save (and overwrite) settings for a given provider
:param ucfg: upload config
:type ucfg: object
:param provider_name: the name of the cloud provider, e.g. "azure"
:type provider_name: str
:param profile: the name of the profile to save
:type profile: str != ""
:param settings: settings to save for that provider
:type settings: dict
:raises: ValueError when passed invalid settings or an invalid profile name
"""
if not profile:
raise ValueError("Profile name cannot be empty!")
validate_settings(ucfg, provider_name, settings, image_name=None)
directory = os.path.join(ucfg["settings_dir"], provider_name)
# create the settings directory if it doesn't exist
os.makedirs(directory, exist_ok=True)
path = os.path.join(directory, f"{profile}.toml")
# touch the TOML file if it doesn't exist
if not os.path.isfile(path):
open(path, "a").close()
# make sure settings files aren't readable by others, as they will contain
# sensitive credentials
current = stat.S_IMODE(os.lstat(path).st_mode)
os.chmod(path, current & ~stat.S_IROTH)
with open(path, "w") as settings_file:
toml.dump(settings, settings_file)

src/lifted/queue.py (new file, 270 lines)

@@ -0,0 +1,270 @@
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from functools import partial
from glob import glob
import logging
import multiprocessing
# We use a multiprocessing Pool for uploads so that we can cancel them with a
# simple SIGINT, which should bubble down to subprocesses.
from multiprocessing import Pool
# multiprocessing.dummy is to threads as multiprocessing is to processes.
# Since daemonic processes can't have children, we use a thread to monitor the
# upload pool.
from multiprocessing.dummy import Process
from operator import attrgetter
import os
import stat
import time
import pylorax.api.toml as toml
from lifted.upload import Upload
from lifted.providers import resolve_playbook_path, validate_settings
# the maximum number of simultaneous uploads
SIMULTANEOUS_UPLOADS = 1
log = logging.getLogger("lifted")
multiprocessing.log_to_stderr().setLevel(logging.INFO)
def _get_queue_path(ucfg):
path = ucfg["queue_dir"]
# create the upload_queue directory if it doesn't exist
os.makedirs(path, exist_ok=True)
return path
def _get_upload_path(ucfg, uuid, write=False):
path = os.path.join(_get_queue_path(ucfg), f"{uuid}.toml")
if write and not os.path.exists(path):
open(path, "a").close()
if os.path.exists(path):
# make sure uploads aren't readable by others, as they will contain
# sensitive credentials
current = stat.S_IMODE(os.lstat(path).st_mode)
os.chmod(path, current & ~stat.S_IROTH)
return path
def _list_upload_uuids(ucfg):
paths = glob(os.path.join(_get_queue_path(ucfg), "*"))
return [os.path.splitext(os.path.basename(path))[0] for path in paths]
def _write_upload(ucfg, upload):
with open(_get_upload_path(ucfg, upload.uuid, write=True), "w") as upload_file:
toml.dump(upload.serializable(), upload_file)
def _write_callback(ucfg):
return partial(_write_upload, ucfg)
def get_upload(ucfg, uuid, ignore_missing=False, ignore_corrupt=False):
"""Get an Upload object by UUID
:param ucfg: upload config
:type ucfg: object
:param uuid: UUID of the upload to get
:type uuid: str
:param ignore_missing: if True, don't raise a RuntimeError when the
specified upload is missing, instead just return None
:type ignore_missing: bool
:param ignore_corrupt: if True, don't raise a RuntimeError when the
specified upload could not be deserialized, instead just return None
:type ignore_corrupt: bool
:returns: the upload object or None
:rtype: Upload or None
:raises: RuntimeError
"""
try:
with open(_get_upload_path(ucfg, uuid), "r") as upload_file:
return Upload(**toml.load(upload_file))
except FileNotFoundError as error:
if not ignore_missing:
raise RuntimeError(f"Could not find upload {uuid}!") from error
except toml.TomlError as error:
if not ignore_corrupt:
raise RuntimeError(f"Could not parse upload {uuid}!") from error
def get_uploads(ucfg, uuids):
"""Gets a list of Upload objects from a list of upload UUIDs, ignoring
missing or corrupt uploads
:param ucfg: upload config
:type ucfg: object
:param uuids: list of upload UUIDs to get
:type uuids: list of str
:returns: a list of the uploads that were successfully deserialized
:rtype: list of Upload
"""
uploads = (
get_upload(ucfg, uuid, ignore_missing=True, ignore_corrupt=True)
for uuid in uuids
)
return list(filter(None, uploads))
def get_all_uploads(ucfg):
"""Get a list of all stored Upload objects
:param ucfg: upload config
:type ucfg: object
:returns: a list of all stored upload objects
:rtype: list of Upload
"""
return get_uploads(ucfg, _list_upload_uuids(ucfg))
def create_upload(ucfg, provider_name, image_name, settings):
"""Creates a new upload
:param ucfg: upload config
:type ucfg: object
:param provider_name: the name of the cloud provider to upload to, e.g.
"azure"
:type provider_name: str
:param image_name: what to name the image in the cloud
:type image_name: str
:param settings: settings to pass to the upload, specific to the cloud
provider
:type settings: dict
:returns: the created upload object
:rtype: Upload
"""
validate_settings(ucfg, provider_name, settings, image_name)
return Upload(
provider_name=provider_name,
playbook_path=resolve_playbook_path(ucfg, provider_name),
image_name=image_name,
settings=settings,
status_callback=_write_callback(ucfg),
)
def ready_upload(ucfg, uuid, image_path):
"""Pass an image_path to an upload and mark it ready to execute
:param ucfg: upload config
:type ucfg: object
:param uuid: the UUID of the upload to mark ready
:type uuid: str
:param image_path: the path of the image to pass to the upload
:type image_path: str
"""
get_upload(ucfg, uuid).ready(image_path, _write_callback(ucfg))
def reset_upload(ucfg, uuid, new_image_name=None, new_settings=None):
"""Reset an upload so it can be attempted again
:param ucfg: upload config
:type ucfg: object
:param uuid: the UUID of the upload to reset
:type uuid: str
:param new_image_name: optionally update the upload's image_name
:type new_image_name: str
:param new_settings: optionally update the upload's settings
:type new_settings: dict
"""
upload = get_upload(ucfg, uuid)
validate_settings(
ucfg,
upload.provider_name,
new_settings or upload.settings,
new_image_name or upload.image_name,
)
if new_image_name:
upload.image_name = new_image_name
if new_settings:
upload.settings = new_settings
upload.reset(_write_callback(ucfg))
def cancel_upload(ucfg, uuid):
"""Cancel an upload
    :param ucfg: upload config
    :type ucfg: object
:param uuid: the UUID of the upload to cancel
:type uuid: str
"""
get_upload(ucfg, uuid).cancel(_write_callback(ucfg))
def delete_upload(ucfg, uuid):
"""Delete an upload
    :param ucfg: upload config
    :type ucfg: object
:param uuid: the UUID of the upload to delete
:type uuid: str
"""
upload = get_upload(ucfg, uuid)
if upload and upload.is_cancellable():
upload.cancel()
os.remove(_get_upload_path(ucfg, uuid))
def start_upload_monitor(ucfg):
"""Start a thread that manages the upload queue
    :param ucfg: upload config
    :type ucfg: object
"""
process = Process(target=_monitor, args=(ucfg,))
process.daemon = True
process.start()
def _monitor(ucfg):
log.info("Started upload monitor.")
for upload in get_all_uploads(ucfg):
# Set abandoned uploads to FAILED
if upload.status == "RUNNING":
upload.set_status("FAILED", _write_callback(ucfg))
pool = Pool(processes=SIMULTANEOUS_UPLOADS)
pool_uuids = set()
def remover(uuid):
return lambda _: pool_uuids.remove(uuid)
while True:
# Every second, scoop up READY uploads from the filesystem and throw
# them in the pool
all_uploads = get_all_uploads(ucfg)
for upload in sorted(all_uploads, key=attrgetter("creation_time")):
ready = upload.status == "READY"
if ready and upload.uuid not in pool_uuids:
log.info("Starting upload %s...", upload.uuid)
pool_uuids.add(upload.uuid)
callback = remover(upload.uuid)
pool.apply_async(
upload.execute,
(_write_callback(ucfg),),
callback=callback,
error_callback=callback,
)
time.sleep(1)

src/lifted/upload.py (new file, 188 lines)

@@ -0,0 +1,188 @@
#
# Copyright (C) 2018-2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from datetime import datetime
from enum import Enum
from functools import partial
import logging
from multiprocessing import current_process
import os
import signal
from uuid import uuid4
from ansible_runner.interface import run as ansible_run
log = logging.getLogger("lifted")
class Upload:
"""Represents an upload of an image to a cloud provider. Instances of this
class are serialized as TOML and stored in the upload queue directory,
which is /var/lib/lorax/upload/queue/ by default"""
def __init__(
self,
uuid=None,
provider_name=None,
playbook_path=None,
image_name=None,
settings=None,
creation_time=None,
upload_log=None,
upload_pid=None,
image_path=None,
status_callback=None,
status=None,
):
self.uuid = uuid or str(uuid4())
self.provider_name = provider_name
self.playbook_path = playbook_path
self.image_name = image_name
self.settings = settings
self.creation_time = creation_time or datetime.now().timestamp()
self.upload_log = upload_log or ""
self.upload_pid = upload_pid
self.image_path = image_path
if status:
self.status = status
else:
self.set_status("WAITING", status_callback)
def _log(self, message, callback=None):
"""Logs something to the upload log with an optional callback
:param message: the object to log
:type message: object
:param callback: a function of the form callback(self)
:type callback: function
"""
log.info(str(message))
self.upload_log += f"{message}\n"
if callback:
callback(self)
def serializable(self):
"""Returns a representation of the object as a dict for serialization
:returns: the object's __dict__
:rtype: dict
"""
return self.__dict__
def summary(self):
"""Return a dict with useful information about the upload
:returns: upload information
:rtype: dict
"""
return {
"uuid": self.uuid,
"status": self.status,
"provider_name": self.provider_name,
"image_name": self.image_name,
"image_path": self.image_path,
"creation_time": self.creation_time,
"settings": self.settings,
}
def set_status(self, status, status_callback=None):
"""Sets the status of the upload with an optional callback
:param status: the new status
:type status: str
:param status_callback: a function of the form callback(self)
:type status_callback: function
"""
self.status = status
if status_callback:
status_callback(self)
def ready(self, image_path, status_callback):
"""Provide an image_path and mark the upload as ready to execute
:param image_path: path of the image to upload
:type image_path: str
:param status_callback: a function of the form callback(self)
:type status_callback: function
"""
self.image_path = image_path
if self.status == "WAITING":
self.set_status("READY", status_callback)
def reset(self, status_callback):
"""Reset the upload so it can be attempted again
:param status_callback: a function of the form callback(self)
:type status_callback: function
"""
if self.is_cancellable():
raise RuntimeError(f"Can't reset, status is {self.status}!")
if not self.image_path:
            raise RuntimeError("Can't reset, no image supplied yet!")
self._log("Resetting...")
self.set_status("READY", status_callback)
def is_cancellable(self):
"""Is the upload in a cancellable state?
:returns: whether the upload is cancellable
:rtype: bool
"""
return self.status in ("WAITING", "READY", "RUNNING")
def cancel(self, status_callback=None):
"""Cancel the upload. Sends a SIGINT to self.upload_pid.
:param status_callback: a function of the form callback(self)
:type status_callback: function
"""
if not self.is_cancellable():
raise RuntimeError(f"Can't cancel, status is already {self.status}!")
if self.upload_pid:
os.kill(self.upload_pid, signal.SIGINT)
self.set_status("CANCELLED", status_callback)
def execute(self, status_callback=None):
"""Execute the upload. Meant to be called from a dedicated process so
that the upload can be cancelled by sending a SIGINT to
self.upload_pid.
:param status_callback: a function of the form callback(self)
:type status_callback: function
"""
if self.status != "READY":
raise RuntimeError("This upload is not ready!")
self.upload_pid = current_process().pid
self.set_status("RUNNING", status_callback)
logger = lambda e: self._log(e["stdout"], status_callback)
runner = ansible_run(
playbook=self.playbook_path,
extravars={
**self.settings,
"image_name": self.image_name,
"image_path": self.image_path,
},
event_handler=logger,
)
if runner.status == "successful":
self.set_status("FINISHED", status_callback)
else:
self.set_status("FAILED", status_callback)


@@ -54,6 +54,11 @@ def configure(conf_file="/etc/lorax/composer.conf", root_dir="/", test_config=Fa
conf.add_section("users")
conf.set("users", "root", "1")
conf.add_section("upload")
conf.set("upload", "providers_dir", os.path.realpath(joinpaths(root_dir, "/usr/share/lorax/lifted/providers/")))
conf.set("upload", "queue_dir", os.path.realpath(joinpaths(root_dir, "/var/lib/lorax/upload/queue")))
conf.set("upload", "settings_dir", os.path.realpath(joinpaths(root_dir, "/var/lib/lorax/upload/settings")))
# Enable all available repo files by default
conf.add_section("repos")
conf.set("repos", "use_system_repos", "1")


@@ -45,6 +45,9 @@ BUILD_MISSING_FILE = "BuildMissingFile"
# Returned from the API for all other errors from a /compose/* route.
COMPOSE_ERROR = "ComposeError"
# Returned from the API for all errors from a /upload/* route.
UPLOAD_ERROR = "UploadError" # TODO these errors should be more specific
# Returned from the API when invalid characters are used in a route path or in
# some identifier.
INVALID_CHARS = "InvalidChars"


@@ -38,6 +38,8 @@ from pylorax.base import DataHolder
from pylorax.creator import run_creator
from pylorax.sysutils import joinpaths, read_tail
from lifted.queue import create_upload, get_uploads, ready_upload, delete_upload
def check_queues(cfg):
"""Check to make sure the new and run queue symlinks are correct
@@ -93,7 +95,7 @@ def start_queue_monitor(cfg, uid, gid):
lib_dir = cfg.get("composer", "lib_dir")
share_dir = cfg.get("composer", "share_dir")
tmp = cfg.get("composer", "tmp")
monitor_cfg = DataHolder(composer_dir=lib_dir, share_dir=share_dir, uid=uid, gid=gid, tmp=tmp)
monitor_cfg = DataHolder(cfg=cfg, composer_dir=lib_dir, share_dir=share_dir, uid=uid, gid=gid, tmp=tmp)
p = mp.Process(target=monitor, args=(monitor_cfg,))
p.daemon = True
p.start()
@@ -163,6 +165,11 @@ def monitor(cfg):
log.info("Finished building %s, results are in %s", dst, os.path.realpath(dst))
open(joinpaths(dst, "STATUS"), "w").write("FINISHED\n")
write_timestamp(dst, TS_FINISHED)
upload_cfg = cfg.cfg["upload"]
for upload in get_uploads(upload_cfg, uuid_get_uploads(cfg.cfg, uuids[0])):
log.info("Readying upload %s", upload.uuid)
uuid_ready_upload(cfg.cfg, uuids[0], upload.uuid)
except Exception:
import traceback
log.error("traceback: %s", traceback.format_exc())
@@ -298,7 +305,7 @@ def get_compose_type(results_dir):
raise RuntimeError("Cannot find ks template for build %s" % os.path.basename(results_dir))
return t[0]
def compose_detail(results_dir):
def compose_detail(cfg, results_dir):
"""Return details about the build.
:param results_dir: The directory containing the metadata and results for the build
@@ -338,6 +345,9 @@ def compose_detail(results_dir):
times = timestamp_dict(results_dir)
upload_uuids = uuid_get_uploads(cfg, build_id)
summaries = [upload.summary() for upload in get_uploads(cfg["upload"], upload_uuids)]
return {"id": build_id,
"queue_status": status,
"job_created": times.get(TS_CREATED),
@@ -346,7 +356,8 @@ def compose_detail(results_dir):
"compose_type": compose_type,
"blueprint": blueprint["name"],
"version": blueprint["version"],
"image_size": image_size
"image_size": image_size,
"uploads": summaries,
}
def queue_status(cfg):
@@ -367,7 +378,7 @@ def queue_status(cfg):
new_details = []
for n in new_queue:
try:
d = compose_detail(n)
d = compose_detail(cfg, n)
except IOError:
continue
new_details.append(d)
@@ -375,7 +386,7 @@ def queue_status(cfg):
run_details = []
for r in run_queue:
try:
d = compose_detail(r)
d = compose_detail(cfg, r)
except IOError:
continue
run_details.append(d)
@@ -399,7 +410,7 @@ def uuid_status(cfg, uuid):
"""
uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
try:
return compose_detail(uuid_dir)
return compose_detail(cfg, uuid_dir)
except IOError:
return None
@@ -430,11 +441,56 @@ def build_status(cfg, status_filter=None):
try:
status = open(joinpaths(build, "STATUS"), "r").read().strip()
if status in status_filter:
results.append(compose_detail(build))
results.append(compose_detail(cfg, build))
except IOError:
pass
return results
def _upload_list_path(cfg, uuid):
results_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
if not os.path.isdir(results_dir):
raise RuntimeError(f'"{uuid}" is not a valid build uuid!')
return joinpaths(results_dir, "UPLOADS")
def uuid_schedule_upload(cfg, uuid, provider_name, image_name, settings):
status = uuid_status(cfg, uuid)
if status is None:
raise RuntimeError(f'"{uuid}" is not a valid build uuid!')
upload = create_upload(cfg["upload"], provider_name, image_name, settings)
uuid_add_upload(cfg, uuid, upload.uuid)
return upload.uuid
def uuid_get_uploads(cfg, uuid):
try:
with open(_upload_list_path(cfg, uuid)) as uploads_file:
return frozenset(uploads_file.read().split())
except FileNotFoundError:
return frozenset()
def uuid_add_upload(cfg, uuid, upload_uuid):
if upload_uuid not in uuid_get_uploads(cfg, uuid):
with open(_upload_list_path(cfg, uuid), "a") as uploads_file:
print(upload_uuid, file=uploads_file)
status = uuid_status(cfg, uuid)
if status and status["queue_status"] == "FINISHED":
uuid_ready_upload(cfg, uuid, upload_uuid)
def uuid_remove_upload(cfg, uuid, upload_uuid):
uploads = uuid_get_uploads(cfg, uuid) - frozenset((upload_uuid,))
with open(_upload_list_path(cfg, uuid), "w") as uploads_file:
for upload in uploads:
print(upload, file=uploads_file)
def uuid_ready_upload(cfg, uuid, upload_uuid):
status = uuid_status(cfg, uuid)
if not status:
raise RuntimeError(f"{uuid} is not a valid build id!")
if status["queue_status"] != "FINISHED":
raise RuntimeError(f"Build {uuid} is not finished!")
_, image_path = uuid_image(cfg, uuid)
ready_upload(cfg["upload"], upload_uuid, image_path)
def uuid_cancel(cfg, uuid):
"""Cancel a build and delete its results
@@ -510,6 +566,10 @@ def uuid_delete(cfg, uuid):
uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
if not uuid_dir or len(uuid_dir) < 10:
raise RuntimeError("Directory length is too short: %s" % uuid_dir)
for upload in get_uploads(cfg["upload"], uuid_get_uploads(cfg, uuid)):
delete_upload(cfg["upload"], upload.uuid)
shutil.rmtree(uuid_dir)
return True
@@ -554,13 +614,16 @@ def uuid_info(cfg, uuid):
raise RuntimeError("Missing deps.toml for %s" % uuid)
deps_dict = toml.loads(open(deps_path, "r").read())
details = compose_detail(uuid_dir)
details = compose_detail(cfg, uuid_dir)
commit_path = joinpaths(uuid_dir, "COMMIT")
if not os.path.exists(commit_path):
raise RuntimeError("Missing commit hash for %s" % uuid)
commit_id = open(commit_path, "r").read().strip()
upload_uuids = uuid_get_uploads(cfg, uuid)
summaries = [upload.summary() for upload in get_uploads(cfg["upload"], upload_uuids)]
return {"id": uuid,
"config": cfg_dict,
"blueprint": frozen_dict,
@@ -568,7 +631,8 @@ def uuid_info(cfg, uuid):
"deps": deps_dict,
"compose_type": details["compose_type"],
"queue_status": details["queue_status"],
"image_size": details["image_size"]
"image_size": details["image_size"],
"uploads": summaries,
}
def uuid_tar(cfg, uuid, metadata=False, image=False, logs=False):


@@ -89,6 +89,10 @@ server.register_blueprint(v0_api, url_prefix="/api/v0/")
# Register the v1 API on /api/v1/
# Use v0 routes by default
server.register_blueprint(v0_api, url_prefix="/api/v1/",
skip_rules=["/projects/source/info/<source_names>", "/projects/source/new"])
skip_rules = [
"/compose",
"/projects/source/info/<source_names>",
"/projects/source/new",
]
server.register_blueprint(v0_api, url_prefix="/api/v1/", skip_rules=skip_rules)
server.register_blueprint(v1_api, url_prefix="/api/v1/")


@@ -58,7 +58,7 @@ from flask import current_app as api
from pylorax.sysutils import joinpaths
from pylorax.api.checkparams import checkparams
from pylorax.api.compose import start_build, compose_types
from pylorax.api.errors import * # pylint: disable=wildcard-import
from pylorax.api.errors import * # pylint: disable=wildcard-import,unused-wildcard-import
from pylorax.api.flask_blueprint import BlueprintSkip
from pylorax.api.projects import projects_list, projects_info, projects_depsolve
from pylorax.api.projects import modules_list, modules_info, ProjectsError, repo_to_source


@@ -23,12 +23,20 @@ log = logging.getLogger("lorax-composer")
from flask import jsonify, request
from flask import current_app as api
from lifted.queue import get_upload, reset_upload, cancel_upload, delete_upload
from lifted.providers import list_providers, resolve_provider, load_profiles, validate_settings, save_settings
from pylorax.api.checkparams import checkparams
from pylorax.api.errors import INVALID_CHARS, PROJECTS_ERROR, SYSTEM_SOURCE, UNKNOWN_SOURCE
from pylorax.api.compose import start_build
from pylorax.api.errors import BAD_COMPOSE_TYPE, BUILD_FAILED, INVALID_CHARS, MISSING_POST, PROJECTS_ERROR
from pylorax.api.errors import SYSTEM_SOURCE, UNKNOWN_BLUEPRINT, UNKNOWN_SOURCE, UNKNOWN_UUID, UPLOAD_ERROR
from pylorax.api.flask_blueprint import BlueprintSkip
from pylorax.api.projects import get_repo_sources, new_repo_source, repo_to_source
from pylorax.api.regexes import VALID_API_STRING
from pylorax.api.queue import uuid_status, uuid_schedule_upload, uuid_remove_upload
from pylorax.api.projects import get_repo_sources, repo_to_source
from pylorax.api.projects import new_repo_source
from pylorax.api.regexes import VALID_API_STRING, VALID_BLUEPRINT_NAME
import pylorax.api.toml as toml
from pylorax.api.utils import blueprint_exists
# Create the v1 routes Blueprint with skip_routes support
v1_api = BlueprintSkip("v1_routes", __name__)
@@ -166,3 +174,460 @@ def v1_projects_source_new():
return jsonify(status=False, errors=[{"id": PROJECTS_ERROR, "msg": str(e)}]), 400
return jsonify(status=True)
@v1_api.route("/compose", methods=["POST"])
def v1_compose_start():
"""Start a compose
The body of the post should have these fields:
blueprint_name - The blueprint name from /blueprints/list/
compose_type - The type of output to create, from /compose/types
branch - Optional, defaults to master, selects the git branch to use for the blueprint.
    **POST /api/v1/compose**
Start a compose. The content type should be 'application/json' and the body of the POST
should look like this. The "upload" object is optional.
Example::
{
"blueprint_name": "http-server",
"compose_type": "tar",
"branch": "master",
"upload": {
"image_name": "My Image",
"provider": "azure",
"settings": {
"resource_group": "SOMEBODY",
"storage_account_name": "ONCE",
"storage_container": "TOLD",
"location": "ME",
"subscription_id": "THE",
"client_id": "WORLD",
"secret": "IS",
"tenant": "GONNA"
}
}
}
Pass it the name of the blueprint, the type of output (from
    '/api/v1/compose/types'), and the blueprint branch to use. 'branch' is
optional and will default to master. It will create a new build and add
it to the queue. It returns the build uuid and a status if it succeeds.
If an "upload" is given, it will schedule an upload to run when the build
finishes.
Example::
{
"build_id": "e6fa6db4-9c81-4b70-870f-a697ca405cdf",
"status": true
}
"""
# Passing ?test=1 will generate a fake FAILED compose.
# Passing ?test=2 will generate a fake FINISHED compose.
try:
test_mode = int(request.args.get("test", "0"))
except ValueError:
test_mode = 0
compose = request.get_json(cache=False)
errors = []
if not compose:
return jsonify(status=False, errors=[{"id": MISSING_POST, "msg": "Missing POST body"}]), 400
if "blueprint_name" not in compose:
errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "No 'blueprint_name' in the JSON request"})
else:
blueprint_name = compose["blueprint_name"]
if "branch" not in compose or not compose["branch"]:
branch = "master"
else:
branch = compose["branch"]
if "compose_type" not in compose:
errors.append({"id": BAD_COMPOSE_TYPE, "msg": "No 'compose_type' in the JSON request"})
else:
compose_type = compose["compose_type"]
if VALID_BLUEPRINT_NAME.match(blueprint_name) is None:
errors.append({"id": INVALID_CHARS, "msg": "Invalid characters in API path"})
if not blueprint_exists(api, branch, blueprint_name):
errors.append({"id": UNKNOWN_BLUEPRINT, "msg": "Unknown blueprint name: %s" % blueprint_name})
if "upload" in compose:
try:
image_name = compose["upload"]["image_name"]
provider_name = compose["upload"]["provider"]
settings = compose["upload"]["settings"]
except KeyError as e:
errors.append({"id": UPLOAD_ERROR, "msg": f'Missing parameter {str(e)}!'})
try:
provider = resolve_provider(api.config["COMPOSER_CFG"]["upload"], provider_name)
if "supported_types" in provider and compose_type not in provider["supported_types"]:
raise RuntimeError(f'Type "{compose_type}" is not supported by provider "{provider_name}"!')
validate_settings(api.config["COMPOSER_CFG"]["upload"], provider_name, settings, image_name)
except Exception as e:
errors.append({"id": UPLOAD_ERROR, "msg": str(e)})
if errors:
return jsonify(status=False, errors=errors), 400
try:
build_id = start_build(api.config["COMPOSER_CFG"], api.config["DNFLOCK"], api.config["GITLOCK"],
branch, blueprint_name, compose_type, test_mode)
except Exception as e:
if "Invalid compose type" in str(e):
return jsonify(status=False, errors=[{"id": BAD_COMPOSE_TYPE, "msg": str(e)}]), 400
else:
return jsonify(status=False, errors=[{"id": BUILD_FAILED, "msg": str(e)}]), 400
if "upload" in compose:
upload_uuid = uuid_schedule_upload(
api.config["COMPOSER_CFG"],
build_id,
provider_name,
image_name,
settings
)
return jsonify(status=True, build_id=build_id)
@v1_api.route("/compose/uploads/schedule", defaults={'compose_uuid': ""}, methods=["POST"])
@v1_api.route("/compose/uploads/schedule/<compose_uuid>", methods=["POST"])
@checkparams([("compose_uuid", "", "no compose UUID given")])
def v1_compose_uploads_schedule(compose_uuid):
"""Schedule an upload of a compose to a given cloud provider
    **POST /api/v1/compose/uploads/schedule/<compose_uuid>**
Example request::
{
"image_name": "My Image",
"provider": "azure",
"settings": {
"resource_group": "SOMEBODY",
"storage_account_name": "ONCE",
"storage_container": "TOLD",
"location": "ME",
"subscription_id": "THE",
"client_id": "WORLD",
"secret": "IS",
"tenant": "GONNA"
}
}
Example response::
{
"status": true,
"upload_uuid": "572eb0d0-5348-4600-9666-14526ba628bb"
}
"""
if VALID_API_STRING.match(compose_uuid) is None:
error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"}
return jsonify(status=False, errors=[error]), 400
parsed = request.get_json(cache=False)
if not parsed:
return jsonify(status=False, errors=[{"id": MISSING_POST, "msg": "Missing POST body"}]), 400
try:
image_name = parsed["image_name"]
provider_name = parsed["provider"]
settings = parsed["settings"]
except KeyError as e:
error = {"id": UPLOAD_ERROR, "msg": f'Missing parameter {str(e)}!'}
return jsonify(status=False, errors=[error]), 400
try:
compose_type = uuid_status(api.config["COMPOSER_CFG"], compose_uuid)["compose_type"]
provider = resolve_provider(api.config["COMPOSER_CFG"]["upload"], provider_name)
if "supported_types" in provider and compose_type not in provider["supported_types"]:
raise RuntimeError(
f'Type "{compose_type}" is not supported by provider "{provider_name}"!'
)
except Exception as e:
return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(e)}]), 400
try:
upload_uuid = uuid_schedule_upload(
api.config["COMPOSER_CFG"],
compose_uuid,
provider_name,
image_name,
settings
)
except RuntimeError as e:
return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(e)}]), 400
return jsonify(status=True, upload_uuid=upload_uuid)
@v1_api.route("/compose/uploads/delete", defaults={"compose_uuid": "", "upload_uuid": ""}, methods=["DELETE"])
@v1_api.route("/compose/uploads/delete/<compose_uuid>/<upload_uuid>", methods=["DELETE"])
@checkparams([("compose_uuid", "", "no compose UUID given"), ("upload_uuid", "", "no upload UUID given")])
def v1_compose_uploads_delete(compose_uuid, upload_uuid):
"""Delete an upload and disassociate it from its compose
    **DELETE /api/v1/compose/uploads/delete/<compose_uuid>/<upload_uuid>**
Example response::
{
"status": true,
"upload_uuid": "572eb0d0-5348-4600-9666-14526ba628bb"
}
"""
if None in (VALID_API_STRING.match(compose_uuid), VALID_API_STRING.match(upload_uuid)):
error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"}
return jsonify(status=False, errors=[error]), 400
if not uuid_status(api.config["COMPOSER_CFG"], compose_uuid):
error = {"id": UNKNOWN_UUID, "msg": "%s is not a valid build uuid" % compose_uuid}
return jsonify(status=False, errors=[error]), 400
uuid_remove_upload(api.config["COMPOSER_CFG"], compose_uuid, upload_uuid)
try:
delete_upload(api.config["COMPOSER_CFG"]["upload"], upload_uuid)
except RuntimeError as error:
return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}])
return jsonify(status=True, upload_uuid=upload_uuid)
@v1_api.route("/upload/info", defaults={"uuid": ""})
@v1_api.route("/upload/info/<uuid>")
@checkparams([("uuid", "", "no UUID given")])
def v1_upload_info(uuid):
"""Returns information about a given upload
**GET /api/v1/upload/info/<uuid>**
Example response::
{
"status": true,
"upload": {
"creation_time": 1565620940.069004,
"image_name": "My Image",
"image_path": "/var/lib/lorax/composer/results/b6218e8f-0fa2-48ec-9394-f5c2918544c4/disk.vhd",
"provider_name": "azure",
"settings": {
"resource_group": "SOMEBODY",
"storage_account_name": "ONCE",
"storage_container": "TOLD",
"location": "ME",
"subscription_id": "THE",
"client_id": "WORLD",
"secret": "IS",
"tenant": "GONNA"
},
"status": "FAILED",
"uuid": "b637c411-9d9d-4279-b067-6c8d38e3b211"
}
}
"""
if VALID_API_STRING.match(uuid) is None:
return jsonify(status=False, errors=[{"id": INVALID_CHARS, "msg": "Invalid characters in API path"}]), 400
try:
upload = get_upload(api.config["COMPOSER_CFG"]["upload"], uuid).summary()
except RuntimeError as error:
return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}])
return jsonify(status=True, upload=upload)
@v1_api.route("/upload/log", defaults={"uuid": ""})
@v1_api.route("/upload/log/<uuid>")
@checkparams([("uuid", "", "no UUID given")])
def v1_upload_log(uuid):
"""Returns an upload's log
**GET /api/v1/upload/log/<uuid>**
Example response::
{
"status": true,
"log": "\n __________________\r\n< PLAY [localhost] >..."
}
"""
if VALID_API_STRING.match(uuid) is None:
error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"}
return jsonify(status=False, errors=[error]), 400
try:
upload = get_upload(api.config["COMPOSER_CFG"]["upload"], uuid)
except RuntimeError as error:
return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}])
return jsonify(status=True, log=upload.upload_log)
@v1_api.route("/upload/reset", defaults={"uuid": ""}, methods=["POST"])
@v1_api.route("/upload/reset/<uuid>", methods=["POST"])
@checkparams([("uuid", "", "no UUID given")])
def v1_upload_reset(uuid):
"""Reset an upload so it can be attempted again
**POST /api/v1/upload/reset/<uuid>**
Optionally pass in a new image name and/or new settings.
Example request::
{
"image_name": "My renamed image",
"settings": {
"resource_group": "ROLL",
"storage_account_name": "ME",
"storage_container": "I",
"location": "AIN'T",
"subscription_id": "THE",
"client_id": "SHARPEST",
"secret": "TOOL",
"tenant": "IN"
}
}
Example response::
{
"status": true,
"uuid": "c75d5d62-9d26-42fc-a8ef-18bb14679fc7"
}
"""
if VALID_API_STRING.match(uuid) is None:
error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"}
return jsonify(status=False, errors=[error]), 400
parsed = request.get_json(cache=False)
image_name = parsed.get("image_name") if parsed else None
settings = parsed.get("settings") if parsed else None
try:
reset_upload(api.config["COMPOSER_CFG"]["upload"], uuid, image_name, settings)
except RuntimeError as error:
return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}])
return jsonify(status=True, uuid=uuid)
@v1_api.route("/upload/cancel", defaults={"uuid": ""}, methods=["DELETE"])
@v1_api.route("/upload/cancel/<uuid>", methods=["DELETE"])
@checkparams([("uuid", "", "no UUID given")])
def v1_upload_cancel(uuid):
"""Cancel an upload that is either queued or in progress
    **DELETE /api/v1/upload/cancel/<uuid>**
Example response::
{
"status": true,
"uuid": "037a3d56-b421-43e9-9935-c98350c89996"
}
"""
if VALID_API_STRING.match(uuid) is None:
error = {"id": INVALID_CHARS, "msg": "Invalid characters in API path"}
return jsonify(status=False, errors=[error]), 400
try:
cancel_upload(api.config["COMPOSER_CFG"]["upload"], uuid)
except RuntimeError as error:
return jsonify(status=False, errors=[{"id": UPLOAD_ERROR, "msg": str(error)}])
return jsonify(status=True, uuid=uuid)
@v1_api.route("/upload/providers")
def v1_upload_providers():
"""Return the information about all upload providers, including their
display names, expected settings, and saved profiles. Refer to the
`resolve_provider` function.
**GET /api/v1/upload/providers**
Example response::
{
"providers": {
"azure": {
"display": "Azure",
"profiles": {
"default": {
"client_id": "example",
...
}
},
"settings-info": {
"client_id": {
"display": "Client ID",
"placeholder": "",
"regex": "",
"type": "string"
},
...
},
"supported_types": ["vhd"]
},
...
}
}
"""
ucfg = api.config["COMPOSER_CFG"]["upload"]
provider_names = list_providers(ucfg)
def get_provider_info(provider_name):
provider = resolve_provider(ucfg, provider_name)
provider["profiles"] = load_profiles(ucfg, provider_name)
return provider
providers = {provider_name: get_provider_info(provider_name)
for provider_name in provider_names}
return jsonify(status=True, providers=providers)
@v1_api.route("/upload/providers/save", methods=["POST"])
def v1_providers_save():
"""Save provider settings as a profile for later use
**POST /api/v1/upload/providers/save**
Example request::
{
"provider": "azure",
"profile": "my-profile",
"settings": {
"resource_group": "SOMEBODY",
"storage_account_name": "ONCE",
"storage_container": "TOLD",
"location": "ME",
"subscription_id": "THE",
"client_id": "WORLD",
"secret": "IS",
"tenant": "GONNA"
}
}
Saving to an existing profile will overwrite it.
Example response::
{
"status": true
}
"""
parsed = request.get_json(cache=False)
if parsed is None:
return jsonify(status=False, errors=[{"id": MISSING_POST, "msg": "Missing POST body"}]), 400
try:
provider_name = parsed["provider"]
profile = parsed["profile"]
settings = parsed["settings"]
except KeyError as e:
error = {"id": UPLOAD_ERROR, "msg": f'Missing parameter {str(e)}!'}
return jsonify(status=False, errors=[error]), 400
try:
save_settings(api.config["COMPOSER_CFG"]["upload"], provider_name, profile, settings)
except Exception as e:
error = {"id": UPLOAD_ERROR, "msg": str(e)}
return jsonify(status=False, errors=[error])
return jsonify(status=True)


@@ -23,6 +23,7 @@ program_log = logging.getLogger("program")
pylorax_log = logging.getLogger("pylorax")
server_log = logging.getLogger("server")
dnf_log = logging.getLogger("dnf")
lifted_log = logging.getLogger("lifted")
import grp
import os
@@ -43,12 +44,15 @@ from pylorax.api.queue import start_queue_monitor
from pylorax.api.recipes import open_or_create_repo, commit_recipe_directory
from pylorax.api.server import server, GitLock
from lifted.queue import start_upload_monitor
VERSION = "{0}-{1}".format(os.path.basename(sys.argv[0]), vernum)
def setup_logging(logfile):
# Setup logging to console and to logfile
log.setLevel(logging.DEBUG)
pylorax_log.setLevel(logging.DEBUG)
lifted_log.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
@@ -56,6 +60,7 @@ def setup_logging(logfile):
sh.setFormatter(fmt)
log.addHandler(sh)
pylorax_log.addHandler(sh)
lifted_log.addHandler(sh)
fh = logging.FileHandler(filename=logfile)
fh.setLevel(logging.DEBUG)
@@ -63,6 +68,7 @@ def setup_logging(logfile):
fh.setFormatter(fmt)
log.addHandler(fh)
pylorax_log.addHandler(fh)
lifted_log.addHandler(fh)
# External program output log
program_log.setLevel(logging.DEBUG)
@@ -244,6 +250,8 @@ if __name__ == '__main__':
start_queue_monitor(server.config["COMPOSER_CFG"], uid, gid)
start_upload_monitor(server.config["COMPOSER_CFG"]["upload"])
# Change user and group on the main process. Note that this still happens even if
# --user and --group were passed in, but changing to the same user should be fine.
os.setgid(gid)