createhdds/openqa_trigger/report_job_results.py

85 lines
2.8 KiB
Python

import requests
import argparse
import os
import time
import conf_test_suites
API_ROOT = "http://10.34.28.126/api/v1"
SLEEPTIME = 60
def get_passed_testcases(job_ids):
    """
    Poll OpenQA until every requested job finishes, then collect passed testcases.

    Blocks, sleeping SLEEPTIME seconds between polling rounds, until all jobs
    report state 'done'.

    job_ids ~ list of int (job ids)
    Returns ~ dict mapping (VERSION, FLAVOR, BUILD, ARCH) tuples to a sorted,
    de-duplicated list of testcase names (str) for jobs whose result was
    'passed'. BUILD may be None if the job settings carry no BUILD key.
    """
    running_jobs = {job_id: "%s/jobs/%s" % (API_ROOT, job_id) for job_id in job_ids}
    finished_jobs = {}
    while running_jobs:
        # Iterate over a snapshot: deleting from the dict while iterating
        # .items() directly raises RuntimeError on Python 3.
        for job_id, url in list(running_jobs.items()):
            job_state = requests.get(url).json()['job']
            if job_state['state'] == 'done':
                finished_jobs[job_id] = job_state
                del running_jobs[job_id]
        if running_jobs:
            time.sleep(SLEEPTIME)
    passed_testcases = {}  # key = (VERSION, FLAVOR, BUILD, ARCH)
    for job_id in job_ids:
        job = finished_jobs[job_id]
        if job['result'] == 'passed':
            settings = job['settings']
            key = (settings['VERSION'], settings['FLAVOR'],
                   settings.get('BUILD', None), settings['ARCH'])
            passed_testcases.setdefault(key, [])
            # One OpenQA test suite maps to several relval testcase names.
            passed_testcases[key].extend(conf_test_suites.TESTSUITES[settings['TEST']])
    # De-duplicate and sort per key. (Was .iteritems(): Python 2 only.)
    for key, value in passed_testcases.items():
        passed_testcases[key] = sorted(set(value))
    return passed_testcases
def get_relval_commands(passed_testcases):
    """
    Build 'relval report-results' command lines from passed testcases.

    passed_testcases ~ dict keyed by (VERSION, FLAVOR, BUILD, ARCH) mapping to
    lists of testcase names, as returned by get_passed_testcases().
    Returns ~ list of str, one fully-assembled relval command per testcase.

    NOTE(review): BUILD is assumed to split into exactly two words for
    'rawhide' (e.g. "22 20150110") and three for 'branched'
    (e.g. "22 Alpha TC1"); other formats raise TypeError on the %-format.
    """
    relval_template = "relval report-results --unmanned --result pass"
    commands = []
    for key, tc_names in passed_testcases.items():
        cmd_ = relval_template
        version, _, build, arch = key
        # Debug override of `build` removed per the original FIXME
        # ("remove after debugging"); the job's real BUILD is used now.
        if version == 'rawhide':
            cmd_ += ' --release "%s" --date "%s"' % tuple(build.split())
        elif version == 'branched':
            cmd_ += ' --release "%s" --milestone "%s" --compose "%s"' % tuple(build.split())
        for tc_name in tc_names:
            testcase = conf_test_suites.TESTCASES[tc_name]
            # '$RUNARCH$' is a placeholder meaning "use the job's own arch".
            tc_env = arch if testcase['env'] == '$RUNARCH$' else testcase['env']
            tc_type = testcase['type']
            tc_section = testcase['section']
            commands.append('%s --env "%s" --testtype "%s" --section "%s" --testcase "%s"'
                            % (cmd_, tc_env, tc_type, tc_section, tc_name))
    return commands
def report_results(job_ids):
    """
    Wait for the given OpenQA jobs and submit their passed testcases via relval.

    job_ids ~ list of int (job ids)
    Each generated relval command is executed through the shell; the commands
    are built internally from job settings, not from raw user input.
    """
    for cmd in get_relval_commands(get_passed_testcases(job_ids)):
        os.system(cmd)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Evaluate per-testcase results from OpenQA job runs")
parser.add_argument('jobs', type=int, nargs='+')
args = parser.parse_args()
passed_testcases = get_passed_testcases(args.jobs)
import pprint
pprint.pprint(passed_testcases)
pprint.pprint(get_relval_commands(passed_testcases))