From 471e13791ee4420465b75065305317ff542b90ba Mon Sep 17 00:00:00 2001
From: Josef Skladanka
Date: Fri, 30 Jan 2015 14:58:12 +0100
Subject: [PATCH] Make the thing work! Fixed some issues & test-suites
 "metadata"

---
 openqa_trigger/conf_test_suites.py   | 23 ++++++++++++++---------
 openqa_trigger/openqa_trigger.py     | 11 ++++++-----
 openqa_trigger/report_job_results.py | 15 +++++++++++++--
 3 files changed, 33 insertions(+), 16 deletions(-)

diff --git a/openqa_trigger/conf_test_suites.py b/openqa_trigger/conf_test_suites.py
index 50f61dc..42acba1 100644
--- a/openqa_trigger/conf_test_suites.py
+++ b/openqa_trigger/conf_test_suites.py
@@ -1,6 +1,11 @@
 TESTCASES = {
     "QA:Testcase_Boot_default_install Server offline": {
-        "section": 'Default boot',
+        "section": 'Default boot and install',
+        "env": "$RUNARCH$",
+        "type": "Installation",
+        },
+    "QA:Testcase_Boot_default_install Server netinst": {
+        "section": 'Default boot and install',
         "env": "$RUNARCH$",
         "type": "Installation",
         },
@@ -11,7 +16,7 @@ TESTCASES = {
         },
     "QA:Testcase_partitioning_guided_empty": {
         "section": "Guided storage configuration",
-        "env": "x86 BIOS",
+        "env": "x86", # Probably a bug in relval - column name is "x86 BIOS", but there is a comment there just behind 'x86' which probably makes it strip the rest
         "type": "Installation",
         },
     "QA:Testcase_Anaconda_User_Interface_Graphical": {
@@ -31,7 +36,7 @@ TESTCASES = {
         },
     "QA:Testcase_partitioning_guided_delete_all": {
         "section": "Guided storage configuration",
-        "env": "x86 BIOS",
+        "env": "x86", # Probably a bug in relval - column name is "x86 BIOS", but there is a comment there just behind 'x86' which probably makes it strip the rest
         "type": "Installation",
         },
     "QA:Testcase_install_to_SATA": {
@@ -41,7 +46,7 @@ TESTCASES = {
         },
     "QA:Testcase_partitioning_guided_multi_select": {
         "section": "Guided storage configuration",
-        "env": "x86 BIOS",
+        "env": "x86", # Probably a bug in relval - column name is "x86 BIOS", but there is a comment there just behind 'x86' which probably makes it strip the rest
         "type": "Installation",
         },
     "QA:Testcase_install_to_SCSI": {
@@ -74,27 +79,27 @@ TESTCASES = {
 
 TESTSUITES = {
     "server_simple":[
-        "QA:Testcase_Boot_default_install Server offline",
+        "QA:Testcase_Boot_default_install Server netinst",
         "QA:Testcase_install_to_VirtIO",
         "QA:Testcase_Anaconda_User_Interface_Graphical",
         "QA:Testcase_Anaconda_user_creation",
         ],
     "server_delete_pata":[
-        "QA:Testcase_Boot_default_install Server offline",
+        "QA:Testcase_Boot_default_install Server netinst",
         "QA:Testcase_install_to_PATA",
         "QA:Testcase_partitioning_guided_delete_all",
         "QA:Testcase_Anaconda_User_Interface_Graphical",
         "QA:Testcase_Anaconda_user_creation",
         ],
     "server_sata_multi":[
-        "QA:Testcase_Boot_default_install Server offline",
+        "QA:Testcase_Boot_default_install Server netinst",
         "QA:Testcase_install_to_SATA",
         "QA:Testcase_partitioning_guided_multi_select",
         "QA:Testcase_Anaconda_User_Interface_Graphical",
         "QA:Testcase_Anaconda_user_creation",
         ],
-    "server_scsi_updatesimg":[
-        "QA:Testcase_Boot_default_install Server offline",
+    "server_scsi_updates_img":[
+        "QA:Testcase_Boot_default_install Server netinst",
         "QA:Testcase_install_to_SCSI",
         "QA:Testcase_partitioning_guided_empty",
         "QA:Testcase_Anaconda_updates.img_via_URL",
diff --git a/openqa_trigger/openqa_trigger.py b/openqa_trigger/openqa_trigger.py
index 953dc08..52138bf 100755
--- a/openqa_trigger/openqa_trigger.py
+++ b/openqa_trigger/openqa_trigger.py
@@ -8,14 +8,14 @@ import os.path
 import sys
 import subprocess
 
-#from evaluate_jobs import evaluate_jobs
+from report_job_results import report_results
 
 PERSISTENT = "/var/tmp/openqa_watcher.json"
 CURRENT_TEST = "https://fedoraproject.org/wiki/Test_Results:Current_Installation_Test"
 ISO_URL = "https://kojipkgs.fedoraproject.org/mash/rawhide-%s/rawhide/%s/os/images/boot.iso"
 ISO_REGEX = re.compile(r'https://kojipkgs\.fedoraproject\.org/mash/(?P<name>rawhide-(?P<build>\d+))/rawhide/(?P<arch>x86_64|i386)/os/images/boot\.iso')
 ISO_PATH = "/var/lib/openqa/factory/iso/"
-RUN_COMMAND = "/var/lib/openqa/script/client isos post ISO=%s DISTRI=fedora VERSION=rawhide FLAVOR=server ARCH=%s BUILD='%s_%s'"
+RUN_COMMAND = "/var/lib/openqa/script/client isos post ISO=%s DISTRI=fedora VERSION=rawhide FLAVOR=server ARCH=%s BUILD=%s_%s"
 VERSIONS = ['i386', 'x86_64']
 
 # read last tested version from file
@@ -45,8 +45,9 @@ def read_currents():
 def download_rawhide_iso(link, name, arch):
     isoname = "%s_%s.iso" % (name, arch)
     filename = os.path.join(ISO_PATH, isoname)
-    link = "http://" + link[len("https://"):]
-    urlgrabber.urlgrab(link, filename)
+    if not os.path.isfile(filename):
+        link = "http://" + link[len("https://"):]
+        urlgrabber.urlgrab(link, filename)
     return isoname
 
 # run OpenQA 'isos' job on selected isoname, with given arch and build
@@ -94,8 +95,8 @@ def run_if_newer():
     f.close()
 
     # wait for jobs to finish and display results
-    #evaluate_jobs(jobs)
     print jobs
+    report_results(jobs)
 
 
 if __name__ == "__main__":
diff --git a/openqa_trigger/report_job_results.py b/openqa_trigger/report_job_results.py
index 51a0de6..75a19ec 100644
--- a/openqa_trigger/report_job_results.py
+++ b/openqa_trigger/report_job_results.py
@@ -5,7 +5,7 @@ import time
 
 import conf_test_suites
 
-API_ROOT = "http://10.34.28.126/api/v1"
+API_ROOT = "http://localhost/api/v1"
 SLEEPTIME = 60
 
 
@@ -21,6 +21,7 @@ def get_passed_testcases(job_ids):
         for job_id, url in running_jobs.items():
             job_state = requests.get(url).json()['job']
             if job_state['state'] == 'done':
+                print "Job %s is done" % job_id
                 finished_jobs[job_id] = job_state
                 del running_jobs[job_id]
         if running_jobs:
@@ -63,19 +64,29 @@ def get_relval_commands(passed_testcases):
 
 def report_results(job_ids):
     commands = get_relval_commands(get_passed_testcases(job_ids))
+    print "Running relval commands:"
     for command in commands:
+        print command
         os.system(command)
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Evaluate per-testcase results from OpenQA job runs")
     parser.add_argument('jobs', type=int, nargs='+')
+    parser.add_argument('--report', default=False, action='store_true')
     args = parser.parse_args()
 
     passed_testcases = get_passed_testcases(args.jobs)
+    commands = get_relval_commands(passed_testcases)
 
     import pprint
     pprint.pprint(passed_testcases)
-    pprint.pprint(get_relval_commands(passed_testcases))
+    if not args.report:
+        print "\n\n### No reporting is done! ###\n\n"
+        pprint.pprint(commands)
+    else:
+        for command in commands:
+            print command
+            os.system(command)
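
Usage note (illustrative only; the job IDs are placeholders and the plain "python" invocation is an assumption, not part of the patch): with this change report_job_results.py can also be run standalone. Without --report it only prints the passed test cases and the relval commands it would run; with --report it also executes them:

    python report_job_results.py 1234 1235
    python report_job_results.py 1234 1235 --report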