#!/usr/bin/env python3
"""Parse a junit XML report and create a per-test CSV report, plus an
optional histogram summary CSV of the MLD scores per test category."""

import argparse
import math
import re
from xml.etree import ElementTree

import numpy as np

# Property names copied from each testcase's <property> elements into the CSV.
PROPERTIES = ["MLD", "MAXIMUM ABS DIFF", "MIN_SSNR"]

# Category label -> regex matched against the full test name.  A test is
# assigned to the LAST matching entry, so the catch-all "Normal operation"
# pattern acts as the fallback when no specific category matches.
CATEGORIES = {
    "Normal operation": r".*",
    "JBM": r"JBM",
    "PLC": r"%",  # assumes PLC test names carry a loss percentage, e.g. "5%" — TODO confirm
    "Bitrate switching": r"br sw|bitrate switching",
}

# Histogram bin edges for the MLD summary (0 and None are counted separately).
LIMITS = [0, 5, 10, 20, math.inf]


def parse_report(xml_report):
    """Parse a junit XML report.

    Skipped testcases are ignored.  Returns a dict sorted by test name:
    full test name -> (result, [property values as strings]), where result
    is one of "PASS", "FAIL" or "ERROR" and the property values follow the
    order of PROPERTIES ("None" when a property is absent).
    """
    tree = ElementTree.parse(xml_report)
    results = {}
    for testcase in tree.findall(".//testcase"):
        if testcase.find(".//skipped") is not None:
            continue
        if testcase.get("file") is None:
            # No "file" attribute: reconstruct the path from the classname.
            fulltestname = (
                testcase.get("classname").replace(".", "/")
                + ".py::"
                + testcase.get("name")
            )
        else:
            fulltestname = testcase.get("file") + "::" + testcase.get("name")

        properties_found = {
            p.get("name"): p.get("value") for p in testcase.findall(".//property")
        }

        if testcase.find("failure") is not None:
            testresult = "FAIL"
        elif testcase.find("error") is not None:
            testresult = "ERROR"
        else:
            testresult = "PASS"

        # For ERROR cases, both a FAIL and an ERROR testcase are generated
        # with the same name; the ERROR result intentionally overwrites FAIL.
        results[fulltestname] = (
            testresult,
            [str(properties_found.get(p)) for p in PROPERTIES],
        )
    return dict(sorted(results.items()))


def write_csv(results, csv_file):
    """Write one semicolon-separated line per test to csv_file.

    Returns the result counts as {"PASS": n, "FAIL": n, "ERROR": n}.
    """
    count = {"PASS": 0, "FAIL": 0, "ERROR": 0}
    with open(csv_file, "w") as outfile:
        outfile.write(";".join(["testcase", "Result"] + PROPERTIES) + "\n")
        for testname, (testresult, values) in results.items():
            count[testresult] += 1
            outfile.write(";".join([testname, testresult] + values) + "\n")
    return count


def write_summary(results, summary_file):
    """Write a per-category histogram of MLD scores to summary_file.

    MLD values of exactly "0" and missing values ("None") are reported in
    dedicated columns; the remaining values are binned according to LIMITS.
    """
    per_category = {}
    for testname, (_, values) in results.items():
        category = [c for c in CATEGORIES if re.search(CATEGORIES[c], testname)][-1]
        # values[0] is the MLD score (first entry of PROPERTIES).
        per_category.setdefault(category, []).append(values[0])

    with open(summary_file, "w") as fp:
        bins = [f"{a} -- {b}" for a, b in zip(LIMITS[:-1], LIMITS[1:])]
        fp.write(";".join(["Category", "0"] + bins + ["None"]) + "\n")
        for category, mld_values in per_category.items():
            # Separate 0 and None as special cases before binning.
            numeric = [float(x) for x in mld_values if x not in ("None", "0")]
            zero = mld_values.count("0")
            none = mld_values.count("None")
            hist, _ = np.histogram(numeric, LIMITS)
            # Bare ";" separator so rows line up with the header columns.
            fp.write(
                ";".join([category, str(zero)] + [str(h) for h in hist] + [str(none)])
                + "\n"
            )


def main():
    """Command-line entry point."""
    parser = argparse.ArgumentParser(
        description="Parse a junit report and create an MLD summary report."
    )
    parser.add_argument(
        "xml_report",
        type=str,
        help="XML junit report input file, e.g. report-junit.xml",
    )
    parser.add_argument("csv_file", type=str, help="Output CSV file, e.g. mld.csv")
    parser.add_argument(
        "summary_file",
        type=str,
        nargs="?",
        default=None,
        help="Summary CSV file, e.g. summary.csv",
    )
    args = parser.parse_args()

    results = parse_report(args.xml_report)
    count = write_csv(results, args.csv_file)
    if args.summary_file is not None:
        write_summary(results, args.summary_file)

    total = count["PASS"] + count["FAIL"] + count["ERROR"]
    print(
        f"Parsed testsuite with {total} tests: {count['PASS']} passes, "
        f"{count['FAIL']} failures and {count['ERROR']} errors."
    )


if __name__ == "__main__":
    main()