[PATCH 1/1] Add yaml format to the supported report formats
clrrm at isep.ipp.pt
Wed Dec 2 16:05:15 UTC 2020
From: Cláudio Maia <clrrm at isep.ipp.pt>
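
The yaml formatter writes the test run data to a YAML file: the
command line, host and Python details, a summary with the per-result
counts and the timing figures, and a per-test entry with the
execution times, the result, the BSP, the console and debugger output
and the SHA512 hash of the test executable.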
---
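As an illustration, a run such as the following (the BSP and test
names here are made-up placeholders):

    rtems-test --rtems-bsp=a_bsp --report-path=report \
               --report-format=yaml a_test.exe

should write report.yaml with a layout roughly like the sketch below
(a sketch only: the values are invented, and PyYAML emits the keys in
sorted order):

    command-line: [...]
    host: ...
    outputs:
    - bsp: a_bsp
      bsp-arch: an_arch
      console-output: '...'
      debugger-output: '...'
      executable-name: a_test.exe
      executable-sha512: '...'
      execution-duration: '0:00:01.000000'
      execution-result: passed
    python: ...
    summary:
      failed: []
      failed-count: 0
      passed-count: 1
      total-count: 1
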
tester/rt/test.py | 104 +++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 102 insertions(+), 2 deletions(-)
diff --git a/tester/rt/test.py b/tester/rt/test.py
index 9b157e9..0178a8d 100644
--- a/tester/rt/test.py
+++ b/tester/rt/test.py
@@ -339,9 +339,109 @@ def generate_junit_report(args, reports, start_time, end_time,
     with open(junit_file, 'w') as f:
         TestSuite.to_file(f, [ts], prettyprint = True)
+
+
+def generate_yaml_report(args, reports, start_time, end_time,
+                         total, yaml_file):
+    # local import so PyYAML is only needed when a yaml report is requested
+    import yaml
+
+    def format_output(output_list):
+        # join the lines and strip the '] ' and '=> ' console prefixes
+        return "\n".join(output_list).replace("] ", '').replace('=> ', '')
+
+    yaml_log = {}
+    yaml_log['command-line'] = args
+    yaml_log['host'] = host.label(mode='all')
+    yaml_log['python'] = sys.version.replace('\n', '')
+    yaml_log['summary'] = {}
+    yaml_log['summary']['passed-count'] = reports.passed
+    yaml_log['summary']['failed-count'] = reports.failed
+    yaml_log['summary']['user-input-count'] = reports.user_input
+    yaml_log['summary']['expected-fail-count'] = reports.expected_fail
+    yaml_log['summary']['indeterminate-count'] = reports.indeterminate
+    yaml_log['summary']['benchmark-count'] = reports.benchmark
+    yaml_log['summary']['timeout-count'] = reports.timeouts
+    yaml_log['summary']['test-too-long-count'] = reports.test_too_long
+    yaml_log['summary']['invalid-count'] = reports.invalids
+    yaml_log['summary']['wrong-version-count'] = reports.wrong_version
+    yaml_log['summary']['wrong-build-count'] = reports.wrong_build
+    yaml_log['summary']['wrong-tools-count'] = reports.wrong_tools
+    yaml_log['summary']['total-count'] = reports.total
+    time_delta = end_time - start_time
+    yaml_log['summary']['average-test-time'] = str(time_delta / total)
+    yaml_log['summary']['testing-time'] = str(time_delta)
+
+    result_types = [
+        'failed', 'user-input', 'expected-fail', 'indeterminate',
+        'benchmark', 'timeout', 'test-too-long', 'invalid', 'wrong-version',
+        'wrong-build', 'wrong-tools'
+    ]
+    for result_type in result_types:
+        yaml_log['summary'][result_type] = []
+
+    yaml_log['outputs'] = []
+
+    # process the output of each test
+    for exe_name in reports.results:
+        result = reports.results[exe_name]
+        result_element = {}
+        result_element['executable-name'] = path.basename(exe_name)
+        result_element['executable-sha512'] = get_hash512(exe_name)
+        result_element['execution-start'] = result['start'].isoformat()
+        result_element['execution-end'] = result['end'].isoformat()
+        date_diff = result['end'] - result['start']
+        result_element['execution-duration'] = str(date_diff)
+        result_element['execution-result'] = result['result']
+        result_element['bsp'] = result['bsp']
+        result_element['bsp-arch'] = result['bsp_arch']
+        result_output = result['output']
+
+        dbg_output = []
+        test_output = []
+        idxs_output = []  # indices of the '=> ' and '*** END' marker lines
+        for idx, elem in enumerate(result_output):
+            # use enumerate() rather than list.index() so a repeated
+            # line cannot map back to an earlier duplicate
+            if '=> ' in elem:
+                idxs_output.append(idx)
+            if '*** END' in elem:
+                idxs_output.append(idx)
+
+        if len(idxs_output) == 3:  # test executed and has a result
+            dbg_output = result_output[idxs_output[0]:idxs_output[1]]
+            dbg_output.append("=== Executed Test ===")
+            dbg_output += result_output[idxs_output[2] + 1:]
+            test_output = result_output[idxs_output[1]:idxs_output[2] + 1]
+        else:
+            dbg_output = result_output
+
+        result_element['debugger-output'] = format_output(dbg_output)
+        result_element['console-output'] = format_output(test_output)
+        yaml_log['outputs'].append(result_element)
+
+        result_type = result['result']
+        # map "fatal-error" on to "failed"
+        if result_type == "fatal-error":
+            result_type = "failed"
+
+        if result_type != 'passed':
+            yaml_log['summary'][result_type].append(path.basename(exe_name))
+
+    with open(yaml_file, 'w') as outfile:
+        yaml.dump(yaml_log, outfile, default_flow_style=False,
+                  allow_unicode=True)
+
+
+def get_hash512(exe):
+    """ Return the SHA512 hex digest of the binary file passed as argument """
+    import hashlib
+
+    hasher = hashlib.sha512()
+    with open(exe, "rb") as f:
+        # hash in 4KiB blocks so large executables are not read into
+        # memory in one go
+        for byte_block in iter(lambda: f.read(4096), b""):
+            hasher.update(byte_block)
+    return hasher.hexdigest()
+
+
 report_formatters = {
     'json': generate_json_report,
-    'junit': generate_junit_report
+    'junit': generate_junit_report,
+    'yaml': generate_yaml_report
 }
@@ -365,7 +465,7 @@ def run(args):
     '--rtems-bsp': 'The RTEMS BSP to run the test on',
     '--user-config': 'Path to your local user configuration INI file',
     '--report-path': 'Report output base path (file extension will be added)',
-    '--report-format': 'Formats in which to report test results in addition to txt: json',
+    '--report-format': 'Formats in which to report test results in addition to txt: json, yaml',
     '--log-mode': 'Reporting modes, failures (default),all,none',
     '--list-bsps': 'List the supported BSPs',
     '--debug-trace': 'Debug trace based on specific flags (console,gdb,output,cov)',
--
2.17.1