[PATCH 1/2] tester: Count tester header errors

Kinsey Moore kinsey.moore at oarcorp.com
Mon Sep 6 21:34:40 UTC 2021


These two patches look good with one minor nit below.

On 9/5/2021 02:57, chrisj at rtems.org wrote:
> From: Chris Johns <chrisj at rtems.org>
>
> - Remove the hard tester error and count the header errors.
> ---
>   tester/rt/report.py | 17 +++++++++++++----
>   tester/rt/test.py   |  7 +++++--
>   2 files changed, 18 insertions(+), 6 deletions(-)
>
> diff --git a/tester/rt/report.py b/tester/rt/report.py
> index 0e19edc..e808fce 100644
> --- a/tester/rt/report.py
> +++ b/tester/rt/report.py
> @@ -68,6 +68,7 @@ class report(object):
>           self.wrong_version = 0
>           self.wrong_build = 0
>           self.wrong_tools = 0
> +        self.wrong_header = 0
>           self.results = {}
>           self.config = {}
>           self.name_max_len = 0
> @@ -85,9 +86,11 @@ class report(object):
>           msg += 'Wrong Version  %*d%s' % (self.total_len, self.wrong_version, os.linesep)
>           msg += 'Wrong Build    %*d%s' % (self.total_len, self.wrong_build, os.linesep)
>           msg += 'Wrong Tools    %*d%s' % (self.total_len, self.wrong_tools, os.linesep)
> +        msg += 'Wrong Header   %*d%s' % (self.total_len, self.wrong_header, os.linesep)
>           return msg
>   
>       def start(self, index, total, name, executable, bsp_arch, bsp, show_header):
> +        wrong = self.wrong_version + self.wrong_build + self.wrong_tools + self.wrong_header
>           header = '[%*d/%*d] p:%-*d f:%-*d u:%-*d e:%-*d I:%-*d B:%-*d ' \
>                    't:%-*d L:%-*d i:%-*d W:%-*d | %s/%s: %s' % \
>                    (len(str(total)), index,
> @@ -101,7 +104,7 @@ class report(object):
>                     len(str(total)), self.timeouts,
>                     len(str(total)), self.test_too_long,
>                     len(str(total)), self.invalids,
> -                  len(str(total)), self.wrong_version + self.wrong_build + self.wrong_tools,
> +                  len(str(total)), wrong,
>                     bsp_arch,
>                     bsp,
>                     path.basename(name))
> @@ -245,7 +248,8 @@ class report(object):
>                       status = 'wrong-tools'
>                       self.wrong_tools += 1
>                   else:
> -                    raise error.general('invalid test state: %s: %s' % (name, state))
> +                    status = 'wrong-header'
> +                    self.wrong_header += 1
>               self.results[name]['result'] = status
>               self.results[name]['output'] = prefixed_output
>               if self.name_max_len < len(path.basename(name)):
> @@ -256,7 +260,8 @@ class report(object):
>   
>       def log(self, name, mode):
>           status_fails = ['failed', 'timeout', 'test-too-long', 'invalid',
> -                        'wrong-version', 'wrong-build', 'wrong-tools']
> +                        'wrong-version', 'wrong-build', 'wrong-tools',
> +                        'wrong-header']
>           if mode != 'none':
>               self.lock.acquire()
>               if name not in self.results:
> @@ -287,7 +292,7 @@ class report(object):
>   
>       def score_card(self, mode = 'full'):
>           if mode == 'short':
> -            wrongs = self.wrong_version + self.wrong_build + self.wrong_tools
> +            wrongs = self.wrong_version + self.wrong_build + self.wrong_tools + self.wrong_header
>               return 'Passed:%d Failed:%d Timeout:%d Test-Too-long:%d Invalid:%d Wrong:%d' % \
>                   (self.passed, self.failed, self.timeouts, self.test_too_long,
>                    self.invalids, wrongs)
> @@ -305,6 +310,7 @@ class report(object):
>               l += ['Wrong Version: %*d' % (self.total_len, self.wrong_version)]
>               l += ['Wrong Build:   %*d' % (self.total_len, self.wrong_build)]
>               l += ['Wrong Tools:   %*d' % (self.total_len, self.wrong_tools)]
> +            l += ['Wrong Header   %*d' % (self.total_len, self.wrong_header)]

Missing ':' after "Header".
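Presumably it should match the formatting of the other summary lines, something
like (just a sketch, untested):

    l += ['Wrong Header:  %*d' % (self.total_len, self.wrong_header)]

with the label padded to keep the counts column-aligned with 'Wrong Tools:' and
'Total:'.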


>               l += ['---------------%s' % ('-' * self.total_len)]
>               l += ['Total:         %*d' % (self.total_len, self.total)]
>               return os.linesep.join(l)
> @@ -352,6 +358,9 @@ class report(object):
>           if self.wrong_tools:
>               l += ['Wrong Tools:']
>               l += show_state(self.results, 'wrong-tools', self.name_max_len)
> +        if self.wrong_header:
> +            l += ['Wrong Headers:']
> +            l += show_state(self.results, 'wrong-header', self.name_max_len)
>           return os.linesep.join(l)
>   
>       def summary(self):
> diff --git a/tester/rt/test.py b/tester/rt/test.py
> index 66f1756..113936c 100644
> --- a/tester/rt/test.py
> +++ b/tester/rt/test.py
> @@ -240,6 +240,7 @@ def generate_json_report(args, reports, start_time, end_time,
>       json_log['summary']['wrong-version_count'] = reports.wrong_version
>       json_log['summary']['wrong-build_count'] = reports.wrong_build
>       json_log['summary']['wrong-tools_count'] = reports.wrong_tools
> +    json_log['summary']['invalid-header_count'] = reports.wrong_header
>       json_log['summary']['total_count'] = reports.total
>       time_delta = end_time - start_time
>       json_log['summary']['average_test_time'] = str(time_delta / total)
> @@ -248,7 +249,7 @@ def generate_json_report(args, reports, start_time, end_time,
>       result_types = [
>               'failed', 'user-input', 'expected-fail', 'indeterminate',
>               'benchmark', 'timeout', 'test-too-long', 'invalid', 'wrong-version',
> -            'wrong-build', 'wrong-tools'
> +            'wrong-build', 'wrong-tools', 'wrong-header'
>       ]
>       json_results = {}
>       for result_type in result_types:
> @@ -313,6 +314,7 @@ def generate_junit_report(args, reports, start_time, end_time,
>       junit_prop['wrong-version_count'] = reports.wrong_version
>       junit_prop['wrong-build_count'] = reports.wrong_build
>       junit_prop['wrong-tools_count'] = reports.wrong_tools
> +    junit_prop['wrong-header_count'] = reports.wrong_header
>       junit_prop['total_count'] = reports.total
>       time_delta = end_time - start_time
>       junit_prop['average_test_time'] = str(time_delta / total)
> @@ -367,6 +369,7 @@ def generate_yaml_report(args, reports, start_time, end_time,
>       yaml_log['summary']['wrong-version-count'] = reports.wrong_version
>       yaml_log['summary']['wrong-build-count'] = reports.wrong_build
>       yaml_log['summary']['wrong-tools-count'] = reports.wrong_tools
> +    yaml_log['summary']['wrong-header-count'] = reports.wrong_header
>       yaml_log['summary']['total-count'] = reports.total
>       time_delta = end_time - start_time
>       yaml_log['summary']['average-test-time'] = str(time_delta / total)
> @@ -375,7 +378,7 @@ def generate_yaml_report(args, reports, start_time, end_time,
>       result_types = [
>               'failed', 'user-input', 'expected-fail', 'indeterminate',
>               'benchmark', 'timeout', 'test-too-long', 'invalid', 'wrong-version',
> -            'wrong-build', 'wrong-tools'
> +            'wrong-build', 'wrong-tools', 'wrong-header'
>       ]
>       for result_type in result_types:
>           yaml_log['summary'][result_type] = []

