diff options
Diffstat (limited to 'utils/lit/lit')
-rw-r--r-- | utils/lit/lit/Test.py | 43 | ||||
-rwxr-xr-x | utils/lit/lit/main.py | 15 |
2 files changed, 56 insertions, 2 deletions
diff --git a/utils/lit/lit/Test.py b/utils/lit/lit/Test.py index 05cae99a2f..d84eb4798f 100644 --- a/utils/lit/lit/Test.py +++ b/utils/lit/lit/Test.py @@ -1,6 +1,6 @@ import os -# Test results. +# Test result codes. class ResultCode(object): """Test result codes.""" @@ -31,6 +31,28 @@ XPASS = ResultCode('XPASS', True) UNRESOLVED = ResultCode('UNRESOLVED', True) UNSUPPORTED = ResultCode('UNSUPPORTED', False) +# Test metric values. + +class MetricValue(object): + def format(self): + raise RuntimeError("abstract method") + +class IntMetricValue(MetricValue): + def __init__(self, value): + self.value = value + + def format(self): + return str(self.value) + +class RealMetricValue(MetricValue): + def __init__(self, value): + self.value = value + + def format(self): + return '%.4f' % self.value + +# Test results. + class Result(object): """Wrapper for the results of executing an individual test.""" @@ -41,6 +63,25 @@ class Result(object): self.output = output # The wall timing to execute the test, if timing. self.elapsed = elapsed + # The metrics reported by this test. + self.metrics = {} + + def addMetric(self, name, value): + """ + addMetric(name, value) + + Attach a test metric to the test result, with the given name and + value. It is an error to attempt to attach a metric with the same + name multiple times. + + Each value must be an instance of a MetricValue subclass. + """ + if name in self.metrics: + raise ValueError("result already includes metrics for %r" % ( + name,)) + if not isinstance(value, MetricValue): + raise TypeError("unexpected metric value: %r" % (value,)) + self.metrics[name] = value # Test classes. 
diff --git a/utils/lit/lit/main.py b/utils/lit/lit/main.py index 50c9a66c8d..b93aa6fd0b 100755 --- a/utils/lit/lit/main.py +++ b/utils/lit/lit/main.py @@ -45,15 +45,28 @@ class TestingProgressDisplay(object): if self.progressBar: self.progressBar.clear() - print('%s: %s (%d of %d)' % (test.result.code.name, test.getFullName(), + # Show the test result line. + test_name = test.getFullName() + print('%s: %s (%d of %d)' % (test.result.code.name, test_name, self.completed, self.numTests)) + # Show the test failure output, if requested. if test.result.code.isFailure and self.opts.showOutput: print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(), '*'*20)) print(test.result.output) print("*" * 20) + # Report test metrics, if present. + if test.result.metrics: + print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(), + '*'*10)) + items = sorted(test.result.metrics.items()) + for metric_name, value in items: + print('%s: %s ' % (metric_name, value.format())) + print("*" * 10) + + # Ensure the output is flushed. sys.stdout.flush() def main(builtinParameters = {}): |