[lit] Add support for attaching arbitrary metrics to test results.
author     Daniel Dunbar <daniel@zuster.org>  Wed, 11 Sep 2013 17:45:11 +0000 (17:45 +0000)
committer  Daniel Dunbar <daniel@zuster.org>  Wed, 11 Sep 2013 17:45:11 +0000 (17:45 +0000)
 - This is a work-in-progress and all details are subject to change, but I am
   trying to build up support for allowing lit to be used as a driver for
   performance tests (or other tests which might want to record information
   beyond simple PASS/FAIL).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@190535 91177308-0d34-0410-b5e6-96231b3b80d8
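
As a rough sketch of the intended usage (the API is explicitly subject to change, and the format name, metric names, and values below are made up for illustration), a custom test format could attach performance numbers to its results like this:

    import lit.Test
    import lit.formats

    class PerfFormat(lit.formats.FileBasedTest):
        # Hypothetical format; how the numbers are gathered is up to the format.
        def execute(self, test, lit_config):
            compile_time = 1.2345   # placeholder measurements
            num_failures = 0

            result = lit.Test.Result(lit.Test.PASS, 'measurements collected')
            # Each metric value must be an instance of a MetricValue subclass.
            result.addMetric('compile_time', lit.Test.RealMetricValue(compile_time))
            result.addMetric('num_failures', lit.Test.IntMetricValue(num_failures))
            return result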

utils/lit/lit/Test.py
utils/lit/lit/main.py
utils/lit/tests/Inputs/test-data/lit.cfg [new file with mode: 0644]
utils/lit/tests/Inputs/test-data/metrics.ini [new file with mode: 0644]
utils/lit/tests/test-data.py [new file with mode: 0644]

diff --git a/utils/lit/lit/Test.py b/utils/lit/lit/Test.py
index 05cae99a2f607236e583a2f10ec8188775531ebf..d84eb4798f141e2d0e0cfd33d1813f3e2b104347 100644
@@ -1,6 +1,6 @@
 import os
 
-# Test results.
+# Test result codes.
 
 class ResultCode(object):
     """Test result codes."""
@@ -31,6 +31,28 @@ XPASS       = ResultCode('XPASS', True)
 UNRESOLVED  = ResultCode('UNRESOLVED', True)
 UNSUPPORTED = ResultCode('UNSUPPORTED', False)
 
+# Test metric values.
+
+class MetricValue(object):
+    def format(self):
+        raise RuntimeError("abstract method")
+
+class IntMetricValue(MetricValue):
+    def __init__(self, value):
+        self.value = value
+
+    def format(self):
+        return str(self.value)
+
+class RealMetricValue(MetricValue):
+    def __init__(self, value):
+        self.value = value
+
+    def format(self):
+        return '%.4f' % self.value
+
+# Test results.
+
 class Result(object):
     """Wrapper for the results of executing an individual test."""
 
@@ -41,6 +63,25 @@ class Result(object):
         self.output = output
         # The wall timing to execute the test, if timing.
         self.elapsed = elapsed
+        # The metrics reported by this test.
+        self.metrics = {}
+
+    def addMetric(self, name, value):
+        """
+        addMetric(name, value)
+
+        Attach a test metric to the test result, with the given name and
+        value. It is an error to attempt to attach a metric with the same
+        name multiple times.
+
+        Each value must be an instance of a MetricValue subclass.
+        """
+        if name in self.metrics:
+            raise ValueError("result already includes metrics for %r" % (
+                    name,))
+        if not isinstance(value, MetricValue):
+            raise TypeError("unexpected metric value: %r" % (value,))
+        self.metrics[name] = value
 
 # Test classes.
 
diff --git a/utils/lit/lit/main.py b/utils/lit/lit/main.py
index 50c9a66c8d3bfd0167cd33f4408f9068c301c40d..b93aa6fd0b6b65f7acbf0a2194a43b8a891ee31b 100755
@@ -45,15 +45,28 @@ class TestingProgressDisplay(object):
         if self.progressBar:
             self.progressBar.clear()
 
-        print('%s: %s (%d of %d)' % (test.result.code.name, test.getFullName(),
+        # Show the test result line.
+        test_name = test.getFullName()
+        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                      self.completed, self.numTests))
 
+        # Show the test failure output, if requested.
         if test.result.code.isFailure and self.opts.showOutput:
             print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                               '*'*20))
             print(test.result.output)
             print("*" * 20)
 
+        # Report test metrics, if present.
+        if test.result.metrics:
+            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
+                                               '*'*10))
+            items = sorted(test.result.metrics.items())
+            for metric_name, value in items:
+                print('%s: %s' % (metric_name, value.format()))
+            print("*" * 10)
+
+        # Ensure the output is flushed.
         sys.stdout.flush()
 
 def main(builtinParameters = {}):
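
With the display changes above, a verbose run of a test that reports metrics prints something along these lines (shown here for the metrics.ini example added below):

    PASS: test-data :: metrics.ini (1 of 1)
    ********** TEST 'test-data :: metrics.ini' RESULTS **********
    value0: 1
    value1: 2.3456
    **********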
diff --git a/utils/lit/tests/Inputs/test-data/lit.cfg b/utils/lit/tests/Inputs/test-data/lit.cfg
new file mode 100644 (file)
index 0000000..f5aba7b
--- /dev/null
@@ -0,0 +1,44 @@
+import os
+try:
+    import ConfigParser
+except ImportError:
+    import configparser as ConfigParser
+
+import lit.formats
+import lit.Test
+
+class DummyFormat(lit.formats.FileBasedTest):
+    def execute(self, test, lit_config):
+        # In this dummy format, expect that each test file is actually just a
+        # .ini format dump of the results to report.
+
+        source_path = test.getSourcePath()
+
+        cfg = ConfigParser.ConfigParser()
+        cfg.read(source_path)
+
+        # Create the basic test result.
+        result_code = cfg.get('global', 'result_code')
+        result_output = cfg.get('global', 'result_output')
+        result = lit.Test.Result(getattr(lit.Test, result_code),
+                                 result_output)
+
+        # Load additional metrics.
+        for key, value_str in cfg.items('results'):
+            value = eval(value_str)
+            if isinstance(value, int):
+                metric = lit.Test.IntMetricValue(value)
+            elif isinstance(value, float):
+                metric = lit.Test.RealMetricValue(value)
+            else:
+                raise RuntimeError("unsupported result type")
+            result.addMetric(key, metric)
+
+        return result
+
+config.name = 'test-data'
+config.suffixes = ['.ini']
+config.test_format = DummyFormat()
+config.test_source_root = None
+config.test_exec_root = None
+config.target_triple = None
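
Note that the int/float dispatch in the dummy format above leans on eval() turning the strings from the [results] section into native Python numbers, roughly:

    eval('1')        # -> 1 (int)        -> wrapped as IntMetricValue(1)
    eval('2.3456')   # -> 2.3456 (float) -> wrapped as RealMetricValue(2.3456)

Using eval is acceptable here only because this is a test-only dummy format reading trusted checked-in inputs.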
diff --git a/utils/lit/tests/Inputs/test-data/metrics.ini b/utils/lit/tests/Inputs/test-data/metrics.ini
new file mode 100644 (file)
index 0000000..267e516
--- /dev/null
@@ -0,0 +1,7 @@
+[global]
+result_code = PASS
+result_output = 'Test passed.'
+
+[results]
+value0 = 1
+value1 = 2.3456
\ No newline at end of file
diff --git a/utils/lit/tests/test-data.py b/utils/lit/tests/test-data.py
new file mode 100644 (file)
index 0000000..54909d7
--- /dev/null
@@ -0,0 +1,12 @@
+# Test features related to formats which support reporting additional test data.
+
+# RUN: %{lit} -j 1 -v %{inputs}/test-data > %t.out
+# RUN: FileCheck < %t.out %s
+
+# CHECK: -- Testing:
+
+# CHECK: PASS: test-data :: metrics.ini
+# CHECK-NEXT: *** TEST 'test-data :: metrics.ini' RESULTS ***
+# CHECK-NEXT: value0: 1
+# CHECK-NEXT: value1: 2.3456
+# CHECK-NEXT: ***