2 from xml.sax.saxutils import escape
3 from json import JSONEncoder
class ResultCode(object):
    """Test result codes."""

    # Registry of interned instances, keyed by code name.
    _instances = {}

    # We override __new__ and __getnewargs__ to ensure that pickling still
    # provides unique ResultCode objects in any particular instance.
    def __new__(cls, name, isFailure):
        res = cls._instances.get(name)
        if res is None:
            cls._instances[name] = res = super(ResultCode, cls).__new__(cls)
        return res

    def __getnewargs__(self):
        # On unpickle, __new__ is re-run with these args, so the interned
        # instance for this name is reused.
        return (self.name, self.isFailure)

    def __init__(self, name, isFailure):
        # The code's name (e.g. 'PASS').
        self.name = name
        # Whether this code represents a failing result.
        self.isFailure = isFailure

    def __repr__(self):
        return '%s%r' % (self.__class__.__name__,
                         (self.name, self.isFailure))
# The built-in result codes. ResultCode interns instances by name, so each
# of these is the unique object for that code.
PASS        = ResultCode('PASS', False)
FLAKYPASS   = ResultCode('FLAKYPASS', False)
XFAIL       = ResultCode('XFAIL', False)
FAIL        = ResultCode('FAIL', True)
XPASS       = ResultCode('XPASS', True)
UNRESOLVED  = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)
class MetricValue(object):
    """Abstract base class for test metric values."""

    def format(self):
        """
        format() -> str

        Convert this metric to a string suitable for displaying as part of the
        console output.
        """
        raise RuntimeError("abstract method")

    def todata(self):
        """
        todata() -> json-serializable data

        Convert this metric to content suitable for serializing in the JSON test
        output.
        """
        raise RuntimeError("abstract method")
class IntMetricValue(MetricValue):
    """An integer-valued test metric."""

    def __init__(self, value):
        # The raw integer value.
        self.value = value

    def format(self):
        return str(self.value)

    def todata(self):
        return self.value
class RealMetricValue(MetricValue):
    """A floating point test metric, displayed with four decimal places."""

    def __init__(self, value):
        # The raw float value.
        self.value = value

    def format(self):
        return '%.4f' % self.value

    def todata(self):
        return self.value
class JSONMetricValue(MetricValue):
    """
    JSONMetricValue is used for types that are representable in the output
    but that are otherwise uninterpreted.
    """

    def __init__(self, value):
        # Ensure the value is a serializable by trying to encode it; this
        # raises TypeError for unsupported types.
        # WARNING: The value may change before it is encoded again, and may
        #          not be encodable after the change.
        JSONEncoder().encode(value)
        self.value = value

    def format(self):
        e = JSONEncoder(indent=2, sort_keys=True)
        return e.encode(self.value)

    def todata(self):
        return self.value
def toMetricValue(value):
    """Convert a plain Python value to the corresponding MetricValue.

    MetricValue instances are returned unchanged; ints map to
    IntMetricValue, floats to RealMetricValue, and anything else is wrapped
    in a JSONMetricValue (whose constructor raises TypeError for values
    that are not JSON-serializable).
    """
    if isinstance(value, MetricValue):
        return value
    elif isinstance(value, int):
        # NOTE: the previous check `isinstance(value, long)` referenced the
        # Python 2 builtin `long`, which does not exist on Python 3 and made
        # every non-int fall-through raise NameError. Plain `int` is
        # arbitrary precision on Python 3, so it covers all integers.
        return IntMetricValue(value)
    elif isinstance(value, float):
        return RealMetricValue(value)
    else:
        # Try to create a JSONMetricValue and let the constructor throw
        # if value is not a valid type.
        return JSONMetricValue(value)
class Result(object):
    """Wrapper for the results of executing an individual test."""

    def __init__(self, code, output='', elapsed=None):
        # The result code (a ResultCode such as PASS or FAIL).
        self.code = code
        # The captured test output, if any.
        self.output = output
        # The wall timing to execute the test, if timing.
        self.elapsed = elapsed
        # The metrics reported by this test, keyed by name.
        self.metrics = {}

    def addMetric(self, name, value):
        """
        addMetric(name, value)

        Attach a test metric to the test result, with the given name and list of
        values. It is an error to attempt to attach the metrics with the same
        name multiple times.

        Each value must be an instance of a MetricValue subclass.
        """
        if name in self.metrics:
            raise ValueError("result already includes metrics for %r" % (
                    name,))
        if not isinstance(value, MetricValue):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value
class TestSuite(object):
    """TestSuite - Information on a group of tests.

    A test suite groups together a set of logically related tests.
    """

    def __init__(self, name, source_root, exec_root, config):
        # The name of the suite.
        self.name = name
        # The root of the suite's source tree.
        self.source_root = source_root
        # The root of the suite's execution (build/output) tree.
        self.exec_root = exec_root
        # The test suite configuration.
        self.config = config

    def getSourcePath(self, components):
        """Return the source-tree path formed by joining *components* under
        the suite's source root."""
        return os.path.join(self.source_root, *components)

    def getExecPath(self, components):
        """Return the execution-tree path formed by joining *components*
        under the suite's exec root."""
        return os.path.join(self.exec_root, *components)
168 """Test - Information on a single test instance."""
170 def __init__(self, suite, path_in_suite, config, file_path = None):
172 self.path_in_suite = path_in_suite
174 self.file_path = file_path
175 # A list of conditions under which this test is expected to fail. These
176 # can optionally be provided by test format handlers, and will be
177 # honored when the test result is supplied.
179 # The test result, once complete.
182 def setResult(self, result):
183 if self.result is not None:
184 raise ArgumentError("test result already set")
185 if not isinstance(result, Result):
186 raise ArgumentError("unexpected result type")
190 # Apply the XFAIL handling to resolve the result exit code.
191 if self.isExpectedToFail():
192 if self.result.code == PASS:
193 self.result.code = XPASS
194 elif self.result.code == FAIL:
195 self.result.code = XFAIL
197 def getFullName(self):
198 return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)
200 def getFilePath(self):
202 return self.file_path
203 return self.getSourcePath()
205 def getSourcePath(self):
206 return self.suite.getSourcePath(self.path_in_suite)
208 def getExecPath(self):
209 return self.suite.getExecPath(self.path_in_suite)
211 def isExpectedToFail(self):
213 isExpectedToFail() -> bool
215 Check whether this test is expected to fail in the current
216 configuration. This check relies on the test xfails property which by
217 some test formats may not be computed until the test has first been
221 # Check if any of the xfails match an available feature or the target.
222 for item in self.xfails:
223 # If this is the wildcard, it always fails.
227 # If this is an exact match for one of the features, it fails.
228 if item in self.config.available_features:
231 # If this is a part of the target triple, it fails.
232 if item in self.suite.config.target_triple:
238 def getJUnitXML(self):
239 test_name = self.path_in_suite[-1]
240 test_path = self.path_in_suite[:-1]
241 safe_test_path = [x.replace(".","_") for x in test_path]
242 safe_name = self.suite.name.replace(".","-")
245 class_name = safe_name + "." + "/".join(safe_test_path)
247 class_name = safe_name + "." + safe_name
249 xml = "<testcase classname='" + class_name + "' name='" + \
251 xml += " time='%.2f'" % (self.result.elapsed,)
252 if self.result.code.isFailure:
253 xml += ">\n\t<failure >\n" + escape(self.result.output)
254 xml += "\n\t</failure>\n</testcase>"