#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""
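
# Typical invocations (illustrative only; see lit.pod for the full list of
# options, all of which are defined in main() below):
#
#   lit -sv test/                 # run tests under 'test/', succinct + verbose
#   lit --filter 'X86' test/      # run only tests whose path matches the regex
#   lit -j 8 --time-tests test/   # use 8 workers and report per-test times
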
from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if not test.result.code.isFailure and \
                (self.opts.quiet or self.opts.succinct):
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
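    # Shape of the emitted JSON (a sketch, derived from the code below):
    #   { "__version__": <lit version info>, "elapsed": <seconds>,
    #     "tests": [ { "name": ..., "code": ..., "output": ...,
    #                  "elapsed": ..., "metrics": { ... } }, ... ] }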
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()
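
# Incremental mode support: update_incremental_cache bumps the mtime of each
# failing test's source file, and sort_by_incremental_cache orders tests most
# recently modified first, so tests that just failed (or were just edited) run
# at the front of the next invocation.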
def update_incremental_cache(test):
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run):
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            return -os.path.getmtime(fname)
        except:
            return 0
    run.tests.sort(key = lambda t: sortIndex(t))

def main(builtinParameters = {}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress all output except for test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                          "failing test timestamps)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1: http://bugs.python.org/issue1731717
        # The bug has not been seen with 2.5.2 and later, so only enable
        # multiple threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val
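    # e.g. '--param build_mode=Release' yields userParams['build_mode'] ==
    # 'Release'; a bare '--param foo' maps 'foo' to the empty string.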

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra,
                                                        opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                # Fall back to the simple progress bar if the terminal lacks
                # the necessary curses capabilities.
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')
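
    # With --time-tests, render a coarse textual histogram of per-test wall
    # times via lit.util.printHistogram.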
    if opts.timeTests and run.tests:
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()