"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""
from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.LitConfig
import lit.ProgressBar
import lit.Test
import lit.discovery
import lit.run
import lit.util
class TestingProgressDisplay(object):
    """Console reporter for test progress.

    Prints one result line per completed test (subject to the --quiet and
    --succinct options) and drives an optional progress bar.
    """

    def __init__(self, opts, numTests, progressBar=None):
        # opts: the parsed command line options (quiet, succinct,
        # incremental, showOutput, ...).
        self.opts = opts
        # Total number of scheduled tests, for the "(N of M)" output.
        self.numTests = numTests
        # Optional lit.ProgressBar-style object; None disables bar updates.
        self.progressBar = progressBar
        # Number of tests completed so far.
        self.completed = 0

    def finish(self):
        """Clean up the display after the last test has been reported."""
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            # Succinct mode wrote progress on one line; terminate it.
            sys.stdout.write('\n')

    def update(self, test):
        """Record one finished test and report it to the console.

        test is expected to carry a populated .result (code, output,
        metrics) by the time this is called.
        """
        self.completed += 1

        # In incremental mode, bump failing tests' file timestamps so the
        # next run schedules them first.
        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        # Failures are always shown; successes only at full verbosity.
        shouldShow = test.result.code.isFailure or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()
def write_test_results(run, lit_config, testing_time, output_path):
    """Write a JSON summary of *run*'s results to *output_path*.

    The document records a schema version (the lit version), the total
    elapsed time, and one record per test: name, result code, captured
    output, elapsed time, and any metrics. Reports a fatal error via
    *lit_config* when the json module is unavailable (Python 2.5).
    """
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the results file; the context manager guarantees the handle is
    # closed even if serialization raises.
    with open(output_path, 'w') as f:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
def update_incremental_cache(test):
    """Touch a failing test's file so --incremental schedules it first.

    sort_by_incremental_cache() orders tests by file mtime (newest first),
    so bumping the timestamp of a failure floats it to the front of the
    next run. Passing tests are left untouched.
    """
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    # None -> set access/modification times to the current time.
    os.utime(fname, None)
def sort_by_incremental_cache(run):
    """Order run.tests so most-recently-modified test files run first.

    Combined with update_incremental_cache(), which touches failing tests'
    files, this makes --incremental re-run recent failures and freshly
    edited tests before everything else.
    """
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            # Negate so newer mtimes sort before older ones.
            return -os.path.getmtime(fname)
        except OSError:
            # Missing/unreadable file: no cache info, schedule it last
            # (0 sorts after every negative mtime).
            return 0
    run.tests.sort(key = lambda t: sortIndex(t))
def main(builtinParameters = {}):
    """Command line entry point for lit.

    *builtinParameters* seeds the user-defined parameter dictionary; it is
    only read (copied via dict()), never mutated, so the mutable default
    argument is safe here and kept for interface compatibility.

    Exits the process with: 0 on success, 1 on test failures, 2 on errors
    or interrupt.
    """
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    # Parse the command line options.
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                          "failure timestamps)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717). The bug has not
        # been seen with 2.5.2 and later, so only enable multiple threads
        # by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        # A bare NAME (no '=') maps to the empty string.
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit; --show-* are informational queries, not test runs.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                # Terminal does not support the curses bar; fall back.
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED),
                       ('Unsupported Tests', lit.Test.UNSUPPORTED),
                       ('Expected Failing Tests', lit.Test.XFAIL)):
        # XFAIL and UNSUPPORTED groups are hidden unless explicitly requested.
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
            continue
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    # Print the summary counts, one line per non-empty result kind.
    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__=='__main__':
    main()