"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""
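
# Typical invocations, for illustration only (all flags are defined in
# main() below):
#
#   lit -sv <test-dir>                 # succinct run, show failure output
#   lit -j 8 --filter='regex' <path>   # 8 threads, subset of tests by name
#   lit -D name=value <path>           # pass a parameter to test configs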

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.discovery
import lit.run
import lit.util

class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            self.opts.showAllOutput or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if (test.result.code.isFailure and self.opts.showOutput) or \
           self.opts.showAllOutput:
            if test.result.code.isFailure:
                print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                                  '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()
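
# Protocol note: the driver (main() below) constructs one
# TestingProgressDisplay, calls update() once per completed test, and
# finish() once the whole run is done.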

def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
    finally:
        f.close()
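
# For reference, the JSON written above has this shape (values are
# illustrative, not real output):
#
#   {
#     "__version__": [...],        # lit.__versioninfo__
#     "elapsed": 12.34,
#     "tests": [
#       { "name": "suite :: path/to/test",
#         "code": "PASS",
#         "output": "...",
#         "elapsed": 0.42,
#         "metrics": { ... } }     # only when the test reported metrics
#     ]
#   }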

def update_incremental_cache(test):
    # Only failures are recorded; their source files get a fresh mtime so a
    # later --incremental run can sort them to the front.
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run):
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            # Most recently modified (or most recently failed) tests first.
            return -os.path.getmtime(fname)
        except OSError:
            return 0
    run.tests.sort(key=sortIndex)
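
# How incremental mode fits together: update_incremental_cache() above bumps
# the mtime of each test that just failed, and sort_by_incremental_cache()
# orders the run newest-mtime-first, so recently failed (or recently edited)
# tests are re-run before everything else.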

def main(builtinParameters = {}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")
143 parser.add_option("", "--version", dest="show_version",
144 help="Show version and exit",
145 action="store_true", default=False)
146 parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
147 help="Number of testing threads",
148 type=int, action="store", default=None)
149 parser.add_option("", "--config-prefix", dest="configPrefix",
150 metavar="NAME", help="Prefix for 'lit' config files",
151 action="store", default=None)
152 parser.add_option("-D", "--param", dest="userParameters",
154 help="Add 'NAME' = 'VAL' to the user defined parameters",
155 type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality they provide.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress all output except for errors",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show test output for failures",
                     action="store_true", default=False)
    group.add_option("-a", "--show-all", dest="showAllOutput",
                     help="Display all command lines and output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses-based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
                     help=("Write XUnit-compatible XML test reports to the"
                           " specified file"), default=None)
    group.add_option("", "--timeout", dest="maxIndividualTestTime",
                     help="Maximum time to spend running a single test "
                          "(in seconds). 0 means no time limit. "
                          "[Default: 0]",
                     type=int, default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                          "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python prior to 2.5.2 has a race condition that can cause lit to
        # fail whenever numThreads > 1
        # (http://bugs.python.org/issue1731717), so only enable multiple
        # threads by default on 2.5.2 and later.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name, val = entry, ''
        else:
            name, val = entry.split('=', 1)
        userParams[name] = val
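
    # For example (illustrative): '-D build_mode=Release -D verbose' yields
    # userParams == {'build_mode': 'Release', 'verbose': ''}.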

    # Decide what the requested maximum individual test time should be.
    if opts.maxIndividualTestTime is not None:
        maxIndividualTestTime = opts.maxIndividualTestTime
    else:
        # Default is zero, i.e. no timeout.
        maxIndividualTestTime = 0

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix,
        maxIndividualTestTime = maxIndividualTestTime)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    # After test discovery the configuration might have changed the
    # maxIndividualTestTime. If it was explicitly set on the command line,
    # override whatever the test configuration requested.
    if opts.maxIndividualTestTime is not None:
        if opts.maxIndividualTestTime != litConfig.maxIndividualTestTime:
            litConfig.note(('The test suite configuration requested an individual'
                ' test timeout of {0} seconds but a timeout of {1} seconds was'
                ' requested on the command line. Forcing timeout to be {1}'
                ' seconds')
                .format(litConfig.maxIndividualTestTime,
                        opts.maxIndividualTestTime))
            litConfig.maxIndividualTestTime = opts.maxIndividualTestTime

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for result_test in run.tests:
            if result_test.suite not in suitesAndTests:
                suitesAndTests[result_test.suite] = []
            suitesAndTests[result_test.suite].append(result_test)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts, ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts, ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit after listing; no tests are run.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                opts.filter))
        run.tests = [result_test for result_test in run.tests
                     if rex.search(result_test.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda result_test: result_test.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    # Because some tests use threads internally, and at least on Linux each
    # of these threads counts toward the current process limit, try to
    # raise the (soft) process limit so that tests don't fail due to
    # resource exhaustion.
    try:
        cpus = lit.util.detectCPUs()
        desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor

        # Import the resource module here inside this try block because it
        # will likely fail on Windows.
        import resource

        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
        desired_limit = min(desired_limit, max_procs_hard)

        if max_procs_soft < desired_limit:
            resource.setrlimit(resource.RLIMIT_NPROC,
                               (desired_limit, max_procs_hard))
            litConfig.note('raised the process limit from %d to %d' % \
                           (max_procs_soft, desired_limit))
    except Exception:
        # Raising the limit is best effort; ignore platforms where the
        # resource module or RLIMIT_NPROC is unavailable.
        pass
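
    # Worked example (illustrative): 8 testing threads on an 8-CPU machine
    # ask for a limit of 8 * 8 * 2 = 128 processes; the soft limit is only
    # ever raised, and never above the hard RLIMIT_NPROC limit.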

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra,
                                                        opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title, code in (('Unexpected Passing Tests', lit.Test.XPASS),
                        ('Failing Tests', lit.Test.FAIL),
                        ('Unresolved Tests', lit.Test.UNRESOLVED),
                        ('Unsupported Tests', lit.Test.UNSUPPORTED),
                        ('Expected Failing Tests', lit.Test.XFAIL),
                        ('Timed Out Tests', lit.Test.TIMEOUT)):
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
            continue
        elts = byCode.get(code)
        if not elts:
            continue
        print('*' * 20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')
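
    # The loop above prints blocks of the following form (illustrative):
    #
    #   ********************
    #   Failing Tests (2):
    #       suite :: basic/a.test
    #       suite :: basic/b.test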

    if opts.timeTests and run.tests:
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name, code in (('Expected Passes    ', lit.Test.PASS),
                       ('Passes With Retry  ', lit.Test.FLAKYPASS),
                       ('Expected Failures  ', lit.Test.XFAIL),
                       ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                       ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                       ('Unexpected Passes  ', lit.Test.XPASS),
                       ('Unexpected Failures', lit.Test.FAIL),
                       ('Individual Timeouts', lit.Test.TIMEOUT)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code, []))
        if N:
            print('  %s: %d' % (name, N))
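
    # The summary printed above looks like (illustrative):
    #
    #   Expected Passes    : 1234
    #   Unexpected Failures: 2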

    if opts.xunit_output_file:
        # Collect the tests, indexed by test suite.
        by_suite = {}
        for result_test in run.tests:
            suite = result_test.suite.config.name
            if suite not in by_suite:
                by_suite[suite] = {
                    'passes'   : 0,
                    'failures' : 0,
                    'tests'    : [] }
            by_suite[suite]['tests'].append(result_test)
            if result_test.result.code.isFailure:
                by_suite[suite]['failures'] += 1
            else:
                by_suite[suite]['passes'] += 1
        xunit_output_file = open(opts.xunit_output_file, "w")
        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
        xunit_output_file.write("<testsuites>\n")
        for suite_name, suite in by_suite.items():
            safe_suite_name = suite_name.replace(".", "-")
            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
            xunit_output_file.write(" tests='" + str(suite['passes'] +
                                                     suite['failures']) + "'")
            xunit_output_file.write(" failures='" + str(suite['failures']) +
                                    "'>\n")
            for result_test in suite['tests']:
                xunit_output_file.write(result_test.getJUnitXML() + "\n")
            xunit_output_file.write("</testsuite>\n")
        xunit_output_file.write("</testsuites>")
        xunit_output_file.close()
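
    # Skeleton of the emitted XUnit report, per the write calls above
    # (element content is illustrative):
    #
    #   <?xml version="1.0" encoding="UTF-8" ?>
    #   <testsuites>
    #   <testsuite name='suite-name' tests='3' failures='1'>
    #   ...one getJUnitXML() fragment per test...
    #   </testsuite>
    #   </testsuites>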

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()