#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""
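
# Typical invocation (illustrative; see the option definitions below for the
# full set of flags):
#   lit -sv path/to/tests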

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)
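
    # At this point 'data' serializes to JSON of roughly this shape (values
    # are illustrative; 'metrics' appears only when a test reported any):
    #   { "__version__": [...],
    #     "elapsed": 12.34,
    #     "tests": [ { "name": "suite :: path/to/test", "code": "PASS",
    #                  "output": "...", "elapsed": 0.02 } ] }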

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()
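
# For --incremental, lit uses the filesystem itself as a persistent cache:
# each failing test gets its source file's mtime bumped, so a later run can
# sort recently touched (recently failing or recently edited) tests first.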

def update_incremental_cache(test):
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    # Bump the mtime of the failing test's file so it sorts first next run.
    os.utime(fname, None)

def sort_by_incremental_cache(run):
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            # Negate the mtime so that the most recently touched tests
            # sort first.
            return -os.path.getmtime(fname)
        except OSError:
            return 0
    run.tests.sort(key = lambda t: sortIndex(t))

def main(builtinParameters = {}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("-D", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress output except on test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
                     help=("Write XUnit-compatible XML test reports to the"
                           " specified file"), default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                          "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)
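
    # A few example invocations this parser accepts (paths and the parameter
    # name are illustrative):
    #   lit -sv test/
    #   lit -j8 --filter 'CodeGen' --time-tests test/
    #   lit -Dbuild_mode=Release test/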

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1: http://bugs.python.org/issue1731717
        # This bug hasn't been seen with 2.5.2 and later, so only enable
        # multiple threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name, val = entry, ''
        else:
            name, val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for result_test in run.tests:
            if result_test.suite not in suitesAndTests:
                suitesAndTests[result_test.suite] = []
            suitesAndTests[result_test.suite].append(result_test)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts, ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts, ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                opts.filter))
        run.tests = [result_test for result_test in run.tests
                     if rex.search(result_test.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda result_test: result_test.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    # Because some tests use threads internally, and at least on Linux each
    # of these threads counts toward the current process limit, try to
    # raise the (soft) process limit so that tests don't fail due to
    # resource exhaustion.
    try:
        cpus = lit.util.detectCPUs()
        desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor
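        # For example, 8 threads on an 8-core machine asks for a limit of
        # 8 * 8 * 2 = 128 processes.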

        # Import the resource module here, inside this try block, because it
        # will likely fail on Windows.
        import resource

        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
        desired_limit = min(desired_limit, max_procs_hard)

        if max_procs_soft < desired_limit:
            resource.setrlimit(resource.RLIMIT_NPROC,
                               (desired_limit, max_procs_hard))
            litConfig.note('raised the process limit from %d to %d' %
                           (max_procs_soft, desired_limit))
    except Exception:
        pass

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra,
                                                        opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title, code in (('Unexpected Passing Tests', lit.Test.XPASS),
                        ('Failing Tests', lit.Test.FAIL),
                        ('Unresolved Tests', lit.Test.UNRESOLVED),
                        ('Unsupported Tests', lit.Test.UNSUPPORTED),
                        ('Expected Failing Tests', lit.Test.XFAIL)):
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
            continue
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name, code in (('Expected Passes    ', lit.Test.PASS),
                       ('Passes With Retry  ', lit.Test.FLAKYPASS),
                       ('Expected Failures  ', lit.Test.XFAIL),
                       ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                       ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                       ('Unexpected Passes  ', lit.Test.XPASS),
                       ('Unexpected Failures', lit.Test.FAIL)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code, []))
        if N:
            print('  %s: %d' % (name, N))

    if opts.xunit_output_file:
        # Collect the tests, indexed by test suite.
        by_suite = {}
        for result_test in run.tests:
            suite = result_test.suite.config.name
            if suite not in by_suite:
                by_suite[suite] = {
                    'passes'   : 0,
                    'failures' : 0,
                    'tests'    : [] }
            by_suite[suite]['tests'].append(result_test)
            if result_test.result.code.isFailure:
                by_suite[suite]['failures'] += 1
            else:
                by_suite[suite]['passes'] += 1
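
        # The report written below has this general shape (illustrative; the
        # per-test elements come from each test's getJUnitXML()):
        #   <?xml version="1.0" encoding="UTF-8" ?>
        #   <testsuites>
        #   <testsuite name='clang' tests='3' failures='1'>
        #   ...
        #   </testsuite>
        #   </testsuites>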
        xunit_output_file = open(opts.xunit_output_file, "w")
        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
        xunit_output_file.write("<testsuites>\n")
        for suite_name, suite in by_suite.items():
            # Replace dots in the suite name, since many JUnit consumers
            # interpret '.' as a package separator.
            safe_suite_name = suite_name.replace(".", "-")
            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
            xunit_output_file.write(" tests='" + str(suite['passes'] +
                                                     suite['failures']) + "'")
            xunit_output_file.write(" failures='" + str(suite['failures']) +
                                    "'>\n")
            for result_test in suite['tests']:
                xunit_output_file.write(result_test.getJUnitXML() + "\n")
            xunit_output_file.write("</testsuite>\n")
        xunit_output_file.write("</testsuites>")
        xunit_output_file.close()

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()