"""lit - LLVM Integrated Tester.

See lit.pod for more information.
"""
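
# Typical invocations, for orientation (illustrative paths; the option groups
# defined in main() below are the authoritative reference):
#
#   lit test/                      # run all tests discovered under test/
#   lit -sv test/                  # succinct progress, show output on failure
#   lit -j8 --filter 'foo.*' test/ # eight workers, path-filtered subset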
from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.discovery, lit.LitConfig, lit.ProgressBar
import lit.run, lit.Test, lit.util

class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1
        if self.opts.incremental:
            update_incremental_cache(test)
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return
        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if (test.result.code.isFailure and self.opts.showOutput) or \
           self.opts.showAllOutput:
            if test.result.code.isFailure:
                print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                                  '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

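# For orientation, a result line printed by update() looks like this
# (illustrative suite and test name; the code name comes from lit.Test):
#
#   PASS: MySuite :: foo/basic.ll (12 of 100)
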
def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }
        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Use a context manager so the file is closed even if dump() raises.
    with open(output_path, 'w') as f:
        json.dump(data, f, indent=2, sort_keys=True)
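    # For reference, the emitted report looks roughly like this (illustrative
    # values, not real output):
    #
    #   {
    #     "__version__": [0, 6, 0],
    #     "elapsed": 12.34,
    #     "tests": [
    #       { "code": "PASS",
    #         "elapsed": 0.12,
    #         "name": "MySuite :: foo/basic.ll",
    #         "output": "" }
    #     ]
    #   }
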
def update_incremental_cache(test):
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run):
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            return -os.path.getmtime(fname)
        except OSError:
            return 0
    run.tests.sort(key = lambda t: sortIndex(t))

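# How incremental mode fits together: update_incremental_cache() bumps the
# mtime of every failing test file, and sortIndex() orders by descending
# mtime, so recently failed (or recently edited) tests run first next time.
# Illustrative ordering, with hypothetical files and mtimes:
#
#   just_failed.ll   mtime 1700000300 -> key -1700000300 (runs first)
#   old_passing.ll   mtime 1600000000 -> key -1600000000 (runs later)
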
def main(builtinParameters = {}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("-D", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress all output except errors",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show test output for failures",
                     action="store_true", default=False)
    group.add_option("-a", "--show-all", dest="showAllOutput",
                     help="Display all command lines and output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use the curses-based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
                     help=("Write XUnit-compatible XML test reports to the"
                           " specified file"), default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                          "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717). I haven't seen
        # this bug occur with 2.5.2 and later, so only enable multiple threads
        # by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user-defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name, val = entry, ''
        else:
            name, val = entry.split('=', 1)
        userParams[name] = val
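    # For example, "-D build_mode=Release -D verbose" (hypothetical names)
    # yields userParams == {'build_mode': 'Release', 'verbose': ''}.
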
    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for result_test in run.tests:
            if result_test.suite not in suitesAndTests:
                suitesAndTests[result_test.suite] = []
            suitesAndTests[result_test.suite].append(result_test)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit after listing; nothing is executed in this mode.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [result_test for result_test in run.tests
                     if rex.search(result_test.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda result_test: result_test.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    # Because some tests use threads internally, and at least on Linux each
    # of these threads counts toward the current process limit, try to
    # raise the (soft) process limit so that tests don't fail due to
    # resource exhaustion.
    try:
        cpus = lit.util.detectCPUs()
        desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor

        # Import the resource module here inside this try block because it
        # will likely fail on Windows.
        import resource

        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
        desired_limit = min(desired_limit, max_procs_hard)

        if max_procs_soft < desired_limit:
            resource.setrlimit(resource.RLIMIT_NPROC,
                               (desired_limit, max_procs_hard))
            litConfig.note('raised the process limit from %d to %d' %
                           (max_procs_soft, desired_limit))
    except Exception:
        pass
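    # Illustrative numbers: with 8 detected CPUs and numThreads == 8, the code
    # above asks for a soft RLIMIT_NPROC of 8 * 8 * 2 = 128, clamped to the
    # hard limit; nothing changes if the soft limit is already high enough or
    # if the resource module is unavailable (e.g. on Windows).
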
    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra,
                                                        opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED),
                       ('Unsupported Tests', lit.Test.UNSUPPORTED),
                       ('Expected Failing Tests', lit.Test.XFAIL)):
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
            continue
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Passes With Retry  ', lit.Test.FLAKYPASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))
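    # Example summary block as printed by the loop above (illustrative
    # counts):
    #
    #   Expected Passes    : 240
    #   Expected Failures  : 5
    #   Unexpected Failures: 3
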
    if opts.xunit_output_file:
        # Collect the tests, indexed by test suite.
        by_suite = {}
        for result_test in run.tests:
            suite = result_test.suite.config.name
            if suite not in by_suite:
                by_suite[suite] = {
                    'passes'   : 0,
                    'failures' : 0,
                    'tests'    : [] }
            by_suite[suite]['tests'].append(result_test)
            if result_test.result.code.isFailure:
                by_suite[suite]['failures'] += 1
            else:
                by_suite[suite]['passes'] += 1
        xunit_output_file = open(opts.xunit_output_file, "w")
        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
        xunit_output_file.write("<testsuites>\n")
        for suite_name, suite in by_suite.items():
            safe_suite_name = suite_name.replace(".", "-")
            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
            xunit_output_file.write(" tests='" + str(suite['passes'] +
                                                     suite['failures']) + "'")
            xunit_output_file.write(" failures='" + str(suite['failures']) +
                                    "'>\n")
            for result_test in suite['tests']:
                xunit_output_file.write(result_test.getJUnitXML() + "\n")
            xunit_output_file.write("</testsuite>\n")
        xunit_output_file.write("</testsuites>")
        xunit_output_file.close()
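        # The resulting report has roughly this shape (illustrative suite
        # name and counts; each test contributes one element produced by
        # getJUnitXML()):
        #
        #   <?xml version="1.0" encoding="UTF-8" ?>
        #   <testsuites>
        #   <testsuite name='my-suite' tests='12' failures='2'>
        #   ...
        #   </testsuite>
        #   </testsuites>
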
    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()