#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""
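
# Typical invocation, assuming a directory of lit-configured tests, e.g.:
#
#   lit -sv path/to/tests
#
# which gives succinct progress output plus the output of failing tests.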

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
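    """Progress reporter passed to the test run as its display.

    Prints one result line per completed test and drives the optional
    progress bar.
    """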

    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
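        # Called once per completed test; test.result is populated by now.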
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if not test.result.code.isFailure and \
                (self.opts.quiet or self.opts.succinct):
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
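    # Writes the collected results to output_path as JSON. The schema, as
    # built below, is roughly:
    #   { "__version__": <lit version info>,
    #     "elapsed": <total testing time in seconds>,
    #     "tests": [ {"name", "code", "output", "elapsed", "metrics"?}, ... ] }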
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

def update_incremental_cache(test):
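    # The incremental "cache" is just each test file's mtime: touch the
    # source of a failing test so that a later `lit -i` run schedules it
    # first (see sort_by_incremental_cache below).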
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run, litConfig):
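    # Order by mtime, newest first: files touched by update_incremental_cache
    # (recent failures) and recently edited tests sort to the front.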
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            index = -os.path.getmtime(fname)
        except OSError:
            index = 0
        return index
    run.tests.sort(key = lambda t: sortIndex(t))

def main(builtinParameters = {}):
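    # builtinParameters only seeds the user-parameter dictionary built below;
    # it is copied (never mutated), so the mutable default argument is safe
    # here.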

    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")
142 parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
143 help="Number of testing threads",
144 type=int, action="store", default=None)
145 parser.add_option("", "--config-prefix", dest="configPrefix",
146 metavar="NAME", help="Prefix for 'lit' config files",
147 action="store", default=None)
148 parser.add_option("", "--param", dest="userParameters",
150 help="Add 'NAME' = 'VAL' to the user defined parameters",
151 type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress all output except for test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                          "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
224 group.add_option("", "--use-processes", dest="useProcesses",
225 help="Run tests in parallel with processes (not threads)",
226 action="store_true", default=useProcessesIsDefault)
227 group.add_option("", "--use-threads", dest="useProcesses",
228 help="Run tests in parallel with threads (not processes)",
229 action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1: http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable
        # multiple threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
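    # e.g. --param build_mode=Release yields userParams['build_mode'] ==
    # 'Release'; a bare --param NAME yields an empty string value.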
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run, litConfig)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
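        # Try the curses-based progress bar first; ProgressBar raises
        # ValueError on terminals without the capabilities it needs, in
        # which case fall back to a simple one-line bar.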
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()