4 lit - LLVM Integrated Tester.
6 See lit.pod for more information.
9 from __future__ import absolute_import
10 import math, os, platform, random, re, sys, time
12 import lit.ProgressBar
19 class TestingProgressDisplay(object):
# Console reporter driven by the test run: prints a result line per finished
# test and optionally animates a lit.ProgressBar.
# NOTE(review): this is a numbered extract with indentation stripped and
# interior lines elided (gaps in the embedded numbers, e.g. 21, 23, 25-28,
# are missing source lines). Comments describe only what the visible lines show.
20 def __init__(self, opts, numTests, progressBar=None):
# opts: parsed command-line options (the class reads opts.incremental,
# opts.quiet, opts.succinct, opts.showOutput below); numTests: total test
# count used in the "(%d of %d)" progress text; progressBar: optional
# progress-bar object with update()/clear() methods, or None.
22 self.numTests = numTests
24 self.progressBar = progressBar
# The three lines below (29-33) appear to belong to a separate method whose
# `def` line is elided -- the `elif` at line 32 has no visible matching `if`.
# Presumably a finish/teardown path: clear the bar or end the succinct
# output line -- TODO confirm against the full source.
29 self.progressBar.clear()
32 elif self.opts.succinct:
33 sys.stdout.write('\n')
35 def update(self, test):
# Record one completed test and emit its result line. `self.completed` is
# read below but its initialization/increment lines are elided from view.
38 if self.opts.incremental:
# Touch the test file's mtime on failure so -i runs it early next time.
39 update_incremental_cache(test)
# Advance the bar by the completed fraction (float() keeps this a real
# division under Python 2's integer `/`). The second argument is elided.
42 self.progressBar.update(float(self.completed)/self.numTests,
# In quiet/succinct mode, non-failures get no per-test line; the body of
# this guard (presumably an early return) is elided.
45 if not test.result.code.isFailure and \
46 (self.opts.quiet or self.opts.succinct):
# Clear the bar before printing so the result line is not overdrawn.
50 self.progressBar.clear()
52 # Show the test result line.
53 test_name = test.getFullName()
54 print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
55 self.completed, self.numTests))
57 # Show the test failure output, if requested.
58 if test.result.code.isFailure and self.opts.showOutput:
# The closing arguments of this banner print (line 60) are elided.
59 print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
61 print(test.result.output)
64 # Report test metrics, if present.
65 if test.result.metrics:
66 print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
# Sort metrics by name for deterministic output ordering.
68 items = sorted(test.result.metrics.items())
69 for metric_name, value in items:
70 print('%s: %s ' % (metric_name, value.format()))
73 # Ensure the output is flushed.
76 def write_test_results(run, lit_config, testing_time, output_path):
# Serialize the run's per-test results to a JSON file at output_path
# (used by the -o/--output option). Aborts via lit_config.fatal when the
# json module is unavailable (the import guard, ~line 79, is elided here).
# NOTE(review): numbered extract with lines elided -- e.g. `data = {}`
# (line 83) and the `test_data = {` opener (line 93) are missing from view.
80 lit_config.fatal('test output unsupported with Python 2.5')
82 # Construct the data we will write.
84 # Encode the current lit version as a schema version.
85 data['__version__'] = lit.__versioninfo__
86 data['elapsed'] = testing_time
87 # FIXME: Record some information on the lit configuration used?
88 # FIXME: Record information from the individual test suites?
# One record per test: full name, result-code name, captured output, and
# wall-clock time.
91 data['tests'] = tests_data = []
92 for test in run.tests:
94 'name' : test.getFullName(),
95 'code' : test.result.code.name,
96 'output' : test.result.output,
97 'elapsed' : test.result.elapsed }
99 # Add test metrics, if present.
100 if test.result.metrics:
101 test_data['metrics'] = metrics_data = {}
102 for key, value in test.result.metrics.items():
# todata() converts each metric value to a JSON-serializable form.
103 metrics_data[key] = value.todata()
105 tests_data.append(test_data)
# Write the aggregate record; sort_keys gives stable, diffable output.
# The close/finally handling (lines 109+) is elided from this view.
108 f = open(output_path, 'w')
110 json.dump(data, f, indent=2, sort_keys=True)
115 def update_incremental_cache(test):
# Touch the test file's modification time so a later --incremental run
# sorts it first. The body of the guard below (line 117, presumably an
# early `return` for non-failures) is elided from this listing -- TODO
# confirm; as visible, only the failure check and the utime call remain.
116 if not test.result.code.isFailure:
118 fname = test.getFilePath()
# utime(fname, None) sets the file's atime/mtime to the current time.
119 os.utime(fname, None)
121 def sort_by_incremental_cache(run):
# Order run.tests most-recently-modified first (negated mtime ascending),
# so tests touched by update_incremental_cache run before the rest.
# NOTE(review): lines 122, 124, 126-127 are elided -- the visible
# `sortIndex` used as the key at line 128 is presumably an inner helper
# whose `def` and error handling are missing from this listing; confirm
# against the full source.
123 fname = test.getFilePath()
125 return -os.path.getmtime(fname)
128 run.tests.sort(key = lambda t: sortIndex(t))
130 def main(builtinParameters = {}):
# Entry point for the lit driver: parse options, discover tests, run them,
# and print a summary. builtinParameters seeds the user-parameter dict.
# NOTE(review): `builtinParameters = {}` is a mutable default argument --
# harmless only if no caller/callee ever mutates it; worth replacing with
# a None sentinel in a code change.
# NOTE(review): numbered extract with many lines elided (gaps in the
# embedded numbers, e.g. 233, 236-238, 248-252, 305-308, 341-344, 359-363);
# comments below describe only the visible lines.
131 # Use processes by default on Unix platforms.
132 isWindows = platform.system() == 'Windows'
133 useProcessesIsDefault = not isWindows
# Build the option parser. optparse is deprecated in modern Python but was
# the standard at this vintage.
136 from optparse import OptionParser, OptionGroup
137 parser = OptionParser("usage: %prog [options] {file-or-path}")
139 parser.add_option("", "--version", dest="show_version",
140 help="Show version and exit",
141 action="store_true", default=False)
142 parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
143 help="Number of testing threads",
144 type=int, action="store", default=None)
145 parser.add_option("", "--config-prefix", dest="configPrefix",
146 metavar="NAME", help="Prefix for 'lit' config files",
147 action="store", default=None)
148 parser.add_option("", "--param", dest="userParameters",
150 help="Add 'NAME' = 'VAL' to the user defined parameters",
151 type=str, action="append", default=[])
153 group = OptionGroup(parser, "Output Format")
154 # FIXME: I find these names very confusing, although I like the
156 group.add_option("-q", "--quiet", dest="quiet",
157 help="Suppress no error output",
158 action="store_true", default=False)
159 group.add_option("-s", "--succinct", dest="succinct",
160 help="Reduce amount of output",
161 action="store_true", default=False)
162 group.add_option("-v", "--verbose", dest="showOutput",
163 help="Show all test output",
164 action="store_true", default=False)
165 group.add_option("-o", "--output", dest="output_path",
166 help="Write test results to the provided path",
167 action="store", type=str, metavar="PATH")
168 group.add_option("", "--no-progress-bar", dest="useProgressBar",
169 help="Do not use curses based progress bar",
170 action="store_false", default=True)
171 parser.add_option_group(group)
173 group = OptionGroup(parser, "Test Execution")
174 group.add_option("", "--path", dest="path",
175 help="Additional paths to add to testing environment",
176 action="append", type=str, default=[])
177 group.add_option("", "--vg", dest="useValgrind",
178 help="Run tests under valgrind",
179 action="store_true", default=False)
180 group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
181 help="Check for memory leaks under valgrind",
182 action="store_true", default=False)
183 group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
184 help="Specify an extra argument for valgrind",
185 type=str, action="append", default=[])
186 group.add_option("", "--time-tests", dest="timeTests",
187 help="Track elapsed wall time for each test",
188 action="store_true", default=False)
189 group.add_option("", "--no-execute", dest="noExecute",
190 help="Don't execute any tests (assume PASS)",
191 action="store_true", default=False)
192 parser.add_option_group(group)
194 group = OptionGroup(parser, "Test Selection")
195 group.add_option("", "--max-tests", dest="maxTests", metavar="N",
196 help="Maximum number of tests to run",
197 action="store", type=int, default=None)
198 group.add_option("", "--max-time", dest="maxTime", metavar="N",
199 help="Maximum time to spend testing (in seconds)",
200 action="store", type=float, default=None)
201 group.add_option("", "--shuffle", dest="shuffle",
202 help="Run tests in random order",
203 action="store_true", default=False)
204 group.add_option("-i", "--incremental", dest="incremental",
205 help="Run modified and failing tests first (updates "
207 action="store_true", default=False)
208 group.add_option("", "--filter", dest="filter", metavar="REGEX",
209 help=("Only run tests with paths matching the given "
210 "regular expression"),
211 action="store", default=None)
212 parser.add_option_group(group)
214 group = OptionGroup(parser, "Debug and Experimental Options")
215 group.add_option("", "--debug", dest="debug",
216 help="Enable debugging (for 'lit' development)",
217 action="store_true", default=False)
218 group.add_option("", "--show-suites", dest="showSuites",
219 help="Show discovered test suites",
220 action="store_true", default=False)
221 group.add_option("", "--show-tests", dest="showTests",
222 help="Show all discovered tests",
223 action="store_true", default=False)
# --use-processes/--use-threads share one dest; whichever is given last
# wins, and the platform-dependent default applies otherwise.
224 group.add_option("", "--use-processes", dest="useProcesses",
225 help="Run tests in parallel with processes (not threads)",
226 action="store_true", default=useProcessesIsDefault)
227 group.add_option("", "--use-threads", dest="useProcesses",
228 help="Run tests in parallel with threads (not processes)",
229 action="store_false", default=useProcessesIsDefault)
230 parser.add_option_group(group)
232 (opts, args) = parser.parse_args()
234 if opts.show_version:
# The early return after printing the version (line 236) is elided.
235 print("lit %s" % (lit.__version__,))
239 parser.error('No inputs specified')
241 if opts.numThreads is None:
242 # Python <2.5 has a race condition causing lit to always fail with numThreads>1
243 # http://bugs.python.org/issue1731717
244 # I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
245 # threads by default there.
# hexversion 0x2050200 == CPython 2.5.2; the pre-2.5.2 fallback branch
# (presumably numThreads = 1) is elided from this listing.
246 if sys.hexversion >= 0x2050200:
247 opts.numThreads = lit.util.detectCPUs()
253 # Create the user defined parameters.
254 userParams = dict(builtinParameters)
255 for entry in opts.userParameters:
# Split on the first '=' only, so values may themselves contain '='.
# The bare-NAME case (no '=', lines 256-258) is elided from this view.
259 name,val = entry.split('=', 1)
260 userParams[name] = val
262 # Create the global config object.
263 litConfig = lit.LitConfig.LitConfig(
264 progname = os.path.basename(sys.argv[0]),
267 useValgrind = opts.useValgrind,
268 valgrindLeakCheck = opts.valgrindLeakCheck,
269 valgrindArgs = opts.valgrindArgs,
270 noExecute = opts.noExecute,
272 isWindows = isWindows,
274 config_prefix = opts.configPrefix)
276 # Perform test discovery.
277 run = lit.run.Run(litConfig,
278 lit.discovery.find_tests_for_inputs(litConfig, inputs))
280 if opts.showSuites or opts.showTests:
281 # Aggregate the tests by suite.
# The dict initialization and loop header (lines 282-283) are elided.
284 if t.suite not in suitesAndTests:
285 suitesAndTests[t.suite] = []
286 suitesAndTests[t.suite].append(t)
# Sort suites by name for stable display order.
287 suitesAndTests = list(suitesAndTests.items())
288 suitesAndTests.sort(key = lambda item: item[0].name)
290 # Show the suites, if requested.
292 print('-- Test Suites --')
293 for ts,ts_tests in suitesAndTests:
294 print(' %s - %d tests' %(ts.name, len(ts_tests)))
295 print(' Source Root: %s' % ts.source_root)
296 print(' Exec Root : %s' % ts.exec_root)
298 # Show the tests, if requested.
300 print('-- Available Tests --')
301 for ts,ts_tests in suitesAndTests:
302 ts_tests.sort(key = lambda test: test.path_in_suite)
303 for test in ts_tests:
304 print(' %s' % (test.getFullName(),))
309 # Select and order the tests.
310 numTotalTests = len(run.tests)
312 # First, select based on the filter expression if given.
# The `if opts.filter:` guard and try/except around compile (lines
# 313-314, 316) are elided; parser.error fires on a bad regex.
315 rex = re.compile(opts.filter)
317 parser.error("invalid regular expression for --filter: %r" % (
319 run.tests = [t for t in run.tests
320 if rex.search(t.getFullName())]
322 # Then select the order.
# The `if opts.shuffle:` guard (line 323) is elided from this view.
324 random.shuffle(run.tests)
325 elif opts.incremental:
326 sort_by_incremental_cache(run)
# Default ordering: deterministic sort by full test name.
328 run.tests.sort(key = lambda t: t.getFullName())
330 # Finally limit the number of tests, if desired.
331 if opts.maxTests is not None:
332 run.tests = run.tests[:opts.maxTests]
334 # Don't create more threads than tests.
335 opts.numThreads = min(len(run.tests), opts.numThreads)
# Mention "N of M" in the header only when a filter/limit reduced the set.
338 if len(run.tests) != numTotalTests:
339 extra = ' of %d' % numTotalTests
340 header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
345 if opts.succinct and opts.useProgressBar:
# Try a curses-capable bar first; the except fallback (lines 349-350)
# uses the dumb-terminal SimpleProgressBar instead.
347 tc = lit.ProgressBar.TerminalController()
348 progressBar = lit.ProgressBar.ProgressBar(tc, header)
351 progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
355 startTime = time.time()
356 display = TestingProgressDisplay(opts, len(run.tests), progressBar)
358 run.execute_tests(display, opts.numThreads, opts.maxTime,
360 except KeyboardInterrupt:
# The interrupt handler body (lines 361-363) is elided from this view.
364 testing_time = time.time() - startTime
366 print('Testing Time: %.2fs' % (testing_time,))
368 # Write out the test data, if requested.
369 if opts.output_path is not None:
370 write_test_results(run, litConfig, testing_time, opts.output_path)
372 # List test results organized by kind.
# The byCode dict initialization (lines 373-374) is elided.
375 for test in run.tests:
376 if test.result.code not in byCode:
377 byCode[test.result.code] = []
378 byCode[test.result.code].append(test)
379 if test.result.code.isFailure:
382 # Print each test in any of the failing groups.
383 for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
384 ('Failing Tests', lit.Test.FAIL),
385 ('Unresolved Tests', lit.Test.UNRESOLVED)):
# Skip groups with no members; the guard (lines 387-389) is elided.
386 elts = byCode.get(code)
390 print('%s (%d):' % (title, len(elts)))
392 print(' %s' % test.getFullName())
393 sys.stdout.write('\n')
395 if opts.timeTests and run.tests:
# Histogram of per-test wall time, only with --time-tests.
397 test_times = [(test.getFullName(), test.result.elapsed)
398 for test in run.tests]
399 lit.util.printHistogram(test_times, title='Tests')
# Summary counts per result category; trailing spaces align the colons.
401 for name,code in (('Expected Passes ', lit.Test.PASS),
402 ('Expected Failures ', lit.Test.XFAIL),
403 ('Unsupported Tests ', lit.Test.UNSUPPORTED),
404 ('Unresolved Tests ', lit.Test.UNRESOLVED),
405 ('Unexpected Passes ', lit.Test.XPASS),
406 ('Unexpected Failures', lit.Test.FAIL),):
# --quiet hides non-failure counts entirely.
407 if opts.quiet and not code.isFailure:
409 N = len(byCode.get(code,[]))
411 print(' %s: %d' % (name,N))
413 # If we encountered any additional errors, exit abnormally.
414 if litConfig.numErrors:
# The abnormal sys.exit call (line 416) is elided from this view.
415 sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
418 # Warn about warnings.
419 if litConfig.numWarnings:
420 sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)
# Standard script guard; the main() call (line 427) is elided.
426 if __name__=='__main__':