"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if not test.result.code.isFailure and \
                (self.opts.quiet or self.opts.succinct):
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)
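
    # The file written below is a single JSON object: a schema version, the
    # total elapsed time, and one record per test (each with optional metrics).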
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

def main(builtinParameters = {}):
    # Bump the GIL check interval; it's more important to get any one thread to
    # a blocking operation (hopefully exec) than to try and unblock other
    # threads.
    #
    # FIXME: This is a hack.
    sys.setcheckinterval(1000)
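    # (sys.setcheckinterval() is a Python 2-era API; Python 3.2+ deprecates it
    # in favor of sys.setswitchinterval(), and later releases remove it.)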

    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress non-error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
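    # Note: the two options below store into the same dest ("useProcesses"),
    # so they act as a single on/off switch for process-based parallelism;
    # whichever flag appears later on the command line wins.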
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=not useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1: http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable
        # multiple threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__=='__main__':
    main()