4 lit - LLVM Integrated Tester.
6 See lit.pod for more information.
9 from __future__ import absolute_import
10 import math, os, platform, random, re, sys, time, threading, traceback
12 import lit.ProgressBar
20 class TestingProgressDisplay:
# Console reporter for test results. Reads opts.quiet / opts.succinct /
# opts.showOutput and owns an optional progress bar; a Lock serializes
# updates from worker threads.
# NOTE(review): the leading numbers are original-file line numbers from an
# elided listing; gaps in them mean source lines are missing here.
21     def __init__(self, opts, numTests, progressBar=None):
# numTests: total test count, used as the denominator for progress.
# progressBar: optional lit.ProgressBar instance; None disables bar updates.
23         self.numTests = numTests
25         self.lock = threading.Lock()
26         self.progressBar = progressBar
29     def update(self, test):
30         # Avoid locking overhead in quiet mode
31         if self.opts.quiet and not test.result.code.isFailure:
# In quiet mode, non-failures return before taking the lock (the early
# return itself is elided from this excerpt).
38         self.handleUpdate(test)
# Finish-up path (original lines ~40-48): clear the bar and, in succinct
# mode, emit a trailing newline. Enclosing method header is elided.
44         self.progressBar.clear()
47         elif self.opts.succinct:
48             sys.stdout.write('\n')
50     def handleUpdate(self, test):
# Advance the bar by the fraction of tests completed so far.
# NOTE(review): self.completed is maintained in elided code — presumably
# incremented per finished test; confirm against the full source.
53             self.progressBar.update(float(self.completed)/self.numTests,
# Succinct mode suppresses the per-test line for non-failures.
56         if self.opts.succinct and not test.result.code.isFailure:
60             self.progressBar.clear()
# One line per reported test: result code, full name, running count.
62         print('%s: %s (%d of %d)' % (test.result.code.name, test.getFullName(),
63                                      self.completed, self.numTests))
# On failure with -v, dump the captured test output between banner lines.
65         if test.result.code.isFailure and self.opts.showOutput:
66             print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
68             print(test.result.output)
# NOTE(review): methods of a provider class whose header (original line
# ~73, presumably "class TestProvider") is elided from this excerpt.
# It hands out test INDICES to worker threads, not Test objects.
74     def __init__(self, tests, maxTime):
# maxTime: optional wall-clock budget in seconds; None means unlimited.
75         self.maxTime = maxTime
# A shared iterator over indices; each next() yields one test index.
76         self.iter = iter(range(len(tests)))
# Lock guards concurrent access from multiple Tester threads.
77         self.lock = threading.Lock()
# Start time anchors the maxTime deadline check below.
78         self.startTime = time.time()
# --- fragment of the (elided) get() method: stop handing out tests once
# --- the time budget is exhausted, otherwise yield the next index.
87         # Check if we have run out of time.
88         if self.maxTime is not None:
89             if time.time() - self.startTime > self.maxTime:
92         # Otherwise take the next test.
97         for item in self.iter:
104 class Tester(object):
# Worker: repeatedly pulls a test index from the provider, executes the
# test, and reports the result to the consumer.
# NOTE(review): elided listing — gaps in the leading original-file line
# numbers mean source lines are missing from this excerpt.
105     def __init__(self, run_instance, provider, consumer):
# run_instance: the Run object owning run_instance.tests (indexed below).
106         self.run_instance = run_instance
107         self.provider = provider
108         self.consumer = consumer
# --- fragment of the (elided) run() loop: fetch the next index, run it,
# --- then signal completion to the consumer when the provider is drained.
112             item = self.provider.get()
116         self.consumer.taskFinished()
118     def runTest(self, test_index):
119         test = self.run_instance.tests[test_index]
# The enclosing try (original line ~120) is elided here.
121             self.run_instance.execute_test(test)
122         except KeyboardInterrupt:
123             # This is a sad hack. Unfortunately subprocess goes
124             # bonkers with ctrl-c and we start forking merrily.
125             print('\nCtrl-C detected, goodbye.')
# Report the finished test (index + object) back to the display consumer.
127         self.consumer.update(test_index, test)
129 class ThreadResultsConsumer(object):
# Thin adapter between Tester workers and the progress display: update()
# forwards straight through. taskFinished()/handleResults() bodies are
# elided in this excerpt — presumably no-ops or bookkeeping; confirm
# against the full source.
130     def __init__(self, display):
131         self.display = display
133     def update(self, test_index, test):
# test_index is accepted for interface compatibility but not forwarded.
134         self.display.update(test)
136     def taskFinished(self):
139     def handleResults(self):
142 def run_one_tester(run, provider, display):
# Thread target: build one Tester and (in code elided from this excerpt,
# presumably tester.run()) drive it until the provider is exhausted.
143     tester = Tester(run, provider, display)
146 def runTests(numThreads, run, provider, display):
# Fan out test execution over numThreads threads, all sharing the same
# provider (work queue) and consumer (result sink).
147     consumer = ThreadResultsConsumer(display)
149     # If only using one testing thread, don't use tasks at all; this lets us
150     # profile, among other things.
# Single-threaded fast path runs inline on the calling thread (the guard
# condition itself is elided from this excerpt).
152         run_one_tester(run, provider, consumer)
155     # Start all of the tasks.
156     tasks = [threading.Thread(target=run_one_tester,
157                               args=(run, provider, consumer))
158              for i in range(numThreads)]
162     # Allow the consumer to handle results, if necessary.
163     consumer.handleResults()
165     # Wait for all the tasks to complete.
# NOTE(review): the thread start and join loops (original lines ~159-167)
# are elided from this excerpt.
169 def main(builtinParameters = {}):
# Driver entry point: parse options, discover tests, run them, and print
# a summary. builtinParameters seeds the user-parameter dict (note the
# mutable default — harmless here only because it is copied via dict()
# below and never mutated in visible code).
# NOTE(review): elided listing — gaps in the leading original-file line
# numbers mean source lines are missing throughout this function.
170     # Bump the GIL check interval, its more important to get any one thread to a
171     # blocking operation (hopefully exec) than to try and unblock other threads.
173     # FIXME: This is a hack.
# sys.setcheckinterval is Python 2 / <3.2 API (removed later), consistent
# with the Python-2.5 era checks below.
174     sys.setcheckinterval(1000)
# --- Command-line interface (optparse, the pre-argparse stdlib parser) ---
177     from optparse import OptionParser, OptionGroup
178     parser = OptionParser("usage: %prog [options] {file-or-path}")
180     parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
181                       help="Number of testing threads",
182                       type=int, action="store", default=None)
183     parser.add_option("", "--config-prefix", dest="configPrefix",
184                       metavar="NAME", help="Prefix for 'lit' config files",
185                       action="store", default=None)
186     parser.add_option("", "--param", dest="userParameters",
188                       help="Add 'NAME' = 'VAL' to the user defined parameters",
189                       type=str, action="append", default=[])
191     group = OptionGroup(parser, "Output Format")
192     # FIXME: I find these names very confusing, although I like the
194     group.add_option("-q", "--quiet", dest="quiet",
195                      help="Suppress no error output",
196                      action="store_true", default=False)
197     group.add_option("-s", "--succinct", dest="succinct",
198                      help="Reduce amount of output",
199                      action="store_true", default=False)
200     group.add_option("-v", "--verbose", dest="showOutput",
201                      help="Show all test output",
202                      action="store_true", default=False)
203     group.add_option("", "--no-progress-bar", dest="useProgressBar",
204                      help="Do not use curses based progress bar",
205                      action="store_false", default=True)
206     parser.add_option_group(group)
208     group = OptionGroup(parser, "Test Execution")
209     group.add_option("", "--path", dest="path",
210                      help="Additional paths to add to testing environment",
211                      action="append", type=str, default=[])
212     group.add_option("", "--vg", dest="useValgrind",
213                      help="Run tests under valgrind",
214                      action="store_true", default=False)
215     group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
216                      help="Check for memory leaks under valgrind",
217                      action="store_true", default=False)
218     group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
219                      help="Specify an extra argument for valgrind",
220                      type=str, action="append", default=[])
221     group.add_option("", "--time-tests", dest="timeTests",
222                      help="Track elapsed wall time for each test",
223                      action="store_true", default=False)
224     group.add_option("", "--no-execute", dest="noExecute",
225                      help="Don't execute any tests (assume PASS)",
226                      action="store_true", default=False)
227     parser.add_option_group(group)
229     group = OptionGroup(parser, "Test Selection")
230     group.add_option("", "--max-tests", dest="maxTests", metavar="N",
231                      help="Maximum number of tests to run",
232                      action="store", type=int, default=None)
233     group.add_option("", "--max-time", dest="maxTime", metavar="N",
234                      help="Maximum time to spend testing (in seconds)",
235                      action="store", type=float, default=None)
236     group.add_option("", "--shuffle", dest="shuffle",
237                      help="Run tests in random order",
238                      action="store_true", default=False)
239     group.add_option("", "--filter", dest="filter", metavar="REGEX",
240                      help=("Only run tests with paths matching the given "
241                            "regular expression"),
242                      action="store", default=None)
243     parser.add_option_group(group)
245     group = OptionGroup(parser, "Debug and Experimental Options")
246     group.add_option("", "--debug", dest="debug",
247                      help="Enable debugging (for 'lit' development)",
248                      action="store_true", default=False)
249     group.add_option("", "--show-suites", dest="showSuites",
250                      help="Show discovered test suites",
251                      action="store_true", default=False)
252     group.add_option("", "--show-tests", dest="showTests",
253                      help="Show all discovered tests",
254                      action="store_true", default=False)
255     parser.add_option_group(group)
257     (opts, args) = parser.parse_args()
# The "no args" guard (original lines ~258-259) is elided here.
260         parser.error('No inputs specified')
262     if opts.numThreads is None:
263 # Python <2.5 has a race condition causing lit to always fail with numThreads>1
264         # http://bugs.python.org/issue1731717
265 # I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
266 # threads by default there.
267         if sys.hexversion >= 0x2050200:
268             opts.numThreads = lit.util.detectCPUs()
274     # Create the user defined parameters.
# Copy so builtinParameters (a shared mutable default) is never mutated.
275     userParams = dict(builtinParameters)
276     for entry in opts.userParameters:
# split('=', 1) keeps any '=' inside the value intact; the bare-NAME case
# (no '=') is handled in elided code.
280             name,val = entry.split('=', 1)
281         userParams[name] = val
283     # Create the global config object.
284     litConfig = lit.LitConfig.LitConfig(
285         progname = os.path.basename(sys.argv[0]),
288         useValgrind = opts.useValgrind,
289         valgrindLeakCheck = opts.valgrindLeakCheck,
290         valgrindArgs = opts.valgrindArgs,
291         noExecute = opts.noExecute,
293         isWindows = (platform.system()=='Windows'),
295         config_prefix = opts.configPrefix)
297     # Perform test discovery.
# NOTE(review): 'inputs' is defined in elided code — presumably derived
# from 'args'; confirm against the full source.
298     run = lit.run.Run(litConfig,
299                       lit.discovery.find_tests_for_inputs(litConfig, inputs))
301     if opts.showSuites or opts.showTests:
302         # Aggregate the tests by suite.
# The dict initialization and loop header (~lines 303-304) are elided.
305             if t.suite not in suitesAndTests:
306                 suitesAndTests[t.suite] = []
307             suitesAndTests[t.suite].append(t)
308         suitesAndTests = list(suitesAndTests.items())
309         suitesAndTests.sort(key = lambda item: item[0].name)
311         # Show the suites, if requested.
313             print('-- Test Suites --')
314             for ts,ts_tests in suitesAndTests:
315                 print('  %s - %d tests' %(ts.name, len(ts_tests)))
316                 print('    Source Root: %s' % ts.source_root)
317                 print('    Exec Root  : %s' % ts.exec_root)
319         # Show the tests, if requested.
321             print('-- Available Tests --')
322             for ts,ts_tests in suitesAndTests:
323                 ts_tests.sort(key = lambda test: test.path_in_suite)
324                 for test in ts_tests:
325                     print('  %s' % (test.getFullName(),))
330     # Select and order the tests.
331     numTotalTests = len(run.tests)
333     # First, select based on the filter expression if given.
# The surrounding if/try frame (~lines 334-339) is elided.
336             rex = re.compile(opts.filter)
338             parser.error("invalid regular expression for --filter: %r" % (
340         run.tests = [t for t in run.tests
341                      if rex.search(t.getFullName())]
343     # Then select the order.
# Shuffle vs. sorted-by-name, chosen by opts.shuffle (guard elided).
345         random.shuffle(run.tests)
347         run.tests.sort(key = lambda t: t.getFullName())
349     # Finally limit the number of tests, if desired.
350     if opts.maxTests is not None:
351         run.tests = run.tests[:opts.maxTests]
353     # Don't create more threads than tests.
354     opts.numThreads = min(len(run.tests), opts.numThreads)
# Header mentions "of N" only when filtering/limiting dropped tests.
357     if len(run.tests) != numTotalTests:
358         extra = ' of %d' % numTotalTests
359     header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
# Progress bar selection: full curses bar if the terminal supports it,
# else a simple textual bar (try/except frame elided).
364     if opts.succinct and opts.useProgressBar:
366             tc = lit.ProgressBar.TerminalController()
367             progressBar = lit.ProgressBar.ProgressBar(tc, header)
370             progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
374     startTime = time.time()
375     display = TestingProgressDisplay(opts, len(run.tests), progressBar)
376     provider = TestProvider(run.tests, opts.maxTime)
# Windows-only Ctrl-C handling via win32api (enclosing platform guard and
# handler body, ~lines 377-385, are elided).
383         def console_ctrl_handler(type):
386         win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)
388         runTests(opts.numThreads, run, provider, display)
389     except KeyboardInterrupt:
394     print('Testing Time: %.2fs'%(time.time() - startTime))
396     # Update results for any tests which weren't run.
397     for test in run.tests:
398         if test.result is None:
399             test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, '', 0.0))
401     # List test results organized by kind.
# NOTE(review): byCode is initialized in elided code (~line 402/403).
404     for test in run.tests:
405         if test.result.code not in byCode:
406             byCode[test.result.code] = []
407         byCode[test.result.code].append(test)
408         if test.result.code.isFailure:
411     # Print each test in any of the failing groups.
412     for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
413                        ('Failing Tests', lit.Test.FAIL),
414                        ('Unresolved Tests', lit.Test.UNRESOLVED)):
415         elts = byCode.get(code)
419         print('%s (%d):' % (title, len(elts)))
# The inner per-test loop header (~line 420) is elided.
421             print('    %s' % test.getFullName())
422         sys.stdout.write('\n')
423     if opts.timeTests and run.tests:
# (original line 424 in the listing)
426         test_times = [(test.getFullName(), test.result.elapsed)
427                       for test in run.tests]
428         lit.util.printHistogram(test_times, title='Tests')
# Summary counts per result code; quiet mode hides the non-failure rows.
430     for name,code in (('Expected Passes    ', lit.Test.PASS),
431                       ('Expected Failures  ', lit.Test.XFAIL),
432                       ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
433                       ('Unresolved Tests   ', lit.Test.UNRESOLVED),
434                       ('Unexpected Passes  ', lit.Test.XPASS),
435                       ('Unexpected Failures', lit.Test.FAIL),):
436         if opts.quiet and not code.isFailure:
438         N = len(byCode.get(code,[]))
440             print('  %s: %d' % (name,N))
442     # If we encountered any additional errors, exit abnormally.
443     if litConfig.numErrors:
444         sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
447     # Warn about warnings.
448     if litConfig.numWarnings:
449         sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)
# Script entry point; the guarded call (presumably main()) is elided from
# this excerpt.
455 if __name__=='__main__':