[lit] Add a --show-tests option.
[oota-llvm.git] / utils / lit / lit / main.py
1 #!/usr/bin/env python
2
3 """
4 lit - LLVM Integrated Tester.
5
6 See lit.pod for more information.
7 """
8
9 import math, os, platform, random, re, sys, time, threading, traceback
10
11 import ProgressBar
12 import TestRunner
13 import Util
14
15 import LitConfig
16 import Test
17
18 import lit.discovery
19
20 class TestingProgressDisplay:
21     def __init__(self, opts, numTests, progressBar=None):
22         self.opts = opts
23         self.numTests = numTests
24         self.current = None
25         self.lock = threading.Lock()
26         self.progressBar = progressBar
27         self.completed = 0
28
29     def update(self, test):
30         # Avoid locking overhead in quiet mode
31         if self.opts.quiet and not test.result.isFailure:
32             self.completed += 1
33             return
34
35         # Output lock.
36         self.lock.acquire()
37         try:
38             self.handleUpdate(test)
39         finally:
40             self.lock.release()
41
42     def finish(self):
43         if self.progressBar:
44             self.progressBar.clear()
45         elif self.opts.quiet:
46             pass
47         elif self.opts.succinct:
48             sys.stdout.write('\n')
49
50     def handleUpdate(self, test):
51         self.completed += 1
52         if self.progressBar:
53             self.progressBar.update(float(self.completed)/self.numTests,
54                                     test.getFullName())
55
56         if self.opts.succinct and not test.result.isFailure:
57             return
58
59         if self.progressBar:
60             self.progressBar.clear()
61
62         print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
63                                      self.completed, self.numTests)
64
65         if test.result.isFailure and self.opts.showOutput:
66             print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
67                                               '*'*20)
68             print test.output
69             print "*" * 20
70
71         sys.stdout.flush()
72
class TestProvider:
    """Thread-safe source of tests for Tester worker threads.

    Hands out tests one at a time until the underlying iterator is
    exhausted, the optional deadline (maxTime, in seconds) passes, or
    cancel() is called.  get() returns None to signal "stop".
    """

    def __init__(self, tests, maxTime):
        self.maxTime = maxTime
        self.iter = iter(tests)
        # Protects self.canceled and iteration over self.iter.
        self.lock = threading.Lock()
        self.startTime = time.time()
        self.canceled = False

    def cancel(self):
        """Request that no further tests be handed out."""
        self.lock.acquire()
        self.canceled = True
        self.lock.release()

    def get(self):
        """Return the next test to run, or None when testing should stop."""
        # Check if we have run out of time.  No lock needed: startTime and
        # maxTime are immutable after construction.
        if self.maxTime is not None:
            if time.time() - self.startTime > self.maxTime:
                return None

        # Otherwise take the next test.  Use try/finally so the lock is
        # released even if the iterator raises something other than
        # StopIteration (previously that would leak the lock and deadlock
        # every worker thread).
        self.lock.acquire()
        try:
            if self.canceled:
                return None
            try:
                item = self.iter.next()
            except StopIteration:
                item = None
            return item
        finally:
            self.lock.release()
104
105 class Tester(threading.Thread):
106     def __init__(self, litConfig, provider, display):
107         threading.Thread.__init__(self)
108         self.litConfig = litConfig
109         self.provider = provider
110         self.display = display
111
112     def run(self):
113         while 1:
114             item = self.provider.get()
115             if item is None:
116                 break
117             self.runTest(item)
118
119     def runTest(self, test):
120         result = None
121         startTime = time.time()
122         try:
123             result, output = test.config.test_format.execute(test,
124                                                              self.litConfig)
125         except KeyboardInterrupt:
126             # This is a sad hack. Unfortunately subprocess goes
127             # bonkers with ctrl-c and we start forking merrily.
128             print '\nCtrl-C detected, goodbye.'
129             os.kill(0,9)
130         except:
131             if self.litConfig.debug:
132                 raise
133             result = Test.UNRESOLVED
134             output = 'Exception during script execution:\n'
135             output += traceback.format_exc()
136             output += '\n'
137         elapsed = time.time() - startTime
138
139         test.setResult(result, output, elapsed)
140         self.display.update(test)
141
def runTests(numThreads, litConfig, provider, display):
    """Run every test from provider, reporting through display.

    With a single thread the tests run inline on the calling thread
    (this keeps profilers and debuggers usable); otherwise numThreads
    worker threads are spawned and joined.
    """
    # If only using one testing thread, don't use threads at all; this lets us
    # profile, among other things.
    if numThreads == 1:
        Tester(litConfig, provider, display).run()
        return

    # Otherwise spin up the testing threads and wait for them to finish.
    workers = [Tester(litConfig, provider, display)
               for _ in range(numThreads)]
    for worker in workers:
        worker.start()
    try:
        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        # Interrupted while waiting for workers; bail with error status.
        sys.exit(2)
160
def main(builtinParameters = {}):
    """Command-line entry point for lit.

    Parses options, discovers tests under the given paths, optionally
    lists suites/tests, runs the selected tests (possibly threaded),
    and prints a categorized summary.  Exits 1 if any test failed and
    2 on internal errors or interrupt.

    builtinParameters: base user-parameter dict; --param entries are
    layered on top.  NOTE(review): mutable default argument -- safe
    here only because it is copied (dict(...)) and never mutated.
    """
    # Bump the GIL check interval, its more important to get any one thread to a
    # blocking operation (hopefully exec) than to try and unblock other threads.
    #
    # FIXME: This is a hack.
    import sys
    sys.setcheckinterval(1000)

    # NOTE(review): 'options' is declared global but never assigned in this
    # function; the parsed options live in the local 'opts'.
    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                      help="Enable debugging (for 'lit' development)",
                      action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                      help="Show discovered test suites",
                      action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                      help="Show all discovered tests",
                      action="store_true", default=False)
    group.add_option("", "--repeat", dest="repeatTests", metavar="N",
                      help="Repeat tests N times (for timing)",
                      action="store", default=None, type=int)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
# Python <2.5 has a race condition causing lit to always fail with numThreads>1
# http://bugs.python.org/issue1731717
# I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
# threads by default there.
       if sys.hexversion >= 0x2050200:
               opts.numThreads = Util.detectCPUs()
       else:
               opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]),
                                    path = opts.path,
                                    quiet = opts.quiet,
                                    useValgrind = opts.useValgrind,
                                    valgrindLeakCheck = opts.valgrindLeakCheck,
                                    valgrindArgs = opts.valgrindArgs,
                                    noExecute = opts.noExecute,
                                    debug = opts.debug,
                                    isWindows = (platform.system()=='Windows'),
                                    params = userParams,
                                    config_prefix = opts.configPrefix)

    tests = lit.discovery.find_tests_for_inputs(litConfig, inputs)

    # Handle the informational --show-suites / --show-tests listings.
    # NOTE(review): these do not exit; testing still proceeds below.
    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = suitesAndTests.items()
        suitesAndTests.sort(key = lambda (ts,_): ts.name)

        # Show the suites, if requested.
        if opts.showSuites:
            print '-- Test Suites --'
            for ts,ts_tests in suitesAndTests:
                print '  %s - %d tests' %(ts.name, len(ts_tests))
                print '    Source Root: %s' % ts.source_root
                print '    Exec Root  : %s' % ts.exec_root

        # Show the tests, if requested.
        if opts.showTests:
            print '-- Available Tests --'
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print '  %s' % (test.getFullName(),)

    # Select and order the tests.
    numTotalTests = len(tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        tests = [t for t in tests
                 if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(tests)
    else:
        tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        tests = tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(tests), opts.numThreads)

    extra = ''
    if len(tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra,
                                                      opts.numThreads)

    # Repeat each test N times (for timing); copies get distinct indices.
    if opts.repeatTests:
        tests = [t.copyWithIndex(i)
                 for t in tests
                 for i in range(opts.repeatTests)]

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = ProgressBar.TerminalController()
                progressBar = ProgressBar.ProgressBar(tc, header)
            except ValueError:
                # Terminal doesn't support the curses progress bar; fall
                # back to a simple textual one.
                print header
                progressBar = ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print header

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(tests), progressBar)
    provider = TestProvider(tests, opts.maxTime)

    # On Windows with pywin32 available, translate console ctrl events
    # into a cancellation of the provider so workers wind down cleanly.
    try:
      import win32api
    except ImportError:
      pass
    else:
      def console_ctrl_handler(type):
        provider.cancel()
        return True
      win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)

    runTests(opts.numThreads, litConfig, provider, display)
    display.finish()

    if not opts.quiet:
        print 'Testing Time: %.2fs'%(time.time() - startTime)

    # Update results for any tests which weren't run.
    for t in tests:
        if t.result is None:
            t.setResult(Test.UNRESOLVED, '', 0.0)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for t in tests:
        if t.result not in byCode:
            byCode[t.result] = []
        byCode[t.result].append(t)
        if t.result.isFailure:
            hasFailures = True

    # FIXME: Show unresolved and (optionally) unsupported tests.
    for title,code in (('Unexpected Passing Tests', Test.XPASS),
                       ('Failing Tests', Test.FAIL)):
        elts = byCode.get(code)
        if not elts:
            continue
        print '*'*20
        print '%s (%d):' % (title, len(elts))
        for t in elts:
            print '    %s' % t.getFullName()
        print

    if opts.timeTests:
        # Collate, in case we repeated tests.
        times = {}
        for t in tests:
            key = t.getFullName()
            times[key] = times.get(key, 0.) + t.elapsed

        byTime = list(times.items())
        byTime.sort(key = lambda (name,elapsed): elapsed)
        if byTime:
            Util.printHistogram(byTime, title='Tests')

    # Summary counts by result code; quiet mode only reports failures.
    for name,code in (('Expected Passes    ', Test.PASS),
                      ('Expected Failures  ', Test.XFAIL),
                      ('Unsupported Tests  ', Test.UNSUPPORTED),
                      ('Unresolved Tests   ', Test.UNRESOLVED),
                      ('Unexpected Passes  ', Test.XPASS),
                      ('Unexpected Failures', Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print '  %s: %d' % (name,N)

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings

    if hasFailures:
        sys.exit(1)
    sys.exit(0)
452
# Allow direct invocation as a script.
if __name__=='__main__':
    main()