[lit] Add a --filter option which is useful when dealing with virtual test
utils/lit/lit/main.py
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

import math, os, platform, random, re, sys, time, threading, traceback

import ProgressBar
import TestRunner
import Util

from TestingConfig import TestingConfig
import LitConfig
import Test

# Configuration files to look for when discovering test suites. These can be
# overridden with --config-prefix.
#
# FIXME: Rename to 'config.lit', 'site.lit', and 'local.lit' ?
gConfigName = 'lit.cfg'
gSiteConfigName = 'lit.site.cfg'
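#
# For example (illustrative): passing --config-prefix=project makes discovery
# look for 'project.cfg' and 'project.site.cfg' instead of the defaults above;
# kLocalConfigName below is not affected by that option.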

kLocalConfigName = 'lit.local.cfg'

class TestingProgressDisplay:
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.lock = threading.Lock()
        self.progressBar = progressBar
        self.completed = 0

    def update(self, test):
        # Avoid locking overhead in quiet mode
        if self.opts.quiet and not test.result.isFailure:
            self.completed += 1
            return

        # Output lock.
        self.lock.acquire()
        try:
            self.handleUpdate(test)
        finally:
            self.lock.release()

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def handleUpdate(self, test):
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if self.opts.succinct and not test.result.isFailure:
            return

        if self.progressBar:
            self.progressBar.clear()

        print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
                                     self.completed, self.numTests)

        if test.result.isFailure and self.opts.showOutput:
            print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20)
            print test.output
            print "*" * 20

        sys.stdout.flush()

class TestProvider:
    def __init__(self, tests, maxTime):
        self.maxTime = maxTime
        self.iter = iter(tests)
        self.lock = threading.Lock()
        self.startTime = time.time()

    def get(self):
        # Check if we have run out of time.
        if self.maxTime is not None:
            if time.time() - self.startTime > self.maxTime:
                return None

        # Otherwise take the next test.
        self.lock.acquire()
        try:
            item = self.iter.next()
        except StopIteration:
            item = None
        self.lock.release()
        return item

class Tester(threading.Thread):
    def __init__(self, litConfig, provider, display):
        threading.Thread.__init__(self)
        self.litConfig = litConfig
        self.provider = provider
        self.display = display

    def run(self):
        while True:
            item = self.provider.get()
            if item is None:
                break
            self.runTest(item)

    def runTest(self, test):
        result = None
        startTime = time.time()
        try:
            result, output = test.config.test_format.execute(test,
                                                             self.litConfig)
        except KeyboardInterrupt:
            # This is a sad hack. Unfortunately subprocess goes
            # bonkers with ctrl-c and we start forking merrily.
            print '\nCtrl-C detected, goodbye.'
            os.kill(0,9)
        except:
            if self.litConfig.debug:
                raise
            result = Test.UNRESOLVED
            output = 'Exception during script execution:\n'
            output += traceback.format_exc()
            output += '\n'
        elapsed = time.time() - startTime

        test.setResult(result, output, elapsed)
        self.display.update(test)

def dirContainsTestSuite(path):
    cfgpath = os.path.join(path, gSiteConfigName)
    if os.path.exists(cfgpath):
        return cfgpath
    cfgpath = os.path.join(path, gConfigName)
    if os.path.exists(cfgpath):
        return cfgpath

def getTestSuite(item, litConfig, cache):
    """getTestSuite(item, litConfig, cache) -> (suite, relative_path)

    Find the test suite containing @arg item.

    @retval (None, ...) - Indicates no test suite contains @arg item.
    @retval (suite, relative_path) - The suite that @arg item is in, and its
    relative path inside that suite.
    """
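    # For example (hypothetical layout): if /work/tests contains a lit.cfg and
    # its subdirectory Foo does not, getTestSuite('/work/tests/Foo/bar.ll', ...)
    # returns the suite rooted at /work/tests and the relative path
    # ('Foo', 'bar.ll').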
    def search1(path):
        # Check for a site config or a lit config.
        cfgpath = dirContainsTestSuite(path)

        # If we didn't find a config file, keep looking.
        if not cfgpath:
            parent,base = os.path.split(path)
            if parent == path:
                return (None, ())

            ts, relative = search(parent)
            return (ts, relative + (base,))

        # We found a config file, load it.
        if litConfig.debug:
            litConfig.note('loading suite config %r' % cfgpath)

        cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True)
        source_root = os.path.realpath(cfg.test_source_root or path)
        exec_root = os.path.realpath(cfg.test_exec_root or path)
        return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()

    def search(path):
        # Check for an already instantiated test suite.
        res = cache.get(path)
        if res is None:
            cache[path] = res = search1(path)
        return res

    # Canonicalize the path.
    item = os.path.realpath(item)

    # Skip files and virtual components.
    components = []
    while not os.path.isdir(item):
        parent,base = os.path.split(item)
        if parent == item:
            return (None, ())
        components.append(base)
        item = parent
    components.reverse()

    ts, relative = search(item)
    return ts, tuple(relative + tuple(components))

def getLocalConfig(ts, path_in_suite, litConfig, cache):
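    # Descriptive note: local configs are resolved recursively -- each
    # directory's lit.local.cfg (kLocalConfigName) is layered on top of its
    # parent directory's config, and the per-directory results are memoized
    # in 'cache'.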
    def search1(path_in_suite):
        # Get the parent config.
        if not path_in_suite:
            parent = ts.config
        else:
            parent = search(path_in_suite[:-1])

        # Load the local configuration.
        source_path = ts.getSourcePath(path_in_suite)
        cfgpath = os.path.join(source_path, kLocalConfigName)
        if litConfig.debug:
            litConfig.note('loading local config %r' % cfgpath)
        return TestingConfig.frompath(cfgpath, parent, litConfig,
                                      mustExist = False,
                                      config = parent.clone(cfgpath))

    def search(path_in_suite):
        key = (ts, path_in_suite)
        res = cache.get(key)
        if res is None:
            cache[key] = res = search1(path_in_suite)
        return res

    return search(path_in_suite)

def getTests(path, litConfig, testSuiteCache, localConfigCache):
    # Find the test suite for this input and its relative path.
    ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
    if ts is None:
        litConfig.warning('unable to find test suite for %r' % path)
        return (),()

    if litConfig.debug:
        litConfig.note('resolved input %r to %r::%r' % (path, ts.name,
                                                        path_in_suite))

    return ts, getTestsInSuite(ts, path_in_suite, litConfig,
                               testSuiteCache, localConfigCache)

def getTestsInSuite(ts, path_in_suite, litConfig,
                    testSuiteCache, localConfigCache):
    # Check that the source path exists (errors here are reported by the
    # caller).
    source_path = ts.getSourcePath(path_in_suite)
    if not os.path.exists(source_path):
        return

    # Check if the user named a test directly.
    if not os.path.isdir(source_path):
        lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
        yield Test.Test(ts, path_in_suite, lc)
        return

    # Otherwise we have a directory to search for tests, start by getting the
    # local configuration.
    lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)

    # Search for tests.
    if lc.test_format is not None:
        for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
                                                      litConfig, lc):
            yield res

    # Search subdirectories.
    for filename in os.listdir(source_path):
        # FIXME: This doesn't belong here?
        if filename in ('Output', '.svn') or filename in lc.excludes:
            continue

        # Ignore non-directories.
        file_sourcepath = os.path.join(source_path, filename)
        if not os.path.isdir(file_sourcepath):
            continue

        # Check for nested test suites, first in the execpath in case there is a
        # site configuration and then in the source path.
        file_execpath = ts.getExecPath(path_in_suite + (filename,))
        if dirContainsTestSuite(file_execpath):
            sub_ts, subiter = getTests(file_execpath, litConfig,
                                       testSuiteCache, localConfigCache)
        elif dirContainsTestSuite(file_sourcepath):
            sub_ts, subiter = getTests(file_sourcepath, litConfig,
                                       testSuiteCache, localConfigCache)
        else:
            # Otherwise, continue loading from inside this test suite.
            subiter = getTestsInSuite(ts, path_in_suite + (filename,),
                                      litConfig, testSuiteCache,
                                      localConfigCache)
            sub_ts = None

        N = 0
        for res in subiter:
            N += 1
            yield res
        if sub_ts and not N:
            litConfig.warning('test suite %r contained no tests' % sub_ts.name)

def runTests(numThreads, litConfig, provider, display):
    # If only using one testing thread, don't use threads at all; this lets us
    # profile, among other things.
    if numThreads == 1:
        t = Tester(litConfig, provider, display)
        t.run()
        return

    # Otherwise spin up the testing threads and wait for them to finish.
    testers = [Tester(litConfig, provider, display)
               for i in range(numThreads)]
    for t in testers:
        t.start()
    try:
        for t in testers:
            t.join()
    except KeyboardInterrupt:
        sys.exit(2)

def load_test_suite(inputs):
    import unittest

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = 'lit',
                                    path = [],
                                    quiet = False,
                                    useValgrind = False,
                                    valgrindLeakCheck = False,
                                    valgrindArgs = [],
                                    useTclAsSh = False,
                                    noExecute = False,
                                    ignoreStdErr = False,
                                    debug = False,
                                    isWindows = (platform.system()=='Windows'),
                                    params = {})

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    # Return a unittest test suite which just runs the tests in order.
    def get_test_fn(test):
        return unittest.FunctionTestCase(
            lambda: test.config.test_format.execute(
                test, litConfig),
            description = test.getFullName())

    from LitTestCase import LitTestCase
    return unittest.TestSuite([LitTestCase(test, litConfig) for test in tests])

def main(builtinParameters = {}):
    # Bump the GIL check interval; it's more important to get any one thread to
    # a blocking operation (hopefully exec) than to try and unblock other
    # threads.
    #
    # FIXME: This is a hack.
    import sys
    sys.setcheckinterval(1000)
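    # (Note: sys.setcheckinterval is a Python 2 API; Python 3 deprecated it in
    # favor of sys.setswitchinterval.)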

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress output except for test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="EXPRESSION",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--no-tcl-as-sh", dest="useTclAsSh",
                     help="Don't run Tcl scripts using 'sh'",
                     action="store_false", default=True)
    group.add_option("", "--repeat", dest="repeatTests", metavar="N",
                     help="Repeat tests N times (for timing)",
                     action="store", default=None, type=int)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.configPrefix is not None:
        global gConfigName, gSiteConfigName
        gConfigName = '%s.cfg' % opts.configPrefix
        gSiteConfigName = '%s.site.cfg' % opts.configPrefix

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717). I haven't seen
        # this bug occur with 2.5.2 and later, so only enable multiple threads
        # by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = Util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val
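
    # (Illustrative example: '--param build_mode=Release' yields
    # userParams['build_mode'] == 'Release', while a bare '--param foo'
    # yields userParams['foo'] == ''.)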

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]),
                                    path = opts.path,
                                    quiet = opts.quiet,
                                    useValgrind = opts.useValgrind,
                                    valgrindLeakCheck = opts.valgrindLeakCheck,
                                    valgrindArgs = opts.valgrindArgs,
                                    useTclAsSh = opts.useTclAsSh,
                                    noExecute = opts.noExecute,
                                    ignoreStdErr = False,
                                    debug = opts.debug,
                                    isWindows = (platform.system()=='Windows'),
                                    params = userParams)

    # Expand '@...' form in inputs.
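    # (Illustrative: an argument such as '@tests.lst' is treated as a response
    # file whose non-empty lines name additional inputs, unless a file literally
    # called '@tests.lst' exists on disk.)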
    actual_inputs = []
    for input in inputs:
        if os.path.exists(input) or not input.startswith('@'):
            actual_inputs.append(input)
        else:
            f = open(input[1:])
            try:
                for ln in f:
                    ln = ln.strip()
                    if ln:
                        actual_inputs.append(ln)
            finally:
                f.close()

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in actual_inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    if opts.showSuites:
        suitesAndTests = dict([(ts,[])
                               for ts,_ in testSuiteCache.values()
                               if ts])
        for t in tests:
            suitesAndTests[t.suite].append(t)

        print '-- Test Suites --'
        suitesAndTests = suitesAndTests.items()
        suitesAndTests.sort(key = lambda (ts,_): ts.name)
        for ts,ts_tests in suitesAndTests:
            print '  %s - %d tests' %(ts.name, len(ts_tests))
            print '    Source Root: %s' % ts.source_root
            print '    Exec Root  : %s' % ts.exec_root

    # Select and order the tests.
    numTotalTests = len(tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        tests = [t for t in tests
                 if rex.search(t.getFullName())]
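
    # (For example -- assuming Test.getFullName() produces names of the form
    # '<suite name> :: <relative path>': '--filter CodeGen/X86' keeps only the
    # tests whose full name matches that regular expression.)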

    # Then select the order.
    if opts.shuffle:
        random.shuffle(tests)
    else:
        tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        tests = tests[:opts.maxTests]

    extra = ''
    if len(tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra,
                                                      opts.numThreads)

    if opts.repeatTests:
        tests = [t.copyWithIndex(i)
                 for t in tests
                 for i in range(opts.repeatTests)]

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = ProgressBar.TerminalController()
                progressBar = ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print header
                progressBar = ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print header

    # Don't create more threads than tests.
    opts.numThreads = min(len(tests), opts.numThreads)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(tests), progressBar)
    provider = TestProvider(tests, opts.maxTime)
    runTests(opts.numThreads, litConfig, provider, display)
    display.finish()

    if not opts.quiet:
        print 'Testing Time: %.2fs'%(time.time() - startTime)

    # Update results for any tests which weren't run.
    for t in tests:
        if t.result is None:
            t.setResult(Test.UNRESOLVED, '', 0.0)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for t in tests:
        if t.result not in byCode:
            byCode[t.result] = []
        byCode[t.result].append(t)
        if t.result.isFailure:
            hasFailures = True

    # FIXME: Show unresolved and (optionally) unsupported tests.
    for title,code in (('Unexpected Passing Tests', Test.XPASS),
                       ('Failing Tests', Test.FAIL)):
        elts = byCode.get(code)
        if not elts:
            continue
        print '*'*20
        print '%s (%d):' % (title, len(elts))
        for t in elts:
            print '    %s' % t.getFullName()
        print

    if opts.timeTests:
        # Collate, in case we repeated tests.
        times = {}
        for t in tests:
            key = t.getFullName()
            times[key] = times.get(key, 0.) + t.elapsed

        byTime = list(times.items())
        byTime.sort(key = lambda (name,elapsed): elapsed)
        if byTime:
            Util.printHistogram(byTime, title='Tests')

    for name,code in (('Expected Passes    ', Test.PASS),
                      ('Expected Failures  ', Test.XFAIL),
                      ('Unsupported Tests  ', Test.UNSUPPORTED),
                      ('Unresolved Tests   ', Test.UNRESOLVED),
                      ('Unexpected Passes  ', Test.XPASS),
                      ('Unexpected Failures', Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print '  %s: %d' % (name,N)

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__=='__main__':
    main()