#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""
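
# Example invocations (illustrative only; the options are defined in main() below):
#
#   lit test/Foo                          # run all tests discovered under test/Foo
#   lit -sv test/Foo                      # succinct progress, show failing test output
#   lit -j8 --param name=value test/Foo   # eight threads, one user-defined parameter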

import math, os, platform, random, re, sys, time, threading, traceback

import ProgressBar
import TestRunner
import Util

from TestingConfig import TestingConfig
import LitConfig
import Test

# Configuration files to look for when discovering test suites. These can be
# overridden with --config-prefix.
#
# FIXME: Rename to 'config.lit', 'site.lit', and 'local.lit' ?
gConfigName = 'lit.cfg'
gSiteConfigName = 'lit.site.cfg'

kLocalConfigName = 'lit.local.cfg'

class TestingProgressDisplay:
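    """
    Reports test progress and results to the console.

    update() is called by the Tester threads as tests complete; an internal
    lock serializes the output, and the quiet/succinct options and the
    optional progress bar control how much is printed.
    """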
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.lock = threading.Lock()
        self.progressBar = progressBar
        self.completed = 0

    def update(self, test):
        # Avoid locking overhead in quiet mode
        if self.opts.quiet and not test.result.isFailure:
            self.completed += 1
            return

        # Output lock.
        self.lock.acquire()
        try:
            self.handleUpdate(test)
        finally:
            self.lock.release()

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def handleUpdate(self, test):
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if self.opts.succinct and not test.result.isFailure:
            return

        if self.progressBar:
            self.progressBar.clear()

        print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
                                     self.completed, self.numTests)

        if test.result.isFailure and self.opts.showOutput:
            print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20)
            print test.output
            print "*" * 20

        sys.stdout.flush()

class TestProvider:
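    """
    Thread-safe source of tests for the Tester threads.

    get() returns the next test to run, or None once the tests are exhausted
    or the --max-time limit has been exceeded.
    """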
    def __init__(self, tests, maxTime):
        self.maxTime = maxTime
        self.iter = iter(tests)
        self.lock = threading.Lock()
        self.startTime = time.time()

    def get(self):
        # Check if we have run out of time.
        if self.maxTime is not None:
            if time.time() - self.startTime > self.maxTime:
                return None

        # Otherwise take the next test.
        self.lock.acquire()
        try:
            item = self.iter.next()
        except StopIteration:
            item = None
        self.lock.release()
        return item

class Tester(threading.Thread):
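    """
    Worker thread that repeatedly pulls a test from the provider, executes it
    using the suite's test format, and reports the result to the display.
    """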
    def __init__(self, litConfig, provider, display):
        threading.Thread.__init__(self)
        self.litConfig = litConfig
        self.provider = provider
        self.display = display

    def run(self):
        while 1:
            item = self.provider.get()
            if item is None:
                break
            self.runTest(item)

    def runTest(self, test):
        result = None
        startTime = time.time()
        try:
            result, output = test.config.test_format.execute(test,
                                                             self.litConfig)
        except KeyboardInterrupt:
            # This is a sad hack. Unfortunately subprocess goes
            # bonkers with ctrl-c and we start forking merrily.
            print '\nCtrl-C detected, goodbye.'
            os.kill(0,9)
        except:
            if self.litConfig.debug:
                raise
            result = Test.UNRESOLVED
            output = 'Exception during script execution:\n'
            output += traceback.format_exc()
            output += '\n'
        elapsed = time.time() - startTime

        test.setResult(result, output, elapsed)
        self.display.update(test)

def dirContainsTestSuite(path):
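    """Return the path of the config file that marks 'path' as a test suite
    root (preferring the site config over the source config), or None."""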
    cfgpath = os.path.join(path, gSiteConfigName)
    if os.path.exists(cfgpath):
        return cfgpath
    cfgpath = os.path.join(path, gConfigName)
    if os.path.exists(cfgpath):
        return cfgpath

def getTestSuite(item, litConfig, cache):
    """getTestSuite(item, litConfig, cache) -> (suite, relative_path)

    Find the test suite containing @arg item.

    @retval (None, ...) - Indicates no test suite contains @arg item.
    @retval (suite, relative_path) - The suite that @arg item is in, and its
    relative path inside that suite.
    """
    def search1(path):
        # Check for a site config or a lit config.
        cfgpath = dirContainsTestSuite(path)

        # If we didn't find a config file, keep looking.
        if not cfgpath:
            parent,base = os.path.split(path)
            if parent == path:
                return (None, ())

            ts, relative = search(parent)
            return (ts, relative + (base,))

        # We found a config file, load it.
        if litConfig.debug:
            litConfig.note('loading suite config %r' % cfgpath)

        cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True)
        source_root = os.path.realpath(cfg.test_source_root or path)
        exec_root = os.path.realpath(cfg.test_exec_root or path)
        return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()

    def search(path):
        # Check for an already instantiated test suite.
        res = cache.get(path)
        if res is None:
            cache[path] = res = search1(path)
        return res

    # Canonicalize the path.
    item = os.path.realpath(item)

    # Skip files and virtual components.
    components = []
    while not os.path.isdir(item):
        parent,base = os.path.split(item)
        if parent == item:
            return (None, ())
        components.append(base)
        item = parent
    components.reverse()

    ts, relative = search(item)
    return ts, tuple(relative + tuple(components))

def getLocalConfig(ts, path_in_suite, litConfig, cache):
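    """getLocalConfig(ts, path_in_suite, litConfig, cache) -> TestingConfig

    Return the local configuration in effect for @arg path_in_suite inside the
    suite @arg ts: the parent configuration is cloned and any lit.local.cfg in
    that directory is applied on top. Results are memoized in @arg cache.
    """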
    def search1(path_in_suite):
        # Get the parent config.
        if not path_in_suite:
            parent = ts.config
        else:
            parent = search(path_in_suite[:-1])

        # Load the local configuration.
        source_path = ts.getSourcePath(path_in_suite)
        cfgpath = os.path.join(source_path, kLocalConfigName)
        if litConfig.debug:
            litConfig.note('loading local config %r' % cfgpath)
        return TestingConfig.frompath(cfgpath, parent, litConfig,
                                      mustExist = False,
                                      config = parent.clone(cfgpath))

    def search(path_in_suite):
        key = (ts, path_in_suite)
        res = cache.get(key)
        if res is None:
            cache[key] = res = search1(path_in_suite)
        return res

    return search(path_in_suite)

def getTests(path, litConfig, testSuiteCache, localConfigCache):
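    """getTests(path, litConfig, testSuiteCache, localConfigCache) -> (ts, tests)

    Resolve @arg path to its containing test suite and return that suite along
    with a generator over the tests found at the path.
    """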
    # Find the test suite for this input and its relative path.
    ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
    if ts is None:
        litConfig.warning('unable to find test suite for %r' % path)
        return (),()

    if litConfig.debug:
        litConfig.note('resolved input %r to %r::%r' % (path, ts.name,
                                                        path_in_suite))

    return ts, getTestsInSuite(ts, path_in_suite, litConfig,
                               testSuiteCache, localConfigCache)

def getTestsInSuite(ts, path_in_suite, litConfig,
                    testSuiteCache, localConfigCache):
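    """Generate the Test objects located at @arg path_in_suite inside the test
    suite @arg ts, recursing into subdirectories and into any nested test
    suites discovered along the way."""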
    # Check that the source path exists (errors here are reported by the
    # caller).
    source_path = ts.getSourcePath(path_in_suite)
    if not os.path.exists(source_path):
        return

    # Check if the user named a test directly.
    if not os.path.isdir(source_path):
        lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
        yield Test.Test(ts, path_in_suite, lc)
        return

    # Otherwise we have a directory to search for tests, start by getting the
    # local configuration.
    lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)

    # Search for tests.
    if lc.test_format is not None:
        for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
                                                      litConfig, lc):
            yield res

    # Search subdirectories.
    for filename in os.listdir(source_path):
        # FIXME: This doesn't belong here?
        if filename in ('Output', '.svn') or filename in lc.excludes:
            continue

        # Ignore non-directories.
        file_sourcepath = os.path.join(source_path, filename)
        if not os.path.isdir(file_sourcepath):
            continue

        # Check for nested test suites, first in the execpath in case there is a
        # site configuration and then in the source path.
        file_execpath = ts.getExecPath(path_in_suite + (filename,))
        if dirContainsTestSuite(file_execpath):
            sub_ts, subiter = getTests(file_execpath, litConfig,
                                       testSuiteCache, localConfigCache)
        elif dirContainsTestSuite(file_sourcepath):
            sub_ts, subiter = getTests(file_sourcepath, litConfig,
                                       testSuiteCache, localConfigCache)
        else:
            # Otherwise, continue loading from inside this test suite.
            subiter = getTestsInSuite(ts, path_in_suite + (filename,),
                                      litConfig, testSuiteCache,
                                      localConfigCache)
            sub_ts = None

        N = 0
        for res in subiter:
            N += 1
            yield res
        if sub_ts and not N:
            litConfig.warning('test suite %r contained no tests' % sub_ts.name)

def runTests(numThreads, litConfig, provider, display):
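    """Run every test from @arg provider, either inline when numThreads is 1
    or on numThreads Tester threads otherwise."""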
    # If only using one testing thread, don't use threads at all; this lets us
    # profile, among other things.
    if numThreads == 1:
        t = Tester(litConfig, provider, display)
        t.run()
        return

    # Otherwise spin up the testing threads and wait for them to finish.
    testers = [Tester(litConfig, provider, display)
               for i in range(numThreads)]
    for t in testers:
        t.start()
    try:
        for t in testers:
            t.join()
    except KeyboardInterrupt:
        sys.exit(2)

def load_test_suite(inputs):
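    """load_test_suite(inputs) -> unittest.TestSuite

    Discover the lit tests under @arg inputs and wrap each one in a LitTestCase
    so the whole set can be driven by a standard unittest runner.
    """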
    import unittest

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = 'lit',
                                    path = [],
                                    quiet = False,
                                    useValgrind = False,
                                    valgrindLeakCheck = False,
                                    valgrindArgs = [],
                                    useTclAsSh = False,
                                    noExecute = False,
                                    debug = False,
                                    isWindows = (platform.system()=='Windows'),
                                    params = {})

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    # Return a unittest test suite which just runs the tests in order.
    def get_test_fn(test):
        return unittest.FunctionTestCase(
            lambda: test.config.test_format.execute(
                test, litConfig),
            description = test.getFullName())

    from LitTestCase import LitTestCase
    return unittest.TestSuite([LitTestCase(test, litConfig) for test in tests])

def main(builtinParameters = {}):
    # Bump the GIL check interval; it's more important to get any one thread
    # to a blocking operation (hopefully exec) than to try and unblock other
    # threads.
    #
    # FIXME: This is a hack.
    sys.setcheckinterval(1000)

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress output except on test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--no-tcl-as-sh", dest="useTclAsSh",
                     help="Don't run Tcl scripts using 'sh'",
                     action="store_false", default=True)
    group.add_option("", "--repeat", dest="repeatTests", metavar="N",
                     help="Repeat tests N times (for timing)",
                     action="store", default=None, type=int)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.configPrefix is not None:
        global gConfigName, gSiteConfigName
        gConfigName = '%s.cfg' % opts.configPrefix
        gSiteConfigName = '%s.site.cfg' % opts.configPrefix

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717). The bug has not
        # been observed with 2.5.2 and later, so only enable multiple threads
        # by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = Util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]),
                                    path = opts.path,
                                    quiet = opts.quiet,
                                    useValgrind = opts.useValgrind,
                                    valgrindLeakCheck = opts.valgrindLeakCheck,
                                    valgrindArgs = opts.valgrindArgs,
                                    useTclAsSh = opts.useTclAsSh,
                                    noExecute = opts.noExecute,
                                    debug = opts.debug,
                                    isWindows = (platform.system()=='Windows'),
                                    params = userParams)

    # Expand '@...' form in inputs.
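    # An argument of the form '@FILE' that does not name an existing path is
    # treated as a response file: each non-empty line of FILE is read as an
    # additional input (e.g. 'lit @failing-tests.txt').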
    actual_inputs = []
    for input in inputs:
        if os.path.exists(input) or not input.startswith('@'):
            actual_inputs.append(input)
        else:
            f = open(input[1:])
            try:
                for ln in f:
                    ln = ln.strip()
                    if ln:
                        actual_inputs.append(ln)
            finally:
                f.close()

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in actual_inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    if opts.showSuites:
        suitesAndTests = dict([(ts,[])
                               for ts,_ in testSuiteCache.values()
                               if ts])
        for t in tests:
            suitesAndTests[t.suite].append(t)

        print '-- Test Suites --'
        suitesAndTests = suitesAndTests.items()
        suitesAndTests.sort(key = lambda (ts,_): ts.name)
        for ts,ts_tests in suitesAndTests:
            print '  %s - %d tests' %(ts.name, len(ts_tests))
            print '    Source Root: %s' % ts.source_root
            print '    Exec Root  : %s' % ts.exec_root

    # Select and order the tests.
    numTotalTests = len(tests)
    if opts.shuffle:
        random.shuffle(tests)
    else:
        tests.sort(key = lambda t: t.getFullName())
    if opts.maxTests is not None:
        tests = tests[:opts.maxTests]

    extra = ''
    if len(tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra,
                                                      opts.numThreads)

    if opts.repeatTests:
        tests = [t.copyWithIndex(i)
                 for t in tests
                 for i in range(opts.repeatTests)]

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = ProgressBar.TerminalController()
                progressBar = ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print header
                progressBar = ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print header

    # Don't create more threads than tests.
    opts.numThreads = min(len(tests), opts.numThreads)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(tests), progressBar)
    provider = TestProvider(tests, opts.maxTime)
    runTests(opts.numThreads, litConfig, provider, display)
    display.finish()

    if not opts.quiet:
        print 'Testing Time: %.2fs'%(time.time() - startTime)

    # Update results for any tests which weren't run.
    for t in tests:
        if t.result is None:
            t.setResult(Test.UNRESOLVED, '', 0.0)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for t in tests:
        if t.result not in byCode:
            byCode[t.result] = []
        byCode[t.result].append(t)
        if t.result.isFailure:
            hasFailures = True

    # FIXME: Show unresolved and (optionally) unsupported tests.
    for title,code in (('Unexpected Passing Tests', Test.XPASS),
                       ('Failing Tests', Test.FAIL)):
        elts = byCode.get(code)
        if not elts:
            continue
        print '*'*20
        print '%s (%d):' % (title, len(elts))
        for t in elts:
            print '    %s' % t.getFullName()
        print

    if opts.timeTests:
        # Collate, in case we repeated tests.
        times = {}
        for t in tests:
            key = t.getFullName()
            times[key] = times.get(key, 0.) + t.elapsed

        byTime = list(times.items())
        byTime.sort(key = lambda (name,elapsed): elapsed)
        if byTime:
            Util.printHistogram(byTime, title='Tests')

    for name,code in (('Expected Passes    ', Test.PASS),
                      ('Expected Failures  ', Test.XFAIL),
                      ('Unsupported Tests  ', Test.UNSUPPORTED),
                      ('Unresolved Tests   ', Test.UNRESOLVED),
                      ('Unexpected Passes  ', Test.XPASS),
                      ('Unexpected Failures', Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print '  %s: %d' % (name,N)

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
        sys.exit(2)

    # Report the number of warnings seen during testing.
    if litConfig.numWarnings:
        print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()