lit: When finding nested test suites, check first in the execpath in case there
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

import math, os, platform, random, re, sys, time, threading, traceback

import ProgressBar
import TestRunner
import Util

from TestingConfig import TestingConfig
import LitConfig
import Test

# FIXME: Rename to 'config.lit', 'site.lit', and 'local.lit' ?
kConfigName = 'lit.cfg'
kSiteConfigName = 'lit.site.cfg'
kLocalConfigName = 'lit.local.cfg'

class TestingProgressDisplay:
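    """Reports test results to the console as they complete.

    Output from multiple Tester threads is serialized with a lock, and an
    optional progress bar is kept up to date along the way.
    """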
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.lock = threading.Lock()
        self.progressBar = progressBar
        self.completed = 0

    def update(self, test):
        # Avoid locking overhead in quiet mode
        if self.opts.quiet and not test.result.isFailure:
            self.completed += 1
            return

        # Output lock.
        self.lock.acquire()
        try:
            self.handleUpdate(test)
        finally:
            self.lock.release()

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def handleUpdate(self, test):
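        """Print one completed test's result; caller holds the output lock."""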
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if self.opts.succinct and not test.result.isFailure:
            return

        if self.progressBar:
            self.progressBar.clear()

        print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
                                     self.completed, self.numTests)

        if test.result.isFailure and self.opts.showOutput:
            print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20)
            print test.output
            print "*" * 20

        sys.stdout.flush()

class TestProvider:
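    """Thread-safe source of the tests to run.

    get() hands out one test at a time and returns None once the tests are
    exhausted or the --max-time limit has passed.
    """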
    def __init__(self, tests, maxTime):
        self.maxTime = maxTime
        self.iter = iter(tests)
        self.lock = threading.Lock()
        self.startTime = time.time()

    def get(self):
        # Check if we have run out of time.
        if self.maxTime is not None:
            if time.time() - self.startTime > self.maxTime:
                return None

        # Otherwise take the next test.
        self.lock.acquire()
        try:
            item = self.iter.next()
        except StopIteration:
            item = None
        finally:
            self.lock.release()
        return item

class Tester(threading.Thread):
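    """Worker thread that pulls tests from a TestProvider and runs them."""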
    def __init__(self, litConfig, provider, display):
        threading.Thread.__init__(self)
        self.litConfig = litConfig
        self.provider = provider
        self.display = display

    def run(self):
        while True:
            item = self.provider.get()
            if item is None:
                break
            self.runTest(item)

    def runTest(self, test):
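        """Run one test via its suite's test format and report the result."""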
        result = None
        startTime = time.time()
        try:
            result, output = test.config.test_format.execute(test,
                                                             self.litConfig)
        except KeyboardInterrupt:
            # This is a sad hack. Unfortunately subprocess goes
            # bonkers with ctrl-c and we start forking merrily.
            print '\nCtrl-C detected, goodbye.'
            os.kill(0,9)
        except:
            if self.litConfig.debug:
                raise
            result = Test.UNRESOLVED
            output = 'Exception during script execution:\n'
            output += traceback.format_exc()
            output += '\n'
        elapsed = time.time() - startTime

        test.setResult(result, output, elapsed)
        self.display.update(test)

def dirContainsTestSuite(path):
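    """Return the site or suite config file path in 'path', or None if none."""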
    cfgpath = os.path.join(path, kSiteConfigName)
    if os.path.exists(cfgpath):
        return cfgpath
    cfgpath = os.path.join(path, kConfigName)
    if os.path.exists(cfgpath):
        return cfgpath

def getTestSuite(item, litConfig, cache):
    """getTestSuite(item, litConfig, cache) -> (suite, relative_path)

    Find the test suite containing @arg item.

    @retval (None, ...) - Indicates no test suite contains @arg item.
    @retval (suite, relative_path) - The suite that @arg item is in, and its
    relative path inside that suite.
    """
    def search1(path):
        # Check for a site config or a lit config.
        cfgpath = dirContainsTestSuite(path)

        # If we didn't find a config file, keep looking.
        if not cfgpath:
            parent,base = os.path.split(path)
            if parent == path:
                return (None, ())

            ts, relative = search(parent)
            return (ts, relative + (base,))

        # We found a config file, load it.
        if litConfig.debug:
            litConfig.note('loading suite config %r' % cfgpath)

        cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True)
        source_root = os.path.realpath(cfg.test_source_root or path)
        exec_root = os.path.realpath(cfg.test_exec_root or path)
        return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()

    def search(path):
        # Check for an already instantiated test suite.
        res = cache.get(path)
        if res is None:
            cache[path] = res = search1(path)
        return res

    # Canonicalize the path.
    item = os.path.realpath(item)

    # Skip files and virtual components.
    components = []
    while not os.path.isdir(item):
        parent,base = os.path.split(item)
        if parent == item:
            return (None, ())
        components.append(base)
        item = parent
    components.reverse()

    ts, relative = search(item)
    return ts, tuple(relative + tuple(components))

def getLocalConfig(ts, path_in_suite, litConfig, cache):
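    """Get the local configuration for 'path_in_suite', loading and caching
    lit.local.cfg files along the way (each chained to its parent config)."""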
    def search1(path_in_suite):
        # Get the parent config.
        if not path_in_suite:
            parent = ts.config
        else:
            parent = search(path_in_suite[:-1])

        # Load the local configuration.
        source_path = ts.getSourcePath(path_in_suite)
        cfgpath = os.path.join(source_path, kLocalConfigName)
        if litConfig.debug:
            litConfig.note('loading local config %r' % cfgpath)
        return TestingConfig.frompath(cfgpath, parent, litConfig,
                                      mustExist = False,
                                      config = parent.clone(cfgpath))

    def search(path_in_suite):
        key = (ts, path_in_suite)
        res = cache.get(key)
        if res is None:
            cache[key] = res = search1(path_in_suite)
        return res

    return search(path_in_suite)

def getTests(path, litConfig, testSuiteCache, localConfigCache):
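    """Find the test suite containing 'path' and return an iterable of the
    tests under it; returns an empty tuple if no suite is found."""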
    # Find the test suite for this input and its relative path.
    ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
    if ts is None:
        litConfig.warning('unable to find test suite for %r' % path)
        return ()

    if litConfig.debug:
        litConfig.note('resolved input %r to %r::%r' % (path, ts.name,
                                                        path_in_suite))

    return getTestsInSuite(ts, path_in_suite, litConfig,
                           testSuiteCache, localConfigCache)

def getTestsInSuite(ts, path_in_suite, litConfig,
                    testSuiteCache, localConfigCache):
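    """Yield the tests at or below 'path_in_suite', descending into
    subdirectories and recursing into any nested test suites found there."""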
    # Check that the source path exists (errors here are reported by the
    # caller).
    source_path = ts.getSourcePath(path_in_suite)
    if not os.path.exists(source_path):
        return

    # Check if the user named a test directly.
    if not os.path.isdir(source_path):
        lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
        yield Test.Test(ts, path_in_suite, lc)
        return

    # Otherwise we have a directory to search for tests, start by getting the
    # local configuration.
    lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)

    # Search for tests.
    for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
                                                  litConfig, lc):
        yield res

    # Search subdirectories.
    for filename in os.listdir(source_path):
        # FIXME: This doesn't belong here?
        if filename == 'Output' or filename in lc.excludes:
            continue

        # Ignore non-directories.
        file_sourcepath = os.path.join(source_path, filename)
        if not os.path.isdir(file_sourcepath):
            continue

        # Check for nested test suites, first in the execpath in case there is a
        # site configuration and then in the source path.
        file_execpath = ts.getExecPath(path_in_suite + (filename,))
        if dirContainsTestSuite(file_execpath):
            subiter = getTests(file_execpath, litConfig,
                               testSuiteCache, localConfigCache)
        elif dirContainsTestSuite(file_sourcepath):
            subiter = getTests(file_sourcepath, litConfig,
                               testSuiteCache, localConfigCache)
        else:
            # Otherwise, continue loading from inside this test suite.
            subiter = getTestsInSuite(ts, path_in_suite + (filename,),
                                      litConfig, testSuiteCache,
                                      localConfigCache)

        for res in subiter:
            yield res

def runTests(numThreads, litConfig, provider, display):
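    """Run all tests from 'provider' on 'numThreads' Tester threads."""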
    # If only using one testing thread, don't use threads at all; this lets us
    # profile, among other things.
    if numThreads == 1:
        t = Tester(litConfig, provider, display)
        t.run()
        return

    # Otherwise spin up the testing threads and wait for them to finish.
    testers = [Tester(litConfig, provider, display)
               for i in range(numThreads)]
    for t in testers:
        t.start()
    try:
        for t in testers:
            t.join()
    except KeyboardInterrupt:
        sys.exit(2)

def main():
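    """Parse options, discover the requested tests, run them, and summarize."""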
    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress output except for test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--no-tcl-as-sh", dest="useTclAsSh",
                     help="Don't run Tcl scripts using 'sh'",
                     action="store_false", default=True)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        opts.numThreads = Util.detectCPUs()

    inputs = args

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]),
                                    path = opts.path,
                                    quiet = opts.quiet,
                                    useValgrind = opts.useValgrind,
                                    valgrindArgs = opts.valgrindArgs,
                                    useTclAsSh = opts.useTclAsSh,
                                    noExecute = opts.noExecute,
                                    debug = opts.debug,
                                    isWindows = (platform.system()=='Windows'))

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache))
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    if opts.showSuites:
        suitesAndTests = dict([(ts,[])
                               for ts,_ in testSuiteCache.values()])
        for t in tests:
            suitesAndTests[t.suite].append(t)

        print '-- Test Suites --'
        suitesAndTests = suitesAndTests.items()
        suitesAndTests.sort(key = lambda (ts,_): ts.name)
        for ts,ts_tests in suitesAndTests:
            print '  %s - %d tests' % (ts.name, len(ts_tests))
            print '    Source Root: %s' % ts.source_root
            print '    Exec Root  : %s' % ts.exec_root

    # Select and order the tests.
    numTotalTests = len(tests)
    if opts.shuffle:
        random.shuffle(tests)
    else:
        tests.sort(key = lambda t: t.getFullName())
    if opts.maxTests is not None:
        tests = tests[:opts.maxTests]

    extra = ''
    if len(tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = ProgressBar.TerminalController()
                progressBar = ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print header
                progressBar = ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print header

    # Don't create more threads than tests.
    opts.numThreads = min(len(tests), opts.numThreads)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(tests), progressBar)
    provider = TestProvider(tests, opts.maxTime)
    runTests(opts.numThreads, litConfig, provider, display)
    display.finish()

    if not opts.quiet:
        print 'Testing Time: %.2fs'%(time.time() - startTime)

    # Update results for any tests which weren't run.
    for t in tests:
        if t.result is None:
            t.setResult(Test.UNRESOLVED, '', 0.0)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for t in tests:
        if t.result not in byCode:
            byCode[t.result] = []
        byCode[t.result].append(t)
        if t.result.isFailure:
            hasFailures = True

    # FIXME: Show unresolved and (optionally) unsupported tests.
    for title,code in (('Unexpected Passing Tests', Test.XPASS),
                       ('Failing Tests', Test.FAIL)):
        elts = byCode.get(code)
        if not elts:
            continue
        print '*'*20
        print '%s (%d):' % (title, len(elts))
        for t in elts:
            print '    %s' % t.getFullName()
        print

    if opts.timeTests:
        byTime = list(tests)
        byTime.sort(key = lambda t: t.elapsed)
        if byTime:
            Util.printHistogram([(t.getFullName(), t.elapsed) for t in byTime],
                                title='Tests')

    for name,code in (('Expected Passes    ', Test.PASS),
                      ('Expected Failures  ', Test.XFAIL),
                      ('Unsupported Tests  ', Test.UNSUPPORTED),
                      ('Unresolved Tests   ', Test.UNRESOLVED),
                      ('Unexpected Passes  ', Test.XPASS),
                      ('Unexpected Failures', Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print '  %s: %d' % (name,N)

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    # Bump the GIL check interval; it's more important to get any one thread to
    # a blocking operation (hopefully exec) than to try and unblock other
    # threads.
    import sys
    sys.setcheckinterval(1000)
    main()