[lit] Factor out a results consumer interface for test execution.
[oota-llvm.git] / utils / lit / lit / main.py
1 #!/usr/bin/env python
2
3 """
4 lit - LLVM Integrated Tester.
5
6 See lit.pod for more information.
7 """
8
9 from __future__ import absolute_import
10 import math, os, platform, random, re, sys, time, threading, traceback
11
12 import lit.ProgressBar
13 import lit.LitConfig
14 import lit.Test
15 import lit.run
16 import lit.util
17
18 import lit.discovery
19
class TestingProgressDisplay:
    """Thread-safe console reporter for test completion events.

    A single instance is shared by all worker threads. ``update`` is called
    once per finished test; ``finish`` is called once after all workers have
    joined, to clean up any progress bar output.
    """

    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        # Guards self.completed and all console output.
        self.lock = threading.Lock()
        self.progressBar = progressBar
        self.completed = 0

    def update(self, test):
        # In quiet mode non-failures produce no output, so skip the display
        # machinery entirely -- but the completion count must still be bumped
        # under the lock: '+=' is a read-modify-write and multiple worker
        # threads call update() concurrently, so an unlocked increment can
        # lose counts.
        if self.opts.quiet and not test.result.code.isFailure:
            with self.lock:
                self.completed += 1
            return

        # Serialize the counter update and the console output.
        with self.lock:
            self.handleUpdate(test)

    def finish(self):
        # Leave the terminal in a sane state: clear any progress bar, or
        # terminate the succinct single-line output with a newline.
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def handleUpdate(self, test):
        # Caller holds self.lock.
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        # In succinct mode, only failures get a full result line.
        if self.opts.succinct and not test.result.code.isFailure:
            return

        if self.progressBar:
            self.progressBar.clear()

        print('%s: %s (%d of %d)' % (test.result.code.name, test.getFullName(),
                                     self.completed, self.numTests))

        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        sys.stdout.flush()
class TestProvider:
    """Thread-safe source of test indices for worker threads.

    ``get`` hands out successive indices into the test list and returns
    None once the tests are exhausted, the time budget (``maxTime``
    seconds, if given) has elapsed, or the run has been canceled.
    """

    def __init__(self, tests, maxTime):
        self.maxTime = maxTime
        self.iter = iter(range(len(tests)))
        self.lock = threading.Lock()
        self.startTime = time.time()
        self.canceled = False

    def cancel(self):
        # Make all subsequent get() calls return None so workers wind down.
        with self.lock:
            self.canceled = True

    def get(self):
        # Check if we have run out of time.
        if self.maxTime is not None:
            if time.time() - self.startTime > self.maxTime:
                return None

        # Otherwise take the next test index. The 'with' statement releases
        # the lock even on an unexpected exception (the old manual
        # acquire/release could deadlock other workers in that case).
        with self.lock:
            if self.canceled:
                return None
            # next() with a default replaces the obscure for/else idiom and
            # yields None cleanly when the iterator is exhausted.
            return next(self.iter, None)
103
class Tester(object):
    """Worker loop: repeatedly pull a test index from the provider, execute
    that test, and hand the result to the consumer."""

    def __init__(self, run_instance, provider, consumer):
        self.run_instance = run_instance
        self.provider = provider
        self.consumer = consumer

    def run(self):
        # Drain the provider; a None index signals exhaustion/cancellation.
        index = self.provider.get()
        while index is not None:
            self.runTest(index)
            index = self.provider.get()
        # Let the consumer know this worker is done.
        self.consumer.taskFinished()

    def runTest(self, test_index):
        test = self.run_instance.tests[test_index]
        try:
            self.run_instance.execute_test(test)
        except KeyboardInterrupt:
            # This is a sad hack. Unfortunately subprocess goes
            # bonkers with ctrl-c and we start forking merrily.
            print('\nCtrl-C detected, goodbye.')
            os.kill(0,9)
        self.consumer.update(test_index, test)
128
class ThreadResultsConsumer(object):
    """Results consumer for thread-based execution: each result is forwarded
    straight to the display object, so there is no separate results loop."""

    def __init__(self, display):
        self.display = display

    def update(self, test_index, test):
        # The index is not needed here; the display only wants the test.
        self.display.update(test)

    def taskFinished(self):
        # Workers report synchronously through update(); nothing to flush.
        pass

    def handleResults(self):
        # All results are handled inline by update(); no draining required.
        pass
def run_one_tester(run, provider, display):
    """Drive a single Tester to completion on the calling thread."""
    Tester(run, provider, display).run()
def runTests(numThreads, run, provider, display):
    """Execute the tests handed out by 'provider' on 'numThreads' threads,
    reporting progress through 'display'."""
    consumer = ThreadResultsConsumer(display)

    # A single testing thread runs inline on this thread rather than via
    # tasks; this lets us profile, among other things.
    if numThreads == 1:
        run_one_tester(run, provider, consumer)
        return

    # Spawn one worker per requested thread of parallelism.
    workers = []
    for _ in range(numThreads):
        worker = threading.Thread(target=run_one_tester,
                                  args=(run, provider, consumer))
        workers.append(worker)
        worker.start()

    # Allow the consumer to handle results, if necessary.
    consumer.handleResults()

    # Wait for every worker to drain.
    for worker in workers:
        worker.join()
def main(builtinParameters = {}):
    """Command line entry point for lit.

    Parses options, discovers tests under the input paths, runs them on
    worker threads, and prints a categorized summary.  Exits with status 1
    on test failures, 2 on lit-level errors or interrupt, 0 otherwise.

    Note: the mutable default for 'builtinParameters' is only ever read
    (copied into a fresh dict below), so the shared-default pitfall does
    not apply here.
    """
    # Bump the GIL check interval, its more important to get any one thread to a
    # blocking operation (hopefully exec) than to try and unblock other threads.
    #
    # FIXME: This is a hack.
    sys.setcheckinterval(1000)

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                      help="Enable debugging (for 'lit' development)",
                      action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                      help="Show discovered test suites",
                      action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                      help="Show all discovered tests",
                      action="store_true", default=False)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717). The bug has not
        # been seen with 2.5.2 and later, so only enable multiple threads by
        # default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        # Entries without '=' define the parameter with an empty value.
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = (platform.system()=='Windows'),
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        # Catch only invalid-pattern errors; the old bare 'except:' would
        # also have swallowed KeyboardInterrupt and SystemExit.
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            # Fall back to a simple progress bar if the terminal does not
            # support the curses-based one.
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    provider = TestProvider(run.tests, opts.maxTime)

    # On Windows (when pywin32 is available), translate console Ctrl events
    # into a provider cancellation so workers wind down cleanly.
    try:
      import win32api
    except ImportError:
      pass
    else:
      def console_ctrl_handler(type):
        provider.cancel()
        return True
      win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)
    try:
        runTests(opts.numThreads, run, provider, display)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    if not opts.quiet:
        print('Testing Time: %.2fs'%(time.time() - startTime))

    # Update results for any tests which weren't run.
    for test in run.tests:
        if test.result is None:
            test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, '', 0.0))

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    # Print the summary counts; in quiet mode only failure categories.
    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)
454
if __name__ == '__main__':
    main()