[lit] Add support for multiprocessing, under --use-processes for now.
[oota-llvm.git] / utils / lit / lit / main.py
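With this change the parallel test workers can be run as separate processes rather than threads: for example, "lit -j 8 --use-processes <test-path>" opts into the new process-based execution, while "--use-threads" (the current default) keeps the existing threaded behaviour.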
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

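# Console progress reporter: prints one line per completed test (honoring
# --quiet and --succinct) and drives the optional curses-based progress bar.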
class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if not test.result.code.isFailure and \
                (self.opts.quiet or self.opts.succinct):
            return

        if self.progressBar:
            self.progressBar.clear()

        print('%s: %s (%d of %d)' % (test.result.code.name, test.getFullName(),
                                     self.completed, self.numTests))

        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        sys.stdout.flush()

def main(builtinParameters = {}):
    # Bump the GIL check interval; it's more important to get any one thread
    # to a blocking operation (hopefully exec) than to try and unblock other
    # threads.
    #
    # FIXME: This is a hack.
    sys.setcheckinterval(1000)

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
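    # Note: the two options below are complementary switches sharing the same
    # dest (useProcesses); whichever appears last on the command line wins, and
    # the default is the existing thread-based execution.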
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=False)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=False)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717). I haven't seen
        # this bug occur with 2.5.2 and later, so only enable multiple threads
        # by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
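    # Each --param entry is NAME=VAL; a bare NAME is stored with an empty
    # string value (e.g. --param foo=1 becomes userParams['foo'] == '1').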
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = (platform.system()=='Windows'),
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
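    # execute_tests runs the tests on numThreads parallel workers, stopping
    # early after maxTime seconds if given; with --use-processes (this change)
    # the workers are separate processes rather than threads.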
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    if not opts.quiet:
        print('Testing Time: %.2fs'%(time.time() - startTime))

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__=='__main__':
    main()