[lit] Use multiprocessing based parallelism by default, on Unix.
[oota-llvm.git] / utils / lit / lit / main.py
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

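# Example invocation (the options are defined in main() below; the wrapper
# script name and test path here are illustrative only):
#
#   lit -sv --filter 'X86' /path/to/test/suite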
from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

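# Console reporter: update() is called once per completed test and prints the
# result line (plus failure output and metrics when requested); finish() clears
# any remaining progress bar state.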
class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if not test.result.code.isFailure and \
                (self.opts.quiet or self.opts.succinct):
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

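# Serialize the run's results to a JSON file: a top-level dict with
# '__version__' (the lit version, used as a schema version), 'elapsed', and a
# 'tests' list of per-test records (name, result code, output, elapsed time,
# and optional metrics).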
def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

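# Entry point: parse the command line, discover tests for the given inputs,
# select and order them, execute them in parallel (threads or processes), then
# print a summary and exit with a status reflecting any failures.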
def main(builtinParameters = {}):
    # Bump the GIL check interval; it's more important to get any one thread to
    # a blocking operation (hopefully exec) than to try and unblock other threads.
    #
    # FIXME: This is a hack.
    sys.setcheckinterval(1000)

    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows
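    # Multiprocessing-based parallelism is the default everywhere except
    # Windows, where threads remain the default; either choice can be
    # overridden with the --use-threads / --use-processes options below.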

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress output except for test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=not useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Older Python versions have a race condition causing lit to always
        # fail with numThreads > 1 (http://bugs.python.org/issue1731717).
        # That bug has not been seen with 2.5.2 and later, so only enable
        # multiple threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name, val = entry, ''
        else:
            name, val = entry.split('=', 1)
        userParams[name] = val

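    # For example, "--param build_mode=Release" yields
    # userParams['build_mode'] == 'Release', while a bare "--param foo" yields
    # userParams['foo'] == ''. (The parameter names here are illustrative.)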
    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

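    # run.tests now holds one test object per discovered test; the selection,
    # ordering, execution, and reporting below all operate on that list.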
    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra,
                                                        opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

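    # Exit status: 2 if lit itself reported errors, 1 if any test failed,
    # 0 otherwise. (A KeyboardInterrupt during execution above also exits
    # with 2.)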
    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()