[lit] Stop hacking the GIL check interval.
[oota-llvm.git] utils/lit/lit/main.py
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

from __future__ import absolute_import
import os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
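    """Console reporter for a test run.

    A sketch of the lifecycle, as used by main() below: construct one display
    for the run, call update() once per completed test, and call finish()
    when the run ends.
    """
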
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if not test.result.code.isFailure and \
                (self.opts.quiet or self.opts.succinct):
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
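        # e.g. "PASS: MySuite :: subdir/basic.ll (3 of 64)"; the suite and
        # test names here are purely illustrative.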
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name': test.getFullName(),
            'code': test.result.code.name,
            'output': test.result.output,
            'elapsed': test.result.elapsed}

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

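# A sketch of the JSON document write_test_results() emits (all values below
# are illustrative; 'metrics' is present only for tests that reported some):
#
#   {
#     "__version__": [0, 4, 0],
#     "elapsed": 12.34,
#     "tests": [
#       {
#         "code": "PASS",
#         "elapsed": 0.25,
#         "name": "MySuite :: subdir/basic.ll",
#         "output": ""
#       }
#     ]
#   }
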
def main(builtinParameters={}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress output except for test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses-based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=not useProcessesIsDefault)
    parser.add_option_group(group)

    opts, args = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python < 2.5.2 has a race condition causing lit to always fail with
        # numThreads > 1 (http://bugs.python.org/issue1731717); the bug has
        # not been seen with 2.5.2 and later, so only enable multiple threads
        # by default there. sys.hexversion packs (major, minor, micro,
        # release level, serial) into one integer, so 0x2050200 is 2.5.2.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
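    # e.g. --param build_mode=Release yields {'build_mode': 'Release'}, and a
    # bare --param foo yields {'foo': ''} (names here are illustrative).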
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name, val = entry, ''
        else:
            name, val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname=os.path.basename(sys.argv[0]),
        path=opts.path,
        quiet=opts.quiet,
        useValgrind=opts.useValgrind,
        valgrindLeakCheck=opts.valgrindLeakCheck,
        valgrindArgs=opts.valgrindArgs,
        noExecute=opts.noExecute,
        debug=opts.debug,
        isWindows=isWindows,
        params=userParams,
        config_prefix=opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key=lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts, ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts, ts_tests in suitesAndTests:
                ts_tests.sort(key=lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    else:
        run.tests.sort(key=lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra,
                                                        opts.numThreads)
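    # e.g. "-- Testing: 400 of 500 tests, 8 threads --" when a filter or
    # --max-tests trimmed the run, else "-- Testing: 500 tests, 8 threads --".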

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title, code in (('Unexpected Passing Tests', lit.Test.XPASS),
                        ('Failing Tests', lit.Test.FAIL),
                        ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name, code in (('Expected Passes    ', lit.Test.PASS),
                       ('Expected Failures  ', lit.Test.XFAIL),
                       ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                       ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                       ('Unexpected Passes  ', lit.Test.XPASS),
                       ('Unexpected Failures', lit.Test.FAIL)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code, []))
        if N:
            print('  %s: %d' % (name, N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()
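
# A typical invocation, for illustration only (the test path is made up):
#
#   lit -sv --param build_mode=Release /path/to/test/dir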