[lit] Add an --output option, for writing results in a machine readable form.
[oota-llvm.git] / utils / lit / lit / main.py
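For illustration, an invocation using the new option might look like this (the test path and output file name are hypothetical):

    lit -sv --output results.json path/to/tests

In addition to the normal console output, this writes a JSON summary of the run to results.json; see write_test_results() in the listing below for the exact fields.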
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if not test.result.code.isFailure and \
                (self.opts.quiet or self.opts.succinct):
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

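# For illustration only: the JSON document produced above has roughly the
# following shape. The field names come from write_test_results(); the test
# name, metric name, version tuple, and all values shown here are
# hypothetical, and the "metrics" key is present only when a test reported
# metrics. Keys are sorted because json.dump() is called with sort_keys=True.
#
#   {
#     "__version__": [0, 3, 0],
#     "elapsed": 12.34,
#     "tests": [
#       {
#         "code": "PASS",
#         "elapsed": 0.02,
#         "metrics": { "compile_time": 1.5 },
#         "name": "MySuite :: subdir/basic.txt",
#         "output": ""
#       }
#     ]
#   }
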
def main(builtinParameters = {}):
    # Bump the GIL check interval; it's more important to get any one thread
    # to a blocking operation (hopefully exec) than to try to unblock other
    # threads.
    #
    # FIXME: This is a hack.
    sys.setcheckinterval(1000)

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress all output except for test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=False)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=False)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')
    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1: http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable
        # multiple threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = (platform.system()=='Windows'),
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra,
                                                        opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()