[lit] Add --show-xfail flag to LIT.
[oota-llvm.git] / utils / lit / lit / main.py
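With this change, --show-xfail causes lit to print a result line for tests that fail as expected (XFAIL) even under -q/--quiet or -s/--succinct, mirroring the existing --show-unsupported option; XFAIL results are still tallied under 'Expected Failures' in the final summary. A typical invocation (assuming the usual llvm-lit wrapper) would be: llvm-lit -s --show-xfail <path-to-tests>.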
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

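# Renders per-test console output: advances the optional progress bar as tests
# complete and prints a result line for every failure (and, when
# --show-unsupported or --show-xfail is given, for UNSUPPORTED or XFAIL
# results as well).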
class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            (self.opts.show_unsupported and test.result.code.name == 'UNSUPPORTED') or \
            (self.opts.show_xfail and test.result.code.name == 'XFAIL') or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

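# Serialize the results of a completed run (result codes, output, elapsed
# times, and any per-test metrics) to a JSON file; used by the -o/--output
# option.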
def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

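# Incremental mode (-i) support: touch the source file of each failing test so
# that a later run can schedule recently failed tests first.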
def update_incremental_cache(test):
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

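# Run the most recently modified tests first; combined with
# update_incremental_cache above, this front-loads tests that failed last time.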
def sort_by_incremental_cache(run):
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            return -os.path.getmtime(fname)
        except:
            return 0
    run.tests.sort(key = lambda t: sortIndex(t))

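# Entry point: parse the command line, discover and order the tests, execute
# them, and print the per-category summary.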
def main(builtinParameters = {}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress non-error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                     "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1: http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable
        # multiple threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__=='__main__':
    main()
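
# Illustrative sketch only (not part of the upstream file): one way to drive
# this module programmatically with the new flag. It assumes the lit package
# is importable and uses a placeholder test path; shown commented out so it
# does not change this module's behavior when run.
#
#     import sys
#     import lit.main
#
#     # Equivalent to: llvm-lit -s --show-xfail <path-to-tests>
#     sys.argv = ['lit', '-s', '--show-xfail', 'path/to/tests']
#     lit.main.main()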