[LIT] Move display of unsupported and xfail tests to summary.
[oota-llvm.git] / utils / lit / lit / main.py
1 #!/usr/bin/env python
2
3 """
4 lit - LLVM Integrated Tester.
5
6 See lit.pod for more information.
7 """
8
9 from __future__ import absolute_import
10 import math, os, platform, random, re, sys, time
11
12 import lit.ProgressBar
13 import lit.LitConfig
14 import lit.Test
15 import lit.run
16 import lit.util
17 import lit.discovery
18
class TestingProgressDisplay(object):
    """Renders per-test progress to stdout as results arrive.

    Output is shaped by the command line options: failures are always
    shown, other results only when neither --quiet nor --succinct was
    given, and an optional curses progress bar is kept up to date.
    """

    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        # Count of test results received so far.
        self.completed = 0

    def finish(self):
        # Tear down whatever the progress display left on screen.
        if self.progressBar:
            self.progressBar.clear()
        elif not self.opts.quiet and self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        # In incremental mode, touch failing tests so they run first next time.
        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            fraction = float(self.completed) / self.numTests
            self.progressBar.update(fraction, test.getFullName())

        # Failures are always reported; everything else only when neither
        # --quiet nor --succinct is in effect.
        if not (test.result.code.isFailure or
                (not self.opts.quiet and not self.opts.succinct)):
            return

        if self.progressBar:
            self.progressBar.clear()

        # One-line result summary.
        print('%s: %s (%d of %d)' % (test.result.code.name,
                                     test.getFullName(),
                                     self.completed, self.numTests))

        # Full failure output, only on request (--verbose).
        if test.result.code.isFailure and self.opts.showOutput:
            banner = '*' * 20
            print("%s TEST '%s' FAILED %s" % (banner, test.getFullName(),
                                              banner))
            print(test.result.output)
            print("*" * 20)

        # Any metrics the test reported, sorted by name.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            for metric_name, value in sorted(test.result.metrics.items()):
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()
76
def write_test_results(run, lit_config, testing_time, output_path):
    """Write a JSON summary of the completed test run to output_path.

    The document records the lit version (as a schema version), the total
    elapsed testing time, and one entry per test: name, result code name,
    captured output, elapsed time, and any reported metrics.
    """
    try:
        import json
    except ImportError:
        # json entered the standard library in Python 2.6.
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output; the context manager guarantees the file is closed
    # even if serialization fails.
    with open(output_path, 'w') as f:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
115
def update_incremental_cache(test):
    """Touch the file of a failing test.

    Bumping the mtime makes sort_by_incremental_cache schedule the test
    early on the next incremental (-i) run.
    """
    if test.result.code.isFailure:
        os.utime(test.getFilePath(), None)
121
def sort_by_incremental_cache(run):
    """Order run.tests most-recently-modified first.

    Combined with update_incremental_cache, this runs recently failed (or
    recently edited) tests before the rest. Tests whose file is missing or
    unreadable sort after all existing files.
    """
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            # Negate so that larger (newer) mtimes sort first.
            return -os.path.getmtime(fname)
        except OSError:
            # Was a bare except; only stat failures should fall through
            # here, not KeyboardInterrupt/SystemExit.
            return 0
    run.tests.sort(key=sortIndex)
130
def main(builtinParameters = {}):
    """Command line entry point for lit.

    builtinParameters seeds the user-defined parameter dictionary; it is
    copied into a fresh dict below (never mutated), so the shared mutable
    default is safe here.
    """
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                     "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                      help="Enable debugging (for 'lit' development)",
                      action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                      help="Show discovered test suites",
                      action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                      help="Show all discovered tests",
                      action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                      help="Run tests in parallel with processes (not threads)",
                      action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                      help="Run tests in parallel with threads (not processes)",
                      action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1: http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable
        # multiple threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            # Was a bare except; only a bad pattern should reach here.
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                # No capable terminal; fall back to a simple spinner.
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED),
                       ('Unsupported Tests', lit.Test.UNSUPPORTED),
                       ('Expected Failing Tests', lit.Test.XFAIL)):
        # XFAIL and UNSUPPORTED lists appear only on explicit request.
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
            continue
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    # Print the summary counts for each result kind.
    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)
437
if __name__ == '__main__':
    main()