lit: Add '-a' option to display commands+output of all tests
[oota-llvm.git] utils/lit/lit/main.py
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

from __future__ import absolute_import
import os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
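    """Print per-test progress as results arrive.

    Depending on the options this updates a progress bar, prints a result
    line for each test, and dumps the test's commands and output for
    failures (or for every test when --show-all/-a is given).
    """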
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
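        """Record a completed test and print its result according to the options."""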
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            self.opts.showAllOutput or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if (test.result.code.isFailure and self.opts.showOutput) or \
           self.opts.showAllOutput:
            if test.result.code.isFailure:
                print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                                  '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
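    """Write the results of the run as JSON to output_path.

    The top-level object records the lit version ('__version__') and the
    total elapsed time, plus one entry per test with its name, result code,
    captured output, elapsed time, and any reported metrics.
    """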
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

def update_incremental_cache(test):
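    """Touch the source file of a failing test so --incremental runs it early
    on the next invocation."""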
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run):
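    """Order run.tests so the most recently modified test files come first
    (used by --incremental)."""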
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            return -os.path.getmtime(fname)
        except OSError:
            return 0
    run.tests.sort(key = lambda t: sortIndex(t))

def main(builtinParameters = {}):
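    """Parse options, discover and run the tests, then print a summary.

    Exits with status 0 on success, 1 if any test failed, and 2 on internal
    errors or interruption.
    """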
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("-D", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show test output for failures",
                     action="store_true", default=False)
    group.add_option("-a", "--show-all", dest="showAllOutput",
                     help="Display all commandlines and output",
                     action="store_true", default=False)
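    # e.g. 'lit -a <path>' prints the executed commands and their output for
    # every reported test, not just for the failures shown by '-v'.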
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
                     help=("Write XUnit-compatible XML test reports to the"
                           " specified file"), default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                     "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
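    # e.g. --filter 'CodeGen/X86' runs only the tests whose full names match
    # that regular expression (it is applied with re.search below).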
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717). The bug has not
        # been observed with 2.5.2 and later, so only enable multiple threads
        # by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
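    # e.g. '-D build_mode=Release' yields userParams['build_mode'] == 'Release';
    # a bare '-D foo' (no '=') yields userParams['foo'] == ''.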
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for result_test in run.tests:
            if result_test.suite not in suitesAndTests:
                suitesAndTests[result_test.suite] = []
            suitesAndTests[result_test.suite].append(result_test)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [result_test for result_test in run.tests
                     if rex.search(result_test.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda result_test: result_test.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    # Because some tests use threads internally, and at least on Linux each
    # of these threads counts toward the current process limit, try to
    # raise the (soft) process limit so that tests don't fail due to
    # resource exhaustion.
    try:
        cpus = lit.util.detectCPUs()
        desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor

        # Import the resource module here inside this try block because it
        # will likely fail on Windows.
        import resource

        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
        desired_limit = min(desired_limit, max_procs_hard)

        if max_procs_soft < desired_limit:
            resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
            litConfig.note('raised the process limit from %d to %d' % \
                               (max_procs_soft, desired_limit))
    except Exception:
        pass

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED),
                       ('Unsupported Tests', lit.Test.UNSUPPORTED),
                       ('Expected Failing Tests', lit.Test.XFAIL)):
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
            continue
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Passes With Retry  ', lit.Test.FLAKYPASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    if opts.xunit_output_file:
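        # Emits a minimal XUnit/JUnit-style report: one <testsuite> element per
        # lit test suite (with '.' in suite names replaced by '-'), wrapping the
        # per-test XML returned by result_test.getJUnitXML().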
        # Collect the tests, indexed by test suite
        by_suite = {}
        for result_test in run.tests:
            suite = result_test.suite.config.name
            if suite not in by_suite:
                by_suite[suite] = {
                                   'passes'   : 0,
                                   'failures' : 0,
                                   'tests'    : [] }
            by_suite[suite]['tests'].append(result_test)
            if result_test.result.code.isFailure:
                by_suite[suite]['failures'] += 1
            else:
                by_suite[suite]['passes'] += 1
        xunit_output_file = open(opts.xunit_output_file, "w")
        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
        xunit_output_file.write("<testsuites>\n")
        for suite_name, suite in by_suite.items():
            safe_suite_name = suite_name.replace(".", "-")
            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
            xunit_output_file.write(" tests='" + str(suite['passes'] +
              suite['failures']) + "'")
            xunit_output_file.write(" failures='" + str(suite['failures']) +
              "'>\n")
            for result_test in suite['tests']:
                xunit_output_file.write(result_test.getJUnitXML() + "\n")
            xunit_output_file.write("</testsuite>\n")
        xunit_output_file.write("</testsuites>")
        xunit_output_file.close()

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__=='__main__':
    main()