Give lit a --xunit-xml-output option for saving results in xunit format
[oota-llvm.git] utils/lit/lit/main.py
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
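    """Console reporter for test progress.

    Prints a result line for each completed test, optionally shows failure
    output and metrics, and keeps the progress bar (if any) up to date.
    """
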
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
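    """Write a JSON summary of the test run to output_path.

    The report records the lit schema/version, the total elapsed time, and
    one entry per test with its name, result code, output, elapsed time, and
    any metrics.
    """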
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

def update_incremental_cache(test):
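    """Touch a failing test's file so --incremental runs it early next time."""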
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run):
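    """Order tests so that the most recently modified test files run first."""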
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            return -os.path.getmtime(fname)
        except OSError:
            return 0
    run.tests.sort(key=sortIndex)

def main(builtinParameters = {}):
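    """Parse the command line, discover tests, run them, and report results."""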
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress non-error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use the curses-based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
                     help=("Write XUnit-compatible XML test reports to the"
                           " specified file"), default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                     "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717). I haven't seen
        # this bug occur with 2.5.2 and later, so only enable multiple
        # threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name, val = entry, ''
        else:
            name, val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for result_test in run.tests:
            if result_test.suite not in suitesAndTests:
                suitesAndTests[result_test.suite] = []
            suitesAndTests[result_test.suite].append(result_test)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts, ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts, ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                opts.filter))
        run.tests = [result_test for result_test in run.tests
                     if rex.search(result_test.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda result_test: result_test.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (
        len(run.tests), extra, opts.numThreads)

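    # Choose how to report progress: a curses-style progress bar for succinct
    # output when the terminal supports it, a simple textual bar as a
    # fallback, or just the header line.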
    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title, code in (('Unexpected Passing Tests', lit.Test.XPASS),
                        ('Failing Tests', lit.Test.FAIL),
                        ('Unresolved Tests', lit.Test.UNRESOLVED),
                        ('Unsupported Tests', lit.Test.UNSUPPORTED),
                        ('Expected Failing Tests', lit.Test.XFAIL)):
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
            continue
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name, code in (('Expected Passes    ', lit.Test.PASS),
                       ('Expected Failures  ', lit.Test.XFAIL),
                       ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                       ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                       ('Unexpected Passes  ', lit.Test.XPASS),
                       ('Unexpected Failures', lit.Test.FAIL)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code, []))
        if N:
            print('  %s: %d' % (name, N))

    if opts.xunit_output_file:
        # Collect the tests, indexed by test suite
        by_suite = {}
        for result_test in run.tests:
            suite = result_test.suite.config.name
            if suite not in by_suite:
                by_suite[suite] = {
                    'passes'   : 0,
                    'failures' : 0,
                    'tests'    : [] }
            by_suite[suite]['tests'].append(result_test)
            if result_test.result.code.isFailure:
                by_suite[suite]['failures'] += 1
            else:
                by_suite[suite]['passes'] += 1
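        # Emit one <testsuite> element per lit test suite; each test
        # contributes its own XML fragment via getJUnitXML().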
        xunit_output_file = open(opts.xunit_output_file, "w")
        xunit_output_file.write('<?xml version="1.0" encoding="UTF-8" ?>\n')
        xunit_output_file.write("<testsuites>\n")
        for suite_name, suite in by_suite.items():
            xunit_output_file.write("<testsuite name='" + suite_name + "'")
            xunit_output_file.write(" tests='" + str(suite['passes'] +
                suite['failures']) + "'")
            xunit_output_file.write(" failures='" + str(suite['failures']) +
                "'>\n")
            for result_test in suite['tests']:
                xunit_output_file.write(result_test.getJUnitXML() + "\n")
            xunit_output_file.write("</testsuite>\n")
        xunit_output_file.write("</testsuites>")
        xunit_output_file.close()

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()