#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

from __future__ import absolute_import
import os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
    """Console reporter: prints a result line per test and, optionally,
    drives a progress bar while the run is in flight."""

    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if not test.result.code.isFailure and \
                (self.opts.quiet or self.opts.succinct):
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

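# A minimal usage sketch for TestingProgressDisplay (hypothetical driver code;
# the real driver is main() below). `opts` is any object carrying the
# attributes read above: quiet, succinct, incremental, and showOutput.
#
#   display = TestingProgressDisplay(opts, numTests=len(tests))
#   for test in tests:
#       ...                      # execute the test, populating test.result
#       display.update(test)
#   display.finish()
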
def write_test_results(run, lit_config, testing_time, output_path):
    """Serialize the results of a completed run to `output_path` as JSON."""
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

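# For illustration only, the JSON produced above has roughly this shape (the
# values here are made up; 'metrics' appears only for tests that report them):
#
#   {
#     "__version__": [0, 3, 0],
#     "elapsed": 12.34,
#     "tests": [
#       {"name": "suite :: basic.txt", "code": "PASS",
#        "output": "", "elapsed": 0.01,
#        "metrics": {"compile_time": 0.5}}
#     ]
#   }
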
def update_incremental_cache(test):
    """Bump the mtime of a failing test's source file so that
    sort_by_incremental_cache() schedules it early on the next run."""
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run):
    """Order tests by descending mtime, so recently modified (and recently
    failed) tests run first."""
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            return -os.path.getmtime(fname)
        except os.error:
            return 0
    run.tests.sort(key=sortIndex)

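# How the two helpers above cooperate under --incremental: each failure bumps
# the mtime of the test's source file via update_incremental_cache(), and the
# next run's sort_by_incremental_cache() orders tests by descending mtime, so
# recently failed (or freshly edited) tests are scheduled first.
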
def main(builtinParameters = {}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress non-error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                     "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717). The bug has not
        # been seen with 2.5.2 and later, so only default to multiple threads
        # there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

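    # For example (hypothetical values):
    #   --param build_mode=Release  ->  userParams['build_mode'] == 'Release'
    #   --param check_all           ->  userParams['check_all'] == ''
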
    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

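    # A sketch of the --show-suites output (suite name and paths made up):
    #
    #   -- Test Suites --
    #     MySuite - 42 tests
    #       Source Root: /src/tests
    #       Exec Root  : /build/tests
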
    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra,
                                                        opts.numThreads)
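    # For example: "-- Testing: 150 of 300 tests, 8 threads --"; the " of N"
    # part appears only when --filter or --max-tests narrowed the selection.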

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code, []))
        if N:
            print('  %s: %d' % (name, N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

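# Exit status: 0 when every test passes, 1 when any test fails, and 2 on
# internal lit errors or keyboard interrupt (see the handlers in main()).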
if __name__ == '__main__':
    main()