Prospective Python 3 fix for r198150
[oota-llvm.git] / utils / lit / lit / main.py
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if not test.result.code.isFailure and \
                (self.opts.quiet or self.opts.succinct):
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

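# Sketch of the JSON document emitted by write_test_results() below. The keys
# come from the code in that function; the values shown here are purely
# illustrative, not taken from a real run:
#
#   {
#     "__version__": [0, 1, 0],
#     "elapsed": 12.34,
#     "tests": [
#       { "name": "suite :: some/test",
#         "code": "PASS",
#         "output": "...",
#         "elapsed": 0.05,
#         "metrics": { "some_metric": 1 } }
#     ]
#   }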
def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

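# Incremental mode (-i/--incremental): when a test fails, the mtime of its
# source file is bumped below; on the next run, sort_by_incremental_cache()
# orders tests newest-mtime-first, so recently failing (or recently edited)
# tests execute before the rest.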
def update_incremental_cache(test):
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run, litConfig):
    def sortIndex(test):
        index = 0
        fname = test.getFilePath()
        try:
            index = -os.path.getmtime(fname)
        except OSError as e:
            if litConfig.debug:
                litConfig.note(e)
        return index
    run.tests.sort(key = lambda t: sortIndex(t))

def main(builtinParameters = {}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                     "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

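    # A few illustrative invocations (the program name, paths, and parameter
    # names below are hypothetical, not taken from this file):
    #   lit.py -sv /path/to/tests
    #   lit.py -j 8 --filter 'CodeGen' /path/to/tests
    #   lit.py --param build_mode=Release --output results.json /path/to/tests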
    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717). I haven't seen
        # this bug occur with 2.5.2 and later, so only enable multiple threads
        # by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val
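    # e.g. "--param build_mode=Release" yields userParams['build_mode'] ==
    # 'Release', while a bare "--param foo" maps 'foo' to the empty string
    # (the parameter names here are illustrative).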

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]
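    # Note: the regex is matched against test.getFullName(), i.e. the same
    # full test name shown in the result lines, not the on-disk file path.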

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run, litConfig)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

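    # Exit status: 0 when everything passed, 1 when any test failed, and 2
    # when lit itself reported errors or was interrupted (see the earlier
    # sys.exit(2) calls).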
    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()