#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""
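#
# Example invocation (illustrative; run `lit --help` for the full option list,
# and note that parameter names such as `build_mode` are suite-specific):
#
#   lit -sv -j 8 -Dbuild_mode=Release path/to/tests
#
# -s keeps progress output succinct, -v prints the output of failing tests,
# -j sets the number of worker threads, and -D forwards NAME=VAL parameters
# to the test suite configuration.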

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
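    """Console reporter for test progress and results.

    update() is called once per completed test; depending on the options it
    drives an optional progress bar and prints the result line, any failure
    output, and any test metrics. finish() cleans up the display at the end
    of the run.
    """
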
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            self.opts.showAllOutput or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if (test.result.code.isFailure and self.opts.showOutput) or \
           self.opts.showAllOutput:
            if test.result.code.isFailure:
                print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                                  '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
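    """Write the results of `run` as JSON to `output_path`.

    The document has roughly the following shape (illustrative values):

        {
          "__version__": [0, 4, 0],
          "elapsed": 12.3,
          "tests": [
            { "name": "suite :: path/to/test",
              "code": "PASS",
              "output": "...",
              "elapsed": 0.01,
              "metrics": { "some_metric": 0.5 } }
          ]
        }

    The 'metrics' key is only present for tests that reported metrics.
    """
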
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

def update_incremental_cache(test):
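    """Mark a failing test's file as recently modified.

    Bumping the file's mtime makes --incremental schedule it early on the
    next run (see sort_by_incremental_cache).
    """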
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run):
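    """Order the tests so that the most recently modified files run first."""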
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            return -os.path.getmtime(fname)
        except:
            # If the mtime can't be read, sort this test after all tests with
            # a known mtime.
            return 0
    run.tests.sort(key = lambda t: sortIndex(t))

def main(builtinParameters = {}):
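    """Command-line entry point: parse options, discover tests, run them, and
    report the results. `builtinParameters` provides default values for the
    user-defined parameters (-D/--param)."""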
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("-D", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress all output except for test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show test output for failures",
                     action="store_true", default=False)
    group.add_option("-a", "--show-all", dest="showAllOutput",
                     help="Display all command lines and output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
                     help=("Write XUnit-compatible XML test reports to the"
                           " specified file"), default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                     "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python versions before 2.5.2 have a race condition that causes lit
        # to reliably fail with numThreads > 1
        # (http://bugs.python.org/issue1731717), so only enable multiple
        # threads by default on 2.5.2 and later.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for result_test in run.tests:
            if result_test.suite not in suitesAndTests:
                suitesAndTests[result_test.suite] = []
            suitesAndTests[result_test.suite].append(result_test)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [result_test for result_test in run.tests
                     if rex.search(result_test.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda result_test: result_test.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    # Because some tests use threads internally, and at least on Linux each
    # of these threads counts toward the current process limit, try to
    # raise the (soft) process limit so that tests don't fail due to
    # resource exhaustion.
    try:
        cpus = lit.util.detectCPUs()
        desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor

        # Import the resource module here inside this try block because it
        # will likely fail on Windows.
        import resource

        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
        desired_limit = min(desired_limit, max_procs_hard)

        if max_procs_soft < desired_limit:
            resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
            litConfig.note('raised the process limit from %d to %d' % \
                               (max_procs_soft, desired_limit))
    except:
        pass

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED),
                       ('Unsupported Tests', lit.Test.UNSUPPORTED),
                       ('Expected Failing Tests', lit.Test.XFAIL)):
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
            continue
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Passes With Retry  ', lit.Test.FLAKYPASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    if opts.xunit_output_file:
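        # Emit a minimal JUnit/XUnit-style report with one <testsuite> element
        # per lit test suite. Rough shape of the output (illustrative):
        #
        #   <?xml version="1.0" encoding="UTF-8" ?>
        #   <testsuites>
        #   <testsuite name='some-suite' tests='N' failures='M'>
        #   ...one element per test, produced by result_test.getJUnitXML()...
        #   </testsuite>
        #   </testsuites>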
        # Collect the tests, indexed by test suite
        by_suite = {}
        for result_test in run.tests:
            suite = result_test.suite.config.name
            if suite not in by_suite:
                by_suite[suite] = {
                                   'passes'   : 0,
                                   'failures' : 0,
                                   'tests'    : [] }
            by_suite[suite]['tests'].append(result_test)
            if result_test.result.code.isFailure:
                by_suite[suite]['failures'] += 1
            else:
                by_suite[suite]['passes'] += 1
        xunit_output_file = open(opts.xunit_output_file, "w")
        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
        xunit_output_file.write("<testsuites>\n")
        for suite_name, suite in by_suite.items():
            safe_suite_name = suite_name.replace(".", "-")
            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
            xunit_output_file.write(
                " tests='" + str(suite['passes'] + suite['failures']) + "'")
            xunit_output_file.write(
                " failures='" + str(suite['failures']) + "'>\n")
            for result_test in suite['tests']:
                xunit_output_file.write(result_test.getJUnitXML() + "\n")
            xunit_output_file.write("</testsuite>\n")
        xunit_output_file.write("</testsuites>")
        xunit_output_file.close()

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()