Lit: Rework r249161; Move RLIMIT_NPROC to main.py.
[oota-llvm.git] utils/lit/lit/main.py
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

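# Typical invocations (illustrative only; the test paths and parameter names
# below are examples, not part of this file):
#   lit -sv test/
#   lit -j 8 --param build_mode=Release test/SomeSuite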
from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
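    """Prints per-test result lines to stdout and drives the optional
    progress bar as tests complete."""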
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

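    # The JSON written below has roughly this shape (illustrative):
    #   { "__version__": [...], "elapsed": <seconds>,
    #     "tests": [ { "name": ..., "code": ..., "output": ..., "elapsed": ...,
    #                  "metrics": {...} } ] }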
    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

def update_incremental_cache(test):
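    # Touching the mtime of a failing test makes --incremental schedule it
    # first on the next run (see sort_by_incremental_cache below).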
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run):
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            return -os.path.getmtime(fname)
        except:
            return 0
    run.tests.sort(key = lambda t: sortIndex(t))

def main(builtinParameters = {}):
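    # builtinParameters is only read (it is copied into userParams below), so
    # the mutable default argument is not a problem here.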
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("-D", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user-defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress non-error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce the amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use the curses-based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
                     help=("Write XUnit-compatible XML test reports to the"
                           " specified file"), default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                     "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
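    # --use-processes and --use-threads write to the same dest; the one given
    # last on the command line wins.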
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python before 2.5.2 has a race condition that causes lit to always
        # fail with numThreads > 1 (http://bugs.python.org/issue1731717).
        # The bug has not been observed with 2.5.2 and later, so only enable
        # multiple threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user-defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
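        # e.g. "-D build_mode=Release" produces userParams['build_mode'] ==
        # 'Release'; a bare "-D flag" produces userParams['flag'] == ''
        # (parameter names here are illustrative).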
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for result_test in run.tests:
            if result_test.suite not in suitesAndTests:
                suitesAndTests[result_test.suite] = []
            suitesAndTests[result_test.suite].append(result_test)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [result_test for result_test in run.tests
                     if rex.search(result_test.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda result_test: result_test.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    # Because some tests use threads internally, and at least on Linux each
    # of these threads counts toward the current process limit, try to
    # raise the (soft) process limit so that tests don't fail due to
    # resource exhaustion.
    try:
        cpus = lit.util.detectCPUs()
        desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor

        # Import the resource module here inside this try block because it
        # will likely fail on Windows.
        import resource

        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
        desired_limit = min(desired_limit, max_procs_hard)

        if max_procs_soft < desired_limit:
            resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
            litConfig.note('raised the process limit from %d to %d' % \
                               (max_procs_soft, desired_limit))
    except:
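        # Best-effort only: the resource module is unavailable on some
        # platforms (e.g. Windows) and setrlimit may be refused; in either
        # case just continue without raising the limit.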
        pass

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED),
                       ('Unsupported Tests', lit.Test.UNSUPPORTED),
                       ('Expected Failing Tests', lit.Test.XFAIL)):
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
            continue
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Passes With Retry  ', lit.Test.FLAKYPASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    if opts.xunit_output_file:
        # Collect the tests, indexed by test suite
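        # The emitted XML has this overall shape (illustrative):
        #   <?xml version="1.0" encoding="UTF-8" ?>
        #   <testsuites>
        #     <testsuite name='suite-name' tests='N' failures='M'>
        #       ...one entry per test, from result_test.getJUnitXML()...
        #     </testsuite>
        #   </testsuites>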
        by_suite = {}
        for result_test in run.tests:
            suite = result_test.suite.config.name
            if suite not in by_suite:
                by_suite[suite] = {
                                   'passes'   : 0,
                                   'failures' : 0,
                                   'tests'    : [] }
            by_suite[suite]['tests'].append(result_test)
            if result_test.result.code.isFailure:
                by_suite[suite]['failures'] += 1
            else:
                by_suite[suite]['passes'] += 1
        xunit_output_file = open(opts.xunit_output_file, "w")
        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
        xunit_output_file.write("<testsuites>\n")
        for suite_name, suite in by_suite.items():
            safe_suite_name = suite_name.replace(".", "-")
            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
            xunit_output_file.write(" tests='" + str(suite['passes'] +
              suite['failures']) + "'")
            xunit_output_file.write(" failures='" + str(suite['failures']) +
              "'>\n")
            for result_test in suite['tests']:
                xunit_output_file.write(result_test.getJUnitXML() + "\n")
            xunit_output_file.write("</testsuite>\n")
        xunit_output_file.write("</testsuites>")
        xunit_output_file.close()

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

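    # Exit status: 1 if any test failed, 0 otherwise (2 was used above for
    # interrupts and configuration errors).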
    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()