[lit] Implement support of per test timeout in lit.
utils/lit/lit/main.py
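This change teaches lit a per test timeout: a new --timeout=N option (stored as maxIndividualTestTime and carried on the LitConfig) limits how long any single test may run, with 0 meaning no limit, and timed-out tests are reported under a new TIMEOUT result category. As a rough usage sketch with hypothetical paths, an invocation such as "llvm-lit --timeout=60 -sv test/Feature" would be expected to abort any individual test that runs longer than 60 seconds and list it under 'Individual Timeouts' in the summary printed at the end of main() below.
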
1 #!/usr/bin/env python
2
3 """
4 lit - LLVM Integrated Tester.
5
6 See lit.pod for more information.
7 """
8
9 from __future__ import absolute_import
10 import math, os, platform, random, re, sys, time
11
12 import lit.ProgressBar
13 import lit.LitConfig
14 import lit.Test
15 import lit.run
16 import lit.util
17 import lit.discovery
18
19 class TestingProgressDisplay(object):
20     def __init__(self, opts, numTests, progressBar=None):
21         self.opts = opts
22         self.numTests = numTests
23         self.current = None
24         self.progressBar = progressBar
25         self.completed = 0
26
27     def finish(self):
28         if self.progressBar:
29             self.progressBar.clear()
30         elif self.opts.quiet:
31             pass
32         elif self.opts.succinct:
33             sys.stdout.write('\n')
34
35     def update(self, test):
36         self.completed += 1
37
38         if self.opts.incremental:
39             update_incremental_cache(test)
40
41         if self.progressBar:
42             self.progressBar.update(float(self.completed)/self.numTests,
43                                     test.getFullName())
44
45         shouldShow = test.result.code.isFailure or \
46             self.opts.showAllOutput or \
47             (not self.opts.quiet and not self.opts.succinct)
48         if not shouldShow:
49             return
50
51         if self.progressBar:
52             self.progressBar.clear()
53
54         # Show the test result line.
55         test_name = test.getFullName()
56         print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
57                                      self.completed, self.numTests))
58
59         # Show the test failure output, if requested.
60         if (test.result.code.isFailure and self.opts.showOutput) or \
61            self.opts.showAllOutput:
62             if test.result.code.isFailure:
63                 print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
64                                                   '*'*20))
65             print(test.result.output)
66             print("*" * 20)
67
68         # Report test metrics, if present.
69         if test.result.metrics:
70             print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
71                                                '*'*10))
72             items = sorted(test.result.metrics.items())
73             for metric_name, value in items:
74                 print('%s: %s ' % (metric_name, value.format()))
75             print("*" * 10)
76
77         # Ensure the output is flushed.
78         sys.stdout.flush()
79
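# How the display above is driven (see main() below): it is constructed with
# the parsed options, the number of selected tests and an optional progress
# bar; the test runner calls update() once per completed test, and main()
# calls finish() afterwards to clear the progress bar.
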
80 def write_test_results(run, lit_config, testing_time, output_path):
81     try:
82         import json
83     except ImportError:
84         lit_config.fatal('test output unsupported with Python 2.5')
85
86     # Construct the data we will write.
87     data = {}
88     # Encode the current lit version as a schema version.
89     data['__version__'] = lit.__versioninfo__
90     data['elapsed'] = testing_time
91     # FIXME: Record some information on the lit configuration used?
92     # FIXME: Record information from the individual test suites?
93
94     # Encode the tests.
95     data['tests'] = tests_data = []
96     for test in run.tests:
97         test_data = {
98             'name' : test.getFullName(),
99             'code' : test.result.code.name,
100             'output' : test.result.output,
101             'elapsed' : test.result.elapsed }
102
103         # Add test metrics, if present.
104         if test.result.metrics:
105             test_data['metrics'] = metrics_data = {}
106             for key, value in test.result.metrics.items():
107                 metrics_data[key] = value.todata()
108
109         tests_data.append(test_data)
110
111     # Write the output.
112     f = open(output_path, 'w')
113     try:
114         json.dump(data, f, indent=2, sort_keys=True)
115         f.write('\n')
116     finally:
117         f.close()
118
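# Illustrative sketch (not used by lit itself): the JSON file written above can
# be consumed with nothing but the json module. The helper and file names below
# are hypothetical, purely for exposition.
def _list_problem_tests(json_path='results.json'):
    import json
    with open(json_path) as f:
        data = json.load(f)
    bad_codes = ('FAIL', 'XPASS', 'UNRESOLVED', 'TIMEOUT')
    return [t['name'] for t in data['tests'] if t['code'] in bad_codes]
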
119 def update_incremental_cache(test):
120     if not test.result.code.isFailure:
121         return
122     fname = test.getFilePath()
123     os.utime(fname, None)
124
125 def sort_by_incremental_cache(run):
126     def sortIndex(test):
127         fname = test.getFilePath()
128         try:
129             return -os.path.getmtime(fname)
130         except OSError:
131             return 0
132     run.tests.sort(key = lambda t: sortIndex(t))
133
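# Illustrative sketch of the ordering above (not used by lit): the sort key is
# the negated mtime, so the most recently touched files, including any whose
# mtime update_incremental_cache() bumped after a failure, come first. The
# helper name and paths are hypothetical.
def _demo_incremental_order(paths):
    def sort_index(path):
        try:
            return -os.path.getmtime(path)
        except OSError:
            # Missing files get key 0 and therefore sort last.
            return 0
    return sorted(paths, key=sort_index)
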
134 def main(builtinParameters = {}):
135     # Use processes by default on Unix platforms.
136     isWindows = platform.system() == 'Windows'
137     useProcessesIsDefault = not isWindows
138
139     global options
140     from optparse import OptionParser, OptionGroup
141     parser = OptionParser("usage: %prog [options] {file-or-path}")
142
143     parser.add_option("", "--version", dest="show_version",
144                       help="Show version and exit",
145                       action="store_true", default=False)
146     parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
147                       help="Number of testing threads",
148                       type=int, action="store", default=None)
149     parser.add_option("", "--config-prefix", dest="configPrefix",
150                       metavar="NAME", help="Prefix for 'lit' config files",
151                       action="store", default=None)
152     parser.add_option("-D", "--param", dest="userParameters",
153                       metavar="NAME=VAL",
154                       help="Add 'NAME' = 'VAL' to the user defined parameters",
155                       type=str, action="append", default=[])
156
157     group = OptionGroup(parser, "Output Format")
158     # FIXME: I find these names very confusing, although I like the
159     # functionality.
160     group.add_option("-q", "--quiet", dest="quiet",
161                      help="Suppress no error output",
162                      action="store_true", default=False)
163     group.add_option("-s", "--succinct", dest="succinct",
164                      help="Reduce amount of output",
165                      action="store_true", default=False)
166     group.add_option("-v", "--verbose", dest="showOutput",
167                      help="Show test output for failures",
168                      action="store_true", default=False)
169     group.add_option("-a", "--show-all", dest="showAllOutput",
170                      help="Display all command lines and output",
171                      action="store_true", default=False)
172     group.add_option("-o", "--output", dest="output_path",
173                      help="Write test results to the provided path",
174                      action="store", type=str, metavar="PATH")
175     group.add_option("", "--no-progress-bar", dest="useProgressBar",
176                      help="Do not use curses based progress bar",
177                      action="store_false", default=True)
178     group.add_option("", "--show-unsupported", dest="show_unsupported",
179                      help="Show unsupported tests",
180                      action="store_true", default=False)
181     group.add_option("", "--show-xfail", dest="show_xfail",
182                      help="Show tests that were expected to fail",
183                      action="store_true", default=False)
184     parser.add_option_group(group)
185
186     group = OptionGroup(parser, "Test Execution")
187     group.add_option("", "--path", dest="path",
188                      help="Additional paths to add to testing environment",
189                      action="append", type=str, default=[])
190     group.add_option("", "--vg", dest="useValgrind",
191                      help="Run tests under valgrind",
192                      action="store_true", default=False)
193     group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
194                      help="Check for memory leaks under valgrind",
195                      action="store_true", default=False)
196     group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
197                      help="Specify an extra argument for valgrind",
198                      type=str, action="append", default=[])
199     group.add_option("", "--time-tests", dest="timeTests",
200                      help="Track elapsed wall time for each test",
201                      action="store_true", default=False)
202     group.add_option("", "--no-execute", dest="noExecute",
203                      help="Don't execute any tests (assume PASS)",
204                      action="store_true", default=False)
205     group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
206                       help=("Write XUnit-compatible XML test reports to the"
207                             " specified file"), default=None)
208     group.add_option("", "--timeout", dest="maxIndividualTestTime",
209                      help="Maximum time to spend running a single test (in seconds). "
210                      "0 means no time limit. [Default: 0]",
211                      type=int, default=None)
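    # Note: the default is None rather than 0 so that main() below can tell
    # whether --timeout was given explicitly; a command line value overrides
    # any per test timeout requested by a test suite configuration.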
212     parser.add_option_group(group)
213
214     group = OptionGroup(parser, "Test Selection")
215     group.add_option("", "--max-tests", dest="maxTests", metavar="N",
216                      help="Maximum number of tests to run",
217                      action="store", type=int, default=None)
218     group.add_option("", "--max-time", dest="maxTime", metavar="N",
219                      help="Maximum time to spend testing (in seconds)",
220                      action="store", type=float, default=None)
221     group.add_option("", "--shuffle", dest="shuffle",
222                      help="Run tests in random order",
223                      action="store_true", default=False)
224     group.add_option("-i", "--incremental", dest="incremental",
225                      help="Run modified and failing tests first (updates "
226                      "mtimes)",
227                      action="store_true", default=False)
228     group.add_option("", "--filter", dest="filter", metavar="REGEX",
229                      help=("Only run tests with paths matching the given "
230                            "regular expression"),
231                      action="store", default=None)
232     parser.add_option_group(group)
233
234     group = OptionGroup(parser, "Debug and Experimental Options")
235     group.add_option("", "--debug", dest="debug",
236                       help="Enable debugging (for 'lit' development)",
237                       action="store_true", default=False)
238     group.add_option("", "--show-suites", dest="showSuites",
239                       help="Show discovered test suites",
240                       action="store_true", default=False)
241     group.add_option("", "--show-tests", dest="showTests",
242                       help="Show all discovered tests",
243                       action="store_true", default=False)
244     group.add_option("", "--use-processes", dest="useProcesses",
245                       help="Run tests in parallel with processes (not threads)",
246                       action="store_true", default=useProcessesIsDefault)
247     group.add_option("", "--use-threads", dest="useProcesses",
248                       help="Run tests in parallel with threads (not processes)",
249                       action="store_false", default=useProcessesIsDefault)
250     parser.add_option_group(group)
251
252     (opts, args) = parser.parse_args()
253
254     if opts.show_version:
255         print("lit %s" % (lit.__version__,))
256         return
257
258     if not args:
259         parser.error('No inputs specified')
260
261     if opts.numThreads is None:
262         # Python <2.5 has a race condition causing lit to always fail with
263         # numThreads>1, http://bugs.python.org/issue1731717
264         # I haven't seen this bug occur with 2.5.2 and later, so only enable
265         # multiple threads by default there.
266         if sys.hexversion >= 0x2050200:
267             opts.numThreads = lit.util.detectCPUs()
268         else:
269             opts.numThreads = 1
270
271     inputs = args
272
273     # Create the user defined parameters.
274     userParams = dict(builtinParameters)
275     for entry in opts.userParameters:
276         if '=' not in entry:
277             name,val = entry,''
278         else:
279             name,val = entry.split('=', 1)
280         userParams[name] = val
281
282     # Decide what the requested maximum individual test time should be.
283     if opts.maxIndividualTestTime is not None:
284         maxIndividualTestTime = opts.maxIndividualTestTime
285     else:
286         # Default is zero (no per test time limit).
287         maxIndividualTestTime = 0
288
289
290     # Create the global config object.
291     litConfig = lit.LitConfig.LitConfig(
292         progname = os.path.basename(sys.argv[0]),
293         path = opts.path,
294         quiet = opts.quiet,
295         useValgrind = opts.useValgrind,
296         valgrindLeakCheck = opts.valgrindLeakCheck,
297         valgrindArgs = opts.valgrindArgs,
298         noExecute = opts.noExecute,
299         debug = opts.debug,
300         isWindows = isWindows,
301         params = userParams,
302         config_prefix = opts.configPrefix,
303         maxIndividualTestTime = maxIndividualTestTime)
304
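    # Note: while test suite config files are loaded below, they may adjust
    # litConfig.maxIndividualTestTime themselves (e.g. a suite that needs a
    # longer per test timeout); any value given on the command line is
    # re-imposed right after discovery.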
305     # Perform test discovery.
306     run = lit.run.Run(litConfig,
307                       lit.discovery.find_tests_for_inputs(litConfig, inputs))
308
309     # After test discovery the configuration might have changed
310     # maxIndividualTestTime. If a timeout was explicitly requested on the
311     # command line, it overrides whatever the test configuration set.
312     if opts.maxIndividualTestTime is not None:
313         if opts.maxIndividualTestTime != litConfig.maxIndividualTestTime:
314             litConfig.note(('The test suite configuration requested an individual'
315                 ' test timeout of {0} seconds but a timeout of {1} seconds was'
316                 ' requested on the command line. Forcing timeout to be {1}'
317                 ' seconds')
318                 .format(litConfig.maxIndividualTestTime,
319                         opts.maxIndividualTestTime))
320             litConfig.maxIndividualTestTime = opts.maxIndividualTestTime
321
322     if opts.showSuites or opts.showTests:
323         # Aggregate the tests by suite.
324         suitesAndTests = {}
325         for result_test in run.tests:
326             if result_test.suite not in suitesAndTests:
327                 suitesAndTests[result_test.suite] = []
328             suitesAndTests[result_test.suite].append(result_test)
329         suitesAndTests = list(suitesAndTests.items())
330         suitesAndTests.sort(key = lambda item: item[0].name)
331
332         # Show the suites, if requested.
333         if opts.showSuites:
334             print('-- Test Suites --')
335             for ts,ts_tests in suitesAndTests:
336                 print('  %s - %d tests' %(ts.name, len(ts_tests)))
337                 print('    Source Root: %s' % ts.source_root)
338                 print('    Exec Root  : %s' % ts.exec_root)
339
340         # Show the tests, if requested.
341         if opts.showTests:
342             print('-- Available Tests --')
343             for ts,ts_tests in suitesAndTests:
344                 ts_tests.sort(key = lambda test: test.path_in_suite)
345                 for test in ts_tests:
346                     print('  %s' % (test.getFullName(),))
347
348         # Exit.
349         sys.exit(0)
350
351     # Select and order the tests.
352     numTotalTests = len(run.tests)
353
354     # First, select based on the filter expression if given.
355     if opts.filter:
356         try:
357             rex = re.compile(opts.filter)
358         except re.error:
359             parser.error("invalid regular expression for --filter: %r" % (
360                     opts.filter))
361         run.tests = [result_test for result_test in run.tests
362                      if rex.search(result_test.getFullName())]
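    # For example (pattern and names hypothetical), --filter 'CodeGen/X86'
    # would keep only tests whose full name, e.g. 'LLVM :: CodeGen/X86/add.ll',
    # matches that regular expression.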
363
364     # Then select the order.
365     if opts.shuffle:
366         random.shuffle(run.tests)
367     elif opts.incremental:
368         sort_by_incremental_cache(run)
369     else:
370         run.tests.sort(key = lambda result_test: result_test.getFullName())
371
372     # Finally limit the number of tests, if desired.
373     if opts.maxTests is not None:
374         run.tests = run.tests[:opts.maxTests]
375
376     # Don't create more threads than tests.
377     opts.numThreads = min(len(run.tests), opts.numThreads)
378
379     # Because some tests use threads internally, and at least on Linux each
380     # of these threads counts toward the current process limit, try to
381     # raise the (soft) process limit so that tests don't fail due to
382     # resource exhaustion.
383     try:
384         cpus = lit.util.detectCPUs()
385         desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor
386
387         # Import the resource module here inside this try block because it
388         # will likely fail on Windows.
389         import resource
390
391         max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
392         desired_limit = min(desired_limit, max_procs_hard)
393
394         if max_procs_soft < desired_limit:
395             resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
396             litConfig.note('raised the process limit from %d to %d' % \
397                                (max_procs_soft, desired_limit))
398     except:
399         pass
400
401     extra = ''
402     if len(run.tests) != numTotalTests:
403         extra = ' of %d' % numTotalTests
404     header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
405                                                       opts.numThreads)
406     progressBar = None
407     if not opts.quiet:
408         if opts.succinct and opts.useProgressBar:
409             try:
410                 tc = lit.ProgressBar.TerminalController()
411                 progressBar = lit.ProgressBar.ProgressBar(tc, header)
412             except ValueError:
413                 print(header)
414                 progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
415         else:
416             print(header)
417
418     startTime = time.time()
419     display = TestingProgressDisplay(opts, len(run.tests), progressBar)
420     try:
421         run.execute_tests(display, opts.numThreads, opts.maxTime,
422                           opts.useProcesses)
423     except KeyboardInterrupt:
424         sys.exit(2)
425     display.finish()
426
427     testing_time = time.time() - startTime
428     if not opts.quiet:
429         print('Testing Time: %.2fs' % (testing_time,))
430
431     # Write out the test data, if requested.
432     if opts.output_path is not None:
433         write_test_results(run, litConfig, testing_time, opts.output_path)
434
435     # List test results organized by kind.
436     hasFailures = False
437     byCode = {}
438     for test in run.tests:
439         if test.result.code not in byCode:
440             byCode[test.result.code] = []
441         byCode[test.result.code].append(test)
442         if test.result.code.isFailure:
443             hasFailures = True
444
445     # Print each test in any of the failing groups.
446     for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
447                        ('Failing Tests', lit.Test.FAIL),
448                        ('Unresolved Tests', lit.Test.UNRESOLVED),
449                        ('Unsupported Tests', lit.Test.UNSUPPORTED),
450                        ('Expected Failing Tests', lit.Test.XFAIL),
451                        ('Timed Out Tests', lit.Test.TIMEOUT)):
452         if (lit.Test.XFAIL == code and not opts.show_xfail) or \
453            (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
454             continue
455         elts = byCode.get(code)
456         if not elts:
457             continue
458         print('*'*20)
459         print('%s (%d):' % (title, len(elts)))
460         for test in elts:
461             print('    %s' % test.getFullName())
462         sys.stdout.write('\n')
463
464     if opts.timeTests and run.tests:
465         # Order by time.
466         test_times = [(test.getFullName(), test.result.elapsed)
467                       for test in run.tests]
468         lit.util.printHistogram(test_times, title='Tests')
469
470     for name,code in (('Expected Passes    ', lit.Test.PASS),
471                       ('Passes With Retry  ', lit.Test.FLAKYPASS),
472                       ('Expected Failures  ', lit.Test.XFAIL),
473                       ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
474                       ('Unresolved Tests   ', lit.Test.UNRESOLVED),
475                       ('Unexpected Passes  ', lit.Test.XPASS),
476                       ('Unexpected Failures', lit.Test.FAIL),
477                       ('Individual Timeouts', lit.Test.TIMEOUT)):
478         if opts.quiet and not code.isFailure:
479             continue
480         N = len(byCode.get(code,[]))
481         if N:
482             print('  %s: %d' % (name,N))
483
484     if opts.xunit_output_file:
485         # Collect the tests, indexed by test suite
486         by_suite = {}
487         for result_test in run.tests:
488             suite = result_test.suite.config.name
489             if suite not in by_suite:
490                 by_suite[suite] = {
491                                    'passes'   : 0,
492                                    'failures' : 0,
493                                    'tests'    : [] }
494             by_suite[suite]['tests'].append(result_test)
495             if result_test.result.code.isFailure:
496                 by_suite[suite]['failures'] += 1
497             else:
498                 by_suite[suite]['passes'] += 1
499         xunit_output_file = open(opts.xunit_output_file, "w")
500         xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
501         xunit_output_file.write("<testsuites>\n")
502         for suite_name, suite in by_suite.items():
503             safe_suite_name = suite_name.replace(".", "-")
504             xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
505             xunit_output_file.write(" tests='" + str(suite['passes'] + 
506               suite['failures']) + "'")
507             xunit_output_file.write(" failures='" + str(suite['failures']) + 
508               "'>\n")
509             for result_test in suite['tests']:
510                 xunit_output_file.write(result_test.getJUnitXML() + "\n")
511             xunit_output_file.write("</testsuite>\n")
512         xunit_output_file.write("</testsuites>")
513         xunit_output_file.close()
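        # The resulting report has roughly this shape (values illustrative);
        # each test contributes whatever getJUnitXML() returns for it:
        #   <?xml version="1.0" encoding="UTF-8" ?>
        #   <testsuites>
        #   <testsuite name='check-foo' tests='12' failures='1'>
        #     ...
        #   </testsuite>
        #   </testsuites>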
514
515     # If we encountered any additional errors, exit abnormally.
516     if litConfig.numErrors:
517         sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
518         sys.exit(2)
519
520     # Warn about warnings.
521     if litConfig.numWarnings:
522         sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)
523
524     if hasFailures:
525         sys.exit(1)
526     sys.exit(0)
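    # Exit status, as implemented above: 0 when everything passed, 1 when any
    # test had a failing result code, 2 on lit-level errors or interrupt.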
527
528 if __name__=='__main__':
529     main()