lit: Add '-a' option to display commands+output of all tests
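With -a, lit prints each test's command lines and output even when the test passes; -v keeps showing output only for failing tests. Illustrative invocation (the test path is hypothetical):

    llvm-lit -a test/Foo/bar.ll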
[oota-llvm.git] / utils / lit / lit / main.py
index 44042a34a51a52b2eaae8f474e347e3767cb3684..18c4a8c3aaec678bbd95a2dcc1d91672cafa084f 100755
@@ -34,12 +34,17 @@ class TestingProgressDisplay(object):
 
     def update(self, test):
         self.completed += 1
+
+        if self.opts.incremental:
+            update_incremental_cache(test)
+
         if self.progressBar:
             self.progressBar.update(float(self.completed)/self.numTests,
                                     test.getFullName())
 
-        if not test.result.code.isFailure and \
-                (self.opts.quiet or self.opts.succinct):
+        shouldShow = test.result.code.isFailure or \
+            (not self.opts.quiet and not self.opts.succinct)
+        if not shouldShow:
             return
 
         if self.progressBar:
@@ -51,9 +56,11 @@ class TestingProgressDisplay(object):
                                      self.completed, self.numTests))
 
         # Show the test failure output, if requested.
-        if test.result.code.isFailure and self.opts.showOutput:
-            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
-                                              '*'*20))
+        if (test.result.code.isFailure and self.opts.showOutput) or \
+           self.opts.showAllOutput:
+            if test.result.code.isFailure:
+                print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
+                                                  '*'*20))
             print(test.result.output)
             print("*" * 20)
 
@@ -108,22 +115,40 @@ def write_test_results(run, lit_config, testing_time, output_path):
     finally:
         f.close()
 
+def update_incremental_cache(test):
+    if not test.result.code.isFailure:
+        return
+    fname = test.getFilePath()
+    os.utime(fname, None)
+
+def sort_by_incremental_cache(run):
+    def sortIndex(test):
+        fname = test.getFilePath()
+        try:
+            return -os.path.getmtime(fname)
+        except:
+            return 0
+    run.tests.sort(key = lambda t: sortIndex(t))
+
 def main(builtinParameters = {}):
     # Use processes by default on Unix platforms.
     isWindows = platform.system() == 'Windows'
-    useProcessesIsDefault = (not isWindows) and platform.system() != 'OpenBSD'
+    useProcessesIsDefault = not isWindows
 
     global options
     from optparse import OptionParser, OptionGroup
     parser = OptionParser("usage: %prog [options] {file-or-path}")
 
+    parser.add_option("", "--version", dest="show_version",
+                      help="Show version and exit",
+                      action="store_true", default=False)
     parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                       help="Number of testing threads",
                       type=int, action="store", default=None)
     parser.add_option("", "--config-prefix", dest="configPrefix",
                       metavar="NAME", help="Prefix for 'lit' config files",
                       action="store", default=None)
     parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                       help="Number of testing threads",
                       type=int, action="store", default=None)
     parser.add_option("", "--config-prefix", dest="configPrefix",
                       metavar="NAME", help="Prefix for 'lit' config files",
                       action="store", default=None)
-    parser.add_option("", "--param", dest="userParameters",
+    parser.add_option("-D", "--param", dest="userParameters",
                       metavar="NAME=VAL",
                       help="Add 'NAME' = 'VAL' to the user defined parameters",
                       type=str, action="append", default=[])
                       metavar="NAME=VAL",
                       help="Add 'NAME' = 'VAL' to the user defined parameters",
                       type=str, action="append", default=[])
@@ -138,7 +163,10 @@ def main(builtinParameters = {}):
                      help="Reduce amount of output",
                      action="store_true", default=False)
     group.add_option("-v", "--verbose", dest="showOutput",
                      help="Reduce amount of output",
                      action="store_true", default=False)
     group.add_option("-v", "--verbose", dest="showOutput",
-                     help="Show all test output",
+                     help="Show test output for failures",
+                     action="store_true", default=False)
+    group.add_option("-a", "--show-all", dest="showAllOutput",
+                     help="Display all commandlines and output",
                      action="store_true", default=False)
     group.add_option("-o", "--output", dest="output_path",
                      help="Write test results to the provided path",
                      action="store_true", default=False)
     group.add_option("-o", "--output", dest="output_path",
                      help="Write test results to the provided path",
@@ -146,6 +174,12 @@ def main(builtinParameters = {}):
     group.add_option("", "--no-progress-bar", dest="useProgressBar",
                      help="Do not use curses based progress bar",
                      action="store_false", default=True)
     group.add_option("", "--no-progress-bar", dest="useProgressBar",
                      help="Do not use curses based progress bar",
                      action="store_false", default=True)
+    group.add_option("", "--show-unsupported", dest="show_unsupported",
+                     help="Show unsupported tests",
+                     action="store_true", default=False)
+    group.add_option("", "--show-xfail", dest="show_xfail",
+                     help="Show tests that were expected to fail",
+                     action="store_true", default=False)
     parser.add_option_group(group)
 
     group = OptionGroup(parser, "Test Execution")
@@ -167,6 +201,9 @@ def main(builtinParameters = {}):
     group.add_option("", "--no-execute", dest="noExecute",
                      help="Don't execute any tests (assume PASS)",
                      action="store_true", default=False)
     group.add_option("", "--no-execute", dest="noExecute",
                      help="Don't execute any tests (assume PASS)",
                      action="store_true", default=False)
+    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
+                      help=("Write XUnit-compatible XML test reports to the"
+                            " specified file"), default=None)
     parser.add_option_group(group)
 
     group = OptionGroup(parser, "Test Selection")
@@ -179,6 +216,10 @@ def main(builtinParameters = {}):
     group.add_option("", "--shuffle", dest="shuffle",
                      help="Run tests in random order",
                      action="store_true", default=False)
     group.add_option("", "--shuffle", dest="shuffle",
                      help="Run tests in random order",
                      action="store_true", default=False)
+    group.add_option("-i", "--incremental", dest="incremental",
+                     help="Run modified and failing tests first (updates "
+                     "mtimes)",
+                     action="store_true", default=False)
     group.add_option("", "--filter", dest="filter", metavar="REGEX",
                      help=("Only run tests with paths matching the given "
                            "regular expression"),
     group.add_option("", "--filter", dest="filter", metavar="REGEX",
                      help=("Only run tests with paths matching the given "
                            "regular expression"),
@@ -205,6 +246,10 @@ def main(builtinParameters = {}):
 
     (opts, args) = parser.parse_args()
 
+    if opts.show_version:
+        print("lit %s" % (lit.__version__,))
+        return
+
     if not args:
         parser.error('No inputs specified')
 
@@ -250,10 +295,10 @@ def main(builtinParameters = {}):
     if opts.showSuites or opts.showTests:
         # Aggregate the tests by suite.
         suitesAndTests = {}
-        for t in run.tests:
-            if t.suite not in suitesAndTests:
-                suitesAndTests[t.suite] = []
-            suitesAndTests[t.suite].append(t)
+        for result_test in run.tests:
+            if result_test.suite not in suitesAndTests:
+                suitesAndTests[result_test.suite] = []
+            suitesAndTests[result_test.suite].append(result_test)
         suitesAndTests = list(suitesAndTests.items())
         suitesAndTests.sort(key = lambda item: item[0].name)
 
@@ -286,14 +331,16 @@ def main(builtinParameters = {}):
         except:
             parser.error("invalid regular expression for --filter: %r" % (
                     opts.filter))
-        run.tests = [t for t in run.tests
-                     if rex.search(t.getFullName())]
+        run.tests = [result_test for result_test in run.tests
+                     if rex.search(result_test.getFullName())]
 
     # Then select the order.
     if opts.shuffle:
         random.shuffle(run.tests)
+    elif opts.incremental:
+        sort_by_incremental_cache(run)
     else:
-        run.tests.sort(key = lambda t: t.getFullName())
+        run.tests.sort(key = lambda result_test: result_test.getFullName())
 
     # Finally limit the number of tests, if desired.
     if opts.maxTests is not None:
@@ -302,6 +349,28 @@ def main(builtinParameters = {}):
     # Don't create more threads than tests.
     opts.numThreads = min(len(run.tests), opts.numThreads)
 
+    # Because some tests use threads internally, and at least on Linux each
+    # of these threads counts toward the current process limit, try to
+    # raise the (soft) process limit so that tests don't fail due to
+    # resource exhaustion.
+    try:
+        cpus = lit.util.detectCPUs()
+        desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor
+
+        # Import the resource module here inside this try block because it
+        # will likely fail on Windows.
+        import resource
+
+        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
+        desired_limit = min(desired_limit, max_procs_hard)
+
+        if max_procs_soft < desired_limit:
+            resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
+            litConfig.note('raised the process limit from %d to %d' % \
+                               (max_procs_soft, desired_limit))
+    except:
+        pass
+
     extra = ''
     if len(run.tests) != numTotalTests:
         extra = ' of %d' % numTotalTests
@@ -350,7 +419,12 @@ def main(builtinParameters = {}):
     # Print each test in any of the failing groups.
     for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                        ('Failing Tests', lit.Test.FAIL),
-                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
+                       ('Unresolved Tests', lit.Test.UNRESOLVED),
+                       ('Unsupported Tests', lit.Test.UNSUPPORTED),
+                       ('Expected Failing Tests', lit.Test.XFAIL)):
+        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
+           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
+            continue
         elts = byCode.get(code)
         if not elts:
             continue
@@ -367,17 +441,49 @@ def main(builtinParameters = {}):
         lit.util.printHistogram(test_times, title='Tests')
 
     for name,code in (('Expected Passes    ', lit.Test.PASS),
+                      ('Passes With Retry  ', lit.Test.FLAKYPASS),
                       ('Expected Failures  ', lit.Test.XFAIL),
                       ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                       ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                       ('Unexpected Passes  ', lit.Test.XPASS),
-                      ('Unexpected Failures', lit.Test.FAIL),):
+                      ('Unexpected Failures', lit.Test.FAIL)):
         if opts.quiet and not code.isFailure:
             continue
         N = len(byCode.get(code,[]))
         if N:
             print('  %s: %d' % (name,N))
 
+    if opts.xunit_output_file:
+        # Collect the tests, indexed by test suite
+        by_suite = {}
+        for result_test in run.tests:
+            suite = result_test.suite.config.name
+            if suite not in by_suite:
+                by_suite[suite] = {
+                                   'passes'   : 0,
+                                   'failures' : 0,
+                                   'tests'    : [] }
+            by_suite[suite]['tests'].append(result_test)
+            if result_test.result.code.isFailure:
+                by_suite[suite]['failures'] += 1
+            else:
+                by_suite[suite]['passes'] += 1
+        xunit_output_file = open(opts.xunit_output_file, "w")
+        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
+        xunit_output_file.write("<testsuites>\n")
+        for suite_name, suite in by_suite.items():
+            safe_suite_name = suite_name.replace(".", "-")
+            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
+            xunit_output_file.write(" tests='" + str(suite['passes'] + 
+              suite['failures']) + "'")
+            xunit_output_file.write(" failures='" + str(suite['failures']) + 
+              "'>\n")
+            for result_test in suite['tests']:
+                xunit_output_file.write(result_test.getJUnitXML() + "\n")
+            xunit_output_file.write("</testsuite>\n")
+        xunit_output_file.write("</testsuites>")
+        xunit_output_file.close()
+
     # If we encountered any additional errors, exit abnormally.
     if litConfig.numErrors:
         sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)