[lit] Implement support for per-test timeouts in lit.
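
With this change a maximum wall-clock time can be imposed on each individual
test, either from the command line or from a test suite's lit configuration
(litConfig.maxIndividualTestTime). A usage sketch; the test path and the
lit.cfg spelling below are illustrative, not part of this patch:

    # Kill any single test that runs for more than 60 seconds.
    $ lit -sv --timeout=60 test/

    # Equivalently, from a suite's lit.cfg:
    lit_config.maxIndividualTestTime = 60
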
[oota-llvm.git] / utils / lit / lit / main.py
index 1480a7a74fd5e59b004a251375a7247d0a590ca1..4df2571da998b4eed684d9b4db8ea4c6b2fcc6ed 100755 (executable)
@@ -7,38 +7,23 @@ See lit.pod for more information.
 """
 
 from __future__ import absolute_import
 """
 
 from __future__ import absolute_import
-import math, os, platform, random, re, sys, time, threading, traceback
+import math, os, platform, random, re, sys, time
 
 import lit.ProgressBar
 import lit.LitConfig
 import lit.Test
 import lit.run
 import lit.util
-
 import lit.discovery
 
-class TestingProgressDisplay:
+class TestingProgressDisplay(object):
     def __init__(self, opts, numTests, progressBar=None):
         self.opts = opts
         self.numTests = numTests
         self.current = None
-        self.lock = threading.Lock()
         self.progressBar = progressBar
         self.completed = 0
 
-    def update(self, test):
-        # Avoid locking overhead in quiet mode
-        if self.opts.quiet and not test.result.code.isFailure:
-            self.completed += 1
-            return
-
-        # Output lock.
-        self.lock.acquire()
-        try:
-            self.handleUpdate(test)
-        finally:
-            self.lock.release()
-
     def finish(self):
         if self.progressBar:
             self.progressBar.clear()
@@ -47,121 +32,124 @@ class TestingProgressDisplay:
         elif self.opts.succinct:
             sys.stdout.write('\n')
 
-    def handleUpdate(self, test):
+    def update(self, test):
         self.completed += 1
+
+        if self.opts.incremental:
+            update_incremental_cache(test)
+
         if self.progressBar:
             self.progressBar.update(float(self.completed)/self.numTests,
                                     test.getFullName())
 
-        if self.opts.succinct and not test.result.code.isFailure:
+        shouldShow = test.result.code.isFailure or \
+            self.opts.showAllOutput or \
+            (not self.opts.quiet and not self.opts.succinct)
+        if not shouldShow:
             return
 
         if self.progressBar:
             self.progressBar.clear()
 
-        print('%s: %s (%d of %d)' % (test.result.code.name, test.getFullName(),
+        # Show the test result line.
+        test_name = test.getFullName()
+        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                      self.completed, self.numTests))
 
-        if test.result.code.isFailure and self.opts.showOutput:
-            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
-                                              '*'*20))
+        # Show the test failure output, if requested.
+        if (test.result.code.isFailure and self.opts.showOutput) or \
+           self.opts.showAllOutput:
+            if test.result.code.isFailure:
+                print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
+                                                  '*'*20))
             print(test.result.output)
             print("*" * 20)
 
+        # Report test metrics, if present.
+        if test.result.metrics:
+            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
+                                               '*'*10))
+            items = sorted(test.result.metrics.items())
+            for metric_name, value in items:
+                print('%s: %s ' % (metric_name, value.format()))
+            print("*" * 10)
+
+        # Ensure the output is flushed.
         sys.stdout.flush()
 
-class TestProvider:
-    def __init__(self, tests, maxTime):
-        self.maxTime = maxTime
-        self.iter = iter(tests)
-        self.lock = threading.Lock()
-        self.startTime = time.time()
-        self.canceled = False
-
-    def cancel(self):
-        self.lock.acquire()
-        self.canceled = True
-        self.lock.release()
-
-    def get(self):
-        # Check if we have run out of time.
-        if self.maxTime is not None:
-            if time.time() - self.startTime > self.maxTime:
-                return None
-
-        # Otherwise take the next test.
-        self.lock.acquire()
-        if self.canceled:
-          self.lock.release()
-          return None
-        for item in self.iter:
-            break
-        else:
-            item = None
-        self.lock.release()
-        return item
-
-class Tester(threading.Thread):
-    def __init__(self, run_instance, provider, display):
-        threading.Thread.__init__(self)
-        self.run_instance = run_instance
-        self.provider = provider
-        self.display = display
-
-    def run(self):
-        while 1:
-            item = self.provider.get()
-            if item is None:
-                break
-            self.runTest(item)
-
-    def runTest(self, test):
-        try:
-            self.run_instance.execute_test(test)
-        except KeyboardInterrupt:
-            # This is a sad hack. Unfortunately subprocess goes
-            # bonkers with ctrl-c and we start forking merrily.
-            print('\nCtrl-C detected, goodbye.')
-            os.kill(0,9)
-        self.display.update(test)
-
-def runTests(numThreads, run, provider, display):
-    # If only using one testing thread, don't use threads at all; this lets us
-    # profile, among other things.
-    if numThreads == 1:
-        t = Tester(run, provider, display)
-        t.run()
+def write_test_results(run, lit_config, testing_time, output_path):
+    try:
+        import json
+    except ImportError:
+        lit_config.fatal('test output unsupported with Python 2.5')
+
+    # Construct the data we will write.
+    data = {}
+    # Encode the current lit version as a schema version.
+    data['__version__'] = lit.__versioninfo__
+    data['elapsed'] = testing_time
+    # FIXME: Record some information on the lit configuration used?
+    # FIXME: Record information from the individual test suites?
+
+    # Encode the tests.
+    data['tests'] = tests_data = []
+    for test in run.tests:
+        test_data = {
+            'name' : test.getFullName(),
+            'code' : test.result.code.name,
+            'output' : test.result.output,
+            'elapsed' : test.result.elapsed }
+
+        # Add test metrics, if present.
+        if test.result.metrics:
+            test_data['metrics'] = metrics_data = {}
+            for key, value in test.result.metrics.items():
+                metrics_data[key] = value.todata()
+
+        tests_data.append(test_data)
+
+    # Write the output.
+    f = open(output_path, 'w')
+    try:
+        json.dump(data, f, indent=2, sort_keys=True)
+        f.write('\n')
+    finally:
+        f.close()
+
+def update_incremental_cache(test):
+    if not test.result.code.isFailure:
         return
+    fname = test.getFilePath()
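+    # Bump the file's mtime to now so that sort_by_incremental_cache
+    # schedules this failing test first on the next --incremental run.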
+    os.utime(fname, None)
 
-    # Otherwise spin up the testing threads and wait for them to finish.
-    testers = [Tester(run, provider, display)
-               for i in range(numThreads)]
-    for t in testers:
-        t.start()
-    try:
-        for t in testers:
-            t.join()
-    except KeyboardInterrupt:
-        sys.exit(2)
+def sort_by_incremental_cache(run):
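+    # Run the most recently modified tests first; together with
+    # update_incremental_cache this puts recently failing tests up front.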
+    def sortIndex(test):
+        fname = test.getFilePath()
+        try:
+            return -os.path.getmtime(fname)
+        except OSError:
+            return 0
+    run.tests.sort(key=sortIndex)
 
 def main(builtinParameters = {}):
-    # Bump the GIL check interval, its more important to get any one thread to a
-    # blocking operation (hopefully exec) than to try and unblock other threads.
-    #
-    # FIXME: This is a hack.
-    sys.setcheckinterval(1000)
+    # Use processes by default on Unix platforms.
+    isWindows = platform.system() == 'Windows'
+    useProcessesIsDefault = not isWindows
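+    # Processes are preferred because Python's GIL can serialize CPU-bound
+    # work when using threads.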
 
     global options
     from optparse import OptionParser, OptionGroup
     parser = OptionParser("usage: %prog [options] {file-or-path}")
 
+    parser.add_option("", "--version", dest="show_version",
+                      help="Show version and exit",
+                      action="store_true", default=False)
     parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                       help="Number of testing threads",
                       type=int, action="store", default=None)
     parser.add_option("", "--config-prefix", dest="configPrefix",
                       metavar="NAME", help="Prefix for 'lit' config files",
                       action="store", default=None)
     parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                       help="Number of testing threads",
                       type=int, action="store", default=None)
     parser.add_option("", "--config-prefix", dest="configPrefix",
                       metavar="NAME", help="Prefix for 'lit' config files",
                       action="store", default=None)
-    parser.add_option("", "--param", dest="userParameters",
+    parser.add_option("-D", "--param", dest="userParameters",
                       metavar="NAME=VAL",
                       help="Add 'NAME' = 'VAL' to the user defined parameters",
                       type=str, action="append", default=[])
                       metavar="NAME=VAL",
                       help="Add 'NAME' = 'VAL' to the user defined parameters",
                       type=str, action="append", default=[])
@@ -176,11 +164,23 @@ def main(builtinParameters = {}):
                      help="Reduce amount of output",
                      action="store_true", default=False)
     group.add_option("-v", "--verbose", dest="showOutput",
                      help="Reduce amount of output",
                      action="store_true", default=False)
     group.add_option("-v", "--verbose", dest="showOutput",
-                     help="Show all test output",
+                     help="Show test output for failures",
+                     action="store_true", default=False)
+    group.add_option("-a", "--show-all", dest="showAllOutput",
+                     help="Display all commandlines and output",
                      action="store_true", default=False)
                      action="store_true", default=False)
+    group.add_option("-o", "--output", dest="output_path",
+                     help="Write test results to the provided path",
+                     action="store", type=str, metavar="PATH")
     group.add_option("", "--no-progress-bar", dest="useProgressBar",
                      help="Do not use curses based progress bar",
                      action="store_false", default=True)
     group.add_option("", "--no-progress-bar", dest="useProgressBar",
                      help="Do not use curses based progress bar",
                      action="store_false", default=True)
+    group.add_option("", "--show-unsupported", dest="show_unsupported",
+                     help="Show unsupported tests",
+                     action="store_true", default=False)
+    group.add_option("", "--show-xfail", dest="show_xfail",
+                     help="Show tests that were expected to fail",
+                     action="store_true", default=False)
     parser.add_option_group(group)
 
     group = OptionGroup(parser, "Test Execution")
@@ -202,6 +202,13 @@ def main(builtinParameters = {}):
     group.add_option("", "--no-execute", dest="noExecute",
                      help="Don't execute any tests (assume PASS)",
                      action="store_true", default=False)
     group.add_option("", "--no-execute", dest="noExecute",
                      help="Don't execute any tests (assume PASS)",
                      action="store_true", default=False)
+    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
+                      help=("Write XUnit-compatible XML test reports to the"
+                            " specified file"), default=None)
+    group.add_option("", "--timeout", dest="maxIndividualTestTime",
+                     help="Maximum time to spend running a single test (in seconds)."
+                     "0 means no time limit. [Default: 0]",
+                    type=int, default=None)
     parser.add_option_group(group)
 
     group = OptionGroup(parser, "Test Selection")
@@ -214,6 +221,10 @@ def main(builtinParameters = {}):
     group.add_option("", "--shuffle", dest="shuffle",
                      help="Run tests in random order",
                      action="store_true", default=False)
     group.add_option("", "--shuffle", dest="shuffle",
                      help="Run tests in random order",
                      action="store_true", default=False)
+    group.add_option("-i", "--incremental", dest="incremental",
+                     help="Run modified and failing tests first (updates "
+                     "mtimes)",
+                     action="store_true", default=False)
     group.add_option("", "--filter", dest="filter", metavar="REGEX",
                      help=("Only run tests with paths matching the given "
                            "regular expression"),
     group.add_option("", "--filter", dest="filter", metavar="REGEX",
                      help=("Only run tests with paths matching the given "
                            "regular expression"),
@@ -230,10 +241,20 @@ def main(builtinParameters = {}):
     group.add_option("", "--show-tests", dest="showTests",
                       help="Show all discovered tests",
                       action="store_true", default=False)
     group.add_option("", "--show-tests", dest="showTests",
                       help="Show all discovered tests",
                       action="store_true", default=False)
+    group.add_option("", "--use-processes", dest="useProcesses",
+                      help="Run tests in parallel with processes (not threads)",
+                      action="store_true", default=useProcessesIsDefault)
+    group.add_option("", "--use-threads", dest="useProcesses",
+                      help="Run tests in parallel with threads (not processes)",
+                      action="store_false", default=useProcessesIsDefault)
     parser.add_option_group(group)
 
     (opts, args) = parser.parse_args()
 
+    if opts.show_version:
+        print("lit %s" % (lit.__version__,))
+        return
+
     if not args:
         parser.error('No inputs specified')
 
@@ -258,6 +279,14 @@ def main(builtinParameters = {}):
             name,val = entry.split('=', 1)
         userParams[name] = val
 
+    # Decide what the requested maximum individual test time should be
+    if opts.maxIndividualTestTime is not None:
+        maxIndividualTestTime = opts.maxIndividualTestTime
+    else:
+        # Default is zero
+        maxIndividualTestTime = 0
+
     # Create the global config object.
     litConfig = lit.LitConfig.LitConfig(
         progname = os.path.basename(sys.argv[0]),
@@ -268,21 +297,35 @@ def main(builtinParameters = {}):
         valgrindArgs = opts.valgrindArgs,
         noExecute = opts.noExecute,
         debug = opts.debug,
-        isWindows = (platform.system()=='Windows'),
+        isWindows = isWindows,
         params = userParams,
-        config_prefix = opts.configPrefix)
+        config_prefix = opts.configPrefix,
+        maxIndividualTestTime = maxIndividualTestTime)
 
     # Perform test discovery.
     run = lit.run.Run(litConfig,
                       lit.discovery.find_tests_for_inputs(litConfig, inputs))
 
+    # After test discovery the configuration might have changed
+    # maxIndividualTestTime. If a timeout was explicitly requested on the
+    # command line, it overrides whatever the test configuration set.
+    if opts.maxIndividualTestTime is not None:
+        if opts.maxIndividualTestTime != litConfig.maxIndividualTestTime:
+            litConfig.note(('The test suite configuration requested an individual'
+                ' test timeout of {0} seconds but a timeout of {1} seconds was'
+                ' requested on the command line. Forcing timeout to be {1}'
+                ' seconds')
+                .format(litConfig.maxIndividualTestTime,
+                        opts.maxIndividualTestTime))
+            litConfig.maxIndividualTestTime = opts.maxIndividualTestTime
+
     if opts.showSuites or opts.showTests:
         # Aggregate the tests by suite.
         suitesAndTests = {}
-        for t in run.tests:
-            if t.suite not in suitesAndTests:
-                suitesAndTests[t.suite] = []
-            suitesAndTests[t.suite].append(t)
+        for result_test in run.tests:
+            if result_test.suite not in suitesAndTests:
+                suitesAndTests[result_test.suite] = []
+            suitesAndTests[result_test.suite].append(result_test)
         suitesAndTests = list(suitesAndTests.items())
         suitesAndTests.sort(key = lambda item: item[0].name)
 
@@ -315,14 +358,16 @@ def main(builtinParameters = {}):
         except:
             parser.error("invalid regular expression for --filter: %r" % (
                     opts.filter))
-        run.tests = [t for t in run.tests
-                     if rex.search(t.getFullName())]
+        run.tests = [result_test for result_test in run.tests
+                     if rex.search(result_test.getFullName())]
 
     # Then select the order.
     if opts.shuffle:
         random.shuffle(run.tests)
+    elif opts.incremental:
+        sort_by_incremental_cache(run)
     else:
-        run.tests.sort(key = lambda t: t.getFullName())
+        run.tests.sort(key = lambda result_test: result_test.getFullName())
 
     # Finally limit the number of tests, if desired.
     if opts.maxTests is not None:
@@ -331,12 +376,33 @@ def main(builtinParameters = {}):
     # Don't create more threads than tests.
     opts.numThreads = min(len(run.tests), opts.numThreads)
 
+    # Because some tests use threads internally, and at least on Linux each
+    # of these threads counts toward the current process limit, try to
+    # raise the (soft) process limit so that tests don't fail due to
+    # resource exhaustion.
+    try:
+        cpus = lit.util.detectCPUs()
+        desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor
+
+        # Import the resource module here inside this try block because it
+        # will likely fail on Windows.
+        import resource
+
+        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
+        desired_limit = min(desired_limit, max_procs_hard)
+
+        if max_procs_soft < desired_limit:
+            resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
+            litConfig.note('raised the process limit from %d to %d' % \
+                               (max_procs_soft, desired_limit))
+    except:
+        pass
+
     extra = ''
     if len(run.tests) != numTotalTests:
         extra = ' of %d' % numTotalTests
     header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                       opts.numThreads)
-
     progressBar = None
     if not opts.quiet:
         if opts.succinct and opts.useProgressBar:
@@ -351,28 +417,20 @@ def main(builtinParameters = {}):
 
     startTime = time.time()
     display = TestingProgressDisplay(opts, len(run.tests), progressBar)
-    provider = TestProvider(run.tests, opts.maxTime)
-
     try:
-      import win32api
-    except ImportError:
-      pass
-    else:
-      def console_ctrl_handler(type):
-        provider.cancel()
-        return True
-      win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)
-
-    runTests(opts.numThreads, run, provider, display)
+        run.execute_tests(display, opts.numThreads, opts.maxTime,
+                          opts.useProcesses)
+    except KeyboardInterrupt:
+        sys.exit(2)
     display.finish()
 
+    testing_time = time.time() - startTime
     if not opts.quiet:
-        print('Testing Time: %.2fs'%(time.time() - startTime))
+        print('Testing Time: %.2fs' % (testing_time,))
 
-    # Update results for any tests which weren't run.
-    for test in run.tests:
-        if test.result is None:
-            test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, '', 0.0))
+    # Write out the test data, if requested.
+    if opts.output_path is not None:
+        write_test_results(run, litConfig, testing_time, opts.output_path)
 
     # List test results organized by kind.
     hasFailures = False
@@ -387,7 +445,13 @@ def main(builtinParameters = {}):
     # Print each test in any of the failing groups.
     for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                        ('Failing Tests', lit.Test.FAIL),
-                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
+                       ('Unresolved Tests', lit.Test.UNRESOLVED),
+                       ('Unsupported Tests', lit.Test.UNSUPPORTED),
+                       ('Expected Failing Tests', lit.Test.XFAIL),
+                       ('Timed Out Tests', lit.Test.TIMEOUT)):
+        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
+           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
+            continue
         elts = byCode.get(code)
         if not elts:
             continue
@@ -404,17 +468,50 @@ def main(builtinParameters = {}):
         lit.util.printHistogram(test_times, title='Tests')
 
     for name,code in (('Expected Passes    ', lit.Test.PASS),
+                      ('Passes With Retry  ', lit.Test.FLAKYPASS),
                       ('Expected Failures  ', lit.Test.XFAIL),
                       ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                       ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                       ('Unexpected Passes  ', lit.Test.XPASS),
-                      ('Unexpected Failures', lit.Test.FAIL),):
+                      ('Unexpected Failures', lit.Test.FAIL),
+                      ('Individual Timeouts', lit.Test.TIMEOUT)):
         if opts.quiet and not code.isFailure:
             continue
         N = len(byCode.get(code,[]))
         if N:
             print('  %s: %d' % (name,N))
 
+    if opts.xunit_output_file:
+        # Collect the tests, indexed by test suite
+        by_suite = {}
+        for result_test in run.tests:
+            suite = result_test.suite.config.name
+            if suite not in by_suite:
+                by_suite[suite] = {
+                                   'passes'   : 0,
+                                   'failures' : 0,
+                                   'tests'    : [] }
+            by_suite[suite]['tests'].append(result_test)
+            if result_test.result.code.isFailure:
+                by_suite[suite]['failures'] += 1
+            else:
+                by_suite[suite]['passes'] += 1
+        xunit_output_file = open(opts.xunit_output_file, "w")
+        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
+        xunit_output_file.write("<testsuites>\n")
+        for suite_name, suite in by_suite.items():
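+            # Dots in suite names are treated as package separators by some
+            # JUnit/XUnit consumers, so replace them.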
+            safe_suite_name = suite_name.replace(".", "-")
+            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
+            xunit_output_file.write(" tests='" + str(suite['passes'] + 
+              suite['failures']) + "'")
+            xunit_output_file.write(" failures='" + str(suite['failures']) + 
+              "'>\n")
+            for result_test in suite['tests']:
+                xunit_output_file.write(result_test.getJUnitXML() + "\n")
+            xunit_output_file.write("</testsuite>\n")
+        xunit_output_file.write("</testsuites>")
+        xunit_output_file.close()
+
     # If we encountered any additional errors, exit abnormally.
     if litConfig.numErrors:
         sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)