diff --git a/utils/lit/lit/main.py b/utils/lit/lit/main.py
index 6f672a01eb3..4df2571da99 100755
--- a/utils/lit/lit/main.py
+++ b/utils/lit/lit/main.py
@@ -34,12 +34,18 @@ class TestingProgressDisplay(object):
 
     def update(self, test):
         self.completed += 1
+
+        if self.opts.incremental:
+            update_incremental_cache(test)
+
         if self.progressBar:
             self.progressBar.update(float(self.completed)/self.numTests,
                                     test.getFullName())
 
-        if not test.result.code.isFailure and \
-                (self.opts.quiet or self.opts.succinct):
+        shouldShow = test.result.code.isFailure or \
+            self.opts.showAllOutput or \
+            (not self.opts.quiet and not self.opts.succinct)
+        if not shouldShow:
             return
 
         if self.progressBar:
@@ -51,9 +57,11 @@ class TestingProgressDisplay(object):
                                      self.completed, self.numTests))
 
         # Show the test failure output, if requested.
-        if test.result.code.isFailure and self.opts.showOutput:
-            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
-                                              '*'*20))
+        if (test.result.code.isFailure and self.opts.showOutput) or \
+           self.opts.showAllOutput:
+            if test.result.code.isFailure:
+                print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
+                                                  '*'*20))
             print(test.result.output)
             print("*" * 20)
 
@@ -108,6 +116,21 @@ def write_test_results(run, lit_config, testing_time, output_path):
     finally:
         f.close()
 
+def update_incremental_cache(test):
+    if not test.result.code.isFailure:
+        return
+    fname = test.getFilePath()
+    os.utime(fname, None)
+
+def sort_by_incremental_cache(run):
+    def sortIndex(test):
+        fname = test.getFilePath()
+        try:
+            return -os.path.getmtime(fname)
+        except:
+            return 0
+    run.tests.sort(key = lambda t: sortIndex(t))
+
 def main(builtinParameters = {}):
     # Use processes by default on Unix platforms.
     isWindows = platform.system() == 'Windows'
@@ -117,13 +140,16 @@ def main(builtinParameters = {}):
     from optparse import OptionParser, OptionGroup
     parser = OptionParser("usage: %prog [options] {file-or-path}")
 
+    parser.add_option("", "--version", dest="show_version",
+                      help="Show version and exit",
+                      action="store_true", default=False)
     parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                       help="Number of testing threads",
                       type=int, action="store", default=None)
     parser.add_option("", "--config-prefix", dest="configPrefix",
                       metavar="NAME", help="Prefix for 'lit' config files",
                       action="store", default=None)
-    parser.add_option("", "--param", dest="userParameters",
+    parser.add_option("-D", "--param", dest="userParameters",
                       metavar="NAME=VAL",
                       help="Add 'NAME' = 'VAL' to the user defined parameters",
                       type=str, action="append", default=[])
@@ -138,7 +164,10 @@ def main(builtinParameters = {}):
                      help="Reduce amount of output",
                      action="store_true", default=False)
     group.add_option("-v", "--verbose", dest="showOutput",
-                     help="Show all test output",
+                     help="Show test output for failures",
+                     action="store_true", default=False)
+    group.add_option("-a", "--show-all", dest="showAllOutput",
+                     help="Display all commandlines and output",
                      action="store_true", default=False)
     group.add_option("-o", "--output", dest="output_path",
                      help="Write test results to the provided path",
@@ -146,6 +175,12 @@ def main(builtinParameters = {}):
     group.add_option("", "--no-progress-bar", dest="useProgressBar",
                      help="Do not use curses based progress bar",
                      action="store_false", default=True)
+    group.add_option("", "--show-unsupported", dest="show_unsupported",
+                     help="Show unsupported tests",
+                     action="store_true", default=False)
+    group.add_option("", "--show-xfail", dest="show_xfail",
+                     help="Show tests that were expected to fail",
+                     action="store_true", default=False)
     parser.add_option_group(group)
 
     group = OptionGroup(parser, "Test Execution")
@@ -167,6 +202,13 @@ def main(builtinParameters = {}):
     group.add_option("", "--no-execute", dest="noExecute",
                      help="Don't execute any tests (assume PASS)",
                      action="store_true", default=False)
+    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
+                     help=("Write XUnit-compatible XML test reports to the"
+                           " specified file"), default=None)
+    group.add_option("", "--timeout", dest="maxIndividualTestTime",
+                     help="Maximum time to spend running a single test (in seconds). "
+                     "0 means no time limit. [Default: 0]",
+                     type=int, default=None)
     parser.add_option_group(group)
 
     group = OptionGroup(parser, "Test Selection")
@@ -179,6 +221,10 @@ def main(builtinParameters = {}):
     group.add_option("", "--shuffle", dest="shuffle",
                      help="Run tests in random order",
                      action="store_true", default=False)
+    group.add_option("-i", "--incremental", dest="incremental",
+                     help="Run modified and failing tests first (updates "
+                     "mtimes)",
+                     action="store_true", default=False)
     group.add_option("", "--filter", dest="filter", metavar="REGEX",
                      help=("Only run tests with paths matching the given "
                            "regular expression"),
@@ -205,6 +251,10 @@ def main(builtinParameters = {}):
 
     (opts, args) = parser.parse_args()
 
+    if opts.show_version:
+        print("lit %s" % (lit.__version__,))
+        return
+
     if not args:
         parser.error('No inputs specified')
 
@@ -229,6 +279,14 @@ def main(builtinParameters = {}):
             name,val = entry.split('=', 1)
             userParams[name] = val
 
+    # Decide what the requested maximum individual test time should be
+    if opts.maxIndividualTestTime is not None:
+        maxIndividualTestTime = opts.maxIndividualTestTime
+    else:
+        # Default is zero
+        maxIndividualTestTime = 0
+
+
     # Create the global config object.
     litConfig = lit.LitConfig.LitConfig(
         progname = os.path.basename(sys.argv[0]),
@@ -241,19 +299,33 @@ def main(builtinParameters = {}):
         debug = opts.debug,
         isWindows = isWindows,
         params = userParams,
-        config_prefix = opts.configPrefix)
+        config_prefix = opts.configPrefix,
+        maxIndividualTestTime = maxIndividualTestTime)
 
     # Perform test discovery.
     run = lit.run.Run(litConfig,
                       lit.discovery.find_tests_for_inputs(litConfig, inputs))
 
+    # After test discovery the configuration might have changed
+    # the maxIndividualTestTime. If we explicitly set this on the
+    # command line then override what was set in the test configuration
+    if opts.maxIndividualTestTime is not None:
+        if opts.maxIndividualTestTime != litConfig.maxIndividualTestTime:
+            litConfig.note(('The test suite configuration requested an individual'
+                ' test timeout of {0} seconds but a timeout of {1} seconds was'
+                ' requested on the command line. Forcing timeout to be {1}'
+                ' seconds')
+                .format(litConfig.maxIndividualTestTime,
+                        opts.maxIndividualTestTime))
+            litConfig.maxIndividualTestTime = opts.maxIndividualTestTime
+
     if opts.showSuites or opts.showTests:
         # Aggregate the tests by suite.
         suitesAndTests = {}
-        for t in run.tests:
-            if t.suite not in suitesAndTests:
-                suitesAndTests[t.suite] = []
-            suitesAndTests[t.suite].append(t)
+        for result_test in run.tests:
+            if result_test.suite not in suitesAndTests:
+                suitesAndTests[result_test.suite] = []
+            suitesAndTests[result_test.suite].append(result_test)
         suitesAndTests = list(suitesAndTests.items())
         suitesAndTests.sort(key = lambda item: item[0].name)
 
@@ -286,14 +358,16 @@ def main(builtinParameters = {}):
         except:
             parser.error("invalid regular expression for --filter: %r" % (
                     opts.filter))
-        run.tests = [t for t in run.tests
-                     if rex.search(t.getFullName())]
+        run.tests = [result_test for result_test in run.tests
+                     if rex.search(result_test.getFullName())]
 
     # Then select the order.
     if opts.shuffle:
         random.shuffle(run.tests)
+    elif opts.incremental:
+        sort_by_incremental_cache(run)
     else:
-        run.tests.sort(key = lambda t: t.getFullName())
+        run.tests.sort(key = lambda result_test: result_test.getFullName())
 
     # Finally limit the number of tests, if desired.
     if opts.maxTests is not None:
@@ -302,12 +376,33 @@ def main(builtinParameters = {}):
     # Don't create more threads than tests.
     opts.numThreads = min(len(run.tests), opts.numThreads)
 
+    # Because some tests use threads internally, and at least on Linux each
+    # of these threads counts toward the current process limit, try to
+    # raise the (soft) process limit so that tests don't fail due to
+    # resource exhaustion.
+    try:
+        cpus = lit.util.detectCPUs()
+        desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor
+
+        # Import the resource module here inside this try block because it
+        # will likely fail on Windows.
+        import resource
+
+        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
+        desired_limit = min(desired_limit, max_procs_hard)
+
+        if max_procs_soft < desired_limit:
+            resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
+            litConfig.note('raised the process limit from %d to %d' % \
+                               (max_procs_soft, desired_limit))
+    except:
+        pass
+
     extra = ''
     if len(run.tests) != numTotalTests:
         extra = ' of %d' % numTotalTests
     header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                       opts.numThreads)
-
     progressBar = None
     if not opts.quiet:
         if opts.succinct and opts.useProgressBar:
@@ -350,7 +445,13 @@ def main(builtinParameters = {}):
     # Print each test in any of the failing groups.
     for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                        ('Failing Tests', lit.Test.FAIL),
-                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
+                       ('Unresolved Tests', lit.Test.UNRESOLVED),
+                       ('Unsupported Tests', lit.Test.UNSUPPORTED),
+                       ('Expected Failing Tests', lit.Test.XFAIL),
+                       ('Timed Out Tests', lit.Test.TIMEOUT)):
+        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
+           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
+            continue
         elts = byCode.get(code)
         if not elts:
             continue
@@ -367,17 +468,50 @@ def main(builtinParameters = {}):
         lit.util.printHistogram(test_times, title='Tests')
 
     for name,code in (('Expected Passes    ', lit.Test.PASS),
+                      ('Passes With Retry  ', lit.Test.FLAKYPASS),
                       ('Expected Failures  ', lit.Test.XFAIL),
                       ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                       ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                       ('Unexpected Passes  ', lit.Test.XPASS),
-                      ('Unexpected Failures', lit.Test.FAIL),):
+                      ('Unexpected Failures', lit.Test.FAIL),
+                      ('Individual Timeouts', lit.Test.TIMEOUT)):
         if opts.quiet and not code.isFailure:
             continue
         N = len(byCode.get(code,[]))
         if N:
             print(' %s: %d' % (name,N))
 
+    if opts.xunit_output_file:
+        # Collect the tests, indexed by test suite
+        by_suite = {}
+        for result_test in run.tests:
+            suite = result_test.suite.config.name
+            if suite not in by_suite:
+                by_suite[suite] = {
+                                   'passes'   : 0,
+                                   'failures' : 0,
+                                   'tests'    : [] }
+            by_suite[suite]['tests'].append(result_test)
+            if result_test.result.code.isFailure:
+                by_suite[suite]['failures'] += 1
+            else:
+                by_suite[suite]['passes'] += 1
+        xunit_output_file = open(opts.xunit_output_file, "w")
+        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
+        xunit_output_file.write("<testsuites>\n")
+        for suite_name, suite in by_suite.items():
+            safe_suite_name = suite_name.replace(".", "-")
+            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
+            xunit_output_file.write(" tests='" + str(suite['passes'] +
+                suite['failures']) + "'")
+            xunit_output_file.write(" failures='" + str(suite['failures']) +
+                "'>\n")
+            for result_test in suite['tests']:
+                xunit_output_file.write(result_test.getJUnitXML() + "\n")
+            xunit_output_file.write("</testsuite>\n")
+        xunit_output_file.write("</testsuites>")
+        xunit_output_file.close()
+
     # If we encountered any additional errors, exit abnormally.
     if litConfig.numErrors:
         sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
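
A note on the --incremental machinery added above: the "cache" is nothing more
than file modification times. update_incremental_cache() touches the source
file of every failing test, and on the next run sort_by_incremental_cache()
orders tests by descending mtime, so recently failed and recently edited tests
execute first. The following is a minimal standalone sketch of that scheme;
the FakeTest class and the temporary files are illustrative stand-ins, not
part of lit's API.

    import os
    import tempfile

    # Stand-in for a lit Test object; only getFilePath() matters here.
    class FakeTest(object):
        def __init__(self, path, failed):
            self.path = path
            self.failed = failed

        def getFilePath(self):
            return self.path

    def update_incremental_cache(test):
        # Mirror of the helper in the diff: bump the mtime of a failing
        # test's file so the next run schedules it early.
        if not test.failed:
            return
        os.utime(test.getFilePath(), None)

    def sort_by_incremental_cache(tests):
        # Newest mtime first; files that have disappeared sort with key 0.
        def sort_index(test):
            try:
                return -os.path.getmtime(test.getFilePath())
            except OSError:
                return 0
        tests.sort(key=sort_index)

    if __name__ == '__main__':
        tmpdir = tempfile.mkdtemp()
        old = 1000000000  # fixed "long ago" timestamp, for determinism
        tests = []
        for name, failed in [('a.test', False), ('b.test', True),
                             ('c.test', False)]:
            path = os.path.join(tmpdir, name)
            with open(path, 'w') as f:
                f.write('# placeholder test file\n')
            os.utime(path, (old, old))
            tests.append(FakeTest(path, failed))

        # End of a run: record failures in the cache, i.e. touch the files.
        for t in tests:
            update_incremental_cache(t)

        # Start of the next run: the failing b.test now sorts first.
        sort_by_incremental_cache(tests)
        print([os.path.basename(t.getFilePath()) for t in tests])

Running the sketch prints ['b.test', 'a.test', 'c.test']: the failing test
jumps to the front, while the stable sort leaves the equal-mtime tests in
their original order.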