X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=utils%2Flit%2Flit%2Fmain.py;h=4df2571da998b4eed684d9b4db8ea4c6b2fcc6ed;hb=d3bcf04e8331314fbfec3f0af41e137b6bd242c7;hp=430fa23ab7a0a4fdf5c1903a8b110aec7bf56013;hpb=0b714f768605c9f5d3c8caf2b5708e617b82f86f;p=oota-llvm.git diff --git a/utils/lit/lit/main.py b/utils/lit/lit/main.py index 430fa23ab7a..4df2571da99 100755 --- a/utils/lit/lit/main.py +++ b/utils/lit/lit/main.py @@ -6,39 +6,24 @@ lit - LLVM Integrated Tester. See lit.pod for more information. """ -import math, os, platform, random, re, sys, time, threading, traceback - -import ProgressBar -import TestRunner -import Util - -import LitConfig -import Test - +from __future__ import absolute_import +import math, os, platform, random, re, sys, time + +import lit.ProgressBar +import lit.LitConfig +import lit.Test +import lit.run +import lit.util import lit.discovery -class TestingProgressDisplay: +class TestingProgressDisplay(object): def __init__(self, opts, numTests, progressBar=None): self.opts = opts self.numTests = numTests self.current = None - self.lock = threading.Lock() self.progressBar = progressBar self.completed = 0 - def update(self, test): - # Avoid locking overhead in quiet mode - if self.opts.quiet and not test.result.isFailure: - self.completed += 1 - return - - # Output lock. - self.lock.acquire() - try: - self.handleUpdate(test) - finally: - self.lock.release() - def finish(self): if self.progressBar: self.progressBar.clear() @@ -47,136 +32,124 @@ class TestingProgressDisplay: elif self.opts.succinct: sys.stdout.write('\n') - def handleUpdate(self, test): + def update(self, test): self.completed += 1 + + if self.opts.incremental: + update_incremental_cache(test) + if self.progressBar: self.progressBar.update(float(self.completed)/self.numTests, test.getFullName()) - if self.opts.succinct and not test.result.isFailure: + shouldShow = test.result.code.isFailure or \ + self.opts.showAllOutput or \ + (not self.opts.quiet and not self.opts.succinct) + if not shouldShow: return if self.progressBar: self.progressBar.clear() - print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(), - self.completed, self.numTests) - - if test.result.isFailure and self.opts.showOutput: - print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(), - '*'*20) - print test.output - print "*" * 20 - + # Show the test result line. + test_name = test.getFullName() + print('%s: %s (%d of %d)' % (test.result.code.name, test_name, + self.completed, self.numTests)) + + # Show the test failure output, if requested. + if (test.result.code.isFailure and self.opts.showOutput) or \ + self.opts.showAllOutput: + if test.result.code.isFailure: + print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(), + '*'*20)) + print(test.result.output) + print("*" * 20) + + # Report test metrics, if present. + if test.result.metrics: + print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(), + '*'*10)) + items = sorted(test.result.metrics.items()) + for metric_name, value in items: + print('%s: %s ' % (metric_name, value.format())) + print("*" * 10) + + # Ensure the output is flushed. sys.stdout.flush() -class TestProvider: - def __init__(self, tests, maxTime): - self.maxTime = maxTime - self.iter = iter(tests) - self.lock = threading.Lock() - self.startTime = time.time() - self.canceled = False - - def cancel(self): - self.lock.acquire() - self.canceled = True - self.lock.release() - - def get(self): - # Check if we have run out of time. 
- if self.maxTime is not None: - if time.time() - self.startTime > self.maxTime: - return None - - # Otherwise take the next test. - self.lock.acquire() - if self.canceled: - self.lock.release() - return None +def write_test_results(run, lit_config, testing_time, output_path): + try: + import json + except ImportError: + lit_config.fatal('test output unsupported with Python 2.5') + + # Construct the data we will write. + data = {} + # Encode the current lit version as a schema version. + data['__version__'] = lit.__versioninfo__ + data['elapsed'] = testing_time + # FIXME: Record some information on the lit configuration used? + # FIXME: Record information from the individual test suites? + + # Encode the tests. + data['tests'] = tests_data = [] + for test in run.tests: + test_data = { + 'name' : test.getFullName(), + 'code' : test.result.code.name, + 'output' : test.result.output, + 'elapsed' : test.result.elapsed } + + # Add test metrics, if present. + if test.result.metrics: + test_data['metrics'] = metrics_data = {} + for key, value in test.result.metrics.items(): + metrics_data[key] = value.todata() + + tests_data.append(test_data) + + # Write the output. + f = open(output_path, 'w') + try: + json.dump(data, f, indent=2, sort_keys=True) + f.write('\n') + finally: + f.close() - try: - item = self.iter.next() - except StopIteration: - item = None - self.lock.release() - return item - -class Tester(threading.Thread): - def __init__(self, litConfig, provider, display): - threading.Thread.__init__(self) - self.litConfig = litConfig - self.provider = provider - self.display = display - - def run(self): - while 1: - item = self.provider.get() - if item is None: - break - self.runTest(item) - - def runTest(self, test): - result = None - startTime = time.time() - try: - result, output = test.config.test_format.execute(test, - self.litConfig) - except KeyboardInterrupt: - # This is a sad hack. Unfortunately subprocess goes - # bonkers with ctrl-c and we start forking merrily. - print '\nCtrl-C detected, goodbye.' - os.kill(0,9) - except: - if self.litConfig.debug: - raise - result = Test.UNRESOLVED - output = 'Exception during script execution:\n' - output += traceback.format_exc() - output += '\n' - elapsed = time.time() - startTime - - test.setResult(result, output, elapsed) - self.display.update(test) - -def runTests(numThreads, litConfig, provider, display): - # If only using one testing thread, don't use threads at all; this lets us - # profile, among other things. - if numThreads == 1: - t = Tester(litConfig, provider, display) - t.run() +def update_incremental_cache(test): + if not test.result.code.isFailure: return + fname = test.getFilePath() + os.utime(fname, None) - # Otherwise spin up the testing threads and wait for them to finish. - testers = [Tester(litConfig, provider, display) - for i in range(numThreads)] - for t in testers: - t.start() - try: - for t in testers: - t.join() - except KeyboardInterrupt: - sys.exit(2) +def sort_by_incremental_cache(run): + def sortIndex(test): + fname = test.getFilePath() + try: + return -os.path.getmtime(fname) + except: + return 0 + run.tests.sort(key = lambda t: sortIndex(t)) def main(builtinParameters = {}): - # Bump the GIL check interval, its more important to get any one thread to a - # blocking operation (hopefully exec) than to try and unblock other threads. - # - # FIXME: This is a hack. - import sys - sys.setcheckinterval(1000) + # Use processes by default on Unix platforms. 
+ isWindows = platform.system() == 'Windows' + useProcessesIsDefault = not isWindows global options from optparse import OptionParser, OptionGroup parser = OptionParser("usage: %prog [options] {file-or-path}") + parser.add_option("", "--version", dest="show_version", + help="Show version and exit", + action="store_true", default=False) parser.add_option("-j", "--threads", dest="numThreads", metavar="N", help="Number of testing threads", type=int, action="store", default=None) parser.add_option("", "--config-prefix", dest="configPrefix", metavar="NAME", help="Prefix for 'lit' config files", action="store", default=None) - parser.add_option("", "--param", dest="userParameters", + parser.add_option("-D", "--param", dest="userParameters", metavar="NAME=VAL", help="Add 'NAME' = 'VAL' to the user defined parameters", type=str, action="append", default=[]) @@ -191,11 +164,23 @@ def main(builtinParameters = {}): help="Reduce amount of output", action="store_true", default=False) group.add_option("-v", "--verbose", dest="showOutput", - help="Show all test output", + help="Show test output for failures", + action="store_true", default=False) + group.add_option("-a", "--show-all", dest="showAllOutput", + help="Display all commandlines and output", action="store_true", default=False) + group.add_option("-o", "--output", dest="output_path", + help="Write test results to the provided path", + action="store", type=str, metavar="PATH") group.add_option("", "--no-progress-bar", dest="useProgressBar", help="Do not use curses based progress bar", action="store_false", default=True) + group.add_option("", "--show-unsupported", dest="show_unsupported", + help="Show unsupported tests", + action="store_true", default=False) + group.add_option("", "--show-xfail", dest="show_xfail", + help="Show tests that were expected to fail", + action="store_true", default=False) parser.add_option_group(group) group = OptionGroup(parser, "Test Execution") @@ -214,6 +199,16 @@ def main(builtinParameters = {}): group.add_option("", "--time-tests", dest="timeTests", help="Track elapsed wall time for each test", action="store_true", default=False) + group.add_option("", "--no-execute", dest="noExecute", + help="Don't execute any tests (assume PASS)", + action="store_true", default=False) + group.add_option("", "--xunit-xml-output", dest="xunit_output_file", + help=("Write XUnit-compatible XML test reports to the" + " specified file"), default=None) + group.add_option("", "--timeout", dest="maxIndividualTestTime", + help="Maximum time to spend running a single test (in seconds)." + "0 means no time limit. 
[Default: 0]", + type=int, default=None) parser.add_option_group(group) group = OptionGroup(parser, "Test Selection") @@ -226,6 +221,10 @@ def main(builtinParameters = {}): group.add_option("", "--shuffle", dest="shuffle", help="Run tests in random order", action="store_true", default=False) + group.add_option("-i", "--incremental", dest="incremental", + help="Run modified and failing tests first (updates " + "mtimes)", + action="store_true", default=False) group.add_option("", "--filter", dest="filter", metavar="REGEX", help=("Only run tests with paths matching the given " "regular expression"), @@ -242,13 +241,20 @@ def main(builtinParameters = {}): group.add_option("", "--show-tests", dest="showTests", help="Show all discovered tests", action="store_true", default=False) - group.add_option("", "--repeat", dest="repeatTests", metavar="N", - help="Repeat tests N times (for timing)", - action="store", default=None, type=int) + group.add_option("", "--use-processes", dest="useProcesses", + help="Run tests in parallel with processes (not threads)", + action="store_true", default=useProcessesIsDefault) + group.add_option("", "--use-threads", dest="useProcesses", + help="Run tests in parallel with threads (not processes)", + action="store_false", default=useProcessesIsDefault) parser.add_option_group(group) (opts, args) = parser.parse_args() + if opts.show_version: + print("lit %s" % (lit.__version__,)) + return + if not args: parser.error('No inputs specified') @@ -258,7 +264,7 @@ def main(builtinParameters = {}): # I haven't seen this bug occur with 2.5.2 and later, so only enable multiple # threads by default there. if sys.hexversion >= 0x2050200: - opts.numThreads = Util.detectCPUs() + opts.numThreads = lit.util.detectCPUs() else: opts.numThreads = 1 @@ -273,48 +279,77 @@ def main(builtinParameters = {}): name,val = entry.split('=', 1) userParams[name] = val + # Decide what the requested maximum indvidual test time should be + if opts.maxIndividualTestTime != None: + maxIndividualTestTime = opts.maxIndividualTestTime + else: + # Default is zero + maxIndividualTestTime = 0 + + # Create the global config object. - litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]), - path = opts.path, - quiet = opts.quiet, - useValgrind = opts.useValgrind, - valgrindLeakCheck = opts.valgrindLeakCheck, - valgrindArgs = opts.valgrindArgs, - debug = opts.debug, - isWindows = (platform.system()=='Windows'), - params = userParams, - config_prefix = opts.configPrefix) - - tests = lit.discovery.find_tests_for_inputs(litConfig, inputs) + litConfig = lit.LitConfig.LitConfig( + progname = os.path.basename(sys.argv[0]), + path = opts.path, + quiet = opts.quiet, + useValgrind = opts.useValgrind, + valgrindLeakCheck = opts.valgrindLeakCheck, + valgrindArgs = opts.valgrindArgs, + noExecute = opts.noExecute, + debug = opts.debug, + isWindows = isWindows, + params = userParams, + config_prefix = opts.configPrefix, + maxIndividualTestTime = maxIndividualTestTime) + + # Perform test discovery. + run = lit.run.Run(litConfig, + lit.discovery.find_tests_for_inputs(litConfig, inputs)) + + # After test discovery the configuration might have changed + # the maxIndividualTestTime. 
If we explicitly set this on the + # command line then override what was set in the test configuration + if opts.maxIndividualTestTime != None: + if opts.maxIndividualTestTime != litConfig.maxIndividualTestTime: + litConfig.note(('The test suite configuration requested an individual' + ' test timeout of {0} seconds but a timeout of {1} seconds was' + ' requested on the command line. Forcing timeout to be {1}' + ' seconds') + .format(litConfig.maxIndividualTestTime, + opts.maxIndividualTestTime)) + litConfig.maxIndividualTestTime = opts.maxIndividualTestTime if opts.showSuites or opts.showTests: # Aggregate the tests by suite. suitesAndTests = {} - for t in tests: - if t.suite not in suitesAndTests: - suitesAndTests[t.suite] = [] - suitesAndTests[t.suite].append(t) - suitesAndTests = suitesAndTests.items() + for result_test in run.tests: + if result_test.suite not in suitesAndTests: + suitesAndTests[result_test.suite] = [] + suitesAndTests[result_test.suite].append(result_test) + suitesAndTests = list(suitesAndTests.items()) suitesAndTests.sort(key = lambda item: item[0].name) # Show the suites, if requested. if opts.showSuites: - print '-- Test Suites --' + print('-- Test Suites --') for ts,ts_tests in suitesAndTests: - print ' %s - %d tests' %(ts.name, len(ts_tests)) - print ' Source Root: %s' % ts.source_root - print ' Exec Root : %s' % ts.exec_root + print(' %s - %d tests' %(ts.name, len(ts_tests))) + print(' Source Root: %s' % ts.source_root) + print(' Exec Root : %s' % ts.exec_root) # Show the tests, if requested. if opts.showTests: - print '-- Available Tests --' + print('-- Available Tests --') for ts,ts_tests in suitesAndTests: ts_tests.sort(key = lambda test: test.path_in_suite) for test in ts_tests: - print ' %s' % (test.getFullName(),) - + print(' %s' % (test.getFullName(),)) + + # Exit. + sys.exit(0) + # Select and order the tests. - numTotalTests = len(tests) + numTotalTests = len(run.tests) # First, select based on the filter expression if given. if opts.filter: @@ -323,124 +358,168 @@ def main(builtinParameters = {}): except: parser.error("invalid regular expression for --filter: %r" % ( opts.filter)) - tests = [t for t in tests - if rex.search(t.getFullName())] + run.tests = [result_test for result_test in run.tests + if rex.search(result_test.getFullName())] # Then select the order. if opts.shuffle: - random.shuffle(tests) + random.shuffle(run.tests) + elif opts.incremental: + sort_by_incremental_cache(run) else: - tests.sort(key = lambda t: t.getFullName()) + run.tests.sort(key = lambda result_test: result_test.getFullName()) # Finally limit the number of tests, if desired. if opts.maxTests is not None: - tests = tests[:opts.maxTests] + run.tests = run.tests[:opts.maxTests] # Don't create more threads than tests. - opts.numThreads = min(len(tests), opts.numThreads) + opts.numThreads = min(len(run.tests), opts.numThreads) + + # Because some tests use threads internally, and at least on Linux each + # of these threads counts toward the current process limit, try to + # raise the (soft) process limit so that tests don't fail due to + # resource exhaustion. + try: + cpus = lit.util.detectCPUs() + desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor + + # Import the resource module here inside this try block because it + # will likely fail on Windows. 
+ import resource + + max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC) + desired_limit = min(desired_limit, max_procs_hard) + + if max_procs_soft < desired_limit: + resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard)) + litConfig.note('raised the process limit from %d to %d' % \ + (max_procs_soft, desired_limit)) + except: + pass extra = '' - if len(tests) != numTotalTests: + if len(run.tests) != numTotalTests: extra = ' of %d' % numTotalTests - header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra, + header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra, opts.numThreads) - - if opts.repeatTests: - tests = [t.copyWithIndex(i) - for t in tests - for i in range(opts.repeatTests)] - progressBar = None if not opts.quiet: if opts.succinct and opts.useProgressBar: try: - tc = ProgressBar.TerminalController() - progressBar = ProgressBar.ProgressBar(tc, header) + tc = lit.ProgressBar.TerminalController() + progressBar = lit.ProgressBar.ProgressBar(tc, header) except ValueError: - print header - progressBar = ProgressBar.SimpleProgressBar('Testing: ') + print(header) + progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ') else: - print header + print(header) startTime = time.time() - display = TestingProgressDisplay(opts, len(tests), progressBar) - provider = TestProvider(tests, opts.maxTime) - + display = TestingProgressDisplay(opts, len(run.tests), progressBar) try: - import win32api - except ImportError: - pass - else: - def console_ctrl_handler(type): - provider.cancel() - return True - win32api.SetConsoleCtrlHandler(console_ctrl_handler, True) - - runTests(opts.numThreads, litConfig, provider, display) + run.execute_tests(display, opts.numThreads, opts.maxTime, + opts.useProcesses) + except KeyboardInterrupt: + sys.exit(2) display.finish() + testing_time = time.time() - startTime if not opts.quiet: - print 'Testing Time: %.2fs'%(time.time() - startTime) + print('Testing Time: %.2fs' % (testing_time,)) - # Update results for any tests which weren't run. - for t in tests: - if t.result is None: - t.setResult(Test.UNRESOLVED, '', 0.0) + # Write out the test data, if requested. + if opts.output_path is not None: + write_test_results(run, litConfig, testing_time, opts.output_path) # List test results organized by kind. hasFailures = False byCode = {} - for t in tests: - if t.result not in byCode: - byCode[t.result] = [] - byCode[t.result].append(t) - if t.result.isFailure: + for test in run.tests: + if test.result.code not in byCode: + byCode[test.result.code] = [] + byCode[test.result.code].append(test) + if test.result.code.isFailure: hasFailures = True - # FIXME: Show unresolved and (optionally) unsupported tests. - for title,code in (('Unexpected Passing Tests', Test.XPASS), - ('Failing Tests', Test.FAIL)): + # Print each test in any of the failing groups. + for title,code in (('Unexpected Passing Tests', lit.Test.XPASS), + ('Failing Tests', lit.Test.FAIL), + ('Unresolved Tests', lit.Test.UNRESOLVED), + ('Unsupported Tests', lit.Test.UNSUPPORTED), + ('Expected Failing Tests', lit.Test.XFAIL), + ('Timed Out Tests', lit.Test.TIMEOUT)): + if (lit.Test.XFAIL == code and not opts.show_xfail) or \ + (lit.Test.UNSUPPORTED == code and not opts.show_unsupported): + continue elts = byCode.get(code) if not elts: continue - print '*'*20 - print '%s (%d):' % (title, len(elts)) - for t in elts: - print ' %s' % t.getFullName() - print - - if opts.timeTests: - # Collate, in case we repeated tests. 
-        times = {}
-        for t in tests:
-            key = t.getFullName()
-            times[key] = times.get(key, 0.) + t.elapsed
-
-        byTime = list(times.items())
-        byTime.sort(key = lambda item: item[1])
-        if byTime:
-            Util.printHistogram(byTime, title='Tests')
-
-    for name,code in (('Expected Passes        ', Test.PASS),
-                      ('Expected Failures      ', Test.XFAIL),
-                      ('Unsupported Tests      ', Test.UNSUPPORTED),
-                      ('Unresolved Tests       ', Test.UNRESOLVED),
-                      ('Unexpected Passes      ', Test.XPASS),
-                      ('Unexpected Failures    ', Test.FAIL),):
+        print('*'*20)
+        print('%s (%d):' % (title, len(elts)))
+        for test in elts:
+            print('    %s' % test.getFullName())
+        sys.stdout.write('\n')
+
+    if opts.timeTests and run.tests:
+        # Order by time.
+        test_times = [(test.getFullName(), test.result.elapsed)
+                      for test in run.tests]
+        lit.util.printHistogram(test_times, title='Tests')
+
+    for name,code in (('Expected Passes    ', lit.Test.PASS),
+                      ('Passes With Retry  ', lit.Test.FLAKYPASS),
+                      ('Expected Failures  ', lit.Test.XFAIL),
+                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
+                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
+                      ('Unexpected Passes  ', lit.Test.XPASS),
+                      ('Unexpected Failures', lit.Test.FAIL),
+                      ('Individual Timeouts', lit.Test.TIMEOUT)):
         if opts.quiet and not code.isFailure:
             continue
         N = len(byCode.get(code,[]))
         if N:
-            print '  %s: %d' % (name,N)
+            print('  %s: %d' % (name,N))
+
+    if opts.xunit_output_file:
+        # Collect the tests, indexed by test suite
+        by_suite = {}
+        for result_test in run.tests:
+            suite = result_test.suite.config.name
+            if suite not in by_suite:
+                by_suite[suite] = {
+                    'passes'   : 0,
+                    'failures' : 0,
+                    'tests'    : [] }
+            by_suite[suite]['tests'].append(result_test)
+            if result_test.result.code.isFailure:
+                by_suite[suite]['failures'] += 1
+            else:
+                by_suite[suite]['passes'] += 1
+        xunit_output_file = open(opts.xunit_output_file, "w")
+        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
+        xunit_output_file.write("<testsuites>\n")
+        for suite_name, suite in by_suite.items():
+            safe_suite_name = suite_name.replace(".", "-")
+            xunit_output_file.write("<testsuite name='" + safe_suite_name +
+                                    "' tests='" + str(suite['passes'] +
+                                    suite['failures']) + "' failures='" +
+                                    str(suite['failures']) + "'>\n")
+            for result_test in suite['tests']:
+                xunit_output_file.write(result_test.getJUnitXML() + "\n")
+            xunit_output_file.write("</testsuite>\n")
+        xunit_output_file.write("</testsuites>")
+        xunit_output_file.close()

     # If we encountered any additional errors, exit abnormally.
     if litConfig.numErrors:
-        print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
+        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
         sys.exit(2)

     # Warn about warnings.
     if litConfig.numWarnings:
-        print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings
+        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

     if hasFailures:
         sys.exit(1)
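
Below is a short, illustrative Python sketch (not part of the patch above) showing how the JSON report written by the new -o/--output option could be consumed. The field names ('__version__', 'elapsed', 'tests', 'name', 'code', 'output', 'metrics') are taken from write_test_results() in this diff; the results.json path, the summarize_results helper, and the set of codes treated as failures (the same codes this patch groups as failing: FAIL, XPASS, UNRESOLVED, TIMEOUT) are assumptions for the example. A run such as "lit -v --timeout 60 -o results.json --xunit-xml-output results.xml path/to/tests" would produce the input file.

# Illustrative only: reads the JSON report produced by "lit -o results.json".
# The schema keys mirror write_test_results() above; everything else here
# (file name, helper name, codes treated as failures) is an assumption.
import json
import sys

def summarize_results(path="results.json"):
    with open(path) as f:
        data = json.load(f)
    print("lit version: %s, wall time: %.2fs"
          % (data["__version__"], data["elapsed"]))
    failing_codes = ("FAIL", "XPASS", "UNRESOLVED", "TIMEOUT")
    failures = [t for t in data["tests"] if t["code"] in failing_codes]
    for t in failures:
        print("%s: %s (%.2fs)" % (t["code"], t["name"], t["elapsed"] or 0.0))
        # 'metrics' is only present for tests that reported metrics.
        for metric, value in sorted(t.get("metrics", {}).items()):
            print("  %s = %s" % (metric, value))
    return len(failures)

if __name__ == "__main__":
    sys.exit(1 if summarize_results() else 0)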