lit - LLVM Integrated Tester.

See lit.pod for more information.
9 from __future__ import absolute_import
10 import math, os, platform, random, re, sys, time
12 import lit.ProgressBar
19 class TestingProgressDisplay(object):
20 def __init__(self, opts, numTests, progressBar=None):
22 self.numTests = numTests
24 self.progressBar = progressBar
29 self.progressBar.clear()
32 elif self.opts.succinct:
33 sys.stdout.write('\n')
35 def update(self, test):
38 self.progressBar.update(float(self.completed)/self.numTests,
41 if not test.result.code.isFailure and \
42 (self.opts.quiet or self.opts.succinct):
46 self.progressBar.clear()
48 print('%s: %s (%d of %d)' % (test.result.code.name, test.getFullName(),
49 self.completed, self.numTests))
51 if test.result.code.isFailure and self.opts.showOutput:
52 print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
54 print(test.result.output)
59 def main(builtinParameters = {}):
60 # Bump the GIL check interval, its more important to get any one thread to a
61 # blocking operation (hopefully exec) than to try and unblock other threads.
63 # FIXME: This is a hack.
64 sys.setcheckinterval(1000)
67 from optparse import OptionParser, OptionGroup
68 parser = OptionParser("usage: %prog [options] {file-or-path}")
70 parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
71 help="Number of testing threads",
72 type=int, action="store", default=None)
73 parser.add_option("", "--config-prefix", dest="configPrefix",
74 metavar="NAME", help="Prefix for 'lit' config files",
75 action="store", default=None)
76 parser.add_option("", "--param", dest="userParameters",
78 help="Add 'NAME' = 'VAL' to the user defined parameters",
79 type=str, action="append", default=[])
81 group = OptionGroup(parser, "Output Format")
82 # FIXME: I find these names very confusing, although I like the
84 group.add_option("-q", "--quiet", dest="quiet",
85 help="Suppress no error output",
86 action="store_true", default=False)
87 group.add_option("-s", "--succinct", dest="succinct",
88 help="Reduce amount of output",
89 action="store_true", default=False)
90 group.add_option("-v", "--verbose", dest="showOutput",
91 help="Show all test output",
92 action="store_true", default=False)
93 group.add_option("", "--no-progress-bar", dest="useProgressBar",
94 help="Do not use curses based progress bar",
95 action="store_false", default=True)
96 parser.add_option_group(group)
98 group = OptionGroup(parser, "Test Execution")
99 group.add_option("", "--path", dest="path",
100 help="Additional paths to add to testing environment",
101 action="append", type=str, default=[])
102 group.add_option("", "--vg", dest="useValgrind",
103 help="Run tests under valgrind",
104 action="store_true", default=False)
105 group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
106 help="Check for memory leaks under valgrind",
107 action="store_true", default=False)
108 group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
109 help="Specify an extra argument for valgrind",
110 type=str, action="append", default=[])
111 group.add_option("", "--time-tests", dest="timeTests",
112 help="Track elapsed wall time for each test",
113 action="store_true", default=False)
114 group.add_option("", "--no-execute", dest="noExecute",
115 help="Don't execute any tests (assume PASS)",
116 action="store_true", default=False)
117 parser.add_option_group(group)
119 group = OptionGroup(parser, "Test Selection")
120 group.add_option("", "--max-tests", dest="maxTests", metavar="N",
121 help="Maximum number of tests to run",
122 action="store", type=int, default=None)
123 group.add_option("", "--max-time", dest="maxTime", metavar="N",
124 help="Maximum time to spend testing (in seconds)",
125 action="store", type=float, default=None)
126 group.add_option("", "--shuffle", dest="shuffle",
127 help="Run tests in random order",
128 action="store_true", default=False)
129 group.add_option("", "--filter", dest="filter", metavar="REGEX",
130 help=("Only run tests with paths matching the given "
131 "regular expression"),
132 action="store", default=None)
133 parser.add_option_group(group)
135 group = OptionGroup(parser, "Debug and Experimental Options")
136 group.add_option("", "--debug", dest="debug",
137 help="Enable debugging (for 'lit' development)",
138 action="store_true", default=False)
139 group.add_option("", "--show-suites", dest="showSuites",
140 help="Show discovered test suites",
141 action="store_true", default=False)
142 group.add_option("", "--show-tests", dest="showTests",
143 help="Show all discovered tests",
144 action="store_true", default=False)
145 group.add_option("", "--use-processes", dest="useProcesses",
146 help="Run tests in parallel with processes (not threads)",
147 action="store_true", default=False)
148 group.add_option("", "--use-threads", dest="useProcesses",
149 help="Run tests in parallel with threads (not processes)",
150 action="store_false", default=False)
151 parser.add_option_group(group)
153 (opts, args) = parser.parse_args()
156 parser.error('No inputs specified')
158 if opts.numThreads is None:
159 # Python <2.5 has a race condition causing lit to always fail with numThreads>1
160 # http://bugs.python.org/issue1731717
161 # I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
162 # threads by default there.
163 if sys.hexversion >= 0x2050200:
164 opts.numThreads = lit.util.detectCPUs()
170 # Create the user defined parameters.
171 userParams = dict(builtinParameters)
172 for entry in opts.userParameters:
176 name,val = entry.split('=', 1)
177 userParams[name] = val
179 # Create the global config object.
180 litConfig = lit.LitConfig.LitConfig(
181 progname = os.path.basename(sys.argv[0]),
184 useValgrind = opts.useValgrind,
185 valgrindLeakCheck = opts.valgrindLeakCheck,
186 valgrindArgs = opts.valgrindArgs,
187 noExecute = opts.noExecute,
189 isWindows = (platform.system()=='Windows'),
191 config_prefix = opts.configPrefix)
193 # Perform test discovery.
194 run = lit.run.Run(litConfig,
195 lit.discovery.find_tests_for_inputs(litConfig, inputs))
197 if opts.showSuites or opts.showTests:
198 # Aggregate the tests by suite.
201 if t.suite not in suitesAndTests:
202 suitesAndTests[t.suite] = []
203 suitesAndTests[t.suite].append(t)
204 suitesAndTests = list(suitesAndTests.items())
205 suitesAndTests.sort(key = lambda item: item[0].name)
207 # Show the suites, if requested.
209 print('-- Test Suites --')
210 for ts,ts_tests in suitesAndTests:
211 print(' %s - %d tests' %(ts.name, len(ts_tests)))
212 print(' Source Root: %s' % ts.source_root)
213 print(' Exec Root : %s' % ts.exec_root)
215 # Show the tests, if requested.
217 print('-- Available Tests --')
218 for ts,ts_tests in suitesAndTests:
219 ts_tests.sort(key = lambda test: test.path_in_suite)
220 for test in ts_tests:
221 print(' %s' % (test.getFullName(),))
226 # Select and order the tests.
227 numTotalTests = len(run.tests)
229 # First, select based on the filter expression if given.
232 rex = re.compile(opts.filter)
234 parser.error("invalid regular expression for --filter: %r" % (
236 run.tests = [t for t in run.tests
237 if rex.search(t.getFullName())]
239 # Then select the order.
241 random.shuffle(run.tests)
243 run.tests.sort(key = lambda t: t.getFullName())
245 # Finally limit the number of tests, if desired.
246 if opts.maxTests is not None:
247 run.tests = run.tests[:opts.maxTests]
249 # Don't create more threads than tests.
250 opts.numThreads = min(len(run.tests), opts.numThreads)
253 if len(run.tests) != numTotalTests:
254 extra = ' of %d' % numTotalTests
255 header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
260 if opts.succinct and opts.useProgressBar:
262 tc = lit.ProgressBar.TerminalController()
263 progressBar = lit.ProgressBar.ProgressBar(tc, header)
266 progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
270 startTime = time.time()
271 display = TestingProgressDisplay(opts, len(run.tests), progressBar)
273 run.execute_tests(display, opts.numThreads, opts.maxTime,
275 except KeyboardInterrupt:
280 print('Testing Time: %.2fs'%(time.time() - startTime))
282 # List test results organized by kind.
285 for test in run.tests:
286 if test.result.code not in byCode:
287 byCode[test.result.code] = []
288 byCode[test.result.code].append(test)
289 if test.result.code.isFailure:
292 # Print each test in any of the failing groups.
293 for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
294 ('Failing Tests', lit.Test.FAIL),
295 ('Unresolved Tests', lit.Test.UNRESOLVED)):
296 elts = byCode.get(code)
300 print('%s (%d):' % (title, len(elts)))
302 print(' %s' % test.getFullName())
303 sys.stdout.write('\n')
305 if opts.timeTests and run.tests:
307 test_times = [(test.getFullName(), test.result.elapsed)
308 for test in run.tests]
309 lit.util.printHistogram(test_times, title='Tests')
311 for name,code in (('Expected Passes ', lit.Test.PASS),
312 ('Expected Failures ', lit.Test.XFAIL),
313 ('Unsupported Tests ', lit.Test.UNSUPPORTED),
314 ('Unresolved Tests ', lit.Test.UNRESOLVED),
315 ('Unexpected Passes ', lit.Test.XPASS),
316 ('Unexpected Failures', lit.Test.FAIL),):
317 if opts.quiet and not code.isFailure:
319 N = len(byCode.get(code,[]))
321 print(' %s: %d' % (name,N))
323 # If we encountered any additional errors, exit abnormally.
324 if litConfig.numErrors:
325 sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
328 # Warn about warnings.
329 if litConfig.numWarnings:
330 sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)
336 if __name__=='__main__':