"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery
class TestingProgressDisplay(object):
    """Console progress reporting: one update() call per completed test."""

    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')
    def update(self, test):
        self.completed += 1

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if not test.result.code.isFailure and \
                (self.opts.quiet or self.opts.succinct):
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()
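
# Helper used when the user passes -o/--output (see the option definitions in
# main() below): serializes one run's results as JSON.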
def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()
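
# For illustration, the JSON emitted above has roughly this shape (the field
# values here are made up):
#
#   {
#     "__version__": [0, 4, 0],
#     "elapsed": 42.5,
#     "tests": [
#       { "name": "MySuite :: basic.ll",
#         "code": "PASS",
#         "output": "",
#         "elapsed": 0.25,
#         "metrics": { "compile_time": 0.1 } }
#     ]
#   }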

def main(builtinParameters = {}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])
    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress all output except failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)
    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)
    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)
    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)
    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition that causes lit to always fail
        # with numThreads>1 (http://bugs.python.org/issue1731717). The bug
        # has not been seen with 2.5.2 and later, so only enable multiple
        # threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args
    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val
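    # For example, "--param build_mode=Release" yields
    # userParams['build_mode'] == 'Release'; a bare "--param debug" (no '=')
    # yields userParams['debug'] == ''.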

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)
    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))
    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit without running anything.
        sys.exit(0)
    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]
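
    # Note the order of the selection steps above: --filter is applied first,
    # then --shuffle (or the default sort by name), then --max-tests. For
    # example, "--shuffle --max-tests 100" runs a random sample of 100 tests,
    # not the first 100 by name.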
    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra,
                                                        opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)
    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)
    # List test results organized by kind.
    hadFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hadFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')
    if opts.timeTests and run.tests:
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')
    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))
    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hadFailures:
        sys.exit(1)
    sys.exit(0)

if __name__=='__main__':
    main()
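
# Example invocations (the test paths are hypothetical):
#   lit -sv test/                   # succinct progress, show failure output
#   lit -j1 --filter 'Lexer' test/  # single-threaded, name-filtered run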