"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""
from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.LitConfig
import lit.ProgressBar
import lit.Test
import lit.discovery
import lit.run
import lit.util
class TestingProgressDisplay(object):
    """Console reporter for test progress and per-test results.

    Drives an optional progress bar and prints a result line, failure
    output, and metrics for each completed test, honoring the --quiet,
    --succinct, and -v (showOutput) options.
    """

    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        """Tear down the display once all tests have run."""
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            # Succinct mode printed dots on one line; terminate it.
            sys.stdout.write('\n')

    def update(self, test):
        """Record one completed test and report it as the options dictate."""
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        # In quiet/succinct mode only failures get a full result line.
        if not test.result.code.isFailure and \
                (self.opts.quiet or self.opts.succinct):
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()
def write_test_results(run, lit_config, testing_time, output_path):
    """Write a JSON summary of the completed *run* to *output_path*.

    The report contains the lit version (as a schema version), the total
    elapsed testing time, and one record per test: name, result code,
    captured output, elapsed time, and any recorded metrics.

    *lit_config* is used only to report a fatal error when the ``json``
    module is unavailable (Python 2.5).
    """
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the results, closing the file even if the dump fails.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()
def main(builtinParameters = {}):
    """Entry point for lit: parse options, discover and run tests,
    then print a summary and exit with an appropriate status code.

    *builtinParameters* seeds the user-defined parameter dictionary
    (extended by any --param NAME=VAL options).  Exits 2 on errors or
    interrupt, 1 on test failures, 0 on success.

    NOTE: the default-argument dict is never mutated here (it is copied
    into userParams), so the mutable default is safe.
    """
    # Bump the GIL check interval, its more important to get any one thread to a
    # blocking operation (hopefully exec) than to try and unblock other threads.
    #
    # FIXME: This is a hack; sys.setcheckinterval is deprecated in Python 3.
    sys.setcheckinterval(1000)

    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    # --use-processes and --use-threads deliberately share one dest; the
    # last one given on the command line wins.
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=False)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=False)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with numThreads>1
        # http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
        # threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = (platform.system()=='Windows'),
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit without running anything.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                # Fall back to a simple bar when the terminal lacks
                # the capabilities curses needs.
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kinds.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)
# Allow running this module directly as a script.
if __name__=='__main__':
    main()