#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""
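
# Typical invocations, shown as a hedged illustration (the test paths are
# hypothetical; the flags are the ones defined in main() below):
#
#   lit -sv test/                    # succinct progress, verbose failures
#   lit -j8 --filter 'X86' test/     # 8 threads, name-filtered subset
#   lit -o results.json test/        # also write JSON results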

from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            (self.opts.show_unsupported and test.result.code.name == 'UNSUPPORTED') or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()
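
# A minimal sketch of how this display is driven, mirroring the usage in
# main() below ('some_test' is a hypothetical lit.Test.Test instance):
#
#   display = TestingProgressDisplay(opts, numTests, progressBar)
#   display.update(some_test)   # called once per completed test
#   display.finish()            # clears the progress bar / trailing newline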

def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
    finally:
        f.close()
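
# A hedged sketch of the JSON document written above; the field names come
# from the code, the concrete values are hypothetical:
#
#   {
#     "__version__": [0, 4, 0],
#     "elapsed": 12.34,
#     "tests": [
#       { "code": "PASS",
#         "elapsed": 0.05,
#         "name": "suite :: subdir/test-one.txt",
#         "output": "" }
#     ]
#   }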

def update_incremental_cache(test):
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run):
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            return -os.path.getmtime(fname)
        except:
            return 0
    run.tests.sort(key = lambda t: sortIndex(t))
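
# Note how the two helpers above cooperate for '-i': update_incremental_cache()
# bumps the mtime of every failing test's file, and sort_by_incremental_cache()
# orders tests by descending mtime, so recently failed (or recently edited)
# tests run first on the next invocation. A hypothetical session:
#
#   lit -i test/    # failures get their file mtimes touched
#   lit -i test/    # those tests are now scheduled before everything else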

def main(builtinParameters = {}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress non-error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                          "failure times)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717). This hasn't
        # been seen to occur with 2.5.2 and later, so only enable multiple
        # threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name, val = entry, ''
        else:
            name, val = entry.split('=', 1)
        userParams[name] = val
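    # For illustration: '--param build_mode=Release' yields
    # userParams['build_mode'] == 'Release', while a bare '--param debug'
    # yields userParams['debug'] == ''. Suite configs can then consult these
    # values (typically via lit_config.params.get('build_mode')).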

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except:
            parser.error("invalid regular expression for --filter: %r" % (
                opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]
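    # Note the regex is applied with re.search() against the full test name,
    # e.g. a hypothetical 'MySuite :: CodeGen/X86/add.ll', so '--filter X86'
    # keeps any test whose full name contains 'X86'.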

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra,
                                                        opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name,code in (('Expected Passes    ', lit.Test.PASS),
                      ('Expected Failures  ', lit.Test.XFAIL),
                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes  ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print('  %s: %d' % (name,N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__=='__main__':
    main()