#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

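# A minimal invocation sketch (assumed console entry point name "lit"; the
# flags are the ones defined by the option parser in main() below, while the
# test path and parameter name/value are hypothetical placeholders):
#
#   lit -sv --filter 'regex-over-test-paths' --param name=value path/to/tests
#
# -s/-v adjust output verbosity, --filter restricts tests by a path regex, and
# --param adds NAME=VAL entries to the user-defined parameters.
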
from __future__ import absolute_import
import math, os, platform, random, re, sys, time

import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery

class TestingProgressDisplay(object):
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            (self.opts.show_unsupported and test.result.code.name == 'UNSUPPORTED') or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

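# A rough sketch (not a separate API) of how main() below drives this display:
# it constructs the display with the parsed options and test count, lit.run
# reports each finished test through update(), and finish() runs at the end.
#
#   display = TestingProgressDisplay(opts, len(run.tests), progressBar)
#   run.execute_tests(display, opts.numThreads, opts.maxTime, opts.useProcesses)
#   display.finish()
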
def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

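# Illustrative shape of the JSON document written above (values are made up;
# '__version__' is whatever lit.__versioninfo__ holds, and 'metrics' appears
# only for tests that reported metrics):
#
#   {
#     "__version__": [3, 5, 0],
#     "elapsed": 42.5,
#     "tests": [
#       {"name": "suite :: path/to/test", "code": "PASS",
#        "output": "...", "elapsed": 0.25}
#     ]
#   }
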
def update_incremental_cache(test):
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def sort_by_incremental_cache(run):
    def sortIndex(test):
        fname = test.getFilePath()
        try:
            return -os.path.getmtime(fname)
        except OSError:
            return 0
    run.tests.sort(key = lambda t: sortIndex(t))

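# Together these two helpers implement --incremental: update_incremental_cache()
# bumps the mtime of each failing test's file, and sort_by_incremental_cache()
# orders tests by descending mtime, so recently failed (or recently edited)
# tests run first on the next invocation.
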
def main(builtinParameters = {}):
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress output except for test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses-based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                     "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1: http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable
        # multiple threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name, val = entry, ''
        else:
            name, val = entry.split('=', 1)
        userParams[name] = val

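    # For illustration (hypothetical names): "--param build_mode=Release"
    # yields userParams['build_mode'] == 'Release', while a bare
    # "--param debug" yields userParams['debug'] == ''.
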
    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in run.tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts, ts_tests in suitesAndTests:
                print('  %s - %d tests' % (ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts, ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [t for t in run.tests
                     if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --' % (len(run.tests), extra,
                                                        opts.numThreads)

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title, code in (('Unexpected Passing Tests', lit.Test.XPASS),
                        ('Failing Tests', lit.Test.FAIL),
                        ('Unresolved Tests', lit.Test.UNRESOLVED)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print('    %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    for name, code in (('Expected Passes    ', lit.Test.PASS),
                       ('Expected Failures  ', lit.Test.XFAIL),
                       ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
                       ('Unresolved Tests   ', lit.Test.UNRESOLVED),
                       ('Unexpected Passes  ', lit.Test.XPASS),
                       ('Unexpected Failures', lit.Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code, []))
        if N:
            print('  %s: %d' % (name, N))

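    # Exit status, as implemented below and in the KeyboardInterrupt handler
    # above: 2 if lit was interrupted or hit configuration errors, 1 if any
    # test failed, 0 otherwise.
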
    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Report the number of warnings seen during testing.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()