1 # -*- coding: utf-8 -*-
2 # The LLVM Compiler Infrastructure
4 # This file is distributed under the University of Illinois Open Source
5 # License. See LICENSE.TXT for details.
6 """ This module implements the 'scan-build' command API.
Running the static analyzer against a build is done in multiple steps:
10 -- Intercept: capture the compilation command during the build,
11 -- Analyze: run the analyzer against the captured commands,
12 -- Report: create a cover report from the analyzer outputs. """
import re
import os
import os.path
import json
import glob
import logging
import multiprocessing
import tempfile
import functools
import subprocess
import contextlib
import datetime
import shutil

from collections import defaultdict

from libscanbuild import command_entry_point, compiler_wrapper, \
    wrapper_environment, run_build, run_command, CtuConfig
from libscanbuild.arguments import parse_args_for_scan_build, \
    parse_args_for_analyze_build
from libscanbuild.intercept import capture
from libscanbuild.report import document
from libscanbuild.compilation import split_command, classify_source, \
    compiler_language
from libscanbuild.clang import get_version, get_arguments, get_triple_arch
from libscanbuild.shell import decode
__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']

# Basenames the compiler wrappers are installed under; the build is pointed
# at these via CC/CXX so every compiler invocation can be intercepted.
COMPILER_WRAPPER_CC = 'analyze-cc'
COMPILER_WRAPPER_CXX = 'analyze-c++'

# File names used by the Cross Translation Unit (CTU) collect/merge steps:
# the merged global function map, and the per-TU temporary map folder.
CTU_FUNCTION_MAP_FILENAME = 'externalFnMap.txt'
CTU_TEMP_FNMAP_FOLDER = 'tmpExternalFnMaps'
51 """ Entry point for scan-build command. """
53 args = parse_args_for_scan_build()
54 # will re-assign the report directory as new output
55 with report_directory(args.output, args.keep_empty) as args.output:
56 # Run against a build command. there are cases, when analyzer run
57 # is not required. But we need to set up everything for the
58 # wrappers, because 'configure' needs to capture the CC/CXX values
60 if args.intercept_first:
61 # Run build command with intercept module.
62 exit_code = capture(args)
63 # Run the analyzer against the captured commands.
64 if need_analyzer(args.build):
65 govern_analyzer_runs(args)
67 # Run build command and analyzer with compiler wrappers.
68 environment = setup_environment(args)
69 exit_code = run_build(args.build, env=environment)
70 # Cover report generation and bug counting.
71 number_of_bugs = document(args)
72 # Set exit status as it was requested.
73 return number_of_bugs if args.status_bugs else exit_code
78 """ Entry point for analyze-build command. """
80 args = parse_args_for_analyze_build()
81 # will re-assign the report directory as new output
82 with report_directory(args.output, args.keep_empty) as args.output:
83 # Run the analyzer against a compilation db.
84 govern_analyzer_runs(args)
85 # Cover report generation and bug counting.
86 number_of_bugs = document(args)
87 # Set exit status as it was requested.
88 return number_of_bugs if args.status_bugs else 0
def need_analyzer(args):
    """ Decide whether the analyzer should run for this build command.

    Configure-like steps (./configure, autogen, ...) only probe the
    compiler, so the analyzer stays silent for them and no report is
    generated. Running `scan-build` over such a step may still be needed
    when compiler wrappers are used — that is when the build setup checks
    the compiler and captures its location for the later build. """

    if not args:
        return 0
    return not re.search('configure|autogen', args[0])
def prefix_with(constant, pieces):
    """ Interleave *constant* before every element of *pieces*.

    eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """

    result = []
    for piece in pieces:
        result.append(constant)
        result.append(piece)
    return result
def get_ctu_config_from_args(args):
    """ CTU configuration is created from the chosen phases and dir.

    Falls back to an all-disabled CtuConfig when the CTU related command
    line arguments are absent (e.g. CTU support is not available). """

    return (
        CtuConfig(collect=args.ctu_phases.collect,
                  analyze=args.ctu_phases.analyze,
                  dir=args.ctu_dir,
                  func_map_cmd=args.func_map_cmd)
        if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')
        else CtuConfig(collect=False, analyze=False, dir='', func_map_cmd=''))
def get_ctu_config_from_json(ctu_conf_json):
    """ Rebuild a CtuConfig from its JSON (list) serialized form.

    The compiler wrappers (analyze-cc / analyze-c++) receive the CTU
    configuration through an environment variable as a JSON array; the
    array order is (collect, analyze, dir, func_map_cmd). """

    ctu_config = json.loads(ctu_conf_json)
    # Recover namedtuple from json when coming from analyze-cc or analyze-c++
    return CtuConfig(collect=ctu_config[0],
                     analyze=ctu_config[1],
                     dir=ctu_config[2],
                     func_map_cmd=ctu_config[3])
def create_global_ctu_function_map(func_map_lines):
    """ Collapse per-TU function maps into one global, unambiguous map.

    Each input line holds the id of a function (mangled name) and the
    originating source (the corresponding AST file) name. A mangled name
    claimed by more than one AST file is ambiguous, so it is left out of
    CTU entirely.

    :param func_map_lines: iterator of 'mangled-name ast-file' strings.
    :returns: list of (mangled name, AST file) tuples, unique names only.
    """

    sources_by_name = {}
    for entry in func_map_lines:
        name, ast_file = entry.strip().split(' ', 1)
        sources_by_name.setdefault(name, set()).add(ast_file)

    # keep only the names defined by exactly one AST file
    return [(name, next(iter(files)))
            for name, files in sources_by_name.items()
            if len(files) == 1]
def merge_ctu_func_maps(ctudir):
    """ Merge individual function maps into a global one.

    As the collect phase runs parallel on multiple threads, all compilation
    units are separately mapped into a temporary file in CTU_TEMP_FNMAP_FOLDER.
    These function maps contain the mangled names of functions and the source
    (AST generated from the source) which had them.
    These files should be merged at the end into a global map file:
    CTU_FUNCTION_MAP_FILENAME."""

    def generate_func_map_lines(fnmap_dir):
        """ Iterate over all lines of input files in a determined order. """

        files = glob.glob(os.path.join(fnmap_dir, '*'))
        for filename in files:
            with open(filename, 'r') as in_file:
                for line in in_file:
                    yield line

    def write_global_map(arch, mangled_ast_pairs):
        """ Write (mangled function name, ast file) pairs into final file. """

        extern_fns_map_file = os.path.join(ctudir, arch,
                                           CTU_FUNCTION_MAP_FILENAME)
        with open(extern_fns_map_file, 'w') as out_file:
            for mangled_name, ast_file in mangled_ast_pairs:
                out_file.write('%s %s\n' % (mangled_name, ast_file))

    # one merged map per architecture sub-directory of the CTU dir
    triple_arches = glob.glob(os.path.join(ctudir, '*'))
    for triple_path in triple_arches:
        if os.path.isdir(triple_path):
            triple_arch = os.path.basename(triple_path)
            fnmap_dir = os.path.join(ctudir, triple_arch,
                                     CTU_TEMP_FNMAP_FOLDER)

            func_map_lines = generate_func_map_lines(fnmap_dir)
            mangled_ast_pairs = create_global_ctu_function_map(func_map_lines)
            write_global_map(triple_arch, mangled_ast_pairs)

            # Remove all temporary files
            shutil.rmtree(fnmap_dir, ignore_errors=True)
def run_analyzer_parallel(args):
    """ Runs the analyzer against the given compilation database.

    Entries matched by --exclude directories are skipped; each remaining
    entry is merged with the analyzer-wide constant parameters and handed
    to 'run' on a worker pool. """

    def exclude(filename):
        """ Return true when any excluded directory prefix the filename. """
        return any(re.match(r'^' + directory, filename)
                   for directory in args.excludes)

    # parameters that are identical for every compilation database entry
    consts = {
        'clang': args.clang,
        'output_dir': args.output,
        'output_format': args.output_format,
        'output_failures': args.output_failures,
        'direct_args': analyzer_params(args),
        'force_debug': args.force_debug,
        'ctu': get_ctu_config_from_args(args)
    }

    logging.debug('run analyzer against compilation database')
    with open(args.cdb, 'r') as handle:
        generator = (dict(cmd, **consts)
                     for cmd in json.load(handle) if not exclude(cmd['file']))
        # when verbose output requested execute sequentially
        pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
        for current in pool.imap_unordered(run, generator):
            if current is not None:
                # display error message from the static analyzer
                for line in current['error_output']:
                    logging.info(line.rstrip())
        # shut the pool down cleanly so worker processes do not leak
        pool.close()
        pool.join()
def govern_analyzer_runs(args):
    """ Governs multiple runs in CTU mode or runs once in normal mode. """

    ctu_config = get_ctu_config_from_args(args)
    # If we do a CTU collect (1st phase) we remove all previous collection
    # data first.
    if ctu_config.collect:
        shutil.rmtree(ctu_config.dir, ignore_errors=True)

    # If the user asked for a collect (1st) and analyze (2nd) phase, we do an
    # all-in-one run where we deliberately remove collection data before and
    # also after the run. If the user asks only for a single phase data is
    # left so multiple analyze runs can use the same data gathered by a single
    # collection run.
    if ctu_config.collect and ctu_config.analyze:
        # CTU strings are coming from args.ctu_dir and func_map_cmd,
        # so we can leave it empty
        args.ctu_phases = CtuConfig(collect=True, analyze=False,
                                    dir='', func_map_cmd='')
        run_analyzer_parallel(args)
        merge_ctu_func_maps(ctu_config.dir)
        args.ctu_phases = CtuConfig(collect=False, analyze=True,
                                    dir='', func_map_cmd='')
        run_analyzer_parallel(args)
        shutil.rmtree(ctu_config.dir, ignore_errors=True)
    else:
        # Single runs (collect or analyze) are launched from here.
        run_analyzer_parallel(args)
        if ctu_config.collect:
            merge_ctu_func_maps(ctu_config.dir)
def setup_environment(args):
    """ Set up environment for build command to interpose compiler wrapper.

    Returns a copy of os.environ extended with the wrapper variables; the
    caller passes it to the build so every CC/CXX invocation goes through
    the analyze-cc / analyze-c++ wrappers. """

    environment = dict(os.environ)
    environment.update(wrapper_environment(args))
    environment.update({
        'CC': COMPILER_WRAPPER_CC,
        'CXX': COMPILER_WRAPPER_CXX,
        # empty value disables the analyzer in the wrappers (configure runs)
        'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',
        'ANALYZE_BUILD_REPORT_DIR': args.output,
        'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
        'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
        'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
        'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '',
        'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args))
    })
    return environment
def analyze_compiler_wrapper():
    """ Entry point for `analyze-cc` and `analyze-c++` compiler wrappers.

    Delegates to the generic compiler wrapper machinery, plugging in the
    analyzer specific implementation. """

    implementation = analyze_compiler_wrapper_impl
    return compiler_wrapper(implementation)
def analyze_compiler_wrapper_impl(result, execution):
    """ Implements analyzer compiler wrapper functionality.

    result -- exit code of the real compiler invocation.
    execution -- object with the command (cmd) and working dir (cwd). """

    # don't run analyzer when compilation fails. or when it's not requested.
    if result or not os.getenv('ANALYZE_BUILD_CLANG'):
        return
    # check is it a compilation?
    compilation = split_command(execution.cmd)
    if compilation is None:
        return
    # collect the needed parameters from environment, crash when missing
    parameters = {
        'clang': os.getenv('ANALYZE_BUILD_CLANG'),
        'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
        'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
        'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
        'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
                                 '').split(' '),
        'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
        'directory': execution.cwd,
        'command': [execution.cmd[0], '-c'] + compilation.flags,
        'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU'))
    }
    # call static analyzer against the compilation
    for source in compilation.files:
        parameters.update({'file': source})
        logging.debug('analyzer parameters %s', parameters)
        current = run(parameters)
        # display error message from the static analyzer
        if current is not None:
            for line in current['error_output']:
                logging.info(line.rstrip())
@contextlib.contextmanager
def report_directory(hint, keep):
    """ Responsible for the report directory.

    hint -- could specify the parent directory of the output directory.
    keep -- a boolean value to keep or delete the empty report directory.

    Yields the freshly created, time-stamped report directory; on exit a
    non-empty directory is always kept, an empty one is kept only when
    requested. """

    stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
    stamp = datetime.datetime.now().strftime(stamp_format)
    parent_dir = os.path.abspath(hint)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir)

    logging.info('Report directory created: %s', name)

    try:
        yield name
    finally:
        if os.listdir(name):
            # reports were generated — always keep them
            msg = "Run 'scan-view %s' to examine bug reports."
            keep = True
        else:
            if keep:
                msg = "Report directory '%s' contains no report, but kept."
            else:
                msg = "Removing directory '%s' because it contains no report."
        logging.warning(msg, name)

        if not keep:
            os.rmdir(name)
def analyzer_params(args):
    """ A group of command line arguments can mapped to command
    line arguments of the analyzer. This method generates those.

    Returns the analyzer (cc1) options, each prefixed with '-Xclang' so
    they survive the clang driver. """

    result = []

    if args.store_model:
        result.append('-analyzer-store={0}'.format(args.store_model))
    if args.constraints_model:
        result.append('-analyzer-constraints={0}'.format(
            args.constraints_model))
    if args.internal_stats:
        result.append('-analyzer-stats')
    if args.analyze_headers:
        result.append('-analyzer-opt-analyze-headers')
    if args.stats:
        result.append('-analyzer-checker=debug.Stats')
    if args.maxloop:
        result.extend(['-analyzer-max-loop', str(args.maxloop)])
    if args.output_format:
        result.append('-analyzer-output={0}'.format(args.output_format))
    if args.analyzer_config:
        result.extend(['-analyzer-config', args.analyzer_config])
    if args.verbose >= 4:
        result.append('-analyzer-display-progress')
    if args.plugins:
        result.extend(prefix_with('-load', args.plugins))
    if args.enable_checker:
        checkers = ','.join(args.enable_checker)
        result.extend(['-analyzer-checker', checkers])
    if args.disable_checker:
        checkers = ','.join(args.disable_checker)
        result.extend(['-analyzer-disable-checker', checkers])
    if os.getenv('UBIVIZ'):
        result.append('-analyzer-viz-egraph-ubigraph')

    return prefix_with('-Xclang', result)
def require(required):
    """ Decorator for checking the required values in state.

    It checks the required attributes in the passed state and stop when
    any of those is missing.

    required -- list of keys that must be present in the first positional
    argument (a dict) of the decorated function; raises KeyError otherwise.
    """

    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            for key in required:
                if key not in args[0]:
                    raise KeyError('{0} not passed to {1}'.format(
                        key, function.__name__))
            return function(*args, **kwargs)

        return wrapper

    return decorator
@require(['command',  # entry from compilation database
          'directory',  # entry from compilation database
          'file',  # entry from compilation database
          'clang',  # clang executable name (and path)
          'direct_args',  # arguments from command line
          'force_debug',  # kill non debug macros
          'output_dir',  # where generated report files shall go
          'output_format',  # it's 'plist', 'html', both or plist-multi-file
          'output_failures',  # generate crash reports or not
          'ctu'])  # ctu control options
def run(opts):
    """ Entry point to run (or not) static analyzer against a single entry
    of the compilation database.

    This complex task is decomposed into smaller methods which are calling
    each other in chain. If the analysis is not possible the given method
    just return and break the chain.

    The passed parameter is a python dictionary. Each method first check
    that the needed parameters received. (This is done by the 'require'
    decorator. It's like an 'assert' to check the contract between the
    caller and the called method.) """

    try:
        command = opts.pop('command')
        command = command if isinstance(command, list) else decode(command)
        logging.debug("Run analyzer against '%s'", command)
        opts.update(classify_parameters(command))

        return arch_check(opts)
    except Exception:
        # never let one broken entry abort the whole run
        logging.error("Problem occurred during analysis.", exc_info=1)
        return None
@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
          'error_output', 'exit_code'])
def report_failure(opts):
    """ Create report when analyzer failed.

    The major report is the preprocessor output. The output filename generated
    randomly. The compiler output also captured into '.stderr.txt' file.
    And some more execution context also saved into '.info.txt' file. """

    def extension():
        """ Generate preprocessor file extension. """

        mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
        return mapping.get(opts['language'], '.i')

    def destination():
        """ Creates failures directory if not exits yet. """

        failures_dir = os.path.join(opts['output_dir'], 'failures')
        if not os.path.isdir(failures_dir):
            os.makedirs(failures_dir)
        return failures_dir

    # Classify error type: when Clang terminated by a signal it's a 'Crash'.
    # (python subprocess Popen.returncode is negative when child terminated
    # by signal.) Everything else is 'Other Error'.
    error = 'crash' if opts['exit_code'] < 0 else 'other_error'
    # Create preprocessor output file name. (This is blindly following the
    # Perl implementation.)
    (handle, name) = tempfile.mkstemp(suffix=extension(),
                                      prefix='clang_' + error + '_',
                                      dir=destination())
    # close the inherited descriptor right away — clang writes the file itself
    os.close(handle)
    # Execute Clang again, but run the syntax check only.
    cwd = opts['directory']
    cmd = get_arguments(
        [opts['clang'], '-fsyntax-only', '-E'
         ] + opts['flags'] + [opts['file'], '-o', name], cwd)
    run_command(cmd, cwd=cwd)
    # write general information about the crash
    with open(name + '.info.txt', 'w') as handle:
        handle.write(opts['file'] + os.linesep)
        handle.write(error.title().replace('_', ' ') + os.linesep)
        handle.write(' '.join(cmd) + os.linesep)
        handle.write(' '.join(os.uname()) + os.linesep)
        handle.write(get_version(opts['clang']))
    # write the captured output too
    with open(name + '.stderr.txt', 'w') as handle:
        handle.writelines(opts['error_output'])
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
          'output_format'])
def run_analyzer(opts, continuation=report_failure):
    """ It assembles the analysis command line and executes it. Capture the
    output of the analysis and returns with it. If failure reports are
    requested, it calls the continuation to generate it. """

    def target():
        """ Creates output file name for reports. """
        if opts['output_format'] in {
                'plist',
                'plist-html',
                'plist-multi-file'}:
            (handle, name) = tempfile.mkstemp(prefix='report-',
                                              suffix='.plist',
                                              dir=opts['output_dir'])
            # close the descriptor; the analyzer writes the file itself
            os.close(handle)
            return name
        return opts['output_dir']

    try:
        cwd = opts['directory']
        cmd = get_arguments([opts['clang'], '--analyze'] +
                            opts['direct_args'] + opts['flags'] +
                            [opts['file'], '-o', target()],
                            cwd)
        output = run_command(cmd, cwd=cwd)
        return {'error_output': output, 'exit_code': 0}
    except subprocess.CalledProcessError as ex:
        result = {'error_output': ex.output, 'exit_code': ex.returncode}
        if opts.get('output_failures', False):
            opts.update(result)
            continuation(opts)
        return result
def func_map_list_src_to_ast(func_src_list):
    """ Turns textual function map list with source files into a
    function map list with ast files.

    Each input entry is 'mangled-name source-path'; each output entry is
    'mangled-name ast/<relative-source-path>.ast'. """

    func_ast_list = []
    for fn_src_txt in func_src_list:
        mangled_name, path = fn_src_txt.split(" ", 1)
        # Normalize path on windows as well
        path = os.path.splitdrive(path)[1]
        # Make relative path out of absolute
        path = path[1:] if path[0] == os.sep else path
        ast_path = os.path.join("ast", path + ".ast")
        func_ast_list.append(mangled_name + " " + ast_path)
    return func_ast_list
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu'])
def ctu_collect_phase(opts):
    """ Preprocess source by generating all data needed by CTU analysis. """

    def generate_ast(triple_arch):
        """ Generates ASTs for the current compilation command. """

        args = opts['direct_args'] + opts['flags']
        ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast',
                                       os.path.realpath(opts['file'])[1:] +
                                       '.ast')
        ast_path = os.path.abspath(ast_joined_path)
        ast_dir = os.path.dirname(ast_path)
        if not os.path.isdir(ast_dir):
            try:
                os.makedirs(ast_dir)
            except OSError:
                # In case an other process already created it.
                pass
        ast_command = [opts['clang'], '-emit-ast']
        ast_command.extend(args)
        ast_command.append('-w')
        ast_command.append(opts['file'])
        ast_command.append('-o')
        ast_command.append(ast_path)
        logging.debug("Generating AST using '%s'", ast_command)
        run_command(ast_command, cwd=opts['directory'])

    def map_functions(triple_arch):
        """ Generate function map file for the current source. """

        args = opts['direct_args'] + opts['flags']
        funcmap_command = [opts['ctu'].func_map_cmd]
        funcmap_command.append(opts['file'])
        funcmap_command.append('--')
        funcmap_command.extend(args)
        logging.debug("Generating function map using '%s'", funcmap_command)
        func_src_list = run_command(funcmap_command, cwd=opts['directory'])
        func_ast_list = func_map_list_src_to_ast(func_src_list)
        extern_fns_map_folder = os.path.join(opts['ctu'].dir, triple_arch,
                                             CTU_TEMP_FNMAP_FOLDER)
        if not os.path.isdir(extern_fns_map_folder):
            try:
                os.makedirs(extern_fns_map_folder)
            except OSError:
                # In case an other process already created it.
                pass
        # unique temp file per TU; merged later by merge_ctu_func_maps
        with tempfile.NamedTemporaryFile(mode='w',
                                         dir=extern_fns_map_folder,
                                         delete=False) as out_file:
            out_file.write("\n".join(func_ast_list) + "\n")

    cwd = opts['directory']
    cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \
        + [opts['file']]
    triple_arch = get_triple_arch(cmd, cwd)
    generate_ast(triple_arch)
    map_functions(triple_arch)
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu'])
def dispatch_ctu(opts, continuation=run_analyzer):
    """ Execute only one phase of 2 phases of CTU if needed. """

    ctu_config = opts['ctu']

    if ctu_config.collect or ctu_config.analyze:
        # the two phases are mutually exclusive within a single run
        assert ctu_config.collect != ctu_config.analyze
        if ctu_config.collect:
            return ctu_collect_phase(opts)
        if ctu_config.analyze:
            cwd = opts['directory']
            cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \
                + opts['flags'] + [opts['file']]
            triarch = get_triple_arch(cmd, cwd)
            ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch),
                           'experimental-enable-naive-ctu-analysis=true']
            analyzer_options = prefix_with('-analyzer-config', ctu_options)
            direct_options = prefix_with('-Xanalyzer', analyzer_options)
            opts['direct_args'].extend(direct_options)

    return continuation(opts)
@require(['flags', 'force_debug'])
def filter_debug_flags(opts, continuation=dispatch_ctu):
    """ Undefine NDEBUG when a debug-style analysis was requested.

    Appending -UNDEBUG at the end of the flag list cancels any earlier
    -DNDEBUG, so assert-guarded code stays visible to the analyzer. """

    force = opts.pop('force_debug')
    if force:
        # lazy implementation: the trailing undefine wins over earlier flags
        opts['flags'] = opts['flags'] + ['-UNDEBUG']

    return continuation(opts)
@require(['language', 'compiler', 'file', 'flags'])
def language_check(opts, continuation=filter_debug_flags):
    """ Find out the language from command line parameters or file name
    extension. The decision also influenced by the compiler invocation. """

    accepted = frozenset({
        'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
        'c++-cpp-output', 'objective-c-cpp-output'
    })

    # language can be given as a parameter...
    language = opts.pop('language')
    compiler = opts.pop('compiler')
    # ... or find out from source file extension
    if language is None and compiler is not None:
        language = classify_source(opts['file'], compiler == 'c')

    if language is None:
        logging.debug('skip analysis, language not known')
        return None
    elif language not in accepted:
        logging.debug('skip analysis, language not supported')
        return None
    else:
        logging.debug('analysis, language: %s', language)
        opts.update({'language': language,
                     'flags': ['-x', language] + opts['flags']})
        return continuation(opts)
@require(['arch_list', 'flags'])
def arch_check(opts, continuation=language_check):
    """ Do run analyzer through one of the given architectures. """

    disabled = frozenset({'ppc', 'ppc64'})

    received_list = opts.pop('arch_list')
    if received_list:
        # filter out disabled architectures and -arch switches
        filtered_list = [a for a in received_list if a not in disabled]
        if filtered_list:
            # There should be only one arch given (or the same multiple
            # times). If there are multiple arch are given and are not
            # the same, those should not change the pre-processing step.
            # But that's the only pass we have before run the analyzer.
            current = filtered_list.pop()
            logging.debug('analysis, on arch: %s', current)

            opts.update({'flags': ['-arch', current] + opts['flags']})
            return continuation(opts)
        else:
            logging.debug('skip analysis, found not supported arch')
            return None
    else:
        logging.debug('analysis, on default arch')
        return continuation(opts)
# To have good results from static analyzer certain compiler options shall be
# omitted. The compiler flag filtering only affects the static analyzer run.
#
# Keys are the option name, value number of options to skip
IGNORED_FLAGS = {
    '-c': 0,  # compile option will be overwritten
    '-fsyntax-only': 0,  # static analyzer option will be overwritten
    '-o': 1,  # will set up own output file
    # flags below are inherited from the perl implementation.
    '-g': 0,
    '-save-temps': 0,
    '-install_name': 1,
    '-exported_symbols_list': 1,
    '-current_version': 1,
    '-compatibility_version': 1,
    '-init': 1,
    '-e': 1,
    '-seg1addr': 1,
    '-bundle_loader': 1,
    '-multiply_defined': 1,
    '-sectorder': 3,
    '--param': 1,
    '--serialize-diagnostics': 1
}
def classify_parameters(command):
    """ Prepare compiler flags (filters some and add others) and take out
    language (-x) and architecture (-arch) flags for future processing. """

    # NOTE(review): this function reached us with several structural lines
    # missing (the 'result = {' dict opener, the 'for arg in args:' loop
    # header, the '-arch'/'-x' branch conditions, the 'pass'/'next(args)'
    # branch bodies and the trailing 'return result'); the surviving lines
    # are kept verbatim below — reconcile against upstream libscanbuild
    # before relying on this function.
        'flags': [],  # the filtered compiler flags
        'arch_list': [],  # list of architecture flags
        'language': None,  # compilation language, None, if not specified
        'compiler': compiler_language(command)  # 'c' or 'c++'

    # iterate on the compile options
    args = iter(command[1:])
        # take arch flags into a separate basket
            result['arch_list'].append(next(args))
            result['language'] = next(args)
        # parameters which looks source file are not flags
        elif re.match(r'^[^-].+', arg) and classify_source(arg):
        elif arg in IGNORED_FLAGS:
            # skip the flag plus the declared number of its arguments
            count = IGNORED_FLAGS[arg]
            for _ in range(count):
        # we don't care about extra warnings, but we should suppress ones
        # that we don't want to see.
        elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
        # and consider everything else as compilation flag.
            result['flags'].append(arg)