//===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
///
/// Interfaces for registering analysis passes, producing common pass manager
/// configurations, and parsing of pass pipelines.
///
//===----------------------------------------------------------------------===//
16 #ifndef LLVM_PASSES_PASSBUILDER_H
17 #define LLVM_PASSES_PASSBUILDER_H
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/Analysis/CGSCCPassManager.h"
21 #include "llvm/IR/PassManager.h"
22 #include "llvm/Transforms/Scalar/LoopPassManager.h"
/// A struct capturing PGO tunables.
///
/// Exactly one of the profile file names is expected to be non-empty for a
/// given PGO mode; \c RunProfileGen selects instrumentation-based profile
/// generation.
struct PGOOptions {
  std::string ProfileGenFile = "";
  std::string ProfileUseFile = "";
  std::string SampleProfileFile = "";
  bool RunProfileGen = false;
};
38 /// \brief This class provides access to building LLVM's passes.
40 /// It's members provide the baseline state available to passes during their
41 /// construction. The \c PassRegistry.def file specifies how to construct all
42 /// of the built-in passes, and those may reference these members during
46 Optional<PGOOptions> PGOOpt;
49 /// \brief A struct to capture parsed pass pipeline names.
51 /// A pipeline is defined as a series of names, each of which may in itself
52 /// recursively contain a nested pipeline. A name is either the name of a pass
53 /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
54 /// name is the name of a pass, the InnerPipeline is empty, since passes
55 /// cannot contain inner pipelines. See parsePassPipeline() for a more
56 /// detailed description of the textual pipeline format.
57 struct PipelineElement {
59 std::vector<PipelineElement> InnerPipeline;
62 /// \brief LLVM-provided high-level optimization levels.
64 /// This enumerates the LLVM-provided high-level optimization levels. Each
65 /// level has a specific goal and rationale.
66 enum OptimizationLevel {
67 /// Disable as many optimizations as possible. This doesn't completely
68 /// disable the optimizer in all cases, for example always_inline functions
69 /// can be required to be inlined for correctness.
72 /// Optimize quickly without destroying debuggability.
74 /// FIXME: The current and historical behavior of this level does *not*
75 /// agree with this goal, but we would like to move toward this goal in the
78 /// This level is tuned to produce a result from the optimizer as quickly
79 /// as possible and to avoid destroying debuggability. This tends to result
80 /// in a very good development mode where the compiled code will be
81 /// immediately executed as part of testing. As a consequence, where
82 /// possible, we would like to produce efficient-to-execute code, but not
83 /// if it significantly slows down compilation or would prevent even basic
84 /// debugging of the resulting binary.
86 /// As an example, complex loop transformations such as versioning,
87 /// vectorization, or fusion might not make sense here due to the degree to
88 /// which the executed code would differ from the source code, and the
89 /// potential compile time cost.
92 /// Optimize for fast execution as much as possible without triggering
93 /// significant incremental compile time or code size growth.
95 /// The key idea is that optimizations at this level should "pay for
96 /// themselves". So if an optimization increases compile time by 5% or
97 /// increases code size by 5% for a particular benchmark, that benchmark
98 /// should also be one which sees a 5% runtime improvement. If the compile
99 /// time or code size penalties happen on average across a diverse range of
100 /// LLVM users' benchmarks, then the improvements should as well.
102 /// And no matter what, the compile time needs to not grow superlinearly
103 /// with the size of input to LLVM so that users can control the runtime of
104 /// the optimizer in this mode.
106 /// This is expected to be a good default optimization level for the vast
107 /// majority of users.
110 /// Optimize for fast execution as much as possible.
112 /// This mode is significantly more aggressive in trading off compile time
113 /// and code size to get execution time improvements. The core idea is that
114 /// this mode should include any optimization that helps execution time on
115 /// balance across a diverse collection of benchmarks, even if it increases
116 /// code size or compile time for some benchmarks without corresponding
117 /// improvements to execution time.
119 /// Despite being willing to trade more compile time off to get improved
120 /// execution time, this mode still tries to avoid superlinear growth in
121 /// order to make even significantly slower compile times at least scale
122 /// reasonably. This does not preclude very substantial constant factor
126 /// Similar to \c O2 but tries to optimize for small code size instead of
127 /// fast execution without triggering significant incremental execution
130 /// The logic here is exactly the same as \c O2, but with code size and
131 /// execution time metrics swapped.
133 /// A consequence of the different core goal is that this should in general
134 /// produce substantially smaller executables that still run in
135 /// a reasonable amount of time.
138 /// A very specialized mode that will optimize for code size at any and all
141 /// This is useful primarily when there are absolute size limitations and
142 /// any effort taken to reduce the size is worth it regardless of the
143 /// execution time impact. You should expect this level to produce rather
144 /// slow, but very small, code.
148 explicit PassBuilder(TargetMachine *TM = nullptr,
149 Optional<PGOOptions> PGOOpt = None)
150 : TM(TM), PGOOpt(PGOOpt) {}
152 /// \brief Cross register the analysis managers through their proxies.
154 /// This is an interface that can be used to cross register each
155 // AnalysisManager with all the others analysis managers.
156 void crossRegisterProxies(LoopAnalysisManager &LAM,
157 FunctionAnalysisManager &FAM,
158 CGSCCAnalysisManager &CGAM,
159 ModuleAnalysisManager &MAM);
161 /// \brief Registers all available module analysis passes.
163 /// This is an interface that can be used to populate a \c
164 /// ModuleAnalysisManager with all registered module analyses. Callers can
165 /// still manually register any additional analyses. Callers can also
166 /// pre-register analyses and this will not override those.
167 void registerModuleAnalyses(ModuleAnalysisManager &MAM);
169 /// \brief Registers all available CGSCC analysis passes.
171 /// This is an interface that can be used to populate a \c CGSCCAnalysisManager
172 /// with all registered CGSCC analyses. Callers can still manually register any
173 /// additional analyses. Callers can also pre-register analyses and this will
174 /// not override those.
175 void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);
177 /// \brief Registers all available function analysis passes.
179 /// This is an interface that can be used to populate a \c
180 /// FunctionAnalysisManager with all registered function analyses. Callers can
181 /// still manually register any additional analyses. Callers can also
182 /// pre-register analyses and this will not override those.
183 void registerFunctionAnalyses(FunctionAnalysisManager &FAM);
185 /// \brief Registers all available loop analysis passes.
187 /// This is an interface that can be used to populate a \c LoopAnalysisManager
188 /// with all registered loop analyses. Callers can still manually register any
189 /// additional analyses.
190 void registerLoopAnalyses(LoopAnalysisManager &LAM);
192 /// Construct the core LLVM function canonicalization and simplification
195 /// This is a long pipeline and uses most of the per-function optimization
196 /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
197 /// repeatedly over the IR and is not expected to destroy important
198 /// information about the semantics of the IR.
200 /// Note that \p Level cannot be `O0` here. The pipelines produced are
201 /// only intended for use when attempting to optimize code. If frontends
202 /// require some transformations for semantic reasons, they should explicitly
205 /// \p PrepareForThinLTO indicates whether this is invoked in
206 /// PrepareForThinLTO phase. Special handling is needed for sample PGO to
207 /// ensure profile accurate in the backend profile annotation phase.
209 buildFunctionSimplificationPipeline(OptimizationLevel Level,
210 bool DebugLogging = false,
211 bool PrepareForThinLTO = false);
213 /// Construct the core LLVM module canonicalization and simplification
216 /// This pipeline focuses on canonicalizing and simplifying the entire module
217 /// of IR. Much like the function simplification pipeline above, it is
218 /// suitable to run repeatedly over the IR and is not expected to destroy
219 /// important information. It does, however, perform inlining and other
220 /// heuristic based simplifications that are not strictly reversible.
222 /// Note that \p Level cannot be `O0` here. The pipelines produced are
223 /// only intended for use when attempting to optimize code. If frontends
224 /// require some transformations for semantic reasons, they should explicitly
227 /// \p PrepareForThinLTO indicates whether this is invoked in
228 /// PrepareForThinLTO phase. Special handling is needed for sample PGO to
229 /// ensure profile accurate in the backend profile annotation phase.
231 buildModuleSimplificationPipeline(OptimizationLevel Level,
232 bool DebugLogging = false,
233 bool PrepareForThinLTO = false);
235 /// Construct the core LLVM module optimization pipeline.
237 /// This pipeline focuses on optimizing the execution speed of the IR. It
238 /// uses cost modeling and thresholds to balance code growth against runtime
239 /// improvements. It includes vectorization and other information destroying
240 /// transformations. It also cannot generally be run repeatedly on a module
241 /// without potentially seriously regressing either runtime performance of
242 /// the code or serious code size growth.
244 /// Note that \p Level cannot be `O0` here. The pipelines produced are
245 /// only intended for use when attempting to optimize code. If frontends
246 /// require some transformations for semantic reasons, they should explicitly
248 ModulePassManager buildModuleOptimizationPipeline(OptimizationLevel Level,
249 bool DebugLogging = false);
251 /// Build a per-module default optimization pipeline.
253 /// This provides a good default optimization pipeline for per-module
254 /// optimization and code generation without any link-time optimization. It
255 /// typically correspond to frontend "-O[123]" options for optimization
256 /// levels \c O1, \c O2 and \c O3 resp.
258 /// Note that \p Level cannot be `O0` here. The pipelines produced are
259 /// only intended for use when attempting to optimize code. If frontends
260 /// require some transformations for semantic reasons, they should explicitly
262 ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
263 bool DebugLogging = false);
265 /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
268 /// This adds the pre-link optimizations tuned to prepare a module for
269 /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
270 /// without making irreversible decisions which could be made better during
273 /// Note that \p Level cannot be `O0` here. The pipelines produced are
274 /// only intended for use when attempting to optimize code. If frontends
275 /// require some transformations for semantic reasons, they should explicitly
278 buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
279 bool DebugLogging = false);
281 /// Build an ThinLTO default optimization pipeline to a pass manager.
283 /// This provides a good default optimization pipeline for link-time
284 /// optimization and code generation. It is particularly tuned to fit well
285 /// when IR coming into the LTO phase was first run through \c
286 /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
288 /// Note that \p Level cannot be `O0` here. The pipelines produced are
289 /// only intended for use when attempting to optimize code. If frontends
290 /// require some transformations for semantic reasons, they should explicitly
292 ModulePassManager buildThinLTODefaultPipeline(OptimizationLevel Level,
293 bool DebugLogging = false);
295 /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
298 /// This adds the pre-link optimizations tuned to work well with a later LTO
299 /// run. It works to minimize the IR which needs to be analyzed without
300 /// making irreversible decisions which could be made better during the LTO
303 /// Note that \p Level cannot be `O0` here. The pipelines produced are
304 /// only intended for use when attempting to optimize code. If frontends
305 /// require some transformations for semantic reasons, they should explicitly
307 ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
308 bool DebugLogging = false);
310 /// Build an LTO default optimization pipeline to a pass manager.
312 /// This provides a good default optimization pipeline for link-time
313 /// optimization and code generation. It is particularly tuned to fit well
314 /// when IR coming into the LTO phase was first run through \c
315 /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
317 /// Note that \p Level cannot be `O0` here. The pipelines produced are
318 /// only intended for use when attempting to optimize code. If frontends
319 /// require some transformations for semantic reasons, they should explicitly
321 ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
322 bool DebugLogging = false);
324 /// Build the default `AAManager` with the default alias analysis pipeline
326 AAManager buildDefaultAAPipeline();
328 /// \brief Parse a textual pass pipeline description into a \c
329 /// ModulePassManager.
331 /// The format of the textual pass pipeline description looks something like:
333 /// module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
335 /// Pass managers have ()s describing the nest structure of passes. All passes
336 /// are comma separated. As a special shortcut, if the very first pass is not
337 /// a module pass (as a module pass manager is), this will automatically form
338 /// the shortest stack of pass managers that allow inserting that first pass.
339 /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
340 /// passes 'lpassN', all of these are valid:
342 /// fpass1,fpass2,fpass3
343 /// cgpass1,cgpass2,cgpass3
344 /// lpass1,lpass2,lpass3
346 /// And they are equivalent to the following (resp.):
348 /// module(function(fpass1,fpass2,fpass3))
349 /// module(cgscc(cgpass1,cgpass2,cgpass3))
350 /// module(function(loop(lpass1,lpass2,lpass3)))
352 /// This shortcut is especially useful for debugging and testing small pass
353 /// combinations. Note that these shortcuts don't introduce any other magic.
354 /// If the sequence of passes aren't all the exact same kind of pass, it will
355 /// be an error. You cannot mix different levels implicitly, you must
356 /// explicitly form a pass manager in which to nest passes.
357 bool parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
358 bool VerifyEachPass = true, bool DebugLogging = false);
360 /// {{@ Parse a textual pass pipeline description into a specific PassManager
362 /// Automatic deduction of an appropriate pass manager stack is not supported.
363 /// For example, to insert a loop pass 'lpass' into a FunctinoPassManager,
364 /// this is the valid pipeline text:
367 bool parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
368 bool VerifyEachPass = true, bool DebugLogging = false);
369 bool parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
370 bool VerifyEachPass = true, bool DebugLogging = false);
371 bool parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
372 bool VerifyEachPass = true, bool DebugLogging = false);
375 /// Parse a textual alias analysis pipeline into the provided AA manager.
377 /// The format of the textual AA pipeline is a comma separated list of AA
380 /// basic-aa,globals-aa,...
382 /// The AA manager is set up such that the provided alias analyses are tried
383 /// in the order specified. See the \c AAManaager documentation for details
384 /// about the logic used. This routine just provides the textual mapping
385 /// between AA names and the analyses to register with the manager.
387 /// Returns false if the text cannot be parsed cleanly. The specific state of
388 /// the \p AA manager is unspecified if such an error is encountered and this
390 bool parseAAPipeline(AAManager &AA, StringRef PipelineText);
392 /// \brief Register a callback for a default optimizer pipeline extension
395 /// This extension point allows adding passes that perform peephole
396 /// optimizations similar to the instruction combiner. These passes will be
397 /// inserted after each instance of the instruction combiner pass.
398 void registerPeepholeEPCallback(
399 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
400 PeepholeEPCallbacks.push_back(C);
403 /// \brief Register a callback for a default optimizer pipeline extension
406 /// This extension point allows adding late loop canonicalization and
407 /// simplification passes. This is the last point in the loop optimization
408 /// pipeline before loop deletion. Each pass added
409 /// here must be an instance of LoopPass.
410 /// This is the place to add passes that can remove loops, such as target-
411 /// specific loop idiom recognition.
412 void registerLateLoopOptimizationsEPCallback(
413 const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
414 LateLoopOptimizationsEPCallbacks.push_back(C);
417 /// \brief Register a callback for a default optimizer pipeline extension
420 /// This extension point allows adding loop passes to the end of the loop
422 void registerLoopOptimizerEndEPCallback(
423 const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
424 LoopOptimizerEndEPCallbacks.push_back(C);
427 /// \brief Register a callback for a default optimizer pipeline extension
430 /// This extension point allows adding optimization passes after most of the
431 /// main optimizations, but before the last cleanup-ish optimizations.
432 void registerScalarOptimizerLateEPCallback(
433 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
434 ScalarOptimizerLateEPCallbacks.push_back(C);
437 /// \brief Register a callback for a default optimizer pipeline extension
440 /// This extension point allows adding CallGraphSCC passes at the end of the
441 /// main CallGraphSCC passes and before any function simplification passes run
442 /// by CGPassManager.
443 void registerCGSCCOptimizerLateEPCallback(
444 const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
445 CGSCCOptimizerLateEPCallbacks.push_back(C);
448 /// \brief Register a callback for a default optimizer pipeline extension
451 /// This extension point allows adding optimization passes before the
452 /// vectorizer and other highly target specific optimization passes are
454 void registerVectorizerStartEPCallback(
455 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
456 VectorizerStartEPCallbacks.push_back(C);
459 /// \brief Register a callback for parsing an AliasAnalysis Name to populate
460 /// the given AAManager \p AA
461 void registerParseAACallback(
462 const std::function<bool(StringRef Name, AAManager &AA)> &C) {
463 AAParsingCallbacks.push_back(C);
466 /// {{@ Register callbacks for analysis registration with this PassBuilder
468 /// Callees register their analyses with the given AnalysisManager objects.
469 void registerAnalysisRegistrationCallback(
470 const std::function<void(CGSCCAnalysisManager &)> &C) {
471 CGSCCAnalysisRegistrationCallbacks.push_back(C);
473 void registerAnalysisRegistrationCallback(
474 const std::function<void(FunctionAnalysisManager &)> &C) {
475 FunctionAnalysisRegistrationCallbacks.push_back(C);
477 void registerAnalysisRegistrationCallback(
478 const std::function<void(LoopAnalysisManager &)> &C) {
479 LoopAnalysisRegistrationCallbacks.push_back(C);
481 void registerAnalysisRegistrationCallback(
482 const std::function<void(ModuleAnalysisManager &)> &C) {
483 ModuleAnalysisRegistrationCallbacks.push_back(C);
487 /// {{@ Register pipeline parsing callbacks with this pass builder instance.
488 /// Using these callbacks, callers can parse both a single pass name, as well
489 /// as entire sub-pipelines, and populate the PassManager instance
491 void registerPipelineParsingCallback(
492 const std::function<bool(StringRef Name, CGSCCPassManager &,
493 ArrayRef<PipelineElement>)> &C) {
494 CGSCCPipelineParsingCallbacks.push_back(C);
496 void registerPipelineParsingCallback(
497 const std::function<bool(StringRef Name, FunctionPassManager &,
498 ArrayRef<PipelineElement>)> &C) {
499 FunctionPipelineParsingCallbacks.push_back(C);
501 void registerPipelineParsingCallback(
502 const std::function<bool(StringRef Name, LoopPassManager &,
503 ArrayRef<PipelineElement>)> &C) {
504 LoopPipelineParsingCallbacks.push_back(C);
506 void registerPipelineParsingCallback(
507 const std::function<bool(StringRef Name, ModulePassManager &,
508 ArrayRef<PipelineElement>)> &C) {
509 ModulePipelineParsingCallbacks.push_back(C);
513 /// \brief Register a callback for a top-level pipeline entry.
515 /// If the PassManager type is not given at the top level of the pipeline
516 /// text, this Callback should be used to determine the appropriate stack of
517 /// PassManagers and populate the passed ModulePassManager.
518 void registerParseTopLevelPipelineCallback(
519 const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
520 bool VerifyEachPass, bool DebugLogging)> &C) {
521 TopLevelPipelineParsingCallbacks.push_back(C);
525 static Optional<std::vector<PipelineElement>>
526 parsePipelineText(StringRef Text);
528 bool parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
529 bool VerifyEachPass, bool DebugLogging);
530 bool parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
531 bool VerifyEachPass, bool DebugLogging);
532 bool parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
533 bool VerifyEachPass, bool DebugLogging);
534 bool parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
535 bool VerifyEachPass, bool DebugLogging);
536 bool parseAAPassName(AAManager &AA, StringRef Name);
538 bool parseLoopPassPipeline(LoopPassManager &LPM,
539 ArrayRef<PipelineElement> Pipeline,
540 bool VerifyEachPass, bool DebugLogging);
541 bool parseFunctionPassPipeline(FunctionPassManager &FPM,
542 ArrayRef<PipelineElement> Pipeline,
543 bool VerifyEachPass, bool DebugLogging);
544 bool parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
545 ArrayRef<PipelineElement> Pipeline,
546 bool VerifyEachPass, bool DebugLogging);
547 bool parseModulePassPipeline(ModulePassManager &MPM,
548 ArrayRef<PipelineElement> Pipeline,
549 bool VerifyEachPass, bool DebugLogging);
551 void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
552 OptimizationLevel Level, bool RunProfileGen,
553 std::string ProfileGenFile,
554 std::string ProfileUseFile);
556 void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);
558 // Extension Point callbacks
559 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
561 SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
562 LateLoopOptimizationsEPCallbacks;
563 SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
564 LoopOptimizerEndEPCallbacks;
565 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
566 ScalarOptimizerLateEPCallbacks;
567 SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
568 CGSCCOptimizerLateEPCallbacks;
569 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
570 VectorizerStartEPCallbacks;
572 SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
573 ModuleAnalysisRegistrationCallbacks;
574 SmallVector<std::function<bool(StringRef, ModulePassManager &,
575 ArrayRef<PipelineElement>)>,
577 ModulePipelineParsingCallbacks;
578 SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
579 bool VerifyEachPass, bool DebugLogging)>,
581 TopLevelPipelineParsingCallbacks;
583 SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
584 CGSCCAnalysisRegistrationCallbacks;
585 SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
586 ArrayRef<PipelineElement>)>,
588 CGSCCPipelineParsingCallbacks;
589 // Function callbacks
590 SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
591 FunctionAnalysisRegistrationCallbacks;
592 SmallVector<std::function<bool(StringRef, FunctionPassManager &,
593 ArrayRef<PipelineElement>)>,
595 FunctionPipelineParsingCallbacks;
597 SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
598 LoopAnalysisRegistrationCallbacks;
599 SmallVector<std::function<bool(StringRef, LoopPassManager &,
600 ArrayRef<PipelineElement>)>,
602 LoopPipelineParsingCallbacks;
604 SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
608 /// This utility template takes care of adding require<> and invalidate<>
609 /// passes for an analysis to a given \c PassManager. It is intended to be used
610 /// during parsing of a pass pipeline when parsing a single PipelineName.
611 /// When registering a new function analysis FancyAnalysis with the pass
612 /// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
615 /// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
616 /// ArrayRef<PipelineElement> P) {
617 /// if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
622 template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
623 typename... ExtraArgTs>
624 bool parseAnalysisUtilityPasses(
625 StringRef AnalysisName, StringRef PipelineName,
626 PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
627 if (!PipelineName.endswith(">"))
629 // See if this is an invalidate<> pass name
630 if (PipelineName.startswith("invalidate<")) {
631 PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
632 if (PipelineName != AnalysisName)
634 PM.addPass(InvalidateAnalysisPass<AnalysisT>());
638 // See if this is a require<> pass name
639 if (PipelineName.startswith("require<")) {
640 PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
641 if (PipelineName != AnalysisName)
643 PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,