1 //===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// Interfaces for registering analysis passes, producing common pass manager
12 /// configurations, and parsing of pass pipelines.
14 //===----------------------------------------------------------------------===//
16 #ifndef LLVM_PASSES_PASSBUILDER_H
17 #define LLVM_PASSES_PASSBUILDER_H
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/Analysis/CGSCCPassManager.h"
21 #include "llvm/IR/PassManager.h"
22 #include "llvm/Support/Error.h"
23 #include "llvm/Transforms/Instrumentation.h"
24 #include "llvm/Transforms/Scalar/LoopPassManager.h"
class ModuleSummaryIndex;

/// A struct capturing PGO tunables.
struct PGOOptions {
  /// Bundle the PGO file names and mode flags.
  ///
  /// \p SamplePGOSupport is forced on whenever a sample profile file is
  /// given, so callers only need to set it explicitly for the no-profile
  /// SamplePGO case.
  PGOOptions(std::string ProfileGenFile = "", std::string ProfileUseFile = "",
             std::string SampleProfileFile = "",
             std::string ProfileRemappingFile = "",
             bool RunProfileGen = false, bool SamplePGOSupport = false)
      : ProfileGenFile(ProfileGenFile), ProfileUseFile(ProfileUseFile),
        SampleProfileFile(SampleProfileFile),
        ProfileRemappingFile(ProfileRemappingFile),
        RunProfileGen(RunProfileGen),
        SamplePGOSupport(SamplePGOSupport || !SampleProfileFile.empty()) {
    // At least one PGO mode must be requested; a fully-defaulted PGOOptions
    // is meaningless and asserts.
    assert((RunProfileGen || !SampleProfileFile.empty() ||
            !ProfileUseFile.empty() || SamplePGOSupport) &&
           "Illegal PGOOptions.");
  }
  std::string ProfileGenFile;       // File to write an instrumented profile to.
  std::string ProfileUseFile;       // Instrumented profile to consume.
  std::string SampleProfileFile;    // Sample-based profile to consume.
  std::string ProfileRemappingFile; // Symbol remapping for profile matching.
  bool RunProfileGen;               // Run the profile-generation pipeline.
  bool SamplePGOSupport;            // Enable SamplePGO-aware pipeline pieces.
};
57 /// This class provides access to building LLVM's passes.
59 /// It's members provide the baseline state available to passes during their
60 /// construction. The \c PassRegistry.def file specifies how to construct all
61 /// of the built-in passes, and those may reference these members during
65 Optional<PGOOptions> PGOOpt;
66 PassInstrumentationCallbacks *PIC;
69 /// A struct to capture parsed pass pipeline names.
71 /// A pipeline is defined as a series of names, each of which may in itself
72 /// recursively contain a nested pipeline. A name is either the name of a pass
73 /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
74 /// name is the name of a pass, the InnerPipeline is empty, since passes
75 /// cannot contain inner pipelines. See parsePassPipeline() for a more
76 /// detailed description of the textual pipeline format.
77 struct PipelineElement {
79 std::vector<PipelineElement> InnerPipeline;
/// This enumerates the LLVM ThinLTO optimization phases.
enum class ThinLTOPhase {
  /// No ThinLTO behavior needed.
  None,
  /// ThinLTO prelink (summary) phase.
  PreLink,
  /// ThinLTO postlink (backend compile) phase.
  PostLink
};
/// LLVM-provided high-level optimization levels.
///
/// This enumerates the LLVM-provided high-level optimization levels. Each
/// level has a specific goal and rationale.
enum OptimizationLevel {
  /// Disable as many optimizations as possible. This doesn't completely
  /// disable the optimizer in all cases, for example always_inline functions
  /// can be required to be inlined for correctness.
  O0,

  /// Optimize quickly without destroying debuggability.
  ///
  /// FIXME: The current and historical behavior of this level does *not*
  /// agree with this goal, but we would like to move toward this goal.
  ///
  /// This level is tuned to produce a result from the optimizer as quickly
  /// as possible and to avoid destroying debuggability, giving a good
  /// edit-compile-test development mode. As a consequence, where possible we
  /// would like efficient-to-execute code, but not if it significantly slows
  /// down compilation or prevents even basic debugging of the binary.
  ///
  /// As an example, complex loop transformations such as versioning,
  /// vectorization, or fusion might not make sense here due to the degree to
  /// which the executed code differs from the source code, and the compile
  /// time cost.
  O1,

  /// Optimize for fast execution as much as possible without triggering
  /// significant incremental compile time or code size growth.
  ///
  /// The key idea is that optimizations at this level should "pay for
  /// themselves": a 5% compile time or code size cost on a benchmark should
  /// come with roughly a 5% runtime improvement on it, and the same holds on
  /// average across a diverse range of benchmarks. No matter what, compile
  /// time must not grow superlinearly with the size of the input so users
  /// can control the optimizer's runtime.
  ///
  /// This is expected to be a good default optimization level for the vast
  /// majority of users.
  O2,

  /// Optimize for fast execution as much as possible.
  ///
  /// This mode is significantly more aggressive in trading off compile time
  /// and code size for execution time improvements: any optimization that
  /// helps execution time on balance across a diverse collection of
  /// benchmarks is included, even if it hurts code size or compile time for
  /// some of them. It still tries to avoid superlinear compile time growth
  /// so even significantly slower compiles scale reasonably.
  O3,

  /// Similar to \c O2 but tries to optimize for small code size instead of
  /// fast execution without triggering significant incremental execution
  /// time slowdowns — exactly the \c O2 logic with the code size and
  /// execution time metrics swapped. This should in general produce
  /// substantially smaller executables that still run in a reasonable
  /// amount of time.
  Os,

  /// A very specialized mode that will optimize for code size at any and all
  /// costs. Useful primarily under absolute size limitations where any size
  /// reduction is worth it regardless of execution time impact. Expect
  /// rather slow, but very small, code.
  Oz
};
180 explicit PassBuilder(TargetMachine *TM = nullptr,
181 Optional<PGOOptions> PGOOpt = None,
182 PassInstrumentationCallbacks *PIC = nullptr)
183 : TM(TM), PGOOpt(PGOOpt), PIC(PIC) {}
185 /// Cross register the analysis managers through their proxies.
187 /// This is an interface that can be used to cross register each
188 // AnalysisManager with all the others analysis managers.
189 void crossRegisterProxies(LoopAnalysisManager &LAM,
190 FunctionAnalysisManager &FAM,
191 CGSCCAnalysisManager &CGAM,
192 ModuleAnalysisManager &MAM);
194 /// Registers all available module analysis passes.
196 /// This is an interface that can be used to populate a \c
197 /// ModuleAnalysisManager with all registered module analyses. Callers can
198 /// still manually register any additional analyses. Callers can also
199 /// pre-register analyses and this will not override those.
200 void registerModuleAnalyses(ModuleAnalysisManager &MAM);
202 /// Registers all available CGSCC analysis passes.
204 /// This is an interface that can be used to populate a \c CGSCCAnalysisManager
205 /// with all registered CGSCC analyses. Callers can still manually register any
206 /// additional analyses. Callers can also pre-register analyses and this will
207 /// not override those.
208 void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);
210 /// Registers all available function analysis passes.
212 /// This is an interface that can be used to populate a \c
213 /// FunctionAnalysisManager with all registered function analyses. Callers can
214 /// still manually register any additional analyses. Callers can also
215 /// pre-register analyses and this will not override those.
216 void registerFunctionAnalyses(FunctionAnalysisManager &FAM);
218 /// Registers all available loop analysis passes.
220 /// This is an interface that can be used to populate a \c LoopAnalysisManager
221 /// with all registered loop analyses. Callers can still manually register any
222 /// additional analyses.
223 void registerLoopAnalyses(LoopAnalysisManager &LAM);
225 /// Construct the core LLVM function canonicalization and simplification
228 /// This is a long pipeline and uses most of the per-function optimization
229 /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
230 /// repeatedly over the IR and is not expected to destroy important
231 /// information about the semantics of the IR.
233 /// Note that \p Level cannot be `O0` here. The pipelines produced are
234 /// only intended for use when attempting to optimize code. If frontends
235 /// require some transformations for semantic reasons, they should explicitly
238 /// \p Phase indicates the current ThinLTO phase.
240 buildFunctionSimplificationPipeline(OptimizationLevel Level,
242 bool DebugLogging = false);
244 /// Construct the core LLVM module canonicalization and simplification
247 /// This pipeline focuses on canonicalizing and simplifying the entire module
248 /// of IR. Much like the function simplification pipeline above, it is
249 /// suitable to run repeatedly over the IR and is not expected to destroy
250 /// important information. It does, however, perform inlining and other
251 /// heuristic based simplifications that are not strictly reversible.
253 /// Note that \p Level cannot be `O0` here. The pipelines produced are
254 /// only intended for use when attempting to optimize code. If frontends
255 /// require some transformations for semantic reasons, they should explicitly
258 /// \p Phase indicates the current ThinLTO phase.
260 buildModuleSimplificationPipeline(OptimizationLevel Level,
262 bool DebugLogging = false);
264 /// Construct the core LLVM module optimization pipeline.
266 /// This pipeline focuses on optimizing the execution speed of the IR. It
267 /// uses cost modeling and thresholds to balance code growth against runtime
268 /// improvements. It includes vectorization and other information destroying
269 /// transformations. It also cannot generally be run repeatedly on a module
270 /// without potentially seriously regressing either runtime performance of
271 /// the code or serious code size growth.
273 /// Note that \p Level cannot be `O0` here. The pipelines produced are
274 /// only intended for use when attempting to optimize code. If frontends
275 /// require some transformations for semantic reasons, they should explicitly
277 ModulePassManager buildModuleOptimizationPipeline(OptimizationLevel Level,
278 bool DebugLogging = false);
280 /// Build a per-module default optimization pipeline.
282 /// This provides a good default optimization pipeline for per-module
283 /// optimization and code generation without any link-time optimization. It
284 /// typically correspond to frontend "-O[123]" options for optimization
285 /// levels \c O1, \c O2 and \c O3 resp.
287 /// Note that \p Level cannot be `O0` here. The pipelines produced are
288 /// only intended for use when attempting to optimize code. If frontends
289 /// require some transformations for semantic reasons, they should explicitly
291 ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
292 bool DebugLogging = false);
294 /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
297 /// This adds the pre-link optimizations tuned to prepare a module for
298 /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
299 /// without making irreversible decisions which could be made better during
302 /// Note that \p Level cannot be `O0` here. The pipelines produced are
303 /// only intended for use when attempting to optimize code. If frontends
304 /// require some transformations for semantic reasons, they should explicitly
307 buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
308 bool DebugLogging = false);
310 /// Build an ThinLTO default optimization pipeline to a pass manager.
312 /// This provides a good default optimization pipeline for link-time
313 /// optimization and code generation. It is particularly tuned to fit well
314 /// when IR coming into the LTO phase was first run through \c
315 /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
317 /// Note that \p Level cannot be `O0` here. The pipelines produced are
318 /// only intended for use when attempting to optimize code. If frontends
319 /// require some transformations for semantic reasons, they should explicitly
322 buildThinLTODefaultPipeline(OptimizationLevel Level, bool DebugLogging,
323 const ModuleSummaryIndex *ImportSummary);
325 /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
328 /// This adds the pre-link optimizations tuned to work well with a later LTO
329 /// run. It works to minimize the IR which needs to be analyzed without
330 /// making irreversible decisions which could be made better during the LTO
333 /// Note that \p Level cannot be `O0` here. The pipelines produced are
334 /// only intended for use when attempting to optimize code. If frontends
335 /// require some transformations for semantic reasons, they should explicitly
337 ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
338 bool DebugLogging = false);
340 /// Build an LTO default optimization pipeline to a pass manager.
342 /// This provides a good default optimization pipeline for link-time
343 /// optimization and code generation. It is particularly tuned to fit well
344 /// when IR coming into the LTO phase was first run through \c
345 /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
347 /// Note that \p Level cannot be `O0` here. The pipelines produced are
348 /// only intended for use when attempting to optimize code. If frontends
349 /// require some transformations for semantic reasons, they should explicitly
351 ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
353 ModuleSummaryIndex *ExportSummary);
355 /// Build the default `AAManager` with the default alias analysis pipeline
357 AAManager buildDefaultAAPipeline();
359 /// Parse a textual pass pipeline description into a \c
360 /// ModulePassManager.
362 /// The format of the textual pass pipeline description looks something like:
364 /// module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
366 /// Pass managers have ()s describing the nest structure of passes. All passes
367 /// are comma separated. As a special shortcut, if the very first pass is not
368 /// a module pass (as a module pass manager is), this will automatically form
369 /// the shortest stack of pass managers that allow inserting that first pass.
370 /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
371 /// passes 'lpassN', all of these are valid:
373 /// fpass1,fpass2,fpass3
374 /// cgpass1,cgpass2,cgpass3
375 /// lpass1,lpass2,lpass3
377 /// And they are equivalent to the following (resp.):
379 /// module(function(fpass1,fpass2,fpass3))
380 /// module(cgscc(cgpass1,cgpass2,cgpass3))
381 /// module(function(loop(lpass1,lpass2,lpass3)))
383 /// This shortcut is especially useful for debugging and testing small pass
384 /// combinations. Note that these shortcuts don't introduce any other magic.
385 /// If the sequence of passes aren't all the exact same kind of pass, it will
386 /// be an error. You cannot mix different levels implicitly, you must
387 /// explicitly form a pass manager in which to nest passes.
388 Error parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
389 bool VerifyEachPass = true,
390 bool DebugLogging = false);
392 /// {{@ Parse a textual pass pipeline description into a specific PassManager
394 /// Automatic deduction of an appropriate pass manager stack is not supported.
395 /// For example, to insert a loop pass 'lpass' into a FunctinoPassManager,
396 /// this is the valid pipeline text:
399 Error parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
400 bool VerifyEachPass = true,
401 bool DebugLogging = false);
402 Error parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
403 bool VerifyEachPass = true,
404 bool DebugLogging = false);
405 Error parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
406 bool VerifyEachPass = true,
407 bool DebugLogging = false);
410 /// Parse a textual alias analysis pipeline into the provided AA manager.
412 /// The format of the textual AA pipeline is a comma separated list of AA
415 /// basic-aa,globals-aa,...
417 /// The AA manager is set up such that the provided alias analyses are tried
418 /// in the order specified. See the \c AAManaager documentation for details
419 /// about the logic used. This routine just provides the textual mapping
420 /// between AA names and the analyses to register with the manager.
422 /// Returns false if the text cannot be parsed cleanly. The specific state of
423 /// the \p AA manager is unspecified if such an error is encountered and this
425 Error parseAAPipeline(AAManager &AA, StringRef PipelineText);
427 /// Register a callback for a default optimizer pipeline extension
430 /// This extension point allows adding passes that perform peephole
431 /// optimizations similar to the instruction combiner. These passes will be
432 /// inserted after each instance of the instruction combiner pass.
433 void registerPeepholeEPCallback(
434 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
435 PeepholeEPCallbacks.push_back(C);
438 /// Register a callback for a default optimizer pipeline extension
441 /// This extension point allows adding late loop canonicalization and
442 /// simplification passes. This is the last point in the loop optimization
443 /// pipeline before loop deletion. Each pass added
444 /// here must be an instance of LoopPass.
445 /// This is the place to add passes that can remove loops, such as target-
446 /// specific loop idiom recognition.
447 void registerLateLoopOptimizationsEPCallback(
448 const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
449 LateLoopOptimizationsEPCallbacks.push_back(C);
452 /// Register a callback for a default optimizer pipeline extension
455 /// This extension point allows adding loop passes to the end of the loop
457 void registerLoopOptimizerEndEPCallback(
458 const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
459 LoopOptimizerEndEPCallbacks.push_back(C);
462 /// Register a callback for a default optimizer pipeline extension
465 /// This extension point allows adding optimization passes after most of the
466 /// main optimizations, but before the last cleanup-ish optimizations.
467 void registerScalarOptimizerLateEPCallback(
468 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
469 ScalarOptimizerLateEPCallbacks.push_back(C);
472 /// Register a callback for a default optimizer pipeline extension
475 /// This extension point allows adding CallGraphSCC passes at the end of the
476 /// main CallGraphSCC passes and before any function simplification passes run
477 /// by CGPassManager.
478 void registerCGSCCOptimizerLateEPCallback(
479 const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
480 CGSCCOptimizerLateEPCallbacks.push_back(C);
483 /// Register a callback for a default optimizer pipeline extension
486 /// This extension point allows adding optimization passes before the
487 /// vectorizer and other highly target specific optimization passes are
489 void registerVectorizerStartEPCallback(
490 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
491 VectorizerStartEPCallbacks.push_back(C);
494 /// Register a callback for a default optimizer pipeline extension point.
496 /// This extension point allows adding optimization once at the start of the
497 /// pipeline. This does not apply to 'backend' compiles (LTO and ThinLTO
498 /// link-time pipelines).
499 void registerPipelineStartEPCallback(
500 const std::function<void(ModulePassManager &)> &C) {
501 PipelineStartEPCallbacks.push_back(C);
504 /// Register a callback for a default optimizer pipeline extension point
506 /// This extension point allows adding optimizations at the very end of the
507 /// function optimization pipeline. A key difference between this and the
508 /// legacy PassManager's OptimizerLast callback is that this extension point
509 /// is not triggered at O0. Extensions to the O0 pipeline should append their
510 /// passes to the end of the overall pipeline.
511 void registerOptimizerLastEPCallback(
512 const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
513 OptimizerLastEPCallbacks.push_back(C);
516 /// Register a callback for parsing an AliasAnalysis Name to populate
517 /// the given AAManager \p AA
518 void registerParseAACallback(
519 const std::function<bool(StringRef Name, AAManager &AA)> &C) {
520 AAParsingCallbacks.push_back(C);
523 /// {{@ Register callbacks for analysis registration with this PassBuilder
525 /// Callees register their analyses with the given AnalysisManager objects.
526 void registerAnalysisRegistrationCallback(
527 const std::function<void(CGSCCAnalysisManager &)> &C) {
528 CGSCCAnalysisRegistrationCallbacks.push_back(C);
530 void registerAnalysisRegistrationCallback(
531 const std::function<void(FunctionAnalysisManager &)> &C) {
532 FunctionAnalysisRegistrationCallbacks.push_back(C);
534 void registerAnalysisRegistrationCallback(
535 const std::function<void(LoopAnalysisManager &)> &C) {
536 LoopAnalysisRegistrationCallbacks.push_back(C);
538 void registerAnalysisRegistrationCallback(
539 const std::function<void(ModuleAnalysisManager &)> &C) {
540 ModuleAnalysisRegistrationCallbacks.push_back(C);
544 /// {{@ Register pipeline parsing callbacks with this pass builder instance.
545 /// Using these callbacks, callers can parse both a single pass name, as well
546 /// as entire sub-pipelines, and populate the PassManager instance
548 void registerPipelineParsingCallback(
549 const std::function<bool(StringRef Name, CGSCCPassManager &,
550 ArrayRef<PipelineElement>)> &C) {
551 CGSCCPipelineParsingCallbacks.push_back(C);
553 void registerPipelineParsingCallback(
554 const std::function<bool(StringRef Name, FunctionPassManager &,
555 ArrayRef<PipelineElement>)> &C) {
556 FunctionPipelineParsingCallbacks.push_back(C);
558 void registerPipelineParsingCallback(
559 const std::function<bool(StringRef Name, LoopPassManager &,
560 ArrayRef<PipelineElement>)> &C) {
561 LoopPipelineParsingCallbacks.push_back(C);
563 void registerPipelineParsingCallback(
564 const std::function<bool(StringRef Name, ModulePassManager &,
565 ArrayRef<PipelineElement>)> &C) {
566 ModulePipelineParsingCallbacks.push_back(C);
570 /// Register a callback for a top-level pipeline entry.
572 /// If the PassManager type is not given at the top level of the pipeline
573 /// text, this Callback should be used to determine the appropriate stack of
574 /// PassManagers and populate the passed ModulePassManager.
575 void registerParseTopLevelPipelineCallback(
576 const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
577 bool VerifyEachPass, bool DebugLogging)> &C) {
578 TopLevelPipelineParsingCallbacks.push_back(C);
582 static Optional<std::vector<PipelineElement>>
583 parsePipelineText(StringRef Text);
585 Error parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
586 bool VerifyEachPass, bool DebugLogging);
587 Error parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
588 bool VerifyEachPass, bool DebugLogging);
589 Error parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
590 bool VerifyEachPass, bool DebugLogging);
591 Error parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
592 bool VerifyEachPass, bool DebugLogging);
593 bool parseAAPassName(AAManager &AA, StringRef Name);
595 Error parseLoopPassPipeline(LoopPassManager &LPM,
596 ArrayRef<PipelineElement> Pipeline,
597 bool VerifyEachPass, bool DebugLogging);
598 Error parseFunctionPassPipeline(FunctionPassManager &FPM,
599 ArrayRef<PipelineElement> Pipeline,
600 bool VerifyEachPass, bool DebugLogging);
601 Error parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
602 ArrayRef<PipelineElement> Pipeline,
603 bool VerifyEachPass, bool DebugLogging);
604 Error parseModulePassPipeline(ModulePassManager &MPM,
605 ArrayRef<PipelineElement> Pipeline,
606 bool VerifyEachPass, bool DebugLogging);
608 void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
609 OptimizationLevel Level, bool RunProfileGen,
610 std::string ProfileGenFile,
611 std::string ProfileUseFile,
612 std::string ProfileRemappingFile);
614 void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);
616 // Extension Point callbacks
617 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
619 SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
620 LateLoopOptimizationsEPCallbacks;
621 SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
622 LoopOptimizerEndEPCallbacks;
623 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
624 ScalarOptimizerLateEPCallbacks;
625 SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
626 CGSCCOptimizerLateEPCallbacks;
627 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
628 VectorizerStartEPCallbacks;
629 SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
630 OptimizerLastEPCallbacks;
632 SmallVector<std::function<void(ModulePassManager &)>, 2>
633 PipelineStartEPCallbacks;
634 SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
635 ModuleAnalysisRegistrationCallbacks;
636 SmallVector<std::function<bool(StringRef, ModulePassManager &,
637 ArrayRef<PipelineElement>)>,
639 ModulePipelineParsingCallbacks;
640 SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
641 bool VerifyEachPass, bool DebugLogging)>,
643 TopLevelPipelineParsingCallbacks;
645 SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
646 CGSCCAnalysisRegistrationCallbacks;
647 SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
648 ArrayRef<PipelineElement>)>,
650 CGSCCPipelineParsingCallbacks;
651 // Function callbacks
652 SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
653 FunctionAnalysisRegistrationCallbacks;
654 SmallVector<std::function<bool(StringRef, FunctionPassManager &,
655 ArrayRef<PipelineElement>)>,
657 FunctionPipelineParsingCallbacks;
659 SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
660 LoopAnalysisRegistrationCallbacks;
661 SmallVector<std::function<bool(StringRef, LoopPassManager &,
662 ArrayRef<PipelineElement>)>,
664 LoopPipelineParsingCallbacks;
666 SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
670 /// This utility template takes care of adding require<> and invalidate<>
671 /// passes for an analysis to a given \c PassManager. It is intended to be used
672 /// during parsing of a pass pipeline when parsing a single PipelineName.
673 /// When registering a new function analysis FancyAnalysis with the pass
674 /// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
677 /// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
678 /// ArrayRef<PipelineElement> P) {
679 /// if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
684 template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
685 typename... ExtraArgTs>
686 bool parseAnalysisUtilityPasses(
687 StringRef AnalysisName, StringRef PipelineName,
688 PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
689 if (!PipelineName.endswith(">"))
691 // See if this is an invalidate<> pass name
692 if (PipelineName.startswith("invalidate<")) {
693 PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
694 if (PipelineName != AnalysisName)
696 PM.addPass(InvalidateAnalysisPass<AnalysisT>());
700 // See if this is a require<> pass name
701 if (PipelineName.startswith("require<")) {
702 PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
703 if (PipelineName != AnalysisName)
705 PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,