1 //===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 /// Interfaces for registering analysis passes, producing common pass manager
11 /// configurations, and parsing of pass pipelines.
13 //===----------------------------------------------------------------------===//
15 #ifndef LLVM_PASSES_PASSBUILDER_H
16 #define LLVM_PASSES_PASSBUILDER_H
18 #include "llvm/ADT/Optional.h"
19 #include "llvm/Analysis/CGSCCPassManager.h"
20 #include "llvm/IR/PassManager.h"
21 #include "llvm/Support/Error.h"
22 #include "llvm/Transforms/Instrumentation.h"
23 #include "llvm/Transforms/Scalar/LoopPassManager.h"
class ModuleSummaryIndex;

/// A struct capturing PGO tunables.
  // What kind of instrumentation-based PGO is in effect, if any.
  enum PGOAction { NoAction, IRInstr, IRUse, SampleUse };
  // What kind of context-sensitive PGO is in effect, if any.
  enum CSPGOAction { NoCSAction, CSIRInstr, CSIRUse };

  /// All arguments default to "off"; SamplePGOSupport is additionally forced
  /// on when Action == SampleUse. The asserts below document the legal
  /// combinations of the two action kinds.
  PGOOptions(std::string ProfileFile = "", std::string CSProfileGenFile = "",
             std::string ProfileRemappingFile = "", PGOAction Action = NoAction,
             CSPGOAction CSAction = NoCSAction, bool SamplePGOSupport = false)
      : ProfileFile(ProfileFile), CSProfileGenFile(CSProfileGenFile),
        ProfileRemappingFile(ProfileRemappingFile), Action(Action),
        SamplePGOSupport(SamplePGOSupport || Action == SampleUse) {
    // Note: we do allow ProfileFile.empty() for Action=IRUse; LTO can call
    // back with the IRUse action without a ProfileFile.

    // If there is a CSAction, PGOAction cannot be IRInstr or SampleUse.
    assert(this->CSAction == NoCSAction ||
           (this->Action != IRInstr && this->Action != SampleUse));

    // For CSIRInstr, CSProfileGenFile also needs to be nonempty.
    assert(this->CSAction != CSIRInstr || !this->CSProfileGenFile.empty());

    // If CSAction is CSIRUse, PGOAction needs to be IRUse as they share
    // a profile.
    assert(this->CSAction != CSIRUse || this->Action == IRUse);

    // If neither Action nor CSAction, SamplePGOSupport needs to be true.
    assert(this->Action != NoAction || this->CSAction != NoCSAction ||
           this->SamplePGOSupport);

  std::string ProfileFile;        // Profile to instrument-to or read from.
  std::string CSProfileGenFile;   // Output for context-sensitive instrumentation.
  std::string ProfileRemappingFile; // Optional symbol-remapping file.
  bool SamplePGOSupport;          // True when sample PGO machinery is needed.
/// Tunable parameters for passes in the default pipelines.
class PipelineTuningOptions {
  /// Constructor sets pipeline tuning defaults based on cl::opts. Each option
  /// can be set in the PassBuilder when using LLVM as a library.
  PipelineTuningOptions();

  /// Tuning option to set loop interleaving on/off. Its default value is that
  /// of the flag: `-interleave-loops`.
  bool LoopInterleaving;

  /// Tuning option to enable/disable loop vectorization. Its default value is
  /// that of the flag: `-vectorize-loops`.
  bool LoopVectorization;

  /// Tuning option to enable/disable SLP vectorization. Its default value
  /// is that of the flag: `-vectorize-slp`.
  bool SLPVectorization;

  /// Tuning option to enable/disable loop unrolling. Its default value is true.

  /// Tuning option to forget all SCEV loops in LoopUnroll. Its default value
  /// is that of the flag: `-forget-scev-loop-unroll`.
  bool ForgetAllSCEVInLoopUnroll;

  /// Tuning option to cap the number of calls to retrieve clobbering accesses
  /// in MemorySSA, in LICM.
  unsigned LicmMssaOptCap;

  /// Tuning option to disable promotion to scalars in LICM with MemorySSA, if
  /// the number of accesses is too large.
  unsigned LicmMssaNoAccForPromotionCap;
/// This class provides access to building LLVM's passes.
///
/// Its members provide the baseline state available to passes during their
/// construction. The \c PassRegistry.def file specifies how to construct all
/// of the built-in passes, and those may reference these members during
/// construction.
  PipelineTuningOptions PTO;            // Pipeline tuning knobs (see above).
  Optional<PGOOptions> PGOOpt;          // PGO configuration, if any.
  PassInstrumentationCallbacks *PIC;    // Non-owning; may be null.

  /// A struct to capture parsed pass pipeline names.
  ///
  /// A pipeline is defined as a series of names, each of which may in itself
  /// recursively contain a nested pipeline. A name is either the name of a pass
  /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
  /// name is the name of a pass, the InnerPipeline is empty, since passes
  /// cannot contain inner pipelines. See parsePassPipeline() for a more
  /// detailed description of the textual pipeline format.
  struct PipelineElement {
    std::vector<PipelineElement> InnerPipeline;
  /// This enumerates the LLVM ThinLTO optimization phases.
  enum class ThinLTOPhase {
    /// No ThinLTO behavior needed.
    /// ThinLTO prelink (summary) phase.
    /// ThinLTO postlink (backend compile) phase.

  /// LLVM-provided high-level optimization levels.
  ///
  /// This enumerates the LLVM-provided high-level optimization levels. Each
  /// level has a specific goal and rationale.
  enum OptimizationLevel {
    /// Disable as many optimizations as possible. This doesn't completely
    /// disable the optimizer in all cases, for example always_inline functions
    /// can be required to be inlined for correctness.

    /// Optimize quickly without destroying debuggability.
    ///
    /// This level is tuned to produce a result from the optimizer as quickly
    /// as possible and to avoid destroying debuggability. This tends to result
    /// in a very good development mode where the compiled code will be
    /// immediately executed as part of testing. As a consequence, where
    /// possible, we would like to produce efficient-to-execute code, but not
    /// if it significantly slows down compilation or would prevent even basic
    /// debugging of the resulting binary.
    ///
    /// As an example, complex loop transformations such as versioning,
    /// vectorization, or fusion don't make sense here due to the degree to
    /// which the executed code differs from the source code, and the compile
    /// time cost.

    /// Optimize for fast execution as much as possible without triggering
    /// significant incremental compile time or code size growth.
    ///
    /// The key idea is that optimizations at this level should "pay for
    /// themselves". So if an optimization increases compile time by 5% or
    /// increases code size by 5% for a particular benchmark, that benchmark
    /// should also be one which sees a 5% runtime improvement. If the compile
    /// time or code size penalties happen on average across a diverse range of
    /// LLVM users' benchmarks, then the improvements should as well.
    ///
    /// And no matter what, the compile time needs to not grow superlinearly
    /// with the size of input to LLVM so that users can control the runtime of
    /// the optimizer in this mode.
    ///
    /// This is expected to be a good default optimization level for the vast
    /// majority of users.

    /// Optimize for fast execution as much as possible.
    ///
    /// This mode is significantly more aggressive in trading off compile time
    /// and code size to get execution time improvements. The core idea is that
    /// this mode should include any optimization that helps execution time on
    /// balance across a diverse collection of benchmarks, even if it increases
    /// code size or compile time for some benchmarks without corresponding
    /// improvements to execution time.
    ///
    /// Despite being willing to trade more compile time off to get improved
    /// execution time, this mode still tries to avoid superlinear growth in
    /// order to make even significantly slower compile times at least scale
    /// reasonably. This does not preclude very substantial constant factor
    /// costs.

    /// Similar to \c O2 but tries to optimize for small code size instead of
    /// fast execution without triggering significant incremental execution
    /// time slowdowns.
    ///
    /// The logic here is exactly the same as \c O2, but with code size and
    /// execution time metrics swapped.
    ///
    /// A consequence of the different core goal is that this should in general
    /// produce substantially smaller executables that still run in
    /// a reasonable amount of time.

    /// A very specialized mode that will optimize for code size at any and all
    /// costs.
    ///
    /// This is useful primarily when there are absolute size limitations and
    /// any effort taken to reduce the size is worth it regardless of the
    /// execution time impact. You should expect this level to produce rather
    /// slow, but very small, code.
  /// Construct a PassBuilder.
  ///
  /// All parameters are optional: \p TM may be null, \p PTO defaults to the
  /// cl::opt-derived tuning defaults (see PipelineTuningOptions), \p PGOOpt
  /// defaults to no PGO configuration, and \p PIC (non-owning) defaults to no
  /// instrumentation callbacks.
  explicit PassBuilder(TargetMachine *TM = nullptr,
                       PipelineTuningOptions PTO = PipelineTuningOptions(),
                       Optional<PGOOptions> PGOOpt = None,
                       PassInstrumentationCallbacks *PIC = nullptr)
      : TM(TM), PTO(PTO), PGOOpt(PGOOpt), PIC(PIC) {}
  /// Cross register the analysis managers through their proxies.
  ///
  /// This is an interface that can be used to cross register each
  /// AnalysisManager with all the other analysis managers.
  void crossRegisterProxies(LoopAnalysisManager &LAM,
                            FunctionAnalysisManager &FAM,
                            CGSCCAnalysisManager &CGAM,
                            ModuleAnalysisManager &MAM);

  /// Registers all available module analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// ModuleAnalysisManager with all registered module analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerModuleAnalyses(ModuleAnalysisManager &MAM);

  /// Registers all available CGSCC analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// CGSCCAnalysisManager with all registered CGSCC analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);

  /// Registers all available function analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// FunctionAnalysisManager with all registered function analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerFunctionAnalyses(FunctionAnalysisManager &FAM);

  /// Registers all available loop analysis passes.
  ///
  /// This is an interface that can be used to populate a \c LoopAnalysisManager
  /// with all registered loop analyses. Callers can still manually register any
  /// additional analyses.
  void registerLoopAnalyses(LoopAnalysisManager &LAM);
  /// Construct the core LLVM function canonicalization and simplification
  /// pipeline.
  ///
  /// This is a long pipeline and uses most of the per-function optimization
  /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
  /// repeatedly over the IR and is not expected to destroy important
  /// information about the semantics of the IR.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  buildFunctionSimplificationPipeline(OptimizationLevel Level,
                                      bool DebugLogging = false);

  /// Construct the core LLVM module canonicalization and simplification
  /// pipeline.
  ///
  /// This pipeline focuses on canonicalizing and simplifying the entire module
  /// of IR. Much like the function simplification pipeline above, it is
  /// suitable to run repeatedly over the IR and is not expected to destroy
  /// important information. It does, however, perform inlining and other
  /// heuristic based simplifications that are not strictly reversible.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  buildModuleSimplificationPipeline(OptimizationLevel Level,
                                    bool DebugLogging = false);

  /// Construct the core LLVM module optimization pipeline.
  ///
  /// This pipeline focuses on optimizing the execution speed of the IR. It
  /// uses cost modeling and thresholds to balance code growth against runtime
  /// improvements. It includes vectorization and other information destroying
  /// transformations. It also cannot generally be run repeatedly on a module
  /// without potentially seriously regressing either runtime performance of
  /// the code or serious code size growth.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildModuleOptimizationPipeline(OptimizationLevel Level,
                                                    bool DebugLogging = false,
                                                    bool LTOPreLink = false);

  /// Build a per-module default optimization pipeline.
  ///
  /// This provides a good default optimization pipeline for per-module
  /// optimization and code generation without any link-time optimization. It
  /// typically corresponds to frontend "-O[123]" options for optimization
  /// levels \c O1, \c O2 and \c O3 resp.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
                                                  bool DebugLogging = false,
                                                  bool LTOPreLink = false);

  /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
  /// a pass manager.
  ///
  /// This adds the pre-link optimizations tuned to prepare a module for
  /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
  /// without making irreversible decisions which could be made better during
  /// the LTO run.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                     bool DebugLogging = false);

  /// Build a ThinLTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  buildThinLTODefaultPipeline(OptimizationLevel Level, bool DebugLogging,
                              const ModuleSummaryIndex *ImportSummary);

  /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
  /// manager.
  ///
  /// This adds the pre-link optimizations tuned to work well with a later LTO
  /// run. It works to minimize the IR which needs to be analyzed without
  /// making irreversible decisions which could be made better during the LTO
  /// run.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                                   bool DebugLogging = false);

  /// Build an LTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// addPreLinkLTODefaultPipeline, and the two coordinate closely.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
                                            ModuleSummaryIndex *ExportSummary);

  /// Build the default `AAManager` with the default alias analysis pipeline
  /// registered.
  AAManager buildDefaultAAPipeline();
  /// Parse a textual pass pipeline description into a \c
  /// ModulePassManager.
  ///
  /// The format of the textual pass pipeline description looks something like:
  ///
  ///   module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
  ///
  /// Pass managers have ()s describing the nest structure of passes. All passes
  /// are comma separated. As a special shortcut, if the very first pass is not
  /// a module pass (as a module pass manager is), this will automatically form
  /// the shortest stack of pass managers that allow inserting that first pass.
  /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
  /// passes 'lpassN', all of these are valid:
  ///
  ///   fpass1,fpass2,fpass3
  ///   cgpass1,cgpass2,cgpass3
  ///   lpass1,lpass2,lpass3
  ///
  /// And they are equivalent to the following (resp.):
  ///
  ///   module(function(fpass1,fpass2,fpass3))
  ///   module(cgscc(cgpass1,cgpass2,cgpass3))
  ///   module(function(loop(lpass1,lpass2,lpass3)))
  ///
  /// This shortcut is especially useful for debugging and testing small pass
  /// combinations. Note that these shortcuts don't introduce any other magic.
  /// If the sequence of passes aren't all the exact same kind of pass, it will
  /// be an error. You cannot mix different levels implicitly, you must
  /// explicitly form a pass manager in which to nest passes.
  Error parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);

  /// {{@ Parse a textual pass pipeline description into a specific PassManager
  ///
  /// Automatic deduction of an appropriate pass manager stack is not supported.
  /// For example, to insert a loop pass 'lpass' into a FunctionPassManager,
  /// this is the valid pipeline text:
  ///
  ///   function(lpass)
  Error parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
  Error parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
  Error parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);

  /// Parse a textual alias analysis pipeline into the provided AA manager.
  ///
  /// The format of the textual AA pipeline is a comma separated list of AA
  /// pass names:
  ///
  ///   basic-aa,globals-aa,...
  ///
  /// The AA manager is set up such that the provided alias analyses are tried
  /// in the order specified. See the \c AAManager documentation for details
  /// about the logic used. This routine just provides the textual mapping
  /// between AA names and the analyses to register with the manager.
  ///
  /// Returns an error if the text cannot be parsed cleanly. The specific state
  /// of the \p AA manager is unspecified if such an error is encountered and
  /// this returns.
  Error parseAAPipeline(AAManager &AA, StringRef PipelineText);
  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding passes that perform peephole
  /// optimizations similar to the instruction combiner. These passes will be
  /// inserted after each instance of the instruction combiner pass.
  void registerPeepholeEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    PeepholeEPCallbacks.push_back(C);

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding late loop canonicalization and
  /// simplification passes. This is the last point in the loop optimization
  /// pipeline before loop deletion. Each pass added
  /// here must be an instance of LoopPass.
  /// This is the place to add passes that can remove loops, such as target-
  /// specific loop idiom recognition.
  void registerLateLoopOptimizationsEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LateLoopOptimizationsEPCallbacks.push_back(C);

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding loop passes to the end of the loop
  /// optimizer.
  void registerLoopOptimizerEndEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LoopOptimizerEndEPCallbacks.push_back(C);

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimization passes after most of the
  /// main optimizations, but before the last cleanup-ish optimizations.
  void registerScalarOptimizerLateEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    ScalarOptimizerLateEPCallbacks.push_back(C);

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding CallGraphSCC passes at the end of the
  /// main CallGraphSCC passes and before any function simplification passes run
  /// by CGPassManager.
  void registerCGSCCOptimizerLateEPCallback(
      const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
    CGSCCOptimizerLateEPCallbacks.push_back(C);

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimization passes before the
  /// vectorizer and other highly target specific optimization passes are
  /// run.
  void registerVectorizerStartEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    VectorizerStartEPCallbacks.push_back(C);

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimization once at the start of the
  /// pipeline. This does not apply to 'backend' compiles (LTO and ThinLTO
  /// link-time pipelines).
  void registerPipelineStartEPCallback(
      const std::function<void(ModulePassManager &)> &C) {
    PipelineStartEPCallbacks.push_back(C);

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimizations at the very end of the
  /// function optimization pipeline. A key difference between this and the
  /// legacy PassManager's OptimizerLast callback is that this extension point
  /// is not triggered at O0. Extensions to the O0 pipeline should append their
  /// passes to the end of the overall pipeline.
  void registerOptimizerLastEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    OptimizerLastEPCallbacks.push_back(C);

  /// Register a callback for parsing an AliasAnalysis Name to populate
  /// the given AAManager \p AA.
  void registerParseAACallback(
      const std::function<bool(StringRef Name, AAManager &AA)> &C) {
    AAParsingCallbacks.push_back(C);
  /// {{@ Register callbacks for analysis registration with this PassBuilder
  /// instance.
  ///
  /// Callees register their analyses with the given AnalysisManager objects.
  void registerAnalysisRegistrationCallback(
      const std::function<void(CGSCCAnalysisManager &)> &C) {
    CGSCCAnalysisRegistrationCallbacks.push_back(C);
  void registerAnalysisRegistrationCallback(
      const std::function<void(FunctionAnalysisManager &)> &C) {
    FunctionAnalysisRegistrationCallbacks.push_back(C);
  void registerAnalysisRegistrationCallback(
      const std::function<void(LoopAnalysisManager &)> &C) {
    LoopAnalysisRegistrationCallbacks.push_back(C);
  void registerAnalysisRegistrationCallback(
      const std::function<void(ModuleAnalysisManager &)> &C) {
    ModuleAnalysisRegistrationCallbacks.push_back(C);

  /// {{@ Register pipeline parsing callbacks with this pass builder instance.
  /// Using these callbacks, callers can parse both a single pass name, as well
  /// as entire sub-pipelines, and populate the PassManager instance
  /// accordingly.
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, CGSCCPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    CGSCCPipelineParsingCallbacks.push_back(C);
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, FunctionPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    FunctionPipelineParsingCallbacks.push_back(C);
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, LoopPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    LoopPipelineParsingCallbacks.push_back(C);
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, ModulePassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    ModulePipelineParsingCallbacks.push_back(C);

  /// Register a callback for a top-level pipeline entry.
  ///
  /// If the PassManager type is not given at the top level of the pipeline
  /// text, this Callback should be used to determine the appropriate stack of
  /// PassManagers and populate the passed ModulePassManager.
  void registerParseTopLevelPipelineCallback(
      const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
                               bool VerifyEachPass, bool DebugLogging)> &C) {
    TopLevelPipelineParsingCallbacks.push_back(C);
  /// Add PGOInstrumentation passes for O0 only.
  void addPGOInstrPassesForO0(ModulePassManager &MPM, bool DebugLogging,
                              bool RunProfileGen, bool IsCS,
                              std::string ProfileFile,
                              std::string ProfileRemappingFile);

  /// Returns PIC. External libraries can use this to register pass
  /// instrumentation callbacks.
  PassInstrumentationCallbacks *getPassInstrumentationCallbacks() const {

  // Parse a raw pipeline string into its element structure; None on a
  // malformed pipeline description.
  static Optional<std::vector<PipelineElement>>
  parsePipelineText(StringRef Text);
  // Parse a single parsed pipeline element into the corresponding pass
  // manager, one overload per IR unit level.
  Error parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
                        bool VerifyEachPass, bool DebugLogging);
  Error parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
                       bool VerifyEachPass, bool DebugLogging);
  Error parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
                          bool VerifyEachPass, bool DebugLogging);
  Error parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
                      bool VerifyEachPass, bool DebugLogging);
  bool parseAAPassName(AAManager &AA, StringRef Name);

  // Parse a full (sub-)pipeline at each IR unit level.
  Error parseLoopPassPipeline(LoopPassManager &LPM,
                              ArrayRef<PipelineElement> Pipeline,
                              bool VerifyEachPass, bool DebugLogging);
  Error parseFunctionPassPipeline(FunctionPassManager &FPM,
                                  ArrayRef<PipelineElement> Pipeline,
                                  bool VerifyEachPass, bool DebugLogging);
  Error parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
                               ArrayRef<PipelineElement> Pipeline,
                               bool VerifyEachPass, bool DebugLogging);
  Error parseModulePassPipeline(ModulePassManager &MPM,
                                ArrayRef<PipelineElement> Pipeline,
                                bool VerifyEachPass, bool DebugLogging);

  // Append PGO instrumentation/use passes for the default pipelines.
  void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
                         OptimizationLevel Level, bool RunProfileGen, bool IsCS,
                         std::string ProfileFile,
                         std::string ProfileRemappingFile);
  // Run every registered peephole extension-point callback on \p FPM.
  void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);
  // Extension Point callbacks
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LateLoopOptimizationsEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LoopOptimizerEndEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      ScalarOptimizerLateEPCallbacks;
  SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
      CGSCCOptimizerLateEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      VectorizerStartEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      OptimizerLastEPCallbacks;

  // Module callbacks
  SmallVector<std::function<void(ModulePassManager &)>, 2>
      PipelineStartEPCallbacks;
  SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
      ModuleAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, ModulePassManager &,
                                 ArrayRef<PipelineElement>)>,
      ModulePipelineParsingCallbacks;
  SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
                                 bool VerifyEachPass, bool DebugLogging)>,
      TopLevelPipelineParsingCallbacks;

  // CGSCC callbacks
  SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
      CGSCCAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
                                 ArrayRef<PipelineElement>)>,
      CGSCCPipelineParsingCallbacks;

  // Function callbacks
  SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
      FunctionAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, FunctionPassManager &,
                                 ArrayRef<PipelineElement>)>,
      FunctionPipelineParsingCallbacks;

  // Loop callbacks
  SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
      LoopAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, LoopPassManager &,
                                 ArrayRef<PipelineElement>)>,
      LoopPipelineParsingCallbacks;

  // AA callbacks
  SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
728 /// This utility template takes care of adding require<> and invalidate<>
729 /// passes for an analysis to a given \c PassManager. It is intended to be used
730 /// during parsing of a pass pipeline when parsing a single PipelineName.
731 /// When registering a new function analysis FancyAnalysis with the pass
732 /// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
735 /// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
736 /// ArrayRef<PipelineElement> P) {
737 /// if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
742 template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
743 typename... ExtraArgTs>
744 bool parseAnalysisUtilityPasses(
745 StringRef AnalysisName, StringRef PipelineName,
746 PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
747 if (!PipelineName.endswith(">"))
749 // See if this is an invalidate<> pass name
750 if (PipelineName.startswith("invalidate<")) {
751 PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
752 if (PipelineName != AnalysisName)
754 PM.addPass(InvalidateAnalysisPass<AnalysisT>());
758 // See if this is a require<> pass name
759 if (PipelineName.startswith("require<")) {
760 PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
761 if (PipelineName != AnalysisName)
763 PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,