//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUTargetMachine.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineScheduler.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLoweringObjectFile.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Vectorize.h"

using namespace llvm;
static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));
// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));
// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to enable new waitcnt insertion pass.
static cl::opt<bool> EnableSIInsertWaitcntsPass(
  "enable-si-insert-waitcnts",
  cl::desc("Use new waitcnt insertion pass"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool> EnableAMDGPUFunctionCalls(
  "amdgpu-function-calls",
  cl::Hidden,
  cl::desc("Enable AMDGPU function call support"),
  cl::init(false));

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);
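
// All of the cl::opt flags above are developer knobs rather than a stable
// interface: they are read straight off the llc command line (for example
// "llc -march=amdgcn -amdgpu-sdwa-peephole=0") and can be reached from a
// clang invocation through -mllvm.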
extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitsPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIDebuggerInsertNopsPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIFixWWMLivenessPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
}
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return llvm::make_unique<AMDGPUTargetObjectFile>();
}
static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}
static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}
static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                   createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);
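
// Each MachineSchedRegistry entry above makes its scheduler selectable at
// runtime through the common -misched flag, e.g.
// "llc -march=amdgcn -misched=gcn-minreg"; when no flag is given, the
// default comes from the pass config's createMachineScheduler() below.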
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    if (TT.getEnvironmentName() == "amdgiz" ||
        TT.getEnvironmentName() == "amdgizcl")
      return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
             "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5";
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat.
  if (TT.getEnvironmentName() == "amdgiz" ||
      TT.getEnvironmentName() == "amdgizcl")
    return "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32"
           "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5";
  return "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
}
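
// A quick key to the datalayout strings above: "e" means little-endian,
// "pN:<size>:<abi>" fixes the pointer width and ABI alignment for address
// space N, "n32:64" lists the native integer widths, and "A5" makes allocas
// default to address space 5 (the private/scratch space under the amdgiz
// environment).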
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  if (TT.getArch() == Triple::amdgcn)
    return "generic";

  return "r600";
}
static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

static CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM) {
  if (CM)
    return *CM;
  return CodeModel::Small;
}
AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
  : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                      FS, Options, getEffectiveRelocModel(RM),
                      getEffectiveCodeModel(CM), OptLevel),
    TLOF(createTLOF(getTargetTriple())) {
  AS = AMDGPU::getAMDGPUAS(TT);
  initAsmInfo();
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}
static ImmutablePass *createAMDGPUExternalAAWrapperPass() {
  return createExternalAAWrapperPass([](Pass &P, Function &, AAResults &AAR) {
      if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
        AAR.addAAResult(WrapperPass->getResult());
      });
}
/// Predicate for Internalize pass.
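/// Keeps declarations, entry points (e.g. kernels), and still-referenced
/// variables visible; everything else may be internalized so GlobalDCE can
/// remove it.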
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}
void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableAMDGPUFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableAMDGPUFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  if (Internalize) {
    // If we're generating code, we always have the whole program available. The
    // relocations expected for externally visible functions aren't supported,
    // so make sure every non-entry function is hidden.
    Builder.addExtension(
      PassManagerBuilder::EP_EnabledOnOptLevel0,
      [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
        PM.add(createInternalizePass(mustPreserveGV));
      });
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA](const PassManagerBuilder &,
                                         legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());
  });
}
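
// adjustPassManager() is the hook through which a PassManagerBuilder-driven
// middle end (clang or opt) picks up the AMDGPU-specific IR passes added
// above; DivergentTarget, in turn, makes builder-created passes such as loop
// unswitching more conservative about conditions that may be divergent
// across a wavefront.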
//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);
}
const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}
//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
const SISubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<SISubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}
//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//
namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;
};
class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace
TargetTransformInfo
AMDGPUTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(AMDGPUTTIImpl(this, F));
}
void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}
void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
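  // For instance, straight-line strength reduction can rewrite a chain of
  // related candidates such as
  //   %y0 = add %base, %s
  //   %y1 = add %base, (mul %s, 2)
  // into
  //   %y0 = add %base, %s
  //   %y1 = add %y0, %s
  // trading the multiply for an add off the previous value.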
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}
void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPULowerIntrinsicsPass());

  if (TM.getTargetTriple().getArch() == Triple::r600 ||
      !EnableAMDGPUFunctionCalls) {
    // Function calls are not supported, so make sure we inline everything.
    addPass(createAMDGPUAlwaysInlinePass());
    addPass(createAlwaysInlinerLegacyPass());
    // We need to add the barrier noop pass, otherwise adding the function
    // inlining pass will cause all of the PassConfigs passes to be run
    // one function at a time, which means if we have a module with two
    // functions, then we will generate code for the first function
    // without ever running any passes on the second.
    addPass(createBarrierNoopPass());
  }

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.
    addPass(createAMDGPUCodeGenPreparePass());
  }

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  addPass(createAMDGPUOpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
        }));
    }
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = mul %a, #4
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None)
    addEarlyCSEOrGVNPass();
}
void AMDGPUPassConfig::addCodeGenPrepare() {
  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}
bool AMDGPUPassConfig::addPreISel() {
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}
//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//
bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}
//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//
ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const SISubtarget &ST = C->MF->getSubtarget<SISubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}
bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(createAMDGPUAnnotateKernelFeaturesPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }

  return false;
}
void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&MachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}
bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(createSILowerI1CopiesPass());
  addPass(&SIFixSGPRCopiesID);
  return false;
}
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}
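
// The four overrides above install the GlobalISel pipeline in its canonical
// order: IRTranslator builds generic MIR from LLVM IR, Legalizer rewrites
// unsupported operations per AMDGPULegalizerInfo, RegBankSelect assigns each
// value a register bank (scalar SGPR vs. vector VGPR), and InstructionSelect
// finally picks real target instructions.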
void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}
void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run after SILowerControlFlow, since it needs to use the
  // machine-level CFG, but before register allocation.
  insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false);

  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}
void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run after SILowerControlFlow, since it needs to use the
  // machine-level CFG, but before register allocation.
  insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false);

  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}
void GCNPassConfig::addPreSched2() {
}
void GCNPassConfig::addPreEmitPass() {
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // hazard types.
  addPass(&PostRAHazardRecognizerID);

  if (EnableSIInsertWaitcntsPass)
    addPass(createSIInsertWaitcntsPass());
  else
    addPass(createSIInsertWaitsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(&SIInsertSkipsPassID);
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIDebuggerInsertNopsPass());
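  // Branch relaxation is kept last: it may rewrite conditional branches whose
  // targets end up out of range, so it has to see the final sizes and layout
  // of every block.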
  addPass(&BranchRelaxationPassID);
}
TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}