//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//
16 #include "AMDGPUTargetMachine.h"
18 #include "AMDGPUAliasAnalysis.h"
19 #include "AMDGPUCallLowering.h"
20 #include "AMDGPUInstructionSelector.h"
21 #include "AMDGPULegalizerInfo.h"
22 #include "AMDGPUMacroFusion.h"
23 #include "AMDGPUTargetObjectFile.h"
24 #include "AMDGPUTargetTransformInfo.h"
25 #include "GCNIterativeScheduler.h"
26 #include "GCNSchedStrategy.h"
27 #include "R600MachineScheduler.h"
28 #include "SIMachineScheduler.h"
29 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
30 #include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
31 #include "llvm/CodeGen/GlobalISel/Legalizer.h"
32 #include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
33 #include "llvm/CodeGen/Passes.h"
34 #include "llvm/CodeGen/TargetPassConfig.h"
35 #include "llvm/IR/Attributes.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/LegacyPassManager.h"
38 #include "llvm/Pass.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/Compiler.h"
41 #include "llvm/Support/TargetRegistry.h"
42 #include "llvm/Target/TargetLoweringObjectFile.h"
43 #include "llvm/Transforms/IPO.h"
44 #include "llvm/Transforms/IPO/AlwaysInliner.h"
45 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
46 #include "llvm/Transforms/Scalar.h"
47 #include "llvm/Transforms/Scalar/GVN.h"
48 #include "llvm/Transforms/Utils.h"
49 #include "llvm/Transforms/Vectorize.h"
static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(false));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFunctionCalls(
  "amdgpu-function-calls",
  cl::desc("Enable AMDGPU function call support"),
  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
  cl::init(false),
  cl::Hidden);

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

// Enable atomic optimization
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);
160 extern "C" void LLVMInitializeAMDGPUTarget() {
161 // Register the target
162 RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
163 RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
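
  // Initialize the AMDGPU-specific passes with the global PassRegistry so
  // the pass configs below can refer to them by ID.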
  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFixupVectorISelPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIDebuggerInsertNopsPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIFixWWMLivenessPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return llvm::make_unique<AMDGPUTargetObjectFile>();
}
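
// Machine scheduler factories. Each one is registered with a
// MachineSchedRegistry entry below, which makes it selectable on the
// command line via -misched=<name>.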
static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);
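
// Data layout strings for the two hardware generations. Note that allocas
// default to the private address space (the trailing "-A5").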
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  if (TT.getArch() == Triple::amdgcn)
    return "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
  : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                      FS, Options, getEffectiveRelocModel(RM),
                      getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
    TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;
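
// The GPU name and feature string may be overridden per function via the
// "target-cpu" and "target-features" attributes; when the attribute is
// absent, fall back to the TargetMachine-wide defaults.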
StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}

/// Predicate for Internalize pass.
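/// A global must be preserved if it is a function declaration, an
/// entry-point (kernel) function, or a non-function global that still has
/// uses.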
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}

void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableAMDGPUFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;
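
  // When calls are enabled, swap in the AMDGPU-aware function inliner in
  // place of the default one.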
  if (EnableAMDGPUFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA](const PassManagerBuilder &,
                                         legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
  : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {
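
// Pass setup shared by the R600 and GCN targets. The two subclasses below
// override only the hooks that differ.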
class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;
};

class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAtomicExpandPass());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfigs passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.

    addPass(createAMDGPUCodeGenPreparePass());
  }

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
        }));
    }
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = mul %a, #4
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None)
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createLowerSwitchPass());
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableAtomicOptimizations) {
    addPass(createAMDGPUAtomicOptimizerPass());
  }

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(createAMDGPUAnnotateKernelFeaturesPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  addPass(createSIFixupVectorISelPass());
  addPass(createSIAddIMGInitPass());
  return false;
}
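
// GlobalISel hooks: the GCN target implements the full pipeline of IR
// translation, legalization, register-bank selection, and global
// instruction selection.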
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}

void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run after SILowerControlFlow, since it needs to use the
  // machine-level CFG, but before register allocation.
  insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false);

  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}

void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID);

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run after SILowerControlFlow, since it needs to use the
  // machine-level CFG, but before register allocation.
  insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false);

  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
}

void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(createSIModeRegisterPass());

  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // hazards.
  //
  // FIXME: This stand-alone pass will emit individual S_NOP 0, as needed. It
  // would be better for it to emit S_NOP <N> when possible.
  addPass(&PostRAHazardRecognizerID);

  addPass(&SIInsertSkipsPassID);
  addPass(createSIDebuggerInsertNopsPass());
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}