//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUTargetMachine.h"

#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineFunctionInfo.h"
#include "SIMachineScheduler.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"

using namespace llvm;
static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization.
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis.
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer.
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFunctionCallsOpt(
  "amdgpu-function-calls",
  cl::desc("Enable AMDGPU function call support"),
  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
  cl::init(true),
  cl::Hidden);

// Enable lib calls simplifications.
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

// Enable atomic optimization.
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

// Enable mode register optimization.
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// This option is used in lit tests to prevent dead-coding of the patterns
// being inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
              cl::init(true), cl::Hidden,
              cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);
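
// All of the switches above are ordinary cl::opt flags, so individual
// pipeline features can be toggled straight from the llc command line when
// reducing or debugging a problem. An illustrative (not normative)
// invocation:
//
//   llc -mtriple=amdgcn -mcpu=gfx900 -amdgpu-sdwa-peephole=0 \
//       -amdgpu-load-store-vectorizer=0 input.ll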
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
  // Register the target.
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSILowerSGPRSpillsPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFixupVectorISelPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPUPropagateAttributesEarlyPass(*PR);
  initializeAMDGPUPropagateAttributesLatePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIRemoveShortExecBranchesPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIPreAllocateWWMRegsPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
  initializeAMDGPUPrintfRuntimeBindingPass(*PR);
  initializeGCNRegBankReassignPass(*PR);
  initializeGCNNSAReassignPass(*PR);
}
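
// Initializing the passes up front registers their names with the
// PassRegistry, which is what lets lit tests and developers address them by
// name, e.g. (pass-name spelling assumed from its INITIALIZE_PASS string):
//
//   llc -mtriple=amdgcn -stop-after=si-fix-sgpr-copies -o - input.ll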
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, std::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}
static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                  createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);
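
// Each MachineSchedRegistry above plugs into the generic -misched switch, so
// a scheduler variant can be selected per run, e.g. (illustrative):
//
//   llc -mtriple=amdgcn -misched=gcn-ilp input.ll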
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat, non-integral buffer fat pointers.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
         "-ni:7";
}
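
// Reading the amdgcn string above: "p5:32:32" makes addrspace(5) (private)
// pointers 32 bits wide, "A5" places allocas in addrspace(5), and "ni:7"
// marks addrspace(7) (buffer fat pointers) as non-integral. IR for this
// target therefore declares stack objects as, e.g.:
//
//   %tmp = alloca i32, addrspace(5)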
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}
static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}
AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;
StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}
/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}
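
// In practice this keeps kernels and declarations visible while letting the
// Internalize/GlobalDCE combination below strip unused device helpers: a
// function with the amdgpu_kernel calling convention is always preserved,
// whereas an uncalled helper function is internalized and then deleted.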
void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA, this](const PassManagerBuilder &,
                                               legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      PM.add(createAMDGPUPrintfRuntimeBinding());
      PM.add(createAMDGPUPropagateAttributesLatePass(this));
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt, this](const PassManagerBuilder &,
                                            legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUPropagateAttributesEarlyPass(this));
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt, this));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}
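
// Note that these extension points only fire when a frontend builds its
// optimization pipeline through the legacy PassManagerBuilder (as clang's
// legacy -O pipeline does); running llc by itself does not invoke them.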
//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//
R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);

  // Override the default since calls aren't supported for r600.
  if (EnableFunctionCalls &&
      EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0)
    EnableFunctionCalls = false;
}
const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}
//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//
GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}
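
// Subtargets are cached per (GPU name, feature string) key, so functions
// whose attributes differ get distinct GCNSubtarget instances. For example
// (illustrative IR):
//
//   attributes #0 = { "target-cpu"="gfx900" }
//   attributes #1 = { "target-cpu"="gfx1010" "target-features"="+wavefrontsize32" }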
//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//
namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

std::unique_ptr<CSEConfigBase> AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}
class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;
  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace
void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}
void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}
void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  // A call to propagate attributes pass in the backend in case opt was not run.
  addPass(createAMDGPUPropagateAttributesEarlyPass(&TM));

  addPass(createAtomicExpandPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    if (EnableScalarIRPasses)
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
        }));
    }
  }

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.
    addPass(createAMDGPUCodeGenPreparePass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = mul %a, 4
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None && EnableScalarIRPasses)
    addEarlyCSEOrGVNPass();
}
void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn)
    addPass(createAMDGPUAnnotateKernelFeaturesPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  addPass(&AMDGPUPerfHintAnalysisID);

  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}
bool AMDGPUPassConfig::addPreISel() {
  addPass(createLowerSwitchPass());
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  // Defer the verifier until FinalizeISel.
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()), false);
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}
//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//
bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}
//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//
ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}
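
// The max-occupancy scheduler is the default; SIScheduleDAGMI is only used
// when the subtarget requests it, e.g. via -mattr=+si-scheduler (feature
// spelling assumed here for illustration).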
bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableAtomicOptimizations) {
    addPass(createAMDGPUAtomicOptimizerPass());
  }

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }
  addPass(createLCSSAPass());

  return false;
}
void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&DeadMachineInstructionElimID);
  addPass(&SILoadStoreOptimizerID);
  if (EnableSDWAPeephole) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
    addPass(&DeadMachineInstructionElimID);
  }
  addPass(createSIShrinkInstructionsPass());
}
bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}
bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  addPass(createSIFixupVectorISelPass());
  addPass(createSIAddIMGInitPass());
  return false;
}
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}
void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}
void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  TargetPassConfig::addFastRegAlloc();
}
void GCNPassConfig::addOptimizedRegAlloc() {
  if (OptExecMaskPreRA) {
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
    insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID);
  } else {
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);
  }

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run just after RegisterCoalescing.
  insertPass(&RegisterCoalescerID, &SIPreAllocateWWMRegsID, false);

  if (EnableDCEInRA)
    insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}
bool GCNPassConfig::addPreRewrite() {
  if (EnableRegReassign) {
    addPass(&GCNNSAReassignID);
    addPass(&GCNRegBankReassignID);
  }
  return true;
}
void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
}

void GCNPassConfig::addPreSched2() {
}
void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(createSIModeRegisterPass());

  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // hazards.
  //
  // FIXME: This stand-alone pass will emit indiv. S_NOP 0, as needed. It would
  // be better for it to emit S_NOP <N> when possible.
  addPass(&PostRAHazardRecognizerID);

  addPass(&SIRemoveShortExecBranchesID);
  addPass(&SIInsertSkipsPassID);
  addPass(&BranchRelaxationPassID);
}
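
// The resulting late pipeline can be inspected with the legacy pass
// manager's structure dump, e.g. (illustrative):
//
//   llc -mtriple=amdgcn -debug-pass=Structure input.ll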
TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(*MFI,
                                         *MF.getSubtarget().getRegisterInfo());
}
bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MFI->initializeBaseYamlFields(YamlMFI);

  auto parseRegister = [&](const yaml::StringValue &RegName, unsigned &RegVal) {
    if (parseNamedRegisterReference(PFS, RegVal, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }

    return false;
  };

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         None, None);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.ScratchWaveOffsetReg, MFI->ScratchWaveOffsetReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->ScratchWaveOffsetReg != AMDGPU::SCRATCH_WAVE_OFFSET_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->ScratchWaveOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchWaveOffsetReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  auto parseAndCheckArgument = [&](const Optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      unsigned Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue());

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
  MFI->Mode.FP32Denormals = YamlMFI.Mode.FP32Denormals;
  MFI->Mode.FP64FP16Denormals = YamlMFI.Mode.FP64FP16Denormals;

  return false;
}
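
// A rough sketch of the MIR block this parser consumes; field spellings
// follow yaml::SIMachineFunctionInfo and should be treated as illustrative,
// not normative:
//
//   machineFunctionInfo:
//     isEntryFunction:      true
//     scratchRSrcReg:       '$sgpr0_sgpr1_sgpr2_sgpr3'
//     scratchWaveOffsetReg: '$sgpr4'
//     frameOffsetReg:       '$sgpr5'
//     stackPtrOffsetReg:    '$sgpr32'
//     argumentInfo:
//       privateSegmentBuffer: { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
//       workItemIDX:          { reg: '$vgpr0' }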