//===- AMDGPUPerfHintAnalysis.cpp - analysis of functions memory traffic --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Analyzes if a function is potentially memory bound and if a kernel
/// may benefit from limiting the number of waves to reduce cache thrashing.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-perf-hint"

static cl::opt<unsigned>
    MemBoundThresh("amdgpu-membound-threshold", cl::init(50), cl::Hidden,
                   cl::desc("Function mem bound threshold in %"));

static cl::opt<unsigned>
    LimitWaveThresh("amdgpu-limit-wave-threshold", cl::init(50), cl::Hidden,
                    cl::desc("Kernel limit wave threshold in %"));

static cl::opt<unsigned>
    IAWeight("amdgpu-indirect-access-weight", cl::init(1000), cl::Hidden,
             cl::desc("Indirect access memory instruction weight"));

static cl::opt<unsigned>
    LSWeight("amdgpu-large-stride-weight", cl::init(1000), cl::Hidden,
             cl::desc("Large stride memory access weight"));

static cl::opt<unsigned>
    LargeStrideThresh("amdgpu-large-stride-threshold", cl::init(64), cl::Hidden,
                      cl::desc("Large stride memory access threshold"));
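
// Note on the defaults: with IAWeight and LSWeight at 1000, a single unit of
// indirect-access or large-stride cost crosses the 50% wave-limit threshold
// whenever a function's total InstCost is under 2000 (1000 * 100 / 2000 = 50);
// see needLimitWave() below.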

STATISTIC(NumMemBound, "Number of functions marked as memory bound");
STATISTIC(NumLimitWave, "Number of functions marked as needing limit wave");

char llvm::AMDGPUPerfHintAnalysis::ID = 0;
char &llvm::AMDGPUPerfHintAnalysisID = AMDGPUPerfHintAnalysis::ID;

INITIALIZE_PASS(AMDGPUPerfHintAnalysis, DEBUG_TYPE,
                "Analysis if a function is memory bound", true, true)

namespace {

struct AMDGPUPerfHint {
  friend AMDGPUPerfHintAnalysis;

public:
  AMDGPUPerfHint(AMDGPUPerfHintAnalysis::FuncInfoMap &FIM_,
                 const TargetLowering *TLI_)
      : FIM(FIM_), DL(nullptr), TLI(TLI_) {}

  bool runOnFunction(Function &F);

private:
  struct MemAccessInfo {
    const Value *V;
    const Value *Base;
    int64_t Offset;
    MemAccessInfo() : V(nullptr), Base(nullptr), Offset(0) {}
    bool isLargeStride(MemAccessInfo &Reference) const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    Printable print() const {
      return Printable([this](raw_ostream &OS) {
        OS << "Value: " << *V << '\n'
           << "Base: " << *Base << " Offset: " << Offset << '\n';
      });
    }
#endif
  };

  MemAccessInfo makeMemAccessInfo(Instruction *) const;

  MemAccessInfo LastAccess; // Last memory access info

  AMDGPUPerfHintAnalysis::FuncInfoMap &FIM;

  const DataLayout *DL;

  const TargetLowering *TLI;

  AMDGPUPerfHintAnalysis::FuncInfo *visit(const Function &F);
  static bool isMemBound(const AMDGPUPerfHintAnalysis::FuncInfo &F);
  static bool needLimitWave(const AMDGPUPerfHintAnalysis::FuncInfo &F);

  bool isIndirectAccess(const Instruction *Inst) const;

  /// Check if the instruction is a large stride access.
  /// The purpose is to identify memory access patterns like:
  /// x = a[i];
  /// y = a[i+1000];
  /// z = a[i+2000];
  /// In the above example, the second and third memory accesses will be
  /// marked as large stride accesses.
  bool isLargeStride(const Instruction *Inst);

  bool isGlobalAddr(const Value *V) const;
  bool isLocalAddr(const Value *V) const;
  bool isConstantAddr(const Value *V) const;
};
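
// Returns the pointer operand of a memory-accessing instruction and the type
// of the value it loads or stores, or {nullptr, nullptr} if Inst does not
// access memory.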
static std::pair<const Value *, const Type *> getMemoryInstrPtrAndType(
    const Instruction *Inst) {
  if (auto LI = dyn_cast<LoadInst>(Inst))
    return {LI->getPointerOperand(), LI->getType()};
  if (auto SI = dyn_cast<StoreInst>(Inst))
    return {SI->getPointerOperand(), SI->getValueOperand()->getType()};
  if (auto AI = dyn_cast<AtomicCmpXchgInst>(Inst))
    return {AI->getPointerOperand(), AI->getCompareOperand()->getType()};
  if (auto AI = dyn_cast<AtomicRMWInst>(Inst))
    return {AI->getPointerOperand(), AI->getValOperand()->getType()};
  if (auto MI = dyn_cast<AnyMemIntrinsic>(Inst))
    return {MI->getRawDest(), Type::getInt8Ty(MI->getContext())};

  return {nullptr, nullptr};
}
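
// An access is treated as indirect when its address is itself loaded from
// memory: walk the address computation backwards through casts, GEPs, binary
// operators, selects and extractelements, looking for a load from the global,
// local or constant address space.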
bool AMDGPUPerfHint::isIndirectAccess(const Instruction *Inst) const {
  LLVM_DEBUG(dbgs() << "[isIndirectAccess] " << *Inst << '\n');
  SmallSet<const Value *, 32> WorkSet;
  SmallSet<const Value *, 32> Visited;
  if (const Value *MO = getMemoryInstrPtrAndType(Inst).first) {
    if (isGlobalAddr(MO))
      WorkSet.insert(MO);
  }

  while (!WorkSet.empty()) {
    const Value *V = *WorkSet.begin();
    WorkSet.erase(*WorkSet.begin());
    if (!Visited.insert(V).second)
      continue;
    LLVM_DEBUG(dbgs() << "  check: " << *V << '\n');

    if (auto LD = dyn_cast<LoadInst>(V)) {
      auto M = LD->getPointerOperand();
      if (isGlobalAddr(M) || isLocalAddr(M) || isConstantAddr(M)) {
        LLVM_DEBUG(dbgs() << "    is IA\n");
        return true;
      }
      continue;
    }

    if (auto GEP = dyn_cast<GetElementPtrInst>(V)) {
      auto P = GEP->getPointerOperand();
      WorkSet.insert(P);
      for (unsigned I = 1, E = GEP->getNumIndices() + 1; I != E; ++I)
        WorkSet.insert(GEP->getOperand(I));
      continue;
    }

    if (auto U = dyn_cast<UnaryInstruction>(V)) {
      WorkSet.insert(U->getOperand(0));
      continue;
    }

    if (auto BO = dyn_cast<BinaryOperator>(V)) {
      WorkSet.insert(BO->getOperand(0));
      WorkSet.insert(BO->getOperand(1));
      continue;
    }

    if (auto S = dyn_cast<SelectInst>(V)) {
      WorkSet.insert(S->getFalseValue());
      WorkSet.insert(S->getTrueValue());
      continue;
    }

    if (auto E = dyn_cast<ExtractElementInst>(V)) {
      WorkSet.insert(E->getVectorOperand());
      continue;
    }

    LLVM_DEBUG(dbgs() << "    dropped\n");
  }

  LLVM_DEBUG(dbgs() << "  is not IA\n");
  return false;
}
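
// Accumulates a simple per-function cost model: every memory instruction is
// charged one unit per 32 bits accessed, and calls to already-visited callees
// are folded in by summing the callee's cached costs.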
AMDGPUPerfHintAnalysis::FuncInfo *AMDGPUPerfHint::visit(const Function &F) {
  AMDGPUPerfHintAnalysis::FuncInfo &FI = FIM[&F];

  LLVM_DEBUG(dbgs() << "[AMDGPUPerfHint] process " << F.getName() << '\n');

  for (auto &B : F) {
    LastAccess = MemAccessInfo();
    for (auto &I : B) {
      if (const Type *Ty = getMemoryInstrPtrAndType(&I).second) {
        unsigned Size = divideCeil(Ty->getPrimitiveSizeInBits(), 32);
        if (isIndirectAccess(&I))
          FI.IAMInstCost += Size;
        if (isLargeStride(&I))
          FI.LSMInstCost += Size;
        FI.MemInstCost += Size;
        FI.InstCost += Size;
        continue;
      }
      if (auto *CB = dyn_cast<CallBase>(&I)) {
        Function *Callee = CB->getCalledFunction();
        if (!Callee || Callee->isDeclaration()) {
          ++FI.InstCost;
          continue;
        }
        if (&F == Callee) // Handle immediate recursion
          continue;

        auto Loc = FIM.find(Callee);
        if (Loc == FIM.end())
          continue;

        FI.MemInstCost += Loc->second.MemInstCost;
        FI.InstCost += Loc->second.InstCost;
        FI.IAMInstCost += Loc->second.IAMInstCost;
        FI.LSMInstCost += Loc->second.LSMInstCost;
      } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
        TargetLoweringBase::AddrMode AM;
        auto *Ptr = GetPointerBaseWithConstantOffset(GEP, AM.BaseOffs, *DL);
        AM.BaseGV = dyn_cast_or_null<GlobalValue>(const_cast<Value *>(Ptr));
        AM.HasBaseReg = !AM.BaseGV;
        if (TLI->isLegalAddressingMode(*DL, AM, GEP->getResultElementType(),
                                       GEP->getPointerAddressSpace()))
          // Offset will likely be folded into load or store
          continue;
        ++FI.InstCost;
      } else {
        ++FI.InstCost;
      }
    }
  }

  return &FI;
}
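
// Computes the costs for one function and records the verdicts as function
// attributes ("amdgpu-memory-bound", "amdgpu-wave-limiter") for the rest of
// the backend to consume.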
bool AMDGPUPerfHint::runOnFunction(Function &F) {
  const Module &M = *F.getParent();
  DL = &M.getDataLayout();

  if (F.hasFnAttribute("amdgpu-wave-limiter") &&
      F.hasFnAttribute("amdgpu-memory-bound"))
    return false;

  const AMDGPUPerfHintAnalysis::FuncInfo *Info = visit(F);

  LLVM_DEBUG(dbgs() << F.getName() << " MemInst cost: " << Info->MemInstCost
                    << '\n'
                    << " IAMInst cost: " << Info->IAMInstCost << '\n'
                    << " LSMInst cost: " << Info->LSMInstCost << '\n'
                    << " TotalInst cost: " << Info->InstCost << '\n');

  if (isMemBound(*Info)) {
    LLVM_DEBUG(dbgs() << F.getName() << " is memory bound\n");
    NumMemBound++;
    F.addFnAttr("amdgpu-memory-bound", "true");
  }

  if (AMDGPU::isEntryFunctionCC(F.getCallingConv()) && needLimitWave(*Info)) {
    LLVM_DEBUG(dbgs() << F.getName() << " needs limit wave\n");
    NumLimitWave++;
    F.addFnAttr("amdgpu-wave-limiter", "true");
  }

  return isMemBound(*Info) || needLimitWave(*Info);
}
bool AMDGPUPerfHint::isMemBound(const AMDGPUPerfHintAnalysis::FuncInfo &FI) {
  return FI.MemInstCost * 100 / FI.InstCost > MemBoundThresh;
}

bool AMDGPUPerfHint::needLimitWave(const AMDGPUPerfHintAnalysis::FuncInfo &FI) {
  return ((FI.MemInstCost + FI.IAMInstCost * IAWeight +
           FI.LSMInstCost * LSWeight) * 100 / FI.InstCost) > LimitWaveThresh;
}

bool AMDGPUPerfHint::isGlobalAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType())) {
    unsigned As = PT->getAddressSpace();
    // Flat likely points to global too.
    return As == AMDGPUAS::GLOBAL_ADDRESS || As == AMDGPUAS::FLAT_ADDRESS;
  }
  return false;
}

bool AMDGPUPerfHint::isLocalAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType()))
    return PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
  return false;
}

bool AMDGPUPerfHint::isLargeStride(const Instruction *Inst) {
  LLVM_DEBUG(dbgs() << "[isLargeStride] " << *Inst << '\n');

  MemAccessInfo MAI = makeMemAccessInfo(const_cast<Instruction *>(Inst));
  bool IsLargeStride = MAI.isLargeStride(LastAccess);
  if (MAI.Base)
    LastAccess = std::move(MAI);

  return IsLargeStride;
}
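
// Builds the (Base, Offset) pair used for stride comparison. LastAccess is
// reset at the start of every basic block in visit(), so large strides are
// only detected between consecutive accesses within the same block.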
AMDGPUPerfHint::MemAccessInfo
AMDGPUPerfHint::makeMemAccessInfo(Instruction *Inst) const {
  MemAccessInfo MAI;
  const Value *MO = getMemoryInstrPtrAndType(Inst).first;

  LLVM_DEBUG(dbgs() << "[isLargeStride] MO: " << *MO << '\n');
  // Do not treat local-addr memory access as large stride.
  if (isLocalAddr(MO))
    return MAI;

  MAI.V = MO;
  MAI.Base = GetPointerBaseWithConstantOffset(MO, MAI.Offset, *DL);
  return MAI;
}

bool AMDGPUPerfHint::isConstantAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType())) {
    unsigned As = PT->getAddressSpace();
    return As == AMDGPUAS::CONSTANT_ADDRESS ||
           As == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
  }
  return false;
}

bool AMDGPUPerfHint::MemAccessInfo::isLargeStride(
    MemAccessInfo &Reference) const {

  if (!Base || !Reference.Base || Base != Reference.Base)
    return false;

  uint64_t Diff = Offset > Reference.Offset ? Offset - Reference.Offset
                                            : Reference.Offset - Offset;
  bool Result = Diff > LargeStrideThresh;
  LLVM_DEBUG(dbgs() << "[isLargeStride compare]\n"
                    << print() << "<=>\n"
                    << Reference.print() << "Result:" << Result << '\n');
  return Result;
}
} // namespace
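
// Legacy pass manager driver. CallGraphSCC passes run bottom-up, visiting
// callees before callers, so FIM already holds the cached costs of every
// non-recursive callee by the time a caller is processed.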
bool AMDGPUPerfHintAnalysis::runOnSCC(CallGraphSCC &SCC) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  const TargetMachine &TM = TPC->getTM<TargetMachine>();

  bool Changed = false;
  for (CallGraphNode *I : SCC) {
    Function *F = I->getFunction();
    if (!F || F->isDeclaration())
      continue;

    const TargetSubtargetInfo *ST = TM.getSubtargetImpl(*F);
    AMDGPUPerfHint Analyzer(FIM, ST->getTargetLowering());

    if (Analyzer.runOnFunction(*F))
      Changed = true;
  }

  return Changed;
}
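
// Queries against the per-function costs cached in FIM during the SCC walk;
// both return false for functions the analysis never visited.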
bool AMDGPUPerfHintAnalysis::isMemoryBound(const Function *F) const {
  auto FI = FIM.find(F);
  if (FI == FIM.end())
    return false;

  return AMDGPUPerfHint::isMemBound(FI->second);
}

bool AMDGPUPerfHintAnalysis::needsWaveLimiter(const Function *F) const {
  auto FI = FIM.find(F);
  if (FI == FIM.end())
    return false;

  return AMDGPUPerfHint::needLimitWave(FI->second);
}