//===-- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements a TargetTransformInfo analysis pass specific to the
// AMDGPU target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUTargetTransformInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

static cl::opt<unsigned> UnrollThresholdPrivate(
  "amdgpu-unroll-threshold-private",
  cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
  cl::init(2500), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdLocal(
  "amdgpu-unroll-threshold-local",
  cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
  cl::init(1000), cl::Hidden);

static cl::opt<unsigned> UnrollThresholdIf(
  "amdgpu-unroll-threshold-if",
  cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
  cl::init(150), cl::Hidden);
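
// Returns true if the branch condition \p Cond (followed through at most 10
// levels of operands) depends on a PHI node that belongs to \p L itself rather
// than to one of its subloops.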
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
  const Instruction *I = dyn_cast<Instruction>(Cond);
  if (!I)
    return false;

  for (const Value *V : I->operand_values()) {
    if (!L->contains(I))
      continue;
    if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
      if (none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
                  return SubLoop->contains(PHI); }))
        return true;
    } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
      return true;
  }
  return false;
}
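
// Raise the loop unroll threshold when the loop addresses private (scratch) or
// local (LDS) memory through a GEP that varies with the loop, so that SROA or
// DS-instruction combining gets a better chance, and give a small bonus for
// conditional branches controlled by loop-local PHIs.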
void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L,
                                            TTI::UnrollingPreferences &UP) {
  UP.Threshold = 300; // Twice the default.
  UP.MaxCount = UINT_MAX;
  UP.Partial = true;

  // TODO: Do we want runtime unrolling?

  // Maximum alloca size that can fit in registers. Reserve 16 registers.
  const unsigned MaxAlloca = (256 - 16) * 4;
  unsigned ThresholdPrivate = UnrollThresholdPrivate;
  unsigned ThresholdLocal = UnrollThresholdLocal;
  unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
  AMDGPUAS ASST = ST->getAMDGPUAS();
  for (const BasicBlock *BB : L->getBlocks()) {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    unsigned LocalGEPsSeen = 0;

    if (any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
               return SubLoop->contains(BB); }))
      continue; // Block belongs to an inner loop.

    for (const Instruction &I : *BB) {

      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate
      // the if region and potentially even the PHI itself, saving on both
      // divergence and registers used for the PHI.
      // Add a small bonus for each such "if" statement.
      if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
        if (UP.Threshold < MaxBoost && Br->isConditional()) {
          if (L->isLoopExiting(Br->getSuccessor(0)) ||
              L->isLoopExiting(Br->getSuccessor(1)))
            continue;
          if (dependsOnLocalPhi(L, Br->getCondition())) {
            UP.Threshold += UnrollThresholdIf;
            DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
                         << " for loop:\n" << *L << " due to " << *Br << '\n');
            if (UP.Threshold >= MaxBoost)
              return;
          }
        }
        continue;
      }

      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
      if (!GEP)
        continue;

      unsigned AS = GEP->getAddressSpace();
      unsigned Threshold = 0;
      if (AS == ASST.PRIVATE_ADDRESS)
        Threshold = ThresholdPrivate;
      else if (AS == ASST.LOCAL_ADDRESS)
        Threshold = ThresholdLocal;
      else
        continue;

      if (UP.Threshold >= Threshold)
        continue;

      if (AS == ASST.PRIVATE_ADDRESS) {
        const Value *Ptr = GEP->getPointerOperand();
        const AllocaInst *Alloca =
            dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
        if (!Alloca || !Alloca->isStaticAlloca())
          continue;
        Type *Ty = Alloca->getAllocatedType();
        unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
        if (AllocaSize > MaxAlloca)
          continue;
      } else if (AS == ASST.LOCAL_ADDRESS) {
        LocalGEPsSeen++;
        // Inhibit unroll for local memory if we have seen addressing not to
        // a variable; most likely we will be unable to combine it.
        // Do not unroll too deep inner loops for local memory to give a chance
        // to unroll an outer loop for a more important reason.
        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
             !isa<Argument>(GEP->getPointerOperand())))
          continue;
      }

      // Check if GEP depends on a value defined by this loop itself.
      bool HasLoopDef = false;
      for (const Value *Op : GEP->operands()) {
        const Instruction *Inst = dyn_cast<Instruction>(Op);
        if (!Inst || L->isLoopInvariant(Op))
          continue;

        if (any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
             return SubLoop->contains(Inst); }))
          continue;
        HasLoopDef = true;
        break;
      }
      if (!HasLoopDef)
        continue;

      // We want to do whatever we can to limit the number of alloca
      // instructions that make it through to the code generator. allocas
      // require us to use indirect addressing, which is slow and prone to
      // compiler bugs. If this loop does an address calculation on an
      // alloca ptr, then we want to use a higher than normal loop unroll
      // threshold. This will give SROA a better chance to eliminate these
      // allocas.
      //
      // We also want to have more unrolling for local memory to let ds
      // instructions with different offsets combine.
      //
      // Don't use the maximum allowed value here as it will make some
      // programs way too big.
      UP.Threshold = Threshold;
      DEBUG(dbgs() << "Set unroll threshold " << Threshold << " for loop:\n"
                   << *L << " due to " << *GEP << '\n');
      if (UP.Threshold >= MaxBoost)
        return;
    }
  }
}
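
// The register-file and memory-width queries below feed the vectorizers'
// cost and legality heuristics.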
unsigned AMDGPUTTIImpl::getNumberOfRegisters(bool Vec) {
  if (Vec)
    return 0;

  // Number of VGPRs on SI.
  if (ST->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
    return 256;

  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}

unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool Vector) {
  return Vector ? 0 : 32;
}

unsigned AMDGPUTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
  AMDGPUAS AS = ST->getAMDGPUAS();
  if (AddrSpace == AS.GLOBAL_ADDRESS ||
      AddrSpace == AS.CONSTANT_ADDRESS ||
      AddrSpace == AS.FLAT_ADDRESS)
    return 128;
  if (AddrSpace == AS.LOCAL_ADDRESS ||
      AddrSpace == AS.REGION_ADDRESS)
    return 64;
  if (AddrSpace == AS.PRIVATE_ADDRESS)
    return 8 * ST->getMaxPrivateElementSize();

  if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS &&
      (AddrSpace == AS.PARAM_D_ADDRESS ||
      AddrSpace == AS.PARAM_I_ADDRESS ||
      (AddrSpace >= AS.CONSTANT_BUFFER_0 &&
      AddrSpace <= AS.CONSTANT_BUFFER_15)))
    return 128;
  llvm_unreachable("unhandled address space");
}

bool AMDGPUTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                               unsigned Alignment,
                                               unsigned AddrSpace) const {
  // We allow vectorization of flat stores, even though we may need to decompose
  // them later if they may access private memory. We don't have enough context
  // here, and legalization can handle it.
  if (AddrSpace == ST->getAMDGPUAS().PRIVATE_ADDRESS) {
    return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
      ChainSizeInBytes <= ST->getMaxPrivateElementSize();
  }
  return true;
}
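
// Load and store chains share the same legality rule.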
bool AMDGPUTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                                unsigned Alignment,
                                                unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

bool AMDGPUTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                                 unsigned Alignment,
                                                 unsigned AddrSpace) const {
  return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}

unsigned AMDGPUTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // Disable unrolling if the loop is not vectorized.
  if (VF == 1)
    return 1;

  // Semi-arbitrary large amount.
  return 64;
}
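
// Arithmetic costs below are built from the rate helpers
// (getFullRateInstrCost, getQuarterRateInstrCost, get64BitInstrCost), scaled
// by how many pieces the type splits into during legalization.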
int AMDGPUTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {

  EVT OrigTy = TLI->getValueType(DL, Ty);
  if (!OrigTy.isSimple()) {
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  // Because we don't have any legal vector operations, only legal vector
  // types, we need to account for split vectors.
  unsigned NElts = LT.second.isVector() ?
    LT.second.getVectorNumElements() : 1;

  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;

  switch (ISD) {
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA: {
    if (SLT == MVT::i64)
      return get64BitInstrCost() * LT.first * NElts;

    // i32
    return getFullRateInstrCost() * LT.first * NElts;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    if (SLT == MVT::i64) {
      // and, or and xor are typically split into 2 VALU instructions.
      return 2 * getFullRateInstrCost() * LT.first * NElts;
    }

    return LT.first * NElts * getFullRateInstrCost();
  }
  case ISD::MUL: {
    const int QuarterRateCost = getQuarterRateInstrCost();
    if (SLT == MVT::i64) {
      const int FullRateCost = getFullRateInstrCost();
      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
    }

    // i32
    return QuarterRateCost * NElts * LT.first;
  }
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
    if (SLT == MVT::f64)
      return LT.first * NElts * get64BitInstrCost();

    if (SLT == MVT::f32 || SLT == MVT::f16)
      return LT.first * NElts * getFullRateInstrCost();
    break;

  case ISD::FDIV:
  case ISD::FREM:
    // FIXME: frem should be handled separately. The fdiv in it is most of it,
    // but the current lowering is also not entirely correct.
    if (SLT == MVT::f64) {
      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();

      // Add cost of workaround.
      if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        Cost += 3 * getFullRateInstrCost();

      return LT.first * Cost * NElts;
    }

    // Assuming no fp32 denormals lowering.
    if (SLT == MVT::f32 || SLT == MVT::f16) {
      assert(!ST->hasFP32Denormals() && "will change when supported");
      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();
      return LT.first * NElts * Cost;
    }

    break;
  default:
    break;
  }

  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}
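
// Control flow is comparatively expensive on AMDGPU, so report a
// higher-than-default cost for branches and returns.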
unsigned AMDGPUTTIImpl::getCFInstrCost(unsigned Opcode) {
  // XXX - For some reason this isn't called for switch.
  switch (Opcode) {
  case Instruction::Br:
  case Instruction::Ret:
    return 10;
  default:
    return BaseT::getCFInstrCost(Opcode);
  }
}

int AMDGPUTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                      unsigned Index) {
  switch (Opcode) {
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
    // Extracts are just reads of a subregister, so are free. Inserts are
    // considered free because we don't want to have any cost for scalarizing
    // operations, and we don't have to copy into a different register class.

    // Dynamic indexing isn't free and is best avoided.
    return Index == ~0u ? 2 : 0;
  default:
    return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
  }
}
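
// Intrinsics whose results can differ between lanes of a wavefront: workitem
// and lane IDs, interpolation, and memory atomics.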
static bool isIntrinsicSourceOfDivergence(const IntrinsicInst *I) {
  switch (I->getIntrinsicID()) {
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::amdgcn_interp_mov:
  case Intrinsic::amdgcn_interp_p1:
  case Intrinsic::amdgcn_interp_p2:
  case Intrinsic::amdgcn_mbcnt_hi:
  case Intrinsic::amdgcn_mbcnt_lo:
  case Intrinsic::r600_read_tidig_x:
  case Intrinsic::r600_read_tidig_y:
  case Intrinsic::r600_read_tidig_z:
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_image_atomic_swap:
  case Intrinsic::amdgcn_image_atomic_add:
  case Intrinsic::amdgcn_image_atomic_sub:
  case Intrinsic::amdgcn_image_atomic_smin:
  case Intrinsic::amdgcn_image_atomic_umin:
  case Intrinsic::amdgcn_image_atomic_smax:
  case Intrinsic::amdgcn_image_atomic_umax:
  case Intrinsic::amdgcn_image_atomic_and:
  case Intrinsic::amdgcn_image_atomic_or:
  case Intrinsic::amdgcn_image_atomic_xor:
  case Intrinsic::amdgcn_image_atomic_inc:
  case Intrinsic::amdgcn_image_atomic_dec:
  case Intrinsic::amdgcn_image_atomic_cmpswap:
  case Intrinsic::amdgcn_buffer_atomic_swap:
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_buffer_atomic_xor:
  case Intrinsic::amdgcn_buffer_atomic_cmpswap:
  case Intrinsic::amdgcn_ps_live:
  case Intrinsic::amdgcn_ds_swizzle:
    return true;
  default:
    return false;
  }
}
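
/// \returns true if argument \p A is known to be passed in an SGPR and is
/// therefore uniform across the wavefront.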
static bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  if (!AMDGPU::isShader(F->getCallingConv()))
    return true;

  // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
  if (F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
      F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal))
    return true;

  // Everything else is in VGPRs.
  return false;
}

/// \returns true if the result of the value could potentially be
/// different across workitems in a wavefront.
bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const {

  if (const Argument *A = dyn_cast<Argument>(V))
    return !isArgPassedInSGPR(A);

  // Loads from the private address space are divergent, because threads
  // can execute the load instruction with the same inputs and get different
  // results.
  //
  // All other loads are not divergent, because if threads issue loads with the
  // same arguments, they will always get the same result.
  if (const LoadInst *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerAddressSpace() == ST->getAMDGPUAS().PRIVATE_ADDRESS;

  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // original.
  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
    return true;

  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
    return isIntrinsicSourceOfDivergence(Intrinsic);

  // Assume all function calls are a source of divergence.
  if (isa<CallInst>(V) || isa<InvokeInst>(V))
    return true;

  return false;
}