//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <map>
#include <tuple>
#include <utility>
#include <vector>
#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {
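// Command-line overrides, mainly for testing: the first two flags disable
// each promotion strategy independently; the third caps the alloca size (in
// bytes) considered for vectorization instead of deriving the limit from the
// VGPR budget.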
static cl::opt<bool> DisablePromoteAllocaToVector(
  "disable-promote-alloca-to-vector",
  cl::desc("Disable promote alloca to vector"),
  cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
  "disable-promote-alloca-to-lds",
  cl::desc("Disable promote alloca to LDS"),
  cl::init(false));

static cl::opt<unsigned> PromoteAllocaToVectorLimit(
  "amdgpu-promote-alloca-to-vector-limit",
  cl::desc("Maximum byte size to consider promote alloca to vector"),
  cl::init(0));
// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;
  unsigned MaxVGPRs;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value *> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};
class AMDGPUPromoteAllocaToVector : public FunctionPass {
private:
  unsigned MaxVGPRs;

public:
  static char ID;

  AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "AMDGPU Promote Alloca to vector";
  }

  bool handleAlloca(AllocaInst &I);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace
char AMDGPUPromoteAlloca::ID = 0;
char AMDGPUPromoteAllocaToVector::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
                "AMDGPU promote alloca to vector", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();

  return false;
}
bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const Triple &TT = TM->getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  if (IsAMDGCN) {
    const GCNSubtarget &ST = TM->getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
  } else {
    MaxVGPRs = 128;
  }

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handleAlloca(*AI, SufficientLDS))
      Changed = true;
  }

  return Changed;
}
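// Returns the workgroup size in the Y and Z dimensions as IR values. On
// non-HSA targets these come from dedicated read_local_size intrinsics; on
// amdhsa they must be loaded from the dispatch packet (see below).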
std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }
  // We must read the size out of the dispatch pointer.
  assert(IsAMDHSA);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
  //
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
    DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}
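// Returns the workitem id for dimension N (0 = x, 1 = y, 2 = z) using the
// target-appropriate intrinsic, with workitem range metadata attached.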
Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST =
      AMDGPUSubtarget::get(*TM, *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}
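// Helpers for the vector-promotion path: map an array type onto the
// equivalent vector type, and turn GEPs over the alloca into vector lane
// indices.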
static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return FixedVectorType::get(ArrayTy->getElementType(),
                              ArrayTy->getNumElements());
}

static Value *stripBitcasts(Value *V) {
  while (Instruction *I = dyn_cast<Instruction>(V)) {
    if (I->getOpcode() != Instruction::BitCast)
      break;
    V = I->getOperand(0);
  }
  return V;
}
static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(stripBitcasts(Ptr));
  if (!GEP)
    return nullptr;

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support the simple case of a GEP with a zero leading
  // index, i.e. GEPs of the form (alloca, 0, idx).
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}
// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User,
                             const DataLayout &DL) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently only handle the case where the Pointer Operand is a GEP.
    // Also we cannot vectorize volatile or atomic loads.
    LoadInst *LI = cast<LoadInst>(Inst);
    if (isa<AllocaInst>(User) &&
        LI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(LI->getType()))
      return true;

    Instruction *PtrInst = dyn_cast<Instruction>(LI->getPointerOperand());
    if (!PtrInst)
      return false;

    return (PtrInst->getOpcode() == Instruction::GetElementPtr ||
            PtrInst->getOpcode() == Instruction::BitCast) &&
           LI->isSimple();
  }
  case Instruction::BitCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value, plus
    // since it should be canonical form, the User should be a GEP.
    // Also we cannot vectorize volatile or atomic stores.
    StoreInst *SI = cast<StoreInst>(Inst);
    if (isa<AllocaInst>(User) &&
        SI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(SI->getValueOperand()->getType()))
      return true;

    Instruction *UserInst = dyn_cast<Instruction>(User);
    if (!UserInst)
      return false;

    return (SI->getPointerOperand() == User) &&
           (UserInst->getOpcode() == Instruction::GetElementPtr ||
            UserInst->getOpcode() == Instruction::BitCast) &&
           SI->isSimple();
  }
  default:
    return false;
  }
}
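// Try to rewrite a small, statically indexed array alloca as operations on a
// single vector value. Illustrative IR (a hand-written sketch, not taken from
// the tests):
//
//   %a = alloca [4 x float], addrspace(5)
//   %p = getelementptr inbounds [4 x float], [4 x float] addrspace(5)* %a,
//                               i32 0, i32 %i
//   store float %v, float addrspace(5)* %p
//
// becomes a read-modify-write of the whole vector:
//
//   %c = bitcast [4 x float] addrspace(5)* %a to <4 x float> addrspace(5)*
//   %vec = load <4 x float>, <4 x float> addrspace(5)* %c
//   %new = insertelement <4 x float> %vec, float %v, i32 %i
//   store <4 x float> %new, <4 x float> addrspace(5)* %c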
static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
                                     unsigned MaxVGPRs) {

  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion of alloca to vector is disabled\n");
    return false;
  }

  Type *AllocaTy = Alloca->getAllocatedType();
  auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
  if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
    if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
        ArrayTy->getNumElements() > 0)
      VectorTy = arrayTypeToVecType(ArrayTy);
  }

  // Use up to 1/4 of available register budget for vectorization.
  unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
                                              : (MaxVGPRs * 32);

  if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
    LLVM_DEBUG(dbgs() << "  Alloca too big for vectorization with "
                      << MaxVGPRs << " registers available\n");
    return false;
  }
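  // Note on units for the check above: Limit is in bits. Each VGPR holds 32
  // bits per lane, so MaxVGPRs * 32 is the full per-lane register budget in
  // bits (the explicit cl::opt limit is given in bytes, hence the * 8), and
  // multiplying the alloca size by 4 enforces the quarter-budget rule.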
  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
  if (!VectorTy || VectorTy->getNumElements() > 16 ||
      VectorTy->getNumElements() < 2) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
  std::vector<Value *> WorkList;
  SmallVector<User *, 8> Users(Alloca->users());
  SmallVector<User *, 8> UseUsers(Users.size(), Alloca);
  Type *VecEltTy = VectorTy->getElementType();
  while (!Users.empty()) {
    User *AllocaUser = Users.pop_back_val();
    User *UseUser = UseUsers.pop_back_val();
    Instruction *Inst = dyn_cast<Instruction>(AllocaUser);

    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(Inst, UseUser, DL))
        return false;

      if (Inst->getOpcode() == Instruction::BitCast) {
        Type *FromTy = Inst->getOperand(0)->getType()->getPointerElementType();
        Type *ToTy = Inst->getType()->getPointerElementType();
        if (FromTy->isAggregateType() || ToTy->isAggregateType() ||
            DL.getTypeSizeInBits(FromTy) != DL.getTypeSizeInBits(ToTy))
          continue;

        for (User *CastUser : Inst->users()) {
          if (isAssumeLikeIntrinsic(cast<Instruction>(CastUser)))
            continue;
          Users.push_back(CastUser);
          UseUsers.push_back(Inst);
        }

        continue;
      }

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                        << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    Users.append(GEP->user_begin(), GEP->user_end());
    UseUsers.append(GEP->getNumUses(), GEP);
  }

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');
  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      if (Inst->getType() == AllocaTy || Inst->getType()->isVectorTy())
        break;

      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      if (!Index)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      if (Inst->getType() != VecEltTy)
        ExtractElement =
            Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      if (SI->getValueOperand()->getType() == AllocaTy ||
          SI->getValueOperand()->getType()->isVectorTy())
        break;

      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      if (!Index)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *Elt = SI->getValueOperand();
      if (Elt->getType() != VecEltTy)
        Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }

  return true;
}
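// Calls that the LDS-promotion path knows how to rewrite: only a small set of
// memory and invariant intrinsics, handled at the bottom of handleAlloca.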
static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}
bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the point of the load/store.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}
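// Recursively collect every pointer-typed user of the alloca that we know how
// to rewrite into the local address space, failing on any use (volatile
// access, capture, out-of-bounds GEP, ...) that would make promotion unsound.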
bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
    Value *BaseAlloca,
    Value *Val,
    std::vector<Value *> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote a select if we know that the other select operand
    // is from another pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}
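// Decide whether this kernel can afford LDS promotion at all: tally the LDS
// already used by globals referenced from F, then cap further usage so the
// estimated occupancy is not degraded. The results seed LocalMemLimit and
// CurrentLocalMemUsage for handleAlloca.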
bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {

  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        Align Alignment =
            DL.getValueOrABITypeAlignment(GV.getAlign(), GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alignment);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}
// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  const DataLayout &DL = Mod->getDataLayout();
  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore, not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  Align Alignment =
      DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // order of uses.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value *> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << "  Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(MaybeAlign(I.getAlignment()));
  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);
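  // Linearize the 3D workitem id so each workitem addresses a disjoint slice
  // of the shared array: TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ.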
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();
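  // Rewrite the collected pointer users to the local address space: null
  // pointer constants are replaced with local-AS nulls, other pointer values
  // are retyped in place, and promotable intrinsic calls are recreated below.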
  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // users at the same time.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }
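    // What remains in the worklist are calls, which isCallPromotable already
    // vetted as intrinsics we can rewrite against the new address space.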
    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlign(),
                           MemCpy->getRawSource(), MemCpy->getSourceAlign(),
                           MemCpy->getLength(), MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlign(),
                            MemMove->getRawSource(), MemMove->getSourceAlign(),
                            MemMove->getLength(), MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(
          MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(),
          MaybeAlign(MemSet->getDestAlignment()), MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(
          Mod, Intrinsic::objectsize,
          {Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS)});

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  return true;
}
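// The standalone to-vector pass performs the same entry-block alloca scan as
// AMDGPUPromoteAlloca, but only attempts vector promotion and never uses LDS.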
bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
  if (skipFunction(F) || DisablePromoteAllocaToVector)
    return false;

  const TargetMachine *TM;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    const GCNSubtarget &ST = TM->getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
  } else {
    MaxVGPRs = 128;
  }

  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handleAlloca(*AI))
      Changed = true;
  }

  return Changed;
}
bool AMDGPUPromoteAllocaToVector::handleAlloca(AllocaInst &I) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  Module *Mod = I.getParent()->getParent()->getParent();
  return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
}
FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}

FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
  return new AMDGPUPromoteAllocaToVector();
}