//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//
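//
// Example (an illustrative sketch, not verbatim pass output): a private array
// accessed only through simple in-bounds GEPs
//
//   %alloca = alloca [4 x float]
//   %gep = getelementptr inbounds [4 x float], [4 x float]* %alloca, i32 0, i32 %idx
//   %val = load float, float* %gep
//
// can be kept in a <4 x float> vector and accessed with extractelement and
// insertelement, avoiding scratch memory. Allocas that cannot be vectorized
// may still be moved into an LDS array with one element per workitem.
//
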
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <tuple>
#include <utility>
#include <vector>

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
  "disable-promote-alloca-to-vector",
  cl::desc("Disable promote alloca to vector"),
  cl::init(false));
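
// Illustrative use (not part of the upstream file): the flag can be passed to
// any tool that parses cl::opts, e.g.
//   llc -mtriple=amdgcn -disable-promote-alloca-to-vector < input.ll
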
// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  AMDGPUAS AS;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  Mod = &M;
  DL = &Mod->getDataLayout();

  return false;
}

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
    TM = &TPC->getTM<TargetMachine>();
  else
    return false;

  const Triple &TT = TM->getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  AS = AMDGPU::getAMDGPUAS(*F.getParent());

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  // Advance the iterator before handleAlloca, since a promoted alloca is
  // erased from the block.
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E;) {
    AllocaInst *AI = dyn_cast<AllocaInst>(I);

    ++I;
    if (AI)
      Changed |= handleAlloca(*AI, SufficientLDS);
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  //   #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  //   #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  //   #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  //   #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
  //
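  // Viewed as an i32 array (offsets derived from the layout above): i32
  // index 1 covers bytes 4-7, i.e. {workgroup_size_x, workgroup_size_y};
  // i32 index 2 covers bytes 8-11, i.e. {workgroup_size_z, reserved0}.
  // Those are exactly the two GEPs emitted below.
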
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
    DispatchPtr, PointerType::get(I32Ty, AS.CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST =
      AMDGPUSubtarget::get(*TM, *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
                      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
                      : Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
                      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}

static VectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return VectorType::get(ArrayTy->getElementType(),
                         ArrayTy->getNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

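// A sketch of the only GEP shape accepted below: a direct two-index access
// into the alloca'd array, e.g. (illustrative IR)
//
//   %gep = getelementptr inbounds [4 x i32], [4 x i32]* %alloca, i32 0, i32 %idx
//
// The first index must be the constant 0; %idx becomes the vector lane index.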
static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently only handle the case where the Pointer Operand is a GEP.
    // Also we cannot vectorize volatile or atomic loads.
    LoadInst *LI = cast<LoadInst>(Inst);
    return isa<GetElementPtrInst>(LI->getPointerOperand()) && LI->isSimple();
  }
  case Instruction::BitCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value, plus
    // since it should be canonical form, the User should be a GEP.
    // Also we cannot vectorize volatile or atomic stores.
    StoreInst *SI = cast<StoreInst>(Inst);
    return (SI->getPointerOperand() == User) && isa<GetElementPtrInst>(User) &&
           SI->isSimple();
  }
  default:
    return false;
  }
}

static bool tryPromoteAllocaToVector(AllocaInst *Alloca, AMDGPUAS AS) {
  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion alloca to vector is disabled\n");
    return false;
  }

  ArrayType *AllocaTy = dyn_cast<ArrayType>(Alloca->getAllocatedType());

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted but we don't
  // currently handle this case.
  if (!AllocaTy ||
      AllocaTy->getNumElements() > 16 ||
      AllocaTy->getNumElements() < 2 ||
      !VectorType::isValidElementType(AllocaTy->getElementType())) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                        << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = arrayTypeToVecType(AllocaTy);

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

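  // Rewrite every collected use in terms of one whole-vector value. As a
  // sketch (illustrative IR), a scalar load
  //
  //   %v = load i32, i32* %gep
  //
  // becomes
  //
  //   %bc  = bitcast [4 x i32]* %alloca to <4 x i32>*
  //   %vec = load <4 x i32>, <4 x i32>* %bc
  //   %v   = extractelement <4 x i32> %vec, i32 %idx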
  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      Type *VecPtrTy = VectorTy->getPointerTo(AS.PRIVATE_ADDRESS);
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      Type *VecPtrTy = VectorTy->getPointerTo(AS.PRIVATE_ADDRESS);

      StoreInst *SI = cast<StoreInst>(Inst);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       SI->getValueOperand(),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }

  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (OtherOp == Val)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // uses of the pointer.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
  Value *BaseAlloca,
  Value *Val,
  std::vector<Value*> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other compare operand is
    // derived from the same alloca that is being promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {
  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AS.LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AS.LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, AS))
    return true; // Promoted to vector.

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(dbgs()
               << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(*TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

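  // Promotion gives every workitem in the workgroup its own copy of the
  // alloca, so the LDS cost below is the per-workitem size scaled by the
  // maximum flat workgroup size.
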
  // FIXME: This computed padding is likely wrong since it depends on inverse
  // order of uses.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AS.LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

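  // Flatten the 3D workitem id so each thread addresses a distinct array
  // element: TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ.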
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AS.LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want to
      // touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AS.LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // the different types of instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlignment(),
                           MemCpy->getRawSource(), MemCpy->getSourceAlignment(),
                           MemCpy->getLength(), MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlignment(),
                            MemMove->getRawSource(), MemMove->getSourceAlignment(),
                            MemMove->getLength(), MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AS.LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize, {Src, Intr->getOperand(1), Intr->getOperand(2)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  return true;
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}
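
// Usage sketch (illustrative, assuming the legacy pass manager; this snippet
// is not part of the upstream file):
//
//   legacy::PassManager PM;
//   PM.add(createAMDGPUPromoteAlloca());
//   PM.run(M);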