//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-promote-alloca"

namespace {

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
private:
  const TargetMachine *TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  AMDGPUAS AS;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

public:
  static char ID;

  AMDGPUPromoteAlloca(const TargetMachine *TM_ = nullptr) :
    FunctionPass(ID), TM(TM_) {}

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  void handleAlloca(AllocaInst &I);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;

INITIALIZE_TM_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                   "AMDGPU promote alloca to vector or LDS", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;

bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
  if (!TM)
    return false;

  Mod = &M;
  DL = &Mod->getDataLayout();

  const Triple &TT = TM->getTargetTriple();

  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  return false;
}

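// Per-function entry point: work out how much LDS is already in use and how
// much can be spent on promotion without hurting occupancy, then visit each
// alloca in the entry block.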
bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (!TM || skipFunction(F))
    return false;

  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  AS = AMDGPU::getAMDGPUAS(*F.getParent());

  FunctionType *FTy = F.getFunctionType();

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AS.LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                      "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AS.LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
               << " bytes of LDS\n"
               << "  Rounding size to " << MaxSizeWithWaveCount
               << " with a maximum occupancy of " << MaxOccupancy << '\n'
               << " and " << (LocalMemLimit - CurrentLocalMemUsage)
               << " available for promotion\n");

  BasicBlock &EntryBB = *F.begin();
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&*I);

    ++I;
    if (AI)
      handleAlloca(*AI);
  }

  return true;
}

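// Returns the work group sizes in the Y and Z dimensions, either via the r600
// local-size intrinsics or, on amdhsa, by loading the workgroup_size fields
// out of the dispatch packet.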
std::pair<Value *, Value *>
AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(
    *Builder.GetInsertBlock()->getParent());

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t;

  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
    DispatchPtr, PointerType::get(I32Ty, AS.CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

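// Emits a call to the workitem ID intrinsic for dimension N (0 = x, 1 = y,
// 2 = z), annotated with range metadata for the work group size.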
Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(
    *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
                      : Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
                      : Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
                      : Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}

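// Maps an array type such as [4 x i32] to the equivalent vector type
// <4 x i32>.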
static VectorType *arrayTypeToVecType(Type *ArrayTy) {
  return VectorType::get(ArrayTy->getArrayElementType(),
                         ArrayTy->getArrayNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value.
    StoreInst *SI = cast<StoreInst>(Inst);
    return SI->getPointerOperand() == User;
  }
  default:
    return false;
  }
}

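// Try to replace a small, statically indexed array alloca with a vector:
// every GEP must have a computable vector index, and every user must be a
// load, store, or cast that can be rewritten as extract/insertelement.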
static bool tryPromoteAllocaToVector(AllocaInst *Alloca, AMDGPUAS AS) {
  ArrayType *AllocaTy = dyn_cast<ArrayType>(Alloca->getAllocatedType());

  DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays; we
  // are just being conservative for now.
  if (!AllocaTy ||
      AllocaTy->getElementType()->isVectorTy() ||
      AllocaTy->getNumElements() > 4 ||
      AllocaTy->getNumElements() < 2) {
    DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  std::vector<Value*> WorkList;
  for (User *AllocaUser : Alloca->users()) {
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(cast<Instruction>(AllocaUser), Alloca))
        return false;

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    for (User *GEPUser : AllocaUser->users()) {
      if (!canVectorizeInst(cast<Instruction>(GEPUser), AllocaUser))
        return false;

      WorkList.push_back(GEPUser);
    }
  }

  VectorType *VectorTy = arrayTypeToVecType(AllocaTy);

  DEBUG(dbgs() << "  Converting alloca to vector "
               << *AllocaTy << " -> " << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      Type *VecPtrTy = VectorTy->getPointerTo(AS.PRIVATE_ADDRESS);
      Value *Ptr = Inst->getOperand(0);
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);

      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      Type *VecPtrTy = VectorTy->getPointerTo(AS.PRIVATE_ADDRESS);

      Value *Ptr = Inst->getOperand(1);
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(BitCast);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
                                                       Inst->getOperand(0),
                                                       Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      break;

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }

  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::invariant_group_barrier:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
                                                          Value *Val,
                                                          Instruction *Inst,
                                                          int OpIdx0,
                                                          int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (OtherOp == Val)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // some point.
  if (OtherObj != BaseAlloca) {
    DEBUG(dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

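// Recursively collect every user of Val that will need its pointer type
// rewritten when the alloca is replaced with an LDS global, rejecting any use
// (volatile access, capture, ptrtoint, ...) that cannot be handled.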
bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
  Value *BaseAlloca,
  Value *Val,
  std::vector<Value*> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote a compare if we know that the other compare operand
    // is from another pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

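// Promote a single alloca: first try converting it to a vector, and if that
// fails, try moving it into a per-workgroup LDS array indexed by workitem ID.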
// FIXME: Should try to pick the most likely to be profitable allocas first.
void AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, AS)) {
    DEBUG(dbgs() << "  alloca was promoted to a vector.\n");
    return;
  }

  const Function &ContainingFunction = *I.getParent()->getParent();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore, not all LDS is available for some of the stages.
  if (AMDGPU::isShader(ContainingFunction.getCallingConv()))
    return;

  const AMDGPUSubtarget &ST =
    TM->getSubtarget<AMDGPUSubtarget>(ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // order of uses.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    DEBUG(dbgs() << "  " << AllocSize
                 << " bytes of local memory not available to promote\n");
    return;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AS.LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

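  // Linearize the workitem ID so each lane gets a distinct slot in the LDS
  // array: TID = (TIdX * TCntY * TCntZ) + (TIdY * TCntZ) + TIdZ.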
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

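  // Fix up the collected users: non-call users have their pointer types (and
  // any null pointer constants) switched to the LDS address space, while the
  // promotable intrinsics are recreated for the new address space.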
  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AS.LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to rewrite the addrspacecast itself here.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AS.LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
                           MemCpy->getLength(), MemCpy->getAlignment(),
                           MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getRawSource(),
                            MemMove->getLength(), MemMove->getAlignment(),
                            MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::invariant_group_barrier:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AS.LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize, {Src, Intr->getOperand(1), Intr->getOperand(2)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
}

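// Creates the pass for use by the AMDGPU target pass pipeline.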
FunctionPass *llvm::createAMDGPUPromoteAlloca(const TargetMachine *TM) {
  return new AMDGPUPromoteAlloca(TM);
}