//===-- NVPTXInferAddressSpace.cpp - ---------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared`, which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero (so-called specific) address spaces to represent
// memory spaces (e.g. addrspace(3) means shared memory). The Clang frontend
// places only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// into
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
// redirected to %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX
// codegen is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to infer as many generic pointers as possible to point to only one
// specific address space. In the above example, it can prove that %1 only
// points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two complications.
//
// First, the data flow analysis in Step 1 needs to be circular. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (our implementation only considers phi, bitcast,
// addrspacecast, and getelementptr) start with the uninitialized address space.
// The monotone transfer function moves the address space of a pointer down a
// lattice path from uninitialized to specific and then to generic. A join
// operation of two different specific address spaces pushes the expression down
// to the generic address space. The analysis completes once it reaches a fixed
// point.
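//
// As a rough illustrative sketch (the concrete rule lives in
// joinAddressSpaces() below; this is not text from the paper), the join
// behaves like:
//   join(uninitialized, AS)        = AS
//   join(AS, AS)                   = AS
//   join(AS1, AS2) with AS1 != AS2 = generic
//   join(generic, anything)        = generic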
//
// Second, IR rewriting in Step 2 also needs to be circular. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this complication,
// we break these cycles using "undef" placeholders. When converting an
// instruction `I` to a new address space, if its operand `Op` is not converted
// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
// For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, undef ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
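// (In the code below, these temporary undef uses are recorded in
// UndefUsesToFix and patched in rewriteWithNewAddressSpaces once all the
// clones have been created.)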
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

#define DEBUG_TYPE "infer-address-spaces"

using namespace llvm;

namespace {
static const unsigned UninitializedAddressSpace = ~0u;
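
// Maps each flat address expression to the address space inferred for it so
// far (UninitializedAddressSpace before the analysis first visits it).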
using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;

/// \brief InferAddressSpaces
class InferAddressSpaces : public FunctionPass {
  /// Target-specific flat (generic) address space. Uses of pointers in this
  /// address space are replaced with specific address spaces where possible.
  unsigned FlatAddrSpace;

public:
  static char ID;

  InferAddressSpaces() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<TargetTransformInfoWrapperPass>();
  }

  bool runOnFunction(Function &F) override;

private:
  // Returns the new address space of V if updated; otherwise, returns None.
  Optional<unsigned>
  updateAddressSpace(const Value &V,
                     const ValueToAddrSpaceMapTy &InferredAddrSpace) const;

  // Tries to infer the specific address space of each address expression in
  // Postorder.
  void inferAddressSpaces(const std::vector<Value *> &Postorder,
                          ValueToAddrSpaceMapTy *InferredAddrSpace) const;

  bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;

  // Changes the flat address expressions in function F to point to specific
  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
  // all flat expressions in the use-def graph of function F.
  bool
  rewriteWithNewAddressSpaces(const std::vector<Value *> &Postorder,
                              const ValueToAddrSpaceMapTy &InferredAddrSpace,
                              Function *F) const;

  void appendsFlatAddressExpressionToPostorderStack(
    Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
    DenseSet<Value *> *Visited) const;

  bool rewriteIntrinsicOperands(IntrinsicInst *II,
                                Value *OldV, Value *NewV) const;
  void collectRewritableIntrinsicOperands(
    IntrinsicInst *II,
    std::vector<std::pair<Value *, bool>> *PostorderStack,
    DenseSet<Value *> *Visited) const;

  std::vector<Value *> collectFlatAddressExpressions(Function &F) const;

  Value *cloneValueWithNewAddressSpace(
    Value *V, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const;
  unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;
};
} // end anonymous namespace

char InferAddressSpaces::ID = 0;

namespace llvm {
void initializeInferAddressSpacesPass(PassRegistry &);
} // end namespace llvm

INITIALIZE_PASS(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
                false, false)

// Returns true if V is an address expression.
// TODO: Currently, we only consider phi, bitcast, addrspacecast,
// getelementptr, and select operators.
static bool isAddressExpression(const Value &V) {
  if (!isa<Operator>(V))
    return false;

  switch (cast<Operator>(V).getOpcode()) {
  case Instruction::PHI:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
  case Instruction::Select:
    return true;
  default:
    return false;
  }
}

// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
static SmallVector<Value *, 2> getPointerOperands(const Value &V) {
  assert(isAddressExpression(V));
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return SmallVector<Value *, 2>(IncomingValues.begin(),
                                   IncomingValues.end());
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  case Instruction::Select:
    return {Op.getOperand(1), Op.getOperand(2)};
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}

// TODO: Move logic to TTI?
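// Rewrites the flat-pointer argument of intrinsic call II from OldV to NewV
// for the intrinsics handled below, remangling the intrinsic declaration where
// needed. Returns true if the call was rewritten.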
bool InferAddressSpaces::rewriteIntrinsicOperands(IntrinsicInst *II,
                                                  Value *OldV,
                                                  Value *NewV) const {
  Module *M = II->getParent()->getParent()->getParent();

  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    const ConstantInt *IsVolatile = dyn_cast<ConstantInt>(II->getArgOperand(4));
    if (!IsVolatile || !IsVolatile->isNullValue())
      return false;

    LLVM_FALLTHROUGH;
  }
  case Intrinsic::objectsize: {
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
      Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  default:
    return false;
  }
}

// TODO: Move logic to TTI?
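// Pushes onto PostorderStack the flat-pointer arguments of intrinsics that
// rewriteIntrinsicOperands knows how to handle, so those arguments participate
// in the inference like ordinary loads and stores.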
void InferAddressSpaces::collectRewritableIntrinsicOperands(
    IntrinsicInst *II, std::vector<std::pair<Value *, bool>> *PostorderStack,
    DenseSet<Value *> *Visited) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::objectsize:
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
    appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
                                                 PostorderStack, Visited);
    break;
  default:
    break;
  }
}

// If V is an unvisited flat address expression, appends V to PostorderStack
// and marks it as visited.
void InferAddressSpaces::appendsFlatAddressExpressionToPostorderStack(
    Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
    DenseSet<Value *> *Visited) const {
  assert(V->getType()->isPointerTy());
  if (isAddressExpression(*V) &&
      V->getType()->getPointerAddressSpace() == FlatAddrSpace) {
    if (Visited->insert(V).second)
      PostorderStack->push_back(std::make_pair(V, false));
  }
}

// Returns all flat address expressions in function F. The elements are ordered
// in postorder.
std::vector<Value *>
InferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
  std::vector<std::pair<Value *, bool>> PostorderStack;
  // The set of visited expressions.
  DenseSet<Value *> Visited;

  auto PushPtrOperand = [&](Value *Ptr) {
    appendsFlatAddressExpressionToPostorderStack(Ptr, &PostorderStack,
                                                 &Visited);
  };

  // We only explore address expressions that are reachable from loads and
  // stores for now because we aim at generating faster loads and stores.
  for (Instruction &I : instructions(F)) {
    if (auto *LI = dyn_cast<LoadInst>(&I))
      PushPtrOperand(LI->getPointerOperand());
    else if (auto *SI = dyn_cast<StoreInst>(&I))
      PushPtrOperand(SI->getPointerOperand());
    else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
      PushPtrOperand(RMW->getPointerOperand());
    else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
      PushPtrOperand(CmpX->getPointerOperand());
    else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
      // For memset/memcpy/memmove, any pointer operand can be replaced.
      PushPtrOperand(MI->getRawDest());

      // Handle 2nd operand for memcpy/memmove.
      if (auto *MTI = dyn_cast<MemTransferInst>(MI))
        PushPtrOperand(MTI->getRawSource());
    } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
      collectRewritableIntrinsicOperands(II, &PostorderStack, &Visited);
    else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) {
      // FIXME: Handle vectors of pointers.
      if (Cmp->getOperand(0)->getType()->isPointerTy()) {
        PushPtrOperand(Cmp->getOperand(0));
        PushPtrOperand(Cmp->getOperand(1));
      }
    }
  }

  std::vector<Value *> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    // If the operands of the expression on the top are already explored,
    // adds that expression to the resultant postorder.
    if (PostorderStack.back().second) {
      Postorder.push_back(PostorderStack.back().first);
      PostorderStack.pop_back();
      continue;
    }

    // Otherwise, adds its operands to the stack and explores them.
    PostorderStack.back().second = true;
    for (Value *PtrOperand : getPointerOperands(*PostorderStack.back().first)) {
      appendsFlatAddressExpressionToPostorderStack(PtrOperand, &PostorderStack,
                                                   &Visited);
    }
  }
  return Postorder;
}

// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
// of OperandUse.get() in the new address space. If the clone is not ready yet,
// returns an undef in the new address space as a placeholder.
static Value *operandWithNewAddressSpaceOrCreateUndef(
    const Use &OperandUse, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Value *Operand = OperandUse.get();

  Type *NewPtrTy =
    Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (Constant *C = dyn_cast<Constant>(Operand))
    return ConstantExpr::getAddrSpaceCast(C, NewPtrTy);

  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
    return NewOperand;

  UndefUsesToFix->push_back(&OperandUse);
  return UndefValue::get(NewPtrTy);
}

// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that the caller can fix them later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns a
// Value* instead of an Instruction*.
static Value *cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Type *NewPtrType =
    I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space, according
    // to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  // Computes the converted pointer operands.
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
        OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
  }

  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
      GEP->getSourceElementType(), NewPointerOperands[0],
      SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  case Instruction::Select: {
    assert(I->getType()->isPointerTy());
    return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
                              NewPointerOperands[2], "", nullptr, I);
  }
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
// constant expression `CE` with its operands replaced as specified in
// ValueWithNewAddrSpace.
static Value *cloneConstantExprWithNewAddressSpace(
    ConstantExpr *CE, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace) {
  Type *TargetType =
    CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    // Because CE is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space according
    // to our algorithm.
    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
           NewAddrSpace);
    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
  }

  if (CE->getOpcode() == Instruction::BitCast) {
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(CE->getOperand(0)))
      return ConstantExpr::getBitCast(cast<Constant>(NewOperand), TargetType);
    return ConstantExpr::getAddrSpaceCast(CE, TargetType);
  }

  if (CE->getOpcode() == Instruction::Select) {
    Constant *Src0 = CE->getOperand(1);
    Constant *Src1 = CE->getOperand(2);
    if (Src0->getType()->getPointerAddressSpace() ==
        Src1->getType()->getPointerAddressSpace()) {
      return ConstantExpr::getSelect(
        CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType),
        ConstantExpr::getAddrSpaceCast(Src1, TargetType));
    }
  }

  // Computes the operands of the new constant expression.
  SmallVector<Constant *, 4> NewOperands;
  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
    Constant *Operand = CE->getOperand(Index);
    // If the address space of `Operand` needs to be modified, the new operand
    // with the new address space should already be in ValueWithNewAddrSpace
    // because (1) the constant expressions we consider (i.e. addrspacecast,
    // bitcast, and getelementptr) do not incur cycles in the data flow graph
    // and (2) this function is called on constant expressions in postorder.
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
      NewOperands.push_back(cast<Constant>(NewOperand));
    } else {
      // Otherwise, reuses the old operand.
      NewOperands.push_back(Operand);
    }
  }

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Needs to specify the source type while constructing a getelementptr
    // constant expression.
    return CE->getWithOperands(
      NewOperands, TargetType, /*OnlyIfReduced=*/false,
      NewOperands[0]->getType()->getPointerElementType());
  }

  return CE->getWithOperands(NewOperands, TargetType);
}

// Returns a clone of the value `V`, with its operands replaced as specified in
// ValueWithNewAddrSpace. This function is called on every flat address
// expression whose address space needs to be modified, in postorder.
//
// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
Value *InferAddressSpaces::cloneValueWithNewAddressSpace(
    Value *V, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  // All values in Postorder are flat address expressions.
  assert(isAddressExpression(*V) &&
         V->getType()->getPointerAddressSpace() == FlatAddrSpace);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Value *NewV = cloneInstructionWithNewAddressSpace(
      I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
    if (Instruction *NewI = dyn_cast<Instruction>(NewV)) {
      if (NewI->getParent() == nullptr) {
        NewI->insertBefore(I);
        NewI->takeName(I);
      }
    }
    return NewV;
  }

  return cloneConstantExprWithNewAddressSpace(
    cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
}

// Defines the join operation on the address space lattice (see the file header
// comments).
unsigned InferAddressSpaces::joinAddressSpaces(unsigned AS1,
                                               unsigned AS2) const {
  if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
    return FlatAddrSpace;

  if (AS1 == UninitializedAddressSpace)
    return AS2;
  if (AS2 == UninitializedAddressSpace)
    return AS1;

  // The join of two different specific address spaces is flat.
  return (AS1 == AS2) ? AS1 : FlatAddrSpace;
}

bool InferAddressSpaces::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  const TargetTransformInfo &TTI =
    getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  FlatAddrSpace = TTI.getFlatAddressSpace();
  if (FlatAddrSpace == UninitializedAddressSpace)
    return false;

  // Collects all flat address expressions in postorder.
  std::vector<Value *> Postorder = collectFlatAddressExpressions(F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
  ValueToAddrSpaceMapTy InferredAddrSpace;
  inferAddressSpaces(Postorder, &InferredAddrSpace);

  // Changes the address spaces of the flat address expressions that are
  // inferred to point to a specific address space.
  return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, &F);
}

void InferAddressSpaces::inferAddressSpaces(
    const std::vector<Value *> &Postorder,
    ValueToAddrSpaceMapTy *InferredAddrSpace) const {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
  for (Value *V : Postorder)
    (*InferredAddrSpace)[V] = UninitializedAddressSpace;

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();

    // Tries to update the address space of the stack top according to the
    // address spaces of its operands.
    DEBUG(dbgs() << "Updating the address space of\n  " << *V << '\n');
    Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
    if (!NewAS.hasValue())
      continue;
    // If an update is made, adds the users of V to the worklist because
    // their address spaces may also be updated.
    DEBUG(dbgs() << "  to " << NewAS.getValue() << '\n');
    (*InferredAddrSpace)[V] = NewAS.getValue();

    for (Value *User : V->users()) {
      // Skip if User is already in the worklist.
      if (Worklist.count(User))
        continue;

      auto Pos = InferredAddrSpace->find(User);
      // Our algorithm only updates the address spaces of flat address
      // expressions, which are those in InferredAddrSpace.
      if (Pos == InferredAddrSpace->end())
        continue;

      // Function updateAddressSpace moves the address space down a lattice
      // path. Therefore, there is nothing to do if User is already inferred as
      // flat (the bottom element in the lattice).
      if (Pos->second == FlatAddrSpace)
        continue;

      Worklist.insert(User);
    }
  }
}

Optional<unsigned> InferAddressSpaces::updateAddressSpace(
    const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) const {
  assert(InferredAddrSpace.count(&V));

  // The new inferred address space equals the join of the address spaces
  // of all its pointer operands.
  unsigned NewAS = UninitializedAddressSpace;

  const Operator &Op = cast<Operator>(V);
  if (Op.getOpcode() == Instruction::Select) {
    Value *Src0 = Op.getOperand(1);
    Value *Src1 = Op.getOperand(2);

    auto I = InferredAddrSpace.find(Src0);
    unsigned Src0AS = (I != InferredAddrSpace.end()) ?
      I->second : Src0->getType()->getPointerAddressSpace();

    auto J = InferredAddrSpace.find(Src1);
    unsigned Src1AS = (J != InferredAddrSpace.end()) ?
      J->second : Src1->getType()->getPointerAddressSpace();

    auto *C0 = dyn_cast<Constant>(Src0);
    auto *C1 = dyn_cast<Constant>(Src1);

    // If one of the inputs is a constant, we may be able to do a constant
    // addrspacecast of it. Defer inferring the address space until the input
    // address space is known.
    if ((C1 && Src0AS == UninitializedAddressSpace) ||
        (C0 && Src1AS == UninitializedAddressSpace))
      return None;

    if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS))
      NewAS = Src1AS;
    else if (C1 && isSafeToCastConstAddrSpace(C1, Src0AS))
      NewAS = Src0AS;
    else
      NewAS = joinAddressSpaces(Src0AS, Src1AS);
  } else {
    for (Value *PtrOperand : getPointerOperands(V)) {
      auto I = InferredAddrSpace.find(PtrOperand);
      unsigned OperandAS = I != InferredAddrSpace.end() ?
        I->second : PtrOperand->getType()->getPointerAddressSpace();

      // join(flat, *) = flat. So we can break if NewAS is already flat.
      NewAS = joinAddressSpaces(NewAS, OperandAS);
      if (NewAS == FlatAddrSpace)
        break;
    }
  }

  unsigned OldAS = InferredAddrSpace.lookup(&V);
  assert(OldAS != FlatAddrSpace);
  if (OldAS == NewAS)
    return None;
  return NewAS;
}

/// \returns true if \p U is the pointer operand of a memory instruction with
/// a single pointer operand that can have its address space changed by simply
/// mutating the use to a new value.
static bool isSimplePointerUseValidToReplace(Use &U) {
  User *Inst = U.getUser();
  unsigned OpNo = U.getOperandNo();

  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return OpNo == LoadInst::getPointerOperandIndex() && !LI->isVolatile();

  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return OpNo == StoreInst::getPointerOperandIndex() && !SI->isVolatile();

  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
    return OpNo == AtomicRMWInst::getPointerOperandIndex() && !RMW->isVolatile();

  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
           !CmpX->isVolatile();
  }

  return false;
}

/// Update memory intrinsic uses that require more complex processing than
/// simple memory instructions. These require re-mangling and may have multiple
/// pointer operands.
static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
                                     Value *NewV) {
  IRBuilder<> B(MI);
  MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa);
  MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias);

  if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
    B.CreateMemSet(NewV, MSI->getValue(),
                   MSI->getLength(), MSI->getAlignment(),
                   false, // isVolatile
                   TBAA, ScopeMD, NoAliasMD);
  } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
    Value *Src = MTI->getRawSource();
    Value *Dest = MTI->getRawDest();

    // Be careful in case this is a self-to-self copy.
    if (Src == OldV)
      Src = NewV;

    if (Dest == OldV)
      Dest = NewV;

    if (isa<MemCpyInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpy(Dest, Src, MTI->getLength(),
                     MTI->getAlignment(),
                     false, // isVolatile
                     TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else {
      assert(isa<MemMoveInst>(MTI));
      B.CreateMemMove(Dest, Src, MTI->getLength(),
                      MTI->getAlignment(),
                      false, // isVolatile
                      TBAA, ScopeMD, NoAliasMD);
    }
  } else
    llvm_unreachable("unhandled MemIntrinsic");

  MI->eraseFromParent();
  return true;
}

// Returns true if it is OK to change the address space of constant \p C with
// a ConstantExpr addrspacecast.
bool InferAddressSpaces::isSafeToCastConstAddrSpace(Constant *C,
                                                    unsigned NewAS) const {
  assert(NewAS != UninitializedAddressSpace);

  unsigned SrcAS = C->getType()->getPointerAddressSpace();
  if (SrcAS == NewAS || isa<UndefValue>(C))
    return true;

  // Prevent illegal casts between different non-flat address spaces.
  if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace)
    return false;

  if (isa<ConstantPointerNull>(C))
    return true;

  if (auto *Op = dyn_cast<Operator>(C)) {
    // If we already have a constant addrspacecast, it should be safe to cast it
    // off.
    if (Op->getOpcode() == Instruction::AddrSpaceCast)
      return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)), NewAS);

    if (Op->getOpcode() == Instruction::IntToPtr &&
        Op->getType()->getPointerAddressSpace() == FlatAddrSpace)
      return true;
  }

  return false;
}
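
// Advances I past any remaining uses that belong to the same user as *I, so
// the rewrite loop below visits each user only once even when that user reads
// the same pointer through several operands.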
static Value::use_iterator skipToNextUser(Value::use_iterator I,
                                          Value::use_iterator End) {
  User *CurUser = I->getUser();
  ++I;
  while (I != End && I->getUser() == CurUser)
    ++I;

  return I;
}

bool InferAddressSpaces::rewriteWithNewAddressSpaces(
    const std::vector<Value *> &Postorder,
    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> UndefUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace(
        V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *UndefUse : UndefUsesToFix) {
    User *V = UndefUse->getUser();
    User *NewV = cast<User>(ValueWithNewAddrSpace.lookup(V));
    unsigned OperandNo = UndefUse->getOperandNo();
    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
  }

  // Replaces the uses of the old address expressions with the new ones.
  for (Value *V : Postorder) {
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    DEBUG(dbgs() << "Replacing the uses of " << *V
                 << "\n  with\n  " << *NewV << '\n');

    Value::use_iterator I, E, Next;
    for (I = V->use_begin(), E = V->use_end(); I != E; ) {
      Use &U = *I;

      // Some users may see the same pointer operand in multiple operands. Skip
      // to the next instruction.
      I = skipToNextUser(I, E);

      if (isSimplePointerUseValidToReplace(U)) {
        // If V is used as the pointer operand of a compatible memory operation,
        // sets the pointer operand to NewV. This replacement does not change
        // the element type, so the resultant load/store is still valid.
        U.set(NewV);
        continue;
      }

      User *CurUser = U.getUser();
      // Handle more complex cases like intrinsics that need to be remangled.
      if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
        if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
          continue;
      }

      if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) {
        if (rewriteIntrinsicOperands(II, V, NewV))
          continue;
      }

      if (isa<Instruction>(CurUser)) {
        if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUser)) {
          // If we can infer that both pointers are in the same addrspace,
          // transform e.g.
          //   %cmp = icmp eq float* %p, %q
          // into
          //   %cmp = icmp eq float addrspace(3)* %new_p, %new_q

          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          int SrcIdx = U.getOperandNo();
          int OtherIdx = (SrcIdx == 0) ? 1 : 0;
          Value *OtherSrc = Cmp->getOperand(OtherIdx);

          if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) {
            if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) {
              Cmp->setOperand(OtherIdx, OtherNewV);
              Cmp->setOperand(SrcIdx, NewV);
              continue;
            }
          }

          // Even if the type mismatches, we can cast the constant.
          if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) {
            if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) {
              Cmp->setOperand(SrcIdx, NewV);
              Cmp->setOperand(OtherIdx,
                ConstantExpr::getAddrSpaceCast(KOtherSrc, NewV->getType()));
              continue;
            }
          }
        }

        // Otherwise, replaces the use with flat(NewV).
        if (Instruction *I = dyn_cast<Instruction>(V)) {
          BasicBlock::iterator InsertPos = std::next(I->getIterator());
          while (isa<PHINode>(InsertPos))
            ++InsertPos;
          U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
        } else {
          U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                               V->getType()));
        }
      }
    }

    if (V->use_empty())
      RecursivelyDeleteTriviallyDeadInstructions(V);
  }

  return true;
}

FunctionPass *llvm::createInferAddressSpacesPass() {
  return new InferAddressSpaces();
}