//===- InferAddressSpaces.cpp - --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared`, which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero (so-called specific) address spaces to represent
// memory spaces (e.g. addrspace(3) means shared memory). The Clang frontend
// places only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
// redirected to %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX codegen
// is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to infer as many generic pointers as possible to point to only one
// specific address space. In the above example, it can prove that %1 only
// points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//   ICCS 2012
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two complications.
//
// First, the data flow analysis in Step 1 needs to be circular. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (our implementation considers phi, bitcast,
// addrspacecast, getelementptr, and select) start with the uninitialized
// address space. The monotone transfer function moves the address space of a
// pointer down a lattice path from uninitialized to specific and then to
// generic. A join operation of two different specific address spaces pushes
// the expression down to the generic address space. The analysis completes
// once it reaches a fixed point.
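//
// As an illustration only (concrete address space numbers are target-defined),
// the fixed point for the loop example above is reached as follows:
//   initially:      %generic.input, %y, %y2 are all "uninitialized"
//   %generic.input: its source operand is in addrspace(3), so it becomes 3
//   %y:             join(3 /*%generic.input*/, uninitialized /*%y2*/) = 3
//   %y2:            join(3 /*%y*/) = 3
//   %y revisited:   join(3, 3) = 3, nothing changes, so the analysis stops
// Had %y2 instead been inferred to a different specific space, say
// addrspace(5), the join would have pushed %y down to the generic (flat)
// address space.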
//
// Second, IR rewriting in Step 2 also needs to be circular. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this complication,
// we break these cycles using "undef" placeholders. When converting an
// instruction `I` to a new address space, if its operand `Op` is not converted
// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
// For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, undef ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

#define DEBUG_TYPE "infer-address-spaces"

using namespace llvm;

static const unsigned UninitializedAddressSpace =
    std::numeric_limits<unsigned>::max();

namespace {

using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;

/// \brief InferAddressSpaces
class InferAddressSpaces : public FunctionPass {
  /// Target-specific address space whose uses should be replaced if possible.
  unsigned FlatAddrSpace;

public:
  static char ID;

  InferAddressSpaces() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<TargetTransformInfoWrapperPass>();
  }

  bool runOnFunction(Function &F) override;

private:
  // Returns the new address space of V if updated; otherwise, returns None.
  Optional<unsigned>
  updateAddressSpace(const Value &V,
                     const ValueToAddrSpaceMapTy &InferredAddrSpace) const;

  // Tries to infer the specific address space of each address expression in
  // Postorder.
  void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
                          ValueToAddrSpaceMapTy *InferredAddrSpace) const;

  bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;

  // Changes the flat address expressions in function F to point to specific
  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
  // all flat expressions in the use-def graph of function F.
  bool rewriteWithNewAddressSpaces(
      const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
      const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const;

  void appendsFlatAddressExpressionToPostorderStack(
      Value *V, std::vector<std::pair<Value *, bool>> &PostorderStack,
      DenseSet<Value *> &Visited) const;

  bool rewriteIntrinsicOperands(IntrinsicInst *II,
                                Value *OldV, Value *NewV) const;
  void collectRewritableIntrinsicOperands(
      IntrinsicInst *II,
      std::vector<std::pair<Value *, bool>> &PostorderStack,
      DenseSet<Value *> &Visited) const;

  std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const;

  Value *cloneValueWithNewAddressSpace(
      Value *V, unsigned NewAddrSpace,
      const ValueToValueMapTy &ValueWithNewAddrSpace,
      SmallVectorImpl<const Use *> *UndefUsesToFix) const;
  unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;
};

} // end anonymous namespace

char InferAddressSpaces::ID = 0;

namespace llvm {

void initializeInferAddressSpacesPass(PassRegistry &);

} // end namespace llvm

INITIALIZE_PASS(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
                false, false)

// Returns true if V is an address expression.
// TODO: Currently, we consider only phi, bitcast, addrspacecast,
// getelementptr, and select operators.
static bool isAddressExpression(const Value &V) {
  if (!isa<Operator>(V))
    return false;

  switch (cast<Operator>(V).getOpcode()) {
  case Instruction::PHI:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
  case Instruction::Select:
    return true;
  default:
    return false;
  }
}

// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
static SmallVector<Value *, 2> getPointerOperands(const Value &V) {
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return SmallVector<Value *, 2>(IncomingValues.begin(),
                                   IncomingValues.end());
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  case Instruction::Select:
    return {Op.getOperand(1), Op.getOperand(2)};
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}

// TODO: Move logic to TTI?
bool InferAddressSpaces::rewriteIntrinsicOperands(IntrinsicInst *II,
                                                  Value *OldV,
                                                  Value *NewV) const {
  Module *M = II->getParent()->getParent()->getParent();

  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    const ConstantInt *IsVolatile = dyn_cast<ConstantInt>(II->getArgOperand(4));
    if (!IsVolatile || !IsVolatile->isZero())
      return false;

    LLVM_FALLTHROUGH;
  }
  case Intrinsic::objectsize: {
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  default:
    return false;
  }
}

// TODO: Move logic to TTI?
void InferAddressSpaces::collectRewritableIntrinsicOperands(
    IntrinsicInst *II, std::vector<std::pair<Value *, bool>> &PostorderStack,
    DenseSet<Value *> &Visited) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::objectsize:
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
    appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
                                                 PostorderStack, Visited);
    break;
  default:
    break;
  }
}

// If V is an unvisited flat address expression, appends V to PostorderStack
// and marks it as visited.
void InferAddressSpaces::appendsFlatAddressExpressionToPostorderStack(
    Value *V, std::vector<std::pair<Value *, bool>> &PostorderStack,
    DenseSet<Value *> &Visited) const {
  assert(V->getType()->isPointerTy());

  // Generic addressing expressions may be hidden in nested constant
  // expressions.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // TODO: Look in non-address parts, like icmp operands.
    if (isAddressExpression(*CE) && Visited.insert(CE).second)
      PostorderStack.push_back(std::make_pair(CE, false));

    return;
  }

  if (isAddressExpression(*V) &&
      V->getType()->getPointerAddressSpace() == FlatAddrSpace) {
    if (Visited.insert(V).second) {
      PostorderStack.push_back(std::make_pair(V, false));

      Operator *Op = cast<Operator>(V);
      for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) {
        if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op->getOperand(I))) {
          if (isAddressExpression(*CE) && Visited.insert(CE).second)
            PostorderStack.emplace_back(CE, false);
        }
      }
    }
  }
}

// Returns all flat address expressions in function F. The elements are
// ordered in postorder.
std::vector<WeakTrackingVH>
InferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
  std::vector<std::pair<Value *, bool>> PostorderStack;
  // The set of visited expressions.
  DenseSet<Value *> Visited;

  auto PushPtrOperand = [&](Value *Ptr) {
    appendsFlatAddressExpressionToPostorderStack(Ptr, PostorderStack,
                                                 Visited);
  };

  // Look at operations that may be interesting to accelerate by moving to a
  // known address space. We aim at loads and stores, but pure addressing
  // calculations may also be faster.
  for (Instruction &I : instructions(F)) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      if (!GEP->getType()->isVectorTy())
        PushPtrOperand(GEP->getPointerOperand());
    } else if (auto *LI = dyn_cast<LoadInst>(&I))
      PushPtrOperand(LI->getPointerOperand());
    else if (auto *SI = dyn_cast<StoreInst>(&I))
      PushPtrOperand(SI->getPointerOperand());
    else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
      PushPtrOperand(RMW->getPointerOperand());
    else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
      PushPtrOperand(CmpX->getPointerOperand());
    else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
      // For memset/memcpy/memmove, any pointer operand can be replaced.
      PushPtrOperand(MI->getRawDest());

      // Handle 2nd operand for memcpy/memmove.
      if (auto *MTI = dyn_cast<MemTransferInst>(MI))
        PushPtrOperand(MTI->getRawSource());
    } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
      collectRewritableIntrinsicOperands(II, PostorderStack, Visited);
    else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) {
      // FIXME: Handle vectors of pointers.
      if (Cmp->getOperand(0)->getType()->isPointerTy()) {
        PushPtrOperand(Cmp->getOperand(0));
        PushPtrOperand(Cmp->getOperand(1));
      }
    } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
      if (!ASC->getType()->isVectorTy())
        PushPtrOperand(ASC->getPointerOperand());
    }
  }

  std::vector<WeakTrackingVH> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    Value *TopVal = PostorderStack.back().first;
    // If the operands of the expression on the top are already explored,
    // adds that expression to the resultant postorder.
    if (PostorderStack.back().second) {
      if (TopVal->getType()->getPointerAddressSpace() == FlatAddrSpace)
        Postorder.push_back(TopVal);
      PostorderStack.pop_back();
      continue;
    }
    // Otherwise, adds its operands to the stack and explores them.
    PostorderStack.back().second = true;
    for (Value *PtrOperand : getPointerOperands(*TopVal)) {
      appendsFlatAddressExpressionToPostorderStack(PtrOperand, PostorderStack,
                                                   Visited);
    }
  }

  return Postorder;
}

// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
// of OperandUse.get() in the new address space. If the clone is not ready yet,
// returns an undef in the new address space as a placeholder.
static Value *operandWithNewAddressSpaceOrCreateUndef(
    const Use &OperandUse, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Value *Operand = OperandUse.get();

  Type *NewPtrTy =
      Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (Constant *C = dyn_cast<Constant>(Operand))
    return ConstantExpr::getAddrSpaceCast(C, NewPtrTy);

  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
    return NewOperand;

  UndefUsesToFix->push_back(&OperandUse);
  return UndefValue::get(NewPtrTy);
}

// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that the caller can fix them
// later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns
// a Value* instead of an Instruction*.
static Value *cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Type *NewPtrType =
      I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space, according
    // to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  // Computes the converted pointer operands.
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
  }

  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), NewPointerOperands[0],
        SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  case Instruction::Select:
    assert(I->getType()->isPointerTy());
    return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
                              NewPointerOperands[2], "", nullptr, I);
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
// constant expression `CE` with its operands replaced as specified in
// ValueWithNewAddrSpace.
static Value *cloneConstantExprWithNewAddressSpace(
    ConstantExpr *CE, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace) {
  Type *TargetType =
      CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    // Because CE is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space according
    // to our algorithm.
    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
           NewAddrSpace);
    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
  }

  if (CE->getOpcode() == Instruction::BitCast) {
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(CE->getOperand(0)))
      return ConstantExpr::getBitCast(cast<Constant>(NewOperand), TargetType);
    return ConstantExpr::getAddrSpaceCast(CE, TargetType);
  }

  if (CE->getOpcode() == Instruction::Select) {
    Constant *Src0 = CE->getOperand(1);
    Constant *Src1 = CE->getOperand(2);
    if (Src0->getType()->getPointerAddressSpace() ==
        Src1->getType()->getPointerAddressSpace()) {
      return ConstantExpr::getSelect(
          CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType),
          ConstantExpr::getAddrSpaceCast(Src1, TargetType));
    }
  }

  // Computes the operands of the new constant expression.
  bool IsNew = false;
  SmallVector<Constant *, 4> NewOperands;
  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
    Constant *Operand = CE->getOperand(Index);
    // If the address space of `Operand` needs to be modified, the new operand
    // with the new address space should already be in ValueWithNewAddrSpace
    // because (1) the constant expressions we consider (i.e. addrspacecast,
    // bitcast, and getelementptr) do not incur cycles in the data flow graph
    // and (2) this function is called on constant expressions in postorder.
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
      IsNew = true;
      NewOperands.push_back(cast<Constant>(NewOperand));
    } else {
      // Otherwise, reuses the old operand.
      NewOperands.push_back(Operand);
    }
  }

  // If !IsNew, we will replace the Value with itself. However, replaced values
  // are assumed to be wrapped in an addrspacecast later, so drop it now.
  if (!IsNew)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Needs to specify the source type while constructing a getelementptr
    // constant expression.
    return CE->getWithOperands(
        NewOperands, TargetType, /*OnlyIfReduced=*/false,
        NewOperands[0]->getType()->getPointerElementType());
  }

  return CE->getWithOperands(NewOperands, TargetType);
}

// Returns a clone of the value `V`, with its operands replaced as specified in
// ValueWithNewAddrSpace. This function is called on every flat address
// expression whose address space needs to be modified, in postorder.
//
// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
Value *InferAddressSpaces::cloneValueWithNewAddressSpace(
    Value *V, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  // All values in Postorder are flat address expressions.
  assert(isAddressExpression(*V) &&
         V->getType()->getPointerAddressSpace() == FlatAddrSpace);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Value *NewV = cloneInstructionWithNewAddressSpace(
        I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
    if (Instruction *NewI = dyn_cast<Instruction>(NewV)) {
      if (NewI->getParent() == nullptr) {
        NewI->insertBefore(I);
        NewI->takeName(I);
      }
    }
    return NewV;
  }

  return cloneConstantExprWithNewAddressSpace(
      cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
}

// Defines the join operation on the address space lattice (see the file header
// comments).
unsigned InferAddressSpaces::joinAddressSpaces(unsigned AS1,
                                               unsigned AS2) const {
  if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
    return FlatAddrSpace;

  if (AS1 == UninitializedAddressSpace)
    return AS2;
  if (AS2 == UninitializedAddressSpace)
    return AS1;

  // The join of two different specific address spaces is flat.
  return (AS1 == AS2) ? AS1 : FlatAddrSpace;
}

bool InferAddressSpaces::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  const TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  FlatAddrSpace = TTI.getFlatAddressSpace();
  if (FlatAddrSpace == UninitializedAddressSpace)
    return false;

  // Collects all flat address expressions in postorder.
  std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
  ValueToAddrSpaceMapTy InferredAddrSpace;
  inferAddressSpaces(Postorder, &InferredAddrSpace);

  // Changes the address spaces of the flat address expressions that are
  // inferred to point to a specific address space.
  return rewriteWithNewAddressSpaces(TTI, Postorder, InferredAddrSpace, &F);
}

// Constants need to be tracked through RAUW to handle cases with nested
// constant expressions, so wrap values in WeakTrackingVH.
void InferAddressSpaces::inferAddressSpaces(
    ArrayRef<WeakTrackingVH> Postorder,
    ValueToAddrSpaceMapTy *InferredAddrSpace) const {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
  for (Value *V : Postorder)
    (*InferredAddrSpace)[V] = UninitializedAddressSpace;

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();

    // Tries to update the address space of the stack top according to the
    // address spaces of its operands.
    DEBUG(dbgs() << "Updating the address space of\n  " << *V << '\n');
    Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
    if (!NewAS.hasValue())
      continue;

    // If any update is made, adds the users of V to the worklist because
    // their address spaces can possibly be updated as well.
    DEBUG(dbgs() << "  to " << NewAS.getValue() << '\n');
    (*InferredAddrSpace)[V] = NewAS.getValue();

    for (Value *User : V->users()) {
      // Skip if User is already in the worklist.
      if (Worklist.count(User))
        continue;

      auto Pos = InferredAddrSpace->find(User);
      // Our algorithm only updates the address spaces of flat address
      // expressions, which are those in InferredAddrSpace.
      if (Pos == InferredAddrSpace->end())
        continue;

      // Function updateAddressSpace moves the address space down a lattice
      // path. Therefore, nothing to do if User is already inferred as flat
      // (the bottom element in the lattice).
      if (Pos->second == FlatAddrSpace)
        continue;

      Worklist.insert(User);
    }
  }
}
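
// A worked example of the select handling below (for illustration only; the
// concrete address space numbers are target-defined): given
//   %s = select i1 %c, float* %p, float* null
// where %p has been inferred to point to, say, addrspace(3), the constant null
// can be safely addrspacecast'ed to addrspace(3), so %s is inferred as
// addrspace(3) rather than being pushed down to the flat address space.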
Optional<unsigned> InferAddressSpaces::updateAddressSpace(
    const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) const {
  assert(InferredAddrSpace.count(&V));

  // The new inferred address space equals the join of the address spaces
  // of all its pointer operands.
  unsigned NewAS = UninitializedAddressSpace;

  const Operator &Op = cast<Operator>(V);
  if (Op.getOpcode() == Instruction::Select) {
    Value *Src0 = Op.getOperand(1);
    Value *Src1 = Op.getOperand(2);

    auto I = InferredAddrSpace.find(Src0);
    unsigned Src0AS = (I != InferredAddrSpace.end()) ?
      I->second : Src0->getType()->getPointerAddressSpace();

    auto J = InferredAddrSpace.find(Src1);
    unsigned Src1AS = (J != InferredAddrSpace.end()) ?
      J->second : Src1->getType()->getPointerAddressSpace();

    auto *C0 = dyn_cast<Constant>(Src0);
    auto *C1 = dyn_cast<Constant>(Src1);

    // If one of the inputs is a constant, we may be able to do a constant
    // addrspacecast of it. Defer inferring the address space until the input
    // address space is known.
    if ((C1 && Src0AS == UninitializedAddressSpace) ||
        (C0 && Src1AS == UninitializedAddressSpace))
      return None;

    if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS))
      NewAS = Src1AS;
    else if (C1 && isSafeToCastConstAddrSpace(C1, Src0AS))
      NewAS = Src0AS;
    else
      NewAS = joinAddressSpaces(Src0AS, Src1AS);
  } else {
    for (Value *PtrOperand : getPointerOperands(V)) {
      auto I = InferredAddrSpace.find(PtrOperand);
      unsigned OperandAS = I != InferredAddrSpace.end() ?
        I->second : PtrOperand->getType()->getPointerAddressSpace();

      // join(flat, *) = flat. So we can break if NewAS is already flat.
      NewAS = joinAddressSpaces(NewAS, OperandAS);
      if (NewAS == FlatAddrSpace)
        break;
    }
  }

  unsigned OldAS = InferredAddrSpace.lookup(&V);
  assert(OldAS != FlatAddrSpace);
  if (OldAS == NewAS)
    return None;
  return NewAS;
}

/// Returns true if \p U is the pointer operand of a memory instruction with a
/// single pointer operand that can have its address space changed by simply
/// mutating the use to a new value. If the memory instruction is volatile,
/// return true only if the target allows the memory instruction to be volatile
/// in the new address space.
static bool isSimplePointerUseValidToReplace(const TargetTransformInfo &TTI,
                                             Use &U, unsigned AddrSpace) {
  User *Inst = U.getUser();
  unsigned OpNo = U.getOperandNo();
  bool VolatileIsAllowed = false;
  if (auto *I = dyn_cast<Instruction>(Inst))
    VolatileIsAllowed = TTI.hasVolatileVariant(I, AddrSpace);

  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return OpNo == LoadInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !LI->isVolatile());

  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return OpNo == StoreInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !SI->isVolatile());

  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
    return OpNo == AtomicRMWInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !RMW->isVolatile());

  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst))
    return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !CmpX->isVolatile());

  return false;
}

/// Update memory intrinsic uses that require more complex processing than
/// simple memory instructions. These require re-mangling and may have multiple
/// pointer operands.
static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
                                     Value *NewV) {
  IRBuilder<> B(MI);
  MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa);
  MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias);

  if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
    B.CreateMemSet(NewV, MSI->getValue(),
                   MSI->getLength(), MSI->getAlignment(),
                   false, // isVolatile
                   TBAA, ScopeMD, NoAliasMD);
  } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
    Value *Src = MTI->getRawSource();
    Value *Dest = MTI->getRawDest();

    // Be careful in case this is a self-to-self copy.
    if (Src == OldV)
      Src = NewV;

    if (Dest == OldV)
      Dest = NewV;

    if (isa<MemCpyInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpy(Dest, Src, MTI->getLength(),
                     MTI->getAlignment(),
                     false, // isVolatile
                     TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else {
      assert(isa<MemMoveInst>(MTI));
      B.CreateMemMove(Dest, Src, MTI->getLength(),
                      MTI->getAlignment(),
                      false, // isVolatile
                      TBAA, ScopeMD, NoAliasMD);
    }
  } else
    llvm_unreachable("unhandled MemIntrinsic");

  MI->eraseFromParent();
  return true;
}

// Returns true if it is OK to change the address space of constant \p C with
// a ConstantExpr addrspacecast.
bool InferAddressSpaces::isSafeToCastConstAddrSpace(Constant *C,
                                                    unsigned NewAS) const {
  assert(NewAS != UninitializedAddressSpace);

  unsigned SrcAS = C->getType()->getPointerAddressSpace();
  if (SrcAS == NewAS || isa<UndefValue>(C))
    return true;

  // Prevent illegal casts between different non-flat address spaces.
  if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace)
    return false;

  if (isa<ConstantPointerNull>(C))
    return true;

  if (auto *Op = dyn_cast<Operator>(C)) {
    // If we already have a constant addrspacecast, it should be safe to cast
    // it off.
    if (Op->getOpcode() == Instruction::AddrSpaceCast)
      return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)),
                                        NewAS);

    if (Op->getOpcode() == Instruction::IntToPtr &&
        Op->getType()->getPointerAddressSpace() == FlatAddrSpace)
      return true;
  }

  return false;
}

static Value::use_iterator skipToNextUser(Value::use_iterator I,
                                          Value::use_iterator End) {
  User *CurUser = I->getUser();
  ++I;

  while (I != End && I->getUser() == CurUser)
    ++I;

  return I;
}

bool InferAddressSpaces::rewriteWithNewAddressSpaces(
    const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> UndefUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace(
          V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *UndefUse : UndefUsesToFix) {
    User *V = UndefUse->getUser();
    User *NewV = cast<User>(ValueWithNewAddrSpace.lookup(V));
    unsigned OperandNo = UndefUse->getOperandNo();
    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
  }

  SmallVector<Instruction *, 16> DeadInstructions;

  // Replaces the uses of the old address expressions with the new ones.
  for (const WeakTrackingVH &WVH : Postorder) {
    assert(WVH && "value was unexpectedly deleted");
    Value *V = WVH;
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    DEBUG(dbgs() << "Replacing the uses of " << *V
                 << "\n  with\n  " << *NewV << '\n');

    if (Constant *C = dyn_cast<Constant>(V)) {
      Constant *Replace = ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                                         C->getType());
      if (C != Replace) {
        DEBUG(dbgs() << "Inserting replacement const cast: "
              << Replace << ": " << *Replace << '\n');
        C->replaceAllUsesWith(Replace);
        V = Replace;
      }
    }

    Value::use_iterator I, E, Next;
    for (I = V->use_begin(), E = V->use_end(); I != E; ) {
      Use &U = *I;

      // Some users may see the same pointer operand in multiple operands. Skip
      // to the next instruction.
      I = skipToNextUser(I, E);

      if (isSimplePointerUseValidToReplace(
              TTI, U, V->getType()->getPointerAddressSpace())) {
        // If V is used as the pointer operand of a compatible memory operation,
        // sets the pointer operand to NewV. This replacement does not change
        // the element type, so the resultant load/store is still valid.
        U.set(NewV);
        continue;
      }

      User *CurUser = U.getUser();
      // Handle more complex cases like intrinsics that need to be remangled.
      if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
        if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
          continue;
      }

      if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) {
        if (rewriteIntrinsicOperands(II, V, NewV))
          continue;
      }

      if (isa<Instruction>(CurUser)) {
        if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUser)) {
          // If we can infer that both pointers are in the same addrspace,
          // transform, e.g.,
          //   %cmp = icmp eq float* %p, %q
          // into
          //   %cmp = icmp eq float addrspace(3)* %new_p, %new_q

          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          int SrcIdx = U.getOperandNo();
          int OtherIdx = (SrcIdx == 0) ? 1 : 0;
          Value *OtherSrc = Cmp->getOperand(OtherIdx);

          if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) {
            if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) {
              Cmp->setOperand(OtherIdx, OtherNewV);
              Cmp->setOperand(SrcIdx, NewV);
              continue;
            }
          }

          // Even if the type mismatches, we can cast the constant.
          if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) {
            if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) {
              Cmp->setOperand(SrcIdx, NewV);
              Cmp->setOperand(OtherIdx,
                  ConstantExpr::getAddrSpaceCast(KOtherSrc, NewV->getType()));
              continue;
            }
          }
        }

        if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(CurUser)) {
          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          if (ASC->getDestAddressSpace() == NewAS) {
            if (ASC->getType()->getPointerElementType() !=
                NewV->getType()->getPointerElementType()) {
              NewV = CastInst::Create(Instruction::BitCast, NewV,
                                      ASC->getType(), "", ASC);
            }
            ASC->replaceAllUsesWith(NewV);
            DeadInstructions.push_back(ASC);
            continue;
          }
        }

        // Otherwise, replaces the use with flat(NewV).
        if (Instruction *I = dyn_cast<Instruction>(V)) {
          BasicBlock::iterator InsertPos = std::next(I->getIterator());
          while (isa<PHINode>(InsertPos))
            ++InsertPos;
          U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
        } else {
          U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                               V->getType()));
        }
      }
    }

    if (V->use_empty()) {
      if (Instruction *I = dyn_cast<Instruction>(V))
        DeadInstructions.push_back(I);
    }
  }

  for (Instruction *I : DeadInstructions)
    RecursivelyDeleteTriviallyDeadInstructions(I);

  return true;
}

FunctionPass *llvm::createInferAddressSpacesPass() {
  return new InferAddressSpaces();
}