//===-- NVPTXInferAddressSpaces.cpp - --------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared`, which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero address spaces (so-called specific address spaces) to
// represent memory spaces (e.g. addrspace(3) means shared memory). The Clang
// frontend places only type-qualified variables in specific address spaces, and
// then conservatively `addrspacecast`s each type-qualified variable to
// addrspace(0) (the so-called generic address space) for other instructions to
// use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
// redirected to %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX codegen
// is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to prove that as many generic pointers as possible point to only
// one specific address space. In the above example, it can prove that %1 only
// points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//   in Proceedings of the International Conference on Computational Science
//   (ICCS), 2012
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two complications.
//
// First, the data flow analysis in Step 1 needs to be circular. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (our implementation only considers phi, bitcast,
// addrspacecast, and getelementptr) start with the uninitialized address space.
// The monotone transfer function moves the address space of a pointer down a
// lattice path from uninitialized to specific and then to generic. A join
// operation of two different specific address spaces pushes the expression down
// to the generic address space. The analysis completes once it reaches a fixed
// point.
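// For the loop above, %y and %y2 start uninitialized; joining in
// %generic.input (which comes from addrspace(3)) moves %y to addrspace(3),
// which in turn moves %y2 to addrspace(3), after which nothing changes and the
// fixed point is reached.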
//
// Second, IR rewriting in Step 2 also needs to be circular. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this complication,
// we break these cycles using "undef" placeholders. When converting an
// instruction `I` to a new address space, if its operand `Op` is not converted
// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
// For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, undef ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
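// The undef placeholders introduced this way are tracked in UndefUsesToFix
// (see below) and patched once all the clones exist.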
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "nvptx-infer-addrspace"

#include "NVPTX.h"
#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

using namespace llvm;

namespace {
const unsigned ADDRESS_SPACE_UNINITIALIZED = (unsigned)-1;

using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;

/// \brief NVPTXInferAddressSpaces infers the specific address spaces of
/// generic address expressions, as described in the file header.
class NVPTXInferAddressSpaces : public FunctionPass {
public:
  static char ID;

  NVPTXInferAddressSpaces() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

private:
  // Returns the new address space of V if updated; otherwise, returns None.
  Optional<unsigned>
  updateAddressSpace(const Value &V,
                     const ValueToAddrSpaceMapTy &InferredAddrSpace);

  // Tries to infer the specific address space of each address expression in
  // Postorder.
  void inferAddressSpaces(const std::vector<Value *> &Postorder,
                          ValueToAddrSpaceMapTy *InferredAddrSpace);

  // Changes the generic address expressions in function F to point to specific
  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
  // all generic address expressions in the use-def graph of function F.
  bool
  rewriteWithNewAddressSpaces(const std::vector<Value *> &Postorder,
                              const ValueToAddrSpaceMapTy &InferredAddrSpace,
                              Function *F);
};
} // end anonymous namespace

char NVPTXInferAddressSpaces::ID = 0;

namespace llvm {
void initializeNVPTXInferAddressSpacesPass(PassRegistry &);
}
INITIALIZE_PASS(NVPTXInferAddressSpaces, "nvptx-infer-addrspace",
                "Infer address spaces",
                false, false)
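// Note: because the pass is registered above, an `opt` binary built with the
// NVPTX backend should also be able to invoke it directly via
// -nvptx-infer-addrspace.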

// Returns true if V is an address expression.
// TODO: Currently, we consider only phi, bitcast, addrspacecast, and
// getelementptr operators.
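// Other pointer-producing operators (e.g. select) are not treated as address
// expressions, so pointers flowing through them conservatively stay generic.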
static bool isAddressExpression(const Value &V) {
  if (!isa<Operator>(V))
    return false;

  switch (cast<Operator>(V).getOpcode()) {
  case Instruction::PHI:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
static SmallVector<Value *, 2> getPointerOperands(const Value &V) {
  assert(isAddressExpression(V));
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return SmallVector<Value *, 2>(IncomingValues.begin(),
                                   IncomingValues.end());
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}

// If V is an unvisited generic address expression, appends V to PostorderStack
// and marks it as visited.
static void appendsGenericAddressExpressionToPostorderStack(
    Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
    DenseSet<Value *> *Visited) {
  assert(V->getType()->isPointerTy());
  if (isAddressExpression(*V) &&
      V->getType()->getPointerAddressSpace() ==
          AddressSpace::ADDRESS_SPACE_GENERIC) {
    if (Visited->insert(V).second)
      PostorderStack->push_back(std::make_pair(V, false));
  }
}

// Returns all generic address expressions in function F. The elements are
// ordered in postorder.
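// Except where PHI nodes form cycles, this order guarantees that an expression
// appears after its generic pointer operands, which the rewriting step below
// relies on; the remaining cycles are broken with undef placeholders (see
// cloneInstructionWithNewAddressSpace).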
static std::vector<Value *> collectGenericAddressExpressions(Function &F) {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
  std::vector<std::pair<Value *, bool>> PostorderStack;
  // The set of visited expressions.
  DenseSet<Value *> Visited;
  // We only explore address expressions that are reachable from loads and
  // stores for now because we aim at generating faster loads and stores.
  for (Instruction &I : instructions(F)) {
    if (isa<LoadInst>(I)) {
      appendsGenericAddressExpressionToPostorderStack(
          I.getOperand(0), &PostorderStack, &Visited);
    } else if (isa<StoreInst>(I)) {
      appendsGenericAddressExpressionToPostorderStack(
          I.getOperand(1), &PostorderStack, &Visited);
    }
  }

  std::vector<Value *> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    // If the operands of the expression on the top are already explored,
    // adds that expression to the resultant postorder.
    if (PostorderStack.back().second) {
      Postorder.push_back(PostorderStack.back().first);
      PostorderStack.pop_back();
      continue;
    }
    // Otherwise, adds its operands to the stack and explores them.
    PostorderStack.back().second = true;
    for (Value *PtrOperand : getPointerOperands(*PostorderStack.back().first)) {
      appendsGenericAddressExpressionToPostorderStack(
          PtrOperand, &PostorderStack, &Visited);
    }
  }
  return Postorder;
}

// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
// of OperandUse.get() in the new address space. If the clone is not ready yet,
// returns an undef in the new address space as a placeholder.
static Value *operandWithNewAddressSpaceOrCreateUndef(
    const Use &OperandUse, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Value *Operand = OperandUse.get();
  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
    return NewOperand;

  UndefUsesToFix->push_back(&OperandUse);
  return UndefValue::get(
      Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace));
}

// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that the caller can fix them
// later.
//
// Note that we do not necessarily clone `I`; e.g., if it is an addrspacecast
// whose source operand already has the desired type, we simply return the
// source. Therefore, this function returns a Value* instead of an
// Instruction*.
static Value *cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Type *NewPtrType =
      I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is generic, the source address space must be specific.
    // Therefore, the inferred address space must be the source space, according
    // to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  // Computes the converted pointer operands.
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
  }

  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), NewPointerOperands[0],
        SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
// constant expression `CE` with its operands replaced as specified in
// ValueWithNewAddrSpace.
static Value *cloneConstantExprWithNewAddressSpace(
    ConstantExpr *CE, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace) {
  Type *TargetType =
      CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    // Because CE is generic, the source address space must be specific.
    // Therefore, the inferred address space must be the source space according
    // to our algorithm.
    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
           NewAddrSpace);
    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
  }

  // Computes the operands of the new constant expression.
  SmallVector<Constant *, 4> NewOperands;
  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
    Constant *Operand = CE->getOperand(Index);
    // If the address space of `Operand` needs to be modified, the new operand
    // with the new address space should already be in ValueWithNewAddrSpace
    // because (1) the constant expressions we consider (i.e. addrspacecast,
    // bitcast, and getelementptr) do not incur cycles in the data flow graph
    // and (2) this function is called on constant expressions in postorder.
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
      NewOperands.push_back(cast<Constant>(NewOperand));
    } else {
      // Otherwise, reuses the old operand.
      NewOperands.push_back(Operand);
    }
  }

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Needs to specify the source type while constructing a getelementptr
    // constant expression.
    return CE->getWithOperands(
        NewOperands, TargetType, /*OnlyIfReduced=*/false,
        NewOperands[0]->getType()->getPointerElementType());
  }

  return CE->getWithOperands(NewOperands, TargetType);
}

// Returns a clone of the value `V`, with its operands replaced as specified in
// ValueWithNewAddrSpace. This function is called on every generic address
// expression whose address space needs to be modified, in postorder.
//
// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
static Value *
cloneValueWithNewAddressSpace(Value *V, unsigned NewAddrSpace,
                              const ValueToValueMapTy &ValueWithNewAddrSpace,
                              SmallVectorImpl<const Use *> *UndefUsesToFix) {
  // All values in Postorder are generic address expressions.
  assert(isAddressExpression(*V) &&
         V->getType()->getPointerAddressSpace() ==
             AddressSpace::ADDRESS_SPACE_GENERIC);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Value *NewV = cloneInstructionWithNewAddressSpace(
        I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
    if (Instruction *NewI = dyn_cast<Instruction>(NewV)) {
      // A newly created instruction has no parent yet; insert it right before
      // the original and take over its name.
      if (NewI->getParent() == nullptr) {
        NewI->insertBefore(I);
        NewI->takeName(I);
      }
    }
    return NewV;
  }

  return cloneConstantExprWithNewAddressSpace(
      cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
}

// Defines the join operation on the address space lattice (see the file header
// comments).
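// For example, joining shared with shared yields shared, joining shared with
// global yields generic, and joining uninitialized with any space yields that
// space.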
static unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) {
  if (AS1 == AddressSpace::ADDRESS_SPACE_GENERIC ||
      AS2 == AddressSpace::ADDRESS_SPACE_GENERIC)
    return AddressSpace::ADDRESS_SPACE_GENERIC;

  if (AS1 == ADDRESS_SPACE_UNINITIALIZED)
    return AS2;
  if (AS2 == ADDRESS_SPACE_UNINITIALIZED)
    return AS1;

  // The join of two different specific address spaces is generic.
  return AS1 == AS2 ? AS1 : (unsigned)AddressSpace::ADDRESS_SPACE_GENERIC;
}

bool NVPTXInferAddressSpaces::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  // Collects all generic address expressions in postorder.
  std::vector<Value *> Postorder = collectGenericAddressExpressions(F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
  ValueToAddrSpaceMapTy InferredAddrSpace;
  inferAddressSpaces(Postorder, &InferredAddrSpace);

  // Changes the address spaces of the generic address expressions that are
  // inferred to point to a specific address space.
  return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, &F);
}

void NVPTXInferAddressSpaces::inferAddressSpaces(
    const std::vector<Value *> &Postorder,
    ValueToAddrSpaceMapTy *InferredAddrSpace) {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
  for (Value *V : Postorder)
    (*InferredAddrSpace)[V] = ADDRESS_SPACE_UNINITIALIZED;

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();

    // Tries to update the address space of V according to the address spaces
    // of its operands.
    DEBUG(dbgs() << "Updating the address space of\n"
                 << " " << *V << "\n");
    Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
    if (!NewAS.hasValue())
      continue;
    // If any update is made, adds the users of V to the worklist because
    // their address spaces may change as well.
    DEBUG(dbgs() << " to " << NewAS.getValue() << "\n");
    (*InferredAddrSpace)[V] = NewAS.getValue();

    for (Value *User : V->users()) {
      // Skip if User is already in the worklist.
      if (Worklist.count(User))
        continue;

      auto Pos = InferredAddrSpace->find(User);
      // Our algorithm only updates the address spaces of generic address
      // expressions, which are those in InferredAddrSpace.
      if (Pos == InferredAddrSpace->end())
        continue;

      // Function updateAddressSpace moves the address space down a lattice
      // path. Therefore, there is nothing to do if User is already inferred
      // as generic (the bottom element in the lattice).
      if (Pos->second == AddressSpace::ADDRESS_SPACE_GENERIC)
        continue;

      Worklist.insert(User);
    }
  }
}

Optional<unsigned> NVPTXInferAddressSpaces::updateAddressSpace(
    const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) {
  assert(InferredAddrSpace.count(&V));

  // The new inferred address space equals the join of the address spaces
  // of all its pointer operands.
  unsigned NewAS = ADDRESS_SPACE_UNINITIALIZED;
  for (Value *PtrOperand : getPointerOperands(V)) {
    unsigned OperandAS;
    if (InferredAddrSpace.count(PtrOperand))
      OperandAS = InferredAddrSpace.lookup(PtrOperand);
    else
      OperandAS = PtrOperand->getType()->getPointerAddressSpace();
    NewAS = joinAddressSpaces(NewAS, OperandAS);
    // join(generic, *) = generic, so we can break if NewAS is already generic.
    if (NewAS == AddressSpace::ADDRESS_SPACE_GENERIC)
      break;
  }

  unsigned OldAS = InferredAddrSpace.lookup(&V);
  assert(OldAS != AddressSpace::ADDRESS_SPACE_GENERIC);
  if (OldAS == NewAS)
    return None;
  return NewAS;
}

bool NVPTXInferAddressSpaces::rewriteWithNewAddressSpaces(
    const std::vector<Value *> &Postorder,
    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> UndefUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace(
          V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *UndefUse : UndefUsesToFix) {
    User *V = UndefUse->getUser();
    User *NewV = cast<User>(ValueWithNewAddrSpace.lookup(V));
    unsigned OperandNo = UndefUse->getOperandNo();
    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
  }

  // Replaces the uses of the old address expressions with the new ones.
  for (Value *V : Postorder) {
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    SmallVector<Use *, 4> Uses;
    for (Use &U : V->uses())
      Uses.push_back(&U);
    DEBUG(dbgs() << "Replacing the uses of " << *V << "\n to\n " << *NewV
                 << "\n");
    for (Use *U : Uses) {
      if (isa<LoadInst>(U->getUser()) ||
          (isa<StoreInst>(U->getUser()) && U->getOperandNo() == 1)) {
        // If V is used as the pointer operand of a load/store, sets the pointer
        // operand to NewV. This replacement does not change the element type,
        // so the resultant load/store is still valid.
        U->set(NewV);
      } else if (isa<Instruction>(U->getUser())) {
        // Otherwise, replaces the use with generic(NewV).
        // TODO: Some optimization opportunities are missed. For example, in
        //   %0 = icmp eq float* %p, %q
        // if both p and q are inferred to be shared, we can rewrite %0 as
        //   %0 = icmp eq float addrspace(3)* %new_p, %new_q
        // instead of currently
        //   %generic_p = addrspacecast float addrspace(3)* %new_p to float*
        //   %generic_q = addrspacecast float addrspace(3)* %new_q to float*
        //   %0 = icmp eq float* %generic_p, %generic_q
        if (Instruction *I = dyn_cast<Instruction>(V)) {
          // The new addrspacecast must not be inserted among the PHI nodes at
          // the start of the block.
          BasicBlock::iterator InsertPos = std::next(I->getIterator());
          while (isa<PHINode>(InsertPos))
            ++InsertPos;
          U->set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
        } else {
          U->set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                                V->getType()));
        }
      }
    }
    if (V->use_empty())
      RecursivelyDeleteTriviallyDeadInstructions(V);
  }

  return true;
}
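
// Factory function used by the NVPTX target (e.g. from NVPTXTargetMachine) to
// add this pass to the codegen IR pipeline.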
FunctionPass *llvm::createNVPTXInferAddressSpacesPass() {
  return new NVPTXInferAddressSpaces();
}