1 //===-- AMDGPUAnnotateUniformValues.cpp - ---------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This pass adds amdgpu.uniform metadata to IR values so this information
12 /// can be used during instruction selection.
14 //===----------------------------------------------------------------------===//
17 #include "AMDGPUIntrinsicInfo.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/Analysis/DivergenceAnalysis.h"
20 #include "llvm/Analysis/LoopInfo.h"
21 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
22 #include "llvm/IR/IRBuilder.h"
23 #include "llvm/IR/InstVisitor.h"
24 #include "llvm/Support/Debug.h"
25 #include "llvm/Support/raw_ostream.h"
27 #define DEBUG_TYPE "amdgpu-annotate-uniform"
33 class AMDGPUAnnotateUniformValues : public FunctionPass,
34 public InstVisitor<AMDGPUAnnotateUniformValues> {
35 DivergenceAnalysis *DA;
36 MemoryDependenceResults *MDR;
38 DenseMap<Value*, GetElementPtrInst*> noClobberClones;
44 AMDGPUAnnotateUniformValues() :
46 bool doInitialization(Module &M) override;
47 bool runOnFunction(Function &F) override;
48 StringRef getPassName() const override {
49 return "AMDGPU Annotate Uniform Values";
51 void getAnalysisUsage(AnalysisUsage &AU) const override {
52 AU.addRequired<DivergenceAnalysis>();
53 AU.addRequired<MemoryDependenceWrapperPass>();
54 AU.addRequired<LoopInfoWrapperPass>();
58 void visitBranchInst(BranchInst &I);
59 void visitLoadInst(LoadInst &I);
60 bool isClobberedInFunction(LoadInst * Load);
63 } // End anonymous namespace
// Register the pass and its required analyses with the legacy pass manager.
// The trailing 'false, false' flags mark the pass as not CFG-only and not an
// analysis pass.
INITIALIZE_PASS_BEGIN(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                      "Add AMDGPU uniform metadata", false, false)
INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                    "Add AMDGPU uniform metadata", false, false)

// Pass identity token; its address uniquely identifies the pass.
char AMDGPUAnnotateUniformValues::ID = 0;
75 static void setUniformMetadata(Instruction *I) {
76 I->setMetadata("amdgpu.uniform", MDNode::get(I->getContext(), {}));
78 static void setNoClobberMetadata(Instruction *I) {
79 I->setMetadata("amdgpu.noclobber", MDNode::get(I->getContext(), {}));
82 static void DFS(BasicBlock *Root, SetVector<BasicBlock*> & Set) {
83 for (auto I : predecessors(Root))
88 bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst * Load) {
89 // 1. get Loop for the Load->getparent();
90 // 2. if it exists, collect all the BBs from the most outer
91 // loop and check for the writes. If NOT - start DFS over all preds.
92 // 3. Start DFS over all preds from the most outer loop header.
93 SetVector<BasicBlock *> Checklist;
94 BasicBlock *Start = Load->getParent();
95 Checklist.insert(Start);
96 const Value *Ptr = Load->getPointerOperand();
97 const Loop *L = LI->getLoopFor(Start);
102 P = P->getParentLoop();
104 Checklist.insert(L->block_begin(), L->block_end());
105 Start = L->getHeader();
108 DFS(Start, Checklist);
109 for (auto &BB : Checklist) {
110 BasicBlock::iterator StartIt = (!L && (BB == Load->getParent())) ?
111 BasicBlock::iterator(Load) : BB->end();
112 if (MDR->getPointerDependencyFrom(MemoryLocation(Ptr),
113 true, StartIt, BB, Load).isClobber())
119 void AMDGPUAnnotateUniformValues::visitBranchInst(BranchInst &I) {
120 if (I.isUnconditional())
123 Value *Cond = I.getCondition();
124 if (!DA->isUniform(Cond))
127 setUniformMetadata(I.getParent()->getTerminator());
130 void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
131 Value *Ptr = I.getPointerOperand();
132 if (!DA->isUniform(Ptr))
134 auto isGlobalLoad = [&](LoadInst &Load)->bool {
135 return Load.getPointerAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS;
137 // We're tracking up to the Function boundaries
138 // We cannot go beyond because of FunctionPass restrictions
139 // Thus we can ensure that memory not clobbered for memory
140 // operations that live in kernel only.
141 bool NotClobbered = isKernelFunc && !isClobberedInFunction(&I);
142 Instruction *PtrI = dyn_cast<Instruction>(Ptr);
143 if (!PtrI && NotClobbered && isGlobalLoad(I)) {
144 if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
145 // Lookup for the existing GEP
146 if (noClobberClones.count(Ptr)) {
147 PtrI = noClobberClones[Ptr];
149 // Create GEP of the Value
150 Function *F = I.getParent()->getParent();
151 Value *Idx = Constant::getIntegerValue(
152 Type::getInt32Ty(Ptr->getContext()), APInt(64, 0));
153 // Insert GEP at the entry to make it dominate all uses
154 PtrI = GetElementPtrInst::Create(
155 Ptr->getType()->getPointerElementType(), Ptr,
156 ArrayRef<Value*>(Idx), Twine(""), F->getEntryBlock().getFirstNonPHI());
158 I.replaceUsesOfWith(Ptr, PtrI);
163 setUniformMetadata(PtrI);
165 setNoClobberMetadata(PtrI);
169 bool AMDGPUAnnotateUniformValues::doInitialization(Module &M) {
170 AMDGPUASI = AMDGPU::getAMDGPUAS(M);
174 bool AMDGPUAnnotateUniformValues::runOnFunction(Function &F) {
178 DA = &getAnalysis<DivergenceAnalysis>();
179 MDR = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
180 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
181 isKernelFunc = F.getCallingConv() == CallingConv::AMDGPU_KERNEL;
184 noClobberClones.clear();
189 llvm::createAMDGPUAnnotateUniformValues() {
190 return new AMDGPUAnnotateUniformValues();