//===- AMDGPUUnifyDivergentExitNodes.cpp ----------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is a variant of the UnifyDivergentExitNodes pass. Rather than ensuring
// there is at most one ret and one unreachable instruction, it ensures there
// is at most one divergent exiting block.
//
// StructurizeCFG can't deal with multi-exit regions formed by branches to
// multiple return nodes. It is not desirable to structurize regions with
// uniform branches, so unifying those to the same return block as divergent
// branches inhibits use of scalar branching. It still can't deal with the case
// where one branch goes to return, and one unreachable. Replace unreachable in
// this case with a return.
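//
// For illustration only, a simplified sketch on hypothetical IR (the block
// and value names below match the ones this pass creates; the branch on
// %divergent.cond is assumed to be divergent per DivergenceAnalysis):
//
//   entry:
//     br i1 %divergent.cond, label %exit0, label %exit1
//   exit0:
//     ret float 0.0
//   exit1:
//     ret float 1.0
//
// is rewritten so both returns funnel into a single divergent exit:
//
//   exit0:
//     br label %UnifiedReturnBlock
//   exit1:
//     br label %UnifiedReturnBlock
//   UnifiedReturnBlock:
//     %UnifiedRetVal = phi float [ 0.0, %exit0 ], [ 1.0, %exit1 ]
//     ret float %UnifiedRetVal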
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-unify-divergent-exit-nodes"

namespace {

class AMDGPUUnifyDivergentExitNodes : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AMDGPUUnifyDivergentExitNodes() : FunctionPass(ID) {
    initializeAMDGPUUnifyDivergentExitNodesPass(
        *PassRegistry::getPassRegistry());
  }

  // We can preserve non-critical-edgeness when we unify function exit nodes.
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
};

} // end anonymous namespace

char AMDGPUUnifyDivergentExitNodes::ID = 0;
INITIALIZE_PASS_BEGIN(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
                      "Unify divergent function exit nodes", false, false)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
INITIALIZE_PASS_END(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
                    "Unify divergent function exit nodes", false, false)

char &llvm::AMDGPUUnifyDivergentExitNodesID = AMDGPUUnifyDivergentExitNodes::ID;

void AMDGPUUnifyDivergentExitNodes::getAnalysisUsage(AnalysisUsage &AU) const {
  // TODO: Preserve dominator tree.
  AU.addRequired<PostDominatorTreeWrapperPass>();

  AU.addRequired<DivergenceAnalysis>();

  // No divergent values are changed, only blocks and branch edges.
  AU.addPreserved<DivergenceAnalysis>();

  // We preserve the non-critical-edgeness property.
  AU.addPreservedID(BreakCriticalEdgesID);

  // This is a cluster of orthogonal Transforms.
  AU.addPreservedID(LowerSwitchID);

  FunctionPass::getAnalysisUsage(AU);

  AU.addRequired<TargetTransformInfoWrapperPass>();
}

/// \returns true if \p BB is reachable through only uniform branches.
/// XXX - Is there a more efficient way to find this?
static bool isUniformlyReached(const DivergenceAnalysis &DA,
                               BasicBlock &BB) {
  SmallVector<BasicBlock *, 8> Stack;
  SmallPtrSet<BasicBlock *, 8> Visited;

  // Walk backwards from \p BB; if any reachable predecessor ends in a
  // divergent terminator, \p BB can be reached divergently.
  for (BasicBlock *Pred : predecessors(&BB))
    Stack.push_back(Pred);

  while (!Stack.empty()) {
    BasicBlock *Top = Stack.pop_back_val();
    if (!DA.isUniform(Top->getTerminator()))
      return false;

    for (BasicBlock *Pred : predecessors(Top)) {
      if (Visited.insert(Pred).second)
        Stack.push_back(Pred);
    }
  }

  return true;
}
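
/// Create a new return block named \p Name, rewrite every block in
/// \p ReturningBlocks to branch to it (merging the returned values through a
/// phi when \p F is non-void), and run SimplifyCFG over the rewritten blocks
/// to fold away branches to now-unconditional branches. \returns the unified
/// return block.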
static BasicBlock *unifyReturnBlockSet(Function &F,
                                       ArrayRef<BasicBlock *> ReturningBlocks,
                                       const TargetTransformInfo &TTI,
                                       StringRef Name) {
  // We need to insert a new basic block into the function, add a PHI node (if
  // the function returns values), and convert all of the return instructions
  // into unconditional branches.
  BasicBlock *NewRetBlock = BasicBlock::Create(F.getContext(), Name, &F);

  PHINode *PN = nullptr;
  if (F.getReturnType()->isVoidTy()) {
    ReturnInst::Create(F.getContext(), nullptr, NewRetBlock);
  } else {
    // If the function doesn't return void, add a PHI node to the block.
    PN = PHINode::Create(F.getReturnType(), ReturningBlocks.size(),
                         "UnifiedRetVal");
    NewRetBlock->getInstList().push_back(PN);
    ReturnInst::Create(F.getContext(), PN, NewRetBlock);
  }

  // Loop over all of the blocks, replacing the return instruction with an
  // unconditional branch.
  for (BasicBlock *BB : ReturningBlocks) {
    // Add an incoming element to the PHI node for every return instruction
    // that is merging into this new block.
    if (PN)
      PN->addIncoming(BB->getTerminator()->getOperand(0), BB);

    BB->getInstList().pop_back(); // Remove the return insn.
    BranchInst::Create(NewRetBlock, BB);
  }

  for (BasicBlock *BB : ReturningBlocks) {
    // Cleanup possible branch to unconditional branch to the return.
    SimplifyCFG(BB, TTI, 2);
  }

  return NewRetBlock;
}
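
// Collect the divergently reached return and unreachable blocks, rewrite
// unreachable exits into returns when they coexist with returning blocks,
// then unify the remaining divergent returns into a single exit.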
bool AMDGPUUnifyDivergentExitNodes::runOnFunction(Function &F) {
  auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
  if (PDT.getRoots().size() <= 1)
    return false;

  DivergenceAnalysis &DA = getAnalysis<DivergenceAnalysis>();

  // Loop over all of the blocks in a function, tracking all of the blocks
  // that return.
  SmallVector<BasicBlock *, 4> ReturningBlocks;
  SmallVector<BasicBlock *, 4> UnreachableBlocks;

  for (BasicBlock *BB : PDT.getRoots()) {
    if (isa<ReturnInst>(BB->getTerminator())) {
      if (!isUniformlyReached(DA, *BB))
        ReturningBlocks.push_back(BB);
    } else if (isa<UnreachableInst>(BB->getTerminator())) {
      if (!isUniformlyReached(DA, *BB))
        UnreachableBlocks.push_back(BB);
    }
  }

  if (!UnreachableBlocks.empty()) {
    BasicBlock *UnreachableBlock = nullptr;

    if (UnreachableBlocks.size() == 1) {
      UnreachableBlock = UnreachableBlocks.front();
    } else {
      UnreachableBlock = BasicBlock::Create(F.getContext(),
                                            "UnifiedUnreachableBlock", &F);
      new UnreachableInst(F.getContext(), UnreachableBlock);

      for (BasicBlock *BB : UnreachableBlocks) {
        BB->getInstList().pop_back(); // Remove the unreachable inst.
        BranchInst::Create(UnreachableBlock, BB);
      }
    }

    if (!ReturningBlocks.empty()) {
      // Don't create a new unreachable inst if we have a return. The
      // structurizer/annotator can't handle the multiple exits.

      Type *RetTy = F.getReturnType();
      Value *RetVal = RetTy->isVoidTy() ? nullptr : UndefValue::get(RetTy);
      UnreachableBlock->getInstList().pop_back(); // Remove the unreachable inst.

      Function *UnreachableIntrin =
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::amdgcn_unreachable);

      // Insert a call to an intrinsic tracking that this is an unreachable
      // point, in case we want to kill the active lanes or something later.
      CallInst::Create(UnreachableIntrin, {}, "", UnreachableBlock);

      // Don't create a scalar trap. We would only want to trap if this code
      // was really reached, but a scalar trap would happen even if no lanes
      // actually reached here.
      ReturnInst::Create(F.getContext(), RetVal, UnreachableBlock);
      ReturningBlocks.push_back(UnreachableBlock);
    }
  }
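
  // For illustration only, assuming a hypothetical function returning float:
  // a divergently reached block
  //
  //   exitN:
  //     unreachable
  //
  // has just been rewritten above into a returning block the structurizer
  // can handle:
  //
  //   exitN:
  //     call void @llvm.amdgcn.unreachable()
  //     ret float undef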

  // Now handle return blocks.
  if (ReturningBlocks.empty())
    return false; // No blocks return.

  if (ReturningBlocks.size() == 1)
    return false; // Already has a single return block.

  const TargetTransformInfo &TTI
    = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  unifyReturnBlockSet(F, ReturningBlocks, TTI, "UnifiedReturnBlock");
  return true;
}