//===- JumpThreading.cpp - Thread control through conditional blocks -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Jump Threading pass.
//
//===----------------------------------------------------------------------===//

14 #include "llvm/Transforms/Scalar/JumpThreading.h"
15 #include "llvm/Transforms/Scalar.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/DenseSet.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/GlobalsModRef.h"
21 #include "llvm/Analysis/CFG.h"
22 #include "llvm/Analysis/BlockFrequencyInfoImpl.h"
23 #include "llvm/Analysis/ConstantFolding.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/Loads.h"
26 #include "llvm/Analysis/LoopInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/IntrinsicInst.h"
30 #include "llvm/IR/LLVMContext.h"
31 #include "llvm/IR/MDBuilder.h"
32 #include "llvm/IR/Metadata.h"
33 #include "llvm/Pass.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/raw_ostream.h"
37 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
38 #include "llvm/Transforms/Utils/Local.h"
39 #include "llvm/Transforms/Utils/SSAUpdater.h"
43 using namespace jumpthreading;
45 #define DEBUG_TYPE "jump-threading"
STATISTIC(NumThreads, "Number of jumps threaded");
STATISTIC(NumFolds,   "Number of terminators folded");
STATISTIC(NumDupes,   "Number of branch blocks duplicated to eliminate phi");

static cl::opt<unsigned>
BBDuplicateThreshold("jump-threading-threshold",
          cl::desc("Max block size to duplicate for jump threading"),
          cl::init(6), cl::Hidden);

static cl::opt<unsigned>
ImplicationSearchThreshold(
  "jump-threading-implication-search-threshold",
  cl::desc("The number of predecessors to search for a stronger "
           "condition to use to thread over a weaker condition"),
  cl::init(3), cl::Hidden);

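// For reference, both thresholds can be overridden from the 'opt' command
// line via the hidden flags declared above, e.g. (illustrative invocation;
// the file names are made up):
//   opt -jump-threading -jump-threading-threshold=10 -S in.ll -o out.ll
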
namespace {
  /// This pass performs 'jump threading', which looks at blocks that have
  /// multiple predecessors and multiple successors.  If one or more of the
  /// predecessors of the block can be proven to always jump to one of the
  /// successors, we forward the edge from the predecessor to the successor by
  /// duplicating the contents of this block.
  ///
  /// An example of when this can occur is code like this:
  ///
  ///   if () { ...
  ///     X = 4;
  ///   }
  ///   if (X < 3) {
  ///
  /// In this case, the unconditional branch at the end of the first if can be
  /// revectored to the false side of the second if.
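  ///
  /// In IR terms (an illustrative sketch, not part of the original comment;
  /// the block and value names are made up), after threading the first if's
  /// body branches directly to the false successor of the second if:
  ///
  ///   then1:                             then1:
  ///     store i32 4, i32* %X      =>       store i32 4, i32* %X
  ///     br label %if2                      br label %if2.false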
  ///
  class JumpThreading : public FunctionPass {
    JumpThreadingPass Impl;

  public:
    static char ID; // Pass identification
    JumpThreading(int T = -1) : FunctionPass(ID), Impl(T) {
      initializeJumpThreadingPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<LazyValueInfoWrapperPass>();
      AU.addPreserved<LazyValueInfoWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
    }

    void releaseMemory() override { Impl.releaseMemory(); }
  };
}

char JumpThreading::ID = 0;
INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading",
                "Jump Threading", false, false)
INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(JumpThreading, "jump-threading",
                "Jump Threading", false, false)

// Public interface to the Jump Threading pass
FunctionPass *llvm::createJumpThreadingPass(int Threshold) {
  return new JumpThreading(Threshold);
}

JumpThreadingPass::JumpThreadingPass(int T) {
  BBDupThreshold = (T == -1) ? BBDuplicateThreshold : unsigned(T);
}

/// runOnFunction - Top level algorithm.
///
bool JumpThreading::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  auto TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  auto LVI = &getAnalysis<LazyValueInfoWrapperPass>().getLVI();
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  bool HasProfileData = F.getEntryCount().hasValue();
  if (HasProfileData) {
    LoopInfo LI{DominatorTree(F)};
    BPI.reset(new BranchProbabilityInfo(F, LI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }

  return Impl.runImpl(F, TLI, LVI, HasProfileData, std::move(BFI),
                      std::move(BPI));
}

PreservedAnalyses JumpThreadingPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &LVI = AM.getResult<LazyValueAnalysis>(F);
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  bool HasProfileData = F.getEntryCount().hasValue();
  if (HasProfileData) {
    LoopInfo LI{DominatorTree(F)};
    BPI.reset(new BranchProbabilityInfo(F, LI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }

  bool Changed = runImpl(F, &TLI, &LVI, HasProfileData, std::move(BFI),
                         std::move(BPI));

  // FIXME: We need to invalidate LVI to avoid PR28400. Is there a better
  // solution?
  AM.invalidate<LazyValueAnalysis>(F);

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<GlobalsAA>();
  return PA;
}

bool JumpThreadingPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
                                LazyValueInfo *LVI_, bool HasProfileData_,
                                std::unique_ptr<BlockFrequencyInfo> BFI_,
                                std::unique_ptr<BranchProbabilityInfo> BPI_) {
  DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
  TLI = TLI_;
  LVI = LVI_;
  BFI.reset();
  BPI.reset();
  // When profile data is available, we need to update edge weights after
  // successful jump threading, which requires both BPI and BFI being available.
  HasProfileData = HasProfileData_;
  if (HasProfileData) {
    BPI = std::move(BPI_);
    BFI = std::move(BFI_);
  }

  // Remove unreachable blocks from function as they may result in infinite
  // loop. We do threading if we found something profitable. Jump threading a
  // branch can create other opportunities. If these opportunities form a cycle
  // i.e. if any jump threading is undoing previous threading in the path, then
  // we will loop forever. We take care of this issue by not jump threading for
  // back edges. This works for normal cases but not for unreachable blocks as
  // they may have cycle with no back edge.
  bool EverChanged = false;
  EverChanged |= removeUnreachableBlocks(F, LVI);
  FindLoopHeaders(F);

  bool Changed;
  do {
    Changed = false;
    for (Function::iterator I = F.begin(), E = F.end(); I != E;) {
      BasicBlock *BB = &*I;
      // Thread all of the branches we can over this block.
      while (ProcessBlock(BB))
        Changed = true;

      ++I;

      // If the block is trivially dead, zap it.  This eliminates the successor
      // edges which simplifies the CFG.
      if (pred_empty(BB) &&
          BB != &BB->getParent()->getEntryBlock()) {
        DEBUG(dbgs() << "  JT: Deleting dead block '" << BB->getName()
              << "' with terminator: " << *BB->getTerminator() << '\n');
        LoopHeaders.erase(BB);
        LVI->eraseBlock(BB);
        DeleteDeadBlock(BB);
        Changed = true;
        continue;
      }

      BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());

      // Can't thread an unconditional jump, but if the block is "almost
      // empty", we can replace uses of it with uses of the successor and make
      // this dead.
      // We should not eliminate the loop header either, because eliminating
      // a loop header might later prevent LoopSimplify from transforming nested
      // loops into simplified form.
      if (BI && BI->isUnconditional() &&
          BB != &BB->getParent()->getEntryBlock() &&
          // If the terminator is the only non-phi instruction, try to nuke it.
          BB->getFirstNonPHIOrDbg()->isTerminator() && !LoopHeaders.count(BB)) {
        // Since TryToSimplifyUncondBranchFromEmptyBlock may delete the
        // block, we have to make sure it isn't in the LoopHeaders set.  We
        // reinsert afterward if needed.
        bool ErasedFromLoopHeaders = LoopHeaders.erase(BB);
        BasicBlock *Succ = BI->getSuccessor(0);

        // FIXME: It is always conservatively correct to drop the info
        // for a block even if it doesn't get erased.  This isn't totally
        // awesome, but it allows us to use AssertingVH to prevent nasty
        // dangling pointer issues within LazyValueInfo.
        LVI->eraseBlock(BB);
        if (TryToSimplifyUncondBranchFromEmptyBlock(BB)) {
          Changed = true;
          // If we deleted BB and BB was the header of a loop, then the
          // successor is now the header of the loop.
          BB = Succ;
        }

        if (ErasedFromLoopHeaders)
          LoopHeaders.insert(BB);
      }
    }
    EverChanged |= Changed;
  } while (Changed);

  LoopHeaders.clear();
  return EverChanged;
}

/// getJumpThreadDuplicationCost - Return the cost of duplicating this block to
/// thread across it. Stop scanning the block when passing the threshold.
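///
/// For intuition, an illustrative example (not from the original comment): a
/// block with two ordinary instructions plus one non-intrinsic call costs
/// 1 + 1 + (1 + 3) = 6 units, exactly the default jump-threading-threshold,
/// so any larger block would not be duplicated.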
static unsigned getJumpThreadDuplicationCost(const BasicBlock *BB,
                                             unsigned Threshold) {
  // Ignore PHI nodes, these will be flattened when duplication happens.
  BasicBlock::const_iterator I(BB->getFirstNonPHI());

  // FIXME: THREADING will delete values that are just used to compute the
  // branch, so they shouldn't count against the duplication cost.

  unsigned Bonus = 0;
  const TerminatorInst *BBTerm = BB->getTerminator();
  // Threading through a switch statement is particularly profitable.  If this
  // block ends in a switch, decrease its cost to make it more likely to happen.
  if (isa<SwitchInst>(BBTerm))
    Bonus = 6;

  // The same holds for indirect branches, but slightly more so.
  if (isa<IndirectBrInst>(BBTerm))
    Bonus = 8;

  // Bump the threshold up so the early exit from the loop doesn't skip the
  // terminator-based Size adjustment at the end.
  Threshold += Bonus;

  // Sum up the cost of each instruction until we get to the terminator.  Don't
  // include the terminator because the copy won't include it.
  unsigned Size = 0;
  for (; !isa<TerminatorInst>(I); ++I) {

    // Stop scanning the block if we've reached the threshold.
    if (Size > Threshold)
      return Size;

    // Debugger intrinsics don't incur code size.
    if (isa<DbgInfoIntrinsic>(I)) continue;

    // If this is a pointer->pointer bitcast, it is free.
    if (isa<BitCastInst>(I) && I->getType()->isPointerTy())
      continue;

    // Bail out if this instruction gives back a token type, it is not possible
    // to duplicate it if it is used outside this BB.
    if (I->getType()->isTokenTy() && I->isUsedOutsideOfBlock(BB))
      return ~0U;

    // All other instructions count for at least one unit.
    ++Size;

    // Calls are more expensive.  If they are non-intrinsic calls, we model them
    // as having cost of 4.  If they are a non-vector intrinsic, we model them
    // as having cost of 2 total, and if they are a vector intrinsic, we model
    // them as having cost 1.
    if (const CallInst *CI = dyn_cast<CallInst>(I)) {
      if (CI->cannotDuplicate() || CI->isConvergent())
        // Blocks with NoDuplicate are modelled as having infinite cost, so they
        // are never duplicated.
        return ~0U;
      else if (!isa<IntrinsicInst>(CI))
        Size += 3;
      else if (!CI->getType()->isVectorTy())
        Size += 1;
    }
  }

  return Size > Bonus ? Size - Bonus : 0;
}

/// FindLoopHeaders - We do not want jump threading to turn proper loop
/// structures into irreducible loops.  Doing this breaks up the loop nesting
/// hierarchy and pessimizes later transformations.  To prevent this from
/// happening, we first have to find the loop headers.  Here we approximate this
/// by finding targets of backedges in the CFG.
///
/// Note that there definitely are cases when we want to allow threading of
/// edges across a loop header.  For example, threading a jump from outside the
/// loop (the preheader) to an exit block of the loop is definitely profitable.
/// It is also almost always profitable to thread backedges from within the loop
/// to exit blocks, and is often profitable to thread backedges to other blocks
/// within the loop (forming a nested loop).  This simple analysis is not rich
/// enough to track all of these properties and keep it up-to-date as the CFG
/// mutates, so we don't allow any of these transformations.
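///
/// For example (an illustrative CFG, not from the original comment): with
/// edges entry->header, header->body, body->header and body->exit, the
/// backedge body->header makes 'header' a recorded loop header, so no edge
/// is threaded across it.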
void JumpThreadingPass::FindLoopHeaders(Function &F) {
  SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
  FindFunctionBackedges(F, Edges);

  for (const auto &Edge : Edges)
    LoopHeaders.insert(Edge.second);
}

/// getKnownConstant - Helper method to determine if we can thread over a
/// terminator with the given value as its condition, and if so what value to
/// use for that. What kind of value this is depends on whether we want an
/// integer or a block address, but an undef is always accepted.
/// Returns null if Val is null or not an appropriate constant.
static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
  if (!Val)
    return nullptr;

  // Undef is "known" enough.
  if (UndefValue *U = dyn_cast<UndefValue>(Val))
    return U;

  if (Preference == WantBlockAddress)
    return dyn_cast<BlockAddress>(Val->stripPointerCasts());

  return dyn_cast<ConstantInt>(Val);
}

/// ComputeValueKnownInPredecessors - Given a basic block BB and a value V, see
/// if we can infer that the value is a known ConstantInt/BlockAddress or undef
/// in any of our predecessors.  If so, return the known list of value and pred
/// BB in the result vector.
///
/// This returns true if there were any known values.
///
bool JumpThreadingPass::ComputeValueKnownInPredecessors(
    Value *V, BasicBlock *BB, PredValueInfo &Result,
    ConstantPreference Preference, Instruction *CxtI) {
  // This method walks up use-def chains recursively.  Because of this, we could
  // get into an infinite loop going around loops in the use-def chain.  To
  // prevent this, keep track of what (value, block) pairs we've already
  // visited and terminate the search if we loop back to them.
  if (!RecursionSet.insert(std::make_pair(V, BB)).second)
    return false;

  // An RAII helper to remove this pair from the recursion set once the
  // recursion stack pops back out again.
  RecursionSetRemover remover(RecursionSet, std::make_pair(V, BB));

  // If V is a constant, then it is known in all predecessors.
  if (Constant *KC = getKnownConstant(V, Preference)) {
    for (BasicBlock *Pred : predecessors(BB))
      Result.push_back(std::make_pair(KC, Pred));

    return !Result.empty();
  }

  // If V is a non-instruction value, or an instruction in a different block,
  // then it can't be derived from a PHI.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || I->getParent() != BB) {

    // Okay, if this is a live-in value, see if it has a known value at the end
    // of any of our predecessors.
    //
    // FIXME: This should be an edge property, not a block end property.
    // TODO: Per PR2563, we could infer value range information about a
    // predecessor based on its terminator.
    //
    // FIXME: change this to use the more-rich 'getPredicateOnEdge' method if
    // "I" is a non-local compare-with-a-constant instruction.  This would be
    // able to handle value inequalities better, for example if the compare is
    // "X < 4" and "X < 3" is known true but "X < 4" itself is not available.
    // Perhaps getConstantOnEdge should be smart enough to do this?

    for (BasicBlock *P : predecessors(BB)) {
      // If the value is known by LazyValueInfo to be a constant in a
      // predecessor, use that information to try to thread this block.
      Constant *PredCst = LVI->getConstantOnEdge(V, P, BB, CxtI);
      if (Constant *KC = getKnownConstant(PredCst, Preference))
        Result.push_back(std::make_pair(KC, P));
    }

    return !Result.empty();
  }

  // If I is a PHI node, then we know the incoming values for any constants.
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      if (Constant *KC = getKnownConstant(InVal, Preference)) {
        Result.push_back(std::make_pair(KC, PN->getIncomingBlock(i)));
      } else {
        Constant *CI = LVI->getConstantOnEdge(InVal,
                                              PN->getIncomingBlock(i),
                                              BB, CxtI);
        if (Constant *KC = getKnownConstant(CI, Preference))
          Result.push_back(std::make_pair(KC, PN->getIncomingBlock(i)));
      }
    }

    return !Result.empty();
  }

  // Handle Cast instructions.  Only see through Cast when the source operand is
  // PHI or Cmp and the source type is i1 to save the compilation time.
  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Source = CI->getOperand(0);
    if (!Source->getType()->isIntegerTy(1))
      return false;
    if (!isa<PHINode>(Source) && !isa<CmpInst>(Source))
      return false;
    ComputeValueKnownInPredecessors(Source, BB, Result, Preference, CxtI);
    if (Result.empty())
      return false;

    // Convert the known values.
    for (auto &R : Result)
      R.first = ConstantExpr::getCast(CI->getOpcode(), R.first, CI->getType());

    return true;
  }

  PredValueInfoTy LHSVals, RHSVals;

  // Handle some boolean conditions.
  if (I->getType()->getPrimitiveSizeInBits() == 1) {
    assert(Preference == WantInteger && "One-bit non-integer type?");
    // X | true -> true
    // X & false -> false
    if (I->getOpcode() == Instruction::Or ||
        I->getOpcode() == Instruction::And) {
      ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
                                      WantInteger, CxtI);
      ComputeValueKnownInPredecessors(I->getOperand(1), BB, RHSVals,
                                      WantInteger, CxtI);

      if (LHSVals.empty() && RHSVals.empty())
        return false;

      ConstantInt *InterestingVal;
      if (I->getOpcode() == Instruction::Or)
        InterestingVal = ConstantInt::getTrue(I->getContext());
      else
        InterestingVal = ConstantInt::getFalse(I->getContext());

      SmallPtrSet<BasicBlock*, 4> LHSKnownBBs;

      // Scan for the sentinel.  If we find an undef, force it to the
      // interesting value: x|undef -> true and x&undef -> false.
      for (const auto &LHSVal : LHSVals)
        if (LHSVal.first == InterestingVal || isa<UndefValue>(LHSVal.first)) {
          Result.emplace_back(InterestingVal, LHSVal.second);
          LHSKnownBBs.insert(LHSVal.second);
        }
      for (const auto &RHSVal : RHSVals)
        if (RHSVal.first == InterestingVal || isa<UndefValue>(RHSVal.first)) {
          // If we already inferred a value for this block on the LHS, don't
          // check the RHS.
          if (!LHSKnownBBs.count(RHSVal.second))
            Result.emplace_back(InterestingVal, RHSVal.second);
        }

      return !Result.empty();
    }

    // Handle the NOT form of XOR.
    if (I->getOpcode() == Instruction::Xor &&
        isa<ConstantInt>(I->getOperand(1)) &&
        cast<ConstantInt>(I->getOperand(1))->isOne()) {
      ComputeValueKnownInPredecessors(I->getOperand(0), BB, Result,
                                      WantInteger, CxtI);
      if (Result.empty())
        return false;

      // Invert the known values.
      for (auto &R : Result)
        R.first = ConstantExpr::getNot(R.first);

      return true;
    }

  // Try to simplify some other binary operator values.
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    assert(Preference != WantBlockAddress
            && "A binary operator creating a block address?");
    if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
      PredValueInfoTy LHSVals;
      ComputeValueKnownInPredecessors(BO->getOperand(0), BB, LHSVals,
                                      WantInteger, CxtI);

      // Try to use constant folding to simplify the binary operator.
      for (const auto &LHSVal : LHSVals) {
        Constant *V = LHSVal.first;
        Constant *Folded = ConstantExpr::get(BO->getOpcode(), V, CI);

        if (Constant *KC = getKnownConstant(Folded, WantInteger))
          Result.push_back(std::make_pair(KC, LHSVal.second));
      }
    }

    return !Result.empty();
  }

  // Handle compare with phi operand, where the PHI is defined in this block.
  if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
    assert(Preference == WantInteger && "Compares only produce integers");
    PHINode *PN = dyn_cast<PHINode>(Cmp->getOperand(0));
    if (PN && PN->getParent() == BB) {
      const DataLayout &DL = PN->getModule()->getDataLayout();
      // We can do this simplification if any comparisons fold to true or false.
      // See if any do.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *PredBB = PN->getIncomingBlock(i);
        Value *LHS = PN->getIncomingValue(i);
        Value *RHS = Cmp->getOperand(1)->DoPHITranslation(BB, PredBB);

        Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, DL);
        if (!Res) {
          if (!isa<Constant>(RHS))
            continue;

          LazyValueInfo::Tristate
            ResT = LVI->getPredicateOnEdge(Cmp->getPredicate(), LHS,
                                           cast<Constant>(RHS), PredBB, BB,
                                           CxtI ? CxtI : Cmp);
          if (ResT == LazyValueInfo::Unknown)
            continue;
          Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT);
        }

        if (Constant *KC = getKnownConstant(Res, WantInteger))
          Result.push_back(std::make_pair(KC, PredBB));
      }

      return !Result.empty();
    }

    // If comparing a live-in value against a constant, see if we know the
    // live-in value on any predecessors.
    if (isa<Constant>(Cmp->getOperand(1)) && Cmp->getType()->isIntegerTy()) {
      if (!isa<Instruction>(Cmp->getOperand(0)) ||
          cast<Instruction>(Cmp->getOperand(0))->getParent() != BB) {
        Constant *RHSCst = cast<Constant>(Cmp->getOperand(1));

        for (BasicBlock *P : predecessors(BB)) {
          // If the value is known by LazyValueInfo to be a constant in a
          // predecessor, use that information to try to thread this block.
          LazyValueInfo::Tristate Res =
            LVI->getPredicateOnEdge(Cmp->getPredicate(), Cmp->getOperand(0),
                                    RHSCst, P, BB, CxtI ? CxtI : Cmp);
          if (Res == LazyValueInfo::Unknown)
            continue;

          Constant *ResC = ConstantInt::get(Cmp->getType(), Res);
          Result.push_back(std::make_pair(ResC, P));
        }

        return !Result.empty();
      }
    }

    // Try to find a constant value for the LHS of a comparison,
    // and evaluate it statically if we can.
    if (Constant *CmpConst = dyn_cast<Constant>(Cmp->getOperand(1))) {
      PredValueInfoTy LHSVals;
      ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
                                      WantInteger, CxtI);

      for (const auto &LHSVal : LHSVals) {
        Constant *V = LHSVal.first;
        Constant *Folded = ConstantExpr::getCompare(Cmp->getPredicate(),
                                                    V, CmpConst);
        if (Constant *KC = getKnownConstant(Folded, WantInteger))
          Result.push_back(std::make_pair(KC, LHSVal.second));
      }

      return !Result.empty();
    }
  }

  if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
    // Handle select instructions where at least one operand is a known constant
    // and we can figure out the condition value for any predecessor block.
    Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference);
    Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference);
    PredValueInfoTy Conds;
    if ((TrueVal || FalseVal) &&
        ComputeValueKnownInPredecessors(SI->getCondition(), BB, Conds,
                                        WantInteger, CxtI)) {
      for (auto &C : Conds) {
        Constant *Cond = C.first;

        // Figure out what value to use for the condition.
        bool KnownCond;
        if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) {
          // Use the condition.
          KnownCond = CI->isOne();
        } else {
          assert(isa<UndefValue>(Cond) && "Unexpected condition value");
          // Either operand will do, so be sure to pick the one that's a known
          // constant.
          // FIXME: Do this more cleverly if both values are known constants?
          KnownCond = (TrueVal != nullptr);
        }

        // See if the select has a known constant value for this predecessor.
        if (Constant *Val = KnownCond ? TrueVal : FalseVal)
          Result.push_back(std::make_pair(Val, C.second));
      }

      return !Result.empty();
    }
  }

  // If all else fails, see if LVI can figure out a constant value for us.
  Constant *CI = LVI->getConstant(V, BB, CxtI);
  if (Constant *KC = getKnownConstant(CI, Preference)) {
    for (BasicBlock *Pred : predecessors(BB))
      Result.push_back(std::make_pair(KC, Pred));
  }

  return !Result.empty();
}

/// GetBestDestForJumpOnUndef - If we determine that the specified block ends
/// in an undefined jump, decide which block is best to revector to.
///
/// Since we can pick an arbitrary destination, we pick the successor with the
/// fewest predecessors.  This should reduce the in-degree of the others.
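///
/// For example (illustrative, not from the original comment), given
///   br i1 undef, label %a, label %b
/// this picks whichever of %a and %b currently has fewer predecessors.
///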
static unsigned GetBestDestForJumpOnUndef(BasicBlock *BB) {
  TerminatorInst *BBTerm = BB->getTerminator();
  unsigned MinSucc = 0;
  BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc);
  // Compute the successor with the minimum number of predecessors.
  unsigned MinNumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB));
  for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) {
    TestBB = BBTerm->getSuccessor(i);
    unsigned NumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB));
    if (NumPreds < MinNumPreds) {
      MinSucc = i;
      MinNumPreds = NumPreds;
    }
  }

  return MinSucc;
}

static bool hasAddressTakenAndUsed(BasicBlock *BB) {
  if (!BB->hasAddressTaken()) return false;

  // If the block has its address taken, it may be a tree of dead constants
  // hanging off of it.  These shouldn't keep the block alive.
  BlockAddress *BA = BlockAddress::get(BB);
  BA->removeDeadConstantUsers();
  return !BA->use_empty();
}

/// ProcessBlock - If there are any predecessors whose control can be threaded
/// through to a successor, transform them now.
bool JumpThreadingPass::ProcessBlock(BasicBlock *BB) {
  // If the block is trivially dead, just return and let the caller nuke it.
  // This simplifies other transformations.
  if (pred_empty(BB) &&
      BB != &BB->getParent()->getEntryBlock())
    return false;

  // If this block has a single predecessor, and if that pred has a single
  // successor, merge the blocks.  This encourages recursive jump threading
  // because now the condition in this block can be threaded through
  // predecessors of our predecessor block.
  if (BasicBlock *SinglePred = BB->getSinglePredecessor()) {
    const TerminatorInst *TI = SinglePred->getTerminator();
    if (!TI->isExceptional() && TI->getNumSuccessors() == 1 &&
        SinglePred != BB && !hasAddressTakenAndUsed(BB)) {
      // If SinglePred was a loop header, BB becomes one.
      if (LoopHeaders.erase(SinglePred))
        LoopHeaders.insert(BB);

      LVI->eraseBlock(SinglePred);
      MergeBasicBlockIntoOnlyPred(BB);

      return true;
    }
  }

  if (TryToUnfoldSelectInCurrBB(BB))
    return true;

  // What kind of constant we're looking for.
  ConstantPreference Preference = WantInteger;

  // Look to see if the terminator is a conditional branch, switch or indirect
  // branch, if not we can't thread it.
  Value *Condition;
  Instruction *Terminator = BB->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) {
    // Can't thread an unconditional jump.
    if (BI->isUnconditional()) return false;
    Condition = BI->getCondition();
  } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) {
    Condition = SI->getCondition();
  } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) {
    // Can't thread indirect branch with no successors.
    if (IB->getNumSuccessors() == 0) return false;
    Condition = IB->getAddress()->stripPointerCasts();
    Preference = WantBlockAddress;
  } else {
    return false; // Must be an invoke.
  }

  // Run constant folding to see if we can reduce the condition to a simple
  // constant.
  if (Instruction *I = dyn_cast<Instruction>(Condition)) {
    Value *SimpleVal =
        ConstantFoldInstruction(I, BB->getModule()->getDataLayout(), TLI);
    if (SimpleVal) {
      I->replaceAllUsesWith(SimpleVal);
      if (isInstructionTriviallyDead(I, TLI))
        I->eraseFromParent();
      Condition = SimpleVal;
    }
  }

  // If the terminator is branching on an undef, we can pick any of the
  // successors to branch to.  Let GetBestDestForJumpOnUndef decide.
  if (isa<UndefValue>(Condition)) {
    unsigned BestSucc = GetBestDestForJumpOnUndef(BB);

    // Fold the branch/switch.
    TerminatorInst *BBTerm = BB->getTerminator();
    for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) {
      if (i == BestSucc) continue;
      BBTerm->getSuccessor(i)->removePredecessor(BB, true);
    }

    DEBUG(dbgs() << "  In block '" << BB->getName()
          << "' folding undef terminator: " << *BBTerm << '\n');
    BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm);
    BBTerm->eraseFromParent();
    return true;
  }

  // If the terminator of this block is branching on a constant, simplify the
  // terminator to an unconditional branch.  This can occur due to threading in
  // other blocks.
  if (getKnownConstant(Condition, Preference)) {
    DEBUG(dbgs() << "  In block '" << BB->getName()
          << "' folding terminator: " << *BB->getTerminator() << '\n');
    ++NumFolds;
    ConstantFoldTerminator(BB, true);
    return true;
  }

  Instruction *CondInst = dyn_cast<Instruction>(Condition);

  // All the rest of our checks depend on the condition being an instruction.
  if (!CondInst) {
    // FIXME: Unify this with code below.
    if (ProcessThreadableEdges(Condition, BB, Preference, Terminator))
      return true;
    return false;
  }

  if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
    // If we're branching on a conditional, LVI might be able to determine
    // its value at the branch instruction.  We only handle comparisons
    // against a constant at this time.
    // TODO: This should be extended to handle switches as well.
    BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
    Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1));
    if (CondBr && CondConst && CondBr->isConditional()) {
      LazyValueInfo::Tristate Ret =
        LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0),
                            CondConst, CondBr);
      if (Ret != LazyValueInfo::Unknown) {
        unsigned ToRemove = Ret == LazyValueInfo::True ? 1 : 0;
        unsigned ToKeep = Ret == LazyValueInfo::True ? 0 : 1;
        CondBr->getSuccessor(ToRemove)->removePredecessor(BB, true);
        BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
        CondBr->eraseFromParent();
        if (CondCmp->use_empty())
          CondCmp->eraseFromParent();
        else if (CondCmp->getParent() == BB) {
          // If the fact we just learned is true for all uses of the
          // condition, replace it with a constant value.
          auto *CI = Ret == LazyValueInfo::True ?
            ConstantInt::getTrue(CondCmp->getType()) :
            ConstantInt::getFalse(CondCmp->getType());
          CondCmp->replaceAllUsesWith(CI);
          CondCmp->eraseFromParent();
        }
        return true;
      }
    }

    if (CondBr && CondConst && TryToUnfoldSelect(CondCmp, BB))
      return true;
  }

  // Check for some cases that are worth simplifying.  Right now we want to look
  // for loads that are used by a switch or by the condition for the branch.  If
  // we see one, check to see if it's partially redundant.  If so, insert a PHI
  // which can then be used to thread the values.

  Value *SimplifyValue = CondInst;
  if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue))
    if (isa<Constant>(CondCmp->getOperand(1)))
      SimplifyValue = CondCmp->getOperand(0);

  // TODO: There are other places where load PRE would be profitable, such as
  // more complex comparisons.
  if (LoadInst *LI = dyn_cast<LoadInst>(SimplifyValue))
    if (SimplifyPartiallyRedundantLoad(LI))
      return true;

  // Handle a variety of cases where we are branching on something derived from
  // a PHI node in the current block.  If we can prove that any predecessors
  // compute a predictable value based on a PHI node, thread those predecessors.
  //
  if (ProcessThreadableEdges(CondInst, BB, Preference, Terminator))
    return true;

  // If this is an otherwise-unfoldable branch on a phi node in the current
  // block, see if we can simplify.
  if (PHINode *PN = dyn_cast<PHINode>(CondInst))
    if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
      return ProcessBranchOnPHI(PN);

  // If this is an otherwise-unfoldable branch on a XOR, see if we can simplify.
  if (CondInst->getOpcode() == Instruction::Xor &&
      CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
    return ProcessBranchOnXOR(cast<BinaryOperator>(CondInst));

  // Search for a stronger dominating condition that can be used to simplify a
  // conditional branch leaving BB.
  if (ProcessImpliedCondition(BB))
    return true;

  return false;
}

bool JumpThreadingPass::ProcessImpliedCondition(BasicBlock *BB) {
  auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  BasicBlock *CurrentBB = BB;
  BasicBlock *CurrentPred = BB->getSinglePredecessor();
  unsigned Iter = 0;

  auto &DL = BB->getModule()->getDataLayout();

  while (CurrentPred && Iter++ < ImplicationSearchThreshold) {
    auto *PBI = dyn_cast<BranchInst>(CurrentPred->getTerminator());
    if (!PBI || !PBI->isConditional())
      return false;
    if (PBI->getSuccessor(0) != CurrentBB && PBI->getSuccessor(1) != CurrentBB)
      return false;

    bool FalseDest = PBI->getSuccessor(1) == CurrentBB;
    Optional<bool> Implication =
        isImpliedCondition(PBI->getCondition(), Cond, DL, FalseDest);
    if (Implication) {
      BI->getSuccessor(*Implication ? 1 : 0)->removePredecessor(BB);
      BranchInst::Create(BI->getSuccessor(*Implication ? 0 : 1), BI);
      BI->eraseFromParent();
      return true;
    }
    CurrentBB = CurrentPred;
    CurrentPred = CurrentBB->getSinglePredecessor();
  }

  return false;
}

/// SimplifyPartiallyRedundantLoad - If LI is an obviously partially redundant
/// load instruction, eliminate it by replacing it with a PHI node.  This is an
/// important optimization that encourages jump threading, and needs to be run
/// interlaced with other jump threading tasks.
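///
/// An illustrative sketch (not from the original comment; block and value
/// names are made up): if %p is loaded on only one of two paths into 'merge',
///
///   pred1:  %v1 = load i32, i32* %p ; then br label %merge
///   pred2:  br label %merge         ; no load on this path
///   merge:  %v  = load i32, i32* %p
///
/// the load in 'merge' is partially redundant: a reload is inserted at the
/// end of pred2 and the load in 'merge' becomes a PHI of the two values.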
bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
  // Don't hack volatile and ordered loads.
  if (!LI->isUnordered()) return false;

  // If the load is defined in a block with exactly one predecessor, it can't be
  // partially redundant.
  BasicBlock *LoadBB = LI->getParent();
  if (LoadBB->getSinglePredecessor())
    return false;

  // If the load is defined in an EH pad, it can't be partially redundant,
  // because the edges between the invoke and the EH pad cannot have other
  // instructions between them.
  if (LoadBB->isEHPad())
    return false;

  Value *LoadedPtr = LI->getOperand(0);

  // If the loaded operand is defined in the LoadBB, it can't be available.
  // TODO: Could do simple PHI translation, that would be fun :)
  if (Instruction *PtrOp = dyn_cast<Instruction>(LoadedPtr))
    if (PtrOp->getParent() == LoadBB)
      return false;

  // Scan a few instructions up from the load, to see if it is obviously live at
  // the entry to its block.
  BasicBlock::iterator BBIt(LI);
  bool IsLoadCSE;
  if (Value *AvailableVal =
          FindAvailableLoadedValue(LI, LoadBB, BBIt, DefMaxInstsToScan,
                                   nullptr, &IsLoadCSE)) {
    // If the value of the load is locally available within the block, just use
    // it.  This frequently occurs for reg2mem'd allocas.

    if (IsLoadCSE) {
      LoadInst *NLI = cast<LoadInst>(AvailableVal);
      combineMetadataForCSE(NLI, LI);
    }

    // If the returned value is the load itself, replace with an undef. This can
    // only happen in dead loops.
    if (AvailableVal == LI) AvailableVal = UndefValue::get(LI->getType());
    if (AvailableVal->getType() != LI->getType())
      AvailableVal =
          CastInst::CreateBitOrPointerCast(AvailableVal, LI->getType(), "", LI);
    LI->replaceAllUsesWith(AvailableVal);
    LI->eraseFromParent();
    return true;
  }

  // Otherwise, if we scanned the whole block and got to the top of the block,
  // we know the block is locally transparent to the load.  If not, something
  // might clobber its value.
  if (BBIt != LoadBB->begin())
    return false;

  // If all of the loads and stores that feed the value have the same AA tags,
  // then we can propagate them onto any newly inserted loads.
  AAMDNodes AATags;
  LI->getAAMetadata(AATags);

  SmallPtrSet<BasicBlock*, 8> PredsScanned;
  typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> AvailablePredsTy;
  AvailablePredsTy AvailablePreds;
  BasicBlock *OneUnavailablePred = nullptr;
  SmallVector<LoadInst*, 8> CSELoads;

  // If we got here, the loaded value is transparent through to the start of the
  // block.  Check to see if it is available in any of the predecessor blocks.
  for (BasicBlock *PredBB : predecessors(LoadBB)) {
    // If we already scanned this predecessor, skip it.
    if (!PredsScanned.insert(PredBB).second)
      continue;

    // Scan the predecessor to see if the value is available in the pred.
    BBIt = PredBB->end();
    Value *PredAvailable = FindAvailableLoadedValue(LI, PredBB, BBIt,
                                                    DefMaxInstsToScan,
                                                    nullptr, &IsLoadCSE);
    if (!PredAvailable) {
      OneUnavailablePred = PredBB;
      continue;
    }

    if (IsLoadCSE)
      CSELoads.push_back(cast<LoadInst>(PredAvailable));

    // If so, this load is partially redundant.  Remember this info so that we
    // can create a PHI node.
    AvailablePreds.push_back(std::make_pair(PredBB, PredAvailable));
  }

  // If the loaded value isn't available in any predecessor, it isn't partially
  // redundant.
  if (AvailablePreds.empty()) return false;

  // Okay, the loaded value is available in at least one (and maybe all!)
  // predecessors.  If the value is unavailable in more than one unique
  // predecessor, we want to insert a merge block for those common predecessors.
  // This ensures that we only have to insert one reload, thus not increasing
  // code size.
  BasicBlock *UnavailablePred = nullptr;

  // If there is exactly one predecessor where the value is unavailable, the
  // already computed 'OneUnavailablePred' block is it.  If it ends in an
  // unconditional branch, we know that it isn't a critical edge.
  if (PredsScanned.size() == AvailablePreds.size()+1 &&
      OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) {
    UnavailablePred = OneUnavailablePred;
  } else if (PredsScanned.size() != AvailablePreds.size()) {
    // Otherwise, we had multiple unavailable predecessors or we had a critical
    // edge from the one.
    SmallVector<BasicBlock*, 8> PredsToSplit;
    SmallPtrSet<BasicBlock*, 8> AvailablePredSet;

    for (const auto &AvailablePred : AvailablePreds)
      AvailablePredSet.insert(AvailablePred.first);

    // Add all the unavailable predecessors to the PredsToSplit list.
    for (BasicBlock *P : predecessors(LoadBB)) {
      // If the predecessor is an indirect goto, we can't split the edge.
      if (isa<IndirectBrInst>(P->getTerminator()))
        return false;

      if (!AvailablePredSet.count(P))
        PredsToSplit.push_back(P);
    }

    // Split them out to their own block.
    UnavailablePred = SplitBlockPreds(LoadBB, PredsToSplit, "thread-pre-split");
  }

  // If the value isn't available in all predecessors, then there will be
  // exactly one where it isn't available.  Insert a load on that edge and add
  // it to the AvailablePreds list.
  if (UnavailablePred) {
    assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
           "Can't handle critical edge here!");
    LoadInst *NewVal =
        new LoadInst(LoadedPtr, LI->getName() + ".pr", false,
                     LI->getAlignment(), LI->getOrdering(), LI->getSynchScope(),
                     UnavailablePred->getTerminator());
    NewVal->setDebugLoc(LI->getDebugLoc());
    if (AATags)
      NewVal->setAAMetadata(AATags);

    AvailablePreds.push_back(std::make_pair(UnavailablePred, NewVal));
  }

  // Now we know that each predecessor of this block has a value in
  // AvailablePreds, sort them for efficient access as we're walking the preds.
  array_pod_sort(AvailablePreds.begin(), AvailablePreds.end());

  // Create a PHI node at the start of the block for the PRE'd load value.
  pred_iterator PB = pred_begin(LoadBB), PE = pred_end(LoadBB);
  PHINode *PN = PHINode::Create(LI->getType(), std::distance(PB, PE), "",
                                &LoadBB->front());
  PN->takeName(LI);
  PN->setDebugLoc(LI->getDebugLoc());

  // Insert new entries into the PHI for each predecessor.  A single block may
  // have multiple entries here.
  for (pred_iterator PI = PB; PI != PE; ++PI) {
    BasicBlock *P = *PI;
    AvailablePredsTy::iterator I =
      std::lower_bound(AvailablePreds.begin(), AvailablePreds.end(),
                       std::make_pair(P, (Value*)nullptr));

    assert(I != AvailablePreds.end() && I->first == P &&
           "Didn't find entry for predecessor!");

    // If we have an available predecessor but it requires casting, insert the
    // cast in the predecessor and use the cast.  Note that we have to update the
    // AvailablePreds vector as we go so that all of the PHI entries for this
    // predecessor use the same bitcast.
    Value *&PredV = I->second;
    if (PredV->getType() != LI->getType())
      PredV = CastInst::CreateBitOrPointerCast(PredV, LI->getType(), "",
                                               P->getTerminator());

    PN->addIncoming(PredV, I->first);
  }

  for (LoadInst *PredLI : CSELoads) {
    combineMetadataForCSE(PredLI, LI);
  }

  LI->replaceAllUsesWith(PN);
  LI->eraseFromParent();

  return true;
}

/// FindMostPopularDest - The specified list contains multiple possible
/// threadable destinations.  Pick the one that occurs the most frequently in
/// the successor list.
static BasicBlock *
FindMostPopularDest(BasicBlock *BB,
                    const SmallVectorImpl<std::pair<BasicBlock*,
                                  BasicBlock*> > &PredToDestList) {
  assert(!PredToDestList.empty());

  // Determine popularity.  If there are multiple possible destinations, we
  // explicitly choose to ignore 'undef' destinations.  We prefer to thread
  // blocks with known and real destinations to threading undef.  We'll handle
  // them later if interesting.
  DenseMap<BasicBlock*, unsigned> DestPopularity;
  for (const auto &PredToDest : PredToDestList)
    if (PredToDest.second)
      DestPopularity[PredToDest.second]++;

  // Find the most popular dest.
  DenseMap<BasicBlock*, unsigned>::iterator DPI = DestPopularity.begin();
  BasicBlock *MostPopularDest = DPI->first;
  unsigned Popularity = DPI->second;
  SmallVector<BasicBlock*, 4> SamePopularity;

  for (++DPI; DPI != DestPopularity.end(); ++DPI) {
    // If the popularity of this entry isn't higher than the popularity we've
    // seen so far, ignore it.
    if (DPI->second < Popularity)
      ; // ignore.
    else if (DPI->second == Popularity) {
      // If it is the same as what we've seen so far, keep track of it.
      SamePopularity.push_back(DPI->first);
    } else {
      // If it is more popular, remember it.
      SamePopularity.clear();
      MostPopularDest = DPI->first;
      Popularity = DPI->second;
    }
  }

  // Okay, now we know the most popular destination.  If there is more than one
  // destination, we need to determine one.  This is arbitrary, but we need
  // to make a deterministic decision.  Pick the first one that appears in the
  // successor list.
  if (!SamePopularity.empty()) {
    SamePopularity.push_back(MostPopularDest);
    TerminatorInst *TI = BB->getTerminator();
    for (unsigned i = 0; ; ++i) {
      assert(i != TI->getNumSuccessors() && "Didn't find any successor!");

      if (!is_contained(SamePopularity, TI->getSuccessor(i)))
        continue;

      MostPopularDest = TI->getSuccessor(i);
      break;
    }
  }

  // Okay, we have finally picked the most popular destination.
  return MostPopularDest;
}

bool JumpThreadingPass::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
                                               ConstantPreference Preference,
                                               Instruction *CxtI) {
  // If threading this would thread across a loop header, don't even try to
  // thread the edge.
  if (LoopHeaders.count(BB))
    return false;

  PredValueInfoTy PredValues;
  if (!ComputeValueKnownInPredecessors(Cond, BB, PredValues, Preference, CxtI))
    return false;

  assert(!PredValues.empty() &&
         "ComputeValueKnownInPredecessors returned true with no values");

  DEBUG(dbgs() << "IN BB: " << *BB;
        for (const auto &PredValue : PredValues) {
          dbgs() << "  BB '" << BB->getName() << "': FOUND condition = "
            << *PredValue.first
            << " for pred '" << PredValue.second->getName() << "'.\n";
        });

  // Decide what we want to thread through.  Convert our list of known values to
  // a list of known destinations for each pred.  This also discards duplicate
  // predecessors and keeps track of the undefined inputs (which are represented
  // as a null dest in the PredToDestList).
  SmallPtrSet<BasicBlock*, 16> SeenPreds;
  SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;

  BasicBlock *OnlyDest = nullptr;
  BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;

  for (const auto &PredValue : PredValues) {
    BasicBlock *Pred = PredValue.second;
    if (!SeenPreds.insert(Pred).second)
      continue;  // Duplicate predecessor entry.

    // If the predecessor ends with an indirect goto, we can't change its
    // destination.
    if (isa<IndirectBrInst>(Pred->getTerminator()))
      continue;

    Constant *Val = PredValue.first;

    BasicBlock *DestBB;
    if (isa<UndefValue>(Val))
      DestBB = nullptr;
    else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()))
      DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
      DestBB = SI->findCaseValue(cast<ConstantInt>(Val)).getCaseSuccessor();
    } else {
      assert(isa<IndirectBrInst>(BB->getTerminator())
              && "Unexpected terminator");
      DestBB = cast<BlockAddress>(Val)->getBasicBlock();
    }

    // If we have exactly one destination, remember it for efficiency below.
    if (PredToDestList.empty())
      OnlyDest = DestBB;
    else if (OnlyDest != DestBB)
      OnlyDest = MultipleDestSentinel;

    PredToDestList.push_back(std::make_pair(Pred, DestBB));
  }

  // If all edges were unthreadable, we fail.
  if (PredToDestList.empty())
    return false;

  // Determine which is the most common successor.  If we have many inputs and
  // this block is a switch, we want to start by threading the batch that goes
  // to the most popular destination first.  If we only know about one
  // threadable destination (the common case) we can avoid this.
  BasicBlock *MostPopularDest = OnlyDest;

  if (MostPopularDest == MultipleDestSentinel)
    MostPopularDest = FindMostPopularDest(BB, PredToDestList);

  // Now that we know what the most popular destination is, factor all
  // predecessors that will jump to it into a single predecessor.
  SmallVector<BasicBlock*, 16> PredsToFactor;
  for (const auto &PredToDest : PredToDestList)
    if (PredToDest.second == MostPopularDest) {
      BasicBlock *Pred = PredToDest.first;

      // This predecessor may be a switch or something else that has multiple
      // edges to the block.  Factor each of these edges by listing them
      // according to # occurrences in PredsToFactor.
      for (BasicBlock *Succ : successors(Pred))
        if (Succ == BB)
          PredsToFactor.push_back(Pred);
    }

  // If the threadable edges are branching on an undefined value, we get to pick
  // the destination that these predecessors should get to.
  if (!MostPopularDest)
    MostPopularDest = BB->getTerminator()->
                            getSuccessor(GetBestDestForJumpOnUndef(BB));

  // Ok, try to thread it!
  return ThreadEdge(BB, PredsToFactor, MostPopularDest);
}

/// ProcessBranchOnPHI - We have an otherwise unthreadable conditional branch on
/// a PHI node in the current block.  See if there are any simplifications we
/// can do based on inputs to the phi node.
///
bool JumpThreadingPass::ProcessBranchOnPHI(PHINode *PN) {
  BasicBlock *BB = PN->getParent();

  // TODO: We could make use of this to do it once for blocks with common PHI
  // values.
  SmallVector<BasicBlock*, 1> PredBBs;
  PredBBs.resize(1);

  // If any of the predecessor blocks end in an unconditional branch, we can
  // *duplicate* the conditional branch into that block in order to further
  // encourage jump threading and to eliminate cases where we have branch on a
  // phi of an icmp (branch on icmp is much better).
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *PredBB = PN->getIncomingBlock(i);
    if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()))
      if (PredBr->isUnconditional()) {
        PredBBs[0] = PredBB;
        // Try to duplicate BB into PredBB.
        if (DuplicateCondBranchOnPHIIntoPred(BB, PredBBs))
          return true;
      }
  }

  return false;
}

/// ProcessBranchOnXOR - We have an otherwise unthreadable conditional branch on
/// a xor instruction in the current block.  See if there are any
/// simplifications we can do based on inputs to the xor.
///
bool JumpThreadingPass::ProcessBranchOnXOR(BinaryOperator *BO) {
  BasicBlock *BB = BO->getParent();

  // If either the LHS or RHS of the xor is a constant, don't do this
  // transformation.
  if (isa<ConstantInt>(BO->getOperand(0)) ||
      isa<ConstantInt>(BO->getOperand(1)))
    return false;

  // If the first instruction in BB isn't a phi, we won't be able to infer
  // anything special about any particular predecessor.
  if (!isa<PHINode>(BB->front()))
    return false;

  // If this BB is a landing pad, we won't be able to split the edge into it.
  if (BB->isEHPad())
    return false;

  // If we have a xor as the branch input to this block, and we know that the
  // LHS or RHS of the xor in any predecessor is true/false, then we can clone
  // the condition into the predecessor and fix that value to true, saving some
  // logical ops on that path and encouraging other paths to simplify.
  //
  // This copies something like this:
  //
  //  BB:
  //   %X = phi i1 [1],  [%X']
  //   %Y = icmp eq i32 %A, %B
  //   %Z = xor i1 %X, %Y
  //   br i1 %Z, ...
  //
  // Into:
  //  BB':
  //   %Y = icmp ne i32 %A, %B
  //   br i1 %Y, ...

  PredValueInfoTy XorOpValues;
  bool isLHS = true;
  if (!ComputeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues,
                                       WantInteger, BO)) {
    assert(XorOpValues.empty());
    if (!ComputeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues,
                                         WantInteger, BO))
      return false;
    isLHS = false;
  }

  assert(!XorOpValues.empty() &&
         "ComputeValueKnownInPredecessors returned true with no values");

  // Scan the information to see which is most popular: true or false.  The
  // predecessors can be of the set true, false, or undef.
  unsigned NumTrue = 0, NumFalse = 0;
  for (const auto &XorOpValue : XorOpValues) {
    if (isa<UndefValue>(XorOpValue.first))
      // Ignore undefs for the count.
      continue;
    if (cast<ConstantInt>(XorOpValue.first)->isZero())
      ++NumFalse;
    else
      ++NumTrue;
  }

  // Determine which value to split on, true, false, or undef if neither.
  ConstantInt *SplitVal = nullptr;
  if (NumTrue > NumFalse)
    SplitVal = ConstantInt::getTrue(BB->getContext());
  else if (NumTrue != 0 || NumFalse != 0)
    SplitVal = ConstantInt::getFalse(BB->getContext());

  // Collect all of the blocks that this can be folded into so that we can
  // factor this once and clone it once.
  SmallVector<BasicBlock*, 8> BlocksToFoldInto;
  for (const auto &XorOpValue : XorOpValues) {
    if (XorOpValue.first != SplitVal && !isa<UndefValue>(XorOpValue.first))
      continue;

    BlocksToFoldInto.push_back(XorOpValue.second);
  }

  // If we inferred a value for all of the predecessors, then duplication won't
  // help us.  However, we can just replace the LHS or RHS with the constant.
  if (BlocksToFoldInto.size() ==
        cast<PHINode>(BB->front()).getNumIncomingValues()) {
    if (!SplitVal) {
      // If all preds provide undef, just nuke the xor, because it is undef too.
      BO->replaceAllUsesWith(UndefValue::get(BO->getType()));
      BO->eraseFromParent();
    } else if (SplitVal->isZero()) {
      // If all preds provide 0, replace the xor with the other input.
      BO->replaceAllUsesWith(BO->getOperand(isLHS));
      BO->eraseFromParent();
    } else {
      // If all preds provide 1, set the computed value to 1.
      BO->setOperand(!isLHS, SplitVal);
    }

    return true;
  }

  // Try to duplicate BB into PredBB.
  return DuplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto);
}

/// AddPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new
/// predecessor to the PHIBB block.  If it has PHI nodes, add entries for
/// NewPred using the entries from OldPred (suitably mapped).
static void AddPHINodeEntriesForMappedBlock(BasicBlock *PHIBB,
                                            BasicBlock *OldPred,
                                            BasicBlock *NewPred,
                                     DenseMap<Instruction*, Value*> &ValueMap) {
  for (BasicBlock::iterator PNI = PHIBB->begin();
       PHINode *PN = dyn_cast<PHINode>(PNI); ++PNI) {
    // Ok, we have a PHI node.  Figure out what the incoming value was for the
    // OldPred block.
    Value *IV = PN->getIncomingValueForBlock(OldPred);

    // Remap the value if necessary.
    if (Instruction *Inst = dyn_cast<Instruction>(IV)) {
      DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst);
      if (I != ValueMap.end())
        IV = I->second;
    }

    PN->addIncoming(IV, NewPred);
  }
}

/// ThreadEdge - We have decided that it is safe and profitable to factor the
/// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB
/// across BB.  Transform the IR to reflect this change.
bool JumpThreadingPass::ThreadEdge(BasicBlock *BB,
                                   const SmallVectorImpl<BasicBlock *> &PredBBs,
                                   BasicBlock *SuccBB) {
  // If threading to the same block as we come from, we would infinite loop.
  if (SuccBB == BB) {
    DEBUG(dbgs() << "  Not threading across BB '" << BB->getName()
          << "' - would thread to self!\n");
    return false;
  }

  // If threading this would thread across a loop header, don't thread the edge.
  // See the comments above FindLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB)) {
    DEBUG(dbgs() << "  Not threading across loop header BB '" << BB->getName()
          << "' to dest BB '" << SuccBB->getName()
          << "' - it might create an irreducible loop!\n");
    return false;
  }

  unsigned JumpThreadCost = getJumpThreadDuplicationCost(BB, BBDupThreshold);
  if (JumpThreadCost > BBDupThreshold) {
    DEBUG(dbgs() << "  Not threading BB '" << BB->getName()
          << "' - Cost is too high: " << JumpThreadCost << "\n");
    return false;
  }

  // And finally, do it!  Start by factoring the predecessors if needed.
  BasicBlock *PredBB;
  if (PredBBs.size() == 1)
    PredBB = PredBBs[0];
  else {
    DEBUG(dbgs() << "  Factoring out " << PredBBs.size()
          << " common predecessors.\n");
    PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm");
  }

  // And finally, do it!
  DEBUG(dbgs() << "  Threading edge from '" << PredBB->getName() << "' to '"
        << SuccBB->getName() << "' with cost: " << JumpThreadCost
        << ", across block:\n    "
        << *BB << "\n");

  LVI->threadEdge(PredBB, BB, SuccBB);

  // We are going to have to map operands from the original BB block to the new
  // copy of the block 'NewBB'.  If there are PHI nodes in BB, evaluate them to
  // account for entry from PredBB.
  DenseMap<Instruction*, Value*> ValueMapping;

  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(),
                                         BB->getName()+".thread",
                                         BB->getParent(), BB);
  NewBB->moveAfter(PredBB);

  // Set the block frequency of NewBB.
  if (HasProfileData) {
    auto NewBBFreq =
        BFI->getBlockFreq(PredBB) * BPI->getEdgeProbability(PredBB, BB);
    BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
  }

  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);

  // Clone the non-phi instructions of BB into NewBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  for (; !isa<TerminatorInst>(BI); ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    NewBB->getInstList().push_back(New);
    ValueMapping[&*BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }
  }

  // We didn't copy the terminator from BB over to NewBB, because there is now
  // an unconditional jump to SuccBB.  Insert the unconditional jump.
  BranchInst *NewBI = BranchInst::Create(SuccBB, NewBB);
  NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc());

  // Check to see if SuccBB has PHI nodes.  If so, we need to add entries to the
  // PHI nodes for NewBB now.
  AddPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping);

  // If there were values defined in BB that are used outside the block, then we
  // now have to update all uses of the value to use either the original value,
  // the cloned value, or some PHI derived value.  This can require arbitrary
  // PHI insertion, which we are prepared to do; clean these up now.
  SSAUpdater SSAUpdate;
  SmallVector<Use*, 16> UsesToRename;
  for (Instruction &I : *BB) {
    // Scan all uses of this instruction to see if it is used outside of its
    // block, and if so, record them in UsesToRename.
    for (Use &U : I.uses()) {
      Instruction *User = cast<Instruction>(U.getUser());
      if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
        if (UserPN->getIncomingBlock(U) == BB)
          continue;
      } else if (User->getParent() == BB)
        continue;

      UsesToRename.push_back(&U);
    }

    // If there are no uses outside the block, we're done with this instruction.
    if (UsesToRename.empty())
      continue;

    DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");

    // We found a use of I outside of BB.  Rename all uses of I that are outside
    // its block to be uses of the appropriate PHI node etc.  See ValuesInBlocks
    // with the two values we know.
    SSAUpdate.Initialize(I.getType(), I.getName());
    SSAUpdate.AddAvailableValue(BB, &I);
    SSAUpdate.AddAvailableValue(NewBB, ValueMapping[&I]);

    while (!UsesToRename.empty())
      SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
    DEBUG(dbgs() << "\n");
  }

  // Ok, NewBB is good to go.  Update the terminator of PredBB to jump to
  // NewBB instead of BB.  This eliminates predecessors from BB, which requires
  // us to simplify any PHI nodes in BB.
  TerminatorInst *PredTerm = PredBB->getTerminator();
  for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i)
    if (PredTerm->getSuccessor(i) == BB) {
      BB->removePredecessor(PredBB, true);
      PredTerm->setSuccessor(i, NewBB);
    }

  // At this point, the IR is fully up to date and consistent.  Do a quick scan
  // over the new instructions and zap any that are constants or dead.  This
  // frequently happens because of phi translation.
  SimplifyInstructionsInBlock(NewBB, TLI);

  // Update the edge weight from BB to SuccBB, which should be less than before.
  UpdateBlockFreqAndEdgeWeight(PredBB, BB, NewBB, SuccBB);

  // Threaded an edge!
  ++NumThreads;
  return true;
}

/// Create a new basic block that will be the predecessor of BB and successor
/// of all blocks in Preds.  When profile data is available, update the
/// frequency of this new block.
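///
/// For illustration (numbers are hypothetical): with Preds = {P1, P2},
/// freq(P1) * prob(P1->BB) == 10 and freq(P2) * prob(P2->BB) == 5, the new
/// block is assigned frequency 15, since all Preds->BB edges now flow
/// through it.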
BasicBlock *JumpThreadingPass::SplitBlockPreds(BasicBlock *BB,
                                               ArrayRef<BasicBlock *> Preds,
                                               const char *Suffix) {
  // Collect the frequencies of all predecessors of BB, which will be used to
  // update the edge weight on BB->SuccBB.
  BlockFrequency PredBBFreq(0);
  if (HasProfileData)
    for (auto Pred : Preds)
      PredBBFreq += BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB);

  BasicBlock *PredBB = SplitBlockPredecessors(BB, Preds, Suffix);

  // Set the block frequency of the newly created PredBB, which is the sum of
  // frequencies of Preds.
  if (HasProfileData)
    BFI->setBlockFreq(PredBB, PredBBFreq.getFrequency());
  return PredBB;
}
bool JumpThreadingPass::doesBlockHaveProfileData(BasicBlock *BB) {
  const TerminatorInst *TI = BB->getTerminator();
  assert(TI->getNumSuccessors() > 1 && "not a split");

  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  MDString *MDName = cast<MDString>(WeightsNode->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return false;

  // Ensure there are weights for all of the successors.  Note that the first
  // operand to the metadata node is a name, not a weight.
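  //
  // For reference, branch-weight metadata on a two-successor terminator has
  // the form (the weights shown are hypothetical):
  //   !0 = !{!"branch_weights", i32 20, i32 10}
  // i.e. one name operand followed by one weight per successor, which is the
  // shape the check below expects.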
  return WeightsNode->getNumOperands() == TI->getNumSuccessors() + 1;
}
/// Update the block frequency of BB and the branch weight metadata on the
/// edge BB->SuccBB.  This is done by scaling the weight of BB->SuccBB by
/// 1 - Freq(PredBB->BB) / Freq(BB->SuccBB).
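///
/// As an illustrative example (numbers are hypothetical): if Freq(BB) == 100,
/// Freq(NewBB) == 30, and Prob(BB->SuccBB) == 60%, then Freq(BB->SuccBB) == 60
/// before threading.  Afterwards BB's frequency drops to 100 - 30 == 70 and
/// the BB->SuccBB edge's frequency to 60 - 30 == 30, because the flow entering
/// through the threaded predecessor now bypasses BB.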
void JumpThreadingPass::UpdateBlockFreqAndEdgeWeight(BasicBlock *PredBB,
                                                     BasicBlock *BB,
                                                     BasicBlock *NewBB,
                                                     BasicBlock *SuccBB) {
  if (!HasProfileData)
    return;

  assert(BFI && BPI && "BFI & BPI should have been created here");
  // As the edge from PredBB to BB is deleted, we have to update the block
  // frequency of BB.
  auto BBOrigFreq = BFI->getBlockFreq(BB);
  auto NewBBFreq = BFI->getBlockFreq(NewBB);
  auto BB2SuccBBFreq = BBOrigFreq * BPI->getEdgeProbability(BB, SuccBB);
  auto BBNewFreq = BBOrigFreq - NewBBFreq;
  BFI->setBlockFreq(BB, BBNewFreq.getFrequency());
  // Collect updated outgoing edges' frequencies from BB and use them to update
  // edge probabilities.
  SmallVector<uint64_t, 4> BBSuccFreq;
  for (BasicBlock *Succ : successors(BB)) {
    auto SuccFreq = (Succ == SuccBB)
                        ? BB2SuccBBFreq - NewBBFreq
                        : BBOrigFreq * BPI->getEdgeProbability(BB, Succ);
    BBSuccFreq.push_back(SuccFreq.getFrequency());
  }
  uint64_t MaxBBSuccFreq =
      *std::max_element(BBSuccFreq.begin(), BBSuccFreq.end());

  SmallVector<BranchProbability, 4> BBSuccProbs;
  if (MaxBBSuccFreq == 0)
    BBSuccProbs.assign(BBSuccFreq.size(),
                       {1, static_cast<unsigned>(BBSuccFreq.size())});
  else {
    for (uint64_t Freq : BBSuccFreq)
      BBSuccProbs.push_back(
          BranchProbability::getBranchProbability(Freq, MaxBBSuccFreq));
    // Normalize edge probabilities so that they sum up to one.
    BranchProbability::normalizeProbabilities(BBSuccProbs.begin(),
                                              BBSuccProbs.end());
  }
  // Update edge probabilities in BPI.
  for (int I = 0, E = BBSuccProbs.size(); I < E; I++)
    BPI->setEdgeProbability(BB, I, BBSuccProbs[I]);
  // Update the profile metadata as well.
  //
  // Don't do this if the profile of the transformed blocks was statically
  // estimated.  (This could occur despite the function having an entry
  // frequency in completely cold parts of the CFG.)
  //
  // In this case we don't want to suggest to subsequent passes that the
  // calculated weights are fully consistent.  Consider this graph:
  //
  //                 check_1
  //             50% /  |
  //             eq_1   | 50%
  //                 \  |
  //                 check_2
  //             50% /  |
  //             eq_2   | 50%
  //                 \  |
  //                 check_3
  //             50% /  |
  //             eq_3   | 50%
  //                 \  |
  //
  // Assuming the blocks check_* all compare the same value against 1, 2 and 3,
  // the overall probabilities are inconsistent; the total probability that the
  // value is either 1, 2 or 3 is 150%.
  //
  // As a consequence if we thread eq_1 -> check_2 to check_3, check_2->check_3
  // becomes 0%.  This is even worse if the edge whose probability becomes 0%
  // is the loop exit edge.  Then based solely on static estimation we would
  // assume the loop was extremely hot.
  //
  // FIXME: Handle this case locally as well so that BPI and BFI stay
  // consistent.  We shouldn't make edges extremely likely or unlikely based
  // solely on static estimation.
  if (BBSuccProbs.size() >= 2 && doesBlockHaveProfileData(BB)) {
    SmallVector<uint32_t, 4> Weights;
    for (auto Prob : BBSuccProbs)
      Weights.push_back(Prob.getNumerator());

    auto TI = BB->getTerminator();
    TI->setMetadata(
        LLVMContext::MD_prof,
        MDBuilder(TI->getParent()->getContext()).createBranchWeights(Weights));
  }
}
/// DuplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch
/// to BB which contains an i1 PHI node and a conditional branch on that PHI.
/// If we can duplicate the contents of BB up into PredBB do so now, this
/// improves the odds that the branch will be on an analyzable instruction
/// like a compare.
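///
/// An illustrative sketch (IR names are hypothetical).  Before:
///
///   pred:
///     %c = icmp eq i32 %x, 0
///     br label %bb
///   bb:
///     %p = phi i1 [ %c, %pred ], ...
///     br i1 %p, label %t, label %f
///
/// After duplicating bb into pred, pred ends in 'br i1 %c, label %t, label
/// %f': the branch is now on the analyzable compare rather than on the PHI.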
bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred(
    BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) {
  assert(!PredBBs.empty() && "Can't handle an empty set");
  // If BB is a loop header, then duplicating this block outside the loop would
  // cause us to transform this into an irreducible loop; don't do this.
  // See the comments above FindLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB)) {
    DEBUG(dbgs() << "  Not duplicating loop header '" << BB->getName()
          << "' into predecessor block '" << PredBBs[0]->getName()
          << "' - it might create an irreducible loop!\n");
    return false;
  }

  unsigned DuplicationCost = getJumpThreadDuplicationCost(BB, BBDupThreshold);
  if (DuplicationCost > BBDupThreshold) {
    DEBUG(dbgs() << "  Not duplicating BB '" << BB->getName()
          << "' - Cost is too high: " << DuplicationCost << "\n");
    return false;
  }
  // And finally, do it!  Start by factoring the predecessors if needed.
  BasicBlock *PredBB;
  if (PredBBs.size() == 1)
    PredBB = PredBBs[0];
  else {
    DEBUG(dbgs() << "  Factoring out " << PredBBs.size()
          << " common predecessors.\n");
    PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm");
  }
  // Okay, we decided to do this!  Clone all the instructions in BB onto the
  // end of PredBB.
  DEBUG(dbgs() << "  Duplicating block '" << BB->getName() << "' into end of '"
        << PredBB->getName() << "' to eliminate branch on phi.  Cost: "
        << DuplicationCost << " block is:" << *BB << "\n");
  // Unless PredBB ends with an unconditional branch, split the edge so that we
  // can just clone the bits from BB into the end of the new PredBB.
  BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator());

  if (!OldPredBranch || !OldPredBranch->isUnconditional()) {
    PredBB = SplitEdge(PredBB, BB);
    OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
  }
  // We are going to have to map operands from the original BB block into the
  // PredBB block.  Evaluate PHI nodes in BB.
  DenseMap<Instruction*, Value*> ValueMapping;

  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);
  // Clone the non-phi instructions of BB into PredBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  for (; BI != BB->end(); ++BI) {
    Instruction *New = BI->clone();

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }
    // If this instruction can be simplified after the operands are updated,
    // just use the simplified value instead.  This frequently happens due to
    // phi translation.
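    //
    // For example (hypothetical values): if BB contains
    //   %cmp = icmp eq i32 %p, 42
    // and the PHI %p maps to the constant 42 along the PredBB edge, the cloned
    // compare folds to 'true' and need not be inserted into PredBB at all.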
    if (Value *IV =
            SimplifyInstruction(New, BB->getModule()->getDataLayout())) {
      ValueMapping[&*BI] = IV;
      if (!New->mayHaveSideEffects()) {
        delete New;
        New = nullptr;
      }
    } else {
      ValueMapping[&*BI] = New;
    }
    if (New) {
      // Otherwise, insert the new instruction into the block.
      New->setName(BI->getName());
      PredBB->getInstList().insert(OldPredBranch->getIterator(), New);
    }
  }
  // Check to see if the targets of the branch had PHI nodes.  If so, we need
  // to add entries to the PHI nodes for branch from PredBB now.
  BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator());
  AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB,
                                  ValueMapping);
  AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB,
                                  ValueMapping);
  // If there were values defined in BB that are used outside the block, then
  // we now have to update all uses of the value to use either the original
  // value, the cloned value, or some PHI derived value.  This can require
  // arbitrary PHI insertion, which we are prepared to do; clean these up now.
  SSAUpdater SSAUpdate;
  SmallVector<Use*, 16> UsesToRename;
  for (Instruction &I : *BB) {
    // Scan all uses of this instruction to see if it is used outside of its
    // block, and if so, record them in UsesToRename.
    for (Use &U : I.uses()) {
      Instruction *User = cast<Instruction>(U.getUser());
      if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
        if (UserPN->getIncomingBlock(U) == BB)
          continue;
      } else if (User->getParent() == BB)
        continue;

      UsesToRename.push_back(&U);
    }

    // If there are no uses outside the block, we're done with this
    // instruction.
    if (UsesToRename.empty())
      continue;

    DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");

    // We found a use of I outside of BB.  Rename all uses of I that are
    // outside its block to be uses of the appropriate PHI node etc.  See
    // ValuesInBlocks with the two values we know.
    SSAUpdate.Initialize(I.getType(), I.getName());
    SSAUpdate.AddAvailableValue(BB, &I);
    SSAUpdate.AddAvailableValue(PredBB, ValueMapping[&I]);

    while (!UsesToRename.empty())
      SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
    DEBUG(dbgs() << "\n");
  }
  // PredBB no longer jumps to BB; remove entries in the PHI node for the edge
  // that we nuked above.
  BB->removePredecessor(PredBB, true);

  // Remove the unconditional branch at the end of the PredBB block.
  OldPredBranch->eraseFromParent();

  ++NumDupes;
  return true;
}
/// TryToUnfoldSelect - Look for blocks of the form
///
/// bb1:
///   %a = select
///   br bb2
///
/// bb2:
///   %p = phi [%a, %bb1] ...
///   %c = icmp %p
///   br i1 %c
///
/// And expand the select into a branch structure if one of its arms allows %c
/// to be folded.  This later enables threading from bb1 over bb2.
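///
/// An illustrative sketch of the unfolded form (the "select.unfold" name
/// matches the block created below; the other names are hypothetical):
///
/// bb1:
///   br i1 %cond, label %select.unfold, label %bb2
/// select.unfold:
///   br bb2
///
/// where %p now takes the select's false value from bb1 and its true value
/// from select.unfold, so at least one incoming edge lets %c constant fold.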
bool JumpThreadingPass::TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) {
  BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
  PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0));
  Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1));

  if (!CondBr || !CondBr->isConditional() || !CondLHS ||
      CondLHS->getParent() != BB)
    return false;
  for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *Pred = CondLHS->getIncomingBlock(I);
    SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I));

    // Look if one of the incoming values is a select in the corresponding
    // predecessor.
    if (!SI || SI->getParent() != Pred || !SI->hasOneUse())
      continue;

    BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator());
    if (!PredTerm || !PredTerm->isUnconditional())
      continue;
    // Now check if one of the select values would allow us to constant fold
    // the terminator in BB.  We don't do the transform if both sides fold,
    // those cases will be threaded in any case.
    LazyValueInfo::Tristate LHSFolds =
        LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1),
                                CondRHS, Pred, BB, CondCmp);
    LazyValueInfo::Tristate RHSFolds =
        LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2),
                                CondRHS, Pred, BB, CondCmp);
    if ((LHSFolds != LazyValueInfo::Unknown ||
         RHSFolds != LazyValueInfo::Unknown) &&
        LHSFolds != RHSFolds) {
      // Expand the select.
      //
      // Pred --
      //  |    v
      //  |  NewBB
      //  |    |
      //  |-----
      //  v
      // BB
      BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold",
                                             BB->getParent(), BB);
      // Move the unconditional branch to NewBB.
      PredTerm->removeFromParent();
      NewBB->getInstList().insert(NewBB->end(), PredTerm);
      // Create a conditional branch and update PHI nodes.
      BranchInst::Create(NewBB, BB, SI->getCondition(), Pred);
      CondLHS->setIncomingValue(I, SI->getFalseValue());
      CondLHS->addIncoming(SI->getTrueValue(), NewBB);
      // The select is now dead.
      SI->eraseFromParent();

      // Update any other PHI nodes in BB.
      for (BasicBlock::iterator BI = BB->begin();
           PHINode *Phi = dyn_cast<PHINode>(BI); ++BI)
        if (Phi != CondLHS)
          Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB);
      return true;
    }
  }
  return false;
}
/// TryToUnfoldSelectInCurrBB - Look for PHI/Select in the same BB of the form
///
/// bb:
///   %p = phi [false, %bb1], [true, %bb2], [false, %bb3], [true, %bb4], ...
///   %s = select p, trueval, falseval
///
/// And expand the select into a branch structure.  This later enables
/// jump-threading over bb in this pass.
/// Using an approach similar to SimplifyCFG::FoldCondBranchOnPHI(), unfold
/// the select if the associated PHI has at least one constant.  If the
/// unfolded select is not jump-threaded, it will be folded again in later
/// optimizations.
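///
/// An illustrative sketch of the unfolded form (block names are
/// hypothetical):
///
/// bb:
///   %p = phi [false, %bb1], [true, %bb2], ...
///   br i1 %p, label %bb.then, label %bb.split
/// bb.then:
///   br label %bb.split
/// bb.split:
///   %s = phi [trueval, %bb.then], [falseval, %bb]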
bool JumpThreadingPass::TryToUnfoldSelectInCurrBB(BasicBlock *BB) {
  // If threading this would thread across a loop header, don't thread the
  // edge.  See the comments above FindLoopHeaders for justifications and
  // caveats.
  if (LoopHeaders.count(BB))
    return false;
  // Look for a Phi/Select pair in the same basic block.  The Phi feeds the
  // condition of the Select and at least one of the incoming values is a
  // constant.
  for (BasicBlock::iterator BI = BB->begin();
       PHINode *PN = dyn_cast<PHINode>(BI); ++BI) {
    unsigned NumPHIValues = PN->getNumIncomingValues();
    if (NumPHIValues == 0 || !PN->hasOneUse())
      continue;

    SelectInst *SI = dyn_cast<SelectInst>(PN->user_back());
    if (!SI || SI->getParent() != BB)
      continue;

    Value *Cond = SI->getCondition();
    if (!Cond || Cond != PN || !Cond->getType()->isIntegerTy(1))
      continue;
    bool HasConst = false;
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      if (PN->getIncomingBlock(i) == BB)
        return false;
      if (isa<ConstantInt>(PN->getIncomingValue(i)))
        HasConst = true;
    }
    if (HasConst) {
      // Expand the select.
      TerminatorInst *Term =
          SplitBlockAndInsertIfThen(SI->getCondition(), SI, false);
      PHINode *NewPN = PHINode::Create(SI->getType(), 2, "", SI);
      NewPN->addIncoming(SI->getTrueValue(), Term->getParent());
      NewPN->addIncoming(SI->getFalseValue(), BB);
      SI->replaceAllUsesWith(NewPN);
      SI->eraseFromParent();
      return true;
    }
  }
  return false;
}