//===- JumpThreading.cpp - Thread control through conditional blocks -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Jump Threading pass.
//
//===----------------------------------------------------------------------===//
14 #include "llvm/Transforms/Scalar/JumpThreading.h"
15 #include "llvm/ADT/DenseMap.h"
16 #include "llvm/ADT/DenseSet.h"
17 #include "llvm/ADT/Optional.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/Analysis/BlockFrequencyInfo.h"
24 #include "llvm/Analysis/BranchProbabilityInfo.h"
25 #include "llvm/Analysis/CFG.h"
26 #include "llvm/Analysis/ConstantFolding.h"
27 #include "llvm/Analysis/GlobalsModRef.h"
28 #include "llvm/Analysis/InstructionSimplify.h"
29 #include "llvm/Analysis/LazyValueInfo.h"
30 #include "llvm/Analysis/Loads.h"
31 #include "llvm/Analysis/LoopInfo.h"
32 #include "llvm/Analysis/TargetLibraryInfo.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/IR/BasicBlock.h"
35 #include "llvm/IR/CFG.h"
36 #include "llvm/IR/Constant.h"
37 #include "llvm/IR/ConstantRange.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/Dominators.h"
41 #include "llvm/IR/Function.h"
42 #include "llvm/IR/InstrTypes.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/LLVMContext.h"
48 #include "llvm/IR/MDBuilder.h"
49 #include "llvm/IR/Metadata.h"
50 #include "llvm/IR/Module.h"
51 #include "llvm/IR/PassManager.h"
52 #include "llvm/IR/PatternMatch.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/Use.h"
55 #include "llvm/IR/User.h"
56 #include "llvm/IR/Value.h"
57 #include "llvm/Pass.h"
58 #include "llvm/Support/BlockFrequency.h"
59 #include "llvm/Support/BranchProbability.h"
60 #include "llvm/Support/Casting.h"
61 #include "llvm/Support/CommandLine.h"
62 #include "llvm/Support/Debug.h"
63 #include "llvm/Support/raw_ostream.h"
64 #include "llvm/Transforms/Scalar.h"
65 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
66 #include "llvm/Transforms/Utils/Cloning.h"
67 #include "llvm/Transforms/Utils/Local.h"
68 #include "llvm/Transforms/Utils/SSAUpdater.h"
69 #include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;
using namespace jumpthreading;
81 #define DEBUG_TYPE "jump-threading"
83 STATISTIC(NumThreads, "Number of jumps threaded");
84 STATISTIC(NumFolds, "Number of terminators folded");
85 STATISTIC(NumDupes, "Number of branch blocks duplicated to eliminate phi");
87 static cl::opt<unsigned>
88 BBDuplicateThreshold("jump-threading-threshold",
89 cl::desc("Max block size to duplicate for jump threading"),
90 cl::init(6), cl::Hidden);
92 static cl::opt<unsigned>
93 ImplicationSearchThreshold(
94 "jump-threading-implication-search-threshold",
95 cl::desc("The number of predecessors to search for a stronger "
96 "condition to use to thread over a weaker condition"),
97 cl::init(3), cl::Hidden);
99 static cl::opt<bool> PrintLVIAfterJumpThreading(
100 "print-lvi-after-jump-threading",
    cl::desc("Print the LazyValueInfo cache after JumpThreading"),
    cl::init(false), cl::Hidden);

namespace {
106 /// This pass performs 'jump threading', which looks at blocks that have
107 /// multiple predecessors and multiple successors. If one or more of the
108 /// predecessors of the block can be proven to always jump to one of the
109 /// successors, we forward the edge from the predecessor to the successor by
110 /// duplicating the contents of this block.
112 /// An example of when this can occur is code like this:
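///
///   (illustrative sketch; variable names and constants are arbitrary)
///   if (a) { ...
///     X = 4;
///   }
///   if (X < 3) {
///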
119 /// In this case, the unconditional branch at the end of the first if can be
120 /// revectored to the false side of the second if.
121 class JumpThreading : public FunctionPass {
122 JumpThreadingPass Impl;
public:
  static char ID; // Pass identification
127 JumpThreading(int T = -1) : FunctionPass(ID), Impl(T) {
128 initializeJumpThreadingPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;
133 void getAnalysisUsage(AnalysisUsage &AU) const override {
134 if (PrintLVIAfterJumpThreading)
135 AU.addRequired<DominatorTreeWrapperPass>();
136 AU.addRequired<AAResultsWrapperPass>();
137 AU.addRequired<LazyValueInfoWrapperPass>();
138 AU.addPreserved<GlobalsAAWrapperPass>();
139 AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  void releaseMemory() override { Impl.releaseMemory(); }
};

} // end anonymous namespace
147 char JumpThreading::ID = 0;
149 INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading",
150 "Jump Threading", false, false)
151 INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
152 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
153 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
154 INITIALIZE_PASS_END(JumpThreading, "jump-threading",
155 "Jump Threading", false, false)
157 // Public interface to the Jump Threading pass
158 FunctionPass *llvm::createJumpThreadingPass(int Threshold) {
  return new JumpThreading(Threshold);
}
162 JumpThreadingPass::JumpThreadingPass(int T) {
  BBDupThreshold = (T == -1) ? BBDuplicateThreshold : unsigned(T);
}
// Update branch probability information according to conditional
// branch probability. This is usually made possible for cloned branches
// in inline instances by the context specific profile in the caller.
// For instance,
//
//  [Block PredBB]
//  [Branch PredBr]
//  if (t) {
//     Block A;
//  } else {
//     Block B;
//  }
//
//  [Block BB]
//  cond = PN([true, %A], [..., %B]); // PHI node
//  if (cond) {
//    ...  // P(cond == true) = 1%
//  }
//
//  Here we know that when block A is taken, cond must be true, which means
//      P(cond == true | A) = 1
//
//  Given that P(cond == true) = P(cond == true | A) * P(A) +
//                               P(cond == true | B) * P(B)
//  we get:
//     P(cond == true) = P(A) + P(cond == true | B) * P(B)
//
//  which gives us:
//     P(A) is less than P(cond == true), i.e.
//     P(t == true) <= P(cond == true)
//
//  In other words, if we know P(cond == true) is unlikely, we know
//  that P(t == true) is also unlikely.
//
201 static void updatePredecessorProfileMetadata(PHINode *PN, BasicBlock *BB) {
  BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
  if (!CondBr)
    return;
206 BranchProbability BP;
207 uint64_t TrueWeight, FalseWeight;
  if (!CondBr->extractProfMetadata(TrueWeight, FalseWeight))
    return;
211 // Returns the outgoing edge of the dominating predecessor block
212 // that leads to the PhiNode's incoming block:
213 auto GetPredOutEdge =
214 [](BasicBlock *IncomingBB,
215 BasicBlock *PhiBB) -> std::pair<BasicBlock *, BasicBlock *> {
216 auto *PredBB = IncomingBB;
217 auto *SuccBB = PhiBB;
    while (true) {
      BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator());
220 if (PredBr && PredBr->isConditional())
221 return {PredBB, SuccBB};
      auto *SinglePredBB = PredBB->getSinglePredecessor();
      if (!SinglePredBB)
        return {nullptr, nullptr};

      SuccBB = PredBB;
      PredBB = SinglePredBB;
    }
  };
230 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
231 Value *PhiOpnd = PN->getIncomingValue(i);
232 ConstantInt *CI = dyn_cast<ConstantInt>(PhiOpnd);
    if (!CI || !CI->getType()->isIntegerTy(1))
      continue;
237 BP = (CI->isOne() ? BranchProbability::getBranchProbability(
238 TrueWeight, TrueWeight + FalseWeight)
239 : BranchProbability::getBranchProbability(
240 FalseWeight, TrueWeight + FalseWeight));
242 auto PredOutEdge = GetPredOutEdge(PN->getIncomingBlock(i), BB);
    if (!PredOutEdge.first)
      continue;
246 BasicBlock *PredBB = PredOutEdge.first;
247 BranchInst *PredBr = cast<BranchInst>(PredBB->getTerminator());
249 uint64_t PredTrueWeight, PredFalseWeight;
    // FIXME: We currently only set the profile data when it is missing.
    // With PGO, this can be used to refine even existing profile data with
    // context information. This needs to be done after more performance
    // testing.
    if (PredBr->extractProfMetadata(PredTrueWeight, PredFalseWeight))
      continue;
257 // We can not infer anything useful when BP >= 50%, because BP is the
258 // upper bound probability value.
    if (BP >= BranchProbability(50, 100))
      continue;
262 SmallVector<uint32_t, 2> Weights;
263 if (PredBr->getSuccessor(0) == PredOutEdge.second) {
264 Weights.push_back(BP.getNumerator());
      Weights.push_back(BP.getCompl().getNumerator());
    } else {
      Weights.push_back(BP.getCompl().getNumerator());
      Weights.push_back(BP.getNumerator());
    }
270 PredBr->setMetadata(LLVMContext::MD_prof,
271 MDBuilder(PredBr->getParent()->getContext())
                            .createBranchWeights(Weights));
  }
}
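// For example (illustrative only): if BB's conditional branch carries
//   !prof !{!"branch_weights", i32 1, i32 99}
// and the PHI feeds `true` into BB from block A, the conditional branch in
// A's dominating predecessor is annotated with weights mirroring BP, i.e.
// roughly 1:99 against the edge that leads towards A.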
276 /// runOnFunction - Toplevel algorithm.
bool JumpThreading::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  auto TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
281 auto LVI = &getAnalysis<LazyValueInfoWrapperPass>().getLVI();
282 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
283 std::unique_ptr<BlockFrequencyInfo> BFI;
284 std::unique_ptr<BranchProbabilityInfo> BPI;
285 bool HasProfileData = F.hasProfileData();
286 if (HasProfileData) {
287 LoopInfo LI{DominatorTree(F)};
288 BPI.reset(new BranchProbabilityInfo(F, LI, TLI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }

  bool Changed = Impl.runImpl(F, TLI, LVI, AA, HasProfileData, std::move(BFI),
                              std::move(BPI));
294 if (PrintLVIAfterJumpThreading) {
295 dbgs() << "LVI for function '" << F.getName() << "':\n";
    LVI->printLVI(F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
                  dbgs());
  }
  return Changed;
}
302 PreservedAnalyses JumpThreadingPass::run(Function &F,
303 FunctionAnalysisManager &AM) {
304 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
305 auto &LVI = AM.getResult<LazyValueAnalysis>(F);
306 auto &AA = AM.getResult<AAManager>(F);
308 std::unique_ptr<BlockFrequencyInfo> BFI;
309 std::unique_ptr<BranchProbabilityInfo> BPI;
310 if (F.hasProfileData()) {
311 LoopInfo LI{DominatorTree(F)};
312 BPI.reset(new BranchProbabilityInfo(F, LI, &TLI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }

  bool Changed = runImpl(F, &TLI, &LVI, &AA, F.hasProfileData(),
                         std::move(BFI), std::move(BPI));
  if (!Changed)
    return PreservedAnalyses::all();
321 PreservedAnalyses PA;
  PA.preserve<GlobalsAA>();
  return PA;
}
326 bool JumpThreadingPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
327 LazyValueInfo *LVI_, AliasAnalysis *AA_,
328 bool HasProfileData_,
329 std::unique_ptr<BlockFrequencyInfo> BFI_,
330 std::unique_ptr<BranchProbabilityInfo> BPI_) {
  DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
  TLI = TLI_;
  LVI = LVI_;
  AA = AA_;
337 // When profile data is available, we need to update edge weights after
338 // successful jump threading, which requires both BPI and BFI being available.
339 HasProfileData = HasProfileData_;
340 auto *GuardDecl = F.getParent()->getFunction(
341 Intrinsic::getName(Intrinsic::experimental_guard));
342 HasGuards = GuardDecl && !GuardDecl->use_empty();
343 if (HasProfileData) {
344 BPI = std::move(BPI_);
345 BFI = std::move(BFI_);
348 // Remove unreachable blocks from function as they may result in infinite
349 // loop. We do threading if we found something profitable. Jump threading a
350 // branch can create other opportunities. If these opportunities form a cycle
351 // i.e. if any jump threading is undoing previous threading in the path, then
352 // we will loop forever. We take care of this issue by not jump threading for
353 // back edges. This works for normal cases but not for unreachable blocks as
354 // they may have cycle with no back edge.
355 bool EverChanged = false;
  EverChanged |= removeUnreachableBlocks(F, LVI);

  FindLoopHeaders(F);

  bool Changed;
  do {
    Changed = false;
363 for (Function::iterator I = F.begin(), E = F.end(); I != E;) {
364 BasicBlock *BB = &*I;
365 // Thread all of the branches we can over this block.
      while (ProcessBlock(BB))
        Changed = true;

      ++I;
371 // If the block is trivially dead, zap it. This eliminates the successor
372 // edges which simplifies the CFG.
373 if (pred_empty(BB) &&
374 BB != &BB->getParent()->getEntryBlock()) {
375 DEBUG(dbgs() << " JT: Deleting dead block '" << BB->getName()
376 << "' with terminator: " << *BB->getTerminator() << '\n');
        LoopHeaders.erase(BB);
        LVI->eraseBlock(BB);
        DeleteDeadBlock(BB);
        Changed = true;
        continue;
      }
384 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
386 // Can't thread an unconditional jump, but if the block is "almost
      // empty", we can replace uses of it with uses of the successor and make
      // this dead.
389 // We should not eliminate the loop header or latch either, because
390 // eliminating a loop header or latch might later prevent LoopSimplify
391 // from transforming nested loops into simplified form. We will rely on
392 // later passes in backend to clean up empty blocks.
393 if (BI && BI->isUnconditional() &&
394 BB != &BB->getParent()->getEntryBlock() &&
395 // If the terminator is the only non-phi instruction, try to nuke it.
396 BB->getFirstNonPHIOrDbg()->isTerminator() && !LoopHeaders.count(BB) &&
397 !LoopHeaders.count(BI->getSuccessor(0))) {
398 // FIXME: It is always conservatively correct to drop the info
399 // for a block even if it doesn't get erased. This isn't totally
400 // awesome, but it allows us to use AssertingVH to prevent nasty
401 // dangling pointer issues within LazyValueInfo.
        LVI->eraseBlock(BB);
        if (TryToSimplifyUncondBranchFromEmptyBlock(BB))
          Changed = true;
      }
    }
    EverChanged |= Changed;
  } while (Changed);

  LoopHeaders.clear();
  return EverChanged;
}
// Replace uses of Cond with ToVal when safe to do so. If all uses are
// replaced, we can remove Cond. We cannot blindly replace all uses of Cond
// because we may incorrectly replace uses when guards/assumes are uses of
// `Cond` and we used the guards/assumes to reason about the `Cond` value
// at the end of block. RAUW unconditionally replaces all uses
// including the guards/assumes themselves and the uses before the
// guard.
421 static void ReplaceFoldableUses(Instruction *Cond, Value *ToVal) {
422 assert(Cond->getType() == ToVal->getType());
423 auto *BB = Cond->getParent();
424 // We can unconditionally replace all uses in non-local blocks (i.e. uses
425 // strictly dominated by BB), since LVI information is true from the
427 replaceNonLocalUsesWith(Cond, ToVal);
428 for (Instruction &I : reverse(*BB)) {
    // Reached the Cond whose uses we are trying to replace, so there are no
    // more uses left to replace.
    if (&I == Cond)
      break;
433 // We only replace uses in instructions that are guaranteed to reach the end
434 // of BB, where we know Cond is ToVal.
    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
      break;
    I.replaceUsesOfWith(Cond, ToVal);
  }
439 if (Cond->use_empty() && !Cond->mayHaveSideEffects())
    Cond->eraseFromParent();
}
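// A sketch of the hazard this avoids (hypothetical IR): given
//   %c = icmp eq i32 %x, 0
//   call void (i1, ...) @llvm.experimental.guard(i1 %c) [ "deopt"() ]
//   br i1 %c, label %T, label %F
// LVI may prove %c true *after* the guard, but a blind RAUW would also rewrite
// the guard's own use of %c to `true`, erasing the very check that established
// the fact.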
443 /// Return the cost of duplicating a piece of this block from first non-phi
444 /// and before StopAt instruction to thread across it. Stop scanning the block
445 /// when exceeding the threshold. If duplication is impossible, returns ~0U.
static unsigned getJumpThreadDuplicationCost(BasicBlock *BB,
                                             Instruction *StopAt,
                                             unsigned Threshold) {
449 assert(StopAt->getParent() == BB && "Not an instruction from proper BB?");
450 /// Ignore PHI nodes, these will be flattened when duplication happens.
451 BasicBlock::const_iterator I(BB->getFirstNonPHI());
453 // FIXME: THREADING will delete values that are just used to compute the
454 // branch, so they shouldn't count against the duplication cost.
457 if (BB->getTerminator() == StopAt) {
458 // Threading through a switch statement is particularly profitable. If this
459 // block ends in a switch, decrease its cost to make it more likely to
461 if (isa<SwitchInst>(StopAt))
464 // The same holds for indirect branches, but slightly more so.
465 if (isa<IndirectBrInst>(StopAt))
469 // Bump the threshold up so the early exit from the loop doesn't skip the
470 // terminator-based Size adjustment at the end.
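  // (That adjustment is the "Size - Bonus" credit applied in the return at the
  // bottom of this function, which favours duplicating blocks that end in a
  // switch or an indirect branch.)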
473 // Sum up the cost of each instruction until we get to the terminator. Don't
474 // include the terminator because the copy won't include it.
476 for (; &*I != StopAt; ++I) {
478 // Stop scanning the block if we've reached the threshold.
479 if (Size > Threshold)
482 // Debugger intrinsics don't incur code size.
483 if (isa<DbgInfoIntrinsic>(I)) continue;
485 // If this is a pointer->pointer bitcast, it is free.
486 if (isa<BitCastInst>(I) && I->getType()->isPointerTy())
489 // Bail out if this instruction gives back a token type, it is not possible
490 // to duplicate it if it is used outside this BB.
491 if (I->getType()->isTokenTy() && I->isUsedOutsideOfBlock(BB))
494 // All other instructions count for at least one unit.
497 // Calls are more expensive. If they are non-intrinsic calls, we model them
498 // as having cost of 4. If they are a non-vector intrinsic, we model them
499 // as having cost of 2 total, and if they are a vector intrinsic, we model
500 // them as having cost 1.
501 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
502 if (CI->cannotDuplicate() || CI->isConvergent())
503 // Blocks with NoDuplicate are modelled as having infinite cost, so they
504 // are never duplicated.
506 else if (!isa<IntrinsicInst>(CI))
508 else if (!CI->getType()->isVectorTy())
513 return Size > Bonus ? Size - Bonus : 0;
516 /// FindLoopHeaders - We do not want jump threading to turn proper loop
517 /// structures into irreducible loops. Doing this breaks up the loop nesting
518 /// hierarchy and pessimizes later transformations. To prevent this from
519 /// happening, we first have to find the loop headers. Here we approximate this
520 /// by finding targets of backedges in the CFG.
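///
/// (Sketch: if an edge P->B is threaded so that P jumps straight into the
/// middle of a loop whose header is H, the loop body becomes reachable from
/// two different entries, H and the threaded target, i.e. an irreducible
/// region.)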
522 /// Note that there definitely are cases when we want to allow threading of
523 /// edges across a loop header. For example, threading a jump from outside the
524 /// loop (the preheader) to an exit block of the loop is definitely profitable.
525 /// It is also almost always profitable to thread backedges from within the loop
526 /// to exit blocks, and is often profitable to thread backedges to other blocks
527 /// within the loop (forming a nested loop). This simple analysis is not rich
528 /// enough to track all of these properties and keep it up-to-date as the CFG
529 /// mutates, so we don't allow any of these transformations.
530 void JumpThreadingPass::FindLoopHeaders(Function &F) {
531 SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
532 FindFunctionBackedges(F, Edges);
534 for (const auto &Edge : Edges)
535 LoopHeaders.insert(Edge.second);
538 /// getKnownConstant - Helper method to determine if we can thread over a
539 /// terminator with the given value as its condition, and if so what value to
540 /// use for that. What kind of value this is depends on whether we want an
541 /// integer or a block address, but an undef is always accepted.
542 /// Returns null if Val is null or not an appropriate constant.
static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
  if (!Val)
    return nullptr;
547 // Undef is "known" enough.
  if (UndefValue *U = dyn_cast<UndefValue>(Val))
    return U;
551 if (Preference == WantBlockAddress)
552 return dyn_cast<BlockAddress>(Val->stripPointerCasts());
  return dyn_cast<ConstantInt>(Val);
}
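// Example (sketch): for a conditional branch or switch we want an integer
// ConstantInt (or undef); for an indirectbr we want a BlockAddress such as
// blockaddress(@f, %bb), possibly hidden behind pointer casts.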
557 /// ComputeValueKnownInPredecessors - Given a basic block BB and a value V, see
558 /// if we can infer that the value is a known ConstantInt/BlockAddress or undef
559 /// in any of our predecessors. If so, return the known list of value and pred
560 /// BB in the result vector.
562 /// This returns true if there were any known values.
563 bool JumpThreadingPass::ComputeValueKnownInPredecessors(
564 Value *V, BasicBlock *BB, PredValueInfo &Result,
565 ConstantPreference Preference, Instruction *CxtI) {
566 // This method walks up use-def chains recursively. Because of this, we could
567 // get into an infinite loop going around loops in the use-def chain. To
568 // prevent this, keep track of what (value, block) pairs we've already visited
  // and terminate the search if we loop back to them.
  if (!RecursionSet.insert(std::make_pair(V, BB)).second)
    return false;
  // An RAII helper to remove this pair from the recursion set once the recursion
574 // stack pops back out again.
575 RecursionSetRemover remover(RecursionSet, std::make_pair(V, BB));
577 // If V is a constant, then it is known in all predecessors.
578 if (Constant *KC = getKnownConstant(V, Preference)) {
579 for (BasicBlock *Pred : predecessors(BB))
580 Result.push_back(std::make_pair(KC, Pred));
582 return !Result.empty();
585 // If V is a non-instruction value, or an instruction in a different block,
586 // then it can't be derived from a PHI.
587 Instruction *I = dyn_cast<Instruction>(V);
588 if (!I || I->getParent() != BB) {
590 // Okay, if this is a live-in value, see if it has a known value at the end
591 // of any of our predecessors.
593 // FIXME: This should be an edge property, not a block end property.
594 /// TODO: Per PR2563, we could infer value range information about a
595 /// predecessor based on its terminator.
597 // FIXME: change this to use the more-rich 'getPredicateOnEdge' method if
598 // "I" is a non-local compare-with-a-constant instruction. This would be
599 // able to handle value inequalities better, for example if the compare is
600 // "X < 4" and "X < 3" is known true but "X < 4" itself is not available.
601 // Perhaps getConstantOnEdge should be smart enough to do this?
603 for (BasicBlock *P : predecessors(BB)) {
604 // If the value is known by LazyValueInfo to be a constant in a
605 // predecessor, use that information to try to thread this block.
606 Constant *PredCst = LVI->getConstantOnEdge(V, P, BB, CxtI);
607 if (Constant *KC = getKnownConstant(PredCst, Preference))
608 Result.push_back(std::make_pair(KC, P));
611 return !Result.empty();
614 /// If I is a PHI node, then we know the incoming values for any constants.
615 if (PHINode *PN = dyn_cast<PHINode>(I)) {
616 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
617 Value *InVal = PN->getIncomingValue(i);
618 if (Constant *KC = getKnownConstant(InVal, Preference)) {
619 Result.push_back(std::make_pair(KC, PN->getIncomingBlock(i)));
621 Constant *CI = LVI->getConstantOnEdge(InVal,
622 PN->getIncomingBlock(i),
624 if (Constant *KC = getKnownConstant(CI, Preference))
625 Result.push_back(std::make_pair(KC, PN->getIncomingBlock(i)));
629 return !Result.empty();
632 // Handle Cast instructions. Only see through Cast when the source operand is
633 // PHI or Cmp and the source type is i1 to save the compilation time.
634 if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Source = CI->getOperand(0);
    if (!Source->getType()->isIntegerTy(1))
      return false;
    if (!isa<PHINode>(Source) && !isa<CmpInst>(Source))
      return false;
    ComputeValueKnownInPredecessors(Source, BB, Result, Preference, CxtI);
    if (Result.empty())
      return false;

    // Convert the known values.
    for (auto &R : Result)
      R.first = ConstantExpr::getCast(CI->getOpcode(), R.first, CI->getType());

    return true;
  }
651 // Handle some boolean conditions.
652 if (I->getType()->getPrimitiveSizeInBits() == 1) {
653 assert(Preference == WantInteger && "One-bit non-integer type?");
    // X | true  -> true
    // X & false -> false
656 if (I->getOpcode() == Instruction::Or ||
657 I->getOpcode() == Instruction::And) {
658 PredValueInfoTy LHSVals, RHSVals;
660 ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
662 ComputeValueKnownInPredecessors(I->getOperand(1), BB, RHSVals,
665 if (LHSVals.empty() && RHSVals.empty())
668 ConstantInt *InterestingVal;
669 if (I->getOpcode() == Instruction::Or)
670 InterestingVal = ConstantInt::getTrue(I->getContext());
672 InterestingVal = ConstantInt::getFalse(I->getContext());
674 SmallPtrSet<BasicBlock*, 4> LHSKnownBBs;
676 // Scan for the sentinel. If we find an undef, force it to the
677 // interesting value: x|undef -> true and x&undef -> false.
678 for (const auto &LHSVal : LHSVals)
679 if (LHSVal.first == InterestingVal || isa<UndefValue>(LHSVal.first)) {
680 Result.emplace_back(InterestingVal, LHSVal.second);
681 LHSKnownBBs.insert(LHSVal.second);
683 for (const auto &RHSVal : RHSVals)
684 if (RHSVal.first == InterestingVal || isa<UndefValue>(RHSVal.first)) {
        // If we already inferred a value for this block on the LHS, don't
        // compute it again.
        if (!LHSKnownBBs.count(RHSVal.second))
688 Result.emplace_back(InterestingVal, RHSVal.second);
691 return !Result.empty();
694 // Handle the NOT form of XOR.
695 if (I->getOpcode() == Instruction::Xor &&
696 isa<ConstantInt>(I->getOperand(1)) &&
697 cast<ConstantInt>(I->getOperand(1))->isOne()) {
698 ComputeValueKnownInPredecessors(I->getOperand(0), BB, Result,
703 // Invert the known values.
704 for (auto &R : Result)
705 R.first = ConstantExpr::getNot(R.first);
710 // Try to simplify some other binary operator values.
711 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
712 assert(Preference != WantBlockAddress
713 && "A binary operator creating a block address?");
714 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
715 PredValueInfoTy LHSVals;
716 ComputeValueKnownInPredecessors(BO->getOperand(0), BB, LHSVals,
719 // Try to use constant folding to simplify the binary operator.
720 for (const auto &LHSVal : LHSVals) {
721 Constant *V = LHSVal.first;
722 Constant *Folded = ConstantExpr::get(BO->getOpcode(), V, CI);
724 if (Constant *KC = getKnownConstant(Folded, WantInteger))
725 Result.push_back(std::make_pair(KC, LHSVal.second));
729 return !Result.empty();
732 // Handle compare with phi operand, where the PHI is defined in this block.
733 if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
734 assert(Preference == WantInteger && "Compares only produce integers");
735 Type *CmpType = Cmp->getType();
736 Value *CmpLHS = Cmp->getOperand(0);
737 Value *CmpRHS = Cmp->getOperand(1);
738 CmpInst::Predicate Pred = Cmp->getPredicate();
740 PHINode *PN = dyn_cast<PHINode>(CmpLHS);
741 if (PN && PN->getParent() == BB) {
742 const DataLayout &DL = PN->getModule()->getDataLayout();
743 // We can do this simplification if any comparisons fold to true or false.
745 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
746 BasicBlock *PredBB = PN->getIncomingBlock(i);
747 Value *LHS = PN->getIncomingValue(i);
748 Value *RHS = CmpRHS->DoPHITranslation(BB, PredBB);
750 Value *Res = SimplifyCmpInst(Pred, LHS, RHS, {DL});
752 if (!isa<Constant>(RHS))
755 LazyValueInfo::Tristate
756 ResT = LVI->getPredicateOnEdge(Pred, LHS,
757 cast<Constant>(RHS), PredBB, BB,
759 if (ResT == LazyValueInfo::Unknown)
761 Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT);
764 if (Constant *KC = getKnownConstant(Res, WantInteger))
765 Result.push_back(std::make_pair(KC, PredBB));
768 return !Result.empty();
771 // If comparing a live-in value against a constant, see if we know the
772 // live-in value on any predecessors.
773 if (isa<Constant>(CmpRHS) && !CmpType->isVectorTy()) {
774 Constant *CmpConst = cast<Constant>(CmpRHS);
776 if (!isa<Instruction>(CmpLHS) ||
777 cast<Instruction>(CmpLHS)->getParent() != BB) {
778 for (BasicBlock *P : predecessors(BB)) {
779 // If the value is known by LazyValueInfo to be a constant in a
780 // predecessor, use that information to try to thread this block.
781 LazyValueInfo::Tristate Res =
782 LVI->getPredicateOnEdge(Pred, CmpLHS,
783 CmpConst, P, BB, CxtI ? CxtI : Cmp);
784 if (Res == LazyValueInfo::Unknown)
787 Constant *ResC = ConstantInt::get(CmpType, Res);
788 Result.push_back(std::make_pair(ResC, P));
791 return !Result.empty();
    // InstCombine can fold some forms of constant range checks into
    // (icmp (add (x, C1)), C2). See if we have such a thing with
    // this pattern.
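    // For instance (sketch): for "icmp ult (add %x, 5), 10" where LVI knows %x
    // lies in [0, 3) on a predecessor edge, the shifted range [5, 8) is
    // entirely inside the "true" region [0, 10), so the compare is known true
    // on that edge.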
798 using namespace PatternMatch;
    Value *AddLHS;
    ConstantInt *AddConst;
802 if (isa<ConstantInt>(CmpConst) &&
803 match(CmpLHS, m_Add(m_Value(AddLHS), m_ConstantInt(AddConst)))) {
804 if (!isa<Instruction>(AddLHS) ||
805 cast<Instruction>(AddLHS)->getParent() != BB) {
806 for (BasicBlock *P : predecessors(BB)) {
          // If the value is known by LazyValueInfo to be a ConstantRange in
          // a predecessor, use that information to try to thread this
          // block.
810 ConstantRange CR = LVI->getConstantRangeOnEdge(
811 AddLHS, P, BB, CxtI ? CxtI : cast<Instruction>(CmpLHS));
812 // Propagate the range through the addition.
813 CR = CR.add(AddConst->getValue());
815 // Get the range where the compare returns true.
816 ConstantRange CmpRange = ConstantRange::makeExactICmpRegion(
817 Pred, cast<ConstantInt>(CmpConst)->getValue());
          Constant *ResC;
          if (CmpRange.contains(CR))
            ResC = ConstantInt::getTrue(CmpType);
          else if (CmpRange.inverse().contains(CR))
            ResC = ConstantInt::getFalse(CmpType);
          else
            continue;

          Result.push_back(std::make_pair(ResC, P));
830 return !Result.empty();
835 // Try to find a constant value for the LHS of a comparison,
836 // and evaluate it statically if we can.
837 PredValueInfoTy LHSVals;
838 ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
841 for (const auto &LHSVal : LHSVals) {
842 Constant *V = LHSVal.first;
843 Constant *Folded = ConstantExpr::getCompare(Pred, V, CmpConst);
844 if (Constant *KC = getKnownConstant(Folded, WantInteger))
845 Result.push_back(std::make_pair(KC, LHSVal.second));
848 return !Result.empty();
852 if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
853 // Handle select instructions where at least one operand is a known constant
854 // and we can figure out the condition value for any predecessor block.
855 Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference);
856 Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference);
857 PredValueInfoTy Conds;
858 if ((TrueVal || FalseVal) &&
859 ComputeValueKnownInPredecessors(SI->getCondition(), BB, Conds,
860 WantInteger, CxtI)) {
861 for (auto &C : Conds) {
862 Constant *Cond = C.first;
        // Figure out what value to use for the condition.
        bool KnownCond;
        if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) {
          KnownCond = CI->isOne();
        } else {
          assert(isa<UndefValue>(Cond) && "Unexpected condition value");
          // Either operand will do, so be sure to pick the one that's a known
          // constant.
          // FIXME: Do this more cleverly if both values are known constants?
          KnownCond = (TrueVal != nullptr);
        }
877 // See if the select has a known constant value for this predecessor.
878 if (Constant *Val = KnownCond ? TrueVal : FalseVal)
879 Result.push_back(std::make_pair(Val, C.second));
882 return !Result.empty();
886 // If all else fails, see if LVI can figure out a constant value for us.
887 Constant *CI = LVI->getConstant(V, BB, CxtI);
888 if (Constant *KC = getKnownConstant(CI, Preference)) {
889 for (BasicBlock *Pred : predecessors(BB))
890 Result.push_back(std::make_pair(KC, Pred));
893 return !Result.empty();
/// GetBestDestForJumpOnUndef - If we determine that the specified block ends
/// in an undefined jump, decide which block is best to revector to.
899 /// Since we can pick an arbitrary destination, we pick the successor with the
900 /// fewest predecessors. This should reduce the in-degree of the others.
901 static unsigned GetBestDestForJumpOnUndef(BasicBlock *BB) {
902 TerminatorInst *BBTerm = BB->getTerminator();
903 unsigned MinSucc = 0;
904 BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc);
905 // Compute the successor with the minimum number of predecessors.
906 unsigned MinNumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB));
907 for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) {
908 TestBB = BBTerm->getSuccessor(i);
909 unsigned NumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB));
    if (NumPreds < MinNumPreds) {
      MinSucc = i;
      MinNumPreds = NumPreds;
    }
  }

  return MinSucc;
}
919 static bool hasAddressTakenAndUsed(BasicBlock *BB) {
920 if (!BB->hasAddressTaken()) return false;
922 // If the block has its address taken, it may be a tree of dead constants
923 // hanging off of it. These shouldn't keep the block alive.
924 BlockAddress *BA = BlockAddress::get(BB);
925 BA->removeDeadConstantUsers();
926 return !BA->use_empty();
929 /// ProcessBlock - If there are any predecessors whose control can be threaded
930 /// through to a successor, transform them now.
931 bool JumpThreadingPass::ProcessBlock(BasicBlock *BB) {
932 // If the block is trivially dead, just return and let the caller nuke it.
933 // This simplifies other transformations.
934 if (pred_empty(BB) &&
935 BB != &BB->getParent()->getEntryBlock())
938 // If this block has a single predecessor, and if that pred has a single
939 // successor, merge the blocks. This encourages recursive jump threading
940 // because now the condition in this block can be threaded through
941 // predecessors of our predecessor block.
942 if (BasicBlock *SinglePred = BB->getSinglePredecessor()) {
943 const TerminatorInst *TI = SinglePred->getTerminator();
944 if (!TI->isExceptional() && TI->getNumSuccessors() == 1 &&
945 SinglePred != BB && !hasAddressTakenAndUsed(BB)) {
946 // If SinglePred was a loop header, BB becomes one.
947 if (LoopHeaders.erase(SinglePred))
948 LoopHeaders.insert(BB);
950 LVI->eraseBlock(SinglePred);
951 MergeBasicBlockIntoOnlyPred(BB);
953 // Now that BB is merged into SinglePred (i.e. SinglePred Code followed by
954 // BB code within one basic block `BB`), we need to invalidate the LVI
955 // information associated with BB, because the LVI information need not be
956 // true for all of BB after the merge. For example,
957 // Before the merge, LVI info and code is as follows:
958 // SinglePred: <LVI info1 for %p val>
960 // call @exit() // need not transfer execution to successor.
961 // assume(%p) // from this point on %p is true
963 // BB: <LVI info2 for %p val, i.e. %p is true>
    // Note that this LVI info for blocks BB and SinglePred is correct for %p
    // (info2 and info1 respectively). After the merge and the deletion of the
    // LVI info1 for SinglePred, we have the following code:
970 // BB: <LVI info2 for %p val>
974 // %x = use of %p <-- LVI info2 is correct from here onwards.
976 // LVI info2 for BB is incorrect at the beginning of BB.
    // Invalidate LVI information for BB if the LVI is not provably true for
    // all of BB.
980 if (any_of(*BB, [](Instruction &I) {
981 return !isGuaranteedToTransferExecutionToSuccessor(&I);
988 if (TryToUnfoldSelectInCurrBB(BB))
991 // Look if we can propagate guards to predecessors.
992 if (HasGuards && ProcessGuards(BB))
995 // What kind of constant we're looking for.
996 ConstantPreference Preference = WantInteger;
998 // Look to see if the terminator is a conditional branch, switch or indirect
999 // branch, if not we can't thread it.
1001 Instruction *Terminator = BB->getTerminator();
1002 if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) {
1003 // Can't thread an unconditional jump.
1004 if (BI->isUnconditional()) return false;
1005 Condition = BI->getCondition();
1006 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) {
1007 Condition = SI->getCondition();
1008 } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) {
1009 // Can't thread indirect branch with no successors.
1010 if (IB->getNumSuccessors() == 0) return false;
1011 Condition = IB->getAddress()->stripPointerCasts();
1012 Preference = WantBlockAddress;
1014 return false; // Must be an invoke.
  // Run constant folding to see if we can reduce the condition to a simple
  // constant.
  if (Instruction *I = dyn_cast<Instruction>(Condition)) {
    Value *SimpleVal =
        ConstantFoldInstruction(I, BB->getModule()->getDataLayout(), TLI);
    if (SimpleVal) {
      I->replaceAllUsesWith(SimpleVal);
1024 if (isInstructionTriviallyDead(I, TLI))
1025 I->eraseFromParent();
1026 Condition = SimpleVal;
1030 // If the terminator is branching on an undef, we can pick any of the
1031 // successors to branch to. Let GetBestDestForJumpOnUndef decide.
1032 if (isa<UndefValue>(Condition)) {
1033 unsigned BestSucc = GetBestDestForJumpOnUndef(BB);
1035 // Fold the branch/switch.
1036 TerminatorInst *BBTerm = BB->getTerminator();
1037 for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) {
1038 if (i == BestSucc) continue;
1039 BBTerm->getSuccessor(i)->removePredecessor(BB, true);
1042 DEBUG(dbgs() << " In block '" << BB->getName()
1043 << "' folding undef terminator: " << *BBTerm << '\n');
1044 BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm);
1045 BBTerm->eraseFromParent();
  // If the terminator of this block is branching on a constant, simplify the
  // terminator to an unconditional branch.  This can occur due to threading in
  // other blocks.
1052 if (getKnownConstant(Condition, Preference)) {
1053 DEBUG(dbgs() << " In block '" << BB->getName()
1054 << "' folding terminator: " << *BB->getTerminator() << '\n');
1056 ConstantFoldTerminator(BB, true);
  Instruction *CondInst = dyn_cast<Instruction>(Condition);

  // All the rest of our checks depend on the condition being an instruction.
  if (!CondInst) {
    // FIXME: Unify this with code below.
    if (ProcessThreadableEdges(Condition, BB, Preference, Terminator))
      return true;
    return false;
  }
1070 if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
1071 // If we're branching on a conditional, LVI might be able to determine
1072 // it's value at the branch instruction. We only handle comparisons
1073 // against a constant at this time.
1074 // TODO: This should be extended to handle switches as well.
1075 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
1076 Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1));
1077 if (CondBr && CondConst) {
      // We should have returned as soon as we turned a conditional branch into
      // an unconditional one, because it's no longer interesting as far as jump
1080 // threading is concerned.
1081 assert(CondBr->isConditional() && "Threading on unconditional terminator");
1083 LazyValueInfo::Tristate Ret =
1084 LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0),
1086 if (Ret != LazyValueInfo::Unknown) {
1087 unsigned ToRemove = Ret == LazyValueInfo::True ? 1 : 0;
1088 unsigned ToKeep = Ret == LazyValueInfo::True ? 0 : 1;
1089 CondBr->getSuccessor(ToRemove)->removePredecessor(BB, true);
1090 BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
1091 CondBr->eraseFromParent();
1092 if (CondCmp->use_empty())
1093 CondCmp->eraseFromParent();
      // We can safely replace *some* uses of the CondInst if it has
      // exactly one value as returned by LVI. RAUW is incorrect in the
      // presence of guards and assumes, which have `Cond` as a use. This
      // is because we use the guards/assumes to reason about the `Cond` value
      // at the end of block, but RAUW unconditionally replaces all uses
      // including the guards/assumes themselves and the uses before the
      // guard.
1101 else if (CondCmp->getParent() == BB) {
1102 auto *CI = Ret == LazyValueInfo::True ?
1103 ConstantInt::getTrue(CondCmp->getType()) :
1104 ConstantInt::getFalse(CondCmp->getType());
1105 ReplaceFoldableUses(CondCmp, CI);
1110 // We did not manage to simplify this branch, try to see whether
1111 // CondCmp depends on a known phi-select pattern.
1112 if (TryToUnfoldSelect(CondCmp, BB))
1117 // Check for some cases that are worth simplifying. Right now we want to look
1118 // for loads that are used by a switch or by the condition for the branch. If
1119 // we see one, check to see if it's partially redundant. If so, insert a PHI
1120 // which can then be used to thread the values.
1121 Value *SimplifyValue = CondInst;
1122 if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue))
1123 if (isa<Constant>(CondCmp->getOperand(1)))
1124 SimplifyValue = CondCmp->getOperand(0);
1126 // TODO: There are other places where load PRE would be profitable, such as
1127 // more complex comparisons.
1128 if (LoadInst *LI = dyn_cast<LoadInst>(SimplifyValue))
1129 if (SimplifyPartiallyRedundantLoad(LI))
1132 // Before threading, try to propagate profile data backwards:
1133 if (PHINode *PN = dyn_cast<PHINode>(CondInst))
1134 if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
1135 updatePredecessorProfileMetadata(PN, BB);
1137 // Handle a variety of cases where we are branching on something derived from
1138 // a PHI node in the current block. If we can prove that any predecessors
1139 // compute a predictable value based on a PHI node, thread those predecessors.
1140 if (ProcessThreadableEdges(CondInst, BB, Preference, Terminator))
1143 // If this is an otherwise-unfoldable branch on a phi node in the current
1144 // block, see if we can simplify.
1145 if (PHINode *PN = dyn_cast<PHINode>(CondInst))
1146 if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
1147 return ProcessBranchOnPHI(PN);
1149 // If this is an otherwise-unfoldable branch on a XOR, see if we can simplify.
1150 if (CondInst->getOpcode() == Instruction::Xor &&
1151 CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
1152 return ProcessBranchOnXOR(cast<BinaryOperator>(CondInst));
1154 // Search for a stronger dominating condition that can be used to simplify a
1155 // conditional branch leaving BB.
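  // For example (sketch): if a single predecessor branches on "x > 10" and its
  // true edge reaches this block, then a branch here on "x > 5" is implied
  // true and can be folded to an unconditional branch.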
1156 if (ProcessImpliedCondition(BB))
1162 bool JumpThreadingPass::ProcessImpliedCondition(BasicBlock *BB) {
1163 auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
1164 if (!BI || !BI->isConditional())
1167 Value *Cond = BI->getCondition();
1168 BasicBlock *CurrentBB = BB;
1169 BasicBlock *CurrentPred = BB->getSinglePredecessor();
1172 auto &DL = BB->getModule()->getDataLayout();
1174 while (CurrentPred && Iter++ < ImplicationSearchThreshold) {
1175 auto *PBI = dyn_cast<BranchInst>(CurrentPred->getTerminator());
1176 if (!PBI || !PBI->isConditional())
1178 if (PBI->getSuccessor(0) != CurrentBB && PBI->getSuccessor(1) != CurrentBB)
1181 bool CondIsTrue = PBI->getSuccessor(0) == CurrentBB;
    Optional<bool> Implication =
        isImpliedCondition(PBI->getCondition(), Cond, DL, CondIsTrue);
    if (Implication) {
      BI->getSuccessor(*Implication ? 1 : 0)->removePredecessor(BB);
      BranchInst::Create(BI->getSuccessor(*Implication ? 0 : 1), BI);
      BI->eraseFromParent();
      return true;
    }
1190 CurrentBB = CurrentPred;
1191 CurrentPred = CurrentBB->getSinglePredecessor();
1197 /// Return true if Op is an instruction defined in the given block.
1198 static bool isOpDefinedInBlock(Value *Op, BasicBlock *BB) {
  if (Instruction *OpInst = dyn_cast<Instruction>(Op))
    if (OpInst->getParent() == BB)
      return true;
  return false;
}
1205 /// SimplifyPartiallyRedundantLoad - If LI is an obviously partially redundant
1206 /// load instruction, eliminate it by replacing it with a PHI node. This is an
1207 /// important optimization that encourages jump threading, and needs to be run
1208 /// interlaced with other jump threading tasks.
1209 bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
1210 // Don't hack volatile and ordered loads.
1211 if (!LI->isUnordered()) return false;
1213 // If the load is defined in a block with exactly one predecessor, it can't be
1214 // partially redundant.
1215 BasicBlock *LoadBB = LI->getParent();
  if (LoadBB->getSinglePredecessor())
    return false;
1219 // If the load is defined in an EH pad, it can't be partially redundant,
1220 // because the edges between the invoke and the EH pad cannot have other
1221 // instructions between them.
  if (LoadBB->isEHPad())
    return false;
1225 Value *LoadedPtr = LI->getOperand(0);
  // If the loaded operand is defined in the LoadBB and it's not a phi,
  // it can't be available in predecessors.
  if (isOpDefinedInBlock(LoadedPtr, LoadBB) && !isa<PHINode>(LoadedPtr))
    return false;
1232 // Scan a few instructions up from the load, to see if it is obviously live at
1233 // the entry to its block.
  BasicBlock::iterator BBIt(LI);
  bool IsLoadCSE;
  if (Value *AvailableVal = FindAvailableLoadedValue(
1237 LI, LoadBB, BBIt, DefMaxInstsToScan, AA, &IsLoadCSE)) {
1238 // If the value of the load is locally available within the block, just use
    // it.  This frequently occurs for reg2mem'd allocas.

    if (IsLoadCSE) {
      LoadInst *NLI = cast<LoadInst>(AvailableVal);
      combineMetadataForCSE(NLI, LI);
    }
1246 // If the returned value is the load itself, replace with an undef. This can
1247 // only happen in dead loops.
1248 if (AvailableVal == LI) AvailableVal = UndefValue::get(LI->getType());
    if (AvailableVal->getType() != LI->getType())
      AvailableVal =
          CastInst::CreateBitOrPointerCast(AvailableVal, LI->getType(), "", LI);
    LI->replaceAllUsesWith(AvailableVal);
    LI->eraseFromParent();
    return true;
  }
1257 // Otherwise, if we scanned the whole block and got to the top of the block,
1258 // we know the block is locally transparent to the load. If not, something
1259 // might clobber its value.
  if (BBIt != LoadBB->begin())
    return false;
1263 // If all of the loads and stores that feed the value have the same AA tags,
1264 // then we can propagate them onto any newly inserted loads.
  AAMDNodes AATags;
  LI->getAAMetadata(AATags);
1268 SmallPtrSet<BasicBlock*, 8> PredsScanned;
1270 using AvailablePredsTy = SmallVector<std::pair<BasicBlock *, Value *>, 8>;
1272 AvailablePredsTy AvailablePreds;
1273 BasicBlock *OneUnavailablePred = nullptr;
1274 SmallVector<LoadInst*, 8> CSELoads;
1276 // If we got here, the loaded value is transparent through to the start of the
1277 // block. Check to see if it is available in any of the predecessor blocks.
1278 for (BasicBlock *PredBB : predecessors(LoadBB)) {
1279 // If we already scanned this predecessor, skip it.
1280 if (!PredsScanned.insert(PredBB).second)
1283 BBIt = PredBB->end();
1284 unsigned NumScanedInst = 0;
1285 Value *PredAvailable = nullptr;
    // NOTE: We don't CSE loads that are volatile or anything stronger than
    // unordered; that should have been checked when we entered the function.
1288 assert(LI->isUnordered() && "Attempting to CSE volatile or atomic loads");
1289 // If this is a load on a phi pointer, phi-translate it and search
1290 // for available load/store to the pointer in predecessors.
1291 Value *Ptr = LoadedPtr->DoPHITranslation(LoadBB, PredBB);
1292 PredAvailable = FindAvailablePtrLoadStore(
1293 Ptr, LI->getType(), LI->isAtomic(), PredBB, BBIt, DefMaxInstsToScan,
1294 AA, &IsLoadCSE, &NumScanedInst);
    // If PredBB has a single predecessor, continue scanning through the
    // single predecessor.
1298 BasicBlock *SinglePredBB = PredBB;
1299 while (!PredAvailable && SinglePredBB && BBIt == SinglePredBB->begin() &&
1300 NumScanedInst < DefMaxInstsToScan) {
      SinglePredBB = SinglePredBB->getSinglePredecessor();
      if (SinglePredBB) {
        BBIt = SinglePredBB->end();
        PredAvailable = FindAvailablePtrLoadStore(
            Ptr, LI->getType(), LI->isAtomic(), SinglePredBB, BBIt,
            (DefMaxInstsToScan - NumScanedInst), AA, &IsLoadCSE,
            &NumScanedInst);
      }
    }
    if (!PredAvailable) {
      OneUnavailablePred = PredBB;
      continue;
    }

    if (IsLoadCSE)
      CSELoads.push_back(cast<LoadInst>(PredAvailable));
1319 // If so, this load is partially redundant. Remember this info so that we
1320 // can create a PHI node.
1321 AvailablePreds.push_back(std::make_pair(PredBB, PredAvailable));
  // If the loaded value isn't available in any predecessor, it isn't partially
  // redundant.
  if (AvailablePreds.empty()) return false;
1328 // Okay, the loaded value is available in at least one (and maybe all!)
1329 // predecessors. If the value is unavailable in more than one unique
1330 // predecessor, we want to insert a merge block for those common predecessors.
  // This ensures that we only have to insert one reload, thus not increasing
  // code size.
  BasicBlock *UnavailablePred = nullptr;
  // If the value is unavailable in one of the predecessors, we will end up
1336 // inserting a new instruction into them. It is only valid if all the
1337 // instructions before LI are guaranteed to pass execution to its successor,
1338 // or if LI is safe to speculate.
1339 // TODO: If this logic becomes more complex, and we will perform PRE insertion
1340 // farther than to a predecessor, we need to reuse the code from GVN's PRE.
  // It requires domination tree analysis, so for this simple case it is an
  // overkill.
1343 if (PredsScanned.size() != AvailablePreds.size() &&
1344 !isSafeToSpeculativelyExecute(LI))
1345 for (auto I = LoadBB->begin(); &*I != LI; ++I)
      if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
        return false;
1349 // If there is exactly one predecessor where the value is unavailable, the
1350 // already computed 'OneUnavailablePred' block is it. If it ends in an
1351 // unconditional branch, we know that it isn't a critical edge.
1352 if (PredsScanned.size() == AvailablePreds.size()+1 &&
1353 OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) {
1354 UnavailablePred = OneUnavailablePred;
1355 } else if (PredsScanned.size() != AvailablePreds.size()) {
1356 // Otherwise, we had multiple unavailable predecessors or we had a critical
1357 // edge from the one.
1358 SmallVector<BasicBlock*, 8> PredsToSplit;
1359 SmallPtrSet<BasicBlock*, 8> AvailablePredSet;
1361 for (const auto &AvailablePred : AvailablePreds)
1362 AvailablePredSet.insert(AvailablePred.first);
1364 // Add all the unavailable predecessors to the PredsToSplit list.
1365 for (BasicBlock *P : predecessors(LoadBB)) {
1366 // If the predecessor is an indirect goto, we can't split the edge.
      if (isa<IndirectBrInst>(P->getTerminator()))
        return false;
1370 if (!AvailablePredSet.count(P))
1371 PredsToSplit.push_back(P);
1374 // Split them out to their own block.
1375 UnavailablePred = SplitBlockPreds(LoadBB, PredsToSplit, "thread-pre-split");
1378 // If the value isn't available in all predecessors, then there will be
1379 // exactly one where it isn't available. Insert a load on that edge and add
1380 // it to the AvailablePreds list.
1381 if (UnavailablePred) {
1382 assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
1383 "Can't handle critical edge here!");
1384 LoadInst *NewVal = new LoadInst(
1385 LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
1386 LI->getName() + ".pr", false, LI->getAlignment(), LI->getOrdering(),
1387 LI->getSyncScopeID(), UnavailablePred->getTerminator());
1388 NewVal->setDebugLoc(LI->getDebugLoc());
1390 NewVal->setAAMetadata(AATags);
1392 AvailablePreds.push_back(std::make_pair(UnavailablePred, NewVal));
1395 // Now we know that each predecessor of this block has a value in
1396 // AvailablePreds, sort them for efficient access as we're walking the preds.
1397 array_pod_sort(AvailablePreds.begin(), AvailablePreds.end());
1399 // Create a PHI node at the start of the block for the PRE'd load value.
1400 pred_iterator PB = pred_begin(LoadBB), PE = pred_end(LoadBB);
  PHINode *PN = PHINode::Create(LI->getType(), std::distance(PB, PE), "",
                                &LoadBB->front());
1404 PN->setDebugLoc(LI->getDebugLoc());
1406 // Insert new entries into the PHI for each predecessor. A single block may
1407 // have multiple entries here.
1408 for (pred_iterator PI = PB; PI != PE; ++PI) {
1409 BasicBlock *P = *PI;
1410 AvailablePredsTy::iterator I =
1411 std::lower_bound(AvailablePreds.begin(), AvailablePreds.end(),
1412 std::make_pair(P, (Value*)nullptr));
1414 assert(I != AvailablePreds.end() && I->first == P &&
1415 "Didn't find entry for predecessor!");
1417 // If we have an available predecessor but it requires casting, insert the
1418 // cast in the predecessor and use the cast. Note that we have to update the
1419 // AvailablePreds vector as we go so that all of the PHI entries for this
1420 // predecessor use the same bitcast.
1421 Value *&PredV = I->second;
1422 if (PredV->getType() != LI->getType())
1423 PredV = CastInst::CreateBitOrPointerCast(PredV, LI->getType(), "",
1424 P->getTerminator());
1426 PN->addIncoming(PredV, I->first);
1429 for (LoadInst *PredLI : CSELoads) {
1430 combineMetadataForCSE(PredLI, LI);
1433 LI->replaceAllUsesWith(PN);
1434 LI->eraseFromParent();
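  // At this point the load has been rewritten as a PHI over the values
  // available in each predecessor (plus at most one load inserted on the
  // previously unavailable edge), conceptually (names illustrative):
  //   %li.phi = phi i32 [ %avail.val, %pred1 ], [ %li.pr, %thread-pre-split ]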
1439 /// FindMostPopularDest - The specified list contains multiple possible
/// threadable destinations.  Pick the one that occurs the most frequently in
/// the successor list.
static BasicBlock *
FindMostPopularDest(BasicBlock *BB,
1444 const SmallVectorImpl<std::pair<BasicBlock *,
1445 BasicBlock *>> &PredToDestList) {
1446 assert(!PredToDestList.empty());
1448 // Determine popularity. If there are multiple possible destinations, we
1449 // explicitly choose to ignore 'undef' destinations. We prefer to thread
1450 // blocks with known and real destinations to threading undef. We'll handle
1451 // them later if interesting.
1452 DenseMap<BasicBlock*, unsigned> DestPopularity;
1453 for (const auto &PredToDest : PredToDestList)
1454 if (PredToDest.second)
1455 DestPopularity[PredToDest.second]++;
1457 // Find the most popular dest.
1458 DenseMap<BasicBlock*, unsigned>::iterator DPI = DestPopularity.begin();
1459 BasicBlock *MostPopularDest = DPI->first;
1460 unsigned Popularity = DPI->second;
1461 SmallVector<BasicBlock*, 4> SamePopularity;
1463 for (++DPI; DPI != DestPopularity.end(); ++DPI) {
1464 // If the popularity of this entry isn't higher than the popularity we've
1465 // seen so far, ignore it.
    if (DPI->second < Popularity)
      ; // ignore.
    else if (DPI->second == Popularity) {
      // If it is the same as what we've seen so far, keep track of it.
      SamePopularity.push_back(DPI->first);
    } else {
      // If it is more popular, remember it.
      SamePopularity.clear();
      MostPopularDest = DPI->first;
      Popularity = DPI->second;
    }
  }
1479 // Okay, now we know the most popular destination. If there is more than one
1480 // destination, we need to determine one. This is arbitrary, but we need
  // to make a deterministic decision.  Pick the first one that appears in the
  // successor list.
1483 if (!SamePopularity.empty()) {
1484 SamePopularity.push_back(MostPopularDest);
1485 TerminatorInst *TI = BB->getTerminator();
1486 for (unsigned i = 0; ; ++i) {
1487 assert(i != TI->getNumSuccessors() && "Didn't find any successor!");
      if (!is_contained(SamePopularity, TI->getSuccessor(i)))
        continue;

      MostPopularDest = TI->getSuccessor(i);
      break;
    }
  }
1497 // Okay, we have finally picked the most popular destination.
1498 return MostPopularDest;
1501 bool JumpThreadingPass::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
1502 ConstantPreference Preference,
1503 Instruction *CxtI) {
  // If threading this would thread across a loop header, don't even try to
  // thread the edge.
  if (LoopHeaders.count(BB))
    return false;
1509 PredValueInfoTy PredValues;
  if (!ComputeValueKnownInPredecessors(Cond, BB, PredValues, Preference, CxtI))
    return false;
1513 assert(!PredValues.empty() &&
1514 "ComputeValueKnownInPredecessors returned true with no values");
  DEBUG(dbgs() << "IN BB: " << *BB;
        for (const auto &PredValue : PredValues) {
          dbgs() << "  BB '" << BB->getName()
                 << "': FOUND condition = " << *PredValue.first
                 << " for pred '" << PredValue.second->getName() << "'.\n";
        });
1523 // Decide what we want to thread through. Convert our list of known values to
1524 // a list of known destinations for each pred. This also discards duplicate
1525 // predecessors and keeps track of the undefined inputs (which are represented
1526 // as a null dest in the PredToDestList).
1527 SmallPtrSet<BasicBlock*, 16> SeenPreds;
1528 SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;
1530 BasicBlock *OnlyDest = nullptr;
1531 BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;
1532 Constant *OnlyVal = nullptr;
1533 Constant *MultipleVal = (Constant *)(intptr_t)~0ULL;
1535 unsigned PredWithKnownDest = 0;
1536 for (const auto &PredValue : PredValues) {
1537 BasicBlock *Pred = PredValue.second;
1538 if (!SeenPreds.insert(Pred).second)
1539 continue; // Duplicate predecessor entry.
    Constant *Val = PredValue.first;

    BasicBlock *DestBB;
    if (isa<UndefValue>(Val))
      DestBB = nullptr;
1546 else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
1547 assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
1548 DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
1549 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
1550 assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
1551 DestBB = SI->findCaseValue(cast<ConstantInt>(Val))->getCaseSuccessor();
1553 assert(isa<IndirectBrInst>(BB->getTerminator())
1554 && "Unexpected terminator");
1555 assert(isa<BlockAddress>(Val) && "Expecting a constant blockaddress");
1556 DestBB = cast<BlockAddress>(Val)->getBasicBlock();
    // If we have exactly one destination, remember it for efficiency below.
    if (PredToDestList.empty()) {
      OnlyDest = DestBB;
      OnlyVal = Val;
    } else {
      if (OnlyDest != DestBB)
        OnlyDest = MultipleDestSentinel;
      // It is possible we have the same destination but a different value,
      // e.g. the default case in a switchinst.
      if (Val != OnlyVal)
        OnlyVal = MultipleVal;
    }
1572 // We know where this predecessor is going.
1573 ++PredWithKnownDest;
    // If the predecessor ends with an indirect goto, we can't change its
    // destination.
    if (isa<IndirectBrInst>(Pred->getTerminator()))
      continue;
1580 PredToDestList.push_back(std::make_pair(Pred, DestBB));
1583 // If all edges were unthreadable, we fail.
  if (PredToDestList.empty())
    return false;
1587 // If all the predecessors go to a single known successor, we want to fold,
1588 // not thread. By doing so, we do not need to duplicate the current block and
  // also miss potential opportunities in case we don't/can't duplicate.
1590 if (OnlyDest && OnlyDest != MultipleDestSentinel) {
1591 if (PredWithKnownDest ==
1592 (size_t)std::distance(pred_begin(BB), pred_end(BB))) {
1593 bool SeenFirstBranchToOnlyDest = false;
      for (BasicBlock *SuccBB : successors(BB)) {
        if (SuccBB == OnlyDest && !SeenFirstBranchToOnlyDest)
          SeenFirstBranchToOnlyDest = true; // Don't modify the first branch.
        else
          SuccBB->removePredecessor(BB, true); // This is unreachable successor.
      }
1601 // Finally update the terminator.
1602 TerminatorInst *Term = BB->getTerminator();
1603 BranchInst::Create(OnlyDest, Term);
1604 Term->eraseFromParent();
      // If the condition is now dead due to the removal of the old terminator,
      // erase it.
1608 if (auto *CondInst = dyn_cast<Instruction>(Cond)) {
1609 if (CondInst->use_empty() && !CondInst->mayHaveSideEffects())
1610 CondInst->eraseFromParent();
1611 // We can safely replace *some* uses of the CondInst if it has
1612 // exactly one value as returned by LVI. RAUW is incorrect in the
1613 // presence of guards and assumes, that have the `Cond` as the use. This
1614 // is because we use the guards/assume to reason about the `Cond` value
1615 // at the end of block, but RAUW unconditionally replaces all uses
1616 // including the guards/assumes themselves and the uses before the
1618 else if (OnlyVal && OnlyVal != MultipleVal &&
1619 CondInst->getParent() == BB)
1620 ReplaceFoldableUses(CondInst, OnlyVal);
1626 // Determine which is the most common successor. If we have many inputs and
1627 // this block is a switch, we want to start by threading the batch that goes
1628 // to the most popular destination first. If we only know about one
1629 // threadable destination (the common case) we can avoid this.
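  // For example, if a switch in BB has four predecessors known to reach the
  // default destination and only one known to hit a specific case, it pays to
  // thread the batch of four to the default destination first.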
1630 BasicBlock *MostPopularDest = OnlyDest;
1632 if (MostPopularDest == MultipleDestSentinel)
1633 MostPopularDest = FindMostPopularDest(BB, PredToDestList);
1635 // Now that we know what the most popular destination is, factor all
1636 // predecessors that will jump to it into a single predecessor.
1637 SmallVector<BasicBlock*, 16> PredsToFactor;
1638 for (const auto &PredToDest : PredToDestList)
1639 if (PredToDest.second == MostPopularDest) {
1640 BasicBlock *Pred = PredToDest.first;
1642 // This predecessor may be a switch or something else that has multiple
1643 // edges to the block. Factor each of these edges by listing them
1644 // according to # occurrences in PredsToFactor.
      for (BasicBlock *Succ : successors(Pred))
        if (Succ == BB)
          PredsToFactor.push_back(Pred);
    }
1650 // If the threadable edges are branching on an undefined value, we get to pick
1651 // the destination that these predecessors should get to.
1652 if (!MostPopularDest)
1653 MostPopularDest = BB->getTerminator()->
1654 getSuccessor(GetBestDestForJumpOnUndef(BB));
1656 // Ok, try to thread it!
  return ThreadEdge(BB, PredsToFactor, MostPopularDest);
}
1660 /// ProcessBranchOnPHI - We have an otherwise unthreadable conditional branch on
1661 /// a PHI node in the current block. See if there are any simplifications we
1662 /// can do based on inputs to the phi node.
1663 bool JumpThreadingPass::ProcessBranchOnPHI(PHINode *PN) {
1664 BasicBlock *BB = PN->getParent();
  // TODO: We could make use of this to do it once for blocks with common PHI
  // values.
  SmallVector<BasicBlock*, 1> PredBBs;
  PredBBs.resize(1);
1671 // If any of the predecessor blocks end in an unconditional branch, we can
1672 // *duplicate* the conditional branch into that block in order to further
1673 // encourage jump threading and to eliminate cases where we have branch on a
1674 // phi of an icmp (branch on icmp is much better).
1675 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1676 BasicBlock *PredBB = PN->getIncomingBlock(i);
1677 if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()))
1678 if (PredBr->isUnconditional()) {
1679 PredBBs[0] = PredBB;
1680 // Try to duplicate BB into PredBB.
        if (DuplicateCondBranchOnPHIIntoPred(BB, PredBBs))
          return true;
      }
  }

  return false;
}
1689 /// ProcessBranchOnXOR - We have an otherwise unthreadable conditional branch on
1690 /// a xor instruction in the current block. See if there are any
1691 /// simplifications we can do based on inputs to the xor.
1692 bool JumpThreadingPass::ProcessBranchOnXOR(BinaryOperator *BO) {
1693 BasicBlock *BB = BO->getParent();
  // If either the LHS or RHS of the xor is a constant, don't do this
  // transformation.
  if (isa<ConstantInt>(BO->getOperand(0)) ||
      isa<ConstantInt>(BO->getOperand(1)))
    return false;
  // If the first instruction in BB isn't a phi, we won't be able to infer
  // anything special about any particular predecessor.
  if (!isa<PHINode>(BB->front()))
    return false;

  // If this BB is a landing pad, we won't be able to split the edge into it.
  if (isa<LandingPadInst>(BB->getFirstNonPHI()))
    return false;
1710 // If we have a xor as the branch input to this block, and we know that the
1711 // LHS or RHS of the xor in any predecessor is true/false, then we can clone
1712 // the condition into the predecessor and fix that value to true, saving some
1713 // logical ops on that path and encouraging other paths to simplify.
  // This copies something like this:
  //
  //  BB:
  //    %X = phi i1 [1],  [%X']
  //    %Y = icmp eq i32 %A, %B
  //    %Z = xor i1 %X, %Y
  //    br i1 %Z, ...
  //
  // To:
  //  BB':
  //    %Y = icmp ne i32 %A, %B
  //    br i1 %Y, ...
  PredValueInfoTy XorOpValues;
  bool isLHS = true;
  if (!ComputeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues,
                                       WantInteger, BO)) {
    assert(XorOpValues.empty());
    if (!ComputeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues,
                                         WantInteger, BO))
      return false;
    isLHS = false;
  }
1739 assert(!XorOpValues.empty() &&
1740 "ComputeValueKnownInPredecessors returned true with no values");
1742 // Scan the information to see which is most popular: true or false. The
1743 // predecessors can be of the set true, false, or undef.
1744 unsigned NumTrue = 0, NumFalse = 0;
  for (const auto &XorOpValue : XorOpValues) {
    if (isa<UndefValue>(XorOpValue.first))
      // Ignore undefs for the count.
      continue;
    if (cast<ConstantInt>(XorOpValue.first)->isZero())
      ++NumFalse;
    else
      ++NumTrue;
  }
1755 // Determine which value to split on, true, false, or undef if neither.
1756 ConstantInt *SplitVal = nullptr;
1757 if (NumTrue > NumFalse)
1758 SplitVal = ConstantInt::getTrue(BB->getContext());
1759 else if (NumTrue != 0 || NumFalse != 0)
1760 SplitVal = ConstantInt::getFalse(BB->getContext());
1762 // Collect all of the blocks that this can be folded into so that we can
1763 // factor this once and clone it once.
1764 SmallVector<BasicBlock*, 8> BlocksToFoldInto;
  for (const auto &XorOpValue : XorOpValues) {
    if (XorOpValue.first != SplitVal && !isa<UndefValue>(XorOpValue.first))
      continue;

    BlocksToFoldInto.push_back(XorOpValue.second);
  }
1772 // If we inferred a value for all of the predecessors, then duplication won't
1773 // help us. However, we can just replace the LHS or RHS with the constant.
  if (BlocksToFoldInto.size() ==
      cast<PHINode>(BB->front()).getNumIncomingValues()) {
    if (!SplitVal) {
      // If all preds provide undef, just nuke the xor, because it is undef too.
      BO->replaceAllUsesWith(UndefValue::get(BO->getType()));
      BO->eraseFromParent();
    } else if (SplitVal->isZero()) {
      // If all preds provide 0, replace the xor with the other input.
      BO->replaceAllUsesWith(BO->getOperand(isLHS));
      BO->eraseFromParent();
    } else {
      // If all preds provide 1, set the computed value to 1.
      BO->setOperand(!isLHS, SplitVal);
    }

    return true;
  }
1792 // Try to duplicate BB into PredBB.
  return DuplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto);
}
1796 /// AddPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new
1797 /// predecessor to the PHIBB block. If it has PHI nodes, add entries for
1798 /// NewPred using the entries from OldPred (suitably mapped).
1799 static void AddPHINodeEntriesForMappedBlock(BasicBlock *PHIBB,
1800 BasicBlock *OldPred,
1801 BasicBlock *NewPred,
1802 DenseMap<Instruction*, Value*> &ValueMap) {
1803 for (BasicBlock::iterator PNI = PHIBB->begin();
1804 PHINode *PN = dyn_cast<PHINode>(PNI); ++PNI) {
    // Ok, we have a PHI node.  Figure out what the incoming value was for the
    // DestBlock.
    Value *IV = PN->getIncomingValueForBlock(OldPred);

    // Remap the value if necessary.
    if (Instruction *Inst = dyn_cast<Instruction>(IV)) {
      DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst);
      if (I != ValueMap.end())
        IV = I->second;
    }

    PN->addIncoming(IV, NewPred);
  }
}
1820 /// ThreadEdge - We have decided that it is safe and profitable to factor the
1821 /// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB
1822 /// across BB. Transform the IR to reflect this change.
1823 bool JumpThreadingPass::ThreadEdge(BasicBlock *BB,
1824 const SmallVectorImpl<BasicBlock *> &PredBBs,
1825 BasicBlock *SuccBB) {
  // If threading to the same block as we come from, we would infinite loop.
  if (SuccBB == BB) {
    DEBUG(dbgs() << "  Not threading across BB '" << BB->getName()
          << "' - would thread to self!\n");
    return false;
  }
1833 // If threading this would thread across a loop header, don't thread the edge.
1834 // See the comments above FindLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) {
    DEBUG({
      bool BBIsHeader = LoopHeaders.count(BB);
      bool SuccIsHeader = LoopHeaders.count(SuccBB);

      dbgs() << "  Not threading across "
             << (BBIsHeader ? "loop header BB '" : "block BB '") << BB->getName()
             << "' to dest " << (SuccIsHeader ? "loop header BB '" : "block BB '")
             << SuccBB->getName() << "' - it might create an irreducible loop!\n";
    });
    return false;
  }
1847 unsigned JumpThreadCost =
1848 getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold);
  if (JumpThreadCost > BBDupThreshold) {
    DEBUG(dbgs() << "  Not threading BB '" << BB->getName()
                 << "' - Cost is too high: " << JumpThreadCost << "\n");
    return false;
  }
  // And finally, do it!  Start by factoring the predecessors if needed.
  BasicBlock *PredBB;
  if (PredBBs.size() == 1)
    PredBB = PredBBs[0];
  else {
    DEBUG(dbgs() << "  Factoring out " << PredBBs.size()
                 << " common predecessors.\n");
    PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm");
  }
1865 // And finally, do it!
  DEBUG(dbgs() << "  Threading edge from '" << PredBB->getName() << "' to '"
               << SuccBB->getName() << "' with cost: " << JumpThreadCost
               << ", across block:\n    " << *BB << "\n");

  LVI->threadEdge(PredBB, BB, SuccBB);
1873 // We are going to have to map operands from the original BB block to the new
1874 // copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to
1875 // account for entry from PredBB.
1876 DenseMap<Instruction*, Value*> ValueMapping;
1878 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(),
1879 BB->getName()+".thread",
1880 BB->getParent(), BB);
1881 NewBB->moveAfter(PredBB);
1883 // Set the block frequency of NewBB.
  if (HasProfileData) {
    auto NewBBFreq =
        BFI->getBlockFreq(PredBB) * BPI->getEdgeProbability(PredBB, BB);
    BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
  }
1890 BasicBlock::iterator BI = BB->begin();
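  // NewBB will have PredBB as its only predecessor, so in the clone every PHI
  // node in BB collapses to the value it receives from PredBB.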
1891 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
1892 ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);
1894 // Clone the non-phi instructions of BB into NewBB, keeping track of the
1895 // mapping and using it to remap operands in the cloned instructions.
1896 for (; !isa<TerminatorInst>(BI); ++BI) {
1897 Instruction *New = BI->clone();
1898 New->setName(BI->getName());
1899 NewBB->getInstList().push_back(New);
1900 ValueMapping[&*BI] = New;
1902 // Remap operands to patch up intra-block references.
1903 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
1904 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
1905 DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst);
1906 if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }
  }
1911 // We didn't copy the terminator from BB over to NewBB, because there is now
1912 // an unconditional jump to SuccBB. Insert the unconditional jump.
1913 BranchInst *NewBI = BranchInst::Create(SuccBB, NewBB);
1914 NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc());
1916 // Check to see if SuccBB has PHI nodes. If so, we need to add entries to the
1917 // PHI nodes for NewBB now.
1918 AddPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping);
  // If there were values defined in BB that are used outside the block, then we
  // now have to update all uses of the value to use either the original value,
  // the cloned value, or some PHI derived value.  This can require arbitrary
  // PHI insertion, which we are prepared to do; clean these up now.
1924 SSAUpdater SSAUpdate;
1925 SmallVector<Use*, 16> UsesToRename;
1926 for (Instruction &I : *BB) {
1927 // Scan all uses of this instruction to see if it is used outside of its
1928 // block, and if so, record them in UsesToRename.
1929 for (Use &U : I.uses()) {
1930 Instruction *User = cast<Instruction>(U.getUser());
1931 if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
        if (UserPN->getIncomingBlock(U) == BB)
          continue;
      } else if (User->getParent() == BB)
        continue;

      UsesToRename.push_back(&U);
    }
1940 // If there are no uses outside the block, we're done with this instruction.
    if (UsesToRename.empty())
      continue;
1944 DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");
1946 // We found a use of I outside of BB. Rename all uses of I that are outside
1947 // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks
1948 // with the two values we know.
1949 SSAUpdate.Initialize(I.getType(), I.getName());
1950 SSAUpdate.AddAvailableValue(BB, &I);
1951 SSAUpdate.AddAvailableValue(NewBB, ValueMapping[&I]);
1953 while (!UsesToRename.empty())
1954 SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
    DEBUG(dbgs() << "\n");
  }
1958 // Ok, NewBB is good to go. Update the terminator of PredBB to jump to
1959 // NewBB instead of BB. This eliminates predecessors from BB, which requires
1960 // us to simplify any PHI nodes in BB.
1961 TerminatorInst *PredTerm = PredBB->getTerminator();
1962 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i)
1963 if (PredTerm->getSuccessor(i) == BB) {
1964 BB->removePredecessor(PredBB, true);
      PredTerm->setSuccessor(i, NewBB);
    }
1968 // At this point, the IR is fully up to date and consistent. Do a quick scan
1969 // over the new instructions and zap any that are constants or dead. This
1970 // frequently happens because of phi translation.
1971 SimplifyInstructionsInBlock(NewBB, TLI);
1973 // Update the edge weight from BB to SuccBB, which should be less than before.
1974 UpdateBlockFreqAndEdgeWeight(PredBB, BB, NewBB, SuccBB);
  // Threaded an edge!
  ++NumThreads;
  return true;
}
1981 /// Create a new basic block that will be the predecessor of BB and successor of
/// all blocks in Preds. When profile data is available, update the frequency of
/// this new block.
1984 BasicBlock *JumpThreadingPass::SplitBlockPreds(BasicBlock *BB,
1985 ArrayRef<BasicBlock *> Preds,
1986 const char *Suffix) {
1987 // Collect the frequencies of all predecessors of BB, which will be used to
1988 // update the edge weight on BB->SuccBB.
  BlockFrequency PredBBFreq(0);
  if (HasProfileData)
    for (auto Pred : Preds)
1992 PredBBFreq += BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB);
1994 BasicBlock *PredBB = SplitBlockPredecessors(BB, Preds, Suffix);
1996 // Set the block frequency of the newly created PredBB, which is the sum of
1997 // frequencies of Preds.
  if (HasProfileData)
    BFI->setBlockFreq(PredBB, PredBBFreq.getFrequency());

  return PredBB;
}
2003 bool JumpThreadingPass::doesBlockHaveProfileData(BasicBlock *BB) {
2004 const TerminatorInst *TI = BB->getTerminator();
2005 assert(TI->getNumSuccessors() > 1 && "not a split");
  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  MDString *MDName = cast<MDString>(WeightsNode->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return false;
2015 // Ensure there are weights for all of the successors. Note that the first
2016 // operand to the metadata node is a name, not a weight.
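  // A well-formed node for a two-successor terminator looks roughly like
  //   !{!"branch_weights", i32 64, i32 4}
  // i.e. one name operand followed by one weight per successor.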
  return WeightsNode->getNumOperands() == TI->getNumSuccessors() + 1;
}
2020 /// Update the block frequency of BB and branch weight and the metadata on the
2021 /// edge BB->SuccBB. This is done by scaling the weight of BB->SuccBB by 1 -
2022 /// Freq(PredBB->BB) / Freq(BB->SuccBB).
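/// For example, if Freq(BB->SuccBB) was 50 and Freq(PredBB->BB) was 30, the
/// threaded edge now carries those 30 units directly, so BB->SuccBB is scaled
/// by 1 - 30/50 and ends up with a frequency of 20.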
void JumpThreadingPass::UpdateBlockFreqAndEdgeWeight(BasicBlock *PredBB,
                                                     BasicBlock *BB,
                                                     BasicBlock *NewBB,
                                                     BasicBlock *SuccBB) {
  if (!HasProfileData)
    return;
2030 assert(BFI && BPI && "BFI & BPI should have been created here");
  // As the edge from PredBB to BB is deleted, we have to update the block
  // frequency of BB.
2034 auto BBOrigFreq = BFI->getBlockFreq(BB);
2035 auto NewBBFreq = BFI->getBlockFreq(NewBB);
2036 auto BB2SuccBBFreq = BBOrigFreq * BPI->getEdgeProbability(BB, SuccBB);
2037 auto BBNewFreq = BBOrigFreq - NewBBFreq;
2038 BFI->setBlockFreq(BB, BBNewFreq.getFrequency());
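  // NewBB now absorbs exactly the frequency that used to flow from PredBB into
  // BB, so BB keeps its original frequency minus NewBB's.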
2040 // Collect updated outgoing edges' frequencies from BB and use them to update
2041 // edge probabilities.
2042 SmallVector<uint64_t, 4> BBSuccFreq;
2043 for (BasicBlock *Succ : successors(BB)) {
2044 auto SuccFreq = (Succ == SuccBB)
2045 ? BB2SuccBBFreq - NewBBFreq
2046 : BBOrigFreq * BPI->getEdgeProbability(BB, Succ);
    BBSuccFreq.push_back(SuccFreq.getFrequency());
  }
2050 uint64_t MaxBBSuccFreq =
2051 *std::max_element(BBSuccFreq.begin(), BBSuccFreq.end());
2053 SmallVector<BranchProbability, 4> BBSuccProbs;
2054 if (MaxBBSuccFreq == 0)
2055 BBSuccProbs.assign(BBSuccFreq.size(),
                       {1, static_cast<unsigned>(BBSuccFreq.size())});
  else {
    for (uint64_t Freq : BBSuccFreq)
      BBSuccProbs.push_back(
          BranchProbability::getBranchProbability(Freq, MaxBBSuccFreq));
    // Normalize edge probabilities so that they sum up to one.
    BranchProbability::normalizeProbabilities(BBSuccProbs.begin(),
                                              BBSuccProbs.end());
  }
2066 // Update edge probabilities in BPI.
2067 for (int I = 0, E = BBSuccProbs.size(); I < E; I++)
2068 BPI->setEdgeProbability(BB, I, BBSuccProbs[I]);
2070 // Update the profile metadata as well.
2072 // Don't do this if the profile of the transformed blocks was statically
2073 // estimated. (This could occur despite the function having an entry
2074 // frequency in completely cold parts of the CFG.)
2076 // In this case we don't want to suggest to subsequent passes that the
  // calculated weights are fully consistent. Consider this graph:
  //
  //                 check_1
  //             50% /  |
  //             eq_1   | 50%
  //                 \  |
  //                 check_2
  //             50% /  |
  //             eq_2   | 50%
  //                 \  |
  //                 check_3
  //             50% /  |
  //             eq_3   | 50%
  //                 \  |
  //
2092 // Assuming the blocks check_* all compare the same value against 1, 2 and 3,
2093 // the overall probabilities are inconsistent; the total probability that the
2094 // value is either 1, 2 or 3 is 150%.
2096 // As a consequence if we thread eq_1 -> check_2 to check_3, check_2->check_3
2097 // becomes 0%. This is even worse if the edge whose probability becomes 0% is
2098 // the loop exit edge. Then based solely on static estimation we would assume
2099 // the loop was extremely hot.
  // FIXME: Fix this locally as well so that BPI and BFI stay consistent. We
  // shouldn't make edges extremely likely or unlikely based solely on static
  // estimation.
2104 if (BBSuccProbs.size() >= 2 && doesBlockHaveProfileData(BB)) {
2105 SmallVector<uint32_t, 4> Weights;
2106 for (auto Prob : BBSuccProbs)
2107 Weights.push_back(Prob.getNumerator());
    auto TI = BB->getTerminator();
    TI->setMetadata(
        LLVMContext::MD_prof,
        MDBuilder(TI->getParent()->getContext()).createBranchWeights(Weights));
  }
}
2116 /// DuplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch
2117 /// to BB which contains an i1 PHI node and a conditional branch on that PHI.
2118 /// If we can duplicate the contents of BB up into PredBB do so now, this
/// improves the odds that the branch will be on an analyzable instruction like
/// a comparison.
2121 bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred(
2122 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) {
2123 assert(!PredBBs.empty() && "Can't handle an empty set");
2125 // If BB is a loop header, then duplicating this block outside the loop would
2126 // cause us to transform this into an irreducible loop, don't do this.
2127 // See the comments above FindLoopHeaders for justifications and caveats.
2128 if (LoopHeaders.count(BB)) {
2129 DEBUG(dbgs() << " Not duplicating loop header '" << BB->getName()
2130 << "' into predecessor block '" << PredBBs[0]->getName()
2131 << "' - it might create an irreducible loop!\n");
2135 unsigned DuplicationCost =
2136 getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold);
2137 if (DuplicationCost > BBDupThreshold) {
2138 DEBUG(dbgs() << " Not duplicating BB '" << BB->getName()
2139 << "' - Cost is too high: " << DuplicationCost << "\n");
  // And finally, do it!  Start by factoring the predecessors if needed.
  BasicBlock *PredBB;
  if (PredBBs.size() == 1)
    PredBB = PredBBs[0];
  else {
    DEBUG(dbgs() << "  Factoring out " << PredBBs.size()
                 << " common predecessors.\n");
    PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm");
  }
  // Okay, we decided to do this!  Clone all the instructions in BB onto the end
  // of PredBB.
2155 DEBUG(dbgs() << " Duplicating block '" << BB->getName() << "' into end of '"
2156 << PredBB->getName() << "' to eliminate branch on phi. Cost: "
2157 << DuplicationCost << " block is:" << *BB << "\n");
2159 // Unless PredBB ends with an unconditional branch, split the edge so that we
2160 // can just clone the bits from BB into the end of the new PredBB.
2161 BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator());
2163 if (!OldPredBranch || !OldPredBranch->isUnconditional()) {
2164 PredBB = SplitEdge(PredBB, BB);
    OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
  }
2168 // We are going to have to map operands from the original BB block into the
2169 // PredBB block. Evaluate PHI nodes in BB.
2170 DenseMap<Instruction*, Value*> ValueMapping;
2172 BasicBlock::iterator BI = BB->begin();
2173 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
2174 ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);
2175 // Clone the non-phi instructions of BB into PredBB, keeping track of the
2176 // mapping and using it to remap operands in the cloned instructions.
2177 for (; BI != BB->end(); ++BI) {
2178 Instruction *New = BI->clone();
2180 // Remap operands to patch up intra-block references.
2181 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
2182 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
2183 DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst);
2184 if (I != ValueMapping.end())
2185 New->setOperand(i, I->second);
    // If this instruction can be simplified after the operands are updated,
    // just use the simplified value instead.  This frequently happens due to
    // phi translation.
    if (Value *IV = SimplifyInstruction(
            New,
            {BB->getModule()->getDataLayout(), TLI, nullptr, nullptr, New})) {
      ValueMapping[&*BI] = IV;
      if (!New->mayHaveSideEffects()) {
        New->deleteValue();
        New = nullptr;
      }
    } else {
      ValueMapping[&*BI] = New;
    }
    if (New) {
      // Otherwise, insert the new instruction into the block.
      New->setName(BI->getName());
      PredBB->getInstList().insert(OldPredBranch->getIterator(), New);
    }
  }
2209 // Check to see if the targets of the branch had PHI nodes. If so, we need to
2210 // add entries to the PHI nodes for branch from PredBB now.
2211 BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator());
  AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB,
                                  ValueMapping);
  AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB,
                                  ValueMapping);
2217 // If there were values defined in BB that are used outside the block, then we
2218 // now have to update all uses of the value to use either the original value,
2219 // the cloned value, or some PHI derived value. This can require arbitrary
  // PHI insertion, which we are prepared to do; clean these up now.
2221 SSAUpdater SSAUpdate;
2222 SmallVector<Use*, 16> UsesToRename;
2223 for (Instruction &I : *BB) {
2224 // Scan all uses of this instruction to see if it is used outside of its
2225 // block, and if so, record them in UsesToRename.
2226 for (Use &U : I.uses()) {
2227 Instruction *User = cast<Instruction>(U.getUser());
2228 if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
        if (UserPN->getIncomingBlock(U) == BB)
          continue;
      } else if (User->getParent() == BB)
        continue;

      UsesToRename.push_back(&U);
    }
2237 // If there are no uses outside the block, we're done with this instruction.
    if (UsesToRename.empty())
      continue;
2241 DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");
2243 // We found a use of I outside of BB. Rename all uses of I that are outside
2244 // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks
2245 // with the two values we know.
2246 SSAUpdate.Initialize(I.getType(), I.getName());
2247 SSAUpdate.AddAvailableValue(BB, &I);
2248 SSAUpdate.AddAvailableValue(PredBB, ValueMapping[&I]);
2250 while (!UsesToRename.empty())
2251 SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
    DEBUG(dbgs() << "\n");
  }
  // PredBB no longer jumps to BB, remove entries in the PHI node for the edge
  // created earlier.
2257 BB->removePredecessor(PredBB, true);
2259 // Remove the unconditional branch at the end of the PredBB block.
  OldPredBranch->eraseFromParent();

  ++NumDupes;
  return true;
}
/// TryToUnfoldSelect - Look for blocks of the form
/// bb1:
///   %a = select
///   br bb2
///
/// bb2:
///   %p = phi [%a, %bb1] ...
///   %c = icmp %p
///   br i1 %c
///
/// And expand the select into a branch structure if one of its arms allows %c
/// to be folded. This later enables threading from bb1 over bb2.
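///
/// Roughly (block and value names below are illustrative), the expansion
/// introduces a new block between bb1 and bb2 so that each arm of the select
/// reaches bb2 along its own edge:
///
/// bb1:
///   br i1 %selcond, label %select.unfold, label %bb2
/// select.unfold:
///   br label %bb2
/// bb2:
///   %p = phi [%falseval, %bb1], [%trueval, %select.unfold] ...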
2278 bool JumpThreadingPass::TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) {
2279 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
2280 PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0));
2281 Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1));
2283 if (!CondBr || !CondBr->isConditional() || !CondLHS ||
      CondLHS->getParent() != BB)
    return false;
2287 for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) {
2288 BasicBlock *Pred = CondLHS->getIncomingBlock(I);
2289 SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I));
    // Look if one of the incoming values is a select in the corresponding
    // predecessor.
    if (!SI || SI->getParent() != Pred || !SI->hasOneUse())
      continue;
2296 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator());
    if (!PredTerm || !PredTerm->isUnconditional())
      continue;
2300 // Now check if one of the select values would allow us to constant fold the
2301 // terminator in BB. We don't do the transform if both sides fold, those
2302 // cases will be threaded in any case.
2303 LazyValueInfo::Tristate LHSFolds =
2304 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1),
2305 CondRHS, Pred, BB, CondCmp);
2306 LazyValueInfo::Tristate RHSFolds =
2307 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2),
2308 CondRHS, Pred, BB, CondCmp);
2309 if ((LHSFolds != LazyValueInfo::Unknown ||
2310 RHSFolds != LazyValueInfo::Unknown) &&
2311 LHSFolds != RHSFolds) {
2312 // Expand the select.
2321 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold",
2322 BB->getParent(), BB);
2323 // Move the unconditional branch to NewBB.
2324 PredTerm->removeFromParent();
2325 NewBB->getInstList().insert(NewBB->end(), PredTerm);
2326 // Create a conditional branch and update PHI nodes.
2327 BranchInst::Create(NewBB, BB, SI->getCondition(), Pred);
2328 CondLHS->setIncomingValue(I, SI->getFalseValue());
2329 CondLHS->addIncoming(SI->getTrueValue(), NewBB);
2330 // The select is now dead.
2331 SI->eraseFromParent();
2333 // Update any other PHI nodes in BB.
2334 for (BasicBlock::iterator BI = BB->begin();
2335 PHINode *Phi = dyn_cast<PHINode>(BI); ++BI)
        if (Phi != CondLHS)
          Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB);
      return true;
    }
  }
  return false;
}
/// TryToUnfoldSelectInCurrBB - Look for PHI/Select or PHI/CMP/Select in the
/// same BB in the form
/// bb:
///   %p = phi [false, %bb1], [true, %bb2], [false, %bb3], [true, %bb4], ...
///   %s = select %p, trueval, falseval
///
/// or
///
/// bb:
///   %p = phi [0, %bb1], [1, %bb2], [0, %bb3], [1, %bb4], ...
///   %c = cmp %p, 0
///   %s = select %c, trueval, falseval
///
/// And expand the select into a branch structure. This later enables
/// jump-threading over bb in this pass.
///
/// Using a similar approach to SimplifyCFG::FoldCondBranchOnPHI(), unfold the
/// select if the associated PHI has at least one constant. If the unfolded
/// select is not jump-threaded, it will be folded again in later
/// optimizations.
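///
/// After unfolding, a rough sketch of the result (block names illustrative)
/// is a conditional branch around a new empty block plus a PHI merging the
/// two select arms:
///
/// bb:
///   ...
///   br i1 %c, label %bb.then, label %bb.tail
/// bb.then:
///   br label %bb.tail
/// bb.tail:
///   %s = phi [trueval, %bb.then], [falseval, %bb]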
2364 bool JumpThreadingPass::TryToUnfoldSelectInCurrBB(BasicBlock *BB) {
2365 // If threading this would thread across a loop header, don't thread the edge.
2366 // See the comments above FindLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB))
    return false;
2370 for (BasicBlock::iterator BI = BB->begin();
2371 PHINode *PN = dyn_cast<PHINode>(BI); ++BI) {
2372 // Look for a Phi having at least one constant incoming value.
2373 if (llvm::all_of(PN->incoming_values(),
                     [](Value *V) { return !isa<ConstantInt>(V); }))
      continue;
2377 auto isUnfoldCandidate = [BB](SelectInst *SI, Value *V) {
      // Check if SI is in BB and uses V as its condition.
      if (SI->getParent() != BB)
        return false;
      Value *Cond = SI->getCondition();
      return (Cond && Cond == V && Cond->getType()->isIntegerTy(1));
    };
2385 SelectInst *SI = nullptr;
2386 for (Use &U : PN->uses()) {
2387 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
2388 // Look for a ICmp in BB that compares PN with a constant and is the
2389 // condition of a Select.
2390 if (Cmp->getParent() == BB && Cmp->hasOneUse() &&
2391 isa<ConstantInt>(Cmp->getOperand(1 - U.getOperandNo())))
2392 if (SelectInst *SelectI = dyn_cast<SelectInst>(Cmp->user_back()))
            if (isUnfoldCandidate(SelectI, Cmp->use_begin()->get())) {
              SI = SelectI;
              break;
            }
      } else if (SelectInst *SelectI = dyn_cast<SelectInst>(U.getUser())) {
        // Look for a Select in BB that uses PN as condition.
        if (isUnfoldCandidate(SelectI, U.get())) {
          SI = SelectI;
          break;
        }
      }
    }

    if (!SI)
      continue;
2408 // Expand the select.
2409 TerminatorInst *Term =
2410 SplitBlockAndInsertIfThen(SI->getCondition(), SI, false);
2411 PHINode *NewPN = PHINode::Create(SI->getType(), 2, "", SI);
2412 NewPN->addIncoming(SI->getTrueValue(), Term->getParent());
2413 NewPN->addIncoming(SI->getFalseValue(), BB);
2414 SI->replaceAllUsesWith(NewPN);
    SI->eraseFromParent();
    return true;
  }
  return false;
}
2421 /// Try to propagate a guard from the current BB into one of its predecessors
/// in case another branch of execution implies that the condition of this
/// guard is always true. Currently we only process the simplest case that
/// looks like
///
/// Start:
///   %cond = ...
///   br i1 %cond, label %T1, label %F1
/// T1:
///   br label %Merge
/// F1:
///   br label %Merge
/// Merge:
///   %condGuard = ...
///   call void(i1, ...) @llvm.experimental.guard( i1 %condGuard )[ "deopt"() ]
///
2437 /// And cond either implies condGuard or !condGuard. In this case all the
2438 /// instructions before the guard can be duplicated in both branches, and the
2439 /// guard is then threaded to one of them.
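///
/// Concretely (a rough sketch): if %cond implies %condGuard, then T1 is the
/// safe branch and receives copies of only the instructions that precede the
/// guard, while F1 receives copies of those instructions plus the guard
/// itself; PHI nodes in Merge then reconcile the two sets of copies.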
2440 bool JumpThreadingPass::ProcessGuards(BasicBlock *BB) {
2441 using namespace PatternMatch;
2443 // We only want to deal with two predecessors.
2444 BasicBlock *Pred1, *Pred2;
  auto PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE)
    return false;
  Pred1 = *PI++;
  if (PI == PE)
    return false;
  Pred2 = *PI++;
  if (PI != PE)
    return false;
  if (Pred1 == Pred2)
    return false;
2457 // Try to thread one of the guards of the block.
2458 // TODO: Look up deeper than to immediate predecessor?
2459 auto *Parent = Pred1->getSinglePredecessor();
  if (!Parent || Parent != Pred2->getSinglePredecessor())
    return false;

  if (auto *BI = dyn_cast<BranchInst>(Parent->getTerminator()))
    for (auto &I : *BB)
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>()))
        if (ThreadGuard(BB, cast<IntrinsicInst>(&I), BI))
          return true;

  return false;
}
2472 /// Try to propagate the guard from BB which is the lower block of a diamond
/// to one of its branches, in case the diamond's condition implies the guard's
/// condition.
bool JumpThreadingPass::ThreadGuard(BasicBlock *BB, IntrinsicInst *Guard,
                                    BranchInst *BI) {
2477 assert(BI->getNumSuccessors() == 2 && "Wrong number of successors?");
2478 assert(BI->isConditional() && "Unconditional branch has 2 successors?");
2479 Value *GuardCond = Guard->getArgOperand(0);
2480 Value *BranchCond = BI->getCondition();
2481 BasicBlock *TrueDest = BI->getSuccessor(0);
2482 BasicBlock *FalseDest = BI->getSuccessor(1);
2484 auto &DL = BB->getModule()->getDataLayout();
2485 bool TrueDestIsSafe = false;
2486 bool FalseDestIsSafe = false;
2488 // True dest is safe if BranchCond => GuardCond.
  auto Impl = isImpliedCondition(BranchCond, GuardCond, DL);
  if (Impl && *Impl)
    TrueDestIsSafe = true;
  else {
    // False dest is safe if !BranchCond => GuardCond.
    Impl = isImpliedCondition(BranchCond, GuardCond, DL, /* LHSIsTrue */ false);
    if (Impl && *Impl)
      FalseDestIsSafe = true;
  }
2499 if (!TrueDestIsSafe && !FalseDestIsSafe)
2502 BasicBlock *UnguardedBlock = TrueDestIsSafe ? TrueDest : FalseDest;
2503 BasicBlock *GuardedBlock = FalseDestIsSafe ? TrueDest : FalseDest;
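  // The "unguarded" successor is the one whose branch condition already
  // implies the guard's condition; the other successor will receive the
  // duplicated guard below.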
2505 ValueToValueMapTy UnguardedMapping, GuardedMapping;
2506 Instruction *AfterGuard = Guard->getNextNode();
2507 unsigned Cost = getJumpThreadDuplicationCost(BB, AfterGuard, BBDupThreshold);
  if (Cost > BBDupThreshold)
    return false;
2510 // Duplicate all instructions before the guard and the guard itself to the
2511 // branch where implication is not proved.
2512 GuardedBlock = DuplicateInstructionsInSplitBetween(
2513 BB, GuardedBlock, AfterGuard, GuardedMapping);
2514 assert(GuardedBlock && "Could not create the guarded block?");
2515 // Duplicate all instructions before the guard in the unguarded branch.
2516 // Since we have successfully duplicated the guarded block and this block
2517 // has fewer instructions, we expect it to succeed.
2518 UnguardedBlock = DuplicateInstructionsInSplitBetween(BB, UnguardedBlock,
2519 Guard, UnguardedMapping);
2520 assert(UnguardedBlock && "Could not create the unguarded block?");
2521 DEBUG(dbgs() << "Moved guard " << *Guard << " to block "
2522 << GuardedBlock->getName() << "\n");
2524 // Some instructions before the guard may still have uses. For them, we need
2525 // to create Phi nodes merging their copies in both guarded and unguarded
2526 // branches. Those instructions that have no uses can be just removed.
2527 SmallVector<Instruction *, 4> ToRemove;
2528 for (auto BI = BB->begin(); &*BI != AfterGuard; ++BI)
2529 if (!isa<PHINode>(&*BI))
2530 ToRemove.push_back(&*BI);
2532 Instruction *InsertionPoint = &*BB->getFirstInsertionPt();
2533 assert(InsertionPoint && "Empty block?");
2534 // Substitute with Phis & remove.
2535 for (auto *Inst : reverse(ToRemove)) {
2536 if (!Inst->use_empty()) {
2537 PHINode *NewPN = PHINode::Create(Inst->getType(), 2);
2538 NewPN->addIncoming(UnguardedMapping[Inst], UnguardedBlock);
2539 NewPN->addIncoming(GuardedMapping[Inst], GuardedBlock);
2540 NewPN->insertBefore(InsertionPoint);
      Inst->replaceAllUsesWith(NewPN);
    }
    Inst->eraseFromParent();
  }
  return true;
}