//===- Local.cpp - Functions to perform local transformations ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "local"

STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");

//===----------------------------------------------------------------------===//
//  Local constant propagation.
//

/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination. This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
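///
/// For example (illustrative IR, not taken from the original source):
///   br i1 true, label %live, label %dead
/// becomes
///   br label %live
/// and %dead's PHI nodes drop their entry for this block.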
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
                                  const TargetLibraryInfo *TLI) {
  TerminatorInst *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditional jumping on constant
  if (auto *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on constant?
      // YES. Change to unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest     = Cond->getZExtValue() ? Dest2 : Dest1;

      // Let the basic block know that we are letting go of it. Based on this,
      // it will adjust its PHI nodes.
      OldDest->removePredecessor(BB);

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Destination);
      BI->eraseFromParent();
      return true;
    }

    if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Dest1);
      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }
    return false;
  }

  if (auto *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch to an
    // unconditional branch.
    auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *DefaultDest = SI->getDefaultDest();
    BasicBlock *TheOnlyDest = DefaultDest;

    // If the default is unreachable, ignore it when searching for TheOnlyDest.
    if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
        SI->getNumCases() > 0) {
      TheOnlyDest = SI->case_begin()->getCaseSuccessor();
    }

    // Figure out which case it goes to.
    for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
      // Found case matching a constant operand?
      if (i->getCaseValue() == CI) {
        TheOnlyDest = i->getCaseSuccessor();
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest. If so, eliminate it as an explicit compare.
      if (i->getCaseSuccessor() == DefaultDest) {
        MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
        unsigned NCases = SI->getNumCases();
        // Fold the case metadata into the default if there will be any branches
        // left, unless the metadata doesn't match the switch.
        if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
          // Collect branch weights into a vector.
          SmallVector<uint32_t, 8> Weights;
          for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
               ++MD_i) {
            auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
            Weights.push_back(CI->getValue().getZExtValue());
          }
          // Merge weight of this case to the default weight.
          unsigned idx = i->getCaseIndex();
          Weights[0] += Weights[idx+1];
          // Remove weight for this case.
          std::swap(Weights[idx+1], Weights.back());
          Weights.pop_back();
          SI->setMetadata(LLVMContext::MD_prof,
                          MDBuilder(BB->getContext()).
                          createBranchWeights(Weights));
        }
        // Remove this entry.
        DefaultDest->removePredecessor(SI->getParent());
        i = SI->removeCase(i);
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two non-equal
      // destinations.
      if (i->getCaseSuccessor() != TheOnlyDest)
        TheOnlyDest = nullptr;

      // Increment this iterator as we haven't removed the case.
      ++i;
    }

    if (CI && !TheOnlyDest) {
      // Branching on a constant, but not any of the cases, go to the default
      // successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
      BasicBlock *BB = SI->getParent();

      // Remove entries from PHI nodes which we no longer branch to...
      for (BasicBlock *Succ : SI->successors()) {
        // Found case matching a constant operand?
        if (Succ == TheOnlyDest)
          TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest
        else
          Succ->removePredecessor(BB);
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      auto FirstCase = *SI->case_begin();
      Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
                                         FirstCase.getCaseValue(), "cond");

      // Insert the new branch.
      BranchInst *NewBr = Builder.CreateCondBr(Cond,
                                               FirstCase.getCaseSuccessor(),
                                               SI->getDefaultDest());
      MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
      if (MD && MD->getNumOperands() == 3) {
        ConstantInt *SICase =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
        ConstantInt *SIDef =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
        assert(SICase && SIDef);
        // The TrueWeight should be the weight for the single case of SI.
        NewBr->setMetadata(LLVMContext::MD_prof,
                           MDBuilder(BB->getContext()).
                           createBranchWeights(SICase->getValue().getZExtValue(),
                                               SIDef->getValue().getZExtValue()));
      }

      // Update make.implicit metadata to the newly-created conditional branch.
      MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
      if (MakeImplicitMD)
        NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return false;
  }

  if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (auto *BA =
            dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);

      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        if (IBI->getDestination(i) == TheOnlyDest)
          TheOnlyDest = nullptr;
        else
          IBI->getDestination(i)->removePredecessor(IBI->getParent());
      }
      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior. Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (TheOnlyDest) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      return true;
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//

/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
bool llvm::isInstructionTriviallyDead(Instruction *I,
                                      const TargetLibraryInfo *TLI) {
  if (!I->use_empty())
    return false;
  return wouldInstructionBeTriviallyDead(I, TLI);
}

bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
                                           const TargetLibraryInfo *TLI) {
  if (isa<TerminatorInst>(I))
    return false;

  // We don't want the landingpad-like instructions removed by anything this
  // general.
  if (I->isEHPad())
    return false;

  // We don't want debug info removed by anything this general, unless
  // debug info is empty.
  if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
    if (DDI->getAddress())
      return false;
    return true;
  }
  if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
    if (DVI->getValue())
      return false;
    return true;
  }

  if (!I->mayHaveSideEffects())
    return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Safe to delete llvm.stacksave if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave)
      return true;

    // Lifetime intrinsics are dead when their right-hand is undef.
    if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
        II->getIntrinsicID() == Intrinsic::lifetime_end)
      return isa<UndefValue>(II->getArgOperand(1));

    // Assumptions are dead if their condition is trivially true. Guards on
    // true are operationally no-ops. In the future we can consider more
    // sophisticated tradeoffs for guards considering potential for check
    // widening, but for now we keep things simple.
    if (II->getIntrinsicID() == Intrinsic::assume ||
        II->getIntrinsicID() == Intrinsic::experimental_guard) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        return !Cond->isZero();

      return false;
    }
  }

  if (isAllocLikeFn(I, TLI))
    return true;

  if (CallInst *CI = isFreeCall(I, TLI))
    if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
      return C->isNullValue() || isa<UndefValue>(C);

  if (CallSite CS = CallSite(I))
    if (isMathLibCallNoop(CS, TLI))
      return true;

  return false;
}

/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted.
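///
/// For example (illustrative): if %a = add i32 %x, 1 is used only by a dead
/// %b = mul i32 %a, 2, then deleting %b makes %a trivially dead as well, and
/// both are removed.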
bool
llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
                                                 const TargetLibraryInfo *TLI) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
    return false;

  SmallVector<Instruction*, 16> DeadInsts;
  DeadInsts.push_back(I);

  do {
    I = DeadInsts.pop_back_val();

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      if (!OpV->use_empty()) continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          DeadInsts.push_back(OpI);
    }

    I->eraseFromParent();
  } while (!DeadInsts.empty());

  return true;
}

/// areAllUsesEqual - Check whether the uses of a value are all the same.
/// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no uses or multiple uses that all refer to the same
/// value.
static bool areAllUsesEqual(Instruction *I) {
  Value::user_iterator UI = I->user_begin();
  Value::user_iterator UE = I->user_end();
  if (UI == UE)
    return true;

  User *TheUse = *UI;
  for (++UI; UI != UE; ++UI) {
    if (*UI != TheUse)
      return false;
  }
  return true;
}

/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made.
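///
/// For example (illustrative), a PHI whose only user is itself,
///   %p = phi i32 [ 0, %entry ], [ %p, %loop ]
/// forms such a single-use cycle and is deleted.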
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
                                        const TargetLibraryInfo *TLI) {
  SmallPtrSet<Instruction*, 4> Visited;
  for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
       I = cast<Instruction>(*I->user_begin())) {
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(I).second) {
      // Break the cycle and delete the instruction and its operands.
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
      return true;
    }
  }
  return false;
}

static bool
simplifyAndDCEInstruction(Instruction *I,
                          SmallSetVector<Instruction *, 16> &WorkList,
                          const DataLayout &DL,
                          const TargetLibraryInfo *TLI) {
  if (isInstructionTriviallyDead(I, TLI)) {
    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      if (!OpV->use_empty() || I == OpV)
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          WorkList.insert(OpI);
    }

    I->eraseFromParent();

    return true;
  }

  if (Value *SimpleV = SimplifyInstruction(I, DL)) {
    // Add the users to the worklist. CAREFUL: an instruction can use itself,
    // in the case of a phi node.
    for (User *U : I->users()) {
      if (U != I)
        WorkList.insert(cast<Instruction>(U));
    }

    // Replace the instruction with its simplified value.
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(SimpleV);
      Changed = true;
    }
    if (isInstructionTriviallyDead(I, TLI)) {
      I->eraseFromParent();
      Changed = true;
    }
    return Changed;
  }
  return false;
}

/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block.
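///
/// For example (illustrative), %y = add i32 %x, 0 simplifies to %x; the dead
/// add is then erased and its users are revisited.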
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
                                       const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  const DataLayout &DL = BB->getModule()->getDataLayout();

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications. The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(&BB->back());
#endif

  SmallSetVector<Instruction *, 16> WorkList;
  // Iterate over the original function, only adding insts to the worklist
  // if they actually need to be revisited. This avoids having to pre-init
  // the worklist with the entire function's worth of instructions.
  for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
       BI != E;) {
    assert(!BI->isTerminator());
    Instruction *I = &*BI;
    ++BI;

    // We're visiting this instruction now, so make sure it's not in the
    // worklist from an earlier visit.
    if (!WorkList.count(I))
      MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }

  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();
    MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }
  return MadeChange;
}

//===----------------------------------------------------------------------===//
//  Control Flow Graph Restructuring.
//

/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
/// method is called when we're about to delete Pred as a predecessor of BB. If
/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
///
/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
/// nodes that collapse into identity values. For example, if we have:
///   x = phi(1, 0, 0, 0)
///   y = and x, z
///
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the and to 0.
void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred) {
  // This only adjusts blocks with PHI nodes.
  if (!isa<PHINode>(BB->begin()))
    return;

  // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
  // them down. This will leave us with single entry phi nodes and other phis
  // that can be removed.
  BB->removePredecessor(Pred, true);

  WeakTrackingVH PhiIt = &BB->front();
  while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
    PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
    Value *OldPhiIt = PhiIt;

    if (!recursivelySimplifyInstruction(PN))
      continue;

    // If recursive simplification ended up deleting the next PHI node we would
    // iterate to, then our iterator is invalid, restart scanning from the top
    // of the block.
    if (PhiIt != OldPhiIt) PhiIt = &BB->front();
  }
}

/// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
/// predecessor is known to have one successor (DestBB!). Eliminate the edge
/// between them, moving the instructions in the predecessor into DestBB and
/// deleting the predecessor block.
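///
/// For example (illustrative), with
///   pred:  %v = load i32, i32* %p
///          br label %dest
/// the load is moved into %dest, every branch to %pred is redirected to
/// %dest, and %pred is erased.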
void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, DominatorTree *DT) {
  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace self referencing PHI with undef, it must be dead.
    if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  // Zap anything that took the address of DestBB. Not doing this will give the
  // address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
        ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());

  // If the PredBB is the entry block of the function, move DestBB up to
  // become the entry block after we erase PredBB.
  if (PredBB == &DestBB->getParent()->getEntryBlock())
    DestBB->moveAfter(PredBB);

  if (DT) {
    // For some irreducible CFG we end up having forward-unreachable blocks
    // so check if getNode returns a valid node before updating the domtree.
    if (DomTreeNode *DTN = DT->getNode(PredBB)) {
      BasicBlock *PredBBIDom = DTN->getIDom()->getBlock();
      DT->changeImmediateDominator(DestBB, PredBBIDom);
      DT->eraseNode(PredBB);
    }
  }

  PredBB->eraseFromParent();
}

/// CanMergeValues - Return true if we can choose one of these values to use
/// in place of the other. Note that we will always choose the non-undef
/// value here.
static bool CanMergeValues(Value *First, Value *Second) {
  return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
}

/// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
/// almost-empty BB ending in an unconditional branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
        << Succ->getName() << "\n");
  // Shortcut, if there is only a single predecessor it must be BB and merging
  // is always safe
  if (Succ->getSinglePredecessor()) return true;

  // Make a list of the predecessors of BB
  SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
                            PN->getIncomingValue(PI))) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with "
                << BBPN->getName() << " with regard to common predecessor "
                << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(Val, PN->getIncomingValue(PI))) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with regard to common "
                << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}

using PredBlockVector = SmallVector<BasicBlock *, 16>;
using IncomingValueMap = DenseMap<BasicBlock *, Value *>;

/// \brief Determines the value to use as the phi node input for a block.
///
/// Select between \p OldVal and any value that we know flows from \p BB
/// to a particular phi on the basis of which one (if either) is not
/// undef. Update IncomingValues based on the selected value.
///
/// \param OldVal The value we are considering selecting.
/// \param BB The block that the value flows in from.
/// \param IncomingValues A map from block-to-value for other phi inputs
/// that we have examined.
///
/// \returns the selected value.
static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
                                          IncomingValueMap &IncomingValues) {
  if (!isa<UndefValue>(OldVal)) {
    assert((!IncomingValues.count(BB) ||
            IncomingValues.find(BB)->second == OldVal) &&
           "Expected OldVal to match incoming value from BB!");

    IncomingValues.insert(std::make_pair(BB, OldVal));
    return OldVal;
  }

  IncomingValueMap::const_iterator It = IncomingValues.find(BB);
  if (It != IncomingValues.end()) return It->second;

  return OldVal;
}

/// \brief Create a map from block to value for the operands of a
/// given phi.
///
/// Create a map from block to value for each non-undef value flowing
/// into \p PN.
///
/// \param PN The phi we are collecting the map for.
/// \param IncomingValues [out] The map from block to value for this phi.
static void gatherIncomingValuesToPhi(PHINode *PN,
                                      IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *BB = PN->getIncomingBlock(i);
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V))
      IncomingValues.insert(std::make_pair(BB, V));
  }
}

/// \brief Replace the incoming undef values to a phi with the values
/// from a block-to-value map.
///
/// \param PN The phi we are replacing the undefs in.
/// \param IncomingValues A map from block to value.
static void replaceUndefValuesInPhi(PHINode *PN,
                                    const IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V)) continue;

    BasicBlock *BB = PN->getIncomingBlock(i);
    IncomingValueMap::const_iterator It = IncomingValues.find(BB);
    if (It == IncomingValues.end()) continue;

    PN->setIncomingValue(i, It->second);
  }
}

/// \brief Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN) {
  Value *OldVal = PN->removeIncomingValue(BB, false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly. Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  } else {
    for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      BasicBlock *PredBB = BBPreds[i];
      Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  }

  replaceUndefValuesInPhi(PN, IncomingValues);
}

/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
/// potential side-effect free intrinsics and the branch. If possible,
/// eliminate BB by rewriting all the predecessors to branch to the successor
/// block and return true. If we can't transform, return false.
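///
/// For example (illustrative), a block containing only
///   bb:
///     br label %succ
/// is removed by retargeting every predecessor of %bb directly to %succ and
/// updating %succ's PHI nodes accordingly.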
bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't eliminate infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ) return false;

  // Check to see if merging these blocks would cause conflicts for any of the
  // phi nodes in BB or Succ. If not, we can safely merge.
  if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged. It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors. Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      for (Use &U : BBI->uses()) {
        if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
          if (PN->getIncomingBlock(U) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes
    //
    const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);

      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.

    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
                               BB->getInstList());
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // If the unconditional branch we replaced contains llvm.loop metadata, we
  // add the metadata to the branch instructions in the predecessors.
  unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
  Instruction *TI = BB->getTerminator();
  if (TI)
    if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
      for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
        BasicBlock *Pred = *PI;
        Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
      }

  // Everything that jumped to BB now goes to Succ.
  BB->replaceAllUsesWith(Succ);
  if (!Succ->hasName()) Succ->takeName(BB);
  BB->eraseFromParent(); // Delete the old basic block.
  return true;
}

/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
/// nodes in this block. This doesn't try to be clever about PHI nodes
/// which differ only in the order of the incoming values, but instcombine
/// orders them so it usually won't matter.
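///
/// For example (illustrative):
///   %a = phi i32 [ 0, %x ], [ 1, %y ]
///   %b = phi i32 [ 0, %x ], [ 1, %y ]
/// %b is replaced by %a and erased.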
bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  struct PHIDenseMapInfo {
    static PHINode *getEmptyKey() {
      return DenseMapInfo<PHINode *>::getEmptyKey();
    }

    static PHINode *getTombstoneKey() {
      return DenseMapInfo<PHINode *>::getTombstoneKey();
    }

    static unsigned getHashValue(PHINode *PN) {
      // Compute a hash value on the operands. Instcombine will likely have
      // sorted them, which helps expose duplicates, but we have to check all
      // the operands to be safe in case instcombine hasn't run.
      return static_cast<unsigned>(hash_combine(
          hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
          hash_combine_range(PN->block_begin(), PN->block_end())));
    }

    static bool isEqual(PHINode *LHS, PHINode *RHS) {
      if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
          RHS == getEmptyKey() || RHS == getTombstoneKey())
        return LHS == RHS;
      return LHS->isIdenticalTo(RHS);
    }
  };

  // Set of unique PHINodes.
  DenseSet<PHINode *, PHIDenseMapInfo> PHISet;

  // Examine each PHI.
  bool Changed = false;
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
    auto Inserted = PHISet.insert(PN);
    if (!Inserted.second) {
      // A duplicate. Replace this PHI with its duplicate.
      PN->replaceAllUsesWith(*Inserted.first);
      PN->eraseFromParent();
      Changed = true;

      // The RAUW can change PHIs that we already visited. Start over from the
      // beginning.
      PHISet.clear();
      I = BB->begin();
    }
  }

  return Changed;
}

/// enforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
static unsigned enforceKnownAlignment(Value *V, unsigned Align,
                                      unsigned PrefAlign,
                                      const DataLayout &DL) {
  assert(PrefAlign > Align);

  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // TODO: ideally, computeKnownBits ought to have used
    // AllocaInst::getAlignment() in its computation already, making
    // the below max redundant. But, as it turns out,
    // stripPointerCasts recurses through infinite layers of bitcasts,
    // while computeKnownBits is not allowed to traverse more than 6
    // levels.
    Align = std::max(AI->getAlignment(), Align);
    if (PrefAlign <= Align)
      return Align;

    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    if (DL.exceedsNaturalStackAlignment(PrefAlign))
      return Align;
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (auto *GO = dyn_cast<GlobalObject>(V)) {
    // TODO: as above, this shouldn't be necessary.
    Align = std::max(GO->getAlignment(), Align);
    if (PrefAlign <= Align)
      return Align;

    // If there is a large requested alignment and we can, bump up the alignment
    // of the global. If the memory we set aside for the global may not be the
    // memory used by the final program then it is impossible for us to reliably
    // enforce the preferred alignment.
    if (!GO->canIncreaseAlignment())
      return Align;

    GO->setAlignment(PrefAlign);
    return PrefAlign;
  }

  return Align;
}

unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
                                          const DataLayout &DL,
                                          const Instruction *CxtI,
                                          AssumptionCache *AC,
                                          const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");

  KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
  unsigned TrailZ = Known.countMinTrailingZeros();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(Known.getBitWidth() - 1, TrailZ);

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, +Value::MaximumAlignment);

  if (PrefAlign > Align)
    Align = enforceKnownAlignment(V, Align, PrefAlign, DL);

  // We don't need to make any adjustment.
  return Align;
}

///===---------------------------------------------------------------------===//
///  Dbg Intrinsic utilities
///

/// See if there is a dbg.value intrinsic for DIVar before I.
static bool LdStHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr,
                              Instruction *I) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  BasicBlock::InstListType::iterator PrevI(I);
  if (PrevI != I->getParent()->getInstList().begin()) {
    --PrevI;
    if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
      if (DVI->getValue() == I->getOperand(0) &&
          DVI->getVariable() == DIVar &&
          DVI->getExpression() == DIExpr)
        return true;
  }
  return false;
}

/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
static bool PhiHasDebugValue(DILocalVariable *DIVar,
                             DIExpression *DIExpr,
                             PHINode *APN) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  SmallVector<DbgValueInst *, 1> DbgValues;
  findDbgValues(DbgValues, APN);
  for (auto *DVI : DbgValues) {
    assert(DVI->getValue() == APN);
    if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
      return true;
  }
  return false;
}

/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
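///
/// For example (illustrative), given a dbg.declare describing %x.addr and
///   store i32 %v, i32* %x.addr
/// a call to llvm.dbg.value describing %v is inserted before the store, so
/// the variable's value is tracked directly.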
void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
                                           StoreInst *SI, DIBuilder &Builder) {
  assert(DII->isAddressOfVariable());
  auto *DIVar = DII->getVariable();
  assert(DIVar && "Missing variable");
  auto *DIExpr = DII->getExpression();
  Value *DV = SI->getOperand(0);

  // If an argument is zero extended then use argument directly. The ZExt
  // may be zapped by an optimization pass in future.
  Argument *ExtendedArg = nullptr;
  if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
  if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
  if (ExtendedArg) {
    // If this DII was already describing only a fragment of a variable, ensure
    // that fragment is appropriately narrowed here.
    // But if a fragment wasn't used, describe the value as the original
    // argument (rather than the zext or sext) so that it remains described even
    // if the sext/zext is optimized away. This widens the variable description,
    // leaving it up to the consumer to know how the smaller value may be
    // represented in a larger register.
    if (auto Fragment = DIExpr->getFragmentInfo()) {
      unsigned FragmentOffset = Fragment->OffsetInBits;
      SmallVector<uint64_t, 3> Ops(DIExpr->elements_begin(),
                                   DIExpr->elements_end() - 3);
      Ops.push_back(dwarf::DW_OP_LLVM_fragment);
      Ops.push_back(FragmentOffset);
      const DataLayout &DL = DII->getModule()->getDataLayout();
      Ops.push_back(DL.getTypeSizeInBits(ExtendedArg->getType()));
      DIExpr = Builder.createExpression(Ops);
    }
    DV = ExtendedArg;
  }
  if (!LdStHasDebugValue(DIVar, DIExpr, SI))
    Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, DII->getDebugLoc(),
                                    SI);
}

/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
                                           LoadInst *LI, DIBuilder &Builder) {
  auto *DIVar = DII->getVariable();
  auto *DIExpr = DII->getExpression();
  assert(DIVar && "Missing variable");

  if (LdStHasDebugValue(DIVar, DIExpr, LI))
    return;

  // We are now tracking the loaded value instead of the address. In the
  // future if multi-location support is added to the IR, it might be
  // preferable to keep tracking both the loaded value and the original
  // address in case the alloca can not be elided.
  Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
      LI, DIVar, DIExpr, DII->getDebugLoc(), (Instruction *)nullptr);
  DbgValue->insertAfter(LI);
}

/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
/// llvm.dbg.declare or llvm.dbg.addr intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
                                           PHINode *APN, DIBuilder &Builder) {
  auto *DIVar = DII->getVariable();
  auto *DIExpr = DII->getExpression();
  assert(DIVar && "Missing variable");

  if (PhiHasDebugValue(DIVar, DIExpr, APN))
    return;

  BasicBlock *BB = APN->getParent();
  auto InsertionPt = BB->getFirstInsertionPt();

  // The block may be a catchswitch block, which does not have a valid
  // insertion point.
  // FIXME: Insert dbg.value markers in the successors when appropriate.
  if (InsertionPt != BB->end())
    Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, DII->getDebugLoc(),
                                    &*InsertionPt);
}

/// Determine whether this alloca is either a VLA or an array.
static bool isArray(AllocaInst *AI) {
  return AI->isArrayAllocation() ||
         AI->getType()->getElementType()->isArrayTy();
}

/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
/// of llvm.dbg.value intrinsics.
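///
/// For example (illustrative), a dbg.declare of %x.addr is erased and each
/// store to %x.addr gets a dbg.value for the stored value, so the variable
/// stays described even if the alloca is later eliminated.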
bool llvm::LowerDbgDeclare(Function &F) {
  DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
  SmallVector<DbgDeclareInst *, 4> Dbgs;
  for (auto &FI : F)
    for (Instruction &BI : FI)
      if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
        Dbgs.push_back(DDI);

  if (Dbgs.empty())
    return false;

  for (auto &I : Dbgs) {
    DbgDeclareInst *DDI = I;
    AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
    // If this is an alloca for a scalar variable, insert a dbg.value
    // at each load and store to the alloca and erase the dbg.declare.
    // The dbg.values allow tracking a variable even if it is not
    // stored on the stack, while the dbg.declare can only describe
    // the stack slot (and at a lexical-scope granularity). Later
    // passes will attempt to elide the stack slot.
    if (AI && !isArray(AI)) {
      for (auto &AIUse : AI->uses()) {
        User *U = AIUse.getUser();
        if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
          if (AIUse.getOperandNo() == 1)
            ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
        } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
          ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
        } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
          // This is a call by-value or some other instruction that
          // takes a pointer to the variable. Insert a *value*
          // intrinsic that describes the alloca.
          DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(),
                                      DDI->getExpression(), DDI->getDebugLoc(),
                                      CI);
        }
      }
      DDI->eraseFromParent();
    }
  }
  return true;
}

/// Finds all intrinsics declaring local variables as living in the memory that
/// 'V' points to. This may include a mix of dbg.declare and
/// dbg.addr intrinsics.
TinyPtrVector<DbgInfoIntrinsic *> llvm::FindDbgAddrUses(Value *V) {
  auto *L = LocalAsMetadata::getIfExists(V);
  if (!L)
    return {};
  auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L);
  if (!MDV)
    return {};

  TinyPtrVector<DbgInfoIntrinsic *> Declares;
  for (User *U : MDV->users()) {
    if (auto *DII = dyn_cast<DbgInfoIntrinsic>(U))
      if (DII->isAddressOfVariable())
        Declares.push_back(DII);
  }

  return Declares;
}

void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) {
  if (auto *L = LocalAsMetadata::getIfExists(V))
    if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
      for (User *U : MDV->users())
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
          DbgValues.push_back(DVI);
}

static void findDbgUsers(SmallVectorImpl<DbgInfoIntrinsic *> &DbgUsers,
                         Value *V) {
  if (auto *L = LocalAsMetadata::getIfExists(V))
    if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
      for (User *U : MDV->users())
        if (DbgInfoIntrinsic *DII = dyn_cast<DbgInfoIntrinsic>(U))
          DbgUsers.push_back(DII);
}

bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
                             Instruction *InsertBefore, DIBuilder &Builder,
                             bool DerefBefore, int Offset, bool DerefAfter) {
  auto DbgAddrs = FindDbgAddrUses(Address);
  for (DbgInfoIntrinsic *DII : DbgAddrs) {
    DebugLoc Loc = DII->getDebugLoc();
    auto *DIVar = DII->getVariable();
    auto *DIExpr = DII->getExpression();
    assert(DIVar && "Missing variable");
    DIExpr = DIExpression::prepend(DIExpr, DerefBefore, Offset, DerefAfter);
    // Insert llvm.dbg.declare immediately after InsertBefore, and remove old
    // llvm.dbg.declare.
    Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, InsertBefore);
    if (DII == InsertBefore)
      InsertBefore = &*std::next(InsertBefore->getIterator());
    DII->eraseFromParent();
  }
  return !DbgAddrs.empty();
}

bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                                      DIBuilder &Builder, bool DerefBefore,
                                      int Offset, bool DerefAfter) {
  return replaceDbgDeclare(AI, NewAllocaAddress, AI->getNextNode(), Builder,
                           DerefBefore, Offset, DerefAfter);
}

static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
                                        DIBuilder &Builder, int Offset) {
  DebugLoc Loc = DVI->getDebugLoc();
  auto *DIVar = DVI->getVariable();
  auto *DIExpr = DVI->getExpression();
  assert(DIVar && "Missing variable");

  // This is an alloca-based llvm.dbg.value. The first thing it should do with
  // the alloca pointer is dereference it. Otherwise we don't know how to handle
  // it and give up.
  if (!DIExpr || DIExpr->getNumElements() < 1 ||
      DIExpr->getElement(0) != dwarf::DW_OP_deref)
    return;

  // Insert the offset immediately after the first deref.
  // We could just change the offset argument of dbg.value, but it's unsigned...
  if (Offset) {
    SmallVector<uint64_t, 4> Ops;
    Ops.push_back(dwarf::DW_OP_deref);
    DIExpression::appendOffset(Ops, Offset);
    Ops.append(DIExpr->elements_begin() + 1, DIExpr->elements_end());
    DIExpr = Builder.createExpression(Ops);
  }

  Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
  DVI->eraseFromParent();
}

void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                                    DIBuilder &Builder, int Offset) {
  if (auto *L = LocalAsMetadata::getIfExists(AI))
    if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
      for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) {
        Use &U = *UI++;
        if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
          replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
      }
}

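// Rewrite debug intrinsic users of I so that the variable locations they
// describe can survive if I itself is later deleted, for example by folding
// a constant GEP offset or an "add" into the DIExpression.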
void llvm::salvageDebugInfo(Instruction &I) {
  SmallVector<DbgValueInst *, 1> DbgValues;
  auto &M = *I.getModule();

  auto wrapMD = [&](Value *V) {
    return MetadataAsValue::get(I.getContext(), ValueAsMetadata::get(V));
  };

  auto applyOffset = [&](DbgValueInst *DVI, uint64_t Offset) {
    auto *DIExpr = DVI->getExpression();
    DIExpr = DIExpression::prepend(DIExpr, DIExpression::NoDeref, Offset,
                                   DIExpression::NoDeref,
                                   DIExpression::WithStackValue);
    DVI->setOperand(0, wrapMD(I.getOperand(0)));
    DVI->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
    DEBUG(dbgs() << "SALVAGE: " << *DVI << '\n');
  };

  if (isa<BitCastInst>(&I) || isa<IntToPtrInst>(&I)) {
    // Bitcasts are entirely irrelevant for debug info. Rewrite dbg.value,
    // dbg.addr, and dbg.declare to use the cast's source.
    SmallVector<DbgInfoIntrinsic *, 1> DbgUsers;
    findDbgUsers(DbgUsers, &I);
    for (auto *DII : DbgUsers) {
      DII->setOperand(0, wrapMD(I.getOperand(0)));
      DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
    }
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    findDbgValues(DbgValues, &I);
    for (auto *DVI : DbgValues) {
      unsigned BitWidth =
          M.getDataLayout().getPointerSizeInBits(GEP->getPointerAddressSpace());
      APInt Offset(BitWidth, 0);
      // Rewrite a constant GEP into a DIExpression. Since we are performing
      // arithmetic to compute the variable's *value* in the DIExpression, we
      // need to mark the expression with a DW_OP_stack_value.
      if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset))
        // GEP offsets are i32 and thus always fit into an int64_t.
        applyOffset(DVI, Offset.getSExtValue());
    }
  } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
    if (BI->getOpcode() == Instruction::Add)
      if (auto *ConstInt = dyn_cast<ConstantInt>(I.getOperand(1)))
        if (ConstInt->getBitWidth() <= 64) {
          APInt Offset = ConstInt->getValue();
          findDbgValues(DbgValues, &I);
          for (auto *DVI : DbgValues)
            applyOffset(DVI, Offset.getSExtValue());
        }
  } else if (isa<LoadInst>(&I)) {
    findDbgValues(DbgValues, &I);
    for (auto *DVI : DbgValues) {
      // Rewrite the load into DW_OP_deref.
      auto *DIExpr = DVI->getExpression();
      DIExpr = DIExpression::prepend(DIExpr, DIExpression::WithDeref);
      DVI->setOperand(0, wrapMD(I.getOperand(0)));
      DVI->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
      DEBUG(dbgs() << "SALVAGE: " << *DVI << '\n');
    }
  }
}

unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
  unsigned NumDeadInst = 0;
  // Delete the instructions backwards, as it has a reduced likelihood of
  // having to update as many def-use and use-def chains.
  Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
  while (EndInst != &BB->front()) {
    // Delete the next to last instruction.
    Instruction *Inst = &*--EndInst->getIterator();
    if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
      Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
    if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
      EndInst = Inst;
      continue;
    }
    if (!isa<DbgInfoIntrinsic>(Inst))
      ++NumDeadInst;
    Inst->eraseFromParent();
  }
  return NumDeadInst;
}

unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
                                   bool PreserveLCSSA) {
  BasicBlock *BB = I->getParent();
  // Loop over all of the successors, removing BB's entry from any PHI
  // nodes.
  for (BasicBlock *Successor : successors(BB))
    Successor->removePredecessor(BB, PreserveLCSSA);

  // Insert a call to llvm.trap right before this. This turns the undefined
  // behavior into a hard fail instead of falling through into random code.
  if (UseLLVMTrap) {
    Value *TrapFn =
      Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
    CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
    CallTrap->setDebugLoc(I->getDebugLoc());
  }
  new UnreachableInst(I->getContext(), I);

  // All instructions after this are dead.
  unsigned NumInstrsRemoved = 0;
  BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
  while (BBI != BBE) {
    if (!BBI->use_empty())
      BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
    BB->getInstList().erase(BBI++);
    ++NumInstrsRemoved;
  }
  return NumInstrsRemoved;
}

/// changeToCall - Convert the specified invoke into a normal call.
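/// For example (illustrative):
///   invoke void @f() to label %normal unwind label %lpad
/// becomes
///   call void @f()
///   br label %normal
/// and %lpad loses this block as a predecessor.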
static void changeToCall(InvokeInst *II) {
  SmallVector<Value*, 8> Args(II->arg_begin(), II->arg_end());
  SmallVector<OperandBundleDef, 1> OpBundles;
  II->getOperandBundlesAsDefs(OpBundles);
  CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, OpBundles,
                                       "", II);
  NewCall->takeName(II);
  NewCall->setCallingConv(II->getCallingConv());
  NewCall->setAttributes(II->getAttributes());
  NewCall->setDebugLoc(II->getDebugLoc());
  II->replaceAllUsesWith(NewCall);

  // Follow the call by a branch to the normal destination.
  BranchInst::Create(II->getNormalDest(), II);

  // Update PHI nodes in the unwind destination
  II->getUnwindDest()->removePredecessor(II->getParent());
  II->eraseFromParent();
}

BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
                                                   BasicBlock *UnwindEdge) {
  BasicBlock *BB = CI->getParent();

  // Convert this function call into an invoke instruction. First, split the
  // basic block.
  BasicBlock *Split =
      BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");

  // Delete the unconditional branch inserted by splitBasicBlock
  BB->getInstList().pop_back();

  // Create the new invoke instruction.
  SmallVector<Value *, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
  SmallVector<OperandBundleDef, 1> OpBundles;

  CI->getOperandBundlesAsDefs(OpBundles);

  // Note: we're round tripping operand bundles through memory here, and that
  // can potentially be avoided with a cleverer API design that we do not have
  // as of this time.

  InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge,
                                      InvokeArgs, OpBundles, CI->getName(), BB);
  II->setDebugLoc(CI->getDebugLoc());
  II->setCallingConv(CI->getCallingConv());
  II->setAttributes(CI->getAttributes());

  // Make sure that anything using the call now uses the invoke! This also
  // updates the CallGraph if present, because it uses a WeakTrackingVH.
  CI->replaceAllUsesWith(II);

  // Delete the original call
  Split->getInstList().pop_front();
  return Split;
}

1538 static bool markAliveBlocks(Function &F,
1539 SmallPtrSetImpl<BasicBlock*> &Reachable) {
1540 SmallVector<BasicBlock*, 128> Worklist;
1541 BasicBlock *BB = &F.front();
1542 Worklist.push_back(BB);
1543 Reachable.insert(BB);
1544 bool Changed = false;
1546 BB = Worklist.pop_back_val();
1548 // Do a quick scan of the basic block, turning any obviously unreachable
1549 // instructions into LLVM unreachable insts. The instruction combining pass
1550 // canonicalizes unreachable insts into stores to null or undef.
1551 for (Instruction &I : *BB) {
1552 // Assumptions that are known to be false are equivalent to unreachable.
1553 // Also, if the condition is undefined, then we make the choice most
1554 // beneficial to the optimizer, and choose that to also be unreachable.
1555 if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
1556 if (II->getIntrinsicID() == Intrinsic::assume) {
1557 if (match(II->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
1558 // Don't insert a call to llvm.trap right before the unreachable.
1559 changeToUnreachable(II, false);
1565 if (II->getIntrinsicID() == Intrinsic::experimental_guard) {
1566 // A call to the guard intrinsic bails out of the current compilation
1567 // unit if the predicate passed to it is false. If the predicate is a
1568 // constant false, then we know the guard will bail out of the current
1569 // compile unconditionally, so all code following it is dead.
1571 // Note: unlike in llvm.assume, it is not "obviously profitable" for
1572 // guards to treat `undef` as `false` since a guard on `undef` can
1573 // still be useful for widening.
1574 if (match(II->getArgOperand(0), m_Zero()))
1575 if (!isa<UnreachableInst>(II->getNextNode())) {
1576 changeToUnreachable(II->getNextNode(), /*UseLLVMTrap=*/ false);
1583 if (auto *CI = dyn_cast<CallInst>(&I)) {
1584 Value *Callee = CI->getCalledValue();
1585 if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
1586 changeToUnreachable(CI, /*UseLLVMTrap=*/false);
1590 if (CI->doesNotReturn()) {
1591 // If we found a call to a no-return function, insert an unreachable
1592 // instruction after it. Make sure there isn't *already* one there
1594 if (!isa<UnreachableInst>(CI->getNextNode())) {
1595 // Don't insert a call to llvm.trap right before the unreachable.
1596 changeToUnreachable(CI->getNextNode(), false);
1603 // Store to undef and store to null are undefined and used to signal that
1604 // they should be changed to unreachable by passes that can't modify the
1605 // CFG.
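// For example (illustrative), "store i32 1, i32* null" in address space 0 is
// rewritten below: the store and everything after it in the block are replaced
// by a trap call followed by 'unreachable'.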
1606 if (auto *SI = dyn_cast<StoreInst>(&I)) {
1607 // Don't touch volatile stores.
1608 if (SI->isVolatile()) continue;
1610 Value *Ptr = SI->getOperand(1);
1612 if (isa<UndefValue>(Ptr) ||
1613 (isa<ConstantPointerNull>(Ptr) &&
1614 SI->getPointerAddressSpace() == 0)) {
1615 changeToUnreachable(SI, true);
1616 Changed = true;
1617 break;
1618 }
1619 }
1620 }
1622 TerminatorInst *Terminator = BB->getTerminator();
1623 if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
1624 // Turn invokes that call 'nounwind' functions into ordinary calls.
1625 Value *Callee = II->getCalledValue();
1626 if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
1627 changeToUnreachable(II, true);
1628 Changed = true;
1629 } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
1630 if (II->use_empty() && II->onlyReadsMemory()) {
1631 // The result is unused and the callee only reads memory, so just jump to the normal destination branch.
1632 BranchInst::Create(II->getNormalDest(), II);
1633 II->getUnwindDest()->removePredecessor(II->getParent());
1634 II->eraseFromParent();
1635 } else
1636 changeToCall(II);
1637 Changed = true;
1638 }
1639 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
1640 // Remove catchpads which cannot be reached.
1641 struct CatchPadDenseMapInfo {
1642 static CatchPadInst *getEmptyKey() {
1643 return DenseMapInfo<CatchPadInst *>::getEmptyKey();
1644 }
1646 static CatchPadInst *getTombstoneKey() {
1647 return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
1648 }
1650 static unsigned getHashValue(CatchPadInst *CatchPad) {
1651 return static_cast<unsigned>(hash_combine_range(
1652 CatchPad->value_op_begin(), CatchPad->value_op_end()));
1653 }
1655 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
1656 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
1657 RHS == getEmptyKey() || RHS == getTombstoneKey())
1658 return LHS == RHS;
1659 return LHS->isIdenticalTo(RHS);
1660 }
1661 };
1663 // Set of unique CatchPads.
1664 SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
1665 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
1666 HandlerSet;
1667 detail::DenseSetEmpty Empty;
1668 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
1669 E = CatchSwitch->handler_end();
1670 I != E; ++I) {
1671 BasicBlock *HandlerBB = *I;
1672 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
1673 if (!HandlerSet.insert({CatchPad, Empty}).second) {
1674 CatchSwitch->removeHandler(I);
1675 --I;
1676 --E;
1677 Changed = true;
1678 }
1679 }
1680 }
1682 Changed |= ConstantFoldTerminator(BB, true);
1683 for (BasicBlock *Successor : successors(BB))
1684 if (Reachable.insert(Successor).second)
1685 Worklist.push_back(Successor);
1686 } while (!Worklist.empty());
1687 return Changed;
1688 }
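/// Rewrite BB's terminator so that it no longer has an unwind successor: an
/// invoke becomes a plain call, a cleanupret unwinds to the caller instead,
/// and a catchswitch is rebuilt without its unwind destination.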
1690 void llvm::removeUnwindEdge(BasicBlock *BB) {
1691 TerminatorInst *TI = BB->getTerminator();
1693 if (auto *II = dyn_cast<InvokeInst>(TI)) {
1694 changeToCall(II);
1695 return;
1696 }
1698 TerminatorInst *NewTI;
1699 BasicBlock *UnwindDest;
1701 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
1702 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
1703 UnwindDest = CRI->getUnwindDest();
1704 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
1705 auto *NewCatchSwitch = CatchSwitchInst::Create(
1706 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
1707 CatchSwitch->getName(), CatchSwitch);
1708 for (BasicBlock *PadBB : CatchSwitch->handlers())
1709 NewCatchSwitch->addHandler(PadBB);
1711 NewTI = NewCatchSwitch;
1712 UnwindDest = CatchSwitch->getUnwindDest();
1713 } else {
1714 llvm_unreachable("Could not find unwind successor");
1715 }
1717 NewTI->takeName(TI);
1718 NewTI->setDebugLoc(TI->getDebugLoc());
1719 UnwindDest->removePredecessor(BB);
1720 TI->replaceAllUsesWith(NewTI);
1721 TI->eraseFromParent();
1722 }
1724 /// removeUnreachableBlocks - Remove blocks that are not reachable, even
1725 /// if they are in a dead cycle. Return true if a change was made, false
1726 /// otherwise. If `LVI` is passed, this function preserves LazyValueInfo
1727 /// after modifying the CFG.
1728 bool llvm::removeUnreachableBlocks(Function &F, LazyValueInfo *LVI) {
1729 SmallPtrSet<BasicBlock*, 16> Reachable;
1730 bool Changed = markAliveBlocks(F, Reachable);
1732 // If every block is reachable, there is nothing to remove.
1733 if (Reachable.size() == F.size())
1734 return Changed;
1736 assert(Reachable.size() < F.size());
1737 NumRemoved += F.size()-Reachable.size();
1739 // Loop over all of the basic blocks that are not reachable, dropping all of
1740 // their internal references...
1741 for (Function::iterator BB = ++F.begin(), E = F.end(); BB != E; ++BB) {
1742 if (Reachable.count(&*BB))
1743 continue;
1745 for (BasicBlock *Successor : successors(&*BB))
1746 if (Reachable.count(Successor))
1747 Successor->removePredecessor(&*BB);
1748 if (LVI)
1749 LVI->eraseBlock(&*BB);
1750 BB->dropAllReferences();
1751 }
1753 for (Function::iterator I = ++F.begin(); I != F.end();)
1754 if (!Reachable.count(&*I))
1755 I = F.getBasicBlockList().erase(I);
1756 else
1757 ++I;
1759 return true;
1760 }
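/// Merge J's metadata into K so that K can stand in for both instructions, for
/// example when two loads are being CSE'd into one. Kinds not listed in
/// KnownIDs are dropped from K; known kinds are combined conservatively, e.g.
/// two !range nodes become a range covering both, while !nonnull and
/// !invariant.load are kept only when present on both J and K.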
1762 void llvm::combineMetadata(Instruction *K, const Instruction *J,
1763 ArrayRef<unsigned> KnownIDs) {
1764 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
1765 K->dropUnknownNonDebugMetadata(KnownIDs);
1766 K->getAllMetadataOtherThanDebugLoc(Metadata);
1767 for (const auto &MD : Metadata) {
1768 unsigned Kind = MD.first;
1769 MDNode *JMD = J->getMetadata(Kind);
1770 MDNode *KMD = MD.second;
1772 switch (Kind) {
1773 default:
1774 K->setMetadata(Kind, nullptr); // Remove unknown metadata
1775 break;
1776 case LLVMContext::MD_dbg:
1777 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
1778 case LLVMContext::MD_tbaa:
1779 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
1780 break;
1781 case LLVMContext::MD_alias_scope:
1782 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
1783 break;
1784 case LLVMContext::MD_noalias:
1785 case LLVMContext::MD_mem_parallel_loop_access:
1786 K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
1787 break;
1788 case LLVMContext::MD_range:
1789 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
1790 break;
1791 case LLVMContext::MD_fpmath:
1792 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
1793 break;
1794 case LLVMContext::MD_invariant_load:
1795 // Only set the !invariant.load if it is present in both instructions.
1796 K->setMetadata(Kind, JMD);
1797 break;
1798 case LLVMContext::MD_nonnull:
1799 // Only set the !nonnull if it is present in both instructions.
1800 K->setMetadata(Kind, JMD);
1801 break;
1802 case LLVMContext::MD_invariant_group:
1803 // Preserve !invariant.group in K.
1804 break;
1805 case LLVMContext::MD_align:
1806 K->setMetadata(Kind,
1807 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
1808 break;
1809 case LLVMContext::MD_dereferenceable:
1810 case LLVMContext::MD_dereferenceable_or_null:
1811 K->setMetadata(Kind,
1812 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
1813 break;
1814 }
1815 }
1816 // Set !invariant.group from J if J has it. If both instructions have it
1817 // then we will just pick it from J - even when they are different.
1818 // Also make sure that K is load or store - f.e. combining bitcast with load
1819 // could produce bitcast with invariant.group metadata, which is invalid.
1820 // FIXME: we should try to preserve both invariant.group md if they are
1821 // different, but right now instruction can only have one invariant.group.
1822 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
1823 if (isa<LoadInst>(K) || isa<StoreInst>(K))
1824 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
1825 }
1827 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J) {
1828 unsigned KnownIDs[] = {
1829 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
1830 LLVMContext::MD_noalias, LLVMContext::MD_range,
1831 LLVMContext::MD_invariant_load, LLVMContext::MD_nonnull,
1832 LLVMContext::MD_invariant_group, LLVMContext::MD_align,
1833 LLVMContext::MD_dereferenceable,
1834 LLVMContext::MD_dereferenceable_or_null};
1835 combineMetadata(K, J, KnownIDs);
1836 }
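/// Helper shared by the replaceDominatedUsesWith overloads below: rewrite
/// every use of From for which Dominates(Root, U) holds so that it uses To
/// instead, and return the number of uses rewritten.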
1838 template <typename RootType, typename DominatesFn>
1839 static unsigned replaceDominatedUsesWith(Value *From, Value *To,
1840 const RootType &Root,
1841 const DominatesFn &Dominates) {
1842 assert(From->getType() == To->getType());
1844 unsigned Count = 0;
1845 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
1846 UI != UE;) {
1847 Use &U = *UI++;
1848 if (!Dominates(Root, U))
1849 continue;
1850 U.set(To);
1851 DEBUG(dbgs() << "Replace dominated use of '" << From->getName() << "' as "
1852 << *To << " in " << *U << "\n");
1853 ++Count;
1854 }
1855 return Count;
1856 }
1858 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
1859 assert(From->getType() == To->getType());
1860 auto *BB = From->getParent();
1861 unsigned Count = 0;
1863 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
1864 UI != UE;) {
1865 Use &U = *UI++;
1866 auto *I = cast<Instruction>(U.getUser());
1867 if (I->getParent() == BB)
1868 continue;
1869 U.set(To);
1870 ++Count;
1871 }
1873 return Count;
1874 }
1875 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
1876 DominatorTree &DT,
1877 const BasicBlockEdge &Root) {
1878 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
1879 return DT.dominates(Root, U);
1880 };
1881 return ::replaceDominatedUsesWith(From, To, Root, Dominates);
1882 }
1884 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
1885 DominatorTree &DT,
1886 const BasicBlock *BB) {
1887 auto ProperlyDominates = [&DT](const BasicBlock *BB, const Use &U) {
1888 auto *I = cast<Instruction>(U.getUser())->getParent();
1889 return DT.properlyDominates(BB, I);
1890 };
1891 return ::replaceDominatedUsesWith(From, To, BB, ProperlyDominates);
1892 }
1894 bool llvm::callsGCLeafFunction(ImmutableCallSite CS,
1895 const TargetLibraryInfo &TLI) {
1896 // Check if the function is specifically marked as a gc leaf function.
1897 if (CS.hasFnAttr("gc-leaf-function"))
1898 return true;
1899 if (const Function *F = CS.getCalledFunction()) {
1900 if (F->hasFnAttribute("gc-leaf-function"))
1901 return true;
1903 if (auto IID = F->getIntrinsicID())
1904 // Most LLVM intrinsics do not take safepoints.
1905 return IID != Intrinsic::experimental_gc_statepoint &&
1906 IID != Intrinsic::experimental_deoptimize;
1907 }
1909 // Lib calls can be materialized by some passes, and won't be
1910 // marked as 'gc-leaf-function.' All available Libcalls are
1911 // GC-leaf.
1912 LibFunc LF;
1913 if (TLI.getLibFunc(CS, LF)) {
1914 return TLI.has(LF);
1915 }
1917 return false;
1918 }
1920 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
1921 LoadInst &NewLI) {
1922 auto *NewTy = NewLI.getType();
1924 // This only directly applies if the new type is also a pointer.
1925 if (NewTy->isPointerTy()) {
1926 NewLI.setMetadata(LLVMContext::MD_nonnull, N);
1927 return;
1928 }
1930 // The only other translation we can do is to integral loads with !range
1931 // metadata.
1932 if (!NewTy->isIntegerTy())
1933 return;
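// What follows encodes "this integer is never 0": createRange(1, 0) produces
// the wrapping range [1, 0), i.e. every value of the type except zero, which
// is the integer equivalent of the pointer's !nonnull.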
1935 MDBuilder MDB(NewLI.getContext());
1936 const Value *Ptr = OldLI.getPointerOperand();
1937 auto *ITy = cast<IntegerType>(NewTy);
1938 auto *NullInt = ConstantExpr::getPtrToInt(
1939 ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
1940 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
1941 NewLI.setMetadata(LLVMContext::MD_range,
1942 MDB.createRange(NonNullInt, NullInt));
1943 }
1945 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
1946 MDNode *N, LoadInst &NewLI) {
1947 auto *NewTy = NewLI.getType();
1949 // Give up unless it is converted to a pointer where there is a single very
1950 // valuable mapping we can do reliably.
1951 // FIXME: It would be nice to propagate this in more ways, but the type
1952 // conversions make it hard.
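// For example (illustrative): a load annotated !range !{i32 1, i32 100} can
// never yield 0, so when it is rewritten as a pointer load the information is
// kept by attaching !nonnull instead.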
1953 if (!NewTy->isPointerTy())
1954 return;
1956 unsigned BitWidth = DL.getTypeSizeInBits(NewTy);
1957 if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
1958 MDNode *NN = MDNode::get(OldLI.getContext(), None);
1959 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
1960 }
1961 }
1963 namespace {
1965 /// A potential constituent of a bitreverse or bswap expression. See
1966 /// collectBitParts for a fuller explanation.
1967 struct BitPart {
1968 BitPart(Value *P, unsigned BW) : Provider(P) {
1969 Provenance.resize(BW);
1970 }
1972 /// The Value that this is a bitreverse/bswap of.
1973 Value *Provider;
1975 /// The "provenance" of each bit. Provenance[A] = B means that bit A
1976 /// in Provider becomes bit B in the result of this expression.
1977 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
1979 enum { Unset = -1 };
1980 };
1982 } // end anonymous namespace
1984 /// Analyze the specified subexpression and see if it is capable of providing
1985 /// pieces of a bswap or bitreverse. The subexpression provides a potential
1986 /// piece of a bswap or bitreverse if it can be proven that each non-zero bit in
1987 /// the output of the expression came from a corresponding bit in some other
1988 /// value. This function is recursive, and the end result is a mapping of
1989 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
1990 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
1992 /// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
1993 /// that the expression deposits the low byte of %X into the high byte of the
1994 /// result and that all other bits are zero. This expression is accepted and a
1995 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
1996 /// [0-7].
1997 ///
1998 /// To avoid revisiting values, the BitPart results are memoized into the
1999 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
2000 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to
2001 /// store BitParts objects, not pointers. As we need the concept of a nullptr
2002 /// BitParts (Value has been analyzed and the analysis failed), we use an
2003 /// Optional type instead to provide the same functionality.
2005 /// Because we pass around references into \c BPS, we must use a container that
2006 /// does not invalidate internal references (std::map instead of DenseMap).
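/// Illustrative example: for the i16 expression (or (shl i16 %x, 8),
/// (lshr i16 %x, 8)) this returns a BitPart with Provider = %x and
/// Provenance = {8, 9, ..., 15, 0, 1, ..., 7}, which the caller can then
/// recognize as a byte swap.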
2007 static const Optional<BitPart> &
2008 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
2009 std::map<Value *, Optional<BitPart>> &BPS) {
2010 auto I = BPS.find(V);
2011 if (I != BPS.end())
2012 return I->second;
2014 auto &Result = BPS[V] = None;
2015 auto BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2017 if (Instruction *I = dyn_cast<Instruction>(V)) {
2018 // If this is an or instruction, it may be an inner node of the bswap.
2019 if (I->getOpcode() == Instruction::Or) {
2020 auto &A = collectBitParts(I->getOperand(0), MatchBSwaps,
2021 MatchBitReversals, BPS);
2022 auto &B = collectBitParts(I->getOperand(1), MatchBSwaps,
2023 MatchBitReversals, BPS);
2024 if (!A || !B)
2025 return Result;
2027 // Try and merge the two together.
2028 if (!A->Provider || A->Provider != B->Provider)
2029 return Result;
2031 Result = BitPart(A->Provider, BitWidth);
2032 for (unsigned i = 0; i < A->Provenance.size(); ++i) {
2033 if (A->Provenance[i] != BitPart::Unset &&
2034 B->Provenance[i] != BitPart::Unset &&
2035 A->Provenance[i] != B->Provenance[i])
2036 return Result = None;
2038 if (A->Provenance[i] == BitPart::Unset)
2039 Result->Provenance[i] = B->Provenance[i];
2040 else
2041 Result->Provenance[i] = A->Provenance[i];
2042 }
2044 return Result;
2045 }
2047 // If this is a logical shift by a constant, recurse then shift the result.
2048 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
2049 unsigned BitShift =
2050 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
2051 // Ensure the shift amount is defined.
2052 if (BitShift > BitWidth)
2053 return Result;
2055 auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
2056 MatchBitReversals, BPS);
2057 if (!Res)
2058 return Result;
2059 Result = Res;
2061 // Perform the "shift" on BitProvenance.
2062 auto &P = Result->Provenance;
2063 if (I->getOpcode() == Instruction::Shl) {
2064 P.erase(std::prev(P.end(), BitShift), P.end());
2065 P.insert(P.begin(), BitShift, BitPart::Unset);
2066 } else {
2067 P.erase(P.begin(), std::next(P.begin(), BitShift));
2068 P.insert(P.end(), BitShift, BitPart::Unset);
2069 }
2071 return Result;
2072 }
2074 // If this is a logical 'and' with a mask that clears bits, recurse then
2075 // unset the appropriate bits.
2076 if (I->getOpcode() == Instruction::And &&
2077 isa<ConstantInt>(I->getOperand(1))) {
2078 APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1);
2079 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
2081 // Check that the mask allows a multiple of 8 bits for a bswap, for an
2082 // early exit.
2083 unsigned NumMaskedBits = AndMask.countPopulation();
2084 if (!MatchBitReversals && NumMaskedBits % 8 != 0)
2085 return Result;
2087 auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
2088 MatchBitReversals, BPS);
2089 if (!Res)
2090 return Result;
2091 Result = Res;
2093 for (unsigned i = 0; i < BitWidth; ++i, Bit <<= 1)
2094 // If the AndMask is zero for this bit, clear the bit.
2095 if ((AndMask & Bit) == 0)
2096 Result->Provenance[i] = BitPart::Unset;
2097 return Result;
2098 }
2100 // If this is a zext instruction zero extend the result.
2101 if (I->getOpcode() == Instruction::ZExt) {
2102 auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
2103 MatchBitReversals, BPS);
2104 if (!Res)
2105 return Result;
2107 Result = BitPart(Res->Provider, BitWidth);
2108 auto NarrowBitWidth =
2109 cast<IntegerType>(cast<ZExtInst>(I)->getSrcTy())->getBitWidth();
2110 for (unsigned i = 0; i < NarrowBitWidth; ++i)
2111 Result->Provenance[i] = Res->Provenance[i];
2112 for (unsigned i = NarrowBitWidth; i < BitWidth; ++i)
2113 Result->Provenance[i] = BitPart::Unset;
2114 return Result;
2115 }
2116 }
2118 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
2119 // the input value to the bswap/bitreverse.
2120 Result = BitPart(V, BitWidth);
2121 for (unsigned i = 0; i < BitWidth; ++i)
2122 Result->Provenance[i] = i;
2123 return Result;
2124 }
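// Worked example for the byte-reversal check below (illustrative): with
// BitWidth = 32, result bit 0 must come from provider bit 24 -- the same
// position within its byte (24 % 8 == 0 % 8) and byte 3 mapping to byte 0
// (3 == 4 - 0 - 1), i.e. the bytes are reversed.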
2126 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
2127 unsigned BitWidth) {
2128 if (From % 8 != To % 8)
2129 return false;
2130 // Convert from bit indices to byte indices and check for a byte reversal.
2131 From >>= 3;
2132 To >>= 3;
2133 BitWidth >>= 3;
2134 return From == BitWidth - To - 1;
2135 }
2137 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
2138 unsigned BitWidth) {
2139 return From == BitWidth - To - 1;
2140 }
2142 bool llvm::recognizeBSwapOrBitReverseIdiom(
2143 Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
2144 SmallVectorImpl<Instruction *> &InsertedInsts) {
2145 if (Operator::getOpcode(I) != Instruction::Or)
2146 return false;
2147 if (!MatchBSwaps && !MatchBitReversals)
2148 return false;
2149 IntegerType *ITy = dyn_cast<IntegerType>(I->getType());
2150 if (!ITy || ITy->getBitWidth() > 128)
2151 return false; // Can't do vectors or integers > 128 bits.
2152 unsigned BW = ITy->getBitWidth();
2154 unsigned DemandedBW = BW;
2155 IntegerType *DemandedTy = ITy;
2156 if (I->hasOneUse()) {
2157 if (TruncInst *Trunc = dyn_cast<TruncInst>(I->user_back())) {
2158 DemandedTy = cast<IntegerType>(Trunc->getType());
2159 DemandedBW = DemandedTy->getBitWidth();
2160 }
2161 }
2163 // Try to find all the pieces corresponding to the bswap.
2164 std::map<Value *, Optional<BitPart>> BPS;
2165 auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS);
2166 if (!Res)
2167 return false;
2168 auto &BitProvenance = Res->Provenance;
2170 // Now, is the bit permutation correct for a bswap or a bitreverse? We can
2171 // only byteswap values with an even number of bytes.
2172 bool OKForBSwap = DemandedBW % 16 == 0, OKForBitReverse = true;
2173 for (unsigned i = 0; i < DemandedBW; ++i) {
2174 OKForBSwap &=
2175 bitTransformIsCorrectForBSwap(BitProvenance[i], i, DemandedBW);
2176 OKForBitReverse &=
2177 bitTransformIsCorrectForBitReverse(BitProvenance[i], i, DemandedBW);
2178 }
2180 Intrinsic::ID Intrin;
2181 if (OKForBSwap && MatchBSwaps)
2182 Intrin = Intrinsic::bswap;
2183 else if (OKForBitReverse && MatchBitReversals)
2184 Intrin = Intrinsic::bitreverse;
2185 else
2186 return false;
2188 if (ITy != DemandedTy) {
2189 Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
2190 Value *Provider = Res->Provider;
2191 IntegerType *ProviderTy = cast<IntegerType>(Provider->getType());
2192 // We may need to truncate the provider.
2193 if (DemandedTy != ProviderTy) {
2194 auto *Trunc = CastInst::Create(Instruction::Trunc, Provider, DemandedTy,
2195 "trunc", I);
2196 InsertedInsts.push_back(Trunc);
2197 Provider = Trunc;
2198 }
2199 auto *CI = CallInst::Create(F, Provider, "rev", I);
2200 InsertedInsts.push_back(CI);
2201 auto *ExtInst = CastInst::Create(Instruction::ZExt, CI, ITy, "zext", I);
2202 InsertedInsts.push_back(ExtInst);
2203 return true;
2204 }
2206 Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, ITy);
2207 InsertedInsts.push_back(CallInst::Create(F, Res->Provider, "rev", I));
2208 return true;
2209 }
2211 // CodeGen has special handling for some string functions that may replace
2212 // them with target-specific intrinsics. Since that'd skip our interceptors
2213 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
2214 // we mark affected calls as NoBuiltin, which will disable optimization
2215 // in CodeGen.
2216 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
2217 CallInst *CI, const TargetLibraryInfo *TLI) {
2218 Function *F = CI->getCalledFunction();
2219 LibFunc Func;
2220 if (F && !F->hasLocalLinkage() && F->hasName() &&
2221 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
2222 !F->doesNotAccessMemory())
2223 CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
2224 }
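/// Conservatively decide whether operand OpIdx of I may be replaced with an
/// arbitrary (non-constant) value without making the IR invalid; callers such
/// as instruction-sinking transforms use this before introducing a PHI for
/// that operand.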
2226 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
2227 // We can't have a PHI with a metadata type.
2228 if (I->getOperand(OpIdx)->getType()->isMetadataTy())
2229 return false;
2231 // Early exit.
2232 if (!isa<Constant>(I->getOperand(OpIdx)))
2233 return true;
2235 switch (I->getOpcode()) {
2236 default:
2237 return true;
2238 case Instruction::Call:
2239 case Instruction::Invoke:
2240 // Can't handle inline asm. Skip it.
2241 if (isa<InlineAsm>(ImmutableCallSite(I).getCalledValue()))
2242 return false;
2243 // Many arithmetic intrinsics have no issue taking a
2244 // variable, however it's hard to distinguish these from
2245 // specials such as @llvm.frameaddress that require a constant.
2246 if (isa<IntrinsicInst>(I))
2247 return false;
2249 // Constant bundle operands may need to retain their constant-ness for
2250 // correctness.
2251 if (ImmutableCallSite(I).isBundleOperand(OpIdx))
2252 return false;
2253 return true;
2254 case Instruction::ShuffleVector:
2255 // Shufflevector masks are constant.
2256 return OpIdx != 2;
2257 case Instruction::Switch:
2258 case Instruction::ExtractValue:
2259 // All operands apart from the first are constant.
2260 return OpIdx == 0;
2261 case Instruction::InsertValue:
2262 // All operands apart from the first and the second are constant.
2263 return OpIdx < 2;
2264 case Instruction::Alloca:
2265 // Static allocas (constant size in the entry block) are handled by
2266 // prologue/epilogue insertion so they're free anyway. We definitely don't
2267 // want to make them non-constant.
2268 return !cast<AllocaInst>(I)->isStaticAlloca();
2269 case Instruction::GetElementPtr:
2270 if (OpIdx == 0)
2271 return true;
2272 gep_type_iterator It = gep_type_begin(I);
2273 for (auto E = std::next(It, OpIdx); It != E; ++It)