//===-- Local.cpp - Functions to perform local transformations -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "local"

STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
//===----------------------------------------------------------------------===//
//  Local constant propagation.
//
/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination. This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// If DeleteDeadConditions is true, this also calls
/// RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses that the fold might make dead.
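///
/// For example (illustrative IR):
///
///   br i1 true, label %live, label %dead
///
/// becomes 'br label %live', and the PHI nodes in %dead drop their entry for
/// this block.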
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
                                  const TargetLibraryInfo *TLI) {
  TerminatorInst *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditionally jumping on a constant.
  if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on constant?
      // YES.  Change to unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest     = Cond->getZExtValue() ? Dest2 : Dest1;
      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust its PHI nodes.
      OldDest->removePredecessor(BB);

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Destination);
      BI->eraseFromParent();
      return true;
    }

    if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Dest1);
      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }
    return false;
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch to an
    // unconditional branch.
    ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *DefaultDest = SI->getDefaultDest();
    BasicBlock *TheOnlyDest = DefaultDest;

    // If the default is unreachable, ignore it when searching for TheOnlyDest.
    if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
        SI->getNumCases() > 0) {
      TheOnlyDest = SI->case_begin()->getCaseSuccessor();
    }

    // Figure out which case it goes to.
    for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
      // Found case matching a constant operand?
      if (i->getCaseValue() == CI) {
        TheOnlyDest = i->getCaseSuccessor();
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest.  If so, eliminate it as an explicit compare.
      if (i->getCaseSuccessor() == DefaultDest) {
        MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
        unsigned NCases = SI->getNumCases();
        // Fold the case metadata into the default if there will be any branches
        // left, unless the metadata doesn't match the switch.
        if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
          // Collect branch weights into a vector.
          SmallVector<uint32_t, 8> Weights;
          for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
               ++MD_i) {
            auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
            Weights.push_back(CI->getValue().getZExtValue());
          }
          // Merge weight of this case to the default weight.
          unsigned idx = i->getCaseIndex();
          Weights[0] += Weights[idx+1];
          // Remove weight for this case.
          std::swap(Weights[idx+1], Weights.back());
          Weights.pop_back();
          SI->setMetadata(LLVMContext::MD_prof,
                          MDBuilder(BB->getContext()).
                          createBranchWeights(Weights));
        }
        // Remove this entry.
        DefaultDest->removePredecessor(SI->getParent());
        i = SI->removeCase(i);
        e = SI->case_end();
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two non-equal
      // destinations.
      if (i->getCaseSuccessor() != TheOnlyDest)
        TheOnlyDest = nullptr;

      // Increment this iterator as we haven't removed the case.
      ++i;
    }

    if (CI && !TheOnlyDest) {
      // Branching on a constant, but not any of the cases; go to the default
      // successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
      BasicBlock *BB = SI->getParent();

      // Remove entries from PHI nodes which we no longer branch to...
      for (BasicBlock *Succ : SI->successors()) {
        // Don't modify the first branch to TheOnlyDest; its PHI entries must
        // survive.
        if (Succ == TheOnlyDest)
          TheOnlyDest = nullptr;
        else
          Succ->removePredecessor(BB);
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      auto FirstCase = *SI->case_begin();
      Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
                                         FirstCase.getCaseValue(), "cond");

      // Insert the new branch.
      BranchInst *NewBr = Builder.CreateCondBr(Cond,
                                               FirstCase.getCaseSuccessor(),
                                               SI->getDefaultDest());
      MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
      if (MD && MD->getNumOperands() == 3) {
        ConstantInt *SICase =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
        ConstantInt *SIDef =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
        assert(SICase && SIDef);
        // The TrueWeight should be the weight for the single case of SI.
        NewBr->setMetadata(LLVMContext::MD_prof,
                           MDBuilder(BB->getContext()).
                           createBranchWeights(SICase->getValue().getZExtValue(),
                                               SIDef->getValue().getZExtValue()));
      }

      // Update make.implicit metadata to the newly-created conditional branch.
      MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
      if (MakeImplicitMD)
        NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return false;
  }

  if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (BlockAddress *BA =
          dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);

      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        if (IBI->getDestination(i) == TheOnlyDest)
          TheOnlyDest = nullptr;
        else
          IBI->getDestination(i)->removePredecessor(IBI->getParent());
      }
      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior.  Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (TheOnlyDest) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      return true;
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//
/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
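///
/// For example, an 'add' whose result is unused is trivially dead, while a
/// 'store' never is: it writes memory regardless of whether anyone reads the
/// stored value back.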
bool llvm::isInstructionTriviallyDead(Instruction *I,
                                      const TargetLibraryInfo *TLI) {
  if (!I->use_empty())
    return false;
  return wouldInstructionBeTriviallyDead(I, TLI);
}

bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
                                           const TargetLibraryInfo *TLI) {
  if (isa<TerminatorInst>(I))
    return false;

  // We don't want the landingpad-like instructions removed by anything this
  // general.
  if (I->isEHPad())
    return false;

  // We don't want debug info removed by anything this general, unless
  // debug info is empty.
  if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
    if (DDI->getAddress())
      return false;
    return true;
  }
  if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
    if (DVI->getValue())
      return false;
    return true;
  }

  if (!I->mayHaveSideEffects())
    return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Safe to delete llvm.stacksave if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave)
      return true;

    // Lifetime intrinsics are dead when their right-hand operand is undef.
    if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
        II->getIntrinsicID() == Intrinsic::lifetime_end)
      return isa<UndefValue>(II->getArgOperand(1));

    // Assumptions are dead if their condition is trivially true.  Guards on
    // true are operationally no-ops.  In the future we can consider more
    // sophisticated tradeoffs for guards considering potential for check
    // widening, but for now we keep things simple.
    if (II->getIntrinsicID() == Intrinsic::assume ||
        II->getIntrinsicID() == Intrinsic::experimental_guard) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        return !Cond->isZero();

      return false;
    }
  }

  if (isAllocLikeFn(I, TLI))
    return true;

  if (CallInst *CI = isFreeCall(I, TLI))
    if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
      return C->isNullValue() || isa<UndefValue>(C);

  if (CallSite CS = CallSite(I))
    if (isMathLibCallNoop(CS, TLI))
      return true;

  return false;
}

/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it.  If that makes any of its operands
/// trivially dead, delete them too, recursively.  Return true if any
/// instructions were deleted.
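///
/// For example, deleting a dead 'load' can leave the 'getelementptr' that
/// computed its address without uses, in which case the GEP is deleted on a
/// later iteration of the same worklist.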
bool
llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
                                                 const TargetLibraryInfo *TLI) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
    return false;

  SmallVector<Instruction*, 16> DeadInsts;
  DeadInsts.push_back(I);

  do {
    I = DeadInsts.pop_back_val();

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      if (!OpV->use_empty()) continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          DeadInsts.push_back(OpI);
    }

    I->eraseFromParent();
  } while (!DeadInsts.empty());

  return true;
}

/// areAllUsesEqual - Check whether the uses of a value are all the same.
/// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no uses or multiple uses that all refer to the same
/// value.
static bool areAllUsesEqual(Instruction *I) {
  Value::user_iterator UI = I->user_begin();
  Value::user_iterator UE = I->user_end();
  if (UI == UE)
    return true;

  User *TheUse = *UI;
  for (++UI; UI != UE; ++UI) {
    if (*UI != TheUse)
      return false;
  }
  return true;
}

/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it.  If that makes any of its operands trivially dead, delete them
/// too, recursively.  Return true if a change was made.
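///
/// A typical case is an otherwise-unused induction variable:
///   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
///   %i.next = add i32 %i, 1
/// Each value's only use is the other, so the cycle is broken and both are
/// deleted.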
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
                                        const TargetLibraryInfo *TLI) {
  SmallPtrSet<Instruction*, 4> Visited;
  for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
       I = cast<Instruction>(*I->user_begin())) {
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(I).second) {
      // Break the cycle and delete the instruction and its operands.
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
      return true;
    }
  }
  return false;
}

static bool
simplifyAndDCEInstruction(Instruction *I,
                          SmallSetVector<Instruction *, 16> &WorkList,
                          const DataLayout &DL,
                          const TargetLibraryInfo *TLI) {
  if (isInstructionTriviallyDead(I, TLI)) {
    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      if (!OpV->use_empty() || I == OpV)
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          WorkList.insert(OpI);
    }

    I->eraseFromParent();

    return true;
  }

  if (Value *SimpleV = SimplifyInstruction(I, DL)) {
    // Add the users to the worklist. CAREFUL: an instruction can use itself,
    // in the case of a phi node.
    for (User *U : I->users()) {
      if (U != I) {
        WorkList.insert(cast<Instruction>(U));
      }
    }

    // Replace the instruction with its simplified value.
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(SimpleV);
      Changed = true;
    }
    if (isInstructionTriviallyDead(I, TLI)) {
      I->eraseFromParent();
      Changed = true;
    }
    return Changed;
  }
  return false;
}

/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code.  Note that it can delete
/// instructions in other blocks as well as in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
                                       const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  const DataLayout &DL = BB->getModule()->getDataLayout();

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications. The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(&BB->back());
#endif

  SmallSetVector<Instruction *, 16> WorkList;
  // Iterate over the original function, only adding insts to the worklist
  // if they actually need to be revisited. This avoids having to pre-init
  // the worklist with the entire function's worth of instructions.
  for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
       BI != E;) {
    assert(!BI->isTerminator());
    Instruction *I = &*BI;
    ++BI;

    // We're visiting this instruction now, so make sure it's not in the
    // worklist from an earlier visit.
    if (!WorkList.count(I))
      MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }

  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();
    MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }
  return MadeChange;
}

//===----------------------------------------------------------------------===//
//  Control Flow Graph Restructuring.
//
/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
/// method is called when we're about to delete Pred as a predecessor of BB.  If
/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
///
/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
/// nodes that collapse into identity values.  For example, if we have:
///   x = phi(1, 0, 0, 0)
///   y = and x, z
///
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the 'and' to 0.
void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred) {
  // This only adjusts blocks with PHI nodes.
  if (!isa<PHINode>(BB->begin()))
    return;

  // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
  // them down.  This will leave us with single entry phi nodes and other phis
  // that can be removed.
  BB->removePredecessor(Pred, true);

  WeakVH PhiIt = &BB->front();
  while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
    PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
    Value *OldPhiIt = PhiIt;

    if (!recursivelySimplifyInstruction(PN))
      continue;

    // If recursive simplification ended up deleting the next PHI node we would
    // iterate to, then our iterator is invalid; restart scanning from the top
    // of the BB.
    if (PhiIt != OldPhiIt) PhiIt = &BB->front();
  }
}

/// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
/// predecessor is known to have one successor (DestBB!).  Eliminate the edge
/// between them, moving the instructions in the predecessor into DestBB and
/// deleting the predecessor block.
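///
/// For example, if PredBB ends in 'br label %DestBB' and nothing else branches
/// to DestBB, PredBB's instructions are spliced into DestBB, single-entry PHI
/// nodes in DestBB collapse to their one incoming value, and PredBB is erased.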
void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, DominatorTree *DT) {
  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace a self-referencing PHI with undef; it must be dead.
    if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  // Zap anything that took the address of DestBB.  Not doing this will give the
  // address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
      ConstantInt::get(llvm::Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());

  // If the PredBB is the entry block of the function, move DestBB up to
  // become the entry block after we erase PredBB.
  if (PredBB == &DestBB->getParent()->getEntryBlock())
    DestBB->moveAfter(PredBB);

  if (DT) {
    BasicBlock *PredBBIDom = DT->getNode(PredBB)->getIDom()->getBlock();
    DT->changeImmediateDominator(DestBB, PredBBIDom);
    DT->eraseNode(PredBB);
  }

  PredBB->eraseFromParent();
}

/// CanMergeValues - Return true if we can choose one of these values to use
/// in place of the other. Note that we will always choose the non-undef
/// value here.
static bool CanMergeValues(Value *First, Value *Second) {
  return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
}

/// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
/// almost-empty BB ending in an unconditional branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
///
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
        << Succ->getName() << "\n");
  // Shortcut, if there is only a single predecessor it must be BB and merging
  // is always safe.
  if (Succ->getSinglePredecessor()) return true;

  // Make a list of the predecessors of BB.
  SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks.
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged.
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
                            PN->getIncomingValue(PI))) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with "
                << BBPN->getName() << " with regard to common predecessor "
                << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(Val, PN->getIncomingValue(PI))) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with regard to common "
                << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}

typedef SmallVector<BasicBlock *, 16> PredBlockVector;
typedef DenseMap<BasicBlock *, Value *> IncomingValueMap;

/// \brief Determines the value to use as the phi node input for a block.
///
/// Select between \p OldVal and any value that we know flows from \p BB
/// to a particular phi on the basis of which one (if either) is not
/// undef. Update IncomingValues based on the selected value.
///
/// \param OldVal The value we are considering selecting.
/// \param BB The block that the value flows in from.
/// \param IncomingValues A map from block-to-value for other phi inputs
/// that we have examined.
///
/// \returns the selected value.
static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
                                          IncomingValueMap &IncomingValues) {
  if (!isa<UndefValue>(OldVal)) {
    assert((!IncomingValues.count(BB) ||
            IncomingValues.find(BB)->second == OldVal) &&
           "Expected OldVal to match incoming value from BB!");

    IncomingValues.insert(std::make_pair(BB, OldVal));
    return OldVal;
  }

  IncomingValueMap::const_iterator It = IncomingValues.find(BB);
  if (It != IncomingValues.end()) return It->second;

  return OldVal;
}

/// \brief Create a map from block to value for the operands of a
/// given phi.
///
/// Create a map from block to value for each non-undef value flowing
/// into \p PN.
///
/// \param PN The phi we are collecting the map for.
/// \param IncomingValues [out] The map from block to value for this phi.
static void gatherIncomingValuesToPhi(PHINode *PN,
                                      IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *BB = PN->getIncomingBlock(i);
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V))
      IncomingValues.insert(std::make_pair(BB, V));
  }
}

/// \brief Replace the incoming undef values to a phi with the values
/// from a block-to-value map.
///
/// \param PN The phi we are replacing the undefs in.
/// \param IncomingValues A map from block to value.
static void replaceUndefValuesInPhi(PHINode *PN,
                                    const IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V)) continue;

    BasicBlock *BB = PN->getIncomingBlock(i);
    IncomingValueMap::const_iterator It = IncomingValues.find(BB);
    if (It == IncomingValues.end()) continue;

    PN->setIncomingValue(i, It->second);
  }
}

/// \brief Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN) {
  Value *OldVal = PN->removeIncomingValue(BB, false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly. Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  } else {
    for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      BasicBlock *PredBB = BBPreds[i];
      Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  }

  replaceUndefValuesInPhi(PN, IncomingValues);
}

/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
/// potentially side-effect-free intrinsics and the branch.  If possible,
/// eliminate BB by rewriting all the predecessors to branch to the successor
/// block and return true.  If we can't transform, return false.
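///
/// For example, a forwarding block
///
///   bb:                                ; preds = %a, %b
///     br label %succ
///
/// can be removed by making %a and %b branch to %succ directly and merging
/// bb's entries into succ's PHI nodes.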
bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't eliminate infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ) return false;

  // Check to see if merging these blocks would cause conflicts for any of the
  // phi nodes in BB or Succ. If not, we can safely merge.
  if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged.  It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors.  Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      for (Use &U : BBI->uses()) {
        if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
          if (PN->getIncomingBlock(U) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes.
    //
    const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);

      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.

    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
                               BB->getInstList());
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // If the unconditional branch we replaced contains llvm.loop metadata, we
  // add the metadata to the branch instructions in the predecessors.
  unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
  Instruction *TI = BB->getTerminator();
  if (TI)
    if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
      for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
        BasicBlock *Pred = *PI;
        Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
      }

  // Everything that jumped to BB now goes to Succ.
  BB->replaceAllUsesWith(Succ);
  if (!Succ->hasName()) Succ->takeName(BB);
  BB->eraseFromParent();              // Delete the old basic block.
  return true;
}

/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
/// nodes in this block. This doesn't try to be clever about PHI nodes
/// which differ only in the order of the incoming values, but instcombine
/// orders them so it usually won't matter.
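///
/// For example, given:
///   %x = phi i32 [ %v, %a ], [ %w, %b ]
///   %y = phi i32 [ %v, %a ], [ %w, %b ]
/// all uses of %y are replaced with %x and %y is erased.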
bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  struct PHIDenseMapInfo {
    static PHINode *getEmptyKey() {
      return DenseMapInfo<PHINode *>::getEmptyKey();
    }
    static PHINode *getTombstoneKey() {
      return DenseMapInfo<PHINode *>::getTombstoneKey();
    }
    static unsigned getHashValue(PHINode *PN) {
      // Compute a hash value on the operands. Instcombine will likely have
      // sorted them, which helps expose duplicates, but we have to check all
      // the operands to be safe in case instcombine hasn't run.
      return static_cast<unsigned>(hash_combine(
          hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
          hash_combine_range(PN->block_begin(), PN->block_end())));
    }
    static bool isEqual(PHINode *LHS, PHINode *RHS) {
      if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
          RHS == getEmptyKey() || RHS == getTombstoneKey())
        return LHS == RHS;
      return LHS->isIdenticalTo(RHS);
    }
  };

  // Set of unique PHINodes.
  DenseSet<PHINode *, PHIDenseMapInfo> PHISet;

  // Examine each PHI.
  bool Changed = false;
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
    auto Inserted = PHISet.insert(PN);
    if (!Inserted.second) {
      // A duplicate. Replace this PHI with its duplicate.
      PN->replaceAllUsesWith(*Inserted.first);
      PN->eraseFromParent();
      Changed = true;

      // The RAUW can change PHIs that we already visited. Start over from the
      // beginning.
      PHISet.clear();
      I = BB->begin();
    }
  }

  return Changed;
}

/// enforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
static unsigned enforceKnownAlignment(Value *V, unsigned Align,
                                      unsigned PrefAlign,
                                      const DataLayout &DL) {
  assert(PrefAlign > Align);

  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // TODO: ideally, computeKnownBits ought to have used
    // AllocaInst::getAlignment() in its computation already, making
    // the below max redundant. But, as it turns out,
    // stripPointerCasts recurses through infinite layers of bitcasts,
    // while computeKnownBits is not allowed to traverse more than 6
    // levels.
    Align = std::max(AI->getAlignment(), Align);
    if (PrefAlign <= Align)
      return Align;

    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    if (DL.exceedsNaturalStackAlignment(PrefAlign))
      return Align;
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (auto *GO = dyn_cast<GlobalObject>(V)) {
    // TODO: as above, this shouldn't be necessary.
    Align = std::max(GO->getAlignment(), Align);
    if (PrefAlign <= Align)
      return Align;

    // If there is a large requested alignment and we can, bump up the alignment
    // of the global.  If the memory we set aside for the global may not be the
    // memory used by the final program then it is impossible for us to reliably
    // enforce the preferred alignment.
    if (!GO->canIncreaseAlignment())
      return Align;

    GO->setAlignment(PrefAlign);
    return PrefAlign;
  }

  return Align;
}

unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
                                          const DataLayout &DL,
                                          const Instruction *CxtI,
                                          AssumptionCache *AC,
                                          const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = DL.getPointerTypeSizeInBits(V->getType());

  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  computeKnownBits(V, KnownZero, KnownOne, DL, 0, AC, CxtI, DT);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, +Value::MaximumAlignment);

  if (PrefAlign > Align)
    Align = enforceKnownAlignment(V, Align, PrefAlign, DL);

  // We don't need to make any adjustment.
  return Align;
}

///===---------------------------------------------------------------------===//
///  Dbg Intrinsic utilities
///===---------------------------------------------------------------------===//

/// See if there is a dbg.value intrinsic for DIVar before I.
static bool LdStHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr,
                              Instruction *I) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  llvm::BasicBlock::InstListType::iterator PrevI(I);
  if (PrevI != I->getParent()->getInstList().begin()) {
    --PrevI;
    if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
      if (DVI->getValue() == I->getOperand(0) &&
          DVI->getOffset() == 0 &&
          DVI->getVariable() == DIVar &&
          DVI->getExpression() == DIExpr)
        return true;
  }
  return false;
}

/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
static bool PhiHasDebugValue(DILocalVariable *DIVar,
                             DIExpression *DIExpr,
                             PHINode *APN) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  SmallVector<DbgValueInst *, 1> DbgValues;
  findDbgValues(DbgValues, APN);
  for (auto *DVI : DbgValues) {
    assert(DVI->getValue() == APN);
    assert(DVI->getOffset() == 0);
    if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
      return true;
  }
  return false;
}

/// Inserts an llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
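///
/// For example, for a variable %x described by 'llvm.dbg.declare(%x.addr)',
/// a 'store i32 %v, i32* %x.addr' gains a preceding 'llvm.dbg.value' for %v,
/// so the variable stays described even if the alloca is later elided.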
void llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                           StoreInst *SI, DIBuilder &Builder) {
  auto *DIVar = DDI->getVariable();
  auto *DIExpr = DDI->getExpression();
  assert(DIVar && "Missing variable");

  // If an argument is zero extended then use argument directly. The ZExt
  // may be zapped by an optimization pass in future.
  Argument *ExtendedArg = nullptr;
  if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
  if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
  if (ExtendedArg) {
    // We're now only describing a subset of the variable. The fragment we're
    // describing will always be smaller than the variable size, because
    // VariableSize == Size of Alloca described by DDI. Since SI stores
    // to the alloca described by DDI, if its first operand is an extend,
    // we're guaranteed that before extension, the value was narrower than
    // the size of the alloca, hence the size of the described variable.
    SmallVector<uint64_t, 3> Ops;
    unsigned FragmentOffset = 0;
    // If this already is a bit fragment, we drop the bit fragment from the
    // expression and record the offset.
    auto Fragment = DIExpr->getFragmentInfo();
    if (Fragment) {
      Ops.append(DIExpr->elements_begin(), DIExpr->elements_end()-3);
      FragmentOffset = Fragment->OffsetInBits;
    } else {
      Ops.append(DIExpr->elements_begin(), DIExpr->elements_end());
    }
    Ops.push_back(dwarf::DW_OP_LLVM_fragment);
    Ops.push_back(FragmentOffset);
    const DataLayout &DL = DDI->getModule()->getDataLayout();
    Ops.push_back(DL.getTypeSizeInBits(ExtendedArg->getType()));
    auto NewDIExpr = Builder.createExpression(Ops);
    if (!LdStHasDebugValue(DIVar, NewDIExpr, SI))
      Builder.insertDbgValueIntrinsic(ExtendedArg, 0, DIVar, NewDIExpr,
                                      DDI->getDebugLoc(), SI);
  } else if (!LdStHasDebugValue(DIVar, DIExpr, SI))
    Builder.insertDbgValueIntrinsic(SI->getOperand(0), 0, DIVar, DIExpr,
                                    DDI->getDebugLoc(), SI);
}

/// Inserts an llvm.dbg.value intrinsic after a load of an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                           LoadInst *LI, DIBuilder &Builder) {
  auto *DIVar = DDI->getVariable();
  auto *DIExpr = DDI->getExpression();
  assert(DIVar && "Missing variable");

  if (LdStHasDebugValue(DIVar, DIExpr, LI))
    return;

  // We are now tracking the loaded value instead of the address. In the
  // future if multi-location support is added to the IR, it might be
  // preferable to keep tracking both the loaded value and the original
  // address in case the alloca cannot be elided.
  Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
      LI, 0, DIVar, DIExpr, DDI->getDebugLoc(), (Instruction *)nullptr);
  DbgValue->insertAfter(LI);
}

/// Inserts an llvm.dbg.value intrinsic after a phi
/// that has an associated llvm.dbg.declare intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                           PHINode *APN, DIBuilder &Builder) {
  auto *DIVar = DDI->getVariable();
  auto *DIExpr = DDI->getExpression();
  assert(DIVar && "Missing variable");

  if (PhiHasDebugValue(DIVar, DIExpr, APN))
    return;

  BasicBlock *BB = APN->getParent();
  auto InsertionPt = BB->getFirstInsertionPt();

  // The block may be a catchswitch block, which does not have a valid
  // insertion point.
  // FIXME: Insert dbg.value markers in the successors when appropriate.
  if (InsertionPt != BB->end())
    Builder.insertDbgValueIntrinsic(APN, 0, DIVar, DIExpr, DDI->getDebugLoc(),
                                    &*InsertionPt);
}

/// Determine whether this alloca is either a VLA or an array.
static bool isArray(AllocaInst *AI) {
  return AI->isArrayAllocation() ||
         AI->getType()->getElementType()->isArrayTy();
}

/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
/// of llvm.dbg.value intrinsics.
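///
/// For a scalar variable this replaces one llvm.dbg.declare of an alloca with
/// an llvm.dbg.value at each load and store of that alloca, so the variable
/// remains described after mem2reg/SROA remove the stack slot.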
bool llvm::LowerDbgDeclare(Function &F) {
  DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
  SmallVector<DbgDeclareInst *, 4> Dbgs;
  for (auto &FI : F)
    for (Instruction &BI : FI)
      if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
        Dbgs.push_back(DDI);

  if (Dbgs.empty())
    return false;

  for (auto &I : Dbgs) {
    DbgDeclareInst *DDI = I;
    AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
    // If this is an alloca for a scalar variable, insert a dbg.value
    // at each load and store to the alloca and erase the dbg.declare.
    // The dbg.values allow tracking a variable even if it is not
    // stored on the stack, while the dbg.declare can only describe
    // the stack slot (and at a lexical-scope granularity). Later
    // passes will attempt to elide the stack slot.
    if (AI && !isArray(AI)) {
      for (auto &AIUse : AI->uses()) {
        User *U = AIUse.getUser();
        if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
          if (AIUse.getOperandNo() == 1)
            ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
        } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
          ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
        } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
          // This is a call by-value or some other instruction that
          // takes a pointer to the variable. Insert a *value*
          // intrinsic that describes the alloca.
          DIB.insertDbgValueIntrinsic(AI, 0, DDI->getVariable(),
                                      DDI->getExpression(), DDI->getDebugLoc(),
                                      CI);
        }
      }
      DDI->eraseFromParent();
    }
  }
  return true;
}

/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic describing the
/// alloca 'V', if any.
DbgDeclareInst *llvm::FindAllocaDbgDeclare(Value *V) {
  if (auto *L = LocalAsMetadata::getIfExists(V))
    if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
      for (User *U : MDV->users())
        if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
          return DDI;

  return nullptr;
}

/// Finds all llvm.dbg.value intrinsics describing the value 'V' and appends
/// them to DbgValues.
void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) {
  if (auto *L = LocalAsMetadata::getIfExists(V))
    if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
      for (User *U : MDV->users())
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
          DbgValues.push_back(DVI);
}

/// Append a DWARF offset operation to Ops if Offset is non-zero.
static void appendOffset(SmallVectorImpl<uint64_t> &Ops, int64_t Offset) {
  if (Offset > 0) {
    Ops.push_back(dwarf::DW_OP_plus);
    Ops.push_back(Offset);
  } else if (Offset < 0) {
    Ops.push_back(dwarf::DW_OP_minus);
    Ops.push_back(-Offset);
  }
}

/// Prepend \p DIExpr with a deref and offset operation.
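///
/// For example, given appendOffset's encoding above, Deref == true and
/// Offset == 4 yield an expression beginning with {DW_OP_deref, DW_OP_plus,
/// 4}, followed by the elements of the original expression.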
static DIExpression *prependDIExpr(DIBuilder &Builder, DIExpression *DIExpr,
                                   bool Deref, int64_t Offset) {
  if (!Deref && !Offset)
    return DIExpr;
  // Create a copy of the original DIDescriptor for user variable, prepending
  // "deref" operation to a list of address elements, as new llvm.dbg.declare
  // will take a value storing address of the memory for variable, not
  // alloca itself.
  SmallVector<uint64_t, 4> Ops;
  if (Deref)
    Ops.push_back(dwarf::DW_OP_deref);
  appendOffset(Ops, Offset);
  if (DIExpr)
    Ops.append(DIExpr->elements_begin(), DIExpr->elements_end());
  return Builder.createExpression(Ops);
}

bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
                             Instruction *InsertBefore, DIBuilder &Builder,
                             bool Deref, int Offset) {
  DbgDeclareInst *DDI = FindAllocaDbgDeclare(Address);
  if (!DDI)
    return false;
  DebugLoc Loc = DDI->getDebugLoc();
  auto *DIVar = DDI->getVariable();
  auto *DIExpr = DDI->getExpression();
  assert(DIVar && "Missing variable");

  DIExpr = prependDIExpr(Builder, DIExpr, Deref, Offset);

  // Insert llvm.dbg.declare immediately after the original alloca, and remove
  // the old llvm.dbg.declare.
  Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, InsertBefore);
  DDI->eraseFromParent();
  return true;
}

bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                                      DIBuilder &Builder, bool Deref,
                                      int Offset) {
  return replaceDbgDeclare(AI, NewAllocaAddress, AI->getNextNode(), Builder,
                           Deref, Offset);
}

static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
                                        DIBuilder &Builder, int Offset) {
  DebugLoc Loc = DVI->getDebugLoc();
  auto *DIVar = DVI->getVariable();
  auto *DIExpr = DVI->getExpression();
  assert(DIVar && "Missing variable");

  // This is an alloca-based llvm.dbg.value. The first thing it should do with
  // the alloca pointer is dereference it. Otherwise we don't know how to handle
  // it and give up.
  if (!DIExpr || DIExpr->getNumElements() < 1 ||
      DIExpr->getElement(0) != dwarf::DW_OP_deref)
    return;

  // Insert the offset immediately after the first deref.
  // We could just change the offset argument of dbg.value, but it's unsigned...
  if (Offset) {
    SmallVector<uint64_t, 4> Ops;
    Ops.push_back(dwarf::DW_OP_deref);
    appendOffset(Ops, Offset);
    Ops.append(DIExpr->elements_begin() + 1, DIExpr->elements_end());
    DIExpr = Builder.createExpression(Ops);
  }

  Builder.insertDbgValueIntrinsic(NewAddress, DVI->getOffset(), DIVar, DIExpr,
                                  Loc, DVI);
  DVI->eraseFromParent();
}

void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                                    DIBuilder &Builder, int Offset) {
  if (auto *L = LocalAsMetadata::getIfExists(AI))
    if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
      for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) {
        Use &U = *UI++;
        if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
          replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
      }
}
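
/// Salvage the debug information for \p I before it is deleted: rewrite any
/// llvm.dbg.value that refers to a bitcast, a constant-offset GEP, or a load
/// so that it describes the instruction's operand through an equivalent
/// DIExpression instead.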
void llvm::salvageDebugInfo(Instruction &I) {
  SmallVector<DbgValueInst *, 1> DbgValues;
  auto &M = *I.getModule();

  auto MDWrap = [&](Value *V) {
    return MetadataAsValue::get(I.getContext(), ValueAsMetadata::get(V));
  };

  if (isa<BitCastInst>(&I)) {
    findDbgValues(DbgValues, &I);
    for (auto *DVI : DbgValues) {
      // Bitcasts are entirely irrelevant for debug info. Rewrite the dbg.value
      // to use the cast's source.
      DVI->setOperand(0, MDWrap(I.getOperand(0)));
      DEBUG(dbgs() << "SALVAGE: " << *DVI << '\n');
    }
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    findDbgValues(DbgValues, &I);
    for (auto *DVI : DbgValues) {
      unsigned BitWidth =
          M.getDataLayout().getPointerSizeInBits(GEP->getPointerAddressSpace());
      APInt Offset(BitWidth, 0);
      // Rewrite a constant GEP into a DIExpression.
      if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset)) {
        auto *DIExpr = DVI->getExpression();
        DIBuilder DIB(M, /*AllowUnresolved*/ false);
        // GEP offsets are i32 and thus always fit into an int64_t.
        DIExpr = prependDIExpr(DIB, DIExpr, NoDeref, Offset.getSExtValue());
        DVI->setOperand(0, MDWrap(I.getOperand(0)));
        DVI->setOperand(3, MetadataAsValue::get(I.getContext(), DIExpr));
        DEBUG(dbgs() << "SALVAGE: " << *DVI << '\n');
      }
    }
  } else if (isa<LoadInst>(&I)) {
    findDbgValues(DbgValues, &I);
    for (auto *DVI : DbgValues) {
      // Rewrite the load into DW_OP_deref.
      auto *DIExpr = DVI->getExpression();
      DIBuilder DIB(M, /*AllowUnresolved*/ false);
      DIExpr = prependDIExpr(DIB, DIExpr, WithDeref, 0);
      DVI->setOperand(0, MDWrap(I.getOperand(0)));
      DVI->setOperand(3, MetadataAsValue::get(I.getContext(), DIExpr));
      DEBUG(dbgs() << "SALVAGE: " << *DVI << '\n');
    }
  }
}

unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
  unsigned NumDeadInst = 0;
  // Delete the instructions backwards, as it has a reduced likelihood of
  // having to update as many def-use and use-def chains.
  Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
  while (EndInst != &BB->front()) {
    // Delete the next to last instruction.
    Instruction *Inst = &*--EndInst->getIterator();
    if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
      Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
    if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
      EndInst = Inst;
      continue;
    }
    if (!isa<DbgInfoIntrinsic>(Inst))
      ++NumDeadInst;
    Inst->eraseFromParent();
  }
  return NumDeadInst;
}

unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
                                   bool PreserveLCSSA) {
  BasicBlock *BB = I->getParent();
  // Loop over all of the successors, removing BB's entry from any PHI
  // nodes.
  for (BasicBlock *Successor : successors(BB))
    Successor->removePredecessor(BB, PreserveLCSSA);

  // Insert a call to llvm.trap right before this.  This turns the undefined
  // behavior into a hard fail instead of falling through into random code.
  if (UseLLVMTrap) {
    Value *TrapFn =
      Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
    CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
    CallTrap->setDebugLoc(I->getDebugLoc());
  }
  new UnreachableInst(I->getContext(), I);

  // All instructions after this are dead.
  unsigned NumInstrsRemoved = 0;
  BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
  while (BBI != BBE) {
    if (!BBI->use_empty())
      BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
    BB->getInstList().erase(BBI++);
    ++NumInstrsRemoved;
  }
  return NumInstrsRemoved;
}

/// changeToCall - Convert the specified invoke into a normal call.
static void changeToCall(InvokeInst *II) {
  SmallVector<Value*, 8> Args(II->arg_begin(), II->arg_end());
  SmallVector<OperandBundleDef, 1> OpBundles;
  II->getOperandBundlesAsDefs(OpBundles);
  CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, OpBundles,
                                       "", II);
  NewCall->takeName(II);
  NewCall->setCallingConv(II->getCallingConv());
  NewCall->setAttributes(II->getAttributes());
  NewCall->setDebugLoc(II->getDebugLoc());
  II->replaceAllUsesWith(NewCall);

  // Follow the call by a branch to the normal destination.
  BranchInst::Create(II->getNormalDest(), II);

  // Update PHI nodes in the unwind destination.
  II->getUnwindDest()->removePredecessor(II->getParent());
  II->eraseFromParent();
}

BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
                                                   BasicBlock *UnwindEdge) {
  BasicBlock *BB = CI->getParent();

  // Convert this function call into an invoke instruction.  First, split the
  // basic block.
  BasicBlock *Split =
      BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");

  // Delete the unconditional branch inserted by splitBasicBlock.
  BB->getInstList().pop_back();

  // Create the new invoke instruction.
  SmallVector<Value *, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
  SmallVector<OperandBundleDef, 1> OpBundles;

  CI->getOperandBundlesAsDefs(OpBundles);

  // Note: we're round tripping operand bundles through memory here, and that
  // can potentially be avoided with a cleverer API design that we do not have
  // as of this writing.

  InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge,
                                      InvokeArgs, OpBundles, CI->getName(), BB);
  II->setDebugLoc(CI->getDebugLoc());
  II->setCallingConv(CI->getCallingConv());
  II->setAttributes(CI->getAttributes());

  // Make sure that anything using the call now uses the invoke!  This also
  // updates the CallGraph if present, because it uses a WeakVH.
  CI->replaceAllUsesWith(II);

  // Delete the original call.
  Split->getInstList().pop_front();
  return Split;
}

static bool markAliveBlocks(Function &F,
                            SmallPtrSetImpl<BasicBlock*> &Reachable) {

  SmallVector<BasicBlock*, 128> Worklist;
  BasicBlock *BB = &F.front();
  Worklist.push_back(BB);
  Reachable.insert(BB);
  bool Changed = false;
  do {
    BB = Worklist.pop_back_val();

    // Do a quick scan of the basic block, turning any obviously unreachable
    // instructions into LLVM unreachable insts.  The instruction combining pass
    // canonicalizes unreachable insts into stores to null or undef.
    for (Instruction &I : *BB) {
      // Assumptions that are known to be false are equivalent to unreachable.
      // Also, if the condition is undefined, then we make the choice most
      // beneficial to the optimizer, and choose that to also be unreachable.
      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() == Intrinsic::assume) {
          if (match(II->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(II, false);
            Changed = true;
            break;
          }
        }

        if (II->getIntrinsicID() == Intrinsic::experimental_guard) {
          // A call to the guard intrinsic bails out of the current compilation
          // unit if the predicate passed to it is false.  If the predicate is a
          // constant false, then we know the guard will bail out of the current
          // compile unconditionally, so all code following it is dead.
          //
          // Note: unlike in llvm.assume, it is not "obviously profitable" for
          // guards to treat `undef` as `false` since a guard on `undef` can
          // still be useful for widening.
          if (match(II->getArgOperand(0), m_Zero()))
            if (!isa<UnreachableInst>(II->getNextNode())) {
              changeToUnreachable(II->getNextNode(), /*UseLLVMTrap=*/ false);
              Changed = true;
              break;
            }
        }
      }

      if (auto *CI = dyn_cast<CallInst>(&I)) {
        Value *Callee = CI->getCalledValue();
        if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
          changeToUnreachable(CI, /*UseLLVMTrap=*/false);
          Changed = true;
          break;
        }
        if (CI->doesNotReturn()) {
          // If we found a call to a no-return function, insert an unreachable
          // instruction after it.  Make sure there isn't *already* one there
          // though.
          if (!isa<UnreachableInst>(CI->getNextNode())) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(CI->getNextNode(), false);
            Changed = true;
          }
          break;
        }
      }

      // Store to undef and store to null are undefined and used to signal that
      // they should be changed to unreachable by passes that can't modify the
      // CFG.
      if (auto *SI = dyn_cast<StoreInst>(&I)) {
        // Don't touch volatile stores.
        if (SI->isVolatile()) continue;

        Value *Ptr = SI->getOperand(1);

        if (isa<UndefValue>(Ptr) ||
            (isa<ConstantPointerNull>(Ptr) &&
             SI->getPointerAddressSpace() == 0)) {
          changeToUnreachable(SI, true);
          Changed = true;
          break;
        }
      }
    }

    TerminatorInst *Terminator = BB->getTerminator();
    if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
      // Turn invokes that call 'nounwind' functions into ordinary calls.
      Value *Callee = II->getCalledValue();
      if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
        changeToUnreachable(II, true);
        Changed = true;
      } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
        if (II->use_empty() && II->onlyReadsMemory()) {
          // Jump to the normal destination branch.
          BranchInst::Create(II->getNormalDest(), II);
          II->getUnwindDest()->removePredecessor(II->getParent());
          II->eraseFromParent();
        } else
          changeToCall(II);
        Changed = true;
      }
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
      // Remove catchpads which cannot be reached.
      struct CatchPadDenseMapInfo {
        static CatchPadInst *getEmptyKey() {
          return DenseMapInfo<CatchPadInst *>::getEmptyKey();
        }
        static CatchPadInst *getTombstoneKey() {
          return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
        }
        static unsigned getHashValue(CatchPadInst *CatchPad) {
          return static_cast<unsigned>(hash_combine_range(
              CatchPad->value_op_begin(), CatchPad->value_op_end()));
        }
        static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
          if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
              RHS == getEmptyKey() || RHS == getTombstoneKey())
            return LHS == RHS;
          return LHS->isIdenticalTo(RHS);
        }
      };

      // Set of unique CatchPads.
      SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
                    CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
          HandlerSet;
      detail::DenseSetEmpty Empty;
      for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
                                             E = CatchSwitch->handler_end();
           I != E; ++I) {
        BasicBlock *HandlerBB = *I;
        auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
        if (!HandlerSet.insert({CatchPad, Empty}).second) {
          // Duplicate handler; remove it and re-examine the slot it vacated.
          CatchSwitch->removeHandler(I);
          --I;
          --E;
          Changed = true;
        }
      }
    }

    Changed |= ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true);
    for (BasicBlock *Successor : successors(BB))
      if (Reachable.insert(Successor).second)
        Worklist.push_back(Successor);
  } while (!Worklist.empty());
  return Changed;
}
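// For illustration (not part of the original source): in a block like
//
//   call void @llvm.assume(i1 false)
//   call void @side_effect()
//   br label %next
//
// the assume handling above rewrites everything from the assume onward into
// a single 'unreachable', so %next is no longer a successor and can later be
// deleted by removeUnreachableBlocks.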
void llvm::removeUnwindEdge(BasicBlock *BB) {
  TerminatorInst *TI = BB->getTerminator();

  if (auto *II = dyn_cast<InvokeInst>(TI)) {
    changeToCall(II);
    return;
  }

  TerminatorInst *NewTI;
  BasicBlock *UnwindDest;

  if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
    NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
    UnwindDest = CRI->getUnwindDest();
  } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
    auto *NewCatchSwitch = CatchSwitchInst::Create(
        CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
        CatchSwitch->getName(), CatchSwitch);
    for (BasicBlock *PadBB : CatchSwitch->handlers())
      NewCatchSwitch->addHandler(PadBB);

    NewTI = NewCatchSwitch;
    UnwindDest = CatchSwitch->getUnwindDest();
  } else {
    llvm_unreachable("Could not find unwind successor");
  }

  NewTI->takeName(TI);
  NewTI->setDebugLoc(TI->getDebugLoc());
  UnwindDest->removePredecessor(BB);
  TI->replaceAllUsesWith(NewTI);
  TI->eraseFromParent();
}
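// Illustrative sketch (not part of the original source): for a terminator
//
//   invoke void @f() to label %cont unwind label %lpad
//
// removeUnwindEdge delegates to changeToCall, yielding
//
//   call void @f()
//   br label %cont
//
// with %lpad's PHI nodes updated to forget this predecessor. The cleanupret
// and catchswitch cases above instead rebuild the terminator with a null
// unwind destination, i.e. 'unwind to caller'.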
/// removeUnreachableBlocks - Remove blocks that are not reachable, even
/// if they are in a dead cycle. Return true if a change was made, false
/// otherwise. If LVI is non-null, it is kept up to date for erased blocks.
bool llvm::removeUnreachableBlocks(Function &F, LazyValueInfo *LVI) {
  SmallPtrSet<BasicBlock*, 16> Reachable;
  bool Changed = markAliveBlocks(F, Reachable);

  // If every block is reachable, there is nothing to remove.
  if (Reachable.size() == F.size())
    return Changed;

  assert(Reachable.size() < F.size());
  NumRemoved += F.size() - Reachable.size();

  // Loop over all of the basic blocks that are not reachable, dropping all of
  // their internal references. The entry block is always reachable, so start
  // at the second block.
  for (Function::iterator BB = ++F.begin(), E = F.end(); BB != E; ++BB) {
    if (Reachable.count(&*BB))
      continue;

    for (BasicBlock *Successor : successors(&*BB))
      if (Reachable.count(Successor))
        Successor->removePredecessor(&*BB);
    if (LVI)
      LVI->eraseBlock(&*BB);
    BB->dropAllReferences();
  }

  for (Function::iterator I = ++F.begin(); I != F.end();)
    if (!Reachable.count(&*I))
      I = F.getBasicBlockList().erase(I);
    else
      ++I;

  return Changed;
}
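// A minimal usage sketch (illustrative, not part of the original source):
// a pass that may have made blocks dead typically finishes with
//
//   Changed |= removeUnreachableBlocks(F, /*LVI=*/nullptr);
//
// passing its LazyValueInfo instead of nullptr when that analysis must stay
// valid. The two loops above are deliberately separate: unreachable blocks
// can form dead cycles, so all cross-references are dropped first and only
// then are the blocks erased.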
void llvm::combineMetadata(Instruction *K, const Instruction *J,
                           ArrayRef<unsigned> KnownIDs) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  K->dropUnknownNonDebugMetadata(KnownIDs);
  K->getAllMetadataOtherThanDebugLoc(Metadata);
  for (const auto &MD : Metadata) {
    unsigned Kind = MD.first;
    MDNode *JMD = J->getMetadata(Kind);
    MDNode *KMD = MD.second;

    switch (Kind) {
      default:
        K->setMetadata(Kind, nullptr); // Remove unknown metadata
        break;
      case LLVMContext::MD_dbg:
        llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
      case LLVMContext::MD_tbaa:
        K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
        break;
      case LLVMContext::MD_alias_scope:
        K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_mem_parallel_loop_access:
        K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
        break;
      case LLVMContext::MD_range:
        K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
        break;
      case LLVMContext::MD_fpmath:
        K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
        break;
      case LLVMContext::MD_invariant_load:
        // Only set the !invariant.load if it is present in both instructions.
        K->setMetadata(Kind, JMD);
        break;
      case LLVMContext::MD_nonnull:
        // Only set the !nonnull if it is present in both instructions.
        K->setMetadata(Kind, JMD);
        break;
      case LLVMContext::MD_invariant_group:
        // Preserve !invariant.group in K; it is merged from J below.
        break;
      case LLVMContext::MD_align:
        K->setMetadata(Kind,
          MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
        break;
      case LLVMContext::MD_dereferenceable:
      case LLVMContext::MD_dereferenceable_or_null:
        K->setMetadata(Kind,
          MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
        break;
    }
  }
  // Set !invariant.group from J if J has it. If both instructions have it
  // then we will just pick it from J - even when they are different.
  // Also make sure that K is a load or store - e.g. combining a bitcast
  // with a load could produce a bitcast with invariant.group metadata,
  // which is invalid.
  // FIXME: we should try to preserve both invariant.group md if they are
  // different, but right now instruction can only have one invariant.group.
  if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
    if (isa<LoadInst>(K) || isa<StoreInst>(K))
      K->setMetadata(LLVMContext::MD_invariant_group, JMD);
}
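// For illustration (not part of the original source): merging two loads
// where K carries !range !{i32 0, i32 10} and J carries !range !{i32 5,
// i32 20} leaves K with the most generic range !{i32 0, i32 20}. If J lacked
// the !range entirely (JMD == null), getMostGenericRange returns null and
// the metadata is dropped from K.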
void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J) {
  unsigned KnownIDs[] = {
      LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias,         LLVMContext::MD_range,
      LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
      LLVMContext::MD_invariant_group, LLVMContext::MD_align,
      LLVMContext::MD_dereferenceable,
      LLVMContext::MD_dereferenceable_or_null};
  combineMetadata(K, J, KnownIDs);
}
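// Illustrative use (not part of the original source): when CSE keeps one of
// two identical loads, the survivor's metadata must be weakened to what is
// valid for both, e.g.
//
//   %a = load i8*, i8** %p, !nonnull !0   ; kept (K)
//   %b = load i8*, i8** %p                ; erased (J)
//
// Since J lacks !nonnull, combineMetadataForCSE(K, J) drops !nonnull from K
// (the MD_nonnull case above sets it to JMD, which is null here).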
unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
                                        DominatorTree &DT,
                                        const BasicBlockEdge &Root) {
  assert(From->getType() == To->getType());

  unsigned Count = 0;
  for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
       UI != UE;) {
    Use &U = *UI++;
    if (DT.dominates(Root, U)) {
      U.set(To);
      DEBUG(dbgs() << "Replace dominated use of '"
                   << From->getName() << "' as "
                   << *To << " in " << *U << "\n");
      ++Count;
    }
  }
  return Count;
}
unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
                                        DominatorTree &DT,
                                        const BasicBlock *BB) {
  assert(From->getType() == To->getType());

  unsigned Count = 0;
  for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
       UI != UE;) {
    Use &U = *UI++;
    auto *I = cast<Instruction>(U.getUser());
    if (DT.properlyDominates(BB, I->getParent())) {
      U.set(To);
      DEBUG(dbgs() << "Replace dominated use of '" << From->getName() << "' as "
                   << *To << " in " << *U << "\n");
      ++Count;
    }
  }
  return Count;
}
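// Hedged usage sketch (not from the original source; X, Y, BB and Then are
// hypothetical): the edge overload replaces only uses reached through a
// specific CFG edge. For instance, after "br i1 %c, label %then, label
// %else", a pass that proved X == Y along the true edge can run
//
//   replaceDominatedUsesWith(X, Y, DT, BasicBlockEdge(BB, Then));
//
// The block overload instead rewrites uses in blocks *properly* dominated
// by BB, so uses inside BB itself are left alone.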
bool llvm::callsGCLeafFunction(ImmutableCallSite CS) {
  // Check if the function is specifically marked as a gc leaf function.
  if (CS.hasFnAttr("gc-leaf-function"))
    return true;
  if (const Function *F = CS.getCalledFunction()) {
    if (F->hasFnAttribute("gc-leaf-function"))
      return true;

    if (auto IID = F->getIntrinsicID())
      // Most LLVM intrinsics do not take safepoints.
      return IID != Intrinsic::experimental_gc_statepoint &&
             IID != Intrinsic::experimental_deoptimize;
  }

  return false;
}
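// For illustration (not part of the original source): a runtime helper known
// never to trigger a collection can be declared as
//
//   declare void @flush_log() "gc-leaf-function"
//
// Safepoint-inserting passes such as RewriteStatepointsForGC consult this
// predicate and leave calls to such functions unwrapped.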
namespace {
/// A potential constituent of a bitreverse or bswap expression. See
/// collectBitParts for a fuller explanation.
struct BitPart {
  BitPart(Value *P, unsigned BW) : Provider(P) {
    Provenance.resize(BW);
  }

  /// The Value that this is a bitreverse/bswap of.
  Value *Provider;

  /// The "provenance" of each bit. Provenance[A] = B means that bit A of the
  /// result of this expression comes from bit B in Provider.
  SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.

  enum { Unset = -1 };
};
} // end anonymous namespace
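// For illustration (not part of the original source): for a full bswap of an
// i32 %X, the root BitPart would have Provider == %X and
//
//   Provenance = {24,...,31, 16,...,23, 8,...,15, 0,...,7}
//
// i.e. result bit 0 comes from provider bit 24, and so on - exactly the
// byte-reversal pattern that bitTransformIsCorrectForBSwap checks below.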
/// Analyze the specified subexpression and see if it is capable of providing
/// pieces of a bswap or bitreverse. The subexpression provides a potential
/// piece of a bswap or bitreverse if it can be proven that each non-zero bit
/// in the output of the expression came from a corresponding bit in some
/// other value. This function is recursive, and the end result is a mapping
/// of bitnumber to bitnumber. It is the caller's responsibility to validate
/// that the bitnumber to bitnumber mapping is correct for a bswap or
/// bitreverse.
///
/// For example, if the current subexpression is "(shl i32 %X, 24)" then we
/// know that the expression deposits the low byte of %X into the high byte
/// of the result and that all other bits are zero. This expression is
/// accepted and a BitPart is returned with Provider set to %X and
/// Provenance[24-31] set to [0-7].
///
/// To avoid revisiting values, the BitPart results are memoized into the
/// provided map. To avoid unnecessary copying of BitParts, BitParts are
/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
/// store BitParts objects, not pointers. As we need the concept of a nullptr
/// BitPart (Value has been analyzed and the analysis failed), we use an
/// Optional type instead to provide the same functionality.
///
/// Because we pass around references into \c BPS, we must use a container
/// that does not invalidate internal references (std::map instead of
/// DenseMap).
static const Optional<BitPart> &
collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
                std::map<Value *, Optional<BitPart>> &BPS) {
  auto I = BPS.find(V);
  if (I != BPS.end())
    return I->second;

  auto &Result = BPS[V] = None;
  auto BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If this is an or instruction, it may be an inner node of the bswap.
    if (I->getOpcode() == Instruction::Or) {
      auto &A = collectBitParts(I->getOperand(0), MatchBSwaps,
                                MatchBitReversals, BPS);
      auto &B = collectBitParts(I->getOperand(1), MatchBSwaps,
                                MatchBitReversals, BPS);
      if (!A || !B)
        return Result;

      // Try and merge the two together.
      if (!A->Provider || A->Provider != B->Provider)
        return Result;

      Result = BitPart(A->Provider, BitWidth);
      for (unsigned i = 0; i < A->Provenance.size(); ++i) {
        // If a bit is set in both inputs, the provenances must agree;
        // otherwise take whichever side is set.
        if (A->Provenance[i] != BitPart::Unset &&
            B->Provenance[i] != BitPart::Unset &&
            A->Provenance[i] != B->Provenance[i])
          return Result = None;

        if (A->Provenance[i] == BitPart::Unset)
          Result->Provenance[i] = B->Provenance[i];
        else
          Result->Provenance[i] = A->Provenance[i];
      }

      return Result;
    }
    // If this is a logical shift by a constant, recurse then shift the result.
    if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
      unsigned BitShift =
          cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
      // Ensure the shift amount is defined; shifting by the bit width or
      // more is undefined in the IR.
      if (BitShift >= BitWidth)
        return Result;

      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result;
      Result = Res;

      // Perform the "shift" on BitProvenance.
      auto &P = Result->Provenance;
      if (I->getOpcode() == Instruction::Shl) {
        P.erase(std::prev(P.end(), BitShift), P.end());
        P.insert(P.begin(), BitShift, BitPart::Unset);
      } else {
        P.erase(P.begin(), std::next(P.begin(), BitShift));
        P.insert(P.end(), BitShift, BitPart::Unset);
      }

      return Result;
    }
    // If this is a logical 'and' with a mask that clears bits, recurse then
    // unset the appropriate bits.
    if (I->getOpcode() == Instruction::And &&
        isa<ConstantInt>(I->getOperand(1))) {
      APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1);
      const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();

      // Check that the mask allows a multiple of 8 bits for a bswap, for an
      // early exit.
      unsigned NumMaskedBits = AndMask.countPopulation();
      if (!MatchBitReversals && NumMaskedBits % 8 != 0)
        return Result;

      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result;
      Result = Res;

      for (unsigned i = 0; i < BitWidth; ++i, Bit <<= 1)
        // If the AndMask is zero for this bit, clear the bit.
        if ((AndMask & Bit) == 0)
          Result->Provenance[i] = BitPart::Unset;
      return Result;
    }
    // If this is a zext instruction zero extend the result.
    if (I->getOpcode() == Instruction::ZExt) {
      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result;

      Result = BitPart(Res->Provider, BitWidth);
      auto NarrowBitWidth =
          cast<IntegerType>(cast<ZExtInst>(I)->getSrcTy())->getBitWidth();
      for (unsigned i = 0; i < NarrowBitWidth; ++i)
        Result->Provenance[i] = Res->Provenance[i];
      for (unsigned i = NarrowBitWidth; i < BitWidth; ++i)
        Result->Provenance[i] = BitPart::Unset;
      return Result;
    }
  }

  // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
  // the input value to the bswap/bitreverse.
  Result = BitPart(V, BitWidth);
  for (unsigned i = 0; i < BitWidth; ++i)
    Result->Provenance[i] = i;
  return Result;
}
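// Worked example (illustrative, not part of the original source): for the
// i16 expression (x >> 8) | (x << 8), the 'or' case above merges
//
//   A (lshr): Provenance = {8,...,15, Unset x 8}
//   B (shl):  Provenance = {Unset x 8, 0,...,7}
//
// giving {8,...,15, 0,...,7} with Provider == x - a byte reversal, which
// the caller then validates with the predicates below.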
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  if (From % 8 != To % 8)
    return false;
  // Convert from bit indices to byte indices and check for a byte reversal.
  From >>= 3;
  To >>= 3;
  BitWidth >>= 3;
  return From == BitWidth - To - 1;
}
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  return From == BitWidth - To - 1;
}
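// Worked check (illustrative, not part of the original source): for an i32
// byte swap, result bit To = 0 must come from From = 24. Both are bit 0
// within their bytes (From % 8 == To % 8), and at byte granularity
// 24>>3 == 3 == 32/8 - 0 - 1, so bitTransformIsCorrectForBSwap succeeds.
// For a bit reversal, result bit 0 must come from bit 31 == 32 - 0 - 1.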
/// Given an OR instruction, check to see if this is a bswap or bitreverse
/// idiom. If so, insert the new intrinsic and return true.
bool llvm::recognizeBSwapOrBitReverseIdiom(
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
    SmallVectorImpl<Instruction *> &InsertedInsts) {
  if (Operator::getOpcode(I) != Instruction::Or)
    return false;
  if (!MatchBSwaps && !MatchBitReversals)
    return false;
  IntegerType *ITy = dyn_cast<IntegerType>(I->getType());
  if (!ITy || ITy->getBitWidth() > 128)
    return false; // Can't do vectors or integers > 128 bits.
  unsigned BW = ITy->getBitWidth();

  // If the expression feeds directly into a single trunc, only the truncated
  // bits are demanded.
  unsigned DemandedBW = BW;
  IntegerType *DemandedTy = ITy;
  if (I->hasOneUse()) {
    if (TruncInst *Trunc = dyn_cast<TruncInst>(I->user_back())) {
      DemandedTy = cast<IntegerType>(Trunc->getType());
      DemandedBW = DemandedTy->getBitWidth();
    }
  }
  // Try to find all the pieces corresponding to the bswap.
  std::map<Value *, Optional<BitPart>> BPS;
  auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS);
  if (!Res)
    return false;
  auto &BitProvenance = Res->Provenance;

  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
  // only byteswap values with an even number of bytes.
  bool OKForBSwap = DemandedBW % 16 == 0, OKForBitReverse = true;
  for (unsigned i = 0; i < DemandedBW; ++i) {
    OKForBSwap &=
        bitTransformIsCorrectForBSwap(BitProvenance[i], i, DemandedBW);
    OKForBitReverse &=
        bitTransformIsCorrectForBitReverse(BitProvenance[i], i, DemandedBW);
  }
  Intrinsic::ID Intrin;
  if (OKForBSwap && MatchBSwaps)
    Intrin = Intrinsic::bswap;
  else if (OKForBitReverse && MatchBitReversals)
    Intrin = Intrinsic::bitreverse;
  else
    return false;
  if (ITy != DemandedTy) {
    Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
    Value *Provider = Res->Provider;
    IntegerType *ProviderTy = cast<IntegerType>(Provider->getType());
    // We may need to truncate the provider.
    if (DemandedTy != ProviderTy) {
      auto *Trunc = CastInst::Create(Instruction::Trunc, Provider, DemandedTy,
                                     "trunc", I);
      InsertedInsts.push_back(Trunc);
      Provider = Trunc;
    }
    auto *CI = CallInst::Create(F, Provider, "rev", I);
    InsertedInsts.push_back(CI);
    auto *ExtInst = CastInst::Create(Instruction::ZExt, CI, ITy, "zext", I);
    InsertedInsts.push_back(ExtInst);
    return true;
  }

  Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, ITy);
  InsertedInsts.push_back(CallInst::Create(F, Res->Provider, "rev", I));
  return true;
}
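// For illustration (not part of the original source): given the i16 idiom
//
//   %hi = shl i16 %x, 8
//   %lo = lshr i16 %x, 8
//   %or = or i16 %hi, %lo
//
// this routine replaces the expression with
//
//   %rev = call i16 @llvm.bswap.i16(i16 %x)
//
// inserting a trunc/zext pair around the call when only a truncation of the
// 'or' is actually demanded.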
// CodeGen has special handling for some string functions that may replace
// them with target-specific intrinsics. Since that'd skip our interceptors
// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
// we mark affected calls as NoBuiltin, which will disable optimization
// in CodeGen.
void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
    CallInst *CI, const TargetLibraryInfo *TLI) {
  Function *F = CI->getCalledFunction();
  LibFunc Func;
  if (F && !F->hasLocalLinkage() && F->hasName() &&
      TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
      !F->doesNotAccessMemory())
    CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
}
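// For illustration (not part of the original source): memcmp is a library
// function with an optimized CodeGen expansion on some targets, so under
// ASan a call like
//
//   %r = call i32 @memcmp(i8* %a, i8* %b, i64 %n)
//
// is given the nobuiltin attribute here, keeping it a real call that the
// sanitizer runtime can intercept instead of being lowered inline.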