1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements inlining of a function into a call site, resolving
11 // parameters and the return value as appropriate.
13 //===----------------------------------------------------------------------===//
15 #include "llvm/ADT/DenseMap.h"
16 #include "llvm/ADT/None.h"
17 #include "llvm/ADT/Optional.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SetVector.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/StringExtras.h"
23 #include "llvm/ADT/iterator_range.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumptionCache.h"
26 #include "llvm/Analysis/BlockFrequencyInfo.h"
27 #include "llvm/Analysis/CallGraph.h"
28 #include "llvm/Analysis/CaptureTracking.h"
29 #include "llvm/Analysis/EHPersonalities.h"
30 #include "llvm/Analysis/InstructionSimplify.h"
31 #include "llvm/Analysis/ProfileSummaryInfo.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/Argument.h"
34 #include "llvm/IR/BasicBlock.h"
35 #include "llvm/IR/CFG.h"
36 #include "llvm/IR/CallSite.h"
37 #include "llvm/IR/Constant.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DIBuilder.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/DebugInfoMetadata.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/IRBuilder.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Instruction.h"
49 #include "llvm/IR/Instructions.h"
50 #include "llvm/IR/IntrinsicInst.h"
51 #include "llvm/IR/Intrinsics.h"
52 #include "llvm/IR/LLVMContext.h"
53 #include "llvm/IR/MDBuilder.h"
54 #include "llvm/IR/Metadata.h"
55 #include "llvm/IR/Module.h"
56 #include "llvm/IR/Type.h"
57 #include "llvm/IR/User.h"
58 #include "llvm/IR/Value.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/ErrorHandling.h"
62 #include "llvm/Transforms/Utils/Cloning.h"
63 #include "llvm/Transforms/Utils/Local.h"
64 #include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));
bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
}

namespace {
98 /// A class for recording information about inlining a landing pad.
99 class LandingPadInliningInfo {
100 /// Destination of the invoke's unwind.
101 BasicBlock *OuterResumeDest;
103 /// Destination for the callee's resume.
104 BasicBlock *InnerResumeDest = nullptr;
106 /// LandingPadInst associated with the invoke.
107 LandingPadInst *CallerLPad = nullptr;
109 /// PHI for EH values from landingpad insts.
110 PHINode *InnerEHValuesPHI = nullptr;
112 SmallVector<Value*, 8> UnwindDestPHIValues;
  public:
    LandingPadInliningInfo(InvokeInst *II)
116 : OuterResumeDest(II->getUnwindDest()) {
117 // If there are PHI nodes in the unwind destination block, we need to keep
118 // track of which values came into them from the invoke before removing
119 // the edge from this block.
120 BasicBlock *InvokeBB = II->getParent();
121 BasicBlock::iterator I = OuterResumeDest->begin();
122 for (; isa<PHINode>(I); ++I) {
123 // Save the value to use for this edge.
124 PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }
131 /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
133 BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }
137 BasicBlock *getInnerResumeDest();
139 LandingPadInst *getLandingPadInst() const { return CallerLPad; }
141 /// Forward the 'resume' instruction to the caller's landing pad block.
142 /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
146 void forwardResume(ResumeInst *RI,
147 SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
149 /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
151 void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }
155 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
156 BasicBlock::iterator I = dest->begin();
157 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
158 PHINode *phi = cast<PHINode>(I);
159 phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };

} // end anonymous namespace
166 /// Get or create a target for the branch from ResumeInsts.
167 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
168 if (InnerResumeDest) return InnerResumeDest;
170 // Split the landing pad.
171 BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");
176 // The number of incoming edges we expect to the inner landing pad.
177 const unsigned PHICapacity = 2;
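  // (The inner landing pad starts with one incoming edge from the split outer
  // block and typically gains one more per forwarded 'resume'; the capacity
  // passed to PHINode::Create below is only a reservation hint and the PHIs
  // grow as needed.)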
179 // Create corresponding new PHIs for all the PHIs in the outer landing pad.
180 Instruction *InsertPoint = &InnerResumeDest->front();
181 BasicBlock::iterator I = OuterResumeDest->begin();
182 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
183 PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
187 OuterPHI->replaceAllUsesWith(InnerPHI);
188 InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
191 // Create a PHI for the exception values.
192 InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
193 "eh.lpad-body", InsertPoint);
194 CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
195 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
198 return InnerResumeDest;
201 /// Forward the 'resume' instruction to the caller's landing pad block.
202 /// When the landing pad block has only one predecessor, this is a simple
203 /// branch. When there is more than one predecessor, we need to split the
204 /// landing pad block after the landingpad instruction and jump to there.
205 void LandingPadInliningInfo::forwardResume(
206 ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
207 BasicBlock *Dest = getInnerResumeDest();
208 BasicBlock *Src = RI->getParent();
210 BranchInst::Create(Dest, Src);
212 // Update the PHIs in the destination. They were inserted in an order which
214 addIncomingPHIValuesForInto(Src, Dest);
216 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
217 RI->eraseFromParent();
220 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
221 static Value *getParentPad(Value *EHPad) {
222 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
223 return FPI->getParentPad();
224 return cast<CatchSwitchInst>(EHPad)->getParentPad();
227 using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
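// Values in this memo map are either the first non-PHI instruction of a pad's
// unwind destination, ConstantTokenNone for "unwind to caller", or null when
// no definitive answer has been established yet.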
229 /// Helper for getUnwindDestToken that does the descendant-ward part of
231 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
232 UnwindDestMemoTy &MemoMap) {
233 SmallVector<Instruction *, 8> Worklist(1, EHPad);
235 while (!Worklist.empty()) {
236 Instruction *CurrentPad = Worklist.pop_back_val();
237 // We only put pads on the worklist that aren't in the MemoMap. When
238 // we find an unwind dest for a pad we may update its ancestors, but
239 // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
240 // so they should never get updated while queued on the worklist.
241 assert(!MemoMap.count(CurrentPad));
242 Value *UnwindDestToken = nullptr;
243 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
244 if (CatchSwitch->hasUnwindDest()) {
245 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
247 // Catchswitch doesn't have a 'nounwind' variant, and one might be
248 // annotated as "unwinds to caller" when really it's nounwind (see
249 // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
250 // parent's unwind dest from this. We can check its catchpads'
251 // descendants, since they might include a cleanuppad with an
252 // "unwinds to caller" cleanupret, which can be trusted.
253 for (auto HI = CatchSwitch->handler_begin(),
254 HE = CatchSwitch->handler_end();
255 HI != HE && !UnwindDestToken; ++HI) {
256 BasicBlock *HandlerBlock = *HI;
257 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
258 for (User *Child : CatchPad->users()) {
259 // Intentionally ignore invokes here -- since the catchswitch is
260 // marked "unwind to caller", it would be a verifier error if it
261 // contained an invoke which unwinds out of it, so any invoke we'd
262 // encounter must unwind to some child of the catch.
263 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
266 Instruction *ChildPad = cast<Instruction>(Child);
267 auto Memo = MemoMap.find(ChildPad);
268 if (Memo == MemoMap.end()) {
269 // Haven't figured out this child pad yet; queue it.
270 Worklist.push_back(ChildPad);
273 // We've already checked this child, but might have found that
274 // it offers no proof either way.
275 Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
278 // We already know the child's unwind dest, which can either
279 // be ConstantTokenNone to indicate unwind to caller, or can
280 // be another child of the catchpad. Only the former indicates
281 // the unwind dest of the catchswitch.
282 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
283 UnwindDestToken = ChildUnwindDestToken;
286 assert(getParentPad(ChildUnwindDestToken) == CatchPad);
291 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
292 for (User *U : CleanupPad->users()) {
293 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
294 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
295 UnwindDestToken = RetUnwindDest->getFirstNonPHI();
297 UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
300 Value *ChildUnwindDestToken;
301 if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
302 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
303 } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
304 Instruction *ChildPad = cast<Instruction>(U);
305 auto Memo = MemoMap.find(ChildPad);
306 if (Memo == MemoMap.end()) {
307 // Haven't resolved this child yet; queue it and keep searching.
308 Worklist.push_back(ChildPad);
311 // We've checked this child, but still need to ignore it if it
312 // had no proof either way.
313 ChildUnwindDestToken = Memo->second;
314 if (!ChildUnwindDestToken)
317 // Not a relevant user of the cleanuppad
320 // In a well-formed program, the child/invoke must either unwind to
321 // an(other) child of the cleanup, or exit the cleanup. In the
322 // first case, continue searching.
323 if (isa<Instruction>(ChildUnwindDestToken) &&
324 getParentPad(ChildUnwindDestToken) == CleanupPad)
326 UnwindDestToken = ChildUnwindDestToken;
330 // If we haven't found an unwind dest for CurrentPad, we may have queued its
331 // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;
335 // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
336 // any ancestors of CurrentPad up to but not including UnwindDestToken's
337 // parent pad. Record this in the memo map, and check to see if the
338 // original EHPad being queried is one of the ones exited.
340 if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
341 UnwindParent = getParentPad(UnwindPad);
343 UnwindParent = nullptr;
344 bool ExitedOriginalPad = false;
345 for (Instruction *ExitedPad = CurrentPad;
346 ExitedPad && ExitedPad != UnwindParent;
347 ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
348 // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
351 MemoMap[ExitedPad] = UnwindDestToken;
352 ExitedOriginalPad |= (ExitedPad == EHPad);
355 if (ExitedOriginalPad)
356 return UnwindDestToken;
358 // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}
365 /// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
366 /// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
370 /// This routine gets invoked for calls in funclets in inlinees when inlining
371 /// an invoke. Since many funclets don't have calls inside them, it's queried
372 /// on-demand rather than building a map of pads to unwind dests up front.
373 /// Determining a funclet's unwind dest may require recursively searching its
374 /// descendants, and also ancestors and cousins if the descendants don't provide
375 /// an answer. Since most funclets will have their unwind dest immediately
376 /// available as the unwind dest of a catchswitch or cleanupret, this routine
377 /// searches top-down from the given pad and then up. To avoid worst-case
378 /// quadratic run-time given that approach, it uses a memo map to avoid
379 /// re-processing funclet trees. The callers that rewrite the IR as they go
380 /// take advantage of this, for correctness, by checking/forcing rewritten
381 /// pads' entries to match the original callee view.
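///
/// For illustration (a sketch, not drawn from a real test case): given
///   %cp = cleanuppad within none []
///   cleanupret from %cp unwind label %next
/// the query for %cp returns the first non-PHI instruction of %next; a
/// 'cleanupret from %cp unwind to caller' instead yields ConstantTokenNone,
/// and a funclet whose users prove nothing either way yields nullptr.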
382 static Value *getUnwindDestToken(Instruction *EHPad,
383 UnwindDestMemoTy &MemoMap) {
384 // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
386 // deal with just catchswitches and cleanuppads.
387 if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
388 EHPad = CPI->getCatchSwitch();
390 // Check if we've already determined the unwind dest for this pad.
391 auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;
395 // Search EHPad and, if necessary, its descendants.
396 Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
397 assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;
401 // No information is available for this EHPad from itself or any of its
402 // descendants. An unwind all the way out to a pad in the caller would
403 // need also to agree with the unwind dest of the parent funclet, so
404 // search up the chain to try to find a funclet with information. Put
405 // null entries in the memo map to avoid re-processing as we go up.
406 MemoMap[EHPad] = nullptr;
408 SmallPtrSet<Instruction *, 4> TempMemos;
409 TempMemos.insert(EHPad);
411 Instruction *LastUselessPad = EHPad;
412 Value *AncestorToken;
413 for (AncestorToken = getParentPad(EHPad);
414 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
415 AncestorToken = getParentPad(AncestorToken)) {
416 // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
419 // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
420 // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
421 // call to getUnwindDestToken, that would mean that AncestorPad had no
422 // information in itself, its descendants, or its ancestors. If that
423 // were the case, then we should also have recorded the lack of information
424 // for the descendant that we're coming from. So assert that we don't
425 // find a null entry in the MemoMap for AncestorPad.
426 assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
427 auto AncestorMemo = MemoMap.find(AncestorPad);
428 if (AncestorMemo == MemoMap.end()) {
429 UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
431 UnwindDestToken = AncestorMemo->second;
435 LastUselessPad = AncestorPad;
436 MemoMap[LastUselessPad] = nullptr;
438 TempMemos.insert(LastUselessPad);
442 // We know that getUnwindDestTokenHelper was called on LastUselessPad and
443 // returned nullptr (and likewise for EHPad and any of its ancestors up to
444 // LastUselessPad), so LastUselessPad has no information from below. Since
445 // getUnwindDestTokenHelper must investigate all downward paths through
446 // no-information nodes to prove that a node has no information like this,
447 // and since any time it finds information it records it in the MemoMap for
448 // not just the immediately-containing funclet but also any ancestors also
449 // exited, it must be the case that, walking downward from LastUselessPad,
450 // visiting just those nodes which have not been mapped to an unwind dest
451 // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
452 // they are just used to keep getUnwindDestTokenHelper from repeating work),
453 // any node visited must have been exhaustively searched with no information
455 SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
456 while (!Worklist.empty()) {
457 Instruction *UselessPad = Worklist.pop_back_val();
458 auto Memo = MemoMap.find(UselessPad);
459 if (Memo != MemoMap.end() && Memo->second) {
460 // Here the name 'UselessPad' is a bit of a misnomer, because we've found
461 // that it is a funclet that does have information about unwinding to
462 // a particular destination; its parent was a useless pad.
463 // Since its parent has no information, the unwind edge must not escape
464 // the parent, and must target a sibling of this pad. This local unwind
465 // gives us no information about EHPad. Leave it and the subtree rooted
467 assert(getParentPad(Memo->second) == getParentPad(UselessPad));
    // We know we don't have information for UselessPad. If it has an entry in
471 // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
472 // added on this invocation of getUnwindDestToken; if a previous invocation
473 // recorded nullptr, it would have had to prove that the ancestors of
474 // UselessPad, which include LastUselessPad, had no information, and that
475 // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
477 // LastUselessPad, which would imply that EHPad was mapped to nullptr in
478 // the MemoMap on that invocation, which isn't the case if we got here.
479 assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
480 // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
481 // information that we'd be contradicting by making a map entry for it
482 // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
484 // this downward walk at its descendants will verify that they don't have
485 // any unwind edges that exit 'UselessPad' either (i.e. they either have no
486 // unwind edges or unwind to a sibling).
487 MemoMap[UselessPad] = UnwindDestToken;
488 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
489 assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
490 for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
491 auto *CatchPad = HandlerBlock->getFirstNonPHI();
492 for (User *U : CatchPad->users()) {
494 (!isa<InvokeInst>(U) ||
496 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
498 "Expected useless pad");
499 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
500 Worklist.push_back(cast<Instruction>(U));
504 assert(isa<CleanupPadInst>(UselessPad));
505 for (User *U : UselessPad->users()) {
506 assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
507 assert((!isa<InvokeInst>(U) ||
509 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
511 "Expected useless pad");
512 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
513 Worklist.push_back(cast<Instruction>(U));
518 return UnwindDestToken;
521 /// When we inline a basic block into an invoke,
522 /// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
524 /// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
525 /// nodes in that block with the values specified in InvokeDestPHIValues.
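///
/// Conceptually (a sketch): a throwing call such as
///   %r = call i32 @may_throw()
/// is rewritten by changeToInvokeAndSplitBasicBlock into
///   %r = invoke i32 @may_throw() to label %bb.split unwind label %unwind.edge
/// with the remainder of the original block moved into %bb.split.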
526 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
527 BasicBlock *BB, BasicBlock *UnwindEdge,
528 UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
529 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
530 Instruction *I = &*BBI++;
532 // We only need to check for function calls: inlined invoke
533 // instructions require no special handling.
534 CallInst *CI = dyn_cast<CallInst>(I);
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;
539 // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
541 // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental.deoptimize
543 // (resp. @llvm.experimental.guard) call should contain the exception
544 // handling logic, if any.
545 if (auto *F = CI->getCalledFunction())
546 if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;
550 if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
551 // This call is nested inside a funclet. If that funclet has an unwind
552 // destination within the inlinee, then unwinding out of this call would
553 // be UB. Rewriting this call to an invoke which targets the inlined
554 // invoke's unwind dest would give the call's parent funclet multiple
555 // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
557 // see such a call, leave it as a call.
558 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
559 Value *UnwindDestToken =
560 getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
564 Instruction *MemoKey;
565 if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
566 MemoKey = CatchPad->getCatchSwitch();
568 MemoKey = FuncletPad;
569 assert(FuncletUnwindMap->count(MemoKey) &&
570 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
571 "must get memoized to avoid confusing later searches");
    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }

  return nullptr;
}
581 /// If we inlined an invoke site, we need to convert calls
582 /// in the body of the inlined function into invokes.
584 /// II is the invoke instruction being inlined. FirstNewBlock is the first
585 /// block of the inlined code (the last block is the end of the function),
586 /// and InlineCodeInfo is information about the code that got inlined.
587 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
588 ClonedCodeInfo &InlinedCodeInfo) {
589 BasicBlock *InvokeDest = II->getUnwindDest();
591 Function *Caller = FirstNewBlock->getParent();
593 // The inlined code is currently at the end of the function, scan from the
594 // start of the inlined code to its end, checking for stuff we need to
596 LandingPadInliningInfo Invoke(II);
598 // Get all of the inlined landing pad instructions.
599 SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
600 for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
602 if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
603 InlinedLPads.insert(II->getLandingPadInst());
605 // Append the clauses from the outer landing pad instruction into the inlined
606 // landing pad instructions.
607 LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
608 for (LandingPadInst *InlinedLPad : InlinedLPads) {
609 unsigned OuterNum = OuterLPad->getNumClauses();
610 InlinedLPad->reserveClauses(OuterNum);
611 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
612 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
613 if (OuterLPad->isCleanup())
614 InlinedLPad->setCleanup(true);
617 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
619 if (InlinedCodeInfo.ContainsCalls)
620 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
621 &*BB, Invoke.getOuterResumeDest()))
622 // Update any PHI nodes in the exceptional block to indicate that there
623 // is now a new entry in them.
624 Invoke.addIncomingPHIValuesFor(NewBB);
626 // Forward any resumes that are remaining here.
627 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
628 Invoke.forwardResume(RI, InlinedLPads);
631 // Now that everything is happy, we have one final detail. The PHI nodes in
632 // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}
638 /// If we inlined an invoke site, we need to convert calls
639 /// in the body of the inlined function into invokes.
641 /// II is the invoke instruction being inlined. FirstNewBlock is the first
642 /// block of the inlined code (the last block is the end of the function),
643 /// and InlineCodeInfo is information about the code that got inlined.
644 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
645 ClonedCodeInfo &InlinedCodeInfo) {
646 BasicBlock *UnwindDest = II->getUnwindDest();
647 Function *Caller = FirstNewBlock->getParent();
649 assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
651 // If there are PHI nodes in the unwind destination block, we need to keep
652 // track of which values came into them from the invoke before removing the
653 // edge from this block.
654 SmallVector<Value *, 8> UnwindDestPHIValues;
655 BasicBlock *InvokeBB = II->getParent();
656 for (Instruction &I : *UnwindDest) {
657 // Save the value to use for this edge.
658 PHINode *PHI = dyn_cast<PHINode>(&I);
661 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
664 // Add incoming-PHI values to the unwind destination block for the given basic
665 // block, using the values for the original invoke's source block.
666 auto UpdatePHINodes = [&](BasicBlock *Src) {
667 BasicBlock::iterator I = UnwindDest->begin();
668 for (Value *V : UnwindDestPHIValues) {
669 PHINode *PHI = cast<PHINode>(I);
670 PHI->addIncoming(V, Src);
  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
677 UnwindDestMemoTy FuncletUnwindMap;
678 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
680 if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
681 if (CRI->unwindsToCaller()) {
682 auto *CleanupPad = CRI->getCleanupPad();
683 CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
684 CRI->eraseFromParent();
685 UpdatePHINodes(&*BB);
686 // Finding a cleanupret with an unwind destination would confuse
687 // subsequent calls to getUnwindDestToken, so map the cleanuppad
688 // to short-circuit any such calls and recognize this as an "unwind
689 // to caller" cleanup.
690 assert(!FuncletUnwindMap.count(CleanupPad) ||
691 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
692 FuncletUnwindMap[CleanupPad] =
693 ConstantTokenNone::get(Caller->getContext());
697 Instruction *I = BB->getFirstNonPHI();
701 Instruction *Replacement = nullptr;
702 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
703 if (CatchSwitch->unwindsToCaller()) {
704 Value *UnwindDestToken;
705 if (auto *ParentPad =
706 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
707 // This catchswitch is nested inside another funclet. If that
708 // funclet has an unwind destination within the inlinee, then
709 // unwinding out of this catchswitch would be UB. Rewriting this
710 // catchswitch to unwind to the inlined invoke's unwind dest would
711 // give the parent funclet multiple unwind destinations, which is
712 // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a call, leave it
714 // as "unwind to caller".
715 UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
716 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
719 // This catchswitch has no parent to inherit constraints from, and
720 // none of its descendants can have an unwind edge that exits it and
721 // targets another funclet in the inlinee. It may or may not have a
722 // descendant that definitively has an unwind to caller. In either
723 // case, we'll have to assume that any unwinds out of it may need to
724 // be routed to the caller, so treat it as though it has a definitive
726 UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
728 auto *NewCatchSwitch = CatchSwitchInst::Create(
729 CatchSwitch->getParentPad(), UnwindDest,
730 CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
732 for (BasicBlock *PadBB : CatchSwitch->handlers())
733 NewCatchSwitch->addHandler(PadBB);
734 // Propagate info for the old catchswitch over to the new one in
735 // the unwind map. This also serves to short-circuit any subsequent
736 // checks for the unwind dest of this catchswitch, which would get
737 // confused if they found the outer handler in the callee.
738 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
739 Replacement = NewCatchSwitch;
741 } else if (!isa<FuncletPadInst>(I)) {
742 llvm_unreachable("unexpected EHPad!");
746 Replacement->takeName(I);
747 I->replaceAllUsesWith(Replacement);
748 I->eraseFromParent();
749 UpdatePHINodes(&*BB);
753 if (InlinedCodeInfo.ContainsCalls)
754 for (Function::iterator BB = FirstNewBlock->getIterator(),
757 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
758 &*BB, UnwindDest, &FuncletUnwindMap))
759 // Update any PHI nodes in the exceptional block to indicate that there
760 // is now a new entry in them.
761 UpdatePHINodes(NewBB);
763 // Now that everything is happy, we have one final detail. The PHI nodes in
764 // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}
770 /// When inlining a call site that has !llvm.mem.parallel_loop_access metadata,
/// that metadata should be propagated to all memory-accessing cloned
/// instructions.
773 static void PropagateParallelLoopAccessMetadata(CallSite CS,
774 ValueToValueMapTy &VMap) {
  MDNode *M =
      CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (!M)
    return;

  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *PM = NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
790 M = MDNode::concatenate(PM, M);
791 NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
792 } else if (NI->mayReadOrWriteMemory()) {
      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
    }
  }
}
798 /// When inlining a function that contains noalias scope metadata,
799 /// this metadata needs to be cloned so that the inlined blocks
800 /// have different "unique scopes" at every call site. Were this not done, then
801 /// aliasing scopes from a function inlined into a caller multiple times could
802 /// not be differentiated (and this would lead to miscompiles because the
803 /// non-aliasing property communicated by the metadata could have
804 /// call-site-specific control dependencies).
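///
/// For example (illustrative only): if a load in the callee carries
/// !alias.scope !0, where !0 = !{!1} is a list holding scope !1, the inlined
/// copy receives a freshly cloned list referring to a cloned scope, so two
/// inlined copies of the same callee do not share scopes.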
805 static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
806 const Function *CalledFunc = CS.getCalledFunction();
807 SetVector<const MDNode *> MD;
809 // Note: We could only clone the metadata if it is already used in the
810 // caller. I'm omitting that check here because it might confuse
811 // inter-procedural alias analysis passes. We can revisit this if it becomes
812 // an efficiency or overhead problem.
814 for (const BasicBlock &I : *CalledFunc)
815 for (const Instruction &J : I) {
816 if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
818 if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
825 // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
827 SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
828 while (!Queue.empty()) {
829 const MDNode *M = cast<MDNode>(Queue.pop_back_val());
830 for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
831 if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
836 // Now we have a complete set of all metadata in the chains used to specify
837 // the noalias scopes and the lists of those scopes.
838 SmallVector<TempMDTuple, 16> DummyNodes;
839 DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
840 for (const MDNode *I : MD) {
841 DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
842 MDMap[I].reset(DummyNodes.back().get());
845 // Create new metadata nodes to replace the dummy nodes, replacing old
846 // metadata references with either a dummy node or an already-created new
848 for (const MDNode *I : MD) {
849 SmallVector<Metadata *, 4> NewOps;
850 for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
851 const Metadata *V = I->getOperand(i);
852 if (const MDNode *M = dyn_cast<MDNode>(V))
853 NewOps.push_back(MDMap[M]);
855 NewOps.push_back(const_cast<Metadata *>(V));
858 MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
859 MDTuple *TempM = cast<MDTuple>(MDMap[I]);
860 assert(TempM->isTemporary() && "Expected temporary node");
862 TempM->replaceAllUsesWith(NewM);
865 // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
867 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
868 VMI != VMIE; ++VMI) {
872 Instruction *NI = dyn_cast<Instruction>(VMI->second);
876 if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
877 MDNode *NewMD = MDMap[M];
878 // If the call site also had alias scope metadata (a list of scopes to
879 // which instructions inside it might belong), propagate those scopes to
880 // the inlined instructions.
882 CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
883 NewMD = MDNode::concatenate(NewMD, CSM);
884 NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
885 } else if (NI->mayReadOrWriteMemory()) {
887 CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
888 NI->setMetadata(LLVMContext::MD_alias_scope, M);
891 if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
892 MDNode *NewMD = MDMap[M];
893 // If the call site also had noalias metadata (a list of scopes with
894 // which instructions inside it don't alias), propagate those scopes to
895 // the inlined instructions.
897 CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
898 NewMD = MDNode::concatenate(NewMD, CSM);
899 NI->setMetadata(LLVMContext::MD_noalias, NewMD);
900 } else if (NI->mayReadOrWriteMemory()) {
901 if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
902 NI->setMetadata(LLVMContext::MD_noalias, M);
907 /// If the inlined function has noalias arguments,
908 /// then add new alias scopes for each noalias argument, tag the mapped noalias
909 /// parameters with noalias metadata specifying the new scope, and tag all
910 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
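///
/// For example (a sketch): given 'void f(float* noalias %a, float* %b)',
/// accesses derived only from %a are tagged with !alias.scope naming a new
/// scope for %a, while accesses provably not derived from %a are tagged with
/// !noalias listing that scope, letting alias analysis keep them apart after
/// inlining.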
911 static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
912 const DataLayout &DL, AAResults *CalleeAAR) {
913 if (!EnableNoAliasConversion)
916 const Function *CalledFunc = CS.getCalledFunction();
917 SmallVector<const Argument *, 4> NoAliasArgs;
919 for (const Argument &Arg : CalledFunc->args())
920 if (Arg.hasNoAliasAttr() && !Arg.use_empty())
921 NoAliasArgs.push_back(&Arg);
923 if (NoAliasArgs.empty())
926 // To do a good job, if a noalias variable is captured, we need to know if
927 // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));
931 // noalias indicates that pointer values based on the argument do not alias
932 // pointer values which are not based on it. So we add a new "scope" for each
933 // noalias function argument. Accesses using pointers based on that argument
934 // become part of that alias scope, accesses using pointers not based on that
935 // argument are tagged as noalias with that scope.
937 DenseMap<const Argument *, MDNode *> NewScopes;
938 MDBuilder MDB(CalledFunc->getContext());
940 // Create a new scope domain for this function.
942 MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
943 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
944 const Argument *A = NoAliasArgs[i];
946 std::string Name = CalledFunc->getName();
949 Name += A->getName();
951 Name += ": argument ";
955 // Note: We always create a new anonymous root here. This is true regardless
956 // of the linkage of the callee because the aliasing "scope" is not just a
957 // property of the callee, but also all control dependencies in the caller.
958 MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
959 NewScopes.insert(std::make_pair(A, NewScope));
962 // Iterate over all new instructions in the map; for all memory-access
963 // instructions, add the alias scope metadata.
964 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
965 VMI != VMIE; ++VMI) {
966 if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
970 Instruction *NI = dyn_cast<Instruction>(VMI->second);
974 bool IsArgMemOnlyCall = false, IsFuncCall = false;
975 SmallVector<const Value *, 2> PtrArgs;
977 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
978 PtrArgs.push_back(LI->getPointerOperand());
979 else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
980 PtrArgs.push_back(SI->getPointerOperand());
981 else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
982 PtrArgs.push_back(VAAI->getPointerOperand());
983 else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
984 PtrArgs.push_back(CXI->getPointerOperand());
985 else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
986 PtrArgs.push_back(RMWI->getPointerOperand());
987 else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
988 // If we know that the call does not access memory, then we'll still
989 // know that about the inlined clone of this call site, and we don't
990 // need to add metadata.
991 if (ICS.doesNotAccessMemory())
996 FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
997 if (MRB == FMRB_OnlyAccessesArgumentPointees ||
998 MRB == FMRB_OnlyReadsArgumentPointees)
999 IsArgMemOnlyCall = true;
1002 for (Value *Arg : ICS.args()) {
1003 // We need to check the underlying objects of all arguments, not just
1004 // the pointer arguments, because we might be passing pointers as
1006 // However, if we know that the call only accesses pointer arguments,
1007 // then we only need to check the pointer arguments.
1008 if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
1011 PtrArgs.push_back(Arg);
1015 // If we found no pointers, then this instruction is not suitable for
1016 // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, we might just alias with none of the
1018 // noalias arguments.
1019 if (PtrArgs.empty() && !IsFuncCall)
1022 // It is possible that there is only one underlying object, but you
1023 // need to go through several PHIs to see it, and thus could be
1024 // repeated in the Objects list.
1025 SmallPtrSet<const Value *, 4> ObjSet;
1026 SmallVector<Metadata *, 4> Scopes, NoAliases;
1028 SmallSetVector<const Argument *, 4> NAPtrArgs;
1029 for (const Value *V : PtrArgs) {
1030 SmallVector<Value *, 4> Objects;
1031 GetUnderlyingObjects(const_cast<Value*>(V),
1032 Objects, DL, /* LI = */ nullptr);
1034 for (Value *O : Objects)
1038 // Figure out if we're derived from anything that is not a noalias
1040 bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1041 for (const Value *V : ObjSet) {
1042 // Is this value a constant that cannot be derived from any pointer
1043 // value (we need to exclude constant expressions, for example, that
1044 // are formed from arithmetic on global symbols).
1045 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1046 isa<ConstantPointerNull>(V) ||
1047 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1051 // If this is anything other than a noalias argument, then we cannot
1052 // completely describe the aliasing properties using alias.scope
1053 // metadata (and, thus, won't add any).
1054 if (const Argument *A = dyn_cast<Argument>(V)) {
1055 if (!A->hasNoAliasAttr())
1056 UsesAliasingPtr = true;
1058 UsesAliasingPtr = true;
1061 // If this is not some identified function-local object (which cannot
1062 // directly alias a noalias argument), or some other argument (which,
1063 // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
1065 if (!isa<Argument>(V) &&
1066 !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1067 CanDeriveViaCapture = true;
1070 // A function call can always get captured noalias pointers (via other
1071 // parameters, globals, etc.).
1072 if (IsFuncCall && !IsArgMemOnlyCall)
1073 CanDeriveViaCapture = true;
1075 // First, we want to figure out all of the sets with which we definitely
1076 // don't alias. Iterate over all noalias set, and add those for which:
1077 // 1. The noalias argument is not in the set of objects from which we
1078 // definitely derive.
1079 // 2. The noalias argument has not yet been captured.
1080 // An arbitrary function that might load pointers could see captured
1081 // noalias arguments via other noalias arguments or globals, and so we
1082 // must always check for prior capture.
1083 for (const Argument *A : NoAliasArgs) {
1084 if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1085 // It might be tempting to skip the
1086 // PointerMayBeCapturedBefore check if
1087 // A->hasNoCaptureAttr() is true, but this is
1088 // incorrect because nocapture only guarantees
1089 // that no copies outlive the function, not
1090 // that the value cannot be locally captured.
1091 !PointerMayBeCapturedBefore(A,
1092 /* ReturnCaptures */ false,
1093 /* StoreCaptures */ false, I, &DT)))
1094 NoAliases.push_back(NewScopes[A]);
1097 if (!NoAliases.empty())
1098 NI->setMetadata(LLVMContext::MD_noalias,
1099 MDNode::concatenate(
1100 NI->getMetadata(LLVMContext::MD_noalias),
1101 MDNode::get(CalledFunc->getContext(), NoAliases)));
1103 // Next, we want to figure out all of the sets to which we might belong.
1104 // We might belong to a set if the noalias argument is in the set of
1105 // underlying objects. If there is some non-noalias argument in our list
1106 // of underlying objects, then we cannot add a scope because the fact
1107 // that some access does not alias with any set of our noalias arguments
1108 // cannot itself guarantee that it does not alias with this access
1109 // (because there is some pointer of unknown origin involved and the
1110 // other access might also depend on this pointer). We also cannot add
1111 // scopes to arbitrary functions unless we know they don't access any
1112 // non-parameter pointer-values.
1113 bool CanAddScopes = !UsesAliasingPtr;
1114 if (CanAddScopes && IsFuncCall)
1115 CanAddScopes = IsArgMemOnlyCall;
1118 for (const Argument *A : NoAliasArgs) {
1119 if (ObjSet.count(A))
1120 Scopes.push_back(NewScopes[A]);
1123 if (!Scopes.empty())
1125 LLVMContext::MD_alias_scope,
1126 MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1127 MDNode::get(CalledFunc->getContext(), Scopes)));
1132 /// If the inlined function has non-byval align arguments, then
1133 /// add @llvm.assume-based alignment assumptions to preserve this information.
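///
/// For example (a sketch): for a callee parameter 'i32* align 32 %p', the
/// caller receives an @llvm.assume of the corresponding alignment predicate
/// (emitted via IRBuilder::CreateAlignmentAssumption below) unless the
/// alignment is already provable at the call site.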
1134 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
1135 if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1138 AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller());
1139 auto &DL = CS.getCaller()->getParent()->getDataLayout();
1141 // To avoid inserting redundant assumptions, we should check for assumptions
1142 // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;
1146 Function *CalledFunc = CS.getCalledFunction();
1147 for (Argument &Arg : CalledFunc->args()) {
1148 unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
1149 if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) {
1150 if (!DTCalculated) {
1151 DT.recalculate(*CS.getCaller());
1152 DTCalculated = true;
1155 // If we can already prove the asserted alignment in the context of the
1156 // caller, then don't bother inserting the assumption.
1157 Value *ArgVal = CS.getArgument(Arg.getArgNo());
1158 if (getKnownAlignment(ArgVal, DL, CS.getInstruction(), AC, &DT) >= Align)
1161 CallInst *NewAsmp = IRBuilder<>(CS.getInstruction())
1162 .CreateAlignmentAssumption(DL, ArgVal, Align);
1163 AC->registerAssumption(NewAsmp);
1168 /// Once we have cloned code over from a callee into the caller,
1169 /// update the specified callgraph to reflect the changes we made.
1170 /// Note that it's possible that not all code was copied over, so only
1171 /// some edges of the callgraph may remain.
1172 static void UpdateCallGraphAfterInlining(CallSite CS,
1173 Function::iterator FirstNewBlock,
1174 ValueToValueMapTy &VMap,
1175 InlineFunctionInfo &IFI) {
1176 CallGraph &CG = *IFI.CG;
1177 const Function *Caller = CS.getCaller();
1178 const Function *Callee = CS.getCalledFunction();
1179 CallGraphNode *CalleeNode = CG[Callee];
1180 CallGraphNode *CallerNode = CG[Caller];
1182 // Since we inlined some uninlined call sites in the callee into the caller,
1183 // add edges from the caller to all of the callees of the callee.
1184 CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1186 // Consider the case where CalleeNode == CallerNode.
1187 CallGraphNode::CalledFunctionsVector CallCache;
1188 if (CalleeNode == CallerNode) {
1189 CallCache.assign(I, E);
1190 I = CallCache.begin();
1191 E = CallCache.end();
1194 for (; I != E; ++I) {
1195 const Value *OrigCall = I->first;
1197 ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1198 // Only copy the edge if the call was inlined!
1199 if (VMI == VMap.end() || VMI->second == nullptr)
1202 // If the call was inlined, but then constant folded, there is no edge to
1203 // add. Check for this case.
1204 Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
1208 // We do not treat intrinsic calls like real function calls because we
1209 // expect them to become inline code; do not add an edge for an intrinsic.
1210 CallSite CS = CallSite(NewCall);
1211 if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
1214 // Remember that this call site got inlined for the client of
1216 IFI.InlinedCalls.push_back(NewCall);
1218 // It's possible that inlining the callsite will cause it to go from an
1219 // indirect to a direct call by resolving a function pointer. If this
1220 // happens, set the callee of the new call site to a more precise
1221 // destination. This can also happen if the call graph node of the caller
1222 // was just unnecessarily imprecise.
1223 if (!I->second->getFunction())
1224 if (Function *F = CallSite(NewCall).getCalledFunction()) {
1225 // Indirect call site resolved to direct call.
1226 CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
1231 CallerNode->addCalledFunction(CallSite(NewCall), I->second);
1234 // Update the call graph by deleting the edge from Callee to Caller. We must
1235 // do this after the loop above in case Caller and Callee are the same.
1236 CallerNode->removeCallEdgeFor(CS);
1239 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1240 BasicBlock *InsertBlock,
1241 InlineFunctionInfo &IFI) {
1242 Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1243 IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1245 Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1247 // Always generate a memcpy of alignment 1 here because we don't know
1248 // the alignment of the src pointer. Other optimizations can infer
1249 // better alignment.
1250 Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
1253 /// When inlining a call site that has a byval argument,
1254 /// we have to make the implicit memcpy explicit by adding it.
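///
/// For example (a sketch): for 'call void @f(%struct.S* byval align 8 %p)'
/// where @f may write to its argument, inlining creates an alloca in the
/// caller's entry block and copies the pointee of %p into it, and the inlined
/// body then operates on that private copy.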
1255 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1256 const Function *CalledFunc,
1257 InlineFunctionInfo &IFI,
1258 unsigned ByValAlignment) {
1259 PointerType *ArgTy = cast<PointerType>(Arg->getType());
1260 Type *AggTy = ArgTy->getElementType();
1262 Function *Caller = TheCall->getFunction();
1263 const DataLayout &DL = Caller->getParent()->getDataLayout();
1265 // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
1268 if (CalledFunc->onlyReadsMemory()) {
1269 // If the byval argument has a specified alignment that is greater than the
1270 // passed in pointer, then we either have to round up the input pointer or
1271 // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;
1275 AssumptionCache *AC =
1276 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
1278 // If the pointer is already known to be sufficiently aligned, or if we can
1279 // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
        ByValAlignment)
      return Arg;
1284 // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }
1288 // Create the alloca. If we have DataLayout, use nice alignment.
1289 unsigned Align = DL.getPrefTypeAlignment(AggTy);
1291 // If the byval had an alignment specified, we *must* use at least that
1292 // alignment, as it is required by the byval argument (and uses of the
1293 // pointer inside the callee).
1294 Align = std::max(Align, ByValAlignment);
1296 Value *NewAlloca = new AllocaInst(AggTy, DL.getAllocaAddrSpace(),
1297 nullptr, Align, Arg->getName(),
1298 &*Caller->begin()->begin());
1299 IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}
1306 // Check whether this Value is used by a lifetime intrinsic.
1307 static bool isUsedByLifetimeMarker(Value *V) {
1308 for (User *U : V->users()) {
1309 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
1310 switch (II->getIntrinsicID()) {
1312 case Intrinsic::lifetime_start:
1313 case Intrinsic::lifetime_end:
1321 // Check whether the given alloca already has
1322 // lifetime.start or lifetime.end intrinsics.
1323 static bool hasLifetimeMarkers(AllocaInst *AI) {
1324 Type *Ty = AI->getType();
1325 Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1326 Ty->getPointerAddressSpace());
1327 if (Ty == Int8PtrTy)
1328 return isUsedByLifetimeMarker(AI);
1330 // Do a scan to find all the casts to i8*.
1331 for (User *U : AI->users()) {
1332 if (U->getType() != Int8PtrTy) continue;
1333 if (U->stripPointerCasts() != AI) continue;
1334 if (isUsedByLifetimeMarker(U))
1340 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1341 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1342 /// cannot be static.
1343 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI ) {
1344 return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1347 /// Update inlined instructions' line numbers to
1348 /// to encode location where these instructions are inlined.
1349 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1350 Instruction *TheCall, bool CalleeHasDebugInfo) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;
1355 auto &Ctx = Fn->getContext();
1356 DILocation *InlinedAtNode = TheCallDL;
  // Create a unique call site, not to be confused with any other call from the
  // same location.
1360 InlinedAtNode = DILocation::getDistinct(
1361 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1362 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1364 // Cache the inlined-at nodes as they're built so they are reused, without
  // this every instruction's inlined-at chain would become distinct from each
  // other.
1367 DenseMap<const MDNode *, MDNode *> IANodes;
1369 for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
1372 if (DebugLoc DL = BI->getDebugLoc()) {
        auto IA = DebugLoc::appendInlinedAt(DL, InlinedAtNode, BI->getContext(),
                                            IANodes);
1375 auto IDL = DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), IA);
        BI->setDebugLoc(IDL);
        continue;
      }
      if (CalleeHasDebugInfo)
        continue;
1383 // If the inlined instruction has no line number, make it look as if it
1384 // originates from the call location. This is important for
1385 // ((__always_inline__, __nodebug__)) functions which must use caller
1386 // location for all instructions in their function body.
1388 // Don't update static allocas, as they may get moved later.
1389 if (auto *AI = dyn_cast<AllocaInst>(BI))
        if (allocaWouldBeStaticInEntry(AI))
          continue;
1393 BI->setDebugLoc(TheCallDL);
1398 /// Update the block frequencies of the caller after a callee has been inlined.
1400 /// Each block cloned into the caller has its block frequency scaled by the
1401 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1402 /// callee's entry block gets the same frequency as the callsite block and the
1403 /// relative frequencies of all cloned blocks remain the same after cloning.
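///
/// In other words, for every callee block B cloned into the caller,
///   freq(clone(B)) = freq(B) * freq(CallSiteBlock) / freq(CalleeEntryBlock)
/// which is what the setBlockFreqAndScale call below establishes for the
/// cloned entry block and the rest of the cloned blocks.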
1404 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1405 const ValueToValueMapTy &VMap,
1406 BlockFrequencyInfo *CallerBFI,
1407 BlockFrequencyInfo *CalleeBFI,
1408 const BasicBlock &CalleeEntryBlock) {
1409 SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1410 for (auto const &Entry : VMap) {
1411 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1413 auto *OrigBB = cast<BasicBlock>(Entry.first);
1414 auto *ClonedBB = cast<BasicBlock>(Entry.second);
1415 uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1416 if (!ClonedBBs.insert(ClonedBB).second) {
1417 // Multiple blocks in the callee might get mapped to one cloned block in
1418 // the caller since we prune the callee as we clone it. When that happens,
1419 // we want to use the maximum among the original blocks' frequencies.
1420 uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1424 CallerBFI->setBlockFreq(ClonedBB, Freq);
1426 BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1427 CallerBFI->setBlockFreqAndScale(
1428 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1432 /// Update the branch metadata for cloned call instructions.
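///
/// For example (a sketch): with a callee entry count of 100 and an estimated
/// call-site count of 60, calls cloned into the caller are scaled by 60/100
/// while the calls remaining in the original callee are scaled by 40/100, so
/// the two copies together still account for the original counts.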
1433 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1434 const Optional<uint64_t> &CalleeEntryCount,
1435 const Instruction *TheCall,
1436 ProfileSummaryInfo *PSI,
1437 BlockFrequencyInfo *CallerBFI) {
  if (!CalleeEntryCount.hasValue() || CalleeEntryCount.getValue() < 1)
    return;
1440 Optional<uint64_t> CallSiteCount =
1441 PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1442 uint64_t CallCount =
1443 std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1444 CalleeEntryCount.getValue());
1446 for (auto const &Entry : VMap)
1447 if (isa<CallInst>(Entry.first))
1448 if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1449 CI->updateProfWeight(CallCount, CalleeEntryCount.getValue());
1450 for (BasicBlock &BB : *Callee)
1451 // No need to update the callsite if it is pruned during inlining.
1452 if (VMap.count(&BB))
1453 for (Instruction &I : BB)
1454 if (CallInst *CI = dyn_cast<CallInst>(&I))
1455 CI->updateProfWeight(CalleeEntryCount.getValue() - CallCount,
1456 CalleeEntryCount.getValue());
1459 /// Update the entry count of callee after inlining.
/// The callsite's block count is subtracted from the callee's function entry
/// count.
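///
/// For example (a sketch): a callee entry count of 1000 combined with an
/// estimated call-site count of 200 leaves the callee with an entry count of
/// 800 (clamped at 0 if the estimate exceeds the original count).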
1463 static void updateCalleeCount(BlockFrequencyInfo *CallerBFI, BasicBlock *CallBB,
1464 Instruction *CallInst, Function *Callee,
1465 ProfileSummaryInfo *PSI) {
1466 // If the callee has a original count of N, and the estimated count of
1467 // callsite is M, the new callee count is set to N - M. M is estimated from
1468 // the caller's entry count, its entry block frequency and the block frequency
1470 Optional<uint64_t> CalleeCount = Callee->getEntryCount();
1471 if (!CalleeCount.hasValue() || !PSI)
1473 Optional<uint64_t> CallCount = PSI->getProfileCount(CallInst, CallerBFI);
1474 if (!CallCount.hasValue())
1476 // Since CallSiteCount is an estimate, it could exceed the original callee
1477 // count and has to be set to 0.
1478 if (CallCount.getValue() > CalleeCount.getValue())
1479 Callee->setEntryCount(0);
1481 Callee->setEntryCount(CalleeCount.getValue() - CallCount.getValue());
/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well-defined state if this occurs though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C'
/// now exists in the instruction stream. Similarly, this will inline a
/// recursive function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime,
                          Function *ForwardVarArgsTo) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getFunction() &&
         "Instruction not in function!");
  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||               // Can't inline external function or indirect
      CalledFunc->isDeclaration() ||
      (!ForwardVarArgsTo && CalledFunc->isVarArg())) // call, or call to a vararg function!
    return false;
  // The inliner does not know how to inline through calls with operand bundles
  // in general ...
  if (CS.hasOperandBundles()) {
    for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
      uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
      // ... but it knows how to inline through "deopt" operand bundles ...
      if (Tag == LLVMContext::OB_deopt)
        continue;
      // ... and "funclet" operand bundles.
      if (Tag == LLVMContext::OB_funclet)
        continue;

      return false;
    }
  }

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();
  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }
  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;

  // Find the personality function used by the landing pads of the caller. If
  // it exists, then check to see that it matches the personality function used
  // in the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn()
          ? Caller->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    //       supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return false;
  }
  // We need to figure out which funclet the callsite was in so that we may
  // properly nest the callee.
  Instruction *CallSiteEHPad = nullptr;
  if (CallerPersonality) {
    EHPersonality Personality = classifyEHPersonality(CallerPersonality);
    if (isFuncletEHPersonality(Personality)) {
      Optional<OperandBundleUse> ParentFunclet =
          CS.getOperandBundle(LLVMContext::OB_funclet);
      if (ParentFunclet)
        CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

      // OK, the inlining site is legal. What about the target function?
      if (CallSiteEHPad) {
        if (Personality == EHPersonality::MSVC_CXX) {
          // The MSVC personality cannot tolerate catches getting inlined into
          // cleanup funclets.
          if (isa<CleanupPadInst>(CallSiteEHPad)) {
            // Ok, the call site is within a cleanuppad. Let's check the callee
            // for catchpads.
            for (const BasicBlock &CalledBB : *CalledFunc) {
              if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
                return false;
            }
          }
        } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant, there may not be any sort of exceptional
          // funclet in the callee.
          for (const BasicBlock &CalledBB : *CalledFunc) {
            if (CalledBB.isEHPad())
              return false;
          }
        }
      }
    }
  }
  // Determine if we are dealing with a call in an EHPad which does not unwind
  // to caller.
  bool EHPadForCallUnwindsLocally = false;
  if (CallSiteEHPad && CS.isCall()) {
    UnwindDestMemoTy FuncletUnwindMap;
    Value *CallSiteUnwindDestToken =
        getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);

    EHPadForCallUnwindsLocally =
        CallSiteUnwindDestToken &&
        !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
  }
  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = --Caller->end();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;
  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    auto &DL = Caller->getParent()->getDataLayout();

    assert((CalledFunc->arg_size() == CS.arg_size() || ForwardVarArgsTo) &&
           "Varargs calls can only be inlined if the Varargs are forwarded!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[&*I] = ActualArg;
    }
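
    // At this point every formal argument of the callee is mapped to the value
    // it will have in the caller. Byval arguments that needed an explicit copy
    // are mapped to a fresh caller-local alloca instead of the original
    // pointer; the memcpy that fills in that copy is emitted further down via
    // HandleByValArgumentInit.
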
    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CS, IFI);
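
    // (Illustrative IR: for a pointer argument carrying an 'align 16'
    // attribute, the assumption takes a form roughly like
    //   %ptrint = ptrtoint i8* %arg to i64
    //   %maskedptr = and i64 %ptrint, 15
    //   %maskcond = icmp eq i64 %maskedptr, 0
    //   call void @llvm.assume(i1 %maskcond)
    // emitted at the call site, so the alignment fact survives inlining.
    // Value names here are illustrative only.)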

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, TheCall);
    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
      // Update the BFI of blocks cloned into the caller.
      updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
                      CalledFunc->front());

    updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), TheCall,
                      IFI.PSI, IFI.CallerBFI);
    // Update the profile count of callee.
    updateCalleeCount(IFI.CallerBFI, OrigBB, TheCall, CalledFunc, IFI.PSI);

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              &*FirstNewBlock, IFI);
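
    // Illustrative example: if this call site carries [ "deopt"(i32 0) ] and a
    // call cloned from the callee carries [ "deopt"(i32 1) ], the rewrite
    // below produces a call carrying [ "deopt"(i32 0, i32 1) ]: the caller's
    // deoptimization state is prepended to the callee's.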
    Optional<OperandBundleUse> ParentDeopt =
        CS.getOperandBundle(LLVMContext::OB_deopt);
    if (ParentDeopt) {
      SmallVector<OperandBundleDef, 2> OpDefs;

      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        Instruction *I = dyn_cast_or_null<Instruction>(VH);
        if (!I) continue;  // instruction was DCE'd or RAUW'ed to undef

        OpDefs.clear();

        CallSite ICS(I);
        OpDefs.reserve(ICS.getNumOperandBundles());

        for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
          auto ChildOB = ICS.getOperandBundleAt(i);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
            // If the inlined call has other operand bundles, let them be
            // added as well.
            OpDefs.emplace_back(ChildOB);
            continue;
          }

          // It may be useful to separate this logic (of handling operand
          // bundles) out to a separate "policy" component if this gets crowded.
          // Prepend the parent's deoptimization continuation to the newly
          // inlined call's deoptimization continuation.
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
                                  ChildOB.Inputs.size());

          MergedDeoptArgs.insert(MergedDeoptArgs.end(),
                                 ParentDeopt->Inputs.begin(),
                                 ParentDeopt->Inputs.end());
          MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
                                 ChildOB.Inputs.end());

          OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
        }

        Instruction *NewI = nullptr;
        if (isa<CallInst>(I))
          NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
        else
          NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);

        // Note: the RAUW does the appropriate fixup in VMap, so we need to do
        // this even if the call returns void.
        I->replaceAllUsesWith(NewI);

        VH = nullptr;
        I->eraseFromParent();
      }
    }

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // For 'nodebug' functions, the associated DISubprogram is always null.
    // Conservatively avoid propagating the callsite debug location to
    // instructions inlined from a function whose DISubprogram is not null.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall,
                     CalledFunc->getSubprogram() != nullptr);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);

    // Propagate llvm.mem.parallel_loop_access if necessary.
    PropagateParallelLoopAccessMetadata(CS, VMap);

    // Register any cloned assumptions.
    if (IFI.GetAssumptionCache)
      for (BasicBlock &NewBlock :
           make_range(FirstNewBlock->getIterator(), Caller->end()))
        for (Instruction &I : NewBlock) {
          if (auto *II = dyn_cast<IntrinsicInst>(&I))
            if (II->getIntrinsicID() == Intrinsic::assume)
              (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
        }
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before. We
  // insert the instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!allocaWouldBeStaticInEntry(AI))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(
          InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
    }
    // Move any dbg.declares describing the allocas into the entry basic block.
    DIBuilder DIB(*Caller->getParent());
    for (auto &AI : IFI.StaticAllocas)
      replaceDbgDeclareForAlloca(AI, AI, DIB, DIExpression::NoDeref, 0,
                                 DIExpression::NoDeref);
  }
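
  // Collect any actual arguments passed in excess of the callee's formal
  // parameters. When ForwardVarArgsTo is set, these trailing arguments are
  // appended to every call to that function inside the inlined body (see the
  // rewrite below), so a varargs callee that simply forwards its variadic
  // arguments can still be inlined.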
  SmallVector<Value*, 4> VarArgsToForward;
  for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
       i < CS.getNumArgOperands(); i++)
    VarArgsToForward.push_back(CS.getArgOperand(i));

  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (auto II = BB->begin(); II != BB->end();) {
        Instruction &I = *II++;
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        if (Function *F = CI->getCalledFunction())
          InlinedDeoptimizeCalls |=
              F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        if (ChildTCK != CallInst::TCK_NoTail)
          ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();

        if (ForwardVarArgsTo && !VarArgsToForward.empty() &&
            CI->getCalledFunction() == ForwardVarArgsTo) {
          SmallVector<Value*, 6> Params(CI->arg_operands());
          Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
          CallInst *Call =
              CallInst::Create(CI->getCalledFunction(), Params, "", CI);
          Call->setDebugLoc(CI->getDebugLoc());
          CI->replaceAllUsesWith(Call);
          CI->eraseFromParent();
        }
      }
    }
  }

  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
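    // (Illustrative IR: for a 32-byte static alloca %buf this emits something
    // like
    //   call void @llvm.lifetime.start.p0i8(i64 32, i8* %buf.cast)
    // at the top of the inlined code and a matching llvm.lifetime.end before
    // each return, so the stack slot can be reused outside the inlined region.
    // Value names here are illustrative only.)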
    IRBuilder<> builder(&FirstNewBlock->front());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];
      // Don't mark swifterror allocas. They can't have bitcast uses.
      if (AI->isSwiftError())
        continue;

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that array size doesn't saturate uint64_t and doesn't
        // overflow when it's multiplied by type size.
        if (AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
            std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
                AllocaTypeSize) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail or
        // deoptimize call and a return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        if (InlinedDeoptimizeCalls &&
            RI->getParent()->getTerminatingDeoptimizeCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
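    // (Illustrative result: the inlined region becomes
    //    %savedstack = call i8* @llvm.stacksave()
    //    ... inlined code containing dynamic allocas ...
    //    call void @llvm.stackrestore(i8* %savedstack)
    // so stack memory allocated by the callee is released on return.)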
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, {}, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail or deoptimize
      // call and a return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      if (InlinedDeoptimizeCalls &&
          RI->getParent()->getTerminatingDeoptimizeCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions. This is sensitive to which
  // funclet pads were top-level in the inlinee, so must be done before
  // rewriting the "parent pad" links.
  if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Update the lexical scopes of the new funclets and callsites.
  // Anything that had 'none' as its parent is now nested inside the callsite's
  // EHPad.
  if (CallSiteEHPad) {
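    // (Illustrative example: a cloned top-level call such as
    //    call void @g()
    // inside a callee inlined at a call site nested in %cleanuppad becomes
    //    call void @g() [ "funclet"(token %cleanuppad) ]
    // so the EH nesting of the call site is preserved in the inlined code.
    // Names here are illustrative only.)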
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB) {
      // Add bundle operands to any top-level call sites.
      SmallVector<OperandBundleDef, 1> OpBundles;
      for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
        Instruction *I = &*BBI++;
        CallSite CS(I);
        if (!CS)
          continue;

        // Skip call sites which are nounwind intrinsics.
        auto *CalledFn =
            dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
        if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
          continue;

        // Skip call sites which already have a "funclet" bundle.
        if (CS.getOperandBundle(LLVMContext::OB_funclet))
          continue;

        CS.getOperandBundlesAsDefs(OpBundles);
        OpBundles.emplace_back("funclet", CallSiteEHPad);

        Instruction *NewInst;
        if (CS.isCall())
          NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
        else
          NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
        NewInst->takeName(I);
        I->replaceAllUsesWith(NewInst);
        I->eraseFromParent();

        OpBundles.clear();
      }

      // It is problematic if the inlinee has a cleanupret which unwinds to
      // caller and we inline it into a call site which doesn't unwind but into
      // an EH pad that does. Such an edge must be dynamically unreachable.
      // As such, we replace the cleanupret with unreachable.
      if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
        if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
          changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);

      Instruction *I = BB->getFirstNonPHI();
      if (!I->isEHPad())
        continue;

      if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
        if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
          CatchSwitch->setParentPad(CallSiteEHPad);
      } else {
        auto *FPI = cast<FuncletPadInst>(I);
        if (isa<ConstantTokenNone>(FPI->getParentPad()))
          FPI->setParentPad(CallSiteEHPad);
      }
    }
  }

  if (InlinedDeoptimizeCalls) {
    // We need to at least remove the deoptimizing returns from the Return set,
    // so that the control flow from those returns does not get merged into the
    // caller (but terminate it instead). If the caller's return type does not
    // match the callee's return type, we also need to change the return type
    // of the intrinsic.
    if (Caller->getReturnType() == TheCall->getType()) {
      auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
        return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
      });
      Returns.erase(NewEnd, Returns.end());
    } else {
      SmallVector<ReturnInst *, 8> NormalReturns;
      Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
          Caller->getParent(), Intrinsic::experimental_deoptimize,
          {Caller->getReturnType()});

      for (ReturnInst *RI : Returns) {
        CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
        if (!DeoptCall) {
          NormalReturns.push_back(RI);
          continue;
        }

        // The calling convention on the deoptimize call itself may be bogus,
        // since the code we're inlining may have undefined behavior (and may
        // never actually execute at runtime); but all
        // @llvm.experimental.deoptimize declarations have to have the same
        // calling convention in a well-formed module.
        auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
        NewDeoptIntrinsic->setCallingConv(CallingConv);
        auto *CurBB = RI->getParent();
        RI->eraseFromParent();

        SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
                                         DeoptCall->arg_end());

        SmallVector<OperandBundleDef, 1> OpBundles;
        DeoptCall->getOperandBundlesAsDefs(OpBundles);
        DeoptCall->eraseFromParent();
        assert(!OpBundles.empty() &&
               "Expected at least the deopt operand bundle");

        IRBuilder<> Builder(CurBB);
        CallInst *NewDeoptCall =
            Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
        NewDeoptCall->setCallingConv(CallingConv);
        if (NewDeoptCall->getType()->isVoidTy())
          Builder.CreateRetVoid();
        else
          Builder.CreateRet(NewDeoptCall);
      }

      // Leave behind the normal returns so we can merge control flow.
      std::swap(Returns, NormalReturns);
    }
  }

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // caller.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
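
    // (Illustrative example: if the caller returns i8* but the cloned musttail
    // call produces i32*, the 'ret i32* %v' copied from the callee is
    // rewritten below into
    //   %v.cast = bitcast i32* %v to i8*
    //   ret i8* %v.cast
    // keeping the musttail requirement that the return types line up.
    // Value names here are illustrative only.)
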
    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }

      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // Now that all of the transforms on the inlined code have taken place but
  // before we splice the inlined code into the CFG and lose track of which
  // blocks were actually inlined, collect the call sites. We only do this if
  // call graph updates weren't requested, as those provide value handle based
  // tracking of inlined call sites instead.
  if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
    // Otherwise just collect the raw call sites that were inlined.
    for (BasicBlock &NewBB :
         make_range(FirstNewBlock->getIterator(), Caller->end()))
      for (Instruction &I : NewBB)
        if (auto CS = CallSite(&I))
          IFI.InlinedCallSites.push_back(CS);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall->getIterator(),
                                 FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and make the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");
  } else { // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
                                          CalledFunc->getName() + ".exit");
  }

  if (IFI.CallerBFI) {
    // Copy original BB's block frequency to AfterCallBB.
    IFI.CallerBFI->setBlockFreq(
        AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, &*FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
                                     Caller->getBasicBlockList(), FirstNewBlock,
                                     Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();
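
  // Illustrative example: if the callee had two reachable returns,
  // 'ret i32 %a' and 'ret i32 %b', the merge below builds
  //   %phi = phi i32 [ %a, %ret1.i ], [ %b, %ret2.i ]
  // at the top of AfterCallBB and rewires every user of the original call to
  // that PHI (block and value names illustrative only).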
  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            &AfterCallBB->front());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }

    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();