1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements inlining of a function into a call site, resolving
11 // parameters and the return value as appropriate.
13 //===----------------------------------------------------------------------===//
15 #include "llvm/Transforms/Utils/Cloning.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/SmallSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/BlockFrequencyInfo.h"
24 #include "llvm/Analysis/CallGraph.h"
25 #include "llvm/Analysis/CaptureTracking.h"
26 #include "llvm/Analysis/EHPersonalities.h"
27 #include "llvm/Analysis/InstructionSimplify.h"
28 #include "llvm/Analysis/ProfileSummaryInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/CallSite.h"
32 #include "llvm/IR/CFG.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/DebugInfo.h"
36 #include "llvm/IR/DerivedTypes.h"
37 #include "llvm/IR/DIBuilder.h"
38 #include "llvm/IR/Dominators.h"
39 #include "llvm/IR/IRBuilder.h"
40 #include "llvm/IR/Instructions.h"
41 #include "llvm/IR/IntrinsicInst.h"
42 #include "llvm/IR/Intrinsics.h"
43 #include "llvm/IR/MDBuilder.h"
44 #include "llvm/IR/Module.h"
45 #include "llvm/Support/CommandLine.h"
46 #include "llvm/Transforms/Utils/Local.h"
51 static cl::opt<bool>
52 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
53 cl::Hidden,
54 cl::desc("Convert noalias attributes to metadata during inlining."));
56 static cl::opt<bool>
57 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
58 cl::init(true), cl::Hidden,
59 cl::desc("Convert align attributes to assumptions during inlining."));
61 bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
62 AAResults *CalleeAAR, bool InsertLifetime) {
63 return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
65 bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
66 AAResults *CalleeAAR, bool InsertLifetime) {
67 return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
70 namespace {
71 /// A class for recording information about inlining a landing pad.
72 class LandingPadInliningInfo {
73 BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
74 BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
75 LandingPadInst *CallerLPad; ///< LandingPadInst associated with the invoke.
76 PHINode *InnerEHValuesPHI; ///< PHI for EH values from landingpad insts.
77 SmallVector<Value*, 8> UnwindDestPHIValues;
79 public:
80 LandingPadInliningInfo(InvokeInst *II)
81 : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
82 CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
83 // If there are PHI nodes in the unwind destination block, we need to keep
84 // track of which values came into them from the invoke before removing
85 // the edge from this block.
86 llvm::BasicBlock *InvokeBB = II->getParent();
87 BasicBlock::iterator I = OuterResumeDest->begin();
88 for (; isa<PHINode>(I); ++I) {
89 // Save the value to use for this edge.
90 PHINode *PHI = cast<PHINode>(I);
91 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
94 CallerLPad = cast<LandingPadInst>(I);
97 /// The outer unwind destination is the target of
98 /// unwind edges introduced for calls within the inlined function.
99 BasicBlock *getOuterResumeDest() const {
100 return OuterResumeDest;
103 BasicBlock *getInnerResumeDest();
105 LandingPadInst *getLandingPadInst() const { return CallerLPad; }
107 /// Forward the 'resume' instruction to the caller's landing pad block.
108 /// When the landing pad block has only one predecessor, this is
109 /// a simple branch. When there is more than one predecessor, we need to
110 /// split the landing pad block after the landingpad instruction and jump
112 void forwardResume(ResumeInst *RI,
113 SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
115 /// Add incoming-PHI values to the unwind destination block for the given
116 /// basic block, using the values for the original invoke's source block.
117 void addIncomingPHIValuesFor(BasicBlock *BB) const {
118 addIncomingPHIValuesForInto(BB, OuterResumeDest);
121 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
122 BasicBlock::iterator I = dest->begin();
123 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
124 PHINode *phi = cast<PHINode>(I);
125 phi->addIncoming(UnwindDestPHIValues[i], src);
129 } // anonymous namespace
131 /// Get or create a target for the branch from ResumeInsts.
132 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
133 if (InnerResumeDest) return InnerResumeDest;
135 // Split the landing pad.
136 BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
137 InnerResumeDest =
138 OuterResumeDest->splitBasicBlock(SplitPoint,
139 OuterResumeDest->getName() + ".body");
141 // The number of incoming edges we expect to the inner landing pad.
142 const unsigned PHICapacity = 2;
144 // Create corresponding new PHIs for all the PHIs in the outer landing pad.
145 Instruction *InsertPoint = &InnerResumeDest->front();
146 BasicBlock::iterator I = OuterResumeDest->begin();
147 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
148 PHINode *OuterPHI = cast<PHINode>(I);
149 PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
150 OuterPHI->getName() + ".lpad-body",
151 InsertPoint);
152 OuterPHI->replaceAllUsesWith(InnerPHI);
153 InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
156 // Create a PHI for the exception values.
157 InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
158 "eh.lpad-body", InsertPoint);
159 CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
160 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
163 return InnerResumeDest;
166 /// Forward the 'resume' instruction to the caller's landing pad block.
167 /// When the landing pad block has only one predecessor, this is a simple
168 /// branch. When there is more than one predecessor, we need to split the
169 /// landing pad block after the landingpad instruction and jump to there.
170 void LandingPadInliningInfo::forwardResume(
171 ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
172 BasicBlock *Dest = getInnerResumeDest();
173 BasicBlock *Src = RI->getParent();
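174 // Branch to the inner resume destination; the resume itself is removed below.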
175 BranchInst::Create(Dest, Src);
177 // Update the PHIs in the destination. They were inserted in an order which
178 // makes this work.
179 addIncomingPHIValuesForInto(Src, Dest);
181 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
182 RI->eraseFromParent();
185 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
186 static Value *getParentPad(Value *EHPad) {
187 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
188 return FPI->getParentPad();
189 return cast<CatchSwitchInst>(EHPad)->getParentPad();
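191 // Memoizes each EH pad's computed unwind dest token (null = no info found yet).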
192 typedef DenseMap<Instruction *, Value *> UnwindDestMemoTy;
194 /// Helper for getUnwindDestToken that does the descendant-ward part of
195 /// the search.
196 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
197 UnwindDestMemoTy &MemoMap) {
198 SmallVector<Instruction *, 8> Worklist(1, EHPad);
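199 // Inspect each queued pad's users, looking for a definitive unwind destination.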
200 while (!Worklist.empty()) {
201 Instruction *CurrentPad = Worklist.pop_back_val();
202 // We only put pads on the worklist that aren't in the MemoMap. When
203 // we find an unwind dest for a pad we may update its ancestors, but
204 // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
205 // so they should never get updated while queued on the worklist.
206 assert(!MemoMap.count(CurrentPad));
207 Value *UnwindDestToken = nullptr;
208 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
209 if (CatchSwitch->hasUnwindDest()) {
210 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
212 // Catchswitch doesn't have a 'nounwind' variant, and one might be
213 // annotated as "unwinds to caller" when really it's nounwind (see
214 // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
215 // parent's unwind dest from this. We can check its catchpads'
216 // descendants, since they might include a cleanuppad with an
217 // "unwinds to caller" cleanupret, which can be trusted.
218 for (auto HI = CatchSwitch->handler_begin(),
219 HE = CatchSwitch->handler_end();
220 HI != HE && !UnwindDestToken; ++HI) {
221 BasicBlock *HandlerBlock = *HI;
222 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
223 for (User *Child : CatchPad->users()) {
224 // Intentionally ignore invokes here -- since the catchswitch is
225 // marked "unwind to caller", it would be a verifier error if it
226 // contained an invoke which unwinds out of it, so any invoke we'd
227 // encounter must unwind to some child of the catch.
228 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
231 Instruction *ChildPad = cast<Instruction>(Child);
232 auto Memo = MemoMap.find(ChildPad);
233 if (Memo == MemoMap.end()) {
234 // Haven't figured out this child pad yet; queue it.
235 Worklist.push_back(ChildPad);
238 // We've already checked this child, but might have found that
239 // it offers no proof either way.
240 Value *ChildUnwindDestToken = Memo->second;
241 if (!ChildUnwindDestToken)
243 // We already know the child's unwind dest, which can either
244 // be ConstantTokenNone to indicate unwind to caller, or can
245 // be another child of the catchpad. Only the former indicates
246 // the unwind dest of the catchswitch.
247 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
248 UnwindDestToken = ChildUnwindDestToken;
251 assert(getParentPad(ChildUnwindDestToken) == CatchPad);
256 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
257 for (User *U : CleanupPad->users()) {
258 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
259 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
260 UnwindDestToken = RetUnwindDest->getFirstNonPHI();
261 else
262 UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
265 Value *ChildUnwindDestToken;
266 if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
267 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
268 } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
269 Instruction *ChildPad = cast<Instruction>(U);
270 auto Memo = MemoMap.find(ChildPad);
271 if (Memo == MemoMap.end()) {
272 // Haven't resolved this child yet; queue it and keep searching.
273 Worklist.push_back(ChildPad);
276 // We've checked this child, but still need to ignore it if it
277 // had no proof either way.
278 ChildUnwindDestToken = Memo->second;
279 if (!ChildUnwindDestToken)
282 // Not a relevant user of the cleanuppad
285 // In a well-formed program, the child/invoke must either unwind to
286 // an(other) child of the cleanup, or exit the cleanup. In the
287 // first case, continue searching.
288 if (isa<Instruction>(ChildUnwindDestToken) &&
289 getParentPad(ChildUnwindDestToken) == CleanupPad)
291 UnwindDestToken = ChildUnwindDestToken;
295 // If we haven't found an unwind dest for CurrentPad, we may have queued its
296 // children, so move on to the next in the worklist.
297 if (!UnwindDestToken)
300 // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
301 // any ancestors of CurrentPad up to but not including UnwindDestToken's
302 // parent pad. Record this in the memo map, and check to see if the
303 // original EHPad being queried is one of the ones exited.
304 Value *UnwindParent;
305 if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
306 UnwindParent = getParentPad(UnwindPad);
307 else
308 UnwindParent = nullptr;
309 bool ExitedOriginalPad = false;
310 for (Instruction *ExitedPad = CurrentPad;
311 ExitedPad && ExitedPad != UnwindParent;
312 ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
313 // Skip over catchpads since they just follow their catchswitches.
314 if (isa<CatchPadInst>(ExitedPad))
316 MemoMap[ExitedPad] = UnwindDestToken;
317 ExitedOriginalPad |= (ExitedPad == EHPad);
320 if (ExitedOriginalPad)
321 return UnwindDestToken;
323 // Continue the search.
326 // No definitive information is contained within this funclet.
330 /// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
331 /// return that pad instruction. If it unwinds to caller, return
332 /// ConstantTokenNone. If it does not have a definitive unwind destination,
333 /// return nullptr.
335 /// This routine gets invoked for calls in funclets in inlinees when inlining
336 /// an invoke. Since many funclets don't have calls inside them, it's queried
337 /// on-demand rather than building a map of pads to unwind dests up front.
338 /// Determining a funclet's unwind dest may require recursively searching its
339 /// descendants, and also ancestors and cousins if the descendants don't provide
340 /// an answer. Since most funclets will have their unwind dest immediately
341 /// available as the unwind dest of a catchswitch or cleanupret, this routine
342 /// searches top-down from the given pad and then up. To avoid worst-case
343 /// quadratic run-time given that approach, it uses a memo map to avoid
344 /// re-processing funclet trees. The callers that rewrite the IR as they go
345 /// take advantage of this, for correctness, by checking/forcing rewritten
346 /// pads' entries to match the original callee view.
347 static Value *getUnwindDestToken(Instruction *EHPad,
348 UnwindDestMemoTy &MemoMap) {
349 // Catchpads unwind to the same place as their catchswitch;
350 // redirect any queries on catchpads so the code below can
351 // deal with just catchswitches and cleanuppads.
352 if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
353 EHPad = CPI->getCatchSwitch();
355 // Check if we've already determined the unwind dest for this pad.
356 auto Memo = MemoMap.find(EHPad);
357 if (Memo != MemoMap.end())
358 return Memo->second;
360 // Search EHPad and, if necessary, its descendants.
361 Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
362 assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
363 if (UnwindDestToken)
364 return UnwindDestToken;
366 // No information is available for this EHPad from itself or any of its
367 // descendants. An unwind all the way out to a pad in the caller would
368 // need also to agree with the unwind dest of the parent funclet, so
369 // search up the chain to try to find a funclet with information. Put
370 // null entries in the memo map to avoid re-processing as we go up.
371 MemoMap[EHPad] = nullptr;
373 SmallPtrSet<Instruction *, 4> TempMemos;
374 TempMemos.insert(EHPad);
376 Instruction *LastUselessPad = EHPad;
377 Value *AncestorToken;
378 for (AncestorToken = getParentPad(EHPad);
379 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
380 AncestorToken = getParentPad(AncestorToken)) {
381 // Skip over catchpads since they just follow their catchswitches.
382 if (isa<CatchPadInst>(AncestorPad))
384 // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
385 // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
386 // call to getUnwindDestToken, that would mean that AncestorPad had no
387 // information in itself, its descendants, or its ancestors. If that
388 // were the case, then we should also have recorded the lack of information
389 // for the descendant that we're coming from. So assert that we don't
390 // find a null entry in the MemoMap for AncestorPad.
391 assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
392 auto AncestorMemo = MemoMap.find(AncestorPad);
393 if (AncestorMemo == MemoMap.end()) {
394 UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
396 UnwindDestToken = AncestorMemo->second;
400 LastUselessPad = AncestorPad;
401 MemoMap[LastUselessPad] = nullptr;
403 TempMemos.insert(LastUselessPad);
407 // We know that getUnwindDestTokenHelper was called on LastUselessPad and
408 // returned nullptr (and likewise for EHPad and any of its ancestors up to
409 // LastUselessPad), so LastUselessPad has no information from below. Since
410 // getUnwindDestTokenHelper must investigate all downward paths through
411 // no-information nodes to prove that a node has no information like this,
412 // and since any time it finds information it records it in the MemoMap for
413 // not just the immediately-containing funclet but also any ancestors also
414 // exited, it must be the case that, walking downward from LastUselessPad,
415 // visiting just those nodes which have not been mapped to an unwind dest
416 // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
417 // they are just used to keep getUnwindDestTokenHelper from repeating work),
418 // any node visited must have been exhaustively searched with no information
419 // found.
420 SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
421 while (!Worklist.empty()) {
422 Instruction *UselessPad = Worklist.pop_back_val();
423 auto Memo = MemoMap.find(UselessPad);
424 if (Memo != MemoMap.end() && Memo->second) {
425 // Here the name 'UselessPad' is a bit of a misnomer, because we've found
426 // that it is a funclet that does have information about unwinding to
427 // a particular destination; its parent was a useless pad.
428 // Since its parent has no information, the unwind edge must not escape
429 // the parent, and must target a sibling of this pad. This local unwind
430 // gives us no information about EHPad. Leave it and the subtree rooted
431 // at it alone.
432 assert(getParentPad(Memo->second) == getParentPad(UselessPad));
435 // We know we don't have information for UselessPad. If it has an entry in
436 // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
437 // added on this invocation of getUnwindDestToken; if a previous invocation
438 // recorded nullptr, it would have had to prove that the ancestors of
439 // UselessPad, which include LastUselessPad, had no information, and that
440 // in turn would have required proving that the descendants of
441 // LastUselessPad, which include EHPad, have no information about
442 // LastUselessPad, which would imply that EHPad was mapped to nullptr in
443 // the MemoMap on that invocation, which isn't the case if we got here.
444 assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
445 // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
446 // information that we'd be contradicting by making a map entry for it
447 // (which is something that getUnwindDestTokenHelper must have proved for
448 // us to get here). Just assert on its direct users here; the checks in
449 // this downward walk at its descendants will verify that they don't have
450 // any unwind edges that exit 'UselessPad' either (i.e. they either have no
451 // unwind edges or unwind to a sibling).
452 MemoMap[UselessPad] = UnwindDestToken;
453 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
454 assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
455 for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
456 auto *CatchPad = HandlerBlock->getFirstNonPHI();
457 for (User *U : CatchPad->users()) {
459 (!isa<InvokeInst>(U) ||
461 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
463 "Expected useless pad");
464 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
465 Worklist.push_back(cast<Instruction>(U));
469 assert(isa<CleanupPadInst>(UselessPad));
470 for (User *U : UselessPad->users()) {
471 assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
472 assert((!isa<InvokeInst>(U) ||
474 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
476 "Expected useless pad");
477 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
478 Worklist.push_back(cast<Instruction>(U));
483 return UnwindDestToken;
486 /// When we inline a basic block into an invoke,
487 /// we have to turn all of the calls that can throw into invokes.
488 /// This function analyzes BB to see if there are any calls, and if so,
489 /// it rewrites them to be invokes that unwind to UnwindEdge. It returns the
490 /// rewritten block so the caller can update PHI nodes in the unwind dest.
491 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
492 BasicBlock *BB, BasicBlock *UnwindEdge,
493 UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
494 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
495 Instruction *I = &*BBI++;
497 // We only need to check for function calls: inlined invoke
498 // instructions require no special handling.
499 CallInst *CI = dyn_cast<CallInst>(I);
501 if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
504 // We do not need to (and in fact, cannot) convert possibly throwing calls
505 // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
506 // invokes. The caller's "segment" of the deoptimization continuation
507 // attached to the newly inlined @llvm.experimental.deoptimize
508 // (resp. @llvm.experimental.guard) call should contain the exception
509 // handling logic, if any.
510 if (auto *F = CI->getCalledFunction())
511 if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
512 F->getIntrinsicID() == Intrinsic::experimental_guard)
515 if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
516 // This call is nested inside a funclet. If that funclet has an unwind
517 // destination within the inlinee, then unwinding out of this call would
518 // be UB. Rewriting this call to an invoke which targets the inlined
519 // invoke's unwind dest would give the call's parent funclet multiple
520 // unwind destinations, which is something that subsequent EH table
521 // generation can't handle and that the verifier rejects. So when we
522 // see such a call, leave it as a call.
523 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
524 Value *UnwindDestToken =
525 getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
526 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
529 Instruction *MemoKey;
530 if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
531 MemoKey = CatchPad->getCatchSwitch();
533 MemoKey = FuncletPad;
534 assert(FuncletUnwindMap->count(MemoKey) &&
535 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
536 "must get memoized to avoid confusing later searches");
540 changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
546 /// If we inlined an invoke site, we need to convert calls
547 /// in the body of the inlined function into invokes.
549 /// II is the invoke instruction being inlined. FirstNewBlock is the first
550 /// block of the inlined code (the last block is the end of the function),
551 /// and InlineCodeInfo is information about the code that got inlined.
552 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
553 ClonedCodeInfo &InlinedCodeInfo) {
554 BasicBlock *InvokeDest = II->getUnwindDest();
556 Function *Caller = FirstNewBlock->getParent();
558 // The inlined code is currently at the end of the function, scan from the
559 // start of the inlined code to its end, checking for stuff we need to
560 // rewrite.
561 LandingPadInliningInfo Invoke(II);
563 // Get all of the inlined landing pad instructions.
564 SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
565 for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
566 I != E; ++I)
567 if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
568 InlinedLPads.insert(II->getLandingPadInst());
570 // Append the clauses from the outer landing pad instruction into the inlined
571 // landing pad instructions.
572 LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
573 for (LandingPadInst *InlinedLPad : InlinedLPads) {
574 unsigned OuterNum = OuterLPad->getNumClauses();
575 InlinedLPad->reserveClauses(OuterNum);
576 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
577 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
578 if (OuterLPad->isCleanup())
579 InlinedLPad->setCleanup(true);
582 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
583 BB != E; ++BB) {
584 if (InlinedCodeInfo.ContainsCalls)
585 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
586 &*BB, Invoke.getOuterResumeDest()))
587 // Update any PHI nodes in the exceptional block to indicate that there
588 // is now a new entry in them.
589 Invoke.addIncomingPHIValuesFor(NewBB);
591 // Forward any resumes that are remaining here.
592 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
593 Invoke.forwardResume(RI, InlinedLPads);
596 // Now that everything is happy, we have one final detail. The PHI nodes in
597 // the exception destination block still have entries due to the original
598 // invoke instruction. Eliminate these entries (which might even delete the
600 InvokeDest->removePredecessor(II->getParent());
603 /// If we inlined an invoke site, we need to convert calls
604 /// in the body of the inlined function into invokes.
606 /// II is the invoke instruction being inlined. FirstNewBlock is the first
607 /// block of the inlined code (the last block is the end of the function),
608 /// and InlineCodeInfo is information about the code that got inlined.
609 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
610 ClonedCodeInfo &InlinedCodeInfo) {
611 BasicBlock *UnwindDest = II->getUnwindDest();
612 Function *Caller = FirstNewBlock->getParent();
614 assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
616 // If there are PHI nodes in the unwind destination block, we need to keep
617 // track of which values came into them from the invoke before removing the
618 // edge from this block.
619 SmallVector<Value *, 8> UnwindDestPHIValues;
620 llvm::BasicBlock *InvokeBB = II->getParent();
621 for (Instruction &I : *UnwindDest) {
622 // Save the value to use for this edge.
623 PHINode *PHI = dyn_cast<PHINode>(&I);
626 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
629 // Add incoming-PHI values to the unwind destination block for the given basic
630 // block, using the values for the original invoke's source block.
631 auto UpdatePHINodes = [&](BasicBlock *Src) {
632 BasicBlock::iterator I = UnwindDest->begin();
633 for (Value *V : UnwindDestPHIValues) {
634 PHINode *PHI = cast<PHINode>(I);
635 PHI->addIncoming(V, Src);
640 // This connects all the instructions which 'unwind to caller' to the invoke
641 // destination.
642 UnwindDestMemoTy FuncletUnwindMap;
643 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
644 BB != E; ++BB) {
645 if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
646 if (CRI->unwindsToCaller()) {
647 auto *CleanupPad = CRI->getCleanupPad();
648 CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
649 CRI->eraseFromParent();
650 UpdatePHINodes(&*BB);
651 // Finding a cleanupret with an unwind destination would confuse
652 // subsequent calls to getUnwindDestToken, so map the cleanuppad
653 // to short-circuit any such calls and recognize this as an "unwind
654 // to caller" cleanup.
655 assert(!FuncletUnwindMap.count(CleanupPad) ||
656 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
657 FuncletUnwindMap[CleanupPad] =
658 ConstantTokenNone::get(Caller->getContext());
662 Instruction *I = BB->getFirstNonPHI();
663 if (!I->isEHPad())
664 continue;
666 Instruction *Replacement = nullptr;
667 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
668 if (CatchSwitch->unwindsToCaller()) {
669 Value *UnwindDestToken;
670 if (auto *ParentPad =
671 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
672 // This catchswitch is nested inside another funclet. If that
673 // funclet has an unwind destination within the inlinee, then
674 // unwinding out of this catchswitch would be UB. Rewriting this
675 // catchswitch to unwind to the inlined invoke's unwind dest would
676 // give the parent funclet multiple unwind destinations, which is
677 // something that subsequent EH table generation can't handle and
678 // that the verifier rejects. So when we see such a call, leave it
679 // as "unwind to caller".
680 UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
681 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
684 // This catchswitch has no parent to inherit constraints from, and
685 // none of its descendants can have an unwind edge that exits it and
686 // targets another funclet in the inlinee. It may or may not have a
687 // descendant that definitively has an unwind to caller. In either
688 // case, we'll have to assume that any unwinds out of it may need to
689 // be routed to the caller, so treat it as though it has a definitive
691 UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
693 auto *NewCatchSwitch = CatchSwitchInst::Create(
694 CatchSwitch->getParentPad(), UnwindDest,
695 CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
697 for (BasicBlock *PadBB : CatchSwitch->handlers())
698 NewCatchSwitch->addHandler(PadBB);
699 // Propagate info for the old catchswitch over to the new one in
700 // the unwind map. This also serves to short-circuit any subsequent
701 // checks for the unwind dest of this catchswitch, which would get
702 // confused if they found the outer handler in the callee.
703 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
704 Replacement = NewCatchSwitch;
706 } else if (!isa<FuncletPadInst>(I)) {
707 llvm_unreachable("unexpected EHPad!");
711 Replacement->takeName(I);
712 I->replaceAllUsesWith(Replacement);
713 I->eraseFromParent();
714 UpdatePHINodes(&*BB);
718 if (InlinedCodeInfo.ContainsCalls)
719 for (Function::iterator BB = FirstNewBlock->getIterator(),
720 E = Caller->end();
721 BB != E; ++BB)
722 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
723 &*BB, UnwindDest, &FuncletUnwindMap))
724 // Update any PHI nodes in the exceptional block to indicate that there
725 // is now a new entry in them.
726 UpdatePHINodes(NewBB);
728 // Now that everything is happy, we have one final detail. The PHI nodes in
729 // the exception destination block still have entries due to the original
730 // invoke instruction. Eliminate these entries (which might even delete the
732 UnwindDest->removePredecessor(InvokeBB);
735 /// When inlining a call site that has !llvm.mem.parallel_loop_access metadata,
736 /// that metadata should be propagated to all memory-accessing cloned
737 /// instructions.
738 static void PropagateParallelLoopAccessMetadata(CallSite CS,
739 ValueToValueMapTy &VMap) {
740 MDNode *M =
741 CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
745 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
746 VMI != VMIE; ++VMI) {
750 Instruction *NI = dyn_cast<Instruction>(VMI->second);
754 if (MDNode *PM = NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
755 M = MDNode::concatenate(PM, M);
756 NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
757 } else if (NI->mayReadOrWriteMemory()) {
758 NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
763 /// When inlining a function that contains noalias scope metadata,
764 /// this metadata needs to be cloned so that the inlined blocks
765 /// have different "unique scopes" at every call site. Were this not done, then
766 /// aliasing scopes from a function inlined into a caller multiple times could
767 /// not be differentiated (and this would lead to miscompiles because the
768 /// non-aliasing property communicated by the metadata could have
769 /// call-site-specific control dependencies).
770 static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
771 const Function *CalledFunc = CS.getCalledFunction();
772 SetVector<const MDNode *> MD;
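773 // Collect all of the alias scope and noalias metadata used by the callee.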
774 // Note: We could only clone the metadata if it is already used in the
775 // caller. I'm omitting that check here because it might confuse
776 // inter-procedural alias analysis passes. We can revisit this if it becomes
777 // an efficiency or overhead problem.
779 for (const BasicBlock &I : *CalledFunc)
780 for (const Instruction &J : I) {
781 if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
783 if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
790 // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
791 // the set.
792 SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
793 while (!Queue.empty()) {
794 const MDNode *M = cast<MDNode>(Queue.pop_back_val());
795 for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
796 if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
801 // Now we have a complete set of all metadata in the chains used to specify
802 // the noalias scopes and the lists of those scopes.
803 SmallVector<TempMDTuple, 16> DummyNodes;
804 DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
805 for (const MDNode *I : MD) {
806 DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
807 MDMap[I].reset(DummyNodes.back().get());
810 // Create new metadata nodes to replace the dummy nodes, replacing old
811 // metadata references with either a dummy node or an already-created new
812 // node.
813 for (const MDNode *I : MD) {
814 SmallVector<Metadata *, 4> NewOps;
815 for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
816 const Metadata *V = I->getOperand(i);
817 if (const MDNode *M = dyn_cast<MDNode>(V))
818 NewOps.push_back(MDMap[M]);
820 NewOps.push_back(const_cast<Metadata *>(V));
823 MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
824 MDTuple *TempM = cast<MDTuple>(MDMap[I]);
825 assert(TempM->isTemporary() && "Expected temporary node");
827 TempM->replaceAllUsesWith(NewM);
830 // Now replace the metadata in the new inlined instructions with the
831 // replacements from the map.
832 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
833 VMI != VMIE; ++VMI) {
837 Instruction *NI = dyn_cast<Instruction>(VMI->second);
841 if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
842 MDNode *NewMD = MDMap[M];
843 // If the call site also had alias scope metadata (a list of scopes to
844 // which instructions inside it might belong), propagate those scopes to
845 // the inlined instructions.
847 CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
848 NewMD = MDNode::concatenate(NewMD, CSM);
849 NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
850 } else if (NI->mayReadOrWriteMemory()) {
852 CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
853 NI->setMetadata(LLVMContext::MD_alias_scope, M);
856 if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
857 MDNode *NewMD = MDMap[M];
858 // If the call site also had noalias metadata (a list of scopes with
859 // which instructions inside it don't alias), propagate those scopes to
860 // the inlined instructions.
862 CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
863 NewMD = MDNode::concatenate(NewMD, CSM);
864 NI->setMetadata(LLVMContext::MD_noalias, NewMD);
865 } else if (NI->mayReadOrWriteMemory()) {
866 if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
867 NI->setMetadata(LLVMContext::MD_noalias, M);
872 /// If the inlined function has noalias arguments,
873 /// then add new alias scopes for each noalias argument, tag the mapped noalias
874 /// parameters with noalias metadata specifying the new scope, and tag all
875 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
876 static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
877 const DataLayout &DL, AAResults *CalleeAAR) {
878 if (!EnableNoAliasConversion)
881 const Function *CalledFunc = CS.getCalledFunction();
882 SmallVector<const Argument *, 4> NoAliasArgs;
884 for (const Argument &Arg : CalledFunc->args())
885 if (Arg.hasNoAliasAttr() && !Arg.use_empty())
886 NoAliasArgs.push_back(&Arg);
888 if (NoAliasArgs.empty())
891 // To do a good job, if a noalias variable is captured, we need to know if
892 // the capture point dominates the particular use we're considering.
893 DominatorTree DT;
894 DT.recalculate(const_cast<Function&>(*CalledFunc));
896 // noalias indicates that pointer values based on the argument do not alias
897 // pointer values which are not based on it. So we add a new "scope" for each
898 // noalias function argument. Accesses using pointers based on that argument
899 // become part of that alias scope, accesses using pointers not based on that
900 // argument are tagged as noalias with that scope.
902 DenseMap<const Argument *, MDNode *> NewScopes;
903 MDBuilder MDB(CalledFunc->getContext());
905 // Create a new scope domain for this function.
907 MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
908 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
909 const Argument *A = NoAliasArgs[i];
911 std::string Name = CalledFunc->getName();
914 Name += A->getName();
916 Name += ": argument ";
920 // Note: We always create a new anonymous root here. This is true regardless
921 // of the linkage of the callee because the aliasing "scope" is not just a
922 // property of the callee, but also all control dependencies in the caller.
923 MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
924 NewScopes.insert(std::make_pair(A, NewScope));
927 // Iterate over all new instructions in the map; for all memory-access
928 // instructions, add the alias scope metadata.
929 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
930 VMI != VMIE; ++VMI) {
931 if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
935 Instruction *NI = dyn_cast<Instruction>(VMI->second);
939 bool IsArgMemOnlyCall = false, IsFuncCall = false;
940 SmallVector<const Value *, 2> PtrArgs;
942 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
943 PtrArgs.push_back(LI->getPointerOperand());
944 else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
945 PtrArgs.push_back(SI->getPointerOperand());
946 else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
947 PtrArgs.push_back(VAAI->getPointerOperand());
948 else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
949 PtrArgs.push_back(CXI->getPointerOperand());
950 else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
951 PtrArgs.push_back(RMWI->getPointerOperand());
952 else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
953 // If we know that the call does not access memory, then we'll still
954 // know that about the inlined clone of this call site, and we don't
955 // need to add metadata.
956 if (ICS.doesNotAccessMemory())
961 FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
962 if (MRB == FMRB_OnlyAccessesArgumentPointees ||
963 MRB == FMRB_OnlyReadsArgumentPointees)
964 IsArgMemOnlyCall = true;
967 for (Value *Arg : ICS.args()) {
968 // We need to check the underlying objects of all arguments, not just
969 // the pointer arguments, because we might be passing pointers as
970 // integers, etc.
971 // However, if we know that the call only accesses pointer arguments,
972 // then we only need to check the pointer arguments.
973 if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
976 PtrArgs.push_back(Arg);
980 // If we found no pointers, then this instruction is not suitable for
981 // pairing with an instruction to receive aliasing metadata.
982 // However, if this is a call, then we might just alias with none of the
983 // noalias arguments.
984 if (PtrArgs.empty() && !IsFuncCall)
987 // It is possible that there is only one underlying object, but that it is
988 // only reachable through several PHIs, and thus could appear more than once
989 // in the Objects list.
990 SmallPtrSet<const Value *, 4> ObjSet;
991 SmallVector<Metadata *, 4> Scopes, NoAliases;
993 SmallSetVector<const Argument *, 4> NAPtrArgs;
994 for (const Value *V : PtrArgs) {
995 SmallVector<Value *, 4> Objects;
996 GetUnderlyingObjects(const_cast<Value*>(V),
997 Objects, DL, /* LI = */ nullptr);
999 for (Value *O : Objects)
1000 ObjSet.insert(O);
1003 // Figure out if we're derived from anything that is not a noalias
1005 bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1006 for (const Value *V : ObjSet) {
1007 // Is this value a constant that cannot be derived from any pointer
1008 // value (we need to exclude constant expressions, for example, that
1009 // are formed from arithmetic on global symbols).
1010 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1011 isa<ConstantPointerNull>(V) ||
1012 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1016 // If this is anything other than a noalias argument, then we cannot
1017 // completely describe the aliasing properties using alias.scope
1018 // metadata (and, thus, won't add any).
1019 if (const Argument *A = dyn_cast<Argument>(V)) {
1020 if (!A->hasNoAliasAttr())
1021 UsesAliasingPtr = true;
1023 UsesAliasingPtr = true;
1026 // If this is not some identified function-local object (which cannot
1027 // directly alias a noalias argument), or some other argument (which,
1028 // by definition, also cannot alias a noalias argument), then we could
1029 // alias a noalias argument that has been captured.
1030 if (!isa<Argument>(V) &&
1031 !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1032 CanDeriveViaCapture = true;
1035 // A function call can always get captured noalias pointers (via other
1036 // parameters, globals, etc.).
1037 if (IsFuncCall && !IsArgMemOnlyCall)
1038 CanDeriveViaCapture = true;
1040 // First, we want to figure out all of the sets with which we definitely
1041 // don't alias. Iterate over all noalias sets, and add those for which:
1042 // 1. The noalias argument is not in the set of objects from which we
1043 // definitely derive.
1044 // 2. The noalias argument has not yet been captured.
1045 // An arbitrary function that might load pointers could see captured
1046 // noalias arguments via other noalias arguments or globals, and so we
1047 // must always check for prior capture.
1048 for (const Argument *A : NoAliasArgs) {
1049 if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1050 // It might be tempting to skip the
1051 // PointerMayBeCapturedBefore check if
1052 // A->hasNoCaptureAttr() is true, but this is
1053 // incorrect because nocapture only guarantees
1054 // that no copies outlive the function, not
1055 // that the value cannot be locally captured.
1056 !PointerMayBeCapturedBefore(A,
1057 /* ReturnCaptures */ false,
1058 /* StoreCaptures */ false, I, &DT)))
1059 NoAliases.push_back(NewScopes[A]);
1062 if (!NoAliases.empty())
1063 NI->setMetadata(LLVMContext::MD_noalias,
1064 MDNode::concatenate(
1065 NI->getMetadata(LLVMContext::MD_noalias),
1066 MDNode::get(CalledFunc->getContext(), NoAliases)));
1068 // Next, we want to figure out all of the sets to which we might belong.
1069 // We might belong to a set if the noalias argument is in the set of
1070 // underlying objects. If there is some non-noalias argument in our list
1071 // of underlying objects, then we cannot add a scope because the fact
1072 // that some access does not alias with any set of our noalias arguments
1073 // cannot itself guarantee that it does not alias with this access
1074 // (because there is some pointer of unknown origin involved and the
1075 // other access might also depend on this pointer). We also cannot add
1076 // scopes to arbitrary functions unless we know they don't access any
1077 // non-parameter pointer-values.
1078 bool CanAddScopes = !UsesAliasingPtr;
1079 if (CanAddScopes && IsFuncCall)
1080 CanAddScopes = IsArgMemOnlyCall;
1083 for (const Argument *A : NoAliasArgs) {
1084 if (ObjSet.count(A))
1085 Scopes.push_back(NewScopes[A]);
1088 if (!Scopes.empty())
1089 NI->setMetadata(
1090 LLVMContext::MD_alias_scope,
1091 MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1092 MDNode::get(CalledFunc->getContext(), Scopes)));
1097 /// If the inlined function has non-byval align arguments, then
1098 /// add @llvm.assume-based alignment assumptions to preserve this information.
1099 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
1100 if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1103 AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller());
1104 auto &DL = CS.getCaller()->getParent()->getDataLayout();
1106 // To avoid inserting redundant assumptions, we should check for assumptions
1107 // already in the caller. To do this, we might need a DT of the caller.
1108 DominatorTree DT;
1109 bool DTCalculated = false;
1111 Function *CalledFunc = CS.getCalledFunction();
1112 for (Argument &Arg : CalledFunc->args()) {
1113 unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
1114 if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) {
1115 if (!DTCalculated) {
1116 DT.recalculate(*CS.getCaller());
1117 DTCalculated = true;
1120 // If we can already prove the asserted alignment in the context of the
1121 // caller, then don't bother inserting the assumption.
1122 Value *ArgVal = CS.getArgument(Arg.getArgNo());
1123 if (getKnownAlignment(ArgVal, DL, CS.getInstruction(), AC, &DT) >= Align)
1126 CallInst *NewAsmp = IRBuilder<>(CS.getInstruction())
1127 .CreateAlignmentAssumption(DL, ArgVal, Align);
1128 AC->registerAssumption(NewAsmp);
1133 /// Once we have cloned code over from a callee into the caller,
1134 /// update the specified callgraph to reflect the changes we made.
1135 /// Note that it's possible that not all code was copied over, so only
1136 /// some edges of the callgraph may remain.
1137 static void UpdateCallGraphAfterInlining(CallSite CS,
1138 Function::iterator FirstNewBlock,
1139 ValueToValueMapTy &VMap,
1140 InlineFunctionInfo &IFI) {
1141 CallGraph &CG = *IFI.CG;
1142 const Function *Caller = CS.getCaller();
1143 const Function *Callee = CS.getCalledFunction();
1144 CallGraphNode *CalleeNode = CG[Callee];
1145 CallGraphNode *CallerNode = CG[Caller];
1147 // Since we inlined some uninlined call sites in the callee into the caller,
1148 // add edges from the caller to all of the callees of the callee.
1149 CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1151 // Consider the case where CalleeNode == CallerNode.
1152 CallGraphNode::CalledFunctionsVector CallCache;
1153 if (CalleeNode == CallerNode) {
1154 CallCache.assign(I, E);
1155 I = CallCache.begin();
1156 E = CallCache.end();
1159 for (; I != E; ++I) {
1160 const Value *OrigCall = I->first;
1162 ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1163 // Only copy the edge if the call was inlined!
1164 if (VMI == VMap.end() || VMI->second == nullptr)
1167 // If the call was inlined, but then constant folded, there is no edge to
1168 // add. Check for this case.
1169 Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
1173 // We do not treat intrinsic calls like real function calls because we
1174 // expect them to become inline code; do not add an edge for an intrinsic.
1175 CallSite CS = CallSite(NewCall);
1176 if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
1179 // Remember that this call site got inlined for the client of
1181 IFI.InlinedCalls.push_back(NewCall);
1183 // It's possible that inlining the callsite will cause it to go from an
1184 // indirect to a direct call by resolving a function pointer. If this
1185 // happens, set the callee of the new call site to a more precise
1186 // destination. This can also happen if the call graph node of the caller
1187 // was just unnecessarily imprecise.
1188 if (!I->second->getFunction())
1189 if (Function *F = CallSite(NewCall).getCalledFunction()) {
1190 // Indirect call site resolved to direct call.
1191 CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
1196 CallerNode->addCalledFunction(CallSite(NewCall), I->second);
1199 // Update the call graph by deleting the edge from Callee to Caller. We must
1200 // do this after the loop above in case Caller and Callee are the same.
1201 CallerNode->removeCallEdgeFor(CS);
1204 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1205 BasicBlock *InsertBlock,
1206 InlineFunctionInfo &IFI) {
1207 Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1208 IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
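1209 // Copy exactly as many bytes as the byval aggregate occupies in memory.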
1210 Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1212 // Always generate a memcpy of alignment 1 here because we don't know
1213 // the alignment of the src pointer. Other optimizations can infer
1214 // better alignment.
1215 Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
1218 /// When inlining a call site that has a byval argument,
1219 /// we have to make the implicit memcpy explicit by adding it.
1220 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1221 const Function *CalledFunc,
1222 InlineFunctionInfo &IFI,
1223 unsigned ByValAlignment) {
1224 PointerType *ArgTy = cast<PointerType>(Arg->getType());
1225 Type *AggTy = ArgTy->getElementType();
1227 Function *Caller = TheCall->getFunction();
1228 const DataLayout &DL = Caller->getParent()->getDataLayout();
1230 // If the called function is readonly, then it could not mutate the caller's
1231 // copy of the byval'd memory. In this case, it is safe to elide the copy and
1232 // temporary.
1233 if (CalledFunc->onlyReadsMemory()) {
1234 // If the byval argument has a specified alignment that is greater than the
1235 // passed in pointer, then we either have to round up the input pointer or
1236 // give up on this transformation.
1237 if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
1238 return Arg;
1240 AssumptionCache *AC =
1241 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
1243 // If the pointer is already known to be sufficiently aligned, or if we can
1244 // round it up to a larger alignment, then we don't need a temporary.
1245 if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
1246 ByValAlignment)
1247 return Arg;
1249 // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
1250 // for code quality, but rarely happens and is required for correctness.
1253 // Create the alloca. If we have DataLayout, use nice alignment.
1254 unsigned Align = DL.getPrefTypeAlignment(AggTy);
1256 // If the byval had an alignment specified, we *must* use at least that
1257 // alignment, as it is required by the byval argument (and uses of the
1258 // pointer inside the callee).
1259 Align = std::max(Align, ByValAlignment);
1261 Value *NewAlloca = new AllocaInst(AggTy, DL.getAllocaAddrSpace(),
1262 nullptr, Align, Arg->getName(),
1263 &*Caller->begin()->begin());
1264 IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1266 // Uses of the argument in the function should use our new alloca
1267 // instead.
1268 return NewAlloca;
1271 // Check whether this Value is used by a lifetime intrinsic.
1272 static bool isUsedByLifetimeMarker(Value *V) {
1273 for (User *U : V->users()) {
1274 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
1275 switch (II->getIntrinsicID()) {
1277 case Intrinsic::lifetime_start:
1278 case Intrinsic::lifetime_end:
1279 return true;
1286 // Check whether the given alloca already has
1287 // lifetime.start or lifetime.end intrinsics.
1288 static bool hasLifetimeMarkers(AllocaInst *AI) {
1289 Type *Ty = AI->getType();
1290 Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1291 Ty->getPointerAddressSpace());
1292 if (Ty == Int8PtrTy)
1293 return isUsedByLifetimeMarker(AI);
1295 // Do a scan to find all the casts to i8*.
1296 for (User *U : AI->users()) {
1297 if (U->getType() != Int8PtrTy) continue;
1298 if (U->stripPointerCasts() != AI) continue;
1299 if (isUsedByLifetimeMarker(U))
1300 return true;
1305 /// Rebuild the entire inlined-at chain for this instruction so that the top of
1306 /// the chain now is inlined-at the new call site.
1307 static DebugLoc
1308 updateInlinedAtInfo(const DebugLoc &DL, DILocation *InlinedAtNode,
1309 LLVMContext &Ctx,
1310 DenseMap<const DILocation *, DILocation *> &IANodes) {
1311 SmallVector<DILocation *, 3> InlinedAtLocations;
1312 DILocation *Last = InlinedAtNode;
1313 DILocation *CurInlinedAt = DL;
1315 // Gather all the inlined-at nodes
1316 while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
1317 // Skip any we've already built nodes for
1318 if (DILocation *Found = IANodes[IA]) {
1323 InlinedAtLocations.push_back(IA);
1327 // Starting from the top, rebuild the nodes to point to the new inlined-at
1328 // location (then rebuilding the rest of the chain behind it) and update the
1329 // map of already-constructed inlined-at nodes.
1330 for (const DILocation *MD : reverse(InlinedAtLocations)) {
1331 Last = IANodes[MD] = DILocation::getDistinct(
1332 Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
1335 // And finally create the normal location for this instruction, referring to
1336 // the new inlined-at chain.
1337 return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
1340 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1341 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1342 /// cannot be static.
1343 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI ) {
1344 return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1347 /// Update inlined instructions' line numbers to
1348 /// encode the location where these instructions are inlined.
1349 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1350 Instruction *TheCall, bool CalleeHasDebugInfo) {
1351 const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1355 auto &Ctx = Fn->getContext();
1356 DILocation *InlinedAtNode = TheCallDL;
1358 // Create a unique call site, not to be confused with any other call from the
1359 // same location.
1360 InlinedAtNode = DILocation::getDistinct(
1361 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1362 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1364 // Cache the inlined-at nodes as they're built so they are reused; without
1365 // this, every instruction's inlined-at chain would become distinct from each
1366 // other.
1367 DenseMap<const DILocation *, DILocation *> IANodes;
1369 for (; FI != Fn->end(); ++FI) {
1370 for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1371 BI != BE; ++BI) {
1372 if (DebugLoc DL = BI->getDebugLoc()) {
1373 BI->setDebugLoc(
1374 updateInlinedAtInfo(DL, InlinedAtNode, BI->getContext(), IANodes));
1378 if (CalleeHasDebugInfo)
1381 // If the inlined instruction has no line number, make it look as if it
1382 // originates from the call location. This is important for
1383 // ((__always_inline__, __nodebug__)) functions which must use caller
1384 // location for all instructions in their function body.
1386 // Don't update static allocas, as they may get moved later.
1387 if (auto *AI = dyn_cast<AllocaInst>(BI))
1388 if (allocaWouldBeStaticInEntry(AI))
1391 BI->setDebugLoc(TheCallDL);
1395 /// Update the block frequencies of the caller after a callee has been inlined.
1397 /// Each block cloned into the caller has its block frequency scaled by the
1398 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1399 /// callee's entry block gets the same frequency as the callsite block and the
1400 /// relative frequencies of all cloned blocks remain the same after cloning.
1401 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1402 const ValueToValueMapTy &VMap,
1403 BlockFrequencyInfo *CallerBFI,
1404 BlockFrequencyInfo *CalleeBFI,
1405 const BasicBlock &CalleeEntryBlock) {
1406 SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1407 for (auto const &Entry : VMap) {
1408 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1410 auto *OrigBB = cast<BasicBlock>(Entry.first);
1411 auto *ClonedBB = cast<BasicBlock>(Entry.second);
1412 uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1413 if (!ClonedBBs.insert(ClonedBB).second) {
1414 // Multiple blocks in the callee might get mapped to one cloned block in
1415 // the caller since we prune the callee as we clone it. When that happens,
1416 // we want to use the maximum among the original blocks' frequencies.
1417 uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1418 if (NewFreq > Freq)
1419 Freq = NewFreq;
1421 CallerBFI->setBlockFreq(ClonedBB, Freq);
1423 BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1424 CallerBFI->setBlockFreqAndScale(
1425 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1426 ClonedBBs);
1429 /// Update the branch metadata for cloned call instructions.
1430 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1431 const Optional<uint64_t> &CalleeEntryCount,
1432 const Instruction *TheCall) {
1433 if (!CalleeEntryCount.hasValue() || CalleeEntryCount.getValue() < 1)
1435 Optional<uint64_t> CallSiteCount =
1436 ProfileSummaryInfo::getProfileCount(TheCall, nullptr);
1437 uint64_t CallCount =
1438 std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1439 CalleeEntryCount.getValue());
1441 for (auto const &Entry : VMap)
1442 if (isa<CallInst>(Entry.first))
1443 if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1444 CI->updateProfWeight(CallCount, CalleeEntryCount.getValue());
1445 for (BasicBlock &BB : *Callee)
1446 // No need to update the callsite if it is pruned during inlining.
1447 if (VMap.count(&BB))
1448 for (Instruction &I : BB)
1449 if (CallInst *CI = dyn_cast<CallInst>(&I))
1450 CI->updateProfWeight(CalleeEntryCount.getValue() - CallCount,
1451 CalleeEntryCount.getValue());
1452 }
1454 /// Update the entry count of callee after inlining.
1456 /// The callsite's block count is subtracted from the callee's function entry
1457 /// count.
1458 static void updateCalleeCount(BlockFrequencyInfo *CallerBFI, BasicBlock *CallBB,
1459 Instruction *CallInst, Function *Callee) {
1460 // If the callee has an original count of N, and the estimated count of
1461 // callsite is M, the new callee count is set to N - M. M is estimated from
1462 // the caller's entry count, its entry block frequency and the block frequency
1463 // of the callsite.
1464 Optional<uint64_t> CalleeCount = Callee->getEntryCount();
1465 if (!CalleeCount.hasValue())
1466 return;
1467 Optional<uint64_t> CallCount =
1468 ProfileSummaryInfo::getProfileCount(CallInst, CallerBFI);
1469 if (!CallCount.hasValue())
1470 return;
1471 // Since CallSiteCount is an estimate, it could exceed the original callee
1472 // count; in that case the new callee count is clamped to 0.
1473 if (CallCount.getValue() > CalleeCount.getValue())
1474 Callee->setEntryCount(0);
1475 else
1476 Callee->setEntryCount(CalleeCount.getValue() - CallCount.getValue());
1477 }
1479 /// This function inlines the called function into the basic block of the
1480 /// caller. This returns false if it is not possible to inline this call.
1481 /// The program is still in a well defined state if this occurs though.
1483 /// Note that this only does one level of inlining. For example, if the
1484 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1485 /// exists in the instruction stream. Similarly this will inline a recursive
1486 /// function by one level.
1487 bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
1488 AAResults *CalleeAAR, bool InsertLifetime) {
1489 Instruction *TheCall = CS.getInstruction();
1490 assert(TheCall->getParent() && TheCall->getFunction()
1491 && "Instruction not in function!");
1493 // If IFI has any state in it, zap it before we fill it in.
1494 IFI.reset();
1496 Function *CalledFunc = CS.getCalledFunction();
1497 if (!CalledFunc || // Can't inline external function or indirect
1498 CalledFunc->isDeclaration() || // call, or call to a vararg function!
1499 CalledFunc->getFunctionType()->isVarArg()) return false;
1501 // The inliner does not know how to inline through calls with operand bundles
1502 // in general ...
1503 if (CS.hasOperandBundles()) {
1504 for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
1505 uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
1506 // ... but it knows how to inline through "deopt" operand bundles ...
1507 if (Tag == LLVMContext::OB_deopt)
1508 continue;
1509 // ... and "funclet" operand bundles.
1510 if (Tag == LLVMContext::OB_funclet)
1511 continue;
1513 return false;
1514 }
1515 }
1517 // If the call to the callee cannot throw, set the 'nounwind' flag on any
1518 // calls that we inline.
1519 bool MarkNoUnwind = CS.doesNotThrow();
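// (The flag is applied to each inlined call instruction once the body has been
// cloned; see the ContainsCalls loop further down.)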
1521 BasicBlock *OrigBB = TheCall->getParent();
1522 Function *Caller = OrigBB->getParent();
1524 // GC poses two hazards to inlining, which only occur when the callee has GC:
1525 // 1. If the caller has no GC, then the callee's GC must be propagated to the
1526 // caller.
1527 // 2. If the caller has a differing GC, it is invalid to inline.
1528 if (CalledFunc->hasGC()) {
1529 if (!Caller->hasGC())
1530 Caller->setGC(CalledFunc->getGC());
1531 else if (CalledFunc->getGC() != Caller->getGC())
1532 return false;
1533 }
1535 // Get the personality function from the callee if it contains a landing pad.
1536 Constant *CalledPersonality =
1537 CalledFunc->hasPersonalityFn()
1538 ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1539 : nullptr;
1541 // Find the personality function used by the landing pads of the caller. If it
1542 // exists, then check to see that it matches the personality function used in
1543 // the callee.
1544 Constant *CallerPersonality =
1545 Caller->hasPersonalityFn()
1546 ? Caller->getPersonalityFn()->stripPointerCasts()
1547 : nullptr;
1548 if (CalledPersonality) {
1549 if (!CallerPersonality)
1550 Caller->setPersonalityFn(CalledPersonality);
1551 // If the personality functions match, then we can perform the
1552 // inlining. Otherwise, we can't inline.
1553 // TODO: This isn't 100% true. Some personality functions are proper
1554 // supersets of others and can be used in place of the other.
1555 else if (CalledPersonality != CallerPersonality)
1556 return false;
1557 }
1559 // We need to figure out which funclet the callsite was in so that we may
1560 // properly nest the callee.
1561 Instruction *CallSiteEHPad = nullptr;
1562 if (CallerPersonality) {
1563 EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1564 if (isFuncletEHPersonality(Personality)) {
1565 Optional<OperandBundleUse> ParentFunclet =
1566 CS.getOperandBundle(LLVMContext::OB_funclet);
1567 if (ParentFunclet)
1568 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1570 // OK, the inlining site is legal. What about the target function?
1572 if (CallSiteEHPad) {
1573 if (Personality == EHPersonality::MSVC_CXX) {
1574 // The MSVC personality cannot tolerate catches getting inlined into
1575 // cleanup funclets.
1576 if (isa<CleanupPadInst>(CallSiteEHPad)) {
1577 // Ok, the call site is within a cleanuppad. Let's check the callee
1578 // for catchpads.
1579 for (const BasicBlock &CalledBB : *CalledFunc) {
1580 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1581 return false;
1582 }
1583 }
1584 } else if (isAsynchronousEHPersonality(Personality)) {
1585 // SEH is even less tolerant; there may not be any sort of exceptional
1586 // funclet in the callee.
1587 for (const BasicBlock &CalledBB : *CalledFunc) {
1588 if (CalledBB.isEHPad())
1589 return false;
1590 }
1591 }
1592 }
1593 }
1594 }
1596 // Determine if we are dealing with a call in an EHPad which does not unwind
1597 // to caller.
1598 bool EHPadForCallUnwindsLocally = false;
1599 if (CallSiteEHPad && CS.isCall()) {
1600 UnwindDestMemoTy FuncletUnwindMap;
1601 Value *CallSiteUnwindDestToken =
1602 getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1604 EHPadForCallUnwindsLocally =
1605 CallSiteUnwindDestToken &&
1606 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1607 }
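// EHPadForCallUnwindsLocally is consulted after cloning: a cleanupret in the
// inlined code that unwinds to the caller would be dynamically unreachable in
// that case and is rewritten to unreachable (see the CallSiteEHPad fixup loop
// below).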
1609 // Get an iterator to the last basic block in the function, which will have
1610 // the new function inlined after it.
1611 Function::iterator LastBlock = --Caller->end();
1613 // Make sure to capture all of the return instructions from the cloned
1614 // function.
1615 SmallVector<ReturnInst*, 8> Returns;
1616 ClonedCodeInfo InlinedFunctionInfo;
1617 Function::iterator FirstNewBlock;
1619 { // Scope to destroy VMap after cloning.
1620 ValueToValueMapTy VMap;
1621 // Keep a list of pair (dst, src) to emit byval initializations.
1622 SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1624 auto &DL = Caller->getParent()->getDataLayout();
1626 assert(CalledFunc->arg_size() == CS.arg_size() &&
1627 "No varargs calls can be inlined!");
1629 // Calculate the vector of arguments to pass into the function cloner, which
1630 // matches up the formal to the actual argument values.
1631 CallSite::arg_iterator AI = CS.arg_begin();
1632 unsigned ArgNo = 0;
1633 for (Function::arg_iterator I = CalledFunc->arg_begin(),
1634 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1635 Value *ActualArg = *AI;
1637 // When byval arguments are actually inlined, we need to make the copy implied
1638 // by them explicit. However, we don't do this if the callee is readonly
1639 // or readnone, because the copy would be unneeded: the callee doesn't
1640 // modify the struct.
1641 if (CS.isByValArgument(ArgNo)) {
1642 ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
1643 CalledFunc->getParamAlignment(ArgNo));
1644 if (ActualArg != *AI)
1645 ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1646 }
1648 VMap[&*I] = ActualArg;
1649 }
1651 // Add alignment assumptions if necessary. We do this before the inlined
1652 // instructions are actually cloned into the caller so that we can easily
1653 // check what will be known at the start of the inlined code.
1654 AddAlignmentAssumptions(CS, IFI);
1656 // We want the inliner to prune the code as it copies. We would LOVE to
1657 // have no dead or constant instructions leftover after inlining occurs
1658 // (which can happen, e.g., because an argument was constant), but we'll be
1659 // happy with whatever the cloner can do.
1660 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1661 /*ModuleLevelChanges=*/false, Returns, ".i",
1662 &InlinedFunctionInfo, TheCall);
1663 // Remember the first block that is newly cloned over.
1664 FirstNewBlock = LastBlock; ++FirstNewBlock;
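// CloneAndPruneFunctionInto appends the cloned blocks to the end of the
// caller, so the block following the previously-last block is the first block
// of the inlined body.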
1666 if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1667 // Update the BFI of blocks cloned into the caller.
1668 updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1669 CalledFunc->front());
1671 updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), TheCall);
1672 // Update the profile count of callee.
1673 updateCalleeCount(IFI.CallerBFI, OrigBB, TheCall, CalledFunc);
1675 // Inject byval arguments initialization.
1676 for (std::pair<Value*, Value*> &Init : ByValInit)
1677 HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1678 &*FirstNewBlock, IFI);
1680 Optional<OperandBundleUse> ParentDeopt =
1681 CS.getOperandBundle(LLVMContext::OB_deopt);
1682 if (ParentDeopt) {
1683 SmallVector<OperandBundleDef, 2> OpDefs;
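// Operand bundles cannot be modified on an existing instruction, so each
// inlined call site that needs a merged "deopt" bundle is recreated with the
// new bundle list, RAUW'ed over the original, and then the original is erased.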
1685 for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1686 Instruction *I = dyn_cast_or_null<Instruction>(VH);
1687 if (!I) continue; // instruction was DCE'd or RAUW'ed to undef
1689 OpDefs.clear();
1691 CallSite ICS(I);
1692 OpDefs.reserve(ICS.getNumOperandBundles());
1694 for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
1695 auto ChildOB = ICS.getOperandBundleAt(i);
1696 if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1697 // If the inlined call has other operand bundles, let them be
1698 OpDefs.emplace_back(ChildOB);
1699 continue;
1700 }
1702 // It may be useful to separate this logic (of handling operand
1703 // bundles) out to a separate "policy" component if this gets crowded.
1704 // Prepend the parent's deoptimization continuation to the newly
1705 // inlined call's deoptimization continuation.
1706 std::vector<Value *> MergedDeoptArgs;
1707 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1708 ChildOB.Inputs.size());
1710 MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1711 ParentDeopt->Inputs.begin(),
1712 ParentDeopt->Inputs.end());
1713 MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1714 ChildOB.Inputs.end());
1716 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1719 Instruction *NewI = nullptr;
1720 if (isa<CallInst>(I))
1721 NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
1722 else
1723 NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
1725 // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1726 // this even if the call returns void.
1727 I->replaceAllUsesWith(NewI);
1729 VH = nullptr;
1730 I->eraseFromParent();
1731 }
1732 }
1734 // Update the callgraph if requested.
1735 if (IFI.CG)
1736 UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
1738 // For 'nodebug' functions, the associated DISubprogram is always null.
1739 // Conservatively avoid propagating the callsite debug location to
1740 // instructions inlined from a function whose DISubprogram is not null.
1741 fixupLineNumbers(Caller, FirstNewBlock, TheCall,
1742 CalledFunc->getSubprogram() != nullptr);
1744 // Clone existing noalias metadata if necessary.
1745 CloneAliasScopeMetadata(CS, VMap);
1747 // Add noalias metadata if necessary.
1748 AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);
1750 // Propagate llvm.mem.parallel_loop_access if necessary.
1751 PropagateParallelLoopAccessMetadata(CS, VMap);
1753 // Register any cloned assumptions.
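// (The caller's AssumptionCache was built before inlining and does not know
// about the newly cloned llvm.assume calls, so they are registered here.)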
1754 if (IFI.GetAssumptionCache)
1755 for (BasicBlock &NewBlock :
1756 make_range(FirstNewBlock->getIterator(), Caller->end()))
1757 for (Instruction &I : NewBlock) {
1758 if (auto *II = dyn_cast<IntrinsicInst>(&I))
1759 if (II->getIntrinsicID() == Intrinsic::assume)
1760 (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
1761 }
1762 }
1764 // If there are any alloca instructions in the block that used to be the entry
1765 // block for the callee, move them to the entry block of the caller. First
1766 // calculate which instruction they should be inserted before. We insert the
1767 // instructions at the end of the current alloca list.
1768 {
1769 BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1770 for (BasicBlock::iterator I = FirstNewBlock->begin(),
1771 E = FirstNewBlock->end(); I != E; ) {
1772 AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1773 if (!AI) continue;
1775 // If the alloca is now dead, remove it. This often occurs due to code
1776 // specialization.
1777 if (AI->use_empty()) {
1778 AI->eraseFromParent();
1779 continue;
1780 }
1782 if (!allocaWouldBeStaticInEntry(AI))
1783 continue;
1785 // Keep track of the static allocas that we inline into the caller.
1786 IFI.StaticAllocas.push_back(AI);
1788 // Scan for the block of allocas that we can move over, and move them
1789 // all together.
1790 while (isa<AllocaInst>(I) &&
1791 allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1792 IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1793 ++I;
1794 }
1796 // Transfer all of the allocas over in a block. Using splice means
1797 // that the instructions aren't removed from the symbol table, then
1798 // reinserted.
1799 Caller->getEntryBlock().getInstList().splice(
1800 InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1801 }
1802 // Move any dbg.declares describing the allocas into the entry basic block.
1803 DIBuilder DIB(*Caller->getParent());
1804 for (auto &AI : IFI.StaticAllocas)
1805 replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
1806 }
1808 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
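// Scan the inlined body: record whether it contains musttail or deoptimize
// calls, reduce the tail-call strength of cloned calls as described below, and
// propagate 'nounwind' from the call site when required.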
1809 if (InlinedFunctionInfo.ContainsCalls) {
1810 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1811 if (CallInst *CI = dyn_cast<CallInst>(TheCall))
1812 CallSiteTailKind = CI->getTailCallKind();
1814 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1815 ++BB) {
1816 for (Instruction &I : *BB) {
1817 CallInst *CI = dyn_cast<CallInst>(&I);
1818 if (!CI)
1819 continue;
1821 if (Function *F = CI->getCalledFunction())
1822 InlinedDeoptimizeCalls |=
1823 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
1825 // We need to reduce the strength of any inlined tail calls. For
1826 // musttail, we have to avoid introducing potential unbounded stack
1827 // growth. For example, if functions 'f' and 'g' are mutually recursive
1828 // with musttail, we can inline 'g' into 'f' so long as we preserve
1829 // musttail on the cloned call to 'f'. If either the inlined call site
1830 // or the cloned call site is *not* musttail, the program already has
1831 // one frame of stack growth, so it's safe to remove musttail. Here is
1832 // a table of example transformations:
1834 // f -> musttail g -> musttail f ==> f -> musttail f
1835 // f -> musttail g -> tail f ==> f -> tail f
1836 // f -> g -> musttail f ==> f -> f
1837 // f -> g -> tail f ==> f -> f
1838 CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
1839 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
1840 CI->setTailCallKind(ChildTCK);
1841 InlinedMustTailCalls |= CI->isMustTailCall();
1843 // Calls inlined through a 'nounwind' call site should be marked
1844 // 'nounwind'.
1845 if (MarkNoUnwind)
1846 CI->setDoesNotThrow();
1847 }
1848 }
1849 }
1851 // Leave lifetime markers for the static alloca's, scoping them to the
1852 // function we just inlined.
1853 if (InsertLifetime && !IFI.StaticAllocas.empty()) {
1854 IRBuilder<> builder(&FirstNewBlock->front());
1855 for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
1856 AllocaInst *AI = IFI.StaticAllocas[ai];
1857 // Don't mark swifterror allocas. They can't have bitcast uses.
1858 if (AI->isSwiftError())
1859 continue;
1861 // If the alloca is already scoped to something smaller than the whole
1862 // function then there's no need to add redundant, less accurate markers.
1863 if (hasLifetimeMarkers(AI))
1864 continue;
1866 // Try to determine the size of the allocation.
1867 ConstantInt *AllocaSize = nullptr;
1868 if (ConstantInt *AIArraySize =
1869 dyn_cast<ConstantInt>(AI->getArraySize())) {
1870 auto &DL = Caller->getParent()->getDataLayout();
1871 Type *AllocaType = AI->getAllocatedType();
1872 uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
1873 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
1875 // Don't add markers for zero-sized allocas.
1876 if (AllocaArraySize == 0)
1877 continue;
1879 // Check that array size doesn't saturate uint64_t and doesn't
1880 // overflow when it's multiplied by type size.
1881 if (AllocaArraySize != ~0ULL &&
1882 UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
1883 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
1884 AllocaArraySize * AllocaTypeSize);
1885 }
1886 }
1888 builder.CreateLifetimeStart(AI, AllocaSize);
1889 for (ReturnInst *RI : Returns) {
1890 // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
1891 // call and a return. The return kills all local allocas.
1892 if (InlinedMustTailCalls &&
1893 RI->getParent()->getTerminatingMustTailCall())
1894 continue;
1895 if (InlinedDeoptimizeCalls &&
1896 RI->getParent()->getTerminatingDeoptimizeCall())
1897 continue;
1898 IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
1899 }
1900 }
1901 }
1903 // If the inlined code contained dynamic alloca instructions, wrap the inlined
1904 // code with llvm.stacksave/llvm.stackrestore intrinsics.
1905 if (InlinedFunctionInfo.ContainsDynamicAllocas) {
1906 Module *M = Caller->getParent();
1907 // Get the two intrinsics we care about.
1908 Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
1909 Function *StackRestore = Intrinsic::getDeclaration(M, Intrinsic::stackrestore);
1911 // Insert the llvm.stacksave.
1912 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
1913 .CreateCall(StackSave, {}, "savedstack");
1915 // Insert a call to llvm.stackrestore before any return instructions in the
1916 // inlined function.
1917 for (ReturnInst *RI : Returns) {
1918 // Don't insert llvm.stackrestore calls between a musttail or deoptimize
1919 // call and a return. The return will restore the stack pointer.
1920 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
1921 continue;
1922 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
1923 continue;
1924 IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
1925 }
1926 }
1928 // If we are inlining for an invoke instruction, we must make sure to rewrite
1929 // any call instructions into invoke instructions. This is sensitive to which
1930 // funclet pads were top-level in the inlinee, so must be done before
1931 // rewriting the "parent pad" links.
1932 if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
1933 BasicBlock *UnwindDest = II->getUnwindDest();
1934 Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
1935 if (isa<LandingPadInst>(FirstNonPHI)) {
1936 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
1937 } else {
1938 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
1939 }
1940 }
1942 // Update the lexical scopes of the new funclets and callsites.
1943 // Anything that had 'none' as its parent is now nested inside the callsite's
1944 // EHPad.
1946 if (CallSiteEHPad) {
1947 for (Function::iterator BB = FirstNewBlock->getIterator(),
1948 E = Caller->end(); BB != E; ++BB) {
1950 // Add bundle operands to any top-level call sites.
1951 SmallVector<OperandBundleDef, 1> OpBundles;
1952 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
1953 Instruction *I = &*BBI++;
1954 CallSite CS(I);
1955 if (!CS)
1956 continue;
1958 // Skip call sites which are nounwind intrinsics.
1959 auto *CalledFn =
1960 dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
1961 if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
1962 continue;
1964 // Skip call sites which already have a "funclet" bundle.
1965 if (CS.getOperandBundle(LLVMContext::OB_funclet))
1966 continue;
1968 CS.getOperandBundlesAsDefs(OpBundles);
1969 OpBundles.emplace_back("funclet", CallSiteEHPad);
1971 Instruction *NewInst;
1972 if (CS.isCall())
1973 NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
1974 else
1975 NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
1976 NewInst->takeName(I);
1977 I->replaceAllUsesWith(NewInst);
1978 I->eraseFromParent();
1980 OpBundles.clear();
1981 }
1983 // It is problematic if the inlinee has a cleanupret which unwinds to
1984 // caller and we inline it into a call site which doesn't unwind but into
1985 // an EH pad that does. Such an edge must be dynamically unreachable.
1986 // As such, we replace the cleanupret with unreachable.
1987 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
1988 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
1989 changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
1991 Instruction *I = BB->getFirstNonPHI();
1992 if (!I->isEHPad())
1993 continue;
1995 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
1996 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
1997 CatchSwitch->setParentPad(CallSiteEHPad);
1998 } else {
1999 auto *FPI = cast<FuncletPadInst>(I);
2000 if (isa<ConstantTokenNone>(FPI->getParentPad()))
2001 FPI->setParentPad(CallSiteEHPad);
2002 }
2003 }
2004 }
2006 if (InlinedDeoptimizeCalls) {
2007 // We need to at least remove the deoptimizing returns from the Return set,
2008 // so that the control flow from those returns does not get merged into the
2009 // caller (but terminate it instead). If the caller's return type does not
2010 // match the callee's return type, we also need to change the return type of
2011 // the intrinsic.
2012 if (Caller->getReturnType() == TheCall->getType()) {
2013 auto NewEnd = remove_if(Returns, [](ReturnInst *RI) {
2014 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2015 });
2016 Returns.erase(NewEnd, Returns.end());
2017 } else {
2018 SmallVector<ReturnInst *, 8> NormalReturns;
2019 Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2020 Caller->getParent(), Intrinsic::experimental_deoptimize,
2021 {Caller->getReturnType()});
2023 for (ReturnInst *RI : Returns) {
2024 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2025 if (!DeoptCall) {
2026 NormalReturns.push_back(RI);
2027 continue;
2028 }
2030 // The calling convention on the deoptimize call itself may be bogus,
2031 // since the code we're inlining may have undefined behavior (and may
2032 // never actually execute at runtime); but all
2033 // @llvm.experimental.deoptimize declarations have to have the same
2034 // calling convention in a well-formed module.
2035 auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2036 NewDeoptIntrinsic->setCallingConv(CallingConv);
2037 auto *CurBB = RI->getParent();
2038 RI->eraseFromParent();
2040 SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
2041 DeoptCall->arg_end());
2043 SmallVector<OperandBundleDef, 1> OpBundles;
2044 DeoptCall->getOperandBundlesAsDefs(OpBundles);
2045 DeoptCall->eraseFromParent();
2046 assert(!OpBundles.empty() &&
2047 "Expected at least the deopt operand bundle");
2049 IRBuilder<> Builder(CurBB);
2050 CallInst *NewDeoptCall =
2051 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2052 NewDeoptCall->setCallingConv(CallingConv);
2053 if (NewDeoptCall->getType()->isVoidTy())
2054 Builder.CreateRetVoid();
2055 else
2056 Builder.CreateRet(NewDeoptCall);
2057 }
2059 // Leave behind the normal returns so we can merge control flow.
2060 std::swap(Returns, NormalReturns);
2061 }
2062 }
2064 // Handle any inlined musttail call sites. In order for a new call site to be
2065 // musttail, the source of the clone and the inlined call site must have been
2066 // musttail. Therefore it's safe to return without merging control into the
2067 // phi below.
2068 if (InlinedMustTailCalls) {
2069 // Check if we need to bitcast the result of any musttail calls.
2070 Type *NewRetTy = Caller->getReturnType();
2071 bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
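// After inlining, a cloned musttail call may produce a value whose type
// differs from the new caller's return type; in that case the ret is rebuilt
// below with a bitcast of the musttail call's result.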
2073 // Handle the returns preceded by musttail calls separately.
2074 SmallVector<ReturnInst *, 8> NormalReturns;
2075 for (ReturnInst *RI : Returns) {
2076 CallInst *ReturnedMustTail =
2077 RI->getParent()->getTerminatingMustTailCall();
2078 if (!ReturnedMustTail) {
2079 NormalReturns.push_back(RI);
2080 continue;
2081 }
2082 if (!NeedBitCast)
2083 continue;
2085 // Delete the old return and any preceding bitcast.
2086 BasicBlock *CurBB = RI->getParent();
2087 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2088 RI->eraseFromParent();
2089 if (OldCast)
2090 OldCast->eraseFromParent();
2092 // Insert a new bitcast and return with the right type.
2093 IRBuilder<> Builder(CurBB);
2094 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2095 }
2097 // Leave behind the normal returns so we can merge control flow.
2098 std::swap(Returns, NormalReturns);
2099 }
2101 // Now that all of the transforms on the inlined code have taken place but
2102 // before we splice the inlined code into the CFG and lose track of which
2103 // blocks were actually inlined, collect the call sites. We only do this if
2104 // call graph updates weren't requested, as those provide value handle based
2105 // tracking of inlined call sites instead.
2106 if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2107 // Otherwise just collect the raw call sites that were inlined.
2108 for (BasicBlock &NewBB :
2109 make_range(FirstNewBlock->getIterator(), Caller->end()))
2110 for (Instruction &I : NewBB)
2111 if (auto CS = CallSite(&I))
2112 IFI.InlinedCallSites.push_back(CS);
2113 }
2115 // If we cloned in _exactly one_ basic block, and if that block ends in a
2116 // return instruction, we splice the body of the inlined callee directly into
2117 // the calling basic block.
2118 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2119 // Move all of the instructions right before the call.
2120 OrigBB->getInstList().splice(TheCall->getIterator(),
2121 FirstNewBlock->getInstList(),
2122 FirstNewBlock->begin(), FirstNewBlock->end());
2123 // Remove the cloned basic block.
2124 Caller->getBasicBlockList().pop_back();
2126 // If the call site was an invoke instruction, add a branch to the normal
2127 // destination.
2128 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2129 BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
2130 NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2131 }
2133 // If the return instruction returned a value, replace uses of the call with
2134 // uses of the returned value.
2135 if (!TheCall->use_empty()) {
2136 ReturnInst *R = Returns[0];
2137 if (TheCall == R->getReturnValue())
2138 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2139 else
2140 TheCall->replaceAllUsesWith(R->getReturnValue());
2141 }
2142 // Since we are now done with the Call/Invoke, we can delete it.
2143 TheCall->eraseFromParent();
2145 // Since we are now done with the return instruction, delete it also.
2146 Returns[0]->eraseFromParent();
2148 // We are now done with the inlining.
2149 return true;
2150 }
2152 // Otherwise, we have the normal case, of more than one block to inline or
2153 // multiple return sites.
2155 // We want to clone the entire callee function into the hole between the
2156 // "starter" and "ender" blocks. How we accomplish this depends on whether
2157 // this is an invoke instruction or a call instruction.
2158 BasicBlock *AfterCallBB;
2159 BranchInst *CreatedBranchToNormalDest = nullptr;
2160 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2162 // Add an unconditional branch to make this look like the CallInst case...
2163 CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);
2165 // Split the basic block. This guarantees that no PHI nodes will have to be
2166 // updated due to new incoming edges, and make the invoke case more
2167 // symmetric to the call case.
2168 AfterCallBB =
2169 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2170 CalledFunc->getName() + ".exit");
2172 } else { // It's a call
2173 // If this is a call instruction, we need to split the basic block that
2174 // the call lives in.
2176 AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
2177 CalledFunc->getName() + ".exit");
2178 }
2180 if (IFI.CallerBFI) {
2181 // Copy original BB's block frequency to AfterCallBB
2182 IFI.CallerBFI->setBlockFreq(
2183 AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2184 }
2186 // Change the branch that used to go to AfterCallBB to branch to the first
2187 // basic block of the inlined function.
2189 TerminatorInst *Br = OrigBB->getTerminator();
2190 assert(Br && Br->getOpcode() == Instruction::Br &&
2191 "splitBasicBlock broken!");
2192 Br->setOperand(0, &*FirstNewBlock);
2194 // Now that the function is correct, make it a little bit nicer. In
2195 // particular, move the basic blocks inserted from the end of the function
2196 // into the space made by splitting the source basic block.
2197 Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2198 Caller->getBasicBlockList(), FirstNewBlock,
2199 Caller->end());
2201 // Handle all of the return instructions that we just cloned in, and eliminate
2202 // any users of the original call/invoke instruction.
2203 Type *RTy = CalledFunc->getReturnType();
2205 PHINode *PHI = nullptr;
2206 if (Returns.size() > 1) {
2207 // The PHI node should go at the front of the new basic block to merge all
2208 // possible incoming values.
2209 if (!TheCall->use_empty()) {
2210 PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
2211 &AfterCallBB->front());
2212 // Anything that used the result of the function call should now use the
2213 // PHI node as their operand.
2214 TheCall->replaceAllUsesWith(PHI);
2215 }
2217 // Loop over all of the return instructions adding entries to the PHI node
2218 // as appropriate.
2219 if (PHI) {
2220 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2221 ReturnInst *RI = Returns[i];
2222 assert(RI->getReturnValue()->getType() == PHI->getType() &&
2223 "Ret value not consistent in function!");
2224 PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2225 }
2226 }
2228 // Add a branch to the merge points and remove return instructions.
2229 DebugLoc Loc;
2230 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2231 ReturnInst *RI = Returns[i];
2232 BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2233 Loc = RI->getDebugLoc();
2234 BI->setDebugLoc(Loc);
2235 RI->eraseFromParent();
2236 }
2237 // We need to set the debug location to *somewhere* inside the
2238 // inlined function. The line number may be nonsensical, but the
2239 // instruction will at least be associated with the right
2240 // function.
2241 if (CreatedBranchToNormalDest)
2242 CreatedBranchToNormalDest->setDebugLoc(Loc);
2243 } else if (!Returns.empty()) {
2244 // Otherwise, if there is exactly one return value, just replace anything
2245 // using the return value of the call with the computed value.
2246 if (!TheCall->use_empty()) {
2247 if (TheCall == Returns[0]->getReturnValue())
2248 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2249 else
2250 TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
2251 }
2253 // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2254 BasicBlock *ReturnBB = Returns[0]->getParent();
2255 ReturnBB->replaceAllUsesWith(AfterCallBB);
2257 // Splice the code from the return block into the block that it will return
2258 // to, which contains the code that was after the call.
2259 AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2260 ReturnBB->getInstList());
2262 if (CreatedBranchToNormalDest)
2263 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2265 // Delete the return instruction now and empty ReturnBB now.
2266 Returns[0]->eraseFromParent();
2267 ReturnBB->eraseFromParent();
2268 } else if (!TheCall->use_empty()) {
2269 // No returns, but something is using the return value of the call. Just
2270 // nuke the result.
2271 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2272 }
2274 // Since we are now done with the Call/Invoke, we can delete it.
2275 TheCall->eraseFromParent();
2277 // If we inlined any musttail calls and the original return is now
2278 // unreachable, delete it. It can only contain a bitcast and ret.
2279 if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2280 AfterCallBB->eraseFromParent();
2282 // We should always be able to fold the entry block of the function into the
2283 // single predecessor of the block...
2284 assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2285 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2287 // Splice the code entry block into calling block, right before the
2288 // unconditional branch.
2289 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
2290 OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2292 // Remove the unconditional branch.
2293 OrigBB->getInstList().erase(Br);
2295 // Now we can remove the CalleeEntry block, which is now empty.
2296 Caller->getBasicBlockList().erase(CalleeEntry);
2298 // If we inserted a phi node, check to see if it has a single value (e.g. all
2299 // the entries are the same or undef). If so, remove the PHI so it doesn't
2300 // block other optimizations.
2301 if (PHI) {
2302 AssumptionCache *AC =
2303 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
2304 auto &DL = Caller->getParent()->getDataLayout();
2305 if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2306 PHI->replaceAllUsesWith(V);
2307 PHI->eraseFromParent();
2308 }
2309 }
2311 return true;
2312 }