//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to implement inlining without
// missing any calls and updating the call graph. The decisions of which calls
// are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Inliner.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

// This weirdly named statistic tracks the number of times that, when
// attempting to inline a function A into B, we analyze B's callers to see
// whether inlining B into them first would be more profitable and would block
// this inline step.
STATISTIC(NumCallerCallersAnalyzed, "Number of caller-callers analyzed");

/// Flag to disable manual alloca merging.
///
/// Merging of allocas was originally done as a stack-size saving technique
/// prior to LLVM's code generator having support for stack coloring based on
/// lifetime markers. It is now in the process of being removed. To experiment
/// with disabling it and relying fully on lifetime marker based stack
/// coloring, you can pass this flag to LLVM.
static cl::opt<bool>
    DisableInlinedAllocaMerging("disable-inlined-alloca-merging",
                                cl::init(false), cl::Hidden);

namespace {
enum class InlinerFunctionImportStatsOpts {
  No = 0,
  Basic = 1,
  Verbose = 2,
};

cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats(
    "inliner-function-import-stats",
    cl::init(InlinerFunctionImportStatsOpts::No),
    cl::values(clEnumValN(InlinerFunctionImportStatsOpts::Basic, "basic",
                          "basic statistics"),
               clEnumValN(InlinerFunctionImportStatsOpts::Verbose, "verbose",
                          "printing of statistics for each inlined function")),
    cl::Hidden, cl::desc("Enable inliner stats for imported functions"));
} // namespace

LegacyInlinerBase::LegacyInlinerBase(char &ID)
    : CallGraphSCCPass(ID), InsertLifetime(true) {}

LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime)
    : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {}

/// For this class, we declare that we require and preserve the call graph.
/// If the derived class implements this method, it should
/// always explicitly call the implementation here.
void LegacyInlinerBase::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  getAAResultsAnalysisUsage(AU);
  CallGraphSCCPass::getAnalysisUsage(AU);
}

typedef DenseMap<ArrayType *, std::vector<AllocaInst *>> InlinedArrayAllocasTy;

/// Look at all of the allocas that we inlined through this call site. If we
/// have already inlined other allocas through other calls into this function,
/// then we know that they have disjoint lifetimes and that we can merge them.
///
/// There are many heuristics possible for merging these allocas, and the
/// different options have different tradeoffs. One thing that we *really*
/// don't want to hurt is SRoA: once inlining happens, often allocas are no
/// longer address taken and so they can be promoted.
///
/// Our "solution" for that is to only merge allocas whose outermost type is an
/// array type. These are usually not promoted because someone is using a
/// variable index into them. These are also often the most important ones to
/// merge.
///
/// A better solution would be to have real memory lifetime markers in the IR
/// and not have the inliner do any merging of allocas at all. This would
/// allow the backend to do proper stack slot coloring of all allocas that
/// *actually make it to the backend*, which is really what we want.
///
/// Because we don't have this information, we do this simple and useful hack.
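///
/// As a hypothetical illustration (not IR from any real test): if two calls
/// inlined into the same caller each brought in
///   %buf = alloca [16 x i32]
/// the second alloca can be replaced with the first, because the two inlined
/// bodies never execute concurrently within the caller, so the slots'
/// lifetimes are disjoint.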
static void mergeInlinedArrayAllocas(
    Function *Caller, InlineFunctionInfo &IFI,
    InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory) {
  SmallPtrSet<AllocaInst *, 16> UsedAllocas;

  // When processing our SCC, check to see if CS was inlined from some other
  // call site. For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A. Doing this makes an alloca available for
  // reuse and makes a callsite (C) available for inlining. When we process
  // the C call site we don't want to do any alloca merging between x and y
  // because their scopes are not disjoint. We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas but this isn't likely to be a significant win.
  if (InlineHistory != -1) // Only do merging for top-level call sites in SCC.
    return;

  // Loop over all the allocas we have so far and see if they can be merged
  // with a previously inlined alloca. If not, remember that we had it.
  for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size(); AllocaNo != e;
       ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst *> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one. Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined; we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function. Also, AllocasForType can be empty of course!
    bool MergedAwayAlloca = false;
    for (AllocaInst *AvailableAlloca : AllocasForType) {
      unsigned Align1 = AI->getAlignment(),
               Align2 = AvailableAlloca->getAlignment();

      // The available alloca has to be in the right function, not in some
      // other function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca).second)
        continue;

      // Otherwise, we *can* reuse it: RAUW AI into AvailableAlloca and declare
      // success!
      DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI
                   << "\n\t\tINTO: " << *AvailableAlloca << '\n');

      // Move affected dbg.declare calls immediately after the new alloca to
      // avoid the situation when a dbg.declare precedes its alloca.
      if (auto *L = LocalAsMetadata::getIfExists(AI))
        if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
          for (User *U : MDV->users())
            if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
              DDI->moveBefore(AvailableAlloca->getNextNode());

      AI->replaceAllUsesWith(AvailableAlloca);

      if (Align1 != Align2) {
        if (!Align1 || !Align2) {
          const DataLayout &DL = Caller->getParent()->getDataLayout();
          unsigned TypeAlign = DL.getABITypeAlignment(AI->getAllocatedType());

          Align1 = Align1 ? Align1 : TypeAlign;
          Align2 = Align2 ? Align2 : TypeAlign;
        }

        if (Align1 > Align2)
          AvailableAlloca->setAlignment(AI->getAlignment());
      }

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and mark
    // it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }
}

/// If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR. The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller. If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static bool InlineCallIfPossible(
    CallSite CS, InlineFunctionInfo &IFI,
    InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory,
    bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter,
    ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  Function *Callee = CS.getCalledFunction();
  Function *Caller = CS.getCaller();

  AAResults &AAR = AARGetter(*Callee);

  // Try to inline the function. Get the list of static allocas that were
  // inlined.
  if (!InlineFunction(CS, IFI, &AAR, InsertLifetime))
    return false;

  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.recordInline(*Caller, *Callee);

  AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee);

  if (!DisableInlinedAllocaMerging)
    mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory);

  return true;
}

/// Return true if inlining of CS can block the caller from being inlined when
/// inlining the caller is proved to be more beneficial. \p IC is the
/// estimated inline cost associated with callsite \p CS.
/// \p TotalSecondaryCost will be set to the estimated cost of inlining the
/// caller if \p CS is suppressed for inlining.
static bool
shouldBeDeferred(Function *Caller, CallSite CS, InlineCost IC,
                 int &TotalSecondaryCost,
                 function_ref<InlineCost(CallSite CS)> GetInlineCost) {
  // For now we only handle local or inline functions.
  if (!Caller->hasLocalLinkage() && !Caller->hasLinkOnceODRLinkage())
    return false;
  // Try to detect the case where the current inlining candidate caller (call
  // it B) is a static or linkonce-ODR function and is an inlining candidate
  // elsewhere, and the current candidate callee (call it C) is large enough
  // that inlining it into B would make B too big to inline later. In these
  // circumstances it may be best not to inline C into B, but to inline B into
  // its callers.
  //
  // This only applies to static and linkonce-ODR functions because those are
  // expected to be available for inlining in the translation units where they
  // are used. Thus we will always have the opportunity to make local inlining
  // decisions. Importantly the linkonce-ODR linkage covers inline functions
  // and templates in C++.
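  //
  // Hypothetical example: suppose a static function B is called from three
  // sites and currently sits just under the inline threshold. Inlining a
  // large callee C into B could push B over the threshold at all three of its
  // call sites, so deferring C (and instead inlining B into its callers) may
  // be cheaper overall; TotalSecondaryCost models that alternative below.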
  //
  // FIXME: All of this logic should be sunk into getInlineCost. It relies on
  // the internal implementation of the inline cost metrics rather than
  // treating them as truly abstract units etc.
  TotalSecondaryCost = 0;
  // The candidate cost to be imposed upon the current function.
  int CandidateCost = IC.getCost() - (InlineConstants::CallPenalty + 1);
  // This bool tracks what happens if we do NOT inline C into B.
  bool callerWillBeRemoved = Caller->hasLocalLinkage();
  // This bool tracks what happens if we DO inline C into B.
  bool inliningPreventsSomeOuterInline = false;
  for (User *U : Caller->users()) {
    CallSite CS2(U);

    // If this isn't a call to Caller (it could be some other sort
    // of reference) skip it. Such references will prevent the caller
    // from being removed.
    if (!CS2 || CS2.getCalledFunction() != Caller) {
      callerWillBeRemoved = false;
      continue;
    }

    InlineCost IC2 = GetInlineCost(CS2);
    ++NumCallerCallersAnalyzed;
    if (!IC2) {
      callerWillBeRemoved = false;
      continue;
    }
    if (IC2.isAlways())
      continue;

    // See if inlining of the original callsite would erase the cost delta of
    // this callsite. We subtract off the penalty for the call instruction,
    // which we would be deleting.
    if (IC2.getCostDelta() <= CandidateCost) {
      inliningPreventsSomeOuterInline = true;
      TotalSecondaryCost += IC2.getCost();
    }
  }
  // If all outer calls to Caller would get inlined, the cost for the last
  // one is set very low by getInlineCost, in anticipation that Caller will
  // be removed entirely. We did not account for this above unless there
  // is only one caller of Caller.
  if (callerWillBeRemoved && !Caller->use_empty())
    TotalSecondaryCost -= InlineConstants::LastCallToStaticBonus;

  if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost())
    return true;

  return false;
}

/// Return true if the inliner should attempt to inline at the given CallSite.
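///
/// (Reporting note: InlineCost::getCostDelta() returns the threshold minus the
/// cost, so the "thres=" values printed below are reconstructed as
/// Cost + CostDelta.)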
static bool shouldInline(CallSite CS,
                         function_ref<InlineCost(CallSite CS)> GetInlineCost,
                         OptimizationRemarkEmitter &ORE) {
  using namespace ore;
  InlineCost IC = GetInlineCost(CS);
  Instruction *Call = CS.getInstruction();
  Function *Callee = CS.getCalledFunction();

  if (IC.isAlways()) {
    DEBUG(dbgs() << "    Inlining: cost=always"
                 << ", Call: " << *CS.getInstruction() << "\n");
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AlwaysInline", Call)
             << NV("Callee", Callee)
             << " should always be inlined (cost=always)");
    return true;
  }

  if (IC.isNever()) {
    DEBUG(dbgs() << "    NOT Inlining: cost=never"
                 << ", Call: " << *CS.getInstruction() << "\n");
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "NeverInline", Call)
             << NV("Callee", Callee)
             << " should never be inlined (cost=never)");
    return false;
  }

  Function *Caller = CS.getCaller();
  if (!IC) {
    DEBUG(dbgs() << "    NOT Inlining: cost=" << IC.getCost()
                 << ", thres=" << (IC.getCostDelta() + IC.getCost())
                 << ", Call: " << *CS.getInstruction() << "\n");
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "TooCostly", Call)
             << NV("Callee", Callee) << " too costly to inline (cost="
             << NV("Cost", IC.getCost()) << ", threshold="
             << NV("Threshold", IC.getCostDelta() + IC.getCost()) << ")");
    return false;
  }

  int TotalSecondaryCost = 0;
  if (shouldBeDeferred(Caller, CS, IC, TotalSecondaryCost, GetInlineCost)) {
    DEBUG(dbgs() << "    NOT Inlining: " << *CS.getInstruction()
                 << " Cost = " << IC.getCost()
                 << ", outer Cost = " << TotalSecondaryCost << '\n');
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE,
                                        "IncreaseCostInOtherContexts", Call)
             << "Not inlining. Cost of inlining " << NV("Callee", Callee)
             << " increases the cost of inlining " << NV("Caller", Caller)
             << " in other contexts");
    return false;
  }

  DEBUG(dbgs() << "    Inlining: cost=" << IC.getCost()
               << ", thres=" << (IC.getCostDelta() + IC.getCost())
               << ", Call: " << *CS.getInstruction() << '\n');
  ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "CanBeInlined", Call)
           << NV("Callee", Callee) << " can be inlined into "
           << NV("Caller", Caller) << " with cost=" << NV("Cost", IC.getCost())
           << " (threshold="
           << NV("Threshold", IC.getCostDelta() + IC.getCost()) << ")");
  return true;
}

/// Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
static bool InlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}

bool LegacyInlinerBase::doInitialization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.setModuleInfo(CG.getModule());
  return false; // No changes to CallGraph.
}

bool LegacyInlinerBase::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;
  return inlineCalls(SCC);
}

static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI, TargetLibraryInfo &TLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallSite CS)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
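  //
  // Hypothetical walk-through: inlining B into A at a top-level call site
  // (history -1) appends {B, -1} at index 0, and a call to C exposed by that
  // inline is queued with history ID 0. If C's body later exposes a call back
  // to B, InlineHistoryIncludes(B, 0, ...) walks 0 -> -1, finds B on the
  // chain, and the inliner refuses to inline B again through that path.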
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        CallSite CS(cast<Value>(&I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never
        // inline it. If it is an indirect call, inlining may resolve it to be
        // a direct call, so we keep it.
        if (Function *Callee = CS.getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;
            ORE.emit(OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CS.getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose());
            continue;
          }

        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this function, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);
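  // Note on the swap above: the element moved down into slot i has not been
  // classified yet, so i is decremented to revisit it, while FirstCallInSCC
  // shrinks toward the barrier separating non-SCC from SCC call sites.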

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, &GetAssumptionCache);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;

    // Iterate over the outer loop because inlining functions can cause
    // indirect calls to become direct calls.
    // CallSites may be modified inside, so a ranged for loop cannot be used.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;

      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size. This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (isInstructionTriviallyDead(CS.getInstruction(), &TLI)) {
        DEBUG(dbgs() << "    -> Deleting dead call: " << *CS.getInstruction()
                     << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // We can only inline direct calls to non-declarations.
        if (!Callee || Callee->isDeclaration())
          continue;

        // If this call site was obtained by inlining another function, verify
        // that the include path for the function did not include the callee
        // itself. If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        int InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;

        // Get DebugLoc to report. CS will be invalid after Inliner.
        DebugLoc DLoc = CS.getInstruction()->getDebugLoc();
        BasicBlock *Block = CS.getParent();
        // FIXME for new PM: because of the old PM we currently generate ORE and
        // in turn BFI on demand. With the new PM, the ORE dependency should
        // just become a regular analysis dependency.
        OptimizationRemarkEmitter ORE(Caller);

        // If the policy determines that we should inline this function,
        // attempt to do so.
        using namespace ore;
        if (!shouldInline(CS, GetInlineCost, ORE)) {
          ORE.emit(
              OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, Block)
              << NV("Callee", Callee) << " will not be inlined into "
              << NV("Caller", Caller));
          continue;
        }

        // Attempt to inline the function.
        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime, AARGetter,
                                  ImportedFunctionsStats)) {
          ORE.emit(
              OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, Block)
              << NV("Callee", Callee) << " will not be inlined into "
              << NV("Caller", Caller));
          continue;
        }
        ++NumInlined;

        // Report the inline decision.
        ORE.emit(OptimizationRemark(DEBUG_TYPE, "Inlined", DLoc, Block)
                 << NV("Callee", Callee) << " inlined into "
                 << NV("Caller", Caller));

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process. They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (Value *Ptr : InlineInfo.InlinedCalls)
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&

          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << "    -> Deleting dead function: " << Callee->getName()
                     << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list. If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

bool LegacyInlinerBase::inlineCalls(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  ACT = &getAnalysis<AssumptionCacheTracker>();
  PSI = getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  // We compute dedicated AA results for each function in the SCC as needed. We
  // use a lambda referencing external objects so that they live long enough to
  // be queried, but we re-use them each time.
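  // (Note: each AARGetter call re-emplaces BAR and AAR, destroying the
  // previous function's results, so only the most recently requested
  // function's AAResults remain valid at any point.)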
  Optional<BasicAAResult> BAR;
  Optional<AAResults> AAR;
  auto AARGetter = [&](Function &F) -> AAResults & {
    BAR.emplace(createLegacyPMBasicAAResult(*this, F));
    AAR.emplace(createLegacyPMAAResults(*this, F, *BAR));
    return *AAR;
  };
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return ACT->getAssumptionCache(F);
  };
  return inlineCallsImpl(SCC, CG, GetAssumptionCache, PSI, TLI, InsertLifetime,
                         [this](CallSite CS) { return getInlineCost(CS); },
                         AARGetter, ImportedFunctionsStats);
}

/// Remove now-dead linkonce functions at the end of
/// processing to avoid breaking the SCC traversal.
bool LegacyInlinerBase::doFinalization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.dump(InlinerFunctionImportStats ==
                                InlinerFunctionImportStatsOpts::Verbose);
  return removeDeadFunctions(CG);
}

/// Remove dead functions that are not included in the DNR (Do Not Remove)
/// list.
bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
                                            bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode *, 16> FunctionsToRemove;
  SmallVector<Function *, 16> DeadFunctionsInComdats;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node. These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the node to be removed from the call graph and deleted.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program. Insert the dead ones in the FunctionsToRemove set.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (F->hasComdat()) {
        DeadFunctionsInComdats.push_back(F);
        continue;
      }
    }

    RemoveCGN(CGN);
  }

  if (!DeadFunctionsInComdats.empty()) {
    // Filter out the functions whose comdats remain alive.
    filterDeadComdatFunctions(CG.getModule(), DeadFunctionsInComdats);
    // Remove the rest.
    for (Function *F : DeadFunctionsInComdats)
      RemoveCGN(CG[F]);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so. We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here to do this, it doesn't matter which order the functions are deleted
  // in.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}

PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
                                   CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                   CGSCCUpdateResult &UR) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG)
          .getManager();
  const ModuleAnalysisManager &MAM =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG).getManager();
  bool Changed = false;

  assert(InitialC.size() > 0 && "Cannot handle an empty SCC!");
  Module &M = *InitialC.begin()->getFunction().getParent();
  ProfileSummaryInfo *PSI = MAM.getCachedResult<ProfileSummaryAnalysis>(M);

  std::function<AssumptionCache &(Function &)> GetAssumptionCache =
      [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };

  // Setup the data structure used to plumb customization into the
  // `InlineFunction` routine.
  InlineFunctionInfo IFI(/*cg=*/nullptr, &GetAssumptionCache);

  auto GetInlineCost = [&](CallSite CS) {
    Function &Callee = *CS.getCalledFunction();
    auto &CalleeTTI = FAM.getResult<TargetIRAnalysis>(Callee);
    return getInlineCost(CS, Params, CalleeTTI, GetAssumptionCache, PSI);
  };

  // We use a worklist of nodes to process so that we can handle if the SCC
  // structure changes and some nodes are no longer part of the current SCC. We
  // also need to use an updatable pointer for the SCC as a consequence.
  SmallVector<LazyCallGraph::Node *, 16> Nodes;
  for (auto &N : InitialC)
    Nodes.push_back(&N);
  auto *C = &InitialC;
  auto *RC = &C->getOuterRefSCC();

  // We also use a secondary worklist of call sites within a particular node to
  // allow quickly continuing to inline through newly inlined call sites where
  // possible.
  SmallVector<std::pair<CallSite, int>, 16> Calls;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 16> InlineHistory;

  // Track a set vector of inlined callees so that we can augment the caller
  // with all of their edges in the call graph before pruning out the ones that
  // got simplified away.
  SmallSetVector<Function *, 4> InlinedCallees;

  // Track the dead functions to delete once finished with inlining calls. We
  // defer deleting these to make it easier to handle the call graph updates.
  SmallVector<Function *, 4> DeadFunctions;

  do {
    auto &N = *Nodes.pop_back_val();
    if (CG.lookupSCC(N) != C)
      continue;
    Function &F = N.getFunction();
    if (F.hasFnAttribute(Attribute::OptimizeNone))
      continue;

    // Get the remarks emission analysis for the caller.
    auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);

    // We want to generally process call sites top-down in order for
    // simplifications stemming from replacing the call with the returned value
    // after inlining to be visible to subsequent inlining decisions. So we
    // walk the function backwards and then process the back of the vector.
    // FIXME: Using reverse is a really bad way to do this. Instead we should
    // do an actual PO walk of the function body.
    for (Instruction &I : reverse(instructions(F)))
      if (auto CS = CallSite(&I))
        if (Function *Callee = CS.getCalledFunction())
          if (!Callee->isDeclaration())
            Calls.push_back({CS, -1});

    bool DidInline = false;
    while (!Calls.empty()) {
      int InlineHistoryID;
      CallSite CS;
      std::tie(CS, InlineHistoryID) = Calls.pop_back_val();
      Function &Callee = *CS.getCalledFunction();

      if (InlineHistoryID != -1 &&
          InlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory))
        continue;

      // Check whether we want to inline this callsite.
      if (!shouldInline(CS, GetInlineCost, ORE))
        continue;

      if (!InlineFunction(CS, IFI))
        continue;
      DidInline = true;
      InlinedCallees.insert(&Callee);

      // Add any new callsites to defined functions to the worklist.
      if (!IFI.InlinedCallSites.empty()) {
        int NewHistoryID = InlineHistory.size();
        InlineHistory.push_back({&Callee, InlineHistoryID});
        for (CallSite &CS : reverse(IFI.InlinedCallSites))
          if (Function *NewCallee = CS.getCalledFunction())
            if (!NewCallee->isDeclaration())
              Calls.push_back({CS, NewHistoryID});
      }

      // Merge the attributes based on the inlining.
      AttributeFuncs::mergeAttributesForInlining(F, Callee);

      // For local functions, check whether this makes the callee trivially
      // dead. In that case, we can drop the body of the function eagerly
      // which may reduce the number of callers of other functions to one,
      // changing inline cost thresholds.
      if (Callee.hasLocalLinkage()) {
        // To check this we also need to nuke any dead constant uses (perhaps
        // made dead by this operation on other functions).
        Callee.removeDeadConstantUsers();
        if (Callee.use_empty()) {
          // Clear the body and queue the function itself for deletion when we
          // finish inlining and call graph updates.
          // Note that after this point, it is an error to do anything other
          // than use the callee's address or delete it.
          Callee.dropAllReferences();
          assert(find(DeadFunctions, &Callee) == DeadFunctions.end() &&
                 "Cannot cause a function to become dead twice!");
          DeadFunctions.push_back(&Callee);
        }
      }
    }

    if (!DidInline)
      continue;
    Changed = true;

    // Add all the inlined callees' edges as ref edges to the caller. These are
    // by definition trivial edges as we always have *some* transitive ref edge
    // chain. While in some cases these edges are direct calls inside the
    // callee, they have to be modeled in the inliner as reference edges as
    // there may be a reference edge anywhere along the chain from the current
    // caller to the callee that causes the whole thing to appear like
    // a (transitive) reference edge that will require promotion to a call edge
    // below.
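    //
    // (Hypothetical illustration: if A inlined B and B called C, A may now
    // call C directly; that edge is conservatively added as a ref edge here
    // and promoted to a call edge, if needed, by the CG update below.)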
    for (Function *InlinedCallee : InlinedCallees) {
      LazyCallGraph::Node &CalleeN = *CG.lookup(*InlinedCallee);
      for (LazyCallGraph::Edge &E : CalleeN)
        RC->insertTrivialRefEdge(N, *E.getNode());
    }
    InlinedCallees.clear();

    // At this point, since we have made changes we have at least removed
    // a call instruction. However, in the process we do some incremental
    // simplification of the surrounding code. This simplification can
    // essentially do all of the same things as a function pass and we can
    // re-use the exact same logic for updating the call graph to reflect the
    // change.
    C = &updateCGAndAnalysisManagerForFunctionPass(CG, *C, N, AM, UR);
    RC = &C->getOuterRefSCC();
  } while (!Nodes.empty());

  // Now that we've finished inlining all of the calls across this SCC, delete
  // all of the trivially dead functions, updating the call graph and the CGSCC
  // pass manager in the process.
  //
  // Note that this walks a pointer set which has non-deterministic order but
  // that is OK as all we do is delete things and add pointers to unordered
  // sets.
  for (Function *DeadF : DeadFunctions) {
    // Get the necessary information out of the call graph and nuke the
    // function there.
    auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF));
    auto &DeadRC = DeadC.getOuterRefSCC();
    CG.removeDeadFunction(*DeadF);

    // Mark the relevant parts of the call graph as invalid so we don't visit
    // them.
    UR.InvalidatedSCCs.insert(&DeadC);
    UR.InvalidatedRefSCCs.insert(&DeadRC);

    // And delete the actual function from the module.
    M.getFunctionList().erase(DeadF);
  }

  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}