//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
#define DEBUG_TYPE "inline-cost"

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");
static cl::opt<int> InlineThreshold(
    "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
    cl::desc("Control the amount of inlining to perform (default = 225)"));

static cl::opt<int> HintThreshold(
    "inlinehint-threshold", cl::Hidden, cl::init(325), cl::ZeroOrMore,
    cl::desc("Threshold for inlining functions with inline hint"));

static cl::opt<int>
    ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
                          cl::init(45), cl::ZeroOrMore,
                          cl::desc("Threshold for inlining cold callsites"));
// We introduce this threshold to help performance of instrumentation based
// PGO before we actually hook up inliner with analysis passes such as BPI and
// BFI.
static cl::opt<int> ColdThreshold(
    "inlinecold-threshold", cl::Hidden, cl::init(45), cl::ZeroOrMore,
    cl::desc("Threshold for inlining functions with cold attribute"));
static cl::opt<int>
    HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
                         cl::ZeroOrMore,
                         cl::desc("Threshold for hot callsites"));

static cl::opt<int> LocallyHotCallSiteThreshold(
    "locally-hot-callsite-threshold", cl::Hidden, cl::init(525), cl::ZeroOrMore,
    cl::desc("Threshold for locally hot callsites"));
static cl::opt<int> ColdCallSiteRelFreq(
    "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
    cl::desc("Maximum block frequency, expressed as a percentage of caller's "
             "entry frequency, for a callsite to be cold in the absence of "
             "profile information."));

static cl::opt<int> HotCallSiteRelFreq(
    "hot-callsite-rel-freq", cl::Hidden, cl::init(60), cl::ZeroOrMore,
    cl::desc("Minimum block frequency, expressed as a multiple of caller's "
             "entry frequency, for a callsite to be hot in the absence of "
             "profile information."));
static cl::opt<bool> OptComputeFullInlineCost(
    "inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
    cl::desc("Compute the full inline cost of a call site even when the cost "
             "exceeds the threshold."));

namespace {
class InlineCostCallAnalyzer;
class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

protected:
  virtual ~CallAnalyzer() {}
  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  /// Getter for the cache of @llvm.assume intrinsics.
  std::function<AssumptionCache &(Function &)> &GetAssumptionCache;

  /// Getter for BlockFrequencyInfo.
  Optional<function_ref<BlockFrequencyInfo &(Function &)>> &GetBFI;

  /// Profile summary information.
  ProfileSummaryInfo *PSI;

  /// The called function.
  Function &F;
  // Cache the DataLayout since we use it a lot.
  const DataLayout &DL;

  /// The OptimizationRemarkEmitter available for this compilation.
  OptimizationRemarkEmitter *ORE;

  /// The candidate callsite being analyzed. Please do not use this to do
  /// analysis in the caller function; we want the inline cost query to be
  /// easily cacheable. Instead, use the cover function paramHasAttr.
  CallBase &CandidateCall;

  /// Extension points for handling callsite features.
  /// Called after a basic block was analyzed.
  virtual void onBlockAnalyzed(const BasicBlock *BB) {}

  /// Called at the end of the analysis of the callsite. Return the outcome of
  /// the analysis, i.e. 'InlineResult(true)' if the inlining may happen, or
  /// the reason it can't.
  virtual InlineResult finalizeAnalysis() { return true; }
  /// Called when we're about to start processing a basic block, and every time
  /// we are done processing an instruction. Return true if there is no point in
  /// continuing the analysis (e.g. we've determined already the call site is
  /// too expensive to inline).
  virtual bool shouldStop() { return false; }

  /// Called before the analysis of the callee body starts (with callsite
  /// contexts propagated). It checks callsite-specific information. Return a
  /// reason analysis can't continue if that's the case, or 'true' if it may
  /// continue.
  virtual InlineResult onAnalysisStart() { return true; }

  /// Called if the analysis engine decides SROA cannot be done for the given
  /// alloca.
  virtual void onDisableSROA(AllocaInst *Arg) {}

  /// Called when the analysis engine determines load elimination won't happen.
  virtual void onDisableLoadElimination() {}
  /// Called to account for a call.
  virtual void onCallPenalty() {}

  /// Called to account for the expectation the inlining would result in a load
  /// elimination.
  virtual void onLoadEliminationOpportunity() {}

  /// Called to account for the cost of argument setup for the Call in the
  /// callee's body (not the callsite currently under analysis).
  virtual void onCallArgumentSetup(const CallBase &Call) {}

  /// Called to account for a load relative intrinsic.
  virtual void onLoadRelativeIntrinsic() {}

  /// Called to account for a lowered call.
  virtual void onLoweredCall(Function *F, CallBase &Call, bool IsIndirectCall) {
  }
  /// Account for a jump table of given size. Return false to stop further
  /// processing of the switch instruction.
  virtual bool onJumpTable(unsigned JumpTableSize) { return true; }

  /// Account for a case cluster of given size. Return false to stop further
  /// processing of the instruction.
  virtual bool onCaseCluster(unsigned NumCaseCluster) { return true; }

  /// Called at the end of processing a switch instruction, with the given
  /// number of case clusters.
  virtual void onFinalizeSwitch(unsigned JumpTableSize,
                                unsigned NumCaseCluster) {}

  /// Called to account for any other instruction not specifically accounted
  /// for.
  virtual void onCommonInstructionSimplification() {}
  /// Start accounting potential benefits due to SROA for the given alloca.
  virtual void onInitializeSROAArg(AllocaInst *Arg) {}

  /// Account SROA savings for the AllocaInst value.
  virtual void onAggregateSROAUse(AllocaInst *V) {}
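
  /// Account for a use of a value that is an SROA candidate. If inlining
  /// would let SROA rewrite the use away (\p DoNotDisable), record the
  /// saving; otherwise the candidate must be abandoned. For example,
  /// visitCmpInst passes DoNotDisable = true for an equality comparison
  /// against null, which SROA can resolve without materializing the alloca.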
  bool handleSROA(Value *V, bool DoNotDisable) {
    // Check for SROA candidates in comparisons.
    if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
      if (DoNotDisable) {
        onAggregateSROAUse(SROAArg);
        return true;
      }
      disableSROAForArg(SROAArg);
    }
    return false;
  }
  bool IsCallerRecursive = false;
  bool IsRecursiveCall = false;
  bool ExposesReturnsTwice = false;
  bool HasDynamicAlloca = false;
  bool ContainsNoDuplicateCall = false;
  bool HasReturn = false;
  bool HasIndirectBr = false;
  bool HasUninlineableIntrinsic = false;
  bool InitsVargArgs = false;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize = 0;
  unsigned NumInstructions = 0;
  unsigned NumVectorInstructions = 0;
  /// While we walk the potentially-inlined instructions, we build up and
  /// maintain a mapping of simplified values specific to this callsite. The
  /// idea is to propagate any special information we have about arguments to
  /// this call through the inlinable section of the function, and account for
  /// likely simplifications post-inlining. The most important aspect we track
  /// is CFG altering simplifications -- when we prove a basic block dead, that
  /// can cause dramatic shifts in the cost of inlining a function.
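  ///
  /// For example, when a constant argument lets a conditional branch fold,
  /// the never-taken successor is recorded in DeadBlocks below and the
  /// instructions it contains drop out of the estimated cost.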
  DenseMap<Value *, Constant *> SimplifiedValues;

  /// Keep track of the values which map back (through function arguments) to
  /// allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, AllocaInst *> SROAArgValues;

  /// Keep track of Allocas for which we believe we may get SROA optimization.
  /// We don't delete entries in SROAArgValues because we still want
  /// isAllocaDerivedArg to function correctly.
  DenseSet<AllocaInst *> EnabledSROAArgValues;

  /// Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;

  /// Keep track of dead blocks due to the constant arguments.
  SetVector<BasicBlock *> DeadBlocks;

  /// The mapping of the blocks to their known unique successors due to the
  /// constant arguments.
  DenseMap<BasicBlock *, BasicBlock *> KnownSuccessors;

  /// Model the elimination of repeated loads that is expected to happen
  /// whenever we simplify away the stores that would otherwise cause them to be
  /// loads.
  bool EnableLoadElimination;
  SmallPtrSet<Value *, 16> LoadAddrSet;
  AllocaInst *getSROAArgForValueOrNull(Value *V) const {
    auto It = SROAArgValues.find(V);
    if (It == SROAArgValues.end() ||
        EnabledSROAArgValues.count(It->second) == 0)
      return nullptr;
    return It->second;
  }
  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  void disableSROAForArg(AllocaInst *SROAArg);
  void disableSROA(Value *V);
  void findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB);
  void disableLoadElimination();
  bool isGEPFree(GetElementPtrInst &GEP);
  bool canFoldInboundsGEP(GetElementPtrInst &I);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallBase &Call);
  template <typename Callable>
  bool simplifyInstruction(Instruction &I, Callable Evaluate);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
  /// Return true if the given argument to the function being considered for
  /// inlining has the given attribute set either at the call site or the
  /// function declaration. Primarily used to inspect call site specific
  /// attributes since these can be more precise than the ones on the callee
  /// itself.
  bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);

  /// Return true if the given value is known non null within the callee if
  /// inlined through this particular callsite.
  bool isKnownNonNullInCallee(Value *V);

  /// Return true if size growth is allowed when inlining the callee at \p Call.
  bool allowSizeGrowth(CallBase &Call);
  // Custom analysis routines.
  InlineResult analyzeBlock(BasicBlock *BB,
                            SmallPtrSetImpl<const Value *> &EphValues);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *);
  void visit(Module &);
  void visit(Function *);
  void visit(Function &);
  void visit(BasicBlock *);
  void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);
  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitFNeg(UnaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallBase(CallBase &Call);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSelectInst(SelectInst &SI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitCleanupReturnInst(CleanupReturnInst &RI);
  bool visitCatchReturnInst(CatchReturnInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);
public:
  CallAnalyzer(const TargetTransformInfo &TTI,
               std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
               Optional<function_ref<BlockFrequencyInfo &(Function &)>> &GetBFI,
               ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE,
               Function &Callee, CallBase &Call)
      : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
        PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
        CandidateCall(Call), EnableLoadElimination(true) {}

  InlineResult analyze();
  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs = 0;
  unsigned NumConstantOffsetPtrArgs = 0;
  unsigned NumAllocaArgs = 0;
  unsigned NumConstantPtrCmps = 0;
  unsigned NumConstantPtrDiffs = 0;
  unsigned NumInstructionsSimplified = 0;

  void dump();
};
/// FIXME: if it is necessary to derive from InlineCostCallAnalyzer, note
/// the FIXME in onLoweredCall, when instantiating an InlineCostCallAnalyzer
class InlineCostCallAnalyzer final : public CallAnalyzer {
  const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
  const bool ComputeFullInlineCost;
  int LoadEliminationCost = 0;
  /// Bonus to be applied when percentage of vector instructions in callee is
  /// high (see more details in updateThreshold).
  int VectorBonus = 0;
  /// Bonus to be applied when the callee has only one reachable basic block.
  int SingleBBBonus = 0;
  /// Tunable parameters that control the analysis.
  const InlineParams &Params;

  /// Upper bound for the inlining cost. Bonuses are being applied to account
  /// for speculative "expected profit" of the inlining decision.
  int Threshold = 0;

  /// Attempt to evaluate indirect calls to boost their inline cost.
  const bool BoostIndirectCalls;

  /// Inlining cost measured in abstract units, accounts for all the
  /// instructions expected to be executed for a given function invocation.
  /// Instructions that are statically proven to be dead based on call-site
  /// arguments are not counted here.
  int Cost = 0;

  bool SingleBB = true;
  unsigned SROACostSavings = 0;
  unsigned SROACostSavingsLost = 0;

  /// The mapping of caller Alloca values to their accumulated cost savings. If
  /// we have to disable SROA for one of the allocas, this tells us how much
  /// cost must be added.
  DenseMap<AllocaInst *, int> SROAArgCosts;

  /// Return true if \p Call is a cold callsite.
  bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);

  /// Update Threshold based on callsite properties such as callee
  /// attributes and callee hotness for PGO builds. The Callee is explicitly
  /// passed to support analyzing indirect calls whose target is inferred by
  /// analysis.
  void updateThreshold(CallBase &Call, Function &Callee);

  /// Return a higher threshold if \p Call is a hot callsite.
  Optional<int> getHotCallSiteThreshold(CallBase &Call,
                                        BlockFrequencyInfo *CallerBFI);
  /// Handle a capped 'int' increment for Cost.
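  /// The addition is performed in int64_t, so Cost + Inc cannot overflow
  /// before being clamped to UpperBound (which the assert below keeps within
  /// 'int' range).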
  void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
    assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
    Cost = (int)std::min(UpperBound, Cost + Inc);
  }

  void onDisableSROA(AllocaInst *Arg) override {
    auto CostIt = SROAArgCosts.find(Arg);
    if (CostIt == SROAArgCosts.end())
      return;
    addCost(CostIt->second);
    SROACostSavings -= CostIt->second;
    SROACostSavingsLost += CostIt->second;
    SROAArgCosts.erase(CostIt);
  }
  void onDisableLoadElimination() override {
    addCost(LoadEliminationCost);
    LoadEliminationCost = 0;
  }

  void onCallPenalty() override { addCost(InlineConstants::CallPenalty); }

  void onCallArgumentSetup(const CallBase &Call) override {
    // Pay the price of the argument setup. We account for the average 1
    // instruction per call argument setup here.
    addCost(Call.arg_size() * InlineConstants::InstrCost);
  }
  void onLoadRelativeIntrinsic() override {
    // This is normally lowered to 4 LLVM instructions.
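    // The call instruction itself is presumably charged one InstrCost by the
    // generic per-instruction accounting, leaving the other three to be added
    // here.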
    addCost(3 * InlineConstants::InstrCost);
  }
  void onLoweredCall(Function *F, CallBase &Call,
                     bool IsIndirectCall) override {
    // We account for the average 1 instruction per call argument setup here.
    addCost(Call.arg_size() * InlineConstants::InstrCost);

    // If we have a constant that we are calling as a function, we can peer
    // through it and see the function target. This happens not infrequently
    // during devirtualization and so we want to give it a hefty bonus for
    // inlining, but cap that bonus in the event that inlining wouldn't pan out.
    // Pretend to inline the function, with a custom threshold.
    if (IsIndirectCall && BoostIndirectCalls) {
      auto IndirectCallParams = Params;
      IndirectCallParams.DefaultThreshold =
          InlineConstants::IndirectCallThreshold;
      /// FIXME: if InlineCostCallAnalyzer is derived from, this may need
      /// to instantiate the derived class.
      InlineCostCallAnalyzer CA(TTI, GetAssumptionCache, GetBFI, PSI, ORE, *F,
                                Call, IndirectCallParams, false);
      if (CA.analyze()) {
        // We were able to inline the indirect call! Subtract the cost from the
        // threshold to get the bonus we want to apply, but don't go below zero.
        Cost -= std::max(0, CA.getThreshold() - CA.getCost());
      }
    } else
      // Otherwise simply add the cost for merely making the call.
      addCost(InlineConstants::CallPenalty);
  }
  void onFinalizeSwitch(unsigned JumpTableSize,
                        unsigned NumCaseCluster) override {
    // If suitable for a jump table, consider the cost for the table size and
    // branch to destination.
    // Maximum valid cost increased in this function.
    if (JumpTableSize) {
      int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
                       4 * InlineConstants::InstrCost;

      addCost(JTCost, (int64_t)CostUpperBound);
      return;
    }

    // Considering forming a binary search, we should find the number of nodes
    // which is the same as the number of comparisons when lowered. For a given
    // number of clusters, n, we can define a recursive function, f(n), to find
    // the number of nodes in the tree. The recursion is:
    //   f(n) = 1 + f(n/2) + f(n - n/2), when n > 3,
    //   f(n) = n, when n <= 3.
    // This will lead to a binary tree where the leaf should be either f(2) or
    // f(3) when n > 3. So, the number of comparisons from leaves should be n,
    // while the number of non-leaf nodes should be:
    //   2^(log2(n) - 1) - 1
    //   = 2^log2(n) * 2^-1 - 1
    //   = n / 2 - 1.
    // Considering comparisons from leaf and non-leaf nodes, we can estimate
    // the number of comparisons in a simple closed form:
    //   n + n / 2 - 1 = n * 3 / 2 - 1
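    // For example, n = 8 case clusters give f(8) = 1 + f(4) + f(4) = 11 nodes
    // (since f(4) = 1 + f(2) + f(2) = 5), matching the closed form
    // 8 * 3 / 2 - 1 = 11, i.e. an expected cost of 22 * InstrCost below.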
    if (NumCaseCluster <= 3) {
      // Suppose a comparison includes one compare and one conditional branch.
      addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
      return;
    }

    int64_t ExpectedNumberOfCompare = 3 * (int64_t)NumCaseCluster / 2 - 1;
    int64_t SwitchCost =
        ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;

    addCost(SwitchCost, (int64_t)CostUpperBound);
  }
  void onCommonInstructionSimplification() override {
    addCost(InlineConstants::InstrCost);
  }

  void onInitializeSROAArg(AllocaInst *Arg) override {
    assert(Arg != nullptr &&
           "Should not initialize SROA costs for null value.");
    SROAArgCosts[Arg] = 0;
    EnabledSROAArgValues.insert(Arg);
  }

  void onAggregateSROAUse(AllocaInst *SROAArg) override {
    auto CostIt = SROAArgCosts.find(SROAArg);
    assert(CostIt != SROAArgCosts.end() &&
           "expected this argument to have a cost");
    CostIt->second += InlineConstants::InstrCost;
    SROACostSavings += InlineConstants::InstrCost;
  }
  void onBlockAnalyzed(const BasicBlock *BB) override {
    auto *TI = BB->getTerminator();
    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }
  InlineResult finalizeAnalysis() override {
    // Loops generally act a lot like calls in that they act like barriers to
    // movement, require a certain amount of setup, etc. So when optimising for
    // size, we penalise any call sites that perform loops. We do this after all
    // other costs here, so will likely only be dealing with relatively small
    // functions (and hence DT and LI will hopefully be cheap).
    auto *Caller = CandidateCall.getFunction();
    if (Caller->hasMinSize()) {
      DominatorTree DT(F);
      LoopInfo LI(DT);
      int NumLoops = 0;
      for (Loop *L : LI) {
        // Ignore loops that will not be executed
        if (DeadBlocks.count(L->getHeader()))
          continue;
        NumLoops++;
      }
      addCost(NumLoops * InlineConstants::CallPenalty);
    }

    // We applied the maximum possible vector bonus at the beginning. Now,
    // subtract the excess bonus, if any, from the Threshold before
    // comparing against Cost.
    if (NumVectorInstructions <= NumInstructions / 10)
      Threshold -= VectorBonus;
    else if (NumVectorInstructions <= NumInstructions / 2)
      Threshold -= VectorBonus / 2;

    return Cost < std::max(1, Threshold);
  }
  bool shouldStop() override {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    return Cost >= Threshold && !ComputeFullInlineCost;
  }

  void onLoadEliminationOpportunity() override {
    LoadEliminationCost += InlineConstants::InstrCost;
  }
  InlineResult onAnalysisStart() override {
    // Perform some tweaks to the cost and threshold based on the direct
    // callsite information.

    // We want to more aggressively inline vector-dense kernels, so up the
    // threshold, and we'll lower it if the % of vector instructions gets too
    // low. Note that these bonuses are somewhat arbitrary and evolved over
    // time by accident as much as because they are principled bonuses.
    //
    // FIXME: It would be nice to remove all such bonuses. At least it would be
    // nice to base the bonus values on something more scientific.
    assert(NumInstructions == 0);
    assert(NumVectorInstructions == 0);

    // Update the threshold based on callsite properties.
    updateThreshold(CandidateCall, F);

    // While Threshold depends on commandline options that can take negative
    // values, we want to enforce the invariant that the computed threshold and
    // bonuses are non-negative.
    assert(Threshold >= 0);
    assert(SingleBBBonus >= 0);
    assert(VectorBonus >= 0);

    // Speculatively apply all possible bonuses to Threshold. If cost exceeds
    // this Threshold any time, and cost cannot decrease, we can stop processing
    // the rest of the function body.
    Threshold += (SingleBBBonus + VectorBonus);

    // Give out bonuses for the callsite, as the instructions setting them up
    // will be gone after inlining.
    addCost(-getCallsiteCost(this->CandidateCall, DL));

    // If this function uses the coldcc calling convention, prefer not to inline
    // it.
    if (F.getCallingConv() == CallingConv::Cold)
      Cost += InlineConstants::ColdccPenalty;

    // Check if we're done. This can happen due to bonuses and penalties.
    if (Cost >= Threshold && !ComputeFullInlineCost)
      return "high cost";

    return true;
  }
public:
  InlineCostCallAnalyzer(
      const TargetTransformInfo &TTI,
      std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
      Optional<function_ref<BlockFrequencyInfo &(Function &)>> &GetBFI,
      ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE, Function &Callee,
      CallBase &Call, const InlineParams &Params, bool BoostIndirect = true)
      : CallAnalyzer(TTI, GetAssumptionCache, GetBFI, PSI, ORE, Callee, Call),
        ComputeFullInlineCost(OptComputeFullInlineCost ||
                              Params.ComputeFullInlineCost || ORE),
        Params(Params), Threshold(Params.DefaultThreshold),
        BoostIndirectCalls(BoostIndirect) {}

  virtual ~InlineCostCallAnalyzer() {}
  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }
};
} // namespace
/// Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

void CallAnalyzer::disableSROAForArg(AllocaInst *SROAArg) {
  onDisableSROA(SROAArg);
  EnabledSROAArgValues.erase(SROAArg);
  disableLoadElimination();
}
/// If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
    disableSROAForArg(SROAArg);
  }
}

void CallAnalyzer::disableLoadElimination() {
  if (EnableLoadElimination) {
    onDisableLoadElimination();
    EnableLoadElimination = false;
  }
}
/// Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
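///
/// For example, a GEP with constant indices (0, 1) into a pointer to
/// {i32, i64} would accumulate the field offset of element 1, which is
/// 8 bytes under a typical 64-bit data layout.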
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero())
      continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}
/// Use TTI to check whether a GEP is free.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
  SmallVector<Value *, 4> Operands;
  Operands.push_back(GEP.getOperand(0));
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (Constant *SimpleOp = SimplifiedValues.lookup(*I))
      Operands.push_back(SimpleOp);
    else
      Operands.push_back(*I);
  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&GEP, Operands);
}
bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // Check whether inlining will turn a dynamic alloca into a static
  // alloca and handle that case.
  if (I.isArrayAllocation()) {
    Constant *Size = SimplifiedValues.lookup(I.getArraySize());
    if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
      Type *Ty = I.getAllocatedType();
      AllocatedSize = SaturatingMultiplyAdd(
          AllocSize->getLimitedValue(), DL.getTypeAllocSize(Ty).getFixedSize(),
          AllocatedSize);
      return Base::visitAlloca(I);
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize =
        SaturatingAdd(DL.getTypeAllocSize(Ty).getFixedSize(), AllocatedSize);
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}
bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  // FIXME: Pointer sizes may differ between different address spaces, so do we
  // need to use correct address space in the call to getPointerSizeInBits here?
  // Or could we skip the getPointerSizeInBits call completely? As far as I can
  // see the ZeroOffset is used as a dummy value, so we can probably use any
  // bit width for the ZeroOffset?
  APInt ZeroOffset = APInt::getNullValue(DL.getPointerSizeInBits(0));
  bool CheckSROA = I.getType()->isPointerTy();

  // Track the constant or pointer with constant offset we've seen so far.
  Constant *FirstC = nullptr;
  std::pair<Value *, APInt> FirstBaseAndOffset = {nullptr, ZeroOffset};
  Value *FirstV = nullptr;

  for (unsigned i = 0, e = I.getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = I.getIncomingBlock(i);
    // If the incoming block is dead, skip the incoming block.
    if (DeadBlocks.count(Pred))
      continue;
    // If the parent block of phi is not the known successor of the incoming
    // block, skip the incoming block.
    BasicBlock *KnownSuccessor = KnownSuccessors[Pred];
    if (KnownSuccessor && KnownSuccessor != I.getParent())
      continue;

    Value *V = I.getIncomingValue(i);
    // If the incoming value is this phi itself, skip the incoming value.
    if (&I == V)
      continue;

    Constant *C = dyn_cast<Constant>(V);
    if (!C)
      C = SimplifiedValues.lookup(V);

    std::pair<Value *, APInt> BaseAndOffset = {nullptr, ZeroOffset};
    if (!C && CheckSROA)
      BaseAndOffset = ConstantOffsetPtrs.lookup(V);

    if (!C && !BaseAndOffset.first)
      // The incoming value is neither a constant nor a pointer with constant
      // offset, exit early.
      return true;

    if (FirstC) {
      if (FirstC == C)
        // If we've seen a constant incoming value before and it is the same
        // constant we see this time, continue checking the next incoming value.
        continue;
      // Otherwise early exit because we either see a different constant or saw
      // a constant before but we have a pointer with constant offset this time.
      return true;
    }

    if (FirstV) {
      // The same logic as above, but check pointer with constant offset here.
      if (FirstBaseAndOffset == BaseAndOffset)
        continue;
      return true;
    }

    if (C) {
      // This is the 1st time we've seen a constant, record it.
      FirstC = C;
      continue;
    }

    // The remaining case is that this is the 1st time we've seen a pointer with
    // constant offset, record it.
    FirstV = V;
    FirstBaseAndOffset = BaseAndOffset;
  }

  // Check if we can map phi to a constant.
  if (FirstC) {
    SimplifiedValues[&I] = FirstC;
    return true;
  }

  // Check if we can map phi to a pointer with constant offset.
  if (FirstBaseAndOffset.first) {
    ConstantOffsetPtrs[&I] = FirstBaseAndOffset;

    if (auto *SROAArg = getSROAArgForValueOrNull(FirstV))
      SROAArgValues[&I] = SROAArg;
  }

  return true;
}
/// Check we can fold GEPs of constant-offset call site argument pointers.
/// This requires target data and inbounds GEPs.
///
/// \return true if the specified GEP can be folded.
bool CallAnalyzer::canFoldInboundsGEP(GetElementPtrInst &I) {
  // Check if we have a base + offset for the pointer.
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getPointerOperand());
  if (!BaseAndOffset.first)
    return false;

  // Check if the offset of this GEP is constant, and if so accumulate it
  // into Base.
  if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second))
    return false;

  // Add the result as a new mapping to Base + Offset.
  ConstantOffsetPtrs[&I] = BaseAndOffset;

  return true;
}
bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  auto *SROAArg = getSROAArgForValueOrNull(I.getPointerOperand());

  // Lambda to check whether a GEP's indices are all constant.
  auto IsGEPOffsetConstant = [&](GetElementPtrInst &GEP) {
    for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
      if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
        return false;
    return true;
  };

  if ((I.isInBounds() && canFoldInboundsGEP(I)) || IsGEPOffsetConstant(I)) {
    if (SROAArg)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROAArg)
    disableSROAForArg(SROAArg);
  return isGEPFree(I);
}
/// Simplify \p I if its operands are constants and update SimplifiedValues.
/// \p Evaluate is a callable specific to instruction type that evaluates the
/// instruction when all the operands are constants.
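///
/// For example, visitBitCast below passes a lambda that folds the single
/// simplified operand with ConstantExpr::getBitCast.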
template <typename Callable>
bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
  SmallVector<Constant *, 2> COps;
  for (Value *Op : I.operands()) {
    Constant *COp = dyn_cast<Constant>(Op);
    if (!COp)
      COp = SimplifiedValues.lookup(Op);
    if (!COp)
      return false;
    COps.push_back(COp);
  }
  auto *C = Evaluate(COps);
  if (!C)
    return false;
  SimplifiedValues[&I] = C;
  return true;
}
bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getBitCast(COps[0], I.getType());
      }))
    return true;

  // Track base/offsets through casts
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}
bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getPtrToInt(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  unsigned AS = I.getOperand(0)->getType()->getPointerAddressSpace();
  if (IntegerSize >= DL.getPointerSizeInBits(AS)) {
    std::pair<Value *, APInt> BaseAndOffset =
        ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}
bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getIntToPtr(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (IntegerSize <= DL.getPointerTypeSizeInBits(I.getType())) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  if (auto *SROAArg = getSROAArgForValueOrNull(Op))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}
bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getCast(I.getOpcode(), COps[0], I.getType());
      }))
    return true;

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  // If this is a floating-point cast, and the target says this operation
  // is expensive, this may eventually become a library call. Treat the cost
  // as such.
  switch (I.getOpcode()) {
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    if (TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive)
      onCallPenalty();
    break;
  default:
    break;
  }

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}
bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantFoldInstOperands(&I, COps[0], DL);
      }))
    return true;

  // Disable any SROA on the argument to arbitrary unary instructions.
  disableSROA(Operand);

  return false;
}
bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
  return CandidateCall.paramHasAttr(A->getArgNo(), Attr);
}
bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
  // Does the *call site* have the NonNull attribute set on an argument? We
  // use the attribute on the call site to memoize any analysis done in the
  // caller. This will also trip if the callee function has a non-null
  // parameter attribute, but that's a less interesting case because hopefully
  // the callee would already have been simplified based on that.
  if (Argument *A = dyn_cast<Argument>(V))
    if (paramHasAttr(A, Attribute::NonNull))
      return true;

  // Is this an alloca in the caller? This is distinct from the attribute case
  // above because attributes aren't updated within the inliner itself and we
  // always want to catch the alloca derived case.
  if (isAllocaDerivedArg(V))
    // We can actually predict the result of comparisons between an
    // alloca-derived value and null. Note that this fires regardless of
    // SROA firing.
    return true;

  return false;
}
bool CallAnalyzer::allowSizeGrowth(CallBase &Call) {
  // If the normal destination of the invoke or the parent block of the call
  // site is unreachable-terminated, there is little point in inlining this
  // unless there is literally zero cost.
  // FIXME: Note that it is possible that an unreachable-terminated block has a
  // hot entry. For example, in below scenario inlining hot_call_X() may be
  // beneficial:
  //   entry:
  //     hot_call_1();
  //     ...
  //     hot_call_N();
  //     exit(0);  // unreachable-terminated
  // For now, we are not handling this corner case here as it is rare in real
  // code. In future, we should elaborate this based on BPI and BFI in more
  // general threshold adjusting heuristics in updateThreshold().
  if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
    if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
      return false;
  } else if (isa<UnreachableInst>(Call.getParent()->getTerminator()))
    return false;

  return true;
}
bool InlineCostCallAnalyzer::isColdCallSite(CallBase &Call,
                                            BlockFrequencyInfo *CallerBFI) {
  // If global profile summary is available, then callsite's coldness is
  // determined based on that.
  if (PSI && PSI->hasProfileSummary())
    return PSI->isColdCallSite(CallSite(&Call), CallerBFI);

  // Otherwise we need BFI to be available.
  if (!CallerBFI)
    return false;

  // Determine if the callsite is cold relative to caller's entry. We could
  // potentially cache the computation of scaled entry frequency, but the added
  // complexity is not worth it unless this scaling shows up high in the
  // profiles.
  const BranchProbability ColdProb(ColdCallSiteRelFreq, 100);
  auto CallSiteBB = Call.getParent();
  auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB);
  auto CallerEntryFreq =
      CallerBFI->getBlockFreq(&(Call.getCaller()->getEntryBlock()));
  return CallSiteFreq < CallerEntryFreq * ColdProb;
}
Optional<int>
InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
                                                BlockFrequencyInfo *CallerBFI) {
  // If global profile summary is available, then callsite's hotness is
  // determined based on that.
  if (PSI && PSI->hasProfileSummary() &&
      PSI->isHotCallSite(CallSite(&Call), CallerBFI))
    return Params.HotCallSiteThreshold;

  // Otherwise we need BFI to be available and to have a locally hot callsite
  // threshold.
  if (!CallerBFI || !Params.LocallyHotCallSiteThreshold)
    return None;

  // Determine if the callsite is hot relative to caller's entry. We could
  // potentially cache the computation of scaled entry frequency, but the added
  // complexity is not worth it unless this scaling shows up high in the
  // profiles.
  auto CallSiteBB = Call.getParent();
  auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB).getFrequency();
  auto CallerEntryFreq = CallerBFI->getEntryFreq();
  if (CallSiteFreq >= CallerEntryFreq * HotCallSiteRelFreq)
    return Params.LocallyHotCallSiteThreshold;

  // Otherwise treat it normally.
  return None;
}
void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
  // If no size growth is allowed for this inlining, set Threshold to 0.
  if (!allowSizeGrowth(Call)) {
    Threshold = 0;
    return;
  }

  Function *Caller = Call.getCaller();

  // return min(A, B) if B is valid.
  auto MinIfValid = [](int A, Optional<int> B) {
    return B ? std::min(A, B.getValue()) : A;
  };

  // return max(A, B) if B is valid.
  auto MaxIfValid = [](int A, Optional<int> B) {
    return B ? std::max(A, B.getValue()) : A;
  };
  // Various bonus percentages. These are multiplied by Threshold to get the
  // maximum bonus values.
  //
  // SingleBBBonus: This bonus is applied if the callee has a single reachable
  // basic block at the given callsite context. This is speculatively applied
  // and withdrawn if more than one basic block is seen.
  //
  // LastCallToStaticBonus: This large bonus is applied to ensure the inlining
  // of the last call to a static function as inlining such functions is
  // guaranteed to reduce code size.
  //
  // These bonus percentages may be set to 0 based on properties of the caller
  // and the callsite.
  int SingleBBBonusPercent = 50;
  int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
  int LastCallToStaticBonus = InlineConstants::LastCallToStaticBonus;

  // Lambda to set all the above bonus and bonus percentages to 0.
  auto DisallowAllBonuses = [&]() {
    SingleBBBonusPercent = 0;
    VectorBonusPercent = 0;
    LastCallToStaticBonus = 0;
  };
  // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
  // and reduce the threshold if the caller has the necessary attribute.
  if (Caller->hasMinSize()) {
    Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
    // For minsize, we want to disable the single BB bonus and the vector
    // bonuses, but not the last-call-to-static bonus. Inlining the last call to
    // a static function will, at the minimum, eliminate the parameter setup and
    // call/return instructions.
    SingleBBBonusPercent = 0;
    VectorBonusPercent = 0;
  } else if (Caller->hasOptSize())
    Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);

  // Adjust the threshold based on inlinehint attribute and profile based
  // hotness information if the caller does not have MinSize attribute.
  if (!Caller->hasMinSize()) {
    if (Callee.hasFnAttribute(Attribute::InlineHint))
      Threshold = MaxIfValid(Threshold, Params.HintThreshold);

    // FIXME: After switching to the new passmanager, simplify the logic below
    // by checking only the callsite hotness/coldness as we will reliably
    // have local profile information.
    //
    // Callsite hotness and coldness can be determined if sample profile is
    // used (which adds hotness metadata to calls) or if caller's
    // BlockFrequencyInfo is available.
    BlockFrequencyInfo *CallerBFI = GetBFI ? &((*GetBFI)(*Caller)) : nullptr;
    auto HotCallSiteThreshold = getHotCallSiteThreshold(Call, CallerBFI);
    if (!Caller->hasOptSize() && HotCallSiteThreshold) {
      LLVM_DEBUG(dbgs() << "Hot callsite.\n");
      // FIXME: This should update the threshold only if it exceeds the
      // current threshold, but AutoFDO + ThinLTO currently relies on this
      // behavior to prevent inlining of hot callsites during ThinLTO
      // compile phase.
      Threshold = HotCallSiteThreshold.getValue();
    } else if (isColdCallSite(Call, CallerBFI)) {
      LLVM_DEBUG(dbgs() << "Cold callsite.\n");
      // Do not apply bonuses for a cold callsite including the
      // LastCallToStatic bonus. While this bonus might result in code size
      // reduction, it can cause the size of a non-cold caller to increase
      // preventing it from being inlined.
      DisallowAllBonuses();
      Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
    } else if (PSI) {
      // Use callee's global profile information only if we have no way of
      // determining this via callsite information.
      if (PSI->isFunctionEntryHot(&Callee)) {
        LLVM_DEBUG(dbgs() << "Hot callee.\n");
        // If callsite hotness can not be determined, we may still know
        // that the callee is hot and treat it as a weaker hint for threshold
        // reduction.
        Threshold = MaxIfValid(Threshold, Params.HintThreshold);
      } else if (PSI->isFunctionEntryCold(&Callee)) {
        LLVM_DEBUG(dbgs() << "Cold callee.\n");
        // Do not apply bonuses for a cold callee including the
        // LastCallToStatic bonus. While this bonus might result in code size
        // reduction, it can cause the size of a non-cold caller to increase
        // preventing it from being inlined.
        DisallowAllBonuses();
        Threshold = MinIfValid(Threshold, Params.ColdThreshold);
      }
    }
  }
  // Finally, take the target-specific inlining threshold multiplier into
  // account.
  Threshold *= TTI.getInliningThresholdMultiplier();
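
  // With the default threshold of 225, the 50% single-BB bonus below comes to
  // 112, and a typical target vector bonus of 150% comes to 337 (both integer
  // divisions).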
  SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
  VectorBonus = Threshold * VectorBonusPercent / 100;

  bool OnlyOneCallAndLocalLinkage =
      F.hasLocalLinkage() && F.hasOneUse() && &F == Call.getCalledFunction();
  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically. It may seem odd to update
  // Cost in updateThreshold, but the bonus depends on the logic in this method.
  if (OnlyOneCallAndLocalLinkage)
    Cost -= LastCallToStaticBonus;
}
bool CallAnalyzer::visitCmpInst(CmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getCompare(I.getPredicate(), COps[0], COps[1]);
      }))
    return true;

  if (I.getOpcode() == Instruction::FCmp)
    return false;

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // if we know the value (argument) can't be null.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
      isKnownNonNullInCallee(I.getOperand(0))) {
    bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
    SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                      : ConstantInt::getFalse(I.getType());
    return true;
  }

  return handleSROA(I.getOperand(0), isa<ConstantPointerNull>(I.getOperand(1)));
}
bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}
bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Constant *CLHS = dyn_cast<Constant>(LHS);
  if (!CLHS)
    CLHS = SimplifiedValues.lookup(LHS);
  Constant *CRHS = dyn_cast<Constant>(RHS);
  if (!CRHS)
    CRHS = SimplifiedValues.lookup(RHS);

  Value *SimpleV = nullptr;
  if (auto FI = dyn_cast<FPMathOperator>(&I))
    SimpleV = SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS,
                            FI->getFastMathFlags(), DL);
  else
    SimpleV =
        SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL);

  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
    SimplifiedValues[&I] = C;

  if (SimpleV)
    return true;

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  // If the instruction is floating point, and the target says this operation
  // is expensive, this may eventually become a library call. Treat the cost
  // as such. Unless it's fneg which can be implemented with an xor.
  using namespace llvm::PatternMatch;
  if (I.getType()->isFloatingPointTy() &&
      TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive &&
      !match(&I, m_FNeg(m_Value())))
    onCallPenalty();

  return false;
}
bool CallAnalyzer::visitFNeg(UnaryOperator &I) {
  Value *Op = I.getOperand(0);
  Constant *COp = dyn_cast<Constant>(Op);
  if (!COp)
    COp = SimplifiedValues.lookup(Op);

  Value *SimpleV = SimplifyFNegInst(
      COp ? COp : Op, cast<FPMathOperator>(I).getFastMathFlags(), DL);

  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
    SimplifiedValues[&I] = C;

  if (SimpleV)
    return true;

  // Disable any SROA on arguments to arbitrary, unsimplified fneg.
  disableSROA(Op);

  return false;
}
bool CallAnalyzer::visitLoad(LoadInst &I) {
  if (handleSROA(I.getPointerOperand(), I.isSimple()))
    return true;

  // If the data is already loaded from this address and hasn't been clobbered
  // by any stores or calls, this load is likely to be redundant and can be
  // eliminated.
  if (EnableLoadElimination &&
      !LoadAddrSet.insert(I.getPointerOperand()).second && I.isUnordered()) {
    onLoadEliminationOpportunity();
    return true;
  }

  return false;
}
bool CallAnalyzer::visitStore(StoreInst &I) {
  if (handleSROA(I.getPointerOperand(), I.isSimple()))
    return true;

  // The store can potentially clobber loads and prevent repeated loads from
  // being eliminated.
  //
  // FIXME:
  // 1. We can probably keep an initial set of eliminatable loads subtracted
  // from the cost even when we finally see a store. We just need to disable
  // *further* accumulation of elimination savings.
  // 2. We should probably at some point thread MemorySSA for the callee into
  // this and then use that to actually compute *really* precise savings.
  disableLoadElimination();
  return false;
}
bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getExtractValue(COps[0], I.getIndices());
      }))
    return true;

  // SROA can look through these but give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getInsertValue(/*AggregateOperand*/ COps[0],
                                            /*InsertedValueOperand*/ COps[1],
                                            I.getIndices());
      }))
    return true;

  // SROA can look through these but give them a cost.
  return false;
}
/// Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
bool CallAnalyzer::simplifyCallSite(Function *F, CallBase &Call) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(&Call, F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(Call.arg_size());
  for (Value *I : Call.args()) {
    Constant *C = dyn_cast<Constant>(I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(&Call, F, ConstantArgs)) {
    SimplifiedValues[&Call] = C;
    return true;
  }

  return false;
}
bool CallAnalyzer::visitCallBase(CallBase &Call) {
  if (Call.hasFnAttr(Attribute::ReturnsTwice) &&
      !F.hasFnAttribute(Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (isa<CallInst>(Call) && cast<CallInst>(Call).cannotDuplicate())
    ContainsNoDuplicateCall = true;

  Value *Callee = Call.getCalledOperand();
  Function *F = dyn_cast_or_null<Function>(Callee);
  bool IsIndirectCall = !F;
  if (IsIndirectCall) {
    // Check if this happens to be an indirect function call to a known
    // function in this inline context. If not, we've done all we can.
    F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
    if (!F) {
      onCallArgumentSetup(Call);

      if (!Call.onlyReadsMemory())
        disableLoadElimination();
      return Base::visitCallBase(Call);
    }
  }

  assert(F && "Expected a call to a known function");

  // When we have a concrete function, first try to simplify it directly.
  if (simplifyCallSite(F, Call))
    return true;

  // Next check if it is an intrinsic we know about.
  // FIXME: Lift this into part of the InstVisitor.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Call)) {
    switch (II->getIntrinsicID()) {
    default:
      if (!Call.onlyReadsMemory() && !isAssumeLikeIntrinsic(II))
        disableLoadElimination();
      return Base::visitCallBase(Call);

    case Intrinsic::load_relative:
      onLoadRelativeIntrinsic();
      return false;

    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      disableLoadElimination();
      // SROA can usually chew through these intrinsics, but they aren't free.
      return false;
    case Intrinsic::icall_branch_funnel:
    case Intrinsic::localescape:
      HasUninlineableIntrinsic = true;
      return false;
    case Intrinsic::vastart:
      InitsVargArgs = true;
      return false;
    }
  }

  if (F == Call.getFunction()) {
    // This flag will fully abort the analysis, so don't bother with anything
    // else.
    IsRecursiveCall = true;
    return false;
  }

  if (TTI.isLoweredToCall(F)) {
    onLoweredCall(F, Call, IsIndirectCall);
  }

  if (!(Call.onlyReadsMemory() || (IsIndirectCall && F->onlyReadsMemory())))
    disableLoadElimination();
  return Base::visitCallBase(Call);
}
bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
  // At least one return instruction will be free after inlining.
  bool Free = !HasReturn;
  HasReturn = true;
  return Free;
}
bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
  // We model unconditional branches as essentially free -- they really
  // shouldn't exist at all, but handling them makes the behavior of the
  // inliner more regular and predictable. Interestingly, conditional branches
  // which will fold away are also free.
  return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
         dyn_cast_or_null<ConstantInt>(
             SimplifiedValues.lookup(BI.getCondition()));
}

bool CallAnalyzer::visitSelectInst(SelectInst &SI) {
  bool CheckSROA = SI.getType()->isPointerTy();
  Value *TrueVal = SI.getTrueValue();
  Value *FalseVal = SI.getFalseValue();

  Constant *TrueC = dyn_cast<Constant>(TrueVal);
  if (!TrueC)
    TrueC = SimplifiedValues.lookup(TrueVal);
  Constant *FalseC = dyn_cast<Constant>(FalseVal);
  if (!FalseC)
    FalseC = SimplifiedValues.lookup(FalseVal);
  Constant *CondC =
      dyn_cast_or_null<Constant>(SimplifiedValues.lookup(SI.getCondition()));

  if (!CondC) {
    // Select C, X, X => X
    if (TrueC == FalseC && TrueC) {
      SimplifiedValues[&SI] = TrueC;
      return true;
    }

    if (!CheckSROA)
      return Base::visitSelectInst(SI);

    std::pair<Value *, APInt> TrueBaseAndOffset =
        ConstantOffsetPtrs.lookup(TrueVal);
    std::pair<Value *, APInt> FalseBaseAndOffset =
        ConstantOffsetPtrs.lookup(FalseVal);
    if (TrueBaseAndOffset == FalseBaseAndOffset && TrueBaseAndOffset.first) {
      ConstantOffsetPtrs[&SI] = TrueBaseAndOffset;

      if (auto *SROAArg = getSROAArgForValueOrNull(TrueVal))
        SROAArgValues[&SI] = SROAArg;
      return true;
    }

    return Base::visitSelectInst(SI);
  }

  // Select condition is a constant.
  Value *SelectedV = CondC->isAllOnesValue()
                         ? TrueVal
                         : (CondC->isNullValue()) ? FalseVal : nullptr;
  if (!SelectedV) {
    // Condition is a vector constant that is not all 1s or all 0s. If all
    // operands are constants, ConstantExpr::getSelect() can handle cases such
    // as vector selects.
    if (TrueC && FalseC) {
      if (auto *C = ConstantExpr::getSelect(CondC, TrueC, FalseC)) {
        SimplifiedValues[&SI] = C;
        return true;
      }
    }
    return Base::visitSelectInst(SI);
  }

  // Condition is either all 1s or all 0s. SI can be simplified.
  if (Constant *SelectedC = dyn_cast<Constant>(SelectedV)) {
    SimplifiedValues[&SI] = SelectedC;
    return true;
  }

  if (!CheckSROA)
    return true;

  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(SelectedV);
  if (BaseAndOffset.first) {
    ConstantOffsetPtrs[&SI] = BaseAndOffset;

    if (auto *SROAArg = getSROAArgForValueOrNull(SelectedV))
      SROAArgValues[&SI] = SROAArg;
  }

  return true;
}
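
// Illustrative example (not from the original source): `select i1 %c, i8* %p,
// i8* %p` simplifies to %p regardless of %c, and two arms that resolve to the
// same (base, constant offset) pair keep the base alloca viable for SROA.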

bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
  // We model unconditional switches as free, see the comments on handling
  // branches.
  if (isa<ConstantInt>(SI.getCondition()))
    return true;
  if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
    if (isa<ConstantInt>(V))
      return true;

  // Assume the most general case where the switch is lowered into either a
  // jump table, a bit test, or a balanced binary tree consisting of case
  // clusters without merging adjacent clusters with the same destination. We
  // do not consider switches that are lowered with a mix of jump table / bit
  // test / binary search tree. The cost of the switch is proportional to the
  // size of the tree or the size of the jump table range.
  //
  // NB: We convert large switches which are just used to initialize large phi
  // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
  // inlining those. It will prevent inlining in cases where the optimization
  // does not (yet) fire.

  unsigned JumpTableSize = 0;
  BlockFrequencyInfo *BFI = GetBFI ? &((*GetBFI)(F)) : nullptr;
  unsigned NumCaseCluster =
      TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize, PSI, BFI);

  onFinalizeSwitch(JumpTableSize, NumCaseCluster);
  return false;
}
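
// Worked example (illustrative, not from the original source): a dense switch
// over cases 0..63 typically lowers to one jump table (JumpTableSize = 64,
// NumCaseCluster = 1), while a handful of widely scattered cases form one
// cluster each and are costed as a balanced binary search tree over those
// clusters.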

bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddress's (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function, which is undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions with
  // indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function.
  HasIndirectBr = true;
  return false;
}
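
// Illustrative hazard (not from the original source): if a global holds
// `blockaddress(@callee, %bb)` and @callee is inlined, an indirectbr through
// that global still targets %bb in the original @callee, jumping from the
// inlined copy into a different function body.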

bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a resume instruction.
  return false;
}

bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a cleanupret instruction.
  return false;
}

bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a catchret instruction.
  return false;
}

bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
  // to unreachable as they have the lowest possible impact on both runtime and
  // code size.
  return true; // No actual code is needed for unreachable.
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}

/// Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible-to-inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
InlineResult
CallAnalyzer::analyzeBlock(BasicBlock *BB,
                           SmallPtrSetImpl<const Value *> &EphValues) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    // FIXME: Currently, the number of instructions in a function, regardless
    // of our ability to simplify them to constants or dead code during
    // inlining, is actually used by the vector bonus heuristic. As long as
    // that's true, we have to special case debug intrinsics here to prevent
    // differences in inlining due to debug symbols. Eventually, the number of
    // unsimplified instructions shouldn't factor into the cost computation,
    // but until then, hack around it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip ephemeral values.
    if (EphValues.count(&*I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(&*I))
      ++NumInstructionsSimplified;
    else
      onCommonInstructionSimplification();

    using namespace ore;
    // If visiting this instruction detected an uninlinable pattern, abort.
    InlineResult IR;
    if (IsRecursiveCall)
      IR = "recursive";
    else if (ExposesReturnsTwice)
      IR = "exposes returns twice";
    else if (HasDynamicAlloca)
      IR = "dynamic alloca";
    else if (HasIndirectBr)
      IR = "indirect branch";
    else if (HasUninlineableIntrinsic)
      IR = "uninlinable intrinsic";
    else if (InitsVargArgs)
      IR = "varargs";
    if (!IR) {
      if (ORE)
        ORE->emit([&]() {
          return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
                                          &CandidateCall)
                 << NV("Callee", &F) << " has uninlinable pattern ("
                 << NV("InlineResult", IR.message)
                 << ") and cost is not fully computed";
        });
      return IR;
    }

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
      InlineResult IR = "recursive and allocates too much stack space";
      if (ORE)
        ORE->emit([&]() {
          return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
                                          &CandidateCall)
                 << NV("Callee", &F) << " is " << NV("InlineResult", IR.message)
                 << ". Cost is not fully computed";
        });
      return IR;
    }

    if (shouldStop())
      return false;
  }

  return true;
}

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!V->getType()->isPointerTy())
    return nullptr;

  unsigned AS = V->getType()->getPointerAddressSpace();
  unsigned IntPtrWidth = DL.getIndexSizeInBits(AS);
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return nullptr;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  Type *IdxPtrTy = DL.getIndexType(V->getType());
  return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
}
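
// Worked example (illustrative, not from the original source): given
//   %a = alloca [4 x i32]
//   %p = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i32 2
//   %q = bitcast i32* %p to i8*
// stripping %q walks back to base %a with an accumulated offset of 8 bytes
// (two i32 elements).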

/// Find dead blocks due to deleted CFG edges during inlining.
///
/// If we know the successor of the current block, \p CurrBB, has to be \p
/// NextBB, the other successors of \p CurrBB are dead if these successors have
/// no live incoming CFG edges. If one block is found to be dead, we can
/// continue growing the dead block list by checking the successors of the dead
/// blocks to see if all their incoming edges are dead or not.
void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
  auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) {
    // A CFG edge is dead if the predecessor is dead or the predecessor has a
    // known successor which is not the one under exam.
    return (DeadBlocks.count(Pred) ||
            (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ));
  };

  auto IsNewlyDead = [&](BasicBlock *BB) {
    // If all the edges to a block are dead, the block is also dead.
    return (!DeadBlocks.count(BB) &&
            llvm::all_of(predecessors(BB),
                         [&](BasicBlock *P) { return IsEdgeDead(P, BB); }));
  };

  for (BasicBlock *Succ : successors(CurrBB)) {
    if (Succ == NextBB || !IsNewlyDead(Succ))
      continue;
    SmallVector<BasicBlock *, 4> NewDead;
    NewDead.push_back(Succ);
    while (!NewDead.empty()) {
      BasicBlock *Dead = NewDead.pop_back_val();
      if (DeadBlocks.insert(Dead))
        // Continue growing the dead block lists.
        for (BasicBlock *S : successors(Dead))
          if (IsNewlyDead(S))
            NewDead.push_back(S);
    }
  }
}
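
// Illustrative example (not from the original source): when block A's
// conditional branch folds to always take %T, KnownSuccessors[A] = %T, so the
// edge from A to its other successor is dead; if that successor has no other
// live predecessors it joins DeadBlocks, and the walk continues into its own
// successors.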

/// Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
InlineResult CallAnalyzer::analyze() {
  ++NumCallsAnalyzed;

  auto Result = onAnalysisStart();
  if (!Result)
    return Result;

  if (F.empty())
    return true;

  Function *Caller = CandidateCall.getFunction();
  // Check if the caller function is recursive itself.
  for (User *U : Caller->users()) {
    CallBase *Call = dyn_cast<CallBase>(U);
    if (Call && Call->getFunction() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  auto CAI = CandidateCall.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CandidateCall.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[&*FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[&*FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (auto *SROAArg = dyn_cast<AllocaInst>(PtrArg)) {
        SROAArgValues[&*FAI] = SROAArg;
        onInitializeSROAArg(SROAArg);
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();
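
  // Illustrative example (not from the original source): for
  //   call void @f(i32 42, i8* %buf)
  // where %buf is an alloca in the caller, @f's first formal is recorded in
  // SimplifiedValues as i32 42 and the second becomes an SROA candidate
  // rooted at %buf.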

  // FIXME: If a caller has multiple calls to a callee, we end up recomputing
  // the ephemeral values multiple times (and they're completely determined by
  // the callee, so this is purely duplicate work).
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, and because we exit early once we cross our threshold
  // and therefore favor small iteration counts, we use a small-size-optimized
  // SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16>>
      BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());

  // Note that we *must not* cache the size; this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    if (shouldStop())
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Disallow inlining a blockaddress with uses other than strictly callbr.
    // A blockaddress only has defined behavior for an indirect branch in the
    // same function, and we do not currently support inlining indirect
    // branches. But, the inliner may not see an indirect branch that ends up
    // being dead code at a particular call site. If the blockaddress escapes
    // the function, e.g., via a global variable, inlining may lead to an
    // invalid cross-function reference.
    // FIXME: pr/39560: continue relaxing this overt restriction.
    if (BB->hasAddressTaken())
      for (User *U : BlockAddress::get(&*BB)->users())
        if (!isa<CallBrInst>(*U))
          return "blockaddress used outside of callbr";

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    InlineResult IR = analyzeBlock(BB, EphValues);
    if (!IR)
      return IR;

    Instruction *TI = BB->getTerminator();

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond =
                dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BasicBlock *NextBB = BI->getSuccessor(SimpleCond->isZero() ? 1 : 0);
          BBWorklist.insert(NextBB);
          KnownSuccessors[BB] = NextBB;
          findDeadBlocks(BB, NextBB);
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond =
              dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BasicBlock *NextBB = SI->findCaseValue(SimpleCond)->getCaseSuccessor();
        BBWorklist.insert(NextBB);
        KnownSuccessors[BB] = NextBB;
        findDeadBlocks(BB, NextBB);
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    onBlockAnalyzed(BB);
  }

  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
                                    &F == CandidateCall.getCalledFunction();
  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return "noduplicate";

  return finalizeAnalysis();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Dump stats about this call's analysis.
LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) dbgs() << "      " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(NumInstructions);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(LoadEliminationCost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
  DEBUG_PRINT_STAT(Cost);
  DEBUG_PRINT_STAT(Threshold);
#undef DEBUG_PRINT_STAT
}
#endif

/// Test that there are no attribute conflicts between Caller and Callee
/// that prevent inlining.
static bool functionsHaveCompatibleAttributes(Function *Caller,
                                              Function *Callee,
                                              TargetTransformInfo &TTI) {
  return TTI.areInlineCompatible(Caller, Callee) &&
         AttributeFuncs::areInlineCompatible(*Caller, *Callee);
}

int llvm::getCallsiteCost(CallBase &Call, const DataLayout &DL) {
  int Cost = 0;
  for (unsigned I = 0, E = Call.arg_size(); I != E; ++I) {
    if (Call.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
      unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
      unsigned AS = PTy->getAddressSpace();
      unsigned PointerSize = DL.getPointerSizeInBits(AS);
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost += 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost += InlineConstants::InstrCost;
    }
  }
  // The call instruction also disappears after inlining.
  Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
  return Cost;
}
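
// Worked example (illustrative, not from the original source): a byval
// argument of type [64 x i8] is 512 bits; with 64-bit pointers that is
// ceil(512 / 64) = 8 word copies, so it contributes 2 * 8 * InstrCost.
// A larger struct is still capped at 8 stores by the clamp above.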

InlineCost llvm::getInlineCost(
    CallBase &Call, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
    std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
    Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
  return getInlineCost(Call, Call.getCalledFunction(), Params, CalleeTTI,
                       GetAssumptionCache, GetBFI, PSI, ORE);
}

InlineCost llvm::getInlineCost(
    CallBase &Call, Function *Callee, const InlineParams &Params,
    TargetTransformInfo &CalleeTTI,
    std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
    Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {

  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever("indirect call");

  // Never inline calls with byval arguments that do not have the alloca
  // address space. Since byval arguments can be replaced with a copy to an
  // alloca, the inlined code would need to be adjusted to handle that the
  // argument is in the alloca address space (so it is a little bit complicated
  // to solve).
  unsigned AllocaAS = Callee->getParent()->getDataLayout().getAllocaAddrSpace();
  for (unsigned I = 0, E = Call.arg_size(); I != E; ++I)
    if (Call.isByValArgument(I)) {
      PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
      if (PTy->getAddressSpace() != AllocaAS)
        return llvm::InlineCost::getNever("byval arguments without alloca"
                                          " address space");
    }

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (Call.hasFnAttr(Attribute::AlwaysInline)) {
    auto IsViable = isInlineViable(*Callee);
    if (IsViable)
      return llvm::InlineCost::getAlways("always inline attribute");
    return llvm::InlineCost::getNever(IsViable.message);
  }

  // Never inline functions with conflicting attributes (unless callee has
  // always-inline attribute).
  Function *Caller = Call.getCaller();
  if (!functionsHaveCompatibleAttributes(Caller, Callee, CalleeTTI))
    return llvm::InlineCost::getNever("conflicting attributes");

  // Don't inline this call if the caller has the optnone attribute.
  if (Caller->hasOptNone())
    return llvm::InlineCost::getNever("optnone attribute");

  // Don't inline a function that treats null pointer as valid into a caller
  // that does not have this attribute.
  if (!Caller->nullPointerIsDefined() && Callee->nullPointerIsDefined())
    return llvm::InlineCost::getNever("nullptr definitions incompatible");

  // Don't inline functions which can be interposed at link-time.
  if (Callee->isInterposable())
    return llvm::InlineCost::getNever("interposable");

  // Don't inline functions marked noinline.
  if (Callee->hasFnAttribute(Attribute::NoInline))
    return llvm::InlineCost::getNever("noinline function attribute");

  // Don't inline call sites marked noinline.
  if (Call.isNoInline())
    return llvm::InlineCost::getNever("noinline call site attribute");

  LLVM_DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
                          << "... (caller:" << Caller->getName() << ")\n");

  InlineCostCallAnalyzer CA(CalleeTTI, GetAssumptionCache, GetBFI, PSI, ORE,
                            *Callee, Call, Params);
  InlineResult ShouldInline = CA.analyze();

  LLVM_DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever(ShouldInline.message);
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways("empty function");

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}
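
// Illustrative reading of the result (not from the original source): a cost
// of 100 against a threshold of 225 yields InlineCost::get(100, 225), which
// the inliner treats as profitable since the cost is below the threshold; the
// two forced paths above bypass that comparison entirely.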

InlineResult llvm::isInlineViable(Function &F) {
  bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Disallow inlining of functions which contain indirect branches.
    if (isa<IndirectBrInst>(BI->getTerminator()))
      return "contains indirect branches";

    // Disallow inlining of blockaddresses which are used by non-callbr
    // instructions.
    if (BI->hasAddressTaken())
      for (User *U : BlockAddress::get(&*BI)->users())
        if (!isa<CallBrInst>(*U))
          return "blockaddress used outside of callbr";

    for (auto &II : *BI) {
      CallBase *Call = dyn_cast<CallBase>(&II);
      if (!Call)
        continue;

      // Disallow recursive calls.
      if (&F == Call->getCalledFunction())
        return "recursive call";

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && isa<CallInst>(Call) &&
          cast<CallInst>(Call)->canReturnTwice())
        return "exposes returns-twice attribute";

      if (Call->getCalledFunction())
        switch (Call->getCalledFunction()->getIntrinsicID()) {
        default:
          break;
        case llvm::Intrinsic::icall_branch_funnel:
          // Disallow inlining of @llvm.icall.branch.funnel because current
          // backend can't separate call targets from call arguments.
          return "disallowed inlining of @llvm.icall.branch.funnel";
        case llvm::Intrinsic::localescape:
          // Disallow inlining functions that call @llvm.localescape. Doing
          // this correctly would require major changes to the inliner.
          return "disallowed inlining of @llvm.localescape";
        case llvm::Intrinsic::vastart:
          // Disallow inlining of functions that initialize VarArgs with
          // va_start.
          return "contains VarArgs initialized with va_start";
        }
    }
  }

  return true;
}
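
// Illustrative example (not from the original source): a variadic callee that
// calls va_start cannot be inlined, because after inlining the va_start would
// capture the caller's variadic frame rather than the callee's.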

// APIs to create InlineParams based on command line flags and/or other
// parameters.

InlineParams llvm::getInlineParams(int Threshold) {
  InlineParams Params;

  // This field is the threshold to use for a callee by default. This is
  // derived from one or more of:
  //  * optimization or size-optimization levels,
  //  * a value passed to the createFunctionInliningPass function, or
  //  * the -inline-threshold flag.
  // If the -inline-threshold flag is explicitly specified, that is used
  // irrespective of anything else.
  if (InlineThreshold.getNumOccurrences() > 0)
    Params.DefaultThreshold = InlineThreshold;
  else
    Params.DefaultThreshold = Threshold;

  // Set the HintThreshold knob from the -inlinehint-threshold.
  Params.HintThreshold = HintThreshold;

  // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
  Params.HotCallSiteThreshold = HotCallSiteThreshold;

  // If the -locally-hot-callsite-threshold is explicitly specified, use it to
  // populate LocallyHotCallSiteThreshold. Later, we populate
  // Params.LocallyHotCallSiteThreshold from -locally-hot-callsite-threshold if
  // we know that optimization level is O3 (in the getInlineParams variant that
  // takes the opt and size levels).
  // FIXME: Remove this check (and make the assignment unconditional) after
  // addressing size regression issues at O2.
  if (LocallyHotCallSiteThreshold.getNumOccurrences() > 0)
    Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;

  // Set the ColdCallSiteThreshold knob from the
  // -inline-cold-callsite-threshold.
  Params.ColdCallSiteThreshold = ColdCallSiteThreshold;

  // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
  // -inline-threshold commandline option is not explicitly given. If that
  // option is present, then its value applies even for callees with size and
  // minsize attributes.
  // If the -inline-threshold is not specified, set the ColdThreshold from the
  // -inlinecold-threshold even if it is not explicitly passed. If
  // -inline-threshold is specified, then -inlinecold-threshold needs to be
  // explicitly specified to set the ColdThreshold knob.
  if (InlineThreshold.getNumOccurrences() == 0) {
    Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
    Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
    Params.ColdThreshold = ColdThreshold;
  } else if (ColdThreshold.getNumOccurrences() > 0) {
    Params.ColdThreshold = ColdThreshold;
  }
  return Params;
}
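
// Illustrative example (not from the original source): with
// `opt -inline-threshold=500`, DefaultThreshold becomes 500 regardless of the
// Threshold argument passed in, and ColdThreshold is then applied only if
// -inlinecold-threshold was also passed explicitly on the command line.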

InlineParams llvm::getInlineParams() {
  return getInlineParams(InlineThreshold);
}

// Compute the default threshold for inlining based on the opt level and the
// size opt level.
static int computeThresholdFromOptLevels(unsigned OptLevel,
                                         unsigned SizeOptLevel) {
  if (OptLevel > 2)
    return InlineConstants::OptAggressiveThreshold;
  if (SizeOptLevel == 1) // -Os
    return InlineConstants::OptSizeThreshold;
  if (SizeOptLevel == 2) // -Oz
    return InlineConstants::OptMinSizeThreshold;
  return InlineThreshold;
}
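
// Worked example (illustrative, not from the original source):
// (OptLevel = 3, SizeOptLevel = 0) selects OptAggressiveThreshold;
// (2, 1) selects OptSizeThreshold for -Os; (2, 2) selects
// OptMinSizeThreshold for -Oz; plain -O2 falls back to the
// -inline-threshold default of 225.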

InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
  auto Params =
      getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
  // At O3, use the value of the -locally-hot-callsite-threshold option to
  // populate Params.LocallyHotCallSiteThreshold. Below O3, this flag has
  // effect only when it is specified explicitly.
  if (OptLevel > 2)
    Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
  return Params;
}