//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "inline-cost"

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {
class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  /// The cache of @llvm.assume intrinsics.
  AssumptionCacheTracker *ACT;

  // The called function.
  Function &F;

  int Threshold;
  int Cost;

  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  bool ContainsNoDuplicateCall;
  bool HasReturn;
  bool HasIndirectBr;
  bool HasFrameEscape;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;
  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
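  // For example, if a callsite passes the constant 42 for an argument, a
  // callee branch on that argument folds to a known direction and the untaken
  // block never contributes to the cost (illustrative scenario).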
  DenseMap<Value *, Constant *> SimplifiedValues;

  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
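  // For example, each simple load or store of an SROA-able alloca credits
  // InlineConstants::InstrCost to that alloca's entry here (see
  // accumulateSROACost below).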
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;
  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallSite CS);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB, SmallPtrSetImpl<const Value *> &EphValues);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *);     void visit(Module &);
  void visit(Function *);   void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallSite(CallSite CS);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);
public:
  CallAnalyzer(const TargetTransformInfo &TTI, AssumptionCacheTracker *ACT,
               Function &Callee, int Threshold)
      : TTI(TTI), ACT(ACT), F(Callee), Threshold(Threshold), Cost(0),
        IsCallerRecursive(false), IsRecursiveCall(false),
        ExposesReturnsTwice(false), HasDynamicAlloca(false),
        ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
        HasFrameEscape(false), AllocatedSize(0), NumInstructions(0),
        NumVectorInstructions(0), FiftyPercentVectorBonus(0),
        TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0),
        NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0),
        NumConstantPtrDiffs(0), NumInstructionsSimplified(0),
        SROACostSavings(0), SROACostSavingsLost(0) {}
  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace
/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
/// Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}
/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}
/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;
  return true;
}
/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
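/// For example (illustrative, assuming a 64-bit DataLayout with natural
/// struct alignment): for
///   getelementptr inbounds {i32, i64}, {i32, i64}* %p, i32 0, i32 1
/// the struct index contributes StructLayout's offset for element 1, growing
/// Offset by 8 bytes.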
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  const DataLayout &DL = F.getParent()->getDataLayout();
  unsigned IntPtrWidth = DL.getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}
bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.
  if (I.isArrayAllocation()) {
    if (Constant *Size = SimplifiedValues.lookup(I.getArraySize())) {
      ConstantInt *AllocSize = dyn_cast<ConstantInt>(Size);
      assert(AllocSize && "Allocation size not a constant int?");
      Type *Ty = I.getAllocatedType();
      AllocatedSize += Ty->getPrimitiveSizeInBits() * AllocSize->getZExtValue();
      return Base::visitAlloca(I);
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    Type *Ty = I.getAllocatedType();
    AllocatedSize += DL.getTypeAllocSize(Ty);
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}
bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}
bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}
bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}
bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  const DataLayout &DL = F.getParent()->getDataLayout();
  if (IntegerSize >= DL.getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}
bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  const DataLayout &DL = F.getParent()->getDataLayout();
  if (IntegerSize <= DL.getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}
bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}
bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *COp = dyn_cast<Constant>(Operand);
  if (!COp)
    COp = SimplifiedValues.lookup(Operand);
  if (COp) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               COp, DL)) {
      SimplifiedValues[&I] = C;
      return true;
    }
  }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}
bool CallAnalyzer::visitCmpInst(CmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getCompare(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }
  }

  if (I.getOpcode() == Instruction::FCmp)
    return false;

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }

  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}
bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}
bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  const DataLayout &DL = F.getParent()->getDataLayout();
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = nullptr;
  if (auto FI = dyn_cast<FPMathOperator>(&I))
    SimpleV =
        SimplifyFPBinOp(I.getOpcode(), LHS, RHS, FI->getFastMathFlags(), DL);
  else
    SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);

  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}
bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}
bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  Constant *C = dyn_cast<Constant>(I.getAggregateOperand());
  if (!C)
    C = SimplifiedValues.lookup(I.getAggregateOperand());
  if (C) {
    SimplifiedValues[&I] = ConstantExpr::getExtractValue(C, I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand());
  if (!AggC)
    AggC = SimplifiedValues.lookup(I.getAggregateOperand());
  Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
  if (!InsertedC)
    InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand());
  if (AggC && InsertedC) {
    SimplifiedValues[&I] = ConstantExpr::getInsertValue(AggC, InsertedC,
                                                        I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}
/// \brief Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
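/// For example (illustrative): a call to @llvm.sqrt.f64 whose operand maps to
/// the constant 4.0 folds to 2.0, making the call free after inlining.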
bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(CS.arg_size());
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(F, ConstantArgs)) {
    SimplifiedValues[CS.getInstruction()] = C;
    return true;
  }

  return false;
}
bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.hasFnAttr(Attribute::ReturnsTwice) &&
      !F.hasFnAttribute(Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (CS.isCall() &&
      cast<CallInst>(CS.getInstruction())->cannotDuplicate())
    ContainsNoDuplicateCall = true;

  if (Function *F = CS.getCalledFunction()) {
    // When we have a concrete function, first try to simplify it directly.
    if (simplifyCallSite(F, CS))
      return true;

    // Next check if it is an intrinsic we know about.
    // FIXME: Lift this into part of the InstVisitor.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
      switch (II->getIntrinsicID()) {
      default:
        return Base::visitCallSite(CS);

      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memmove:
        // SROA can usually chew through these intrinsics, but they aren't free.
        return false;
      case Intrinsic::frameescape:
        HasFrameEscape = true;
        return false;
      }
    }

    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursiveCall = true;
      return false;
    }

    if (TTI.isLoweredToCall(F)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }
  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  CallAnalyzer CA(TTI, ACT, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }

  return Base::visitCallSite(CS);
}
bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
  // At least one return instruction will be free after inlining.
  bool Free = !HasReturn;
  HasReturn = true;
  return Free;
}

bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
  // We model unconditional branches as essentially free -- they really
  // shouldn't exist at all, but handling them makes the behavior of the
  // inliner more regular and predictable. Interestingly, conditional branches
  // which will fold away are also free.
  return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
         dyn_cast_or_null<ConstantInt>(
             SimplifiedValues.lookup(BI.getCondition()));
}
bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
  // We model unconditional switches as free, see the comments on handling
  // branches.
  if (isa<ConstantInt>(SI.getCondition()))
    return true;
  if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
    if (isa<ConstantInt>(V))
      return true;

  // Otherwise, we need to accumulate a cost proportional to the number of
  // distinct successor blocks. This fan-out in the CFG cannot be represented
  // for free even if we can represent the core switch as a jumptable that
  // takes a single instruction.
  //
  // NB: We convert large switches which are just used to initialize large phi
  // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
  // inlining those. It will prevent inlining in cases where the optimization
  // does not (yet) fire.
  SmallPtrSet<BasicBlock *, 8> SuccessorBlocks;
  SuccessorBlocks.insert(SI.getDefaultDest());
  for (auto I = SI.case_begin(), E = SI.case_end(); I != E; ++I)
    SuccessorBlocks.insert(I.getCaseSuccessor());
  // Add cost corresponding to the number of distinct destinations. The first
  // we model as free because of fallthrough.
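  // For example, a default destination plus three distinct case successors
  // leaves four blocks in the set, adding 3 * InstrCost (illustrative).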
  Cost += (SuccessorBlocks.size() - 1) * InlineConstants::InstrCost;
  return false;
}
bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddresses (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions with
  // indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function.
  HasIndirectBr = true;
  return false;
}

bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a resume instruction.
  return false;
}

bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
  // to unreachable as they have the lowest possible impact on both runtime and
  // code size.
  return true; // No actual code is needed for unreachable.
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}
/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible-to-inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
                                SmallPtrSetImpl<const Value *> &EphValues) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    // FIXME: Currently, the number of instructions in a function regardless of
    // our ability to simplify them during inline to constants or dead code,
    // are actually used by the vector bonus heuristic. As long as that's true,
    // we have to special case debug intrinsics here to prevent differences in
    // inlining due to debug symbols. Eventually, the number of unsimplified
    // instructions shouldn't factor into the cost computation, but until then,
    // hack around it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip ephemeral values.
    if (EphValues.count(I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction is floating point, and the target says this operation
    // is expensive or the function has the "use-soft-float" attribute, this may
    // eventually become a library call. Treat the cost as such.
    if (I->getType()->isFloatingPointTy()) {
      bool hasSoftFloatAttr = false;

      // If the function has the "use-soft-float" attribute, mark it as
      // expensive.
      if (F.hasFnAttribute("use-soft-float")) {
        Attribute Attr = F.getFnAttribute("use-soft-float");
        StringRef Val = Attr.getValueAsString();
        if (Val == "true")
          hasSoftFloatAttr = true;
      }

      if (TTI.getFPOpCost(I->getType()) == TargetTransformInfo::TCC_Expensive ||
          hasSoftFloatAttr)
        Cost += InlineConstants::CallPenalty;
    }

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
        HasIndirectBr || HasFrameEscape)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    // Check if we're past the maximum possible threshold so we don't spin in
    // huge basic blocks that will never inline.
    if (Cost > Threshold)
      return false;
  }

  return true;
}
/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
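/// For example (illustrative, assuming 64-bit pointers): if V is an inbounds
/// GEP adding a constant 12 bytes to an alloca %a, V is rewritten to %a and
/// the returned ConstantInt holds 12.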
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!V->getType()->isPointerTy())
    return nullptr;

  const DataLayout &DL = F.getParent()->getDataLayout();
  unsigned IntPtrWidth = DL.getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        break;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  Type *IntPtrTy = DL.getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}
/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low. Note that these bonuses are somewhat arbitrary and evolved over time
  // by accident as much as because they are principled bonuses.
  //
  // FIXME: It would be nice to remove all such bonuses. At least it would be
  // nice to base the bonus values on something more scientific.
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);
  FiftyPercentVectorBonus = 3 * Threshold / 2;
  TenPercentVectorBonus = 3 * Threshold / 4;
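  // Illustrative arithmetic: assuming the common default threshold of 225,
  // these bonuses evaluate to 337 and 168 respectively.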
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;

  // Speculatively apply all possible bonuses to Threshold. If cost exceeds
  // this Threshold any time, and cost cannot decrease, we can stop processing
  // the rest of the function body.
  Threshold += (SingleBBBonus + FiftyPercentVectorBonus);
  // Give out bonuses per argument, as the instructions setting them up will
  // be gone after inlining.
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = DL.getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);
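      // Illustrative arithmetic: a 32-byte (256-bit) byval struct with 64-bit
      // pointers yields NumStores = 4, crediting 8 * InstrCost just below.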
      Cost -= 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost -= InlineConstants::InstrCost;
    }
  }
  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
                                    &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn. As such,
  // there is little point in inlining this unless there is literally zero
  // cost.
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Threshold = 0;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
    Threshold = 0;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;
  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (User *U : Caller->users()) {
    CallSite Site(U);
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }
  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // FIXME: If a caller has multiple calls to a callee, we end up recomputing
  // the ephemeral values multiple times (and they're completely determined by
  // the callee, so this is purely duplicate work).
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(&F, &ACT->getAssumptionCache(F), EphValues);

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, prioritizing for small iterations because we exit after
  // crossing our threshold, we use a small-size optimized SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (Cost > Threshold)
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Disallow inlining a blockaddress. A blockaddress only has defined
    // behavior for an indirect branch in the same function, and we do not
    // currently support inlining indirect branches. But, the inliner may not
    // see an indirect branch that ends up being dead code at a particular call
    // site. If the blockaddress escapes the function, e.g., via a global
    // variable, inlining may lead to an invalid cross-function reference.
    if (BB->hasAddressTaken())
      return false;
    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB, EphValues)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
          HasIndirectBr || HasFrameEscape)
        return false;

      // If the caller is a recursive function then we don't want to inline
      // functions which allocate a lot of stack space because it would increase
      // the caller stack usage dramatically.
      if (IsCallerRecursive &&
          AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
        return false;

      break;
    }

    TerminatorInst *TI = BB->getTerminator();
    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }
  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;
  // We applied the maximum possible vector bonus at the beginning. Now,
  // subtract the excess bonus, if any, from the Threshold before
  // comparing against Cost.
  if (NumVectorInstructions <= NumInstructions / 10)
    Threshold -= FiftyPercentVectorBonus;
  else if (NumVectorInstructions <= NumInstructions / 2)
    Threshold -= (FiftyPercentVectorBonus - TenPercentVectorBonus);
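  // Net effect: below 10% vector instructions no vector bonus remains, between
  // 10% and 50% the TenPercentVectorBonus remains, and at 50% or more the full
  // FiftyPercentVectorBonus stays applied.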
  return Cost < Threshold;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) dbgs() << " " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(NumInstructions);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
  DEBUG_PRINT_STAT(Cost);
  DEBUG_PRINT_STAT(Threshold);
#undef DEBUG_PRINT_STAT
}
#endif
INITIALIZE_PASS_BEGIN(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
                      true, true)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
                    true, true)
char InlineCostAnalysis::ID = 0;

InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID) {}

InlineCostAnalysis::~InlineCostAnalysis() {}

void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetTransformInfoWrapperPass>();
  CallGraphSCCPass::getAnalysisUsage(AU);
}

bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
  TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>();
  ACT = &getAnalysis<AssumptionCacheTracker>();
  return false;
}

InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}
/// \brief Test that two functions either both have or both lack the given
/// attribute.
template<typename AttrKind>
static bool attributeMatches(Function *F1, Function *F2, AttrKind Attr) {
  return F1->getFnAttribute(Attr) == F2->getFnAttribute(Attr);
}

/// \brief Test that there are no attribute conflicts between Caller and Callee
/// that prevent inlining.
static bool functionsHaveCompatibleAttributes(Function *Caller,
                                              Function *Callee) {
  return attributeMatches(Caller, Callee, "target-cpu") &&
         attributeMatches(Caller, Callee, "target-features") &&
         attributeMatches(Caller, Callee, Attribute::SanitizeAddress) &&
         attributeMatches(Caller, Callee, Attribute::SanitizeMemory) &&
         attributeMatches(Caller, Callee, Attribute::SanitizeThread);
}
InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (CS.hasFnAttr(Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Never inline functions with conflicting attributes (unless callee has
  // always-inline attribute).
  if (!functionsHaveCompatibleAttributes(CS.getCaller(), Callee))
    return llvm::InlineCost::getNever();

  // Don't inline this call if the caller has the optnone attribute.
  if (CS.getCaller()->hasFnAttribute(Attribute::OptimizeNone))
    return llvm::InlineCost::getNever();

  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->hasFnAttribute(Attribute::NoInline) || CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
                     << "...\n");

  CallAnalyzer CA(TTIWP->getTTI(*Callee), ACT, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}
bool InlineCostAnalysis::isInlineViable(Function &F) {
  bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Disallow inlining of functions which contain indirect branches or
    // blockaddresses.
    if (isa<IndirectBrInst>(BI->getTerminator()) || BI->hasAddressTaken())
      return false;

    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
         ++II) {
      CallSite CS(II);
      if (!CS)
        continue;

      // Disallow recursive calls.
      if (&F == CS.getCalledFunction())
        return false;

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && CS.isCall() &&
          cast<CallInst>(CS.getInstruction())->canReturnTwice())
        return false;

      // Disallow inlining functions that call @llvm.frameescape. Doing this
      // correctly would require major changes to the inliner.
      if (CS.getCalledFunction() &&
          CS.getCalledFunction()->getIntrinsicID() ==
              llvm::Intrinsic::frameescape)
        return false;
    }
  }

  return true;
}