//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file provides helpers for the implementation of
/// a TargetTransformInfo-conforming class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H

#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"

namespace llvm {

/// Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
  typedef TargetTransformInfo TTI;

  const DataLayout &DL;

  explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
      : DL(Arg.DL) {}
  TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}

  const DataLayout &getDataLayout() const { return DL; }
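
  /// Estimate the cost of an operation in the abstract cost units
  /// (TCC_Free, TCC_Basic, TCC_Expensive). The conservative defaults below
  /// only special-case casts that fold away and integer/FP division and
  /// remainder, which are expensive on virtually every target.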
  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    switch (Opcode) {
    default:
      // By default, just classify everything as 'basic'.
      return TTI::TCC_Basic;

    case Instruction::GetElementPtr:
      llvm_unreachable("Use getGEPCost for GEP operations!");

    case Instruction::BitCast:
      assert(OpTy && "Cast instructions must provide the operand type");
      if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
        // Identity and pointer-to-pointer casts are free.
        return TTI::TCC_Free;

      // Otherwise, the default basic cost is used.
      return TTI::TCC_Basic;

    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem:
      return TTI::TCC_Expensive;

    case Instruction::IntToPtr: {
      // An inttoptr cast is free so long as the input is a legal integer type
      // which doesn't contain values outside the range of a pointer.
      unsigned OpSize = OpTy->getScalarSizeInBits();
      if (DL.isLegalInteger(OpSize) &&
          OpSize <= DL.getPointerTypeSizeInBits(Ty))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::PtrToInt: {
      // A ptrtoint cast is free so long as the result is large enough to store
      // the pointer, and a legal integer type.
      unsigned DestSize = Ty->getScalarSizeInBits();
      if (DL.isLegalInteger(DestSize) &&
          DestSize >= DL.getPointerTypeSizeInBits(OpTy))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::Trunc:
      // trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      if (DL.isLegalInteger(DL.getTypeSizeInBits(Ty)))
        return TTI::TCC_Free;

      return TTI::TCC_Basic;
    }
  }

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    // In the basic model, we just assume that all-constant GEPs will be folded
    // into their uses via addressing modes.
    for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
      if (!isa<Constant>(Operands[Idx]))
        return TTI::TCC_Basic;

    return TTI::TCC_Free;
  }
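
  /// With no target information, assume no jump table will be built
  /// (JTSize == 0) and that every case forms its own cluster.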
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize) {
    JTSize = 0;
    return SI.getNumCases();
  }

  int getExtCost(const Instruction *I, const Value *Src) {
    return TTI::TCC_Basic;
  }

  unsigned getCallCost(FunctionType *FTy, int NumArgs) {
    assert(FTy && "FunctionType must be provided to this routine.");

    // The target-independent implementation just measures the size of the
    // function by approximating that each argument will take on average one
    // instruction to prepare.

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = FTy->getNumParams();
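
    // E.g., a call with three explicit arguments is modeled as 4 * TCC_Basic:
    // one unit to prepare each argument plus one for the call itself.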
    return TTI::TCC_Basic * (NumArgs + 1);
  }

  unsigned getInliningThresholdMultiplier() { return 1; }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    switch (IID) {
    default:
      // Intrinsics rarely (if ever) have normal argument setup constraints.
      // Model them as having a basic instruction cost.
      // FIXME: This is wrong for libc intrinsics.
      return TTI::TCC_Basic;

    case Intrinsic::annotation:
    case Intrinsic::assume:
    case Intrinsic::sideeffect:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::dbg_label:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
    case Intrinsic::is_constant:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
    case Intrinsic::experimental_gc_result:
    case Intrinsic::experimental_gc_relocate:
    case Intrinsic::coro_alloc:
    case Intrinsic::coro_begin:
    case Intrinsic::coro_free:
    case Intrinsic::coro_end:
    case Intrinsic::coro_frame:
    case Intrinsic::coro_size:
    case Intrinsic::coro_suspend:
    case Intrinsic::coro_param:
    case Intrinsic::coro_subfn_addr:
      // These intrinsics don't actually represent code after lowering.
      return TTI::TCC_Free;
    }
  }
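
  /// Divergence hooks: by default assume fully uniform control flow (no
  /// SIMT divergence); GPU-style targets override these.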
  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isAlwaysUniform(const Value *V) { return false; }

  unsigned getFlatAddressSpace() {
    return -1;
  }

  bool isLoweredToCall(const Function *F) {
    assert(F && "A concrete function must be provided to this routine.");

    // FIXME: These should almost certainly not be handled here, and instead
    // handled with the help of TLI or the target itself. This was largely
    // ported from existing analysis heuristics here so that such refactorings
    // can take place in the future.

    if (F->isIntrinsic())
      return false;

    if (F->hasLocalLinkage() || !F->hasName())
      return true;

    StringRef Name = F->getName();

    // These will all likely lower to a single selection DAG node.
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
        Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
        Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
      return false;

    // These are all likely to be optimized into something smaller.
    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" ||
        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
        Name == "llabs")
      return false;

    return true;
  }

  void getUnrollingPreferences(Loop *, ScalarEvolution &,
                               TTI::UnrollingPreferences &) {}

  bool isLegalAddImmediate(int64_t Imm) { return false; }

  bool isLegalICmpImmediate(int64_t Imm) { return false; }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace, Instruction *I = nullptr) {
    // Guess that only reg and reg+reg addressing is allowed. This heuristic is
    // taken from the implementation of LSR.
    return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
  }
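
  /// Compares LSR candidate costs lexicographically: register pressure first,
  /// then add-rec cost, IV multiplications, base adds, scale cost, immediate
  /// cost, and setup cost last.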
  bool isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) {
    return std::tie(C1.NumRegs, C1.AddRecCost, C1.NumIVMuls, C1.NumBaseAdds,
                    C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.NumRegs, C2.AddRecCost, C2.NumIVMuls, C2.NumBaseAdds,
                    C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  }

  bool canMacroFuseCmp() { return false; }

  bool shouldFavorPostInc() const { return false; }

  bool isLegalMaskedStore(Type *DataType) { return false; }

  bool isLegalMaskedLoad(Type *DataType) { return false; }

  bool isLegalMaskedScatter(Type *DataType) { return false; }

  bool isLegalMaskedGather(Type *DataType) { return false; }

  bool hasDivRemOp(Type *DataType, bool IsSigned) { return false; }

  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) { return false; }

  bool prefersVectorizedAddressing() { return true; }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    // Guess that all legal addressing modes are free.
    if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                              Scale, AddrSpace))
      return 0;
    return -1;
  }

  bool LSRWithInstrQueries() { return false; }

  bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }

  bool isProfitableToHoist(Instruction *I) { return true; }

  bool useAA() { return false; }

  bool isTypeLegal(Type *Ty) { return false; }

  unsigned getJumpBufAlignment() { return 0; }

  unsigned getJumpBufSize() { return 0; }

  bool shouldBuildLookupTables() { return true; }
  bool shouldBuildLookupTablesForConstant(Constant *C) { return true; }

  bool useColdCCForColdCall(Function &F) { return false; }

  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    return 0;
  }

  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) { return 0; }

  bool supportsEfficientVectorElementLoadStore() { return false; }

  bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }

  const TTI::MemCmpExpansionOptions *enableMemCmpExpansion(
      bool IsZeroCmp) const {
    return nullptr;
  }

  bool enableInterleavedAccessVectorization() { return false; }

  bool enableMaskedInterleavedAccessVectorization() { return false; }

  bool isFPVectorizationPotentiallyUnsafe() { return false; }

  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth,
                                      unsigned AddressSpace,
                                      unsigned Alignment,
                                      bool *Fast) { return false; }

  TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
    return TTI::PSK_Software;
  }

  bool haveFastSqrt(Type *Ty) { return false; }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { return true; }

  unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }

  int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                            Type *Ty) {
    return 0;
  }

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getNumberOfRegisters(bool Vector) { return 8; }

  unsigned getRegisterBitWidth(bool Vector) const { return 32; }

  unsigned getMinVectorRegisterBitWidth() { return 128; }

  bool shouldMaximizeVectorBandwidth(bool OptSize) const { return false; }

  unsigned getMinimumVF(unsigned ElemWidth) const { return 0; }

  bool
  shouldConsiderAddressTypePromotion(const Instruction &I,
                                     bool &AllowPromotionWithoutCommonHeader) {
    AllowPromotionWithoutCommonHeader = false;
    return false;
  }
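
  /// Cache geometry is unknown by default; targets plumb real line sizes,
  /// capacities, and associativities through these hooks so that prefetching
  /// and blocking heuristics have something to work with.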
  unsigned getCacheLineSize() { return 0; }

  llvm::Optional<unsigned> getCacheSize(TargetTransformInfo::CacheLevel Level) {
    switch (Level) {
    case TargetTransformInfo::CacheLevel::L1D:
      LLVM_FALLTHROUGH;
    case TargetTransformInfo::CacheLevel::L2D:
      return llvm::Optional<unsigned>();
    }
    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
  }

  llvm::Optional<unsigned> getCacheAssociativity(
      TargetTransformInfo::CacheLevel Level) {
    switch (Level) {
    case TargetTransformInfo::CacheLevel::L1D:
      LLVM_FALLTHROUGH;
    case TargetTransformInfo::CacheLevel::L2D:
      return llvm::Optional<unsigned>();
    }
    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
  }

  unsigned getPrefetchDistance() { return 0; }

  unsigned getMinPrefetchStride() { return 1; }

  unsigned getMaxPrefetchIterationsAhead() { return UINT_MAX; }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
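
  /// The instruction-cost hooks below return flat nominal costs; the real
  /// per-target modeling is layered on top in BasicTTIImplBase and the
  /// target TTI implementations.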
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                  TTI::OperandValueKind Opd1Info,
                                  TTI::OperandValueKind Opd2Info,
                                  TTI::OperandValueProperties Opd1PropInfo,
                                  TTI::OperandValueProperties Opd2PropInfo,
                                  ArrayRef<const Value *> Args) {
    return 1;
  }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
                          Type *SubTp) {
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                            const Instruction *I) { return 1; }

  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return 1;
  }

  unsigned getCFInstrCost(unsigned Opcode) { return 1; }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                              const Instruction *I) {
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    return 1;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace, const Instruction *I) {
    return 1;
  }

  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) {
    return 1;
  }

  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                                  bool VariableMask,
                                  unsigned Alignment) {
    return 1;
  }

  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment, unsigned AddressSpace,
                                      bool UseMaskForCond = false,
                                      bool UseMaskForGaps = false) {
    return 1;
  }

  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Type *> Tys, FastMathFlags FMF,
                                 unsigned ScalarizationCostPassed) {
    return 1;
  }

  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Value *> Args, FastMathFlags FMF,
                                 unsigned VF) {
    return 1;
  }

  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 1;
  }

  unsigned getNumberOfParts(Type *Tp) { return 0; }

  unsigned getAddressComputationCost(Type *Tp, ScalarEvolution *,
                                     const SCEV *) {
    return 0;
  }

  unsigned getArithmeticReductionCost(unsigned, Type *, bool) { return 1; }

  unsigned getMinMaxReductionCost(Type *, Type *, bool, bool) { return 1; }

  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
    return false;
  }

  unsigned getAtomicMemIntrinsicMaxElementSize() const {
    // Note for overrides: You must ensure for all element unordered-atomic
    // memory intrinsics that all power-of-2 element sizes up to, and
    // including, the return value of this method have a corresponding
    // runtime lib call. These runtime lib call definitions can be found
    // in RuntimeLibcalls.h.
    return 0;
  }

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) {
    return nullptr;
  }

  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                  unsigned SrcAlign, unsigned DestAlign) const {
    return Type::getInt8Ty(Context);
  }

  void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
                                         LLVMContext &Context,
                                         unsigned RemainingBytes,
                                         unsigned SrcAlign,
                                         unsigned DestAlign) const {
    for (unsigned i = 0; i != RemainingBytes; ++i)
      OpsOut.push_back(Type::getInt8Ty(Context));
  }
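
  /// By default a callee is inline- and ABI-compatible with its caller only
  /// when both carry identical "target-cpu" and "target-features" attributes.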
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }

  bool areFunctionArgsABICompatible(const Function *Caller,
                                    const Function *Callee,
                                    SmallPtrSetImpl<Argument *> &Args) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }

  bool isIndexedLoadLegal(TTI::MemIndexedMode Mode, Type *Ty,
                          const DataLayout &DL) const {
    return false;
  }

  bool isIndexedStoreLegal(TTI::MemIndexedMode Mode, Type *Ty,
                           const DataLayout &DL) const {
    return false;
  }

  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const { return 128; }

  bool isLegalToVectorizeLoad(LoadInst *LI) const { return true; }

  bool isLegalToVectorizeStore(StoreInst *SI) const { return true; }

  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const {
    return true;
  }

  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const {
    return true;
  }

  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const {
    return VF;
  }

  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const {
    return VF;
  }

  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             TTI::ReductionFlags Flags) const {
    return false;
  }

  bool shouldExpandReduction(const IntrinsicInst *II) const {
    return true;
  }

protected:
  // Obtain the minimum required size to hold the value (without the sign).
  // In case of a vector it returns the min required size for one element.
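  // For example, for <4 x i32> <i32 1, i32 2, i32 -4, i32 8> this returns 4
  // (the magnitude of 8 needs four bits) and sets isSigned because of -4.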
  unsigned minRequiredElementSize(const Value *Val, bool &isSigned) {
    if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
      const auto *VectorValue = cast<Constant>(Val);

      // In case of a vector need to pick the max between the min
      // required size for each element
      auto *VT = cast<VectorType>(Val->getType());

      // Assume unsigned elements
      isSigned = false;

      // The max required size is the total vector width divided by num
      // of elements in the vector
      unsigned MaxRequiredSize = VT->getBitWidth() / VT->getNumElements();

      unsigned MinRequiredSize = 0;
      for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
        if (auto *IntElement =
                dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
          bool signedElement = IntElement->getValue().isNegative();
          // Get the element min required size.
          unsigned ElementMinRequiredSize =
              IntElement->getValue().getMinSignedBits() - 1;
          // In case one element is signed then all the vector is signed.
          isSigned |= signedElement;
          // Save the max required bit size between all the elements.
          MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
        } else {
          // Not an int constant element; be conservative.
          return MaxRequiredSize;
        }
      }
      return MinRequiredSize;
    }

    if (const auto *CI = dyn_cast<ConstantInt>(Val)) {
      isSigned = CI->getValue().isNegative();
      return CI->getValue().getMinSignedBits() - 1;
    }

    if (const auto *Cast = dyn_cast<SExtInst>(Val)) {
      isSigned = true;
      return Cast->getSrcTy()->getScalarSizeInBits() - 1;
    }

    if (const auto *Cast = dyn_cast<ZExtInst>(Val)) {
      isSigned = false;
      return Cast->getSrcTy()->getScalarSizeInBits();
    }

    isSigned = false;
    return Val->getType()->getScalarSizeInBits();
  }

  bool isStridedAccess(const SCEV *Ptr) {
    return Ptr && isa<SCEVAddRecExpr>(Ptr);
  }

  const SCEVConstant *getConstantStrideStep(ScalarEvolution *SE,
                                            const SCEV *Ptr) {
    if (!isStridedAccess(Ptr))
      return nullptr;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
    return dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*SE));
  }

  bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr,
                                       int64_t MergeDistance) {
    const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
    if (!Step)
      return false;
    APInt StrideVal = Step->getAPInt();
    if (StrideVal.getBitWidth() > 64)
      return false;
    // FIXME: Need to take absolute value for negative stride case.
    return StrideVal.getSExtValue() < MergeDistance;
  }
};
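
// A minimal sketch of how these mix-ins are consumed; "MyTTIImpl" is a
// hypothetical target, but the CRTP pattern below is how real targets hook
// in (typically via BasicTTIImplBase, which itself derives from
// TargetTransformInfoImplCRTPBase):
//
//   class MyTTIImpl : public TargetTransformInfoImplCRTPBase<MyTTIImpl> {
//   public:
//     explicit MyTTIImpl(const DataLayout &DL)
//         : TargetTransformInfoImplCRTPBase<MyTTIImpl>(DL) {}
//
//     // Override only the hooks that matter for this target; everything
//     // else falls back to the conservative defaults above.
//     bool isLegalAddImmediate(int64_t Imm) { return isInt<12>(Imm); }
//   };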

/// CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
private:
  typedef TargetTransformInfoImplBase BaseT;

protected:
  explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}

public:
  using BaseT::getCallCost;

  unsigned getCallCost(const Function *F, int NumArgs) {
    assert(F && "A concrete function must be provided to this routine.");

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = F->arg_size();

    if (Intrinsic::ID IID = F->getIntrinsicID()) {
      FunctionType *FTy = F->getFunctionType();
      SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
      return static_cast<T *>(this)
          ->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys);
    }

    if (!static_cast<T *>(this)->isLoweredToCall(F))
      return TTI::TCC_Basic; // Give a basic cost if it will be lowered
                             // directly.

    return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs);
  }

  unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments) {
    // Simply delegate to generic handling of the call.
    // FIXME: We should use instsimplify or something else to catch calls which
    // will constant fold with these arguments.
    return static_cast<T *>(this)->getCallCost(F, Arguments.size());
  }

  using BaseT::getGEPCost;

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    const GlobalValue *BaseGV = nullptr;
    if (Ptr != nullptr) {
      // TODO: will remove this when pointers have an opaque type.
      assert(Ptr->getType()->getScalarType()->getPointerElementType() ==
                 PointeeType &&
             "explicit pointee type doesn't match operand's pointee type");
      BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
    }
    bool HasBaseReg = (BaseGV == nullptr);

    auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
    APInt BaseOffset(PtrSizeBits, 0);
    int64_t Scale = 0;

    auto GTI = gep_type_begin(PointeeType, Operands);
    Type *TargetType = nullptr;

    // Handle the case where the GEP instruction has a single operand,
    // the basis, therefore TargetType is a nullptr.
    if (Operands.empty())
      return !BaseGV ? TTI::TCC_Free : TTI::TCC_Basic;

    for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
      TargetType = GTI.getIndexedType();
      // We assume that the cost of Scalar GEP with constant index and the
      // cost of Vector GEP with splat constant index are the same.
      const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
      if (!ConstIdx)
        if (auto Splat = getSplatValue(*I))
          ConstIdx = dyn_cast<ConstantInt>(Splat);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For structures the index is always splat or scalar constant
        assert(ConstIdx && "Unexpected GEP index");
        uint64_t Field = ConstIdx->getZExtValue();
        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
      } else {
        int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstIdx) {
          BaseOffset +=
              ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
        } else {
          // Needs scale register.
          if (Scale != 0)
            // No addressing mode takes two scale registers.
            return TTI::TCC_Basic;
          Scale = ElementSize;
        }
      }
    }

    // Assumes the address space is 0 when Ptr is nullptr.
    unsigned AS =
        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());

    if (static_cast<T *>(this)->isLegalAddressingMode(
            TargetType, const_cast<GlobalValue *>(BaseGV),
            BaseOffset.sextOrTrunc(64).getSExtValue(), HasBaseReg, Scale, AS))
      return TTI::TCC_Free;
    return TTI::TCC_Basic;
  }

  using BaseT::getIntrinsicCost;

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    // Delegate to the generic intrinsic handling code. This mostly provides an
    // opportunity for targets to (for example) special case the cost of
    // certain intrinsics based on constants used as arguments.
    SmallVector<Type *, 8> ParamTys;
    ParamTys.reserve(Arguments.size());
    for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
      ParamTys.push_back(Arguments[Idx]->getType());
    return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys);
  }
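
  /// Estimate the cost of an arbitrary user (instruction or constant
  /// expression) by dispatching to the specialized hooks above based on what
  /// kind of operation the user performs.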
  unsigned getUserCost(const User *U, ArrayRef<const Value *> Operands) {
    if (isa<PHINode>(U))
      return TTI::TCC_Free; // Model all PHI nodes as free.

    // Static alloca doesn't generate target instructions.
    if (auto *A = dyn_cast<AllocaInst>(U))
      if (A->isStaticAlloca())
        return TTI::TCC_Free;

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
      return static_cast<T *>(this)->getGEPCost(GEP->getSourceElementType(),
                                                GEP->getPointerOperand(),
                                                Operands.drop_front());
    }

    if (auto CS = ImmutableCallSite(U)) {
      const Function *F = CS.getCalledFunction();
      if (!F) {
        // Just use the called value type.
        Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
        return static_cast<T *>(this)
            ->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
      }

      SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
      return static_cast<T *>(this)->getCallCost(F, Arguments);
    }

    if (const CastInst *CI = dyn_cast<CastInst>(U)) {
      // Result of a cmp instruction is often extended (to be used by other
      // cmp instructions, logical or return instructions). These are usually
      // nop on most sane targets.
      if (isa<CmpInst>(CI->getOperand(0)))
        return TTI::TCC_Free;
      if (isa<SExtInst>(CI) || isa<ZExtInst>(CI) || isa<FPExtInst>(CI))
        return static_cast<T *>(this)->getExtCost(CI, Operands.back());
    }

    return static_cast<T *>(this)->getOperationCost(
        Operator::getOpcode(U), U->getType(),
        U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
  }
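
  /// A crude latency model: free operations cost nothing, loads are modeled
  /// as 4 cycles, real calls as 40, and everything else as a small constant.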
  int getInstructionLatency(const Instruction *I) {
    SmallVector<const Value *, 4> Operands(I->value_op_begin(),
                                           I->value_op_end());
    if (getUserCost(I, Operands) == TTI::TCC_Free)
      return 0;

    if (isa<LoadInst>(I))
      return 4;

    Type *DstTy = I->getType();

    // Usually an intrinsic is a simple instruction.
    // A real function call is much slower.
    if (auto *CI = dyn_cast<CallInst>(I)) {
      const Function *F = CI->getCalledFunction();
      if (!F || static_cast<T *>(this)->isLoweredToCall(F))
        return 40;
      // Some intrinsics return a value and a flag, we use the value type
      // to decide its latency.
      if (StructType *StructTy = dyn_cast<StructType>(DstTy))
        DstTy = StructTy->getElementType(0);
      // Fall through to simple instructions.
    }

    if (VectorType *VectorTy = dyn_cast<VectorType>(DstTy))
      DstTy = VectorTy->getElementType();
    if (DstTy->isFloatingPointTy())