//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides helpers for the implementation of
/// a TargetTransformInfo-conforming class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H

#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"

namespace llvm {

/// \brief Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
  typedef TargetTransformInfo TTI;

  const DataLayout &DL;

  explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
      : DL(Arg.DL) {}
  TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}

  const DataLayout &getDataLayout() const { return DL; }
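
  // Note: every hook below returns a deliberately conservative default, so a
  // target that overrides nothing still gets correct (if pessimistic)
  // answers. Concrete targets refine these results, typically by deriving
  // from BasicTTIImplBase rather than from this class directly.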

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    switch (Opcode) {
    default:
      // By default, just classify everything as 'basic'.
      return TTI::TCC_Basic;

    case Instruction::GetElementPtr:
      llvm_unreachable("Use getGEPCost for GEP operations!");

    case Instruction::BitCast:
      assert(OpTy && "Cast instructions must provide the operand type");
      if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
        // Identity and pointer-to-pointer casts are free.
        return TTI::TCC_Free;

      // Otherwise, the default basic cost is used.
      return TTI::TCC_Basic;

    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem:
      return TTI::TCC_Expensive;

    case Instruction::IntToPtr: {
      // An inttoptr cast is free so long as the input is a legal integer type
      // which doesn't contain values outside the range of a pointer.
      unsigned OpSize = OpTy->getScalarSizeInBits();
      if (DL.isLegalInteger(OpSize) &&
          OpSize <= DL.getPointerTypeSizeInBits(Ty))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::PtrToInt: {
      // A ptrtoint cast is free so long as the result is large enough to
      // store the pointer, and a legal integer type.
      unsigned DestSize = Ty->getScalarSizeInBits();
      if (DL.isLegalInteger(DestSize) &&
          DestSize >= DL.getPointerTypeSizeInBits(OpTy))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::Trunc:
      // trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      if (DL.isLegalInteger(DL.getTypeSizeInBits(Ty)))
        return TTI::TCC_Free;

      return TTI::TCC_Basic;
    }
  }
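
  // Illustrative note: on a target with 64-bit pointers where i64 is a legal
  // integer type, the logic above models "ptrtoint i8* %p to i64" as
  // TTI::TCC_Free, while the truncating form "ptrtoint i8* %p to i32" costs
  // TTI::TCC_Basic because i32 cannot hold the full pointer value.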

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    // In the basic model, we just assume that all-constant GEPs will be
    // folded into their uses via addressing modes.
    for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
      if (!isa<Constant>(Operands[Idx]))
        return TTI::TCC_Basic;

    return TTI::TCC_Free;
  }
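
  // Illustrative note: under this model "getelementptr i32, i32* %p, i64 4"
  // is free (the constant offset is assumed to fold into an addressing
  // mode), whereas "getelementptr i32, i32* %p, i64 %i" costs TTI::TCC_Basic.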

  unsigned getCallCost(FunctionType *FTy, int NumArgs) {
    assert(FTy && "FunctionType must be provided to this routine.");

    // The target-independent implementation just measures the size of the
    // function by approximating that each argument will take on average one
    // instruction to prepare.

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = FTy->getNumParams();

    return TTI::TCC_Basic * (NumArgs + 1);
  }
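
  // Illustrative note: with the formula above, a direct call passing three
  // arguments is modeled as 4 * TTI::TCC_Basic (one unit per argument plus
  // one for the call itself).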

  unsigned getInliningThresholdMultiplier() { return 1; }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    switch (IID) {
    default:
      // Intrinsics rarely (if ever) have normal argument setup constraints.
      // Model them as having a basic instruction cost.
      // FIXME: This is wrong for libc intrinsics.
      return TTI::TCC_Basic;

    case Intrinsic::annotation:
    case Intrinsic::assume:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
    case Intrinsic::experimental_gc_result:
    case Intrinsic::experimental_gc_relocate:
    case Intrinsic::coro_alloc:
    case Intrinsic::coro_begin:
    case Intrinsic::coro_free:
    case Intrinsic::coro_end:
    case Intrinsic::coro_frame:
    case Intrinsic::coro_size:
    case Intrinsic::coro_suspend:
    case Intrinsic::coro_param:
    case Intrinsic::coro_subfn_addr:
      // These intrinsics don't actually represent code after lowering.
      return TTI::TCC_Free;
    }
  }

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isLoweredToCall(const Function *F) {
    // FIXME: These should almost certainly not be handled here, and instead
    // handled with the help of TLI or the target itself. This was largely
    // ported from existing analysis heuristics here so that such refactorings
    // can take place in the future.

    if (F->isIntrinsic())
      return false;

    if (F->hasLocalLinkage() || !F->hasName())
      return true;

    StringRef Name = F->getName();

    // These will all likely lower to a single selection DAG node.
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
        Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
        Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
      return false;

    // These are all likely to be optimized into something smaller.
    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" ||
        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
        Name == "llabs")
      return false;

    return true;
  }

  void getUnrollingPreferences(Loop *, TTI::UnrollingPreferences &) {}

  bool isLegalAddImmediate(int64_t Imm) { return false; }

  bool isLegalICmpImmediate(int64_t Imm) { return false; }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace) {
    // Guess that only reg and reg+reg addressing is allowed. This heuristic is
    // taken from the implementation of LSR.
    return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
  }

  bool isLegalMaskedStore(Type *DataType) { return false; }

  bool isLegalMaskedLoad(Type *DataType) { return false; }

  bool isLegalMaskedScatter(Type *DataType) { return false; }

  bool isLegalMaskedGather(Type *DataType) { return false; }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    // Guess that all legal addressing modes are free.
    if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                              Scale, AddrSpace))
      return 0;
    return -1;
  }

  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) { return true; }

  bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }

  bool isProfitableToHoist(Instruction *I) { return true; }

  bool isTypeLegal(Type *Ty) { return false; }

  unsigned getJumpBufAlignment() { return 0; }

  unsigned getJumpBufSize() { return 0; }

  bool shouldBuildLookupTables() { return true; }
  bool shouldBuildLookupTablesForConstant(Constant *C) { return true; }

  bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }

  bool enableInterleavedAccessVectorization() { return false; }

  bool isFPVectorizationPotentiallyUnsafe() { return false; }

  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth,
                                      unsigned AddressSpace,
                                      unsigned Alignment,
                                      bool *Fast) { return false; }

  TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
    return TTI::PSK_Software;
  }

  bool haveFastSqrt(Type *Ty) { return false; }

  unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }

  int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                            Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getNumberOfRegisters(bool Vector) { return 8; }

  unsigned getRegisterBitWidth(bool Vector) { return 32; }

  unsigned getCacheLineSize() { return 0; }

  unsigned getPrefetchDistance() { return 0; }

  unsigned getMinPrefetchStride() { return 1; }

  unsigned getMaxPrefetchIterationsAhead() { return UINT_MAX; }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                  TTI::OperandValueKind Opd1Info,
                                  TTI::OperandValueKind Opd2Info,
                                  TTI::OperandValueProperties Opd1PropInfo,
                                  TTI::OperandValueProperties Opd2PropInfo,
                                  ArrayRef<const Value *> Args) {
    return 1;
  }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
                          Type *SubTp) {
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) { return 1; }

  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return 1;
  }

  unsigned getCFInstrCost(unsigned Opcode) { return 1; }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    return 1;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) {
    return 1;
  }

  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) {
    return 1;
  }

  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                                  bool VariableMask,
                                  unsigned Alignment) {
    return 1;
  }

  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
    return 1;
  }

  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Type *> Tys, FastMathFlags FMF) {
    return 1;
  }
  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Value *> Args, FastMathFlags FMF) {
    return 1;
  }

  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 1;
  }

  unsigned getNumberOfParts(Type *Tp) { return 0; }

  unsigned getAddressComputationCost(Type *Tp, ScalarEvolution *,
                                     const SCEV *) {
    return 0;
  }

  unsigned getReductionCost(unsigned, Type *, bool) { return 1; }

  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
    return false;
  }

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) {
    return nullptr;
  }

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }

  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const { return 128; }

  bool isLegalToVectorizeLoad(LoadInst *LI) const { return true; }

  bool isLegalToVectorizeStore(StoreInst *SI) const { return true; }

  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const {
    return true;
  }

  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const {
    return true;
  }

  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const {
    return VF;
  }

  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const {
    return VF;
  }

  // Obtain the minimum required size to hold the value (without the sign).
  // In case of a vector it returns the min required size for one element.
  unsigned minRequiredElementSize(const Value *Val, bool &isSigned) {
    if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
      const auto *VectorValue = cast<Constant>(Val);

      // In case of a vector we need to pick the max between the min
      // required size for each element.
      auto *VT = cast<VectorType>(Val->getType());

      // Assume unsigned elements.
      isSigned = false;

      // The max required size is the total vector width divided by num
      // of elements in the vector.
      unsigned MaxRequiredSize = VT->getBitWidth() / VT->getNumElements();

      unsigned MinRequiredSize = 0;
      for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
        if (auto *IntElement =
                dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
          bool signedElement = IntElement->getValue().isNegative();
          // Get the element min required size.
          unsigned ElementMinRequiredSize =
              IntElement->getValue().getMinSignedBits() - 1;
          // In case one element is signed then all the vector is signed.
          isSigned |= signedElement;
          // Save the max required bit size between all the elements.
          MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
        } else {
          // Not an int constant element.
          return MaxRequiredSize;
        }
      }
      return MinRequiredSize;
    }

    if (const auto *CI = dyn_cast<ConstantInt>(Val)) {
      isSigned = CI->getValue().isNegative();
      return CI->getValue().getMinSignedBits() - 1;
    }

    if (const auto *Cast = dyn_cast<SExtInst>(Val)) {
      isSigned = true;
      return Cast->getSrcTy()->getScalarSizeInBits() - 1;
    }

    if (const auto *Cast = dyn_cast<ZExtInst>(Val)) {
      isSigned = false;
      return Cast->getSrcTy()->getScalarSizeInBits();
    }

    isSigned = false;
    return Val->getType()->getScalarSizeInBits();
  }
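
  // Illustrative note: for the constant vector <2 x i16> <i16 3, i16 -4>,
  // each element needs getMinSignedBits() - 1 == 2 bits, so the function
  // returns 2 and sets isSigned to true (the -4 element is negative).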

  bool isStridedAccess(const SCEV *Ptr) {
    return Ptr && isa<SCEVAddRecExpr>(Ptr);
  }

  const SCEVConstant *getConstantStrideStep(ScalarEvolution *SE,
                                            const SCEV *Ptr) {
    if (!isStridedAccess(Ptr))
      return nullptr;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
    return dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*SE));
  }

  bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr,
                                       int64_t MergeDistance) {
    const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
    if (!Step)
      return false;
    APInt StrideVal = Step->getAPInt();
    if (StrideVal.getBitWidth() > 64)
      return false;
    // FIXME: Need to take the absolute value for the negative stride case.
    return StrideVal.getSExtValue() < MergeDistance;
  }
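
  // Illustrative note: for an access whose pointer SCEV is {%base,+,8}, the
  // step recurrence is the SCEVConstant 8, so
  // isConstantStridedAccessLessThan(SE, Ptr, 16) returns true.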
};

/// \brief CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
private:
  typedef TargetTransformInfoImplBase BaseT;

protected:
  explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}
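
  // A minimal sketch of how a (hypothetical) target consumes this mix-in;
  // the name MyTTIImpl is illustrative, not part of LLVM:
  //
  //   struct MyTTIImpl : TargetTransformInfoImplCRTPBase<MyTTIImpl> {
  //     explicit MyTTIImpl(const DataLayout &DL)
  //         : TargetTransformInfoImplCRTPBase<MyTTIImpl>(DL) {}
  //     // Shadow any hook to refine the answer for this target.
  //     bool isLoweredToCall(const Function *F) { return true; }
  //   };
  //
  // The static_cast<T *>(this) dispatch used below ensures such overrides
  // are honored even when invoked from the generic cost logic in this class.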

public:
  using BaseT::getCallCost;

  unsigned getCallCost(const Function *F, int NumArgs) {
    assert(F && "A concrete function must be provided to this routine.");

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = F->arg_size();

    if (Intrinsic::ID IID = F->getIntrinsicID()) {
      FunctionType *FTy = F->getFunctionType();
      SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
      return static_cast<T *>(this)
          ->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys);
    }

    if (!static_cast<T *>(this)->isLoweredToCall(F))
      return TTI::TCC_Basic; // Give a basic cost if it will be lowered
                             // directly.

    return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs);
  }

  unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments) {
    // Simply delegate to generic handling of the call.
    // FIXME: We should use instsimplify or something else to catch calls which
    // will constant fold with these arguments.
    return static_cast<T *>(this)->getCallCost(F, Arguments.size());
  }

  using BaseT::getGEPCost;

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    const GlobalValue *BaseGV = nullptr;
    if (Ptr != nullptr) {
      // TODO: will remove this when pointers have an opaque type.
      assert(Ptr->getType()->getScalarType()->getPointerElementType() ==
                 PointeeType &&
             "explicit pointee type doesn't match operand's pointee type");
      BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
    }
    bool HasBaseReg = (BaseGV == nullptr);
    int64_t BaseOffset = 0;
    int64_t Scale = 0;

    auto GTI = gep_type_begin(PointeeType, Operands);
    Type *TargetType = nullptr;
    for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
      TargetType = GTI.getIndexedType();
      // We assume that the cost of Scalar GEP with constant index and the
      // cost of Vector GEP with splat constant index are the same.
      const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
      if (!ConstIdx)
        if (auto Splat = getSplatValue(*I))
          ConstIdx = dyn_cast<ConstantInt>(Splat);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For structures the index is always splat or scalar constant.
        assert(ConstIdx && "Unexpected GEP index");
        uint64_t Field = ConstIdx->getZExtValue();
        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
      } else {
        int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstIdx)
          BaseOffset += ConstIdx->getSExtValue() * ElementSize;
        else {
          // Needs scale register.
          if (Scale != 0)
            // No addressing mode takes two scale registers.
            return TTI::TCC_Basic;
          Scale = ElementSize;
        }
      }
    }

    // Assumes the address space is 0 when Ptr is nullptr.
    unsigned AS =
        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
    if (static_cast<T *>(this)->isLegalAddressingMode(
            TargetType, const_cast<GlobalValue *>(BaseGV), BaseOffset,
            HasBaseReg, Scale, AS))
      return TTI::TCC_Free;
    return TTI::TCC_Basic;
  }
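
  // Illustrative note: a GEP such as "getelementptr {i32, i32},
  // {i32, i32}* %p, i64 0, i32 1" reduces to a base register plus a constant
  // field offset, so it is modeled as free whenever the target's
  // isLegalAddressingMode accepts a base-register-plus-offset form.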

  using BaseT::getIntrinsicCost;

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    // Delegate to the generic intrinsic handling code. This mostly provides an
    // opportunity for targets to (for example) special case the cost of
    // certain intrinsics based on constants used as arguments.
    SmallVector<Type *, 8> ParamTys;
    ParamTys.reserve(Arguments.size());
    for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
      ParamTys.push_back(Arguments[Idx]->getType());
    return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys);
  }

  unsigned getUserCost(const User *U) {
    if (isa<PHINode>(U))
      return TTI::TCC_Free; // Model all PHI nodes as free.

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
      SmallVector<Value *, 4> Indices(GEP->idx_begin(), GEP->idx_end());
      return static_cast<T *>(this)->getGEPCost(
          GEP->getSourceElementType(), GEP->getPointerOperand(), Indices);
    }

    if (auto CS = ImmutableCallSite(U)) {
      const Function *F = CS.getCalledFunction();
      if (!F) {
        // Just use the called value type.
        Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
        return static_cast<T *>(this)
            ->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
      }

      SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
      return static_cast<T *>(this)->getCallCost(F, Arguments);
    }

    if (const CastInst *CI = dyn_cast<CastInst>(U)) {
      // The result of a cmp instruction is often extended (to be used by
      // other cmp instructions, logical or return instructions). These are
      // usually a nop on most sane targets.
      if (isa<CmpInst>(CI->getOperand(0)))
        return TTI::TCC_Free;
    }

    return static_cast<T *>(this)->getOperationCost(
        Operator::getOpcode(U), U->getType(),
        U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
  }
};

} // end namespace llvm

#endif