//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"

namespace llvm {

extern cl::opt<unsigned> PartialUnrollingThreshold;

/// \brief Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
///
/// In order to subclass it, your class must implement a getST() method to
/// return the subtarget, and a getTLI() method to return the target lowering.
/// We need these methods implemented in the derived class so that this class
/// doesn't have to duplicate storage for them.
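///
/// For illustration only (the target-specific type names below are
/// hypothetical), a minimal derived implementation looks roughly like the
/// concrete BasicTTIImpl defined at the end of this file:
/// \code
///   class MyTTIImpl : public BasicTTIImplBase<MyTTIImpl> {
///     friend BasicTTIImplBase<MyTTIImpl>;
///     const MySubtargetInfo *ST;
///     const MyTargetLowering *TLI;
///     const MySubtargetInfo *getST() const { return ST; }
///     const MyTargetLowering *getTLI() const { return TLI; }
///   public:
///     explicit MyTTIImpl(const TargetMachine *TM, const Function &F);
///   };
/// \endcode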
template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
  typedef TargetTransformInfoImplCRTPBase<T> BaseT;
  typedef TargetTransformInfo TTI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    assert(Ty->isVectorTy() && "Can only scalarize vectors");
    unsigned Cost = 0;

    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      if (Insert)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      if (Extract)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }

    return Cost;
  }

  /// Estimate the cost overhead of SK_Alternate shuffle.
  unsigned getAltShuffleOverhead(Type *Ty) {
    assert(Ty->isVectorTy() && "Can only shuffle vectors");
    unsigned Cost = 0;
    // Shuffle cost is equal to the cost of extracting an element from its
    // argument plus the cost of inserting it onto the result vector.
    //
    // e.g. a <4 x float> shuffle with a mask of <0,5,2,7> needs to extract
    // from index 0 of the first vector, index 1 of the second vector, index 2
    // of the first vector, and finally index 3 of the second vector, and then
    // insert them at indices <0,1,2,3> of the result vector.
    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }
    return Cost;
  }

  /// \brief Local query method delegates up to T which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// \brief Local query method delegates up to T which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }

protected:
  explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
      : BaseT(DL) {}
  using TargetTransformInfoImplBase::DL;

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  BasicTTIImplBase(const BasicTTIImplBase &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)) {}
  BasicTTIImplBase(BasicTTIImplBase &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))) {}

  /// \name Scalar TTI Implementations

  bool allowsMisalignedMemoryAccesses(unsigned BitWidth, unsigned AddressSpace,
                                      unsigned Alignment, bool *Fast) const {
    MVT M = MVT::getIntegerVT(BitWidth);
    return getTLI()->allowsMisalignedMemoryAccesses(M, AddressSpace, Alignment,
                                                    Fast);
  }

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isLegalAddImmediate(int64_t imm) {
    return getTLI()->isLegalAddImmediate(imm);
  }

  bool isLegalICmpImmediate(int64_t imm) {
    return getTLI()->isLegalICmpImmediate(imm);
  }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace) {
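    // TargetLoweringBase::AddrMode describes an address of the form
    // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg; fill it in from the
    // query's components and let the target decide whether it is legal.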
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace);
  }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) {
    return getTLI()->isTruncateFree(Ty1, Ty2);
  }

  bool isProfitableToHoist(Instruction *I) {
    return getTLI()->isProfitableToHoist(I);
  }

  bool isTypeLegal(Type *Ty) {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isTypeLegal(VT);
  }

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    return BaseT::getGEPCost(PointeeType, Ptr, Operands);
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    if (IID == Intrinsic::cttz) {
      if (getTLI()->isCheapToSpeculateCttz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    if (IID == Intrinsic::ctlz) {
      if (getTLI()->isCheapToSpeculateCtlz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
  }

  unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }

  unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }
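
  // The following query uses jump-table (BR_JT) and indirect-branch (BRIND)
  // legality as a proxy for whether building switch lookup tables is
  // worthwhile on this target.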
  bool shouldBuildLookupTables() {
    const TargetLoweringBase *TLI = getTLI();
    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  bool haveFastSqrt(Type *Ty) {
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(DL, Ty);
    return TLI->isTypeLegal(VT) &&
           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
  }

  unsigned getFPOpCost(Type *Ty) {
    // By default, FP instructions are no more expensive than their integer
    // counterparts, since they are implemented in HW. Target-specific TTI can
    // override this.
    return TargetTransformInfo::TCC_Basic;
  }

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    const TargetLoweringBase *TLI = getTLI();

    switch (Opcode) {
    case Instruction::Trunc: {
      if (TLI->isTruncateFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    }
    case Instruction::ZExt: {
      if (TLI->isZExtFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    }
    }

    return BaseT::getOperationCost(Opcode, Ty, OpTy);
  }

  unsigned getInliningThresholdMultiplier() { return 1; }

  void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP) {
    // This unrolling functionality is target independent, but to provide some
    // motivation for its intended use, consider x86:
    //
    // According to the Intel 64 and IA-32 Architectures Optimization Reference
    // Manual, Intel Core models and later have a loop stream detector (and
    // associated uop queue) that can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
    //    taken, and none of them may be calls.
    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.
    //
    // According to the Software Optimization Guide for AMD Family 15h
    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
    // and loop buffer which can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have fewer than 16 branches.
    //  - The loop must have fewer than 40 uops in all executed loop branches.
    //
    // The number of taken branches in a loop is hard to estimate here, and
    // benchmarking has revealed that it is better not to be conservative when
    // estimating the branch count. As a result, we'll ignore the branch limits
    // until someone finds a case where it matters in practice.

    unsigned MaxOps;
    const TargetSubtargetInfo *ST = getST();
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    else
      return;

    // Scan the loop: don't unroll loops with calls.
    for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
         ++I) {
      BasicBlock *BB = *I;
      for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
        if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
          ImmutableCallSite CS(&*J);
          if (const Function *F = CS.getCalledFunction()) {
            if (!static_cast<T *>(this)->isLoweredToCall(F))
              continue;
          }
          return;
        }
    }

    // Enable runtime and partial unrolling up to the specified size.
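    // For example (illustrative numbers): a subtarget whose scheduling model
    // reports LoopMicroOpBufferSize = 28 will have partial and runtime
    // unrolling enabled with a threshold of 28.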
    UP.Partial = UP.Runtime = true;
    UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
  }

  /// \name Vector TTI Implementations

  unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 1; }

  unsigned getRegisterBitWidth(bool Vector) { return 32; }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None) {
    // Check if any of the operands are vector operands.
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
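    // LT.first is the legalization cost factor (roughly how many pieces the
    // type is split into; 1 when Ty maps to a single legal register), and
    // LT.second is the legalized machine value type.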

    bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    unsigned OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      return LT.first * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2 * OpCost;
    }

    // Else, assume that we need to scalarize this op.
    // TODO: If one of the types gets legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
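    // For example (illustrative): a <4 x i32> division on a target with no
    // vector divide instruction costs four scalar divisions plus the overhead
    // of inserting and extracting the four lanes.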
    if (Ty->isVectorTy()) {
      unsigned Num = Ty->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)
                          ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Ty, true, true) + Num * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp) {
    if (Kind == TTI::SK_Alternate)
      return getAltShuffleOverhead(Tp);
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src);
    std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst);

    // Check for NOOP conversions.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Bitcasts between types that are legalized to the same type are free.
      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
        return 0;
    }

    if (Opcode == Instruction::Trunc &&
        TLI->isTruncateFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::ZExt &&
        TLI->isZExtFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::AddrSpaceCast &&
        TLI->isNoopAddrSpaceCast(Src->getPointerAddressSpace(),
                                 Dst->getPointerAddressSpace()))
      return 0;

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return 1;

    // Handle scalar conversions.
    if (!Src->isVectorTy() && !Dst->isVectorTy()) {

      // Scalar bitcasts are usually free.
      if (Opcode == Instruction::BitCast)
        return 0;

      // Just check the op cost. If the operation is legal then assume it costs
      // 1.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return 1;

      // Assume that illegal scalar instructions are expensive.
    // Check vector-to-vector casts.
    if (Dst->isVectorTy() && Src->isVectorTy()) {

      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first &&
          SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return 1;

        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return 2;

        // Just check the op cost. If the operation is legal then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are legalizing by splitting, query the concrete TTI for the cost
      // of casting the original vector twice. We also need to factor in the
      // cost of the split itself. Count that as 1, to be consistent with
      // TLI->getTypeLegalizationCost().
      if ((TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
           TargetLowering::TypeSplitVector) ||
          (TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
           TargetLowering::TypeSplitVector)) {
        Type *SplitDst = VectorType::get(Dst->getVectorElementType(),
                                         Dst->getVectorNumElements() / 2);
        Type *SplitSrc = VectorType::get(Src->getVectorElementType(),
                                         Src->getVectorNumElements() / 2);
        T *TTI = static_cast<T *>(this);
        return TTI->getVectorSplitCost() +
               (2 * TTI->getCastInstrCost(Opcode, SplitDst, SplitSrc));
      }

      // In other cases where the source or destination type is illegal, assume
      // the operation will get scalarized.
      unsigned Num = Dst->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType());

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Dst, true, true) + Num * Cost;
    }

    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcast between vectors and scalars. We need to
    // assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast)
      // Illegal bitcasts are done by storing and loading from a stack slot.
      return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
                                : 0) +
             (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
                                : 0);

    llvm_unreachable("Unhandled cast");
  }

  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return static_cast<T *>(this)->getVectorInstrCost(
               Instruction::ExtractElement, VecTy, Index) +
           static_cast<T *>(this)->getCastInstrCost(Opcode, Dst,
                                                    VecTy->getElementType());
  }

  unsigned getCFInstrCost(unsigned Opcode) {
    // Branches are assumed to be predicted.
    return 0;
  }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply
      // by the type-legalization overhead.
      return LT.first * 1;
    }

    // Otherwise, assume that the operation is scalarized.
    // TODO: If one of the types gets legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (ValTy->isVectorTy()) {
      unsigned Num = ValTy->getVectorNumElements();
      if (CondTy)
        CondTy = CondTy->getScalarType();
      unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
          Opcode, ValTy->getScalarType(), CondTy);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    std::pair<unsigned, MVT> LT =
        getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());
    return LT.first;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) {
    assert(!Src->isVoidTy() && "Invalid type");
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src);

    // Assuming that all loads of legal types cost 1.
    unsigned Cost = LT.first;

    if (Src->isVectorTy() &&
        Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store is
      // legal, this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(DL, Src);
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is scalarized.
        // We must account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                         Opcode == Instruction::Store);
      }
    }

    return Cost;
  }

  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
    VectorType *VT = dyn_cast<VectorType>(VecTy);
    assert(VT && "Expect a vector type for interleaved memory op");

    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");

    unsigned NumSubElts = NumElts / Factor;
    VectorType *SubVT = VectorType::get(VT->getElementType(), NumSubElts);

    // First, the cost of the load/store operation.
    unsigned Cost = static_cast<T *>(this)->getMemoryOpCost(
        Opcode, VecTy, Alignment, AddressSpace);

    // Legalize the vector type, and get the legalized and unlegalized type
    // sizes.
    MVT VecTyLT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
    unsigned VecTySize =
        static_cast<T *>(this)->getDataLayout().getTypeStoreSize(VecTy);
    unsigned VecTyLTSize = VecTyLT.getStoreSize();

    // Return the ceiling of dividing A by B.
    auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };

    // Scale the cost of the memory operation by the fraction of legalized
    // instructions that will actually be used. We shouldn't account for the
    // cost of dead instructions since they will be removed.
    //
    // E.g., an interleaved load of factor 8:
    //       %vec = load <16 x i64>, <16 x i64>* %ptr
    //       %v0 = shufflevector %vec, undef, <0, 8>
    //
    // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
    // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
    // type). The other loads are unused.
    //
    // We only scale the cost of loads since interleaved store groups aren't
    // allowed to have gaps.
    if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) {

      // The number of loads of a legal type it will take to represent a load
      // of the unlegalized vector type.
      unsigned NumLegalInsts = ceil(VecTySize, VecTyLTSize);

      // The number of elements of the unlegalized type that correspond to a
      // single legal instruction.
      unsigned NumEltsPerLegalInst = ceil(NumElts, NumLegalInsts);

      // Determine which legal instructions will be used.
      BitVector UsedInsts(NumLegalInsts, false);
      for (unsigned Index : Indices)
        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);

      // Scale the cost of the load by the fraction of legal instructions that
      // will be used, rounding up.
      Cost = ceil(Cost * UsedInsts.count(), NumLegalInsts);
    }

    // Next, add the cost of the interleave operation.
    if (Opcode == Instruction::Load) {
      // The interleave cost is similar to extracting the sub-vectors' elements
      // from the wide vector and inserting them into the sub-vectors.
      //
      // E.g. an interleaved load of factor 2 (with one member of index 0):
      //      %vec = load <8 x i32>, <8 x i32>* %ptr
      //      %v0 = shuffle %vec, undef, <0, 2, 4, 6>   ; Index 0
      // The cost is estimated as extracting elements 0, 2, 4, and 6 from the
      // <8 x i32> vector and inserting them into a <4 x i32> vector.
      //
      assert(Indices.size() <= Factor &&
             "Interleaved memory op has too many members");

      for (unsigned Index : Indices) {
        assert(Index < Factor && "Invalid index for interleaved memory op");

        // Extract elements from loaded vector for each sub vector.
        for (unsigned i = 0; i < NumSubElts; i++)
          Cost += static_cast<T *>(this)->getVectorInstrCost(
              Instruction::ExtractElement, VT, Index + i * Factor);
      }

      unsigned InsSubCost = 0;
      for (unsigned i = 0; i < NumSubElts; i++)
        InsSubCost += static_cast<T *>(this)->getVectorInstrCost(
            Instruction::InsertElement, SubVT, i);

      Cost += Indices.size() * InsSubCost;
    } else {
      // The interleave cost is the cost of extracting all elements from the
      // sub-vectors and inserting them into the wide vector.
      //
      // E.g. an interleaved store of factor 2:
      //      %interleaved.vec = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>
      //      store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
      // The cost is estimated as extracting all elements from both <4 x i32>
      // vectors and inserting them into the <8 x i32> vector.
      //
      unsigned ExtSubCost = 0;
      for (unsigned i = 0; i < NumSubElts; i++)
        ExtSubCost += static_cast<T *>(this)->getVectorInstrCost(
            Instruction::ExtractElement, SubVT, i);
      Cost += ExtSubCost * Factor;

      for (unsigned i = 0; i < NumElts; i++)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, VT, i);
    }

    return Cost;
  }

  /// Get intrinsic cost based on arguments.
  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                 ArrayRef<Value *> Args, FastMathFlags FMF) {
    switch (IID) {
    default: {
      SmallVector<Type *, 4> Types;
      for (Value *Op : Args)
        Types.push_back(Op->getType());
      return static_cast<T *>(this)->getIntrinsicInstrCost(IID, RetTy, Types,
                                                           FMF);
    }
    case Intrinsic::masked_scatter: {
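      // Operand layout of llvm.masked.scatter: (value, pointer vector,
      // alignment, mask).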
      Value *Mask = Args[3];
      bool VarMask = !isa<Constant>(Mask);
      unsigned Alignment = cast<ConstantInt>(Args[2])->getZExtValue();
      return static_cast<T *>(this)->getGatherScatterOpCost(
          Instruction::Store, Args[0]->getType(), Args[1], VarMask, Alignment);
    }
    case Intrinsic::masked_gather: {
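      // Operand layout of llvm.masked.gather: (pointer vector, alignment,
      // mask, passthru).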
      Value *Mask = Args[2];
      bool VarMask = !isa<Constant>(Mask);
      unsigned Alignment = cast<ConstantInt>(Args[1])->getZExtValue();
      return static_cast<T *>(this)->getGatherScatterOpCost(
          Instruction::Load, RetTy, Args[0], VarMask, Alignment);
    }
    }
  }

  /// Get intrinsic cost based on argument types.
  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                 ArrayRef<Type *> Tys, FastMathFlags FMF) {
    SmallVector<unsigned, 2> ISDs;
    unsigned SingleCallCost = 10; // Library call cost. Make it expensive.
    switch (IID) {
    default: {
      // Assume that we need to scalarize this intrinsic.
      unsigned ScalarizationCost = 0;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (RetTy->isVectorTy()) {
        ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
        ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
        ScalarRetTy = RetTy->getScalarType();
      }
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy()) {
          ScalarizationCost += getScalarizationOverhead(Ty, false, true);
          ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
          Ty = Ty->getScalarType();
        }
        ScalarTys.push_back(Ty);
      }
      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, ScalarRetTy, ScalarTys, FMF);

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }
    // Look for intrinsics that can be lowered directly or turned into a scalar
    // intrinsic call.
    case Intrinsic::sqrt:
      ISDs.push_back(ISD::FSQRT);
      break;
    case Intrinsic::sin:
      ISDs.push_back(ISD::FSIN);
      break;
    case Intrinsic::cos:
      ISDs.push_back(ISD::FCOS);
      break;
    case Intrinsic::exp:
      ISDs.push_back(ISD::FEXP);
      break;
    case Intrinsic::exp2:
      ISDs.push_back(ISD::FEXP2);
      break;
    case Intrinsic::log:
      ISDs.push_back(ISD::FLOG);
      break;
    case Intrinsic::log10:
      ISDs.push_back(ISD::FLOG10);
      break;
    case Intrinsic::log2:
      ISDs.push_back(ISD::FLOG2);
      break;
    case Intrinsic::fabs:
      ISDs.push_back(ISD::FABS);
      break;
    case Intrinsic::minnum:
      ISDs.push_back(ISD::FMINNUM);
      if (FMF.noNaNs())
        ISDs.push_back(ISD::FMINNAN);
      break;
    case Intrinsic::maxnum:
      ISDs.push_back(ISD::FMAXNUM);
      if (FMF.noNaNs())
        ISDs.push_back(ISD::FMAXNAN);
      break;
    case Intrinsic::copysign:
      ISDs.push_back(ISD::FCOPYSIGN);
      break;
    case Intrinsic::floor:
      ISDs.push_back(ISD::FFLOOR);
      break;
    case Intrinsic::ceil:
      ISDs.push_back(ISD::FCEIL);
      break;
    case Intrinsic::trunc:
      ISDs.push_back(ISD::FTRUNC);
      break;
    case Intrinsic::nearbyint:
      ISDs.push_back(ISD::FNEARBYINT);
      break;
    case Intrinsic::rint:
      ISDs.push_back(ISD::FRINT);
      break;
    case Intrinsic::round:
      ISDs.push_back(ISD::FROUND);
      break;
    case Intrinsic::pow:
      ISDs.push_back(ISD::FPOW);
      break;
    case Intrinsic::fma:
      ISDs.push_back(ISD::FMA);
      break;
    case Intrinsic::fmuladd:
      ISDs.push_back(ISD::FMA);
      break;
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      return 0;
    case Intrinsic::masked_store:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
    case Intrinsic::masked_load:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
    case Intrinsic::ctpop:
      ISDs.push_back(ISD::CTPOP);
      // In case of legalization use TCC_Expensive. This is cheaper than a
      // library call but still not a cheap instruction.
      SingleCallCost = TargetTransformInfo::TCC_Expensive;
      break;
    // FIXME: ctlz, cttz, ...
    }

    const TargetLoweringBase *TLI = getTLI();
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);

    SmallVector<unsigned, 2> LegalCost;
    SmallVector<unsigned, 2> CustomCost;
    for (unsigned ISD : ISDs) {
      if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
        if (IID == Intrinsic::fabs && TLI->isFAbsFree(LT.second)) {
          return 0;
        }

        // The operation is legal. Assume it costs 1.
        // If the type is split to multiple registers, assume that there is
        // some overhead.
        // TODO: Once we have extract/insert subvector cost we need to use them.
        if (LT.first > 1)
          LegalCost.push_back(LT.first * 2);
        else
          LegalCost.push_back(LT.first * 1);
      } else if (!TLI->isOperationExpand(ISD, LT.second)) {
        // If the operation is custom lowered then assume
        // that the code is twice as expensive.
        CustomCost.push_back(LT.first * 2);
      }
    }

    auto MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end());
    if (MinLegalCostI != LegalCost.end())
      return *MinLegalCostI;

    auto MinCustomCostI =
        std::min_element(CustomCost.begin(), CustomCost.end());
    if (MinCustomCostI != CustomCost.end())
      return *MinCustomCostI;

    // If we can't lower fmuladd into an FMA, estimate the cost as a floating
    // point mul followed by an add.
    if (IID == Intrinsic::fmuladd)
      return static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
             static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

    // Else, assume that we need to scalarize this intrinsic. For math builtins
    // this will emit a costly libcall, adding call overhead and spills. Make it
    // expensive.
    if (RetTy->isVectorTy()) {
      unsigned ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      unsigned ScalarCalls = RetTy->getVectorNumElements();
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
        ScalarTys.push_back(Ty);
      }
      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, RetTy->getScalarType(), ScalarTys, FMF);
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (Tys[i]->isVectorTy()) {
          ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
        }
      }

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }

    // This is going to be turned into a library call; make it expensive.
    return SingleCallCost;
  }

  /// \brief Compute a cost of the given call instruction.
  ///
  /// Compute the cost of calling function F with return type RetTy and
  /// argument types Tys. F might be nullptr; in this case the cost of an
  /// arbitrary call with the specified signature will be returned.
  /// This is used, for instance, when we estimate a call of a vector
  /// counterpart of the given function.
  /// \param F Called function, might be nullptr.
  /// \param RetTy Return value type.
  /// \param Tys Argument types.
  /// \returns The cost of the Call instruction.
  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
  unsigned getNumberOfParts(Type *Tp) {
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp);
    return LT.first;
  }

  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) { return 0; }

  unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwise) {
    assert(Ty->isVectorTy() && "Expect a vector type");
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
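    // For example, reducing an 8-element vector takes Log2_32(8) = 3
    // shuffle + arithmetic levels.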
    unsigned ArithCost =
        NumReduxLevels *
        static_cast<T *>(this)->getArithmeticInstrCost(Opcode, Ty);
    // Assume the pairwise shuffles add a cost.
    unsigned ShuffleCost =
        NumReduxLevels * (IsPairwise + 1) *
        static_cast<T *>(this)
            ->getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
    return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
  }

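  /// The cost of a single vector split. Kept at 1 so that it stays consistent
  /// with the way TLI->getTypeLegalizationCost() accounts for splitting (see
  /// getCastInstrCost() above).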
  unsigned getVectorSplitCost() { return 1; }
};

/// \brief Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
  typedef BasicTTIImplBase<BasicTTIImpl> BaseT;
  friend class BasicTTIImplBase<BasicTTIImpl>;

  const TargetSubtargetInfo *ST;
  const TargetLoweringBase *TLI;

  const TargetSubtargetInfo *getST() const { return ST; }
  const TargetLoweringBase *getTLI() const { return TLI; }

public:
  explicit BasicTTIImpl(const TargetMachine *ST, const Function &F);

  // Provide value semantics. MSVC requires that we spell all of these out.
  BasicTTIImpl(const BasicTTIImpl &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
  BasicTTIImpl(BasicTTIImpl &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
        TLI(std::move(Arg.TLI)) {}
};

} // end namespace llvm

#endif