//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"

extern cl::opt<unsigned> PartialUnrollingThreshold;
/// \brief Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
///
/// In order to subclass it, your class must implement a getST() method to
/// return the subtarget, and a getTLI() method to return the target lowering.
/// We need these methods implemented in the derived class so that this class
/// doesn't have to duplicate storage for them.
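///
/// A minimal subclass might look roughly like the sketch below. MyTTIImpl is
/// a hypothetical target; real targets typically forward a TargetMachine and
/// derive the subtarget and lowering objects from it in the same way:
///
/// \code
///   class MyTTIImpl : public BasicTTIImplBase<MyTTIImpl> {
///     typedef BasicTTIImplBase<MyTTIImpl> BaseT;
///
///     const TargetSubtargetInfo *ST;
///     const TargetLoweringBase *TLI;
///
///   public:
///     explicit MyTTIImpl(const TargetMachine *TM, const Function &F)
///         : BaseT(TM, F.getParent()->getDataLayout()),
///           ST(TM->getSubtargetImpl(F)), TLI(ST->getTargetLowering()) {}
///
///     // Satisfy the queries that BasicTTIImplBase delegates to the subclass.
///     const TargetSubtargetInfo *getST() const { return ST; }
///     const TargetLoweringBase *getTLI() const { return TLI; }
///   };
/// \endcode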
template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
  typedef TargetTransformInfoImplCRTPBase<T> BaseT;
  typedef TargetTransformInfo TTI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
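  ///
  /// For example, with both Insert and Extract set, an operation on
  /// <4 x float> is modeled as four insertelement plus four extractelement
  /// operations; the per-element numbers come from getVectorInstrCost and are
  /// therefore target dependent.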
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    assert(Ty->isVectorTy() && "Can only scalarize vectors");
    unsigned Cost = 0;

    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      if (Insert)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      if (Extract)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }

    return Cost;
  }

  /// Estimate the cost of a shuffle as a sequence of extract and insert
  /// operations.
  unsigned getPermuteShuffleOverhead(Type *Ty) {
    assert(Ty->isVectorTy() && "Can only shuffle vectors");
    unsigned Cost = 0;
    // Shuffle cost is equal to the cost of extracting elements from the source
    // vectors plus the cost of inserting them into the result vector.
    //
    // E.g. a shuffle of two <4 x float> vectors with mask <0,5,2,7> extracts
    // index 0 of the first vector, index 1 of the second vector, index 2 of
    // the first vector and index 3 of the second vector, and inserts them at
    // indices <0,1,2,3> of the result vector.
    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }

    return Cost;
  }
  /// \brief Local query method delegates up to T which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// \brief Local query method delegates up to T which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }

protected:
  explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
      : BaseT(DL) {}

  using TargetTransformInfoImplBase::DL;
  /// \name Scalar TTI Implementations

  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth, unsigned AddressSpace,
                                      unsigned Alignment, bool *Fast) const {
    EVT E = EVT::getIntegerVT(Context, BitWidth);
    return getTLI()->allowsMisalignedMemoryAccesses(E, AddressSpace, Alignment,
                                                    Fast);

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isLegalAddImmediate(int64_t imm) {
    return getTLI()->isLegalAddImmediate(imm);

  bool isLegalICmpImmediate(int64_t imm) {
    return getTLI()->isLegalICmpImmediate(imm);

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace);

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);

  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) {
    return getTLI()->isFoldableMemAccessOffset(I, Offset);

  bool isTruncateFree(Type *Ty1, Type *Ty2) {
    return getTLI()->isTruncateFree(Ty1, Ty2);

  bool isProfitableToHoist(Instruction *I) {
    return getTLI()->isProfitableToHoist(I);

  bool isTypeLegal(Type *Ty) {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isTypeLegal(VT);

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    return BaseT::getGEPCost(PointeeType, Ptr, Operands);

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    return BaseT::getIntrinsicCost(IID, RetTy, Arguments);

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    if (IID == Intrinsic::cttz) {
      if (getTLI()->isCheapToSpeculateCttz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;

    if (IID == Intrinsic::ctlz) {
      if (getTLI()->isCheapToSpeculateCtlz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;

    return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);

  unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }

  unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }

  bool shouldBuildLookupTables() {
    const TargetLoweringBase *TLI = getTLI();
    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);

  bool haveFastSqrt(Type *Ty) {
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(DL, Ty);
    return TLI->isTypeLegal(VT) &&
           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);

  unsigned getFPOpCost(Type *Ty) {
    // By default, FP instructions are no more expensive than integer ones,
    // since they are usually implemented in HW. Target specific TTI can
    // override this.
    return TargetTransformInfo::TCC_Basic;

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    const TargetLoweringBase *TLI = getTLI();
    case Instruction::Trunc: {
      if (TLI->isTruncateFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    case Instruction::ZExt: {
      if (TLI->isZExtFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    return BaseT::getOperationCost(Opcode, Ty, OpTy);

  unsigned getInliningThresholdMultiplier() { return 1; }
  void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP) {
    // This unrolling functionality is target independent, but to provide some
    // motivation for its intended use, for x86:

    // According to the Intel 64 and IA-32 Architectures Optimization Reference
    // Manual, Intel Core models and later have a loop stream detector (and
    // associated uop queue) that can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
    //    taken, and none of them may be calls.
    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

    // According to the Software Optimization Guide for AMD Family 15h
    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
    // and loop buffer which can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have fewer than 16 branches
    //  - The loop must have fewer than 40 uops in all executed loop branches

    // The number of taken branches in a loop is hard to estimate here, and
    // benchmarking has revealed that it is better not to be conservative when
    // estimating the branch count. As a result, we'll ignore the branch limits
    // until someone finds a case where it matters in practice.

    const TargetSubtargetInfo *ST = getST();
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;

    // Scan the loop: don't unroll loops with calls.
    for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
         ++I) {

      for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
        if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
          ImmutableCallSite CS(&*J);
          if (const Function *F = CS.getCalledFunction()) {
            if (!static_cast<T *>(this)->isLoweredToCall(F))

    // Enable runtime and partial unrolling up to the specified size.
    // Enable using trip count upper bound to unroll loops.
    UP.Partial = UP.Runtime = UP.UpperBound = true;
    UP.PartialThreshold = MaxOps;

    // Avoid unrolling when optimizing for size.
    UP.OptSizeThreshold = 0;
    UP.PartialOptSizeThreshold = 0;

    // Set the number of instructions optimized away when the "back edge"
    // becomes a "fall through" to its default value of 2.
  /// \name Vector TTI Implementations

  unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 1; }

  unsigned getRegisterBitWidth(bool Vector) { return 32; }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>()) {
    // Check if any of the operands are vector operands.
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

    bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    unsigned OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      return LT.first * OpCost;

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2 * OpCost;

    // Else, assume that we need to scalarize this op.
    // TODO: If one of the types gets legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (Ty->isVectorTy()) {
      unsigned Num = Ty->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)
                          ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Ty, true, true) + Num * Cost;

    // We don't know anything about this scalar instruction.
  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp) {
    if (Kind == TTI::SK_Alternate || Kind == TTI::SK_PermuteTwoSrc ||
        Kind == TTI::SK_PermuteSingleSrc) {
      return getPermuteShuffleOverhead(Tp);

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src);
    std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst);

    // Check for NOOP conversions.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Bitcasts between types that are legalized to the same type are free.
      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)

    if (Opcode == Instruction::Trunc &&
        TLI->isTruncateFree(SrcLT.second, DstLT.second))

    if (Opcode == Instruction::ZExt &&
        TLI->isZExtFree(SrcLT.second, DstLT.second))

    if (Opcode == Instruction::AddrSpaceCast &&
        TLI->isNoopAddrSpaceCast(Src->getPointerAddressSpace(),
                                 Dst->getPointerAddressSpace()))

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))

    // Handle scalar conversions.
    if (!Src->isVectorTy() && !Dst->isVectorTy()) {

      // Scalar bitcasts are usually free.
      if (Opcode == Instruction::BitCast)

      // Just check the op cost. If the operation is legal then assume it
      // costs 1.
      if (!TLI->isOperationExpand(ISD, DstLT.second))

      // Assume that illegal scalar instructions are expensive.

    // Check vector-to-vector casts.
    if (Dst->isVectorTy() && Src->isVectorTy()) {

      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first &&
          SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)

        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)

        // Just check the op cost. If the operation is legal then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;

      // If we are legalizing by splitting, query the concrete TTI for the cost
      // of casting the original vector twice. We also need to factor in the
      // cost of the split itself. Count that as 1, to be consistent with
      // TLI->getTypeLegalizationCost().
      if ((TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
           TargetLowering::TypeSplitVector) ||
          (TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
           TargetLowering::TypeSplitVector)) {
        Type *SplitDst = VectorType::get(Dst->getVectorElementType(),
                                         Dst->getVectorNumElements() / 2);
        Type *SplitSrc = VectorType::get(Src->getVectorElementType(),
                                         Src->getVectorNumElements() / 2);
        T *TTI = static_cast<T *>(this);
        return TTI->getVectorSplitCost() +
               (2 * TTI->getCastInstrCost(Opcode, SplitDst, SplitSrc));

      // In other cases where the source or destination is illegal, assume
      // the operation will get scalarized.
      unsigned Num = Dst->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType());

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Dst, true, true) + Num * Cost;

    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcasts between vectors and scalars. We need to
    // assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast)
      // Illegal bitcasts are done by storing and loading from a stack slot.
      return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
             (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)

    llvm_unreachable("Unhandled cast");
  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return static_cast<T *>(this)->getVectorInstrCost(
               Instruction::ExtractElement, VecTy, Index) +
           static_cast<T *>(this)->getCastInstrCost(Opcode, Dst,
                                                    VecTy->getElementType());

  unsigned getCFInstrCost(unsigned Opcode) {
    // Branches are assumed to be predicted.

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply
      // by the type-legalization overhead.

    // Otherwise, assume that the operation is scalarized.
    // TODO: If one of the types gets legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (ValTy->isVectorTy()) {
      unsigned Num = ValTy->getVectorNumElements();
      CondTy = CondTy->getScalarType();
      unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
          Opcode, ValTy->getScalarType(), CondTy);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(ValTy, true, false) + Num * Cost;

    // Unknown scalar opcode.

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    std::pair<unsigned, MVT> LT =
        getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) {
    assert(!Src->isVoidTy() && "Invalid type");
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src);

    // Assuming that all loads of legal types cost 1.
    unsigned Cost = LT.first;

    if (Src->isVectorTy() &&
        Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store is
      // legal, this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(DL, Src);
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is scalarized.
        // We must account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                         Opcode == Instruction::Store);
  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
    VectorType *VT = dyn_cast<VectorType>(VecTy);
    assert(VT && "Expect a vector type for interleaved memory op");

    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");

    unsigned NumSubElts = NumElts / Factor;
    VectorType *SubVT = VectorType::get(VT->getElementType(), NumSubElts);

    // First, the cost of the load/store operation.
    unsigned Cost = static_cast<T *>(this)->getMemoryOpCost(
        Opcode, VecTy, Alignment, AddressSpace);

    // Legalize the vector type, and get the legalized and unlegalized type
    // sizes.
    MVT VecTyLT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
    unsigned VecTySize =
        static_cast<T *>(this)->getDataLayout().getTypeStoreSize(VecTy);
    unsigned VecTyLTSize = VecTyLT.getStoreSize();

    // Return the ceiling of dividing A by B.
    auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };

    // Scale the cost of the memory operation by the fraction of legalized
    // instructions that will actually be used. We shouldn't account for the
    // cost of dead instructions since they will be removed.
    //
    // E.g., An interleaved load of factor 8:
    //       %vec = load <16 x i64>, <16 x i64>* %ptr
    //       %v0 = shufflevector %vec, undef, <0, 8>
    //
    // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
    // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
    // type). The other loads are unused.
    //
    // We only scale the cost of loads since interleaved store groups aren't
    // allowed to have gaps.
    if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) {

      // The number of loads of a legal type it will take to represent a load
      // of the unlegalized vector type.
      unsigned NumLegalInsts = ceil(VecTySize, VecTyLTSize);

      // The number of elements of the unlegalized type that correspond to a
      // single legal instruction.
      unsigned NumEltsPerLegalInst = ceil(NumElts, NumLegalInsts);

      // Determine which legal instructions will be used.
      BitVector UsedInsts(NumLegalInsts, false);
      for (unsigned Index : Indices)
        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);

      // Scale the cost of the load by the fraction of legal instructions that
      // are used. Multiply before dividing so the integer division does not
      // truncate the scaled cost to zero.
      Cost = Cost * UsedInsts.count() / NumLegalInsts;

    // Then add the cost of the interleave operation.
    if (Opcode == Instruction::Load) {
      // The interleave cost is similar to extracting the sub-vectors' elements
      // from the wide vector and inserting them into the sub-vectors.
      //
      // E.g. An interleaved load of factor 2 (with one member of index 0):
      //      %vec = load <8 x i32>, <8 x i32>* %ptr
      //      %v0 = shuffle %vec, undef, <0, 2, 4, 6>   ; Index 0
      // The cost is estimated as extracting elements at 0, 2, 4, 6 from the
      // <8 x i32> vector and inserting them into a <4 x i32> vector.

      assert(Indices.size() <= Factor &&
             "Interleaved memory op has too many members");

      for (unsigned Index : Indices) {
        assert(Index < Factor && "Invalid index for interleaved memory op");

        // Extract elements from the loaded vector for each sub-vector.
        for (unsigned i = 0; i < NumSubElts; i++)
          Cost += static_cast<T *>(this)->getVectorInstrCost(
              Instruction::ExtractElement, VT, Index + i * Factor);

      unsigned InsSubCost = 0;
      for (unsigned i = 0; i < NumSubElts; i++)
        InsSubCost += static_cast<T *>(this)->getVectorInstrCost(
            Instruction::InsertElement, SubVT, i);

      Cost += Indices.size() * InsSubCost;
    } else {
      // The interleave cost is the cost of extracting all elements from the
      // sub-vectors and inserting them into the wide vector.
      //
      // E.g. An interleaved store of factor 2:
      //      %v0_v1 = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>
      //      store <8 x i32> %v0_v1, <8 x i32>* %ptr
      // The cost is estimated as extracting all elements from both <4 x i32>
      // vectors and inserting them into the <8 x i32> vector.

      unsigned ExtSubCost = 0;
      for (unsigned i = 0; i < NumSubElts; i++)
        ExtSubCost += static_cast<T *>(this)->getVectorInstrCost(
            Instruction::ExtractElement, SubVT, i);
      Cost += ExtSubCost * Factor;

      for (unsigned i = 0; i < NumElts; i++)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, VT, i);
  /// Get intrinsic cost based on arguments.
  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                 ArrayRef<Value *> Args, FastMathFlags FMF) {
      SmallVector<Type *, 4> Types;
      for (Value *Op : Args)
        Types.push_back(Op->getType());
      return static_cast<T *>(this)->getIntrinsicInstrCost(IID, RetTy, Types,
                                                           FMF);

    case Intrinsic::masked_scatter: {
      Value *Mask = Args[3];
      bool VarMask = !isa<Constant>(Mask);
      unsigned Alignment = cast<ConstantInt>(Args[2])->getZExtValue();
      static_cast<T *>(this)->getGatherScatterOpCost(Instruction::Store,

    case Intrinsic::masked_gather: {
      Value *Mask = Args[2];
      bool VarMask = !isa<Constant>(Mask);
      unsigned Alignment = cast<ConstantInt>(Args[1])->getZExtValue();
      static_cast<T *>(this)->getGatherScatterOpCost(Instruction::Load,
                                                     RetTy, Args[0], VarMask,
  /// Get intrinsic cost based on argument types.
  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                 ArrayRef<Type *> Tys, FastMathFlags FMF) {
    SmallVector<unsigned, 2> ISDs;
    unsigned SingleCallCost = 10; // Library call cost. Make it expensive.

      // Assume that we need to scalarize this intrinsic.
      unsigned ScalarizationCost = 0;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (RetTy->isVectorTy()) {
        ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
        ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
        ScalarRetTy = RetTy->getScalarType();

      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (Ty->isVectorTy()) {
          ScalarizationCost += getScalarizationOverhead(Ty, false, true);
          ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
          Ty = Ty->getScalarType();

        ScalarTys.push_back(Ty);

      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, ScalarRetTy, ScalarTys, FMF);

      return ScalarCalls * ScalarCost + ScalarizationCost;

    // Look for intrinsics that can be lowered directly or turned into a scalar
    // intrinsic call.
    case Intrinsic::sqrt:
      ISDs.push_back(ISD::FSQRT);
      ISDs.push_back(ISD::FSIN);
      ISDs.push_back(ISD::FCOS);
      ISDs.push_back(ISD::FEXP);
    case Intrinsic::exp2:
      ISDs.push_back(ISD::FEXP2);
      ISDs.push_back(ISD::FLOG);
    case Intrinsic::log10:
      ISDs.push_back(ISD::FLOG10);
    case Intrinsic::log2:
      ISDs.push_back(ISD::FLOG2);
    case Intrinsic::fabs:
      ISDs.push_back(ISD::FABS);
    case Intrinsic::minnum:
      ISDs.push_back(ISD::FMINNUM);
      ISDs.push_back(ISD::FMINNAN);
    case Intrinsic::maxnum:
      ISDs.push_back(ISD::FMAXNUM);
      ISDs.push_back(ISD::FMAXNAN);
    case Intrinsic::copysign:
      ISDs.push_back(ISD::FCOPYSIGN);
    case Intrinsic::floor:
      ISDs.push_back(ISD::FFLOOR);
    case Intrinsic::ceil:
      ISDs.push_back(ISD::FCEIL);
    case Intrinsic::trunc:
      ISDs.push_back(ISD::FTRUNC);
    case Intrinsic::nearbyint:
      ISDs.push_back(ISD::FNEARBYINT);
    case Intrinsic::rint:
      ISDs.push_back(ISD::FRINT);
    case Intrinsic::round:
      ISDs.push_back(ISD::FROUND);
      ISDs.push_back(ISD::FPOW);
      ISDs.push_back(ISD::FMA);
    case Intrinsic::fmuladd:
      ISDs.push_back(ISD::FMA);
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::masked_store:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
    case Intrinsic::masked_load:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
    case Intrinsic::ctpop:
      ISDs.push_back(ISD::CTPOP);
      // In case of legalization use TCC_Expensive. This is cheaper than a
      // library call but still not a cheap instruction.
      SingleCallCost = TargetTransformInfo::TCC_Expensive;
    // FIXME: ctlz, cttz, ...
    const TargetLoweringBase *TLI = getTLI();
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);

    SmallVector<unsigned, 2> LegalCost;
    SmallVector<unsigned, 2> CustomCost;
    for (unsigned ISD : ISDs) {
      if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
        if (IID == Intrinsic::fabs && TLI->isFAbsFree(LT.second)) {

        // The operation is legal. Assume it costs 1.
        // If the type is split to multiple registers, assume that there is some
        // overhead to this.
        // TODO: Once we have extract/insert subvector cost we need to use them.
        LegalCost.push_back(LT.first * 2);
        LegalCost.push_back(LT.first * 1);
      } else if (!TLI->isOperationExpand(ISD, LT.second)) {
        // If the operation is custom lowered then assume
        // that the code is twice as expensive.
        CustomCost.push_back(LT.first * 2);

    auto MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end());
    if (MinLegalCostI != LegalCost.end())
      return *MinLegalCostI;

    auto MinCustomCostI =
        std::min_element(CustomCost.begin(), CustomCost.end());
    if (MinCustomCostI != CustomCost.end())
      return *MinCustomCostI;

    // If we can't lower fmuladd into an FMA, estimate the cost as a floating
    // point mul followed by an add.
    if (IID == Intrinsic::fmuladd)
      return static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
             static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

    // Else, assume that we need to scalarize this intrinsic. For math builtins
    // this will emit a costly libcall, adding call overhead and spills. Make it
    // expensive.
    if (RetTy->isVectorTy()) {
      unsigned ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      unsigned ScalarCalls = RetTy->getVectorNumElements();
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
        ScalarTys.push_back(Ty);

      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, RetTy->getScalarType(), ScalarTys, FMF);

      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (Tys[i]->isVectorTy()) {
          ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());

      return ScalarCalls * ScalarCost + ScalarizationCost;

    // This is going to be turned into a library call, make it expensive.
    return SingleCallCost;
  /// \brief Compute the cost of the given call instruction.
  ///
  /// Compute the cost of calling function F with return type RetTy and
  /// argument types Tys. F might be nullptr; in this case the cost of an
  /// arbitrary call with the specified signature will be returned.
  /// This is used, for instance, when we estimate a call of a vector
  /// counterpart of the given function.
  /// \param F Called function, might be nullptr.
  /// \param RetTy Return value type.
  /// \param Tys Argument types.
  /// \returns The cost of the Call instruction.
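  ///
  /// For instance, a vectorizer might use this to price the call to a
  /// vectorized library routine (a sketch; VecRetTy and VecArgTys stand for
  /// whatever vector types the caller has built):
  ///
  /// \code
  ///   unsigned VecCallCost = getCallInstrCost(nullptr, VecRetTy, VecArgTys);
  /// \endcode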
  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {

  unsigned getNumberOfParts(Type *Tp) {
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp);

  unsigned getAddressComputationCost(Type *Ty, ScalarEvolution *,

  unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwise) {
    assert(Ty->isVectorTy() && "Expect a vector type");
    Type *ScalarTy = Ty->getVectorElementType();
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    // Try to calculate arithmetic and shuffle op costs for reduction
    // operations. We're assuming that reduction operations are performed in
    // the following way:
    // 1. Non-pairwise reduction
    // %val1 = shufflevector<n x t> %val, <n x t> %undef,
    // <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
    //            \----------------v-------------/  \----------v------------/
    //                    n/2 elements                    n/2 elements
    // %red1 = op <n x t> %val, <n x t> %val1
    // After this operation we have a vector %red1 where only the first n/2
    // elements are meaningful; the second n/2 elements are undefined and can
    // be dropped. All further operations are actually working with a vector
    // of length n/2, not n, though the real vector length is still n.
    // %val2 = shufflevector<n x t> %red1, <n x t> %undef,
    // <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
    //            \----------------v-------------/  \----------v------------/
    //                    n/4 elements                   3*n/4 elements
    // %red2 = op <n x t> %red1, <n x t> %val2  - working with the vector of
    // length n/2, the resulting vector has length n/4, etc.
    // 2. Pairwise reduction:
    // Everything is the same except for an additional shuffle operation which
    // is used to produce operands for pairwise kind of reductions.
    // %val1 = shufflevector<n x t> %val, <n x t> %undef,
    // <n x i32> <i32 0, i32 2, ..., i32 n-2, i32 undef, ..., i32 undef>
    //            \-------------v----------/  \----------v------------/
    //                  n/2 elements               n/2 elements
    // %val2 = shufflevector<n x t> %val, <n x t> %undef,
    // <n x i32> <i32 1, i32 3, ..., i32 n-1, i32 undef, ..., i32 undef>
    //            \-------------v----------/  \----------v------------/
    //                  n/2 elements               n/2 elements
    // %red1 = op <n x t> %val1, <n x t> %val2
    // Again, the operation is performed on an <n x t> vector, but the
    // resulting vector %red1 is an <n/2 x t> vector.
    //
    // The cost model should take into account that the actual length of the
    // vector is reduced on each iteration.
    unsigned ArithCost = 0;
    unsigned ShuffleCost = 0;
    auto *ConcreteTTI = static_cast<T *>(this);
    std::pair<unsigned, MVT> LT =
        ConcreteTTI->getTLI()->getTypeLegalizationCost(DL, Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      // Assume the pairwise shuffles add a cost.
      ShuffleCost += (IsPairwise + 1) *
                     ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
      ArithCost += ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
      Ty = VectorType::get(ScalarTy, NumVecElts);

    // The minimal length of the vector is limited by the real length of vector
    // operations performed on the current platform. That's why several final
    // reduction operations are performed on vectors with the same
    // architecture-dependent length.
    ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1) *
                   ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
    ArithCost += (NumReduxLevels - LongVectorCount) *
                 ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
    return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);

  unsigned getVectorSplitCost() { return 1; }
/// \brief Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
  typedef BasicTTIImplBase<BasicTTIImpl> BaseT;
  friend class BasicTTIImplBase<BasicTTIImpl>;

  const TargetSubtargetInfo *ST;
  const TargetLoweringBase *TLI;

  const TargetSubtargetInfo *getST() const { return ST; }
  const TargetLoweringBase *getTLI() const { return TLI; }

public:
  explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);