//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H
19 #include "llvm/Analysis/LoopInfo.h"
20 #include "llvm/Analysis/TargetLibraryInfo.h"
21 #include "llvm/Analysis/TargetTransformInfoImpl.h"
22 #include "llvm/Support/CommandLine.h"
23 #include "llvm/Target/TargetLowering.h"
24 #include "llvm/Target/TargetSubtargetInfo.h"
28 extern cl::opt<unsigned> PartialUnrollingThreshold;
/// \brief Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
///
/// In order to subclass it, your class must implement a getST() method to
/// return the subtarget, and a getTLI() method to return the target lowering.
/// We need these methods implemented in the derived class so that this class
/// doesn't have to duplicate storage for them.
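///
/// For illustration, a minimal subclass might look like the following sketch
/// (the "MyTarget" names are hypothetical, not a real target):
/// \code
///   class MyTargetTTIImpl : public BasicTTIImplBase<MyTargetTTIImpl> {
///     friend BasicTTIImplBase<MyTargetTTIImpl>;
///
///     const TargetSubtargetInfo *ST;  // Cached from the target machine.
///     const TargetLoweringBase *TLI;
///
///     const TargetSubtargetInfo *getST() const { return ST; }
///     const TargetLoweringBase *getTLI() const { return TLI; }
///   };
/// \endcode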
template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
  typedef TargetTransformInfoImplCRTPBase<T> BaseT;
  typedef TargetTransformInfo TTI;
  /// Estimate a cost of shuffle as a sequence of extract and insert
  /// operations.
  unsigned getPermuteShuffleOverhead(Type *Ty) {
    assert(Ty->isVectorTy() && "Can only shuffle vectors");
    unsigned Cost = 0;
    // Shuffle cost is equal to the cost of extracting an element from its
    // argument plus the cost of inserting it onto the result vector.
    //
    // e.g. a <4 x float> shuffle with mask <0,5,2,7> extracts from index 0 of
    // the first vector, index 1 of the second vector, index 2 of the first
    // vector and index 3 of the second vector, and inserts them at indices
    // <0,1,2,3> of the result vector.
    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }
    return Cost;
  }
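  // Illustrative example: with unit element-access costs, permuting a
  // <4 x float> is estimated at 4 extracts plus 4 inserts, i.e. a cost of 8.
  // This is an upper bound; targets with real shuffle instructions are
  // expected to override getShuffleCost() with something cheaper.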
  /// \brief Local query method delegates up to T which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// \brief Local query method delegates up to T which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }
protected:
  explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
      : BaseT(DL) {}

public:
  using TargetTransformInfoImplBase::DL;

  /// \name Scalar TTI Implementations
  /// @{
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth, unsigned AddressSpace,
                                      unsigned Alignment, bool *Fast) const {
    EVT E = EVT::getIntegerVT(Context, BitWidth);
    return getTLI()->allowsMisalignedMemoryAccesses(E, AddressSpace, Alignment,
                                                    Fast);
  }

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isAlwaysUniform(const Value *V) { return false; }
  unsigned getFlatAddressSpace() {
    // Return an invalid address space.
    return -1;
  }

  bool isLegalAddImmediate(int64_t imm) {
    return getTLI()->isLegalAddImmediate(imm);
  }

  bool isLegalICmpImmediate(int64_t imm) {
    return getTLI()->isLegalICmpImmediate(imm);
  }
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace);
  }

  bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
  }
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
  }

  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) {
    return getTLI()->isFoldableMemAccessOffset(I, Offset);
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) {
    return getTLI()->isTruncateFree(Ty1, Ty2);
  }

  bool isProfitableToHoist(Instruction *I) {
    return getTLI()->isProfitableToHoist(I);
  }

  bool isTypeLegal(Type *Ty) {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isTypeLegal(VT);
  }
  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    return BaseT::getGEPCost(PointeeType, Ptr, Operands);
  }

  int getExtCost(const Instruction *I, const Value *Src) {
    if (getTLI()->isExtFree(I))
      return TargetTransformInfo::TCC_Free;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I))
      if (const LoadInst *LI = dyn_cast<LoadInst>(Src))
        if (getTLI()->isExtLoad(LI, I, DL))
          return TargetTransformInfo::TCC_Free;

    return TargetTransformInfo::TCC_Basic;
  }
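  // Illustrative example: on a target where an i32->i64 extending load is
  // legal, the sext below folds into the load and is costed as TCC_Free:
  //   %a = load i32, i32* %p
  //   %b = sext i32 %a to i64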
  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    if (IID == Intrinsic::cttz) {
      if (getTLI()->isCheapToSpeculateCttz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    if (IID == Intrinsic::ctlz) {
      if (getTLI()->isCheapToSpeculateCtlz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
  }
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JumpTableSize) {
    /// Try to find the estimated number of clusters. Note that the number of
    /// clusters identified in this function could be different from the actual
    /// numbers found in lowering. This function ignores switches that are
    /// lowered with a mix of jump table / bit test / BTree. It was initially
    /// intended to be used when estimating the cost of a switch in the inline
    /// cost heuristic, but it's a generic cost model to be used in other
    /// places (e.g., in loop unrolling).
    unsigned N = SI.getNumCases();
    const TargetLoweringBase *TLI = getTLI();
    const DataLayout &DL = this->getDataLayout();

    JumpTableSize = 0;
    bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());

    // Early exit if both a jump table and bit test are not allowed.
    if (N < 1 || (!IsJTAllowed && DL.getPointerSizeInBits() < N))
      return N;

    APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
    APInt MinCaseVal = MaxCaseVal;
    for (auto CI : SI.cases()) {
      const APInt &CaseVal = CI.getCaseValue()->getValue();
      if (CaseVal.sgt(MaxCaseVal))
        MaxCaseVal = CaseVal;
      if (CaseVal.slt(MinCaseVal))
        MinCaseVal = CaseVal;
    }

    // Check if suitable for a bit test.
    if (N <= DL.getPointerSizeInBits()) {
      SmallPtrSet<const BasicBlock *, 4> Dests;
      for (auto I : SI.cases())
        Dests.insert(I.getCaseSuccessor());

      if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
                                     DL))
        return 1;
    }

    // Check if suitable for a jump table.
    if (IsJTAllowed) {
      if (N < 2 || N < TLI->getMinimumJumpTableEntries())
        return N;
      uint64_t Range =
          (MaxCaseVal - MinCaseVal).getLimitedValue(UINT64_MAX - 1) + 1;
      // Check whether a range of clusters is dense enough for a jump table.
      if (TLI->isSuitableForJumpTable(&SI, N, Range)) {
        JumpTableSize = Range;
        return 1;
      }
    }
    return N;
  }
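  // Illustrative example: a switch over the cases {0, 3, 7, 42} on a 64-bit
  // target has N = 4 and Range = 43. Assuming the bit-test check above does
  // not fire first and the target accepts that range as dense enough, this
  // returns a single cluster with JumpTableSize = 43; otherwise up to N
  // clusters are reported.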
  unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }

  unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }

  bool shouldBuildLookupTables() {
    const TargetLoweringBase *TLI = getTLI();
    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }
  bool haveFastSqrt(Type *Ty) {
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(DL, Ty);
    return TLI->isTypeLegal(VT) &&
           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
  }

  unsigned getFPOpCost(Type *Ty) {
    // By default, FP instructions are no more expensive since they are
    // implemented in HW. Target-specific TTI can override this.
    return TargetTransformInfo::TCC_Basic;
  }
  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    const TargetLoweringBase *TLI = getTLI();
    switch (Opcode) {
    default: break;
    case Instruction::Trunc: {
      if (TLI->isTruncateFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    }
    case Instruction::ZExt: {
      if (TLI->isZExtFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    }
    }

    return BaseT::getOperationCost(Opcode, Ty, OpTy);
  }

  unsigned getInliningThresholdMultiplier() { return 1; }
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP) {
    // This unrolling functionality is target independent, but to provide some
    // motivation for its intended use, for x86:

    // According to the Intel 64 and IA-32 Architectures Optimization Reference
    // Manual, Intel Core models and later have a loop stream detector (and
    // associated uop queue) that can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
    //    taken, and none of them may be calls.
    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

    // According to the Software Optimization Guide for AMD Family 15h
    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
    // and loop buffer which can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have fewer than 16 branches
    //  - The loop must have fewer than 40 uops in all executed loop branches

    // The number of taken branches in a loop is hard to estimate here, and
    // benchmarking has revealed that it is better not to be conservative when
    // estimating the branch count. As a result, we'll ignore the branch limits
    // until someone finds a case where it matters in practice.

    unsigned MaxOps;
    const TargetSubtargetInfo *ST = getST();
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    else
      return;

    // Scan the loop: don't unroll loops with calls.
    for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
         ++I) {
      BasicBlock *BB = *I;

      for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
        if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
          ImmutableCallSite CS(&*J);
          if (const Function *F = CS.getCalledFunction()) {
            if (!static_cast<T *>(this)->isLoweredToCall(F))
              continue;
          }

          return;
        }
    }

    // Enable runtime and partial unrolling up to the specified size.
    // Enable using trip count upper bound to unroll loops.
    UP.Partial = UP.Runtime = UP.UpperBound = true;
    UP.PartialThreshold = MaxOps;

    // Avoid unrolling when optimizing for size.
    UP.OptSizeThreshold = 0;
    UP.PartialOptSizeThreshold = 0;

    // Set number of instructions optimized when "back edge"
    // becomes "fall through" to default value of 2.
    UP.BEInsns = 2;
  }
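  // Illustrative example: on a subtarget whose scheduling model advertises a
  // LoopMicroOpBufferSize of 28, a call-free loop ends up with
  // UP.PartialThreshold = 28, absent a PartialUnrollingThreshold command-line
  // override.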
  /// @}

  /// \name Vector TTI Implementations
  /// @{
  unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 1; }

  unsigned getRegisterBitWidth(bool Vector) const { return 32; }
  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    assert(Ty->isVectorTy() && "Can only scalarize vectors");
    unsigned Cost = 0;

    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      if (Insert)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      if (Extract)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }

    return Cost;
  }
  /// Estimate the overhead of scalarizing an instruction's unique
  /// non-constant operands. The types of the arguments are ordinarily
  /// scalar, in which case the costs are multiplied with VF.
  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) {
    unsigned Cost = 0;
    SmallPtrSet<const Value *, 4> UniqueOperands;
    for (const Value *A : Args) {
      if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
        Type *VecTy = nullptr;
        if (A->getType()->isVectorTy()) {
          VecTy = A->getType();
          // If A is a vector operand, VF should be 1 or correspond to A.
          assert((VF == 1 || VF == VecTy->getVectorNumElements()) &&
                 "Vector argument does not match VF");
        } else
          VecTy = VectorType::get(A->getType(), VF);

        Cost += getScalarizationOverhead(VecTy, false, true);
      }
    }

    return Cost;
  }
  unsigned getScalarizationOverhead(Type *VecTy, ArrayRef<const Value *> Args) {
    assert(VecTy->isVectorTy());
    unsigned Cost = 0;

    Cost += getScalarizationOverhead(VecTy, true, false);
    if (!Args.empty())
      Cost += getOperandsScalarizationOverhead(Args,
                                               VecTy->getVectorNumElements());
    else
      // When no information on arguments is provided, we add the cost
      // associated with one argument as a heuristic.
      Cost += getScalarizationOverhead(VecTy, false, true);

    return Cost;
  }
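  // Illustrative example: for a <4 x i32> operation with two unique
  // non-constant scalar operands and unit element-access costs, this returns
  // 4 result inserts plus 2 * 4 operand extracts, i.e. 12.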
  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>()) {
    // Check if any of the operands are vector operands.
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

    bool IsFloat = Ty->isFPOrFPVectorTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    unsigned OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      return LT.first * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2 * OpCost;
    }

    // Else, assume that we need to scalarize this op.
    // TODO: If one of the types gets legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (Ty->isVectorTy()) {
      unsigned Num = Ty->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)
                          ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Ty, Args) + Num * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }
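  // Illustrative walk-through: if a <4 x i32> add is legal as one
  // instruction, the estimate is LT.first * OpCost = 1; if <8 x i32>
  // legalizes by splitting into two <4 x i32> halves, LT.first = 2 and the
  // estimate doubles. FP operations start from OpCost = 2 under the
  // heuristic above.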
  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp) {
    if (Kind == TTI::SK_Alternate || Kind == TTI::SK_PermuteTwoSrc ||
        Kind == TTI::SK_PermuteSingleSrc) {
      return getPermuteShuffleOverhead(Tp);
    }
    return 1;
  }
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                            const Instruction *I = nullptr) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src);
    std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst);

    // Check for NOOP conversions.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Bitcasts between types that are legalized to the same type are free.
      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
        return 0;
    }

    if (Opcode == Instruction::Trunc &&
        TLI->isTruncateFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::ZExt &&
        TLI->isZExtFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::AddrSpaceCast &&
        TLI->isNoopAddrSpaceCast(Src->getPointerAddressSpace(),
                                 Dst->getPointerAddressSpace()))
      return 0;

    // If this is a zext/sext of a load, return 0 if the corresponding
    // extending load exists on the target.
    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
        I && isa<LoadInst>(I->getOperand(0))) {
      EVT ExtVT = EVT::getEVT(Dst);
      EVT LoadVT = EVT::getEVT(Src);
      unsigned LType =
          ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
      if (TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
        return 0;
    }

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return 1;

    // Handle scalar conversions.
    if (!Src->isVectorTy() && !Dst->isVectorTy()) {

      // Scalar bitcasts are usually free.
      if (Opcode == Instruction::BitCast)
        return 0;

      // Just check the op cost. If the operation is legal then assume it
      // costs 1.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return 1;

      // Assume that illegal scalar instructions are expensive.
      return 4;
    }

    // Check vector-to-vector casts.
    if (Dst->isVectorTy() && Src->isVectorTy()) {

      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first &&
          SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return 1;

        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return 2;

        // Just check the op cost. If the operation is legal then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are legalizing by splitting, query the concrete TTI for the cost
      // of casting the original vector twice. We also need to factor in the
      // cost of the split itself. Count that as 1, to be consistent with
      // TLI->getTypeLegalizationCost().
      if ((TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
           TargetLowering::TypeSplitVector) ||
          (TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
           TargetLowering::TypeSplitVector)) {
        Type *SplitDst = VectorType::get(Dst->getVectorElementType(),
                                         Dst->getVectorNumElements() / 2);
        Type *SplitSrc = VectorType::get(Src->getVectorElementType(),
                                         Src->getVectorNumElements() / 2);
        T *TTI = static_cast<T *>(this);
        return TTI->getVectorSplitCost() +
               (2 * TTI->getCastInstrCost(Opcode, SplitDst, SplitSrc, I));
      }

      // In other cases where the source or destination are illegal, assume
      // the operation will get scalarized.
      unsigned Num = Dst->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType(), I);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Dst, true, true) + Num * Cost;
    }

    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcasts between vectors and scalars. We need to
    // assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast)
      // Illegal bitcasts are done by storing and loading from a stack slot.
      return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
                                : 0) +
             (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
                                : 0);

    llvm_unreachable("Unhandled cast");
  }
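  // Illustrative example: a sext of <8 x i16> to <8 x i32> on a target with
  // 128-bit vectors splits the destination type, so the estimate is one
  // vector-split plus twice the cost of sext <4 x i16> to <4 x i32>.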
  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return static_cast<T *>(this)->getVectorInstrCost(
               Instruction::ExtractElement, VecTy, Index) +
           static_cast<T *>(this)->getCastInstrCost(Opcode, Dst,
                                                    VecTy->getElementType());
  }

  unsigned getCFInstrCost(unsigned Opcode) {
    // Branches are assumed to be predicted.
    return 0;
  }
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                              const Instruction *I) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply
      // by the type-legalization overhead.
      return LT.first;
    }

    // Otherwise, assume that the cast is scalarized.
    // TODO: If one of the types gets legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (ValTy->isVectorTy()) {
      unsigned Num = ValTy->getVectorNumElements();
      if (CondTy)
        CondTy = CondTy->getScalarType();
      unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
          Opcode, ValTy->getScalarType(), CondTy, I);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    std::pair<unsigned, MVT> LT =
        getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());

    return LT.first;
  }
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace,
                           const Instruction *I = nullptr) {
    assert(!Src->isVoidTy() && "Invalid type");
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src);

    // Assuming that all loads of legal types cost 1.
    unsigned Cost = LT.first;

    if (Src->isVectorTy() &&
        Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store is
      // legal, this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(DL, Src);
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is scalarized.
        // We must account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                         Opcode == Instruction::Store);
      }
    }

    return Cost;
  }
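  // Illustrative example: a load of <8 x i32> on a target with 128-bit vector
  // registers legalizes into two <4 x i32> loads, so LT.first = 2 and the
  // estimated cost is 2; if an illegal memory type also lacks the matching
  // extending load or truncating store, the scalarization overhead is added.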
  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
    VectorType *VT = dyn_cast<VectorType>(VecTy);
    assert(VT && "Expect a vector type for interleaved memory op");

    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");

    unsigned NumSubElts = NumElts / Factor;
    VectorType *SubVT = VectorType::get(VT->getElementType(), NumSubElts);

    // Firstly, the cost of the load/store operation.
    unsigned Cost = static_cast<T *>(this)->getMemoryOpCost(
        Opcode, VecTy, Alignment, AddressSpace);

    // Legalize the vector type, and get the legalized and unlegalized type
    // sizes.
    MVT VecTyLT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
    unsigned VecTySize =
        static_cast<T *>(this)->getDataLayout().getTypeStoreSize(VecTy);
    unsigned VecTyLTSize = VecTyLT.getStoreSize();

    // Return the ceiling of dividing A by B.
    auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };

    // Scale the cost of the memory operation by the fraction of legalized
    // instructions that will actually be used. We shouldn't account for the
    // cost of dead instructions since they will be removed.
    //
    // E.g., an interleaved load of factor 8:
    //       %vec = load <16 x i64>, <16 x i64>* %ptr
    //       %v0 = shufflevector %vec, undef, <0, 8>
    //
    // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
    // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
    // type). The other loads are unused.
    //
    // We only scale the cost of loads since interleaved store groups aren't
    // allowed to have gaps.
    if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) {

      // The number of loads of a legal type it will take to represent a load
      // of the unlegalized vector type.
      unsigned NumLegalInsts = ceil(VecTySize, VecTyLTSize);

      // The number of elements of the unlegalized type that correspond to a
      // single legal instruction.
      unsigned NumEltsPerLegalInst = ceil(NumElts, NumLegalInsts);

      // Determine which legal instructions will be used.
      BitVector UsedInsts(NumLegalInsts, false);
      for (unsigned Index : Indices)
        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);

      // Scale the cost of the load by the fraction of legal instructions that
      // will be used. Multiply before dividing so the integer division does
      // not truncate the scaled cost to zero.
      Cost = ceil(UsedInsts.count() * Cost, NumLegalInsts);
    }

    // Then plus the cost of the interleave operation.
    if (Opcode == Instruction::Load) {
      // The interleave cost is similar to extracting the sub vectors'
      // elements from the wide vector, and inserting them into sub vectors.
      //
      // E.g., an interleaved load of factor 2 (with one member of index 0):
      //       %vec = load <8 x i32>, <8 x i32>* %ptr
      //       %v0 = shuffle %vec, undef, <0, 2, 4, 6>   ; Index 0
      // The cost is estimated as extracting elements at 0, 2, 4, 6 from the
      // <8 x i32> vector and inserting them into a <4 x i32> vector.

      assert(Indices.size() <= Factor &&
             "Interleaved memory op has too many members");

      for (unsigned Index : Indices) {
        assert(Index < Factor && "Invalid index for interleaved memory op");

        // Extract elements from the loaded vector for each sub vector.
        for (unsigned i = 0; i < NumSubElts; i++)
          Cost += static_cast<T *>(this)->getVectorInstrCost(
              Instruction::ExtractElement, VT, Index + i * Factor);
      }

      unsigned InsSubCost = 0;
      for (unsigned i = 0; i < NumSubElts; i++)
        InsSubCost += static_cast<T *>(this)->getVectorInstrCost(
            Instruction::InsertElement, SubVT, i);

      Cost += Indices.size() * InsSubCost;
    } else {
      // The interleave cost is to extract all elements from the sub vectors,
      // and insert them into the wide vector.
      //
      // E.g., an interleaved store of factor 2:
      //       %v0_v1 = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>
      //       store <8 x i32> %v0_v1, <8 x i32>* %ptr
      // The cost is estimated as extracting all elements from both <4 x i32>
      // vectors and inserting them into the <8 x i32> vector.

      unsigned ExtSubCost = 0;
      for (unsigned i = 0; i < NumSubElts; i++)
        ExtSubCost += static_cast<T *>(this)->getVectorInstrCost(
            Instruction::ExtractElement, SubVT, i);
      Cost += ExtSubCost * Factor;

      for (unsigned i = 0; i < NumElts; i++)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, VT, i);
    }

    return Cost;
  }
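  // Illustrative example: an interleaved load of factor 2 from <8 x i32> that
  // uses both members is costed as the wide load plus 8 extracts and 8
  // inserts (assuming unit element-access costs); targets with dedicated
  // interleaved access instructions are expected to override this hook.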
  /// Get intrinsic cost based on arguments.
  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                 ArrayRef<Value *> Args, FastMathFlags FMF,
                                 unsigned VF = 1) {
    unsigned RetVF = (RetTy->isVectorTy() ? RetTy->getVectorNumElements() : 1);
    assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type");

    switch (IID) {
    default: {
      // Assume that we need to scalarize this intrinsic.
      SmallVector<Type *, 4> Types;
      for (Value *Op : Args) {
        Type *OpTy = Op->getType();
        assert(VF == 1 || !OpTy->isVectorTy());
        Types.push_back(VF == 1 ? OpTy : VectorType::get(OpTy, VF));
      }

      if (VF > 1 && !RetTy->isVoidTy())
        RetTy = VectorType::get(RetTy, VF);

      // Compute the scalarization overhead based on Args for a vector
      // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
      // CostModel will pass a vector RetTy and VF is 1.
      unsigned ScalarizationCost = UINT_MAX;
      if (RetVF > 1 || VF > 1) {
        ScalarizationCost = 0;
        if (!RetTy->isVoidTy())
          ScalarizationCost += getScalarizationOverhead(RetTy, true, false);
        ScalarizationCost += getOperandsScalarizationOverhead(Args, VF);
      }

      return static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, RetTy, Types, FMF, ScalarizationCost);
    }
    case Intrinsic::masked_scatter: {
      assert(VF == 1 && "Can't vectorize types here.");
      Value *Mask = Args[3];
      bool VarMask = !isa<Constant>(Mask);
      unsigned Alignment = cast<ConstantInt>(Args[2])->getZExtValue();
      return static_cast<T *>(this)->getGatherScatterOpCost(
          Instruction::Store, Args[0]->getType(), Args[1], VarMask, Alignment);
    }
    case Intrinsic::masked_gather: {
      assert(VF == 1 && "Can't vectorize types here.");
      Value *Mask = Args[2];
      bool VarMask = !isa<Constant>(Mask);
      unsigned Alignment = cast<ConstantInt>(Args[1])->getZExtValue();
      return static_cast<T *>(this)->getGatherScatterOpCost(
          Instruction::Load, RetTy, Args[0], VarMask, Alignment);
    }
    }
  }
  /// Get intrinsic cost based on argument types.
  /// If ScalarizationCostPassed is UINT_MAX, the cost of scalarizing the
  /// arguments and the return value will be computed based on types.
  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                 ArrayRef<Type *> Tys, FastMathFlags FMF,
                                 unsigned ScalarizationCostPassed = UINT_MAX) {
    SmallVector<unsigned, 2> ISDs;
    unsigned SingleCallCost = 10; // Library call cost. Make it expensive.
    switch (IID) {
    default: {
      // Assume that we need to scalarize this intrinsic.
      unsigned ScalarizationCost = ScalarizationCostPassed;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (RetTy->isVectorTy()) {
        if (ScalarizationCostPassed == UINT_MAX)
          ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
        ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
        ScalarRetTy = RetTy->getScalarType();
      }
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy()) {
          if (ScalarizationCostPassed == UINT_MAX)
            ScalarizationCost += getScalarizationOverhead(Ty, false, true);
          ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
          Ty = Ty->getScalarType();
        }
        ScalarTys.push_back(Ty);
      }
      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, ScalarRetTy, ScalarTys, FMF);

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }
    // Look for intrinsics that can be lowered directly or turned into a scalar
    // intrinsic call.
    case Intrinsic::sqrt:
      ISDs.push_back(ISD::FSQRT);
      break;
    case Intrinsic::sin:
      ISDs.push_back(ISD::FSIN);
      break;
    case Intrinsic::cos:
      ISDs.push_back(ISD::FCOS);
      break;
    case Intrinsic::exp:
      ISDs.push_back(ISD::FEXP);
      break;
    case Intrinsic::exp2:
      ISDs.push_back(ISD::FEXP2);
      break;
    case Intrinsic::log:
      ISDs.push_back(ISD::FLOG);
      break;
    case Intrinsic::log10:
      ISDs.push_back(ISD::FLOG10);
      break;
    case Intrinsic::log2:
      ISDs.push_back(ISD::FLOG2);
      break;
    case Intrinsic::fabs:
      ISDs.push_back(ISD::FABS);
      break;
    case Intrinsic::minnum:
      ISDs.push_back(ISD::FMINNUM);
      if (FMF.noNaNs())
        ISDs.push_back(ISD::FMINNAN);
      break;
    case Intrinsic::maxnum:
      ISDs.push_back(ISD::FMAXNUM);
      if (FMF.noNaNs())
        ISDs.push_back(ISD::FMAXNAN);
      break;
    case Intrinsic::copysign:
      ISDs.push_back(ISD::FCOPYSIGN);
      break;
    case Intrinsic::floor:
      ISDs.push_back(ISD::FFLOOR);
      break;
    case Intrinsic::ceil:
      ISDs.push_back(ISD::FCEIL);
      break;
    case Intrinsic::trunc:
      ISDs.push_back(ISD::FTRUNC);
      break;
    case Intrinsic::nearbyint:
      ISDs.push_back(ISD::FNEARBYINT);
      break;
    case Intrinsic::rint:
      ISDs.push_back(ISD::FRINT);
      break;
    case Intrinsic::round:
      ISDs.push_back(ISD::FROUND);
      break;
    case Intrinsic::pow:
      ISDs.push_back(ISD::FPOW);
      break;
    case Intrinsic::fma:
      ISDs.push_back(ISD::FMA);
      break;
    case Intrinsic::fmuladd:
      ISDs.push_back(ISD::FMA);
      break;
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      return 0;
    case Intrinsic::masked_store:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
    case Intrinsic::masked_load:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
    case Intrinsic::ctpop:
      ISDs.push_back(ISD::CTPOP);
      // In case of legalization use TCC_Expensive. This is cheaper than a
      // library call but still not a cheap instruction.
      SingleCallCost = TargetTransformInfo::TCC_Expensive;
      break;
    // FIXME: ctlz, cttz, ...
    }
    const TargetLoweringBase *TLI = getTLI();
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);

    SmallVector<unsigned, 2> LegalCost;
    SmallVector<unsigned, 2> CustomCost;
    for (unsigned ISD : ISDs) {
      if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
        if (IID == Intrinsic::fabs && TLI->isFAbsFree(LT.second)) {
          return 0;
        }

        // The operation is legal. Assume it costs 1.
        // If the type is split to multiple registers, assume that there is
        // some overhead to this.
        // TODO: Once we have extract/insert subvector cost we need to use them.
        if (LT.first > 1)
          LegalCost.push_back(LT.first * 2);
        else
          LegalCost.push_back(LT.first * 1);
      } else if (!TLI->isOperationExpand(ISD, LT.second)) {
        // If the operation is custom lowered then assume
        // that the code is twice as expensive.
        CustomCost.push_back(LT.first * 2);
      }
    }

    auto MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end());
    if (MinLegalCostI != LegalCost.end())
      return *MinLegalCostI;

    auto MinCustomCostI = std::min_element(CustomCost.begin(),
                                           CustomCost.end());
    if (MinCustomCostI != CustomCost.end())
      return *MinCustomCostI;

    // If we can't lower fmuladd into an FMA estimate the cost as a floating
    // point mul followed by an add.
    if (IID == Intrinsic::fmuladd)
      return static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
             static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

    // Else, assume that we need to scalarize this intrinsic. For math builtins
    // this will emit a costly libcall, adding call overhead and spills. Make it
    // very expensive.
    if (RetTy->isVectorTy()) {
      unsigned ScalarizationCost =
          ((ScalarizationCostPassed != UINT_MAX)
               ? ScalarizationCostPassed
               : getScalarizationOverhead(RetTy, true, false));
      unsigned ScalarCalls = RetTy->getVectorNumElements();
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
        ScalarTys.push_back(Ty);
      }
      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, RetTy->getScalarType(), ScalarTys, FMF);
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (Tys[i]->isVectorTy()) {
          if (ScalarizationCostPassed == UINT_MAX)
            ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
        }
      }

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }

    // This is going to be turned into a library call, make it expensive.
    return SingleCallCost;
  }
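  // Illustrative example: @llvm.sqrt.v4f32 costs 1 when FSQRT is legal for
  // v4f32; if it must expand, the estimate falls back to four scalar calls
  // plus the scalarization overhead, with each unlowerable scalar call costed
  // at SingleCallCost = 10.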
  /// \brief Compute a cost of the given call instruction.
  ///
  /// Compute the cost of calling function F with return type RetTy and
  /// argument types Tys. F might be nullptr, in this case the cost of an
  /// arbitrary call with the specified signature will be returned.
  /// This is used, for instance, when we estimate a call of a vector
  /// counterpart of the given function.
  /// \param F Called function, might be nullptr.
  /// \param RetTy Return value types.
  /// \param Tys Argument types.
  /// \returns The cost of Call instruction.
  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 10;
  }

  unsigned getNumberOfParts(Type *Tp) {
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp);
    return LT.first;
  }

  unsigned getAddressComputationCost(Type *Ty, ScalarEvolution *,
                                     const SCEV *) {
    return 0;
  }
  /// Try to calculate arithmetic and shuffle op costs for reduction operations.
  /// We're assuming that reduction operations are performed the following way:
  /// 1. Non-pairwise reduction
  /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
  /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
  ///            \----------------v-------------/  \----------v------------/
  ///                            n/2 elements               n/2 elements
  /// %red1 = op <n x t> %val, <n x t> val1
  /// After this operation we have a vector %red1 where only the first n/2
  /// elements are meaningful, the second n/2 elements are undefined and can be
  /// dropped. All other operations are actually working with the vector of
  /// length n/2, not n, though the real vector length is still n.
  /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
  /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
  ///            \----------------v-------------/  \----------v------------/
  ///                            n/4 elements              3*n/4 elements
  /// %red2 = op <n x t> %red1, <n x t> val2  - working with the vector of
  /// length n/2, the resulting vector has length n/4 etc.
  /// 2. Pairwise reduction:
  /// Everything is the same except for an additional shuffle operation which
  /// is used to produce operands for pairwise kind of reductions.
  /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
  /// <n x i32> <i32 0, i32 2, ..., i32 n-2, i32 undef, ..., i32 undef>
  ///            \-------------v----------/  \----------v------------/
  ///                   n/2 elements               n/2 elements
  /// %val2 = shufflevector<n x t> %val, <n x t> %undef,
  /// <n x i32> <i32 1, i32 3, ..., i32 n-1, i32 undef, ..., i32 undef>
  ///            \-------------v----------/  \----------v------------/
  ///                   n/2 elements               n/2 elements
  /// %red1 = op <n x t> %val1, <n x t> val2
  /// Again, the operation is performed on <n x t> vector, but the resulting
  /// vector %red1 is <n/2 x t> vector.
  ///
  /// The cost model should take into account that the actual length of the
  /// vector is reduced on each iteration.
  unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwise) {
    assert(Ty->isVectorTy() && "Expect a vector type");
    Type *ScalarTy = Ty->getVectorElementType();
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    unsigned ArithCost = 0;
    unsigned ShuffleCost = 0;
    auto *ConcreteTTI = static_cast<T *>(this);
    std::pair<unsigned, MVT> LT =
        ConcreteTTI->getTLI()->getTypeLegalizationCost(DL, Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      // Assume the pairwise shuffles add a cost.
      ShuffleCost += (IsPairwise + 1) *
                     ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
                                                 NumVecElts, Ty);
      ArithCost += ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
      Ty = VectorType::get(ScalarTy, NumVecElts);
      ++LongVectorCount;
    }
    // The minimal length of the vector is limited by the real length of vector
    // operations performed on the current platform. That's why several final
    // reduction operations are performed on vectors with the same
    // architecture-dependent length.
    ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1) *
                   ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
                                               NumVecElts, Ty);
    ArithCost += (NumReduxLevels - LongVectorCount) *
                 ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
    return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
  }
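  // Illustrative example: a non-pairwise reduction of <8 x float> with legal
  // <4 x float> operations takes one shuffle+op step down to the legal width,
  // then Log2(8) - 1 = 2 further shuffle+op steps at that width, plus the
  // final extraction of the scalar result (modeled here via the
  // scalarization overhead).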
  unsigned getVectorSplitCost() { return 1; }

  /// @}
};
/// \brief Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
  typedef BasicTTIImplBase<BasicTTIImpl> BaseT;
  friend class BasicTTIImplBase<BasicTTIImpl>;

  const TargetSubtargetInfo *ST;
  const TargetLoweringBase *TLI;

  const TargetSubtargetInfo *getST() const { return ST; }
  const TargetLoweringBase *getTLI() const { return TLI; }

public:
  explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
};

} // end namespace llvm

#endif