//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <utility>

namespace llvm {

class Function;
class GlobalValue;
class LLVMContext;
class ScalarEvolution;
class SCEV;
class TargetMachine;

extern cl::opt<unsigned> PartialUnrollingThreshold;

/// Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
///
/// In order to subclass it, your class must implement a getST() method to
/// return the subtarget, and a getTLI() method to return the target lowering.
/// We need these methods implemented in the derived class so that this class
/// doesn't have to duplicate storage for them.
template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
  using BaseT = TargetTransformInfoImplCRTPBase<T>;
  using TTI = TargetTransformInfo;

  /// Helper function to access this as a T.
  T *thisT() { return static_cast<T *>(this); }

  /// Estimate a cost of Broadcast as an extract and sequence of insert
  /// operations.
  unsigned getBroadcastShuffleOverhead(FixedVectorType *VTy) {
    unsigned Cost = 0;
    // Broadcast cost is equal to the cost of extracting the zeroth element
    // plus the cost of inserting it into every element of the result vector.
    Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, 0);

    for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
    }
    return Cost;
  }

  /// Estimate a cost of shuffle as a sequence of extract and insert
  /// operations.
  unsigned getPermuteShuffleOverhead(FixedVectorType *VTy) {
    unsigned Cost = 0;
    // Shuffle cost is equal to the cost of extracting an element from its
    // argument plus the cost of inserting it into the result vector.
    //
    // e.g. a <4 x float> shuffle with a mask of <0,5,2,7> needs to extract
    // index 0 of the first vector, index 1 of the second vector, index 2 of
    // the first vector, and finally index 3 of the second vector, and insert
    // them at indices <0,1,2,3> of the result vector.
    for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, i);
    }
    return Cost;
  }

  /// Estimate a cost of subvector extraction as a sequence of extract and
  /// insert operations.
  unsigned getExtractSubvectorOverhead(FixedVectorType *VTy, int Index,
                                       FixedVectorType *SubVTy) {
    assert(VTy && SubVTy &&
           "Can only extract subvectors from vectors");
    int NumSubElts = SubVTy->getNumElements();
    assert((Index + NumSubElts) <= (int)VTy->getNumElements() &&
           "SK_ExtractSubvector index out of range");

    unsigned Cost = 0;
    // Subvector extraction cost is equal to the cost of extracting the
    // elements from the source type plus the cost of inserting them into the
    // result vector type.
    for (int i = 0; i != NumSubElts; ++i) {
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                          i + Index);
      Cost +=
          thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy, i);
    }
    return Cost;
  }

  /// Estimate a cost of subvector insertion as a sequence of extract and
  /// insert operations.
  unsigned getInsertSubvectorOverhead(FixedVectorType *VTy, int Index,
                                      FixedVectorType *SubVTy) {
    assert(VTy && SubVTy &&
           "Can only insert subvectors into vectors");
    int NumSubElts = SubVTy->getNumElements();
    assert((Index + NumSubElts) <= (int)VTy->getNumElements() &&
           "SK_InsertSubvector index out of range");

    unsigned Cost = 0;
    // Subvector insertion cost is equal to the cost of extracting the
    // elements from the source type plus the cost of inserting them into the
    // result vector type.
    for (int i = 0; i != NumSubElts; ++i) {
      Cost +=
          thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy, i);
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                          i + Index);
    }
    return Cost;
  }

  /// Local query method delegates up to T which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// Local query method delegates up to T which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }

  static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
    switch (M) {
    case TTI::MIM_Unindexed:
      return ISD::UNINDEXED;
    case TTI::MIM_PreInc:
      return ISD::PRE_INC;
    case TTI::MIM_PreDec:
      return ISD::PRE_DEC;
    case TTI::MIM_PostInc:
      return ISD::POST_INC;
    case TTI::MIM_PostDec:
      return ISD::POST_DEC;
    }
    llvm_unreachable("Unexpected MemIndexedMode");
  }

protected:
  explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
      : BaseT(DL) {}
  virtual ~BasicTTIImplBase() = default;

  using TargetTransformInfoImplBase::DL;

public:
  /// \name Scalar TTI Implementations
  /// @{
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace, unsigned Alignment,
                                      bool *Fast) const {
    EVT E = EVT::getIntegerVT(Context, BitWidth);
    return getTLI()->allowsMisalignedMemoryAccesses(
        E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
  }

  bool hasBranchDivergence() { return false; }

  bool useGPUDivergenceAnalysis() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isAlwaysUniform(const Value *V) { return false; }

  unsigned getFlatAddressSpace() {
    // Return an invalid address space.
    return -1;
  }

  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const {
    return false;
  }

  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
    return getTLI()->isNoopAddrSpaceCast(FromAS, ToAS);
  }

  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const {
    return nullptr;
  }

  bool isLegalAddImmediate(int64_t imm) {
    return getTLI()->isLegalAddImmediate(imm);
  }

  bool isLegalICmpImmediate(int64_t imm) {
    return getTLI()->isLegalICmpImmediate(imm);
  }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace, Instruction *I = nullptr) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
  }

  bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
                          const DataLayout &DL) const {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
  }

  bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
                           const DataLayout &DL) const {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
  }

  bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
  }

  bool isProfitableLSRChainElement(Instruction *I) {
    return TargetTransformInfoImplBase::isProfitableLSRChainElement(I);
  }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) {
    return getTLI()->isTruncateFree(Ty1, Ty2);
  }

  bool isProfitableToHoist(Instruction *I) {
    return getTLI()->isProfitableToHoist(I);
  }

  bool useAA() const { return getST()->useAA(); }

  bool isTypeLegal(Type *Ty) {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isTypeLegal(VT);
  }

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    return BaseT::getGEPCost(PointeeType, Ptr, Operands);
  }

  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JumpTableSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) {
    /// Try to find the estimated number of clusters. Note that the number of
    /// clusters identified in this function could be different from the actual
    /// numbers found in lowering. This function ignores switches that are
    /// lowered with a mix of jump table / bit test / BTree. This function was
    /// initially intended to be used when estimating the cost of switch in
    /// inline cost heuristic, but it's a generic cost model to be used in other
    /// places (e.g., in loop unrolling).
    unsigned N = SI.getNumCases();
    const TargetLoweringBase *TLI = getTLI();
    const DataLayout &DL = this->getDataLayout();

    JumpTableSize = 0;
    bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());

    // Early exit if neither a jump table nor a bit test is allowed.
    if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
      return N;

    APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
    APInt MinCaseVal = MaxCaseVal;
    for (auto CI : SI.cases()) {
      const APInt &CaseVal = CI.getCaseValue()->getValue();
      if (CaseVal.sgt(MaxCaseVal))
        MaxCaseVal = CaseVal;
      if (CaseVal.slt(MinCaseVal))
        MinCaseVal = CaseVal;
    }

    // Check if suitable for a bit test
    if (N <= DL.getIndexSizeInBits(0u)) {
      SmallPtrSet<const BasicBlock *, 4> Dests;
      for (auto I : SI.cases())
        Dests.insert(I.getCaseSuccessor());

      if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
                                     DL))
        return 1;
    }

    // Check if suitable for a jump table.
    if (IsJTAllowed) {
      if (N < 2 || N < TLI->getMinimumJumpTableEntries())
        return N;
      uint64_t Range =
          (MaxCaseVal - MinCaseVal)
              .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
      // Check whether a range of clusters is dense enough for a jump table
      if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
        JumpTableSize = Range;
        return 1;
      }
    }
    return N;
  }
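
  // For example (illustrative): a switch with 100 contiguous cases covering
  // [0, 99] yields Range = 100; if the target considers that dense enough,
  // JumpTableSize becomes 100 and the whole switch counts as one cluster.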

  bool shouldBuildLookupTables() {
    const TargetLoweringBase *TLI = getTLI();
    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  bool haveFastSqrt(Type *Ty) {
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(DL, Ty);
    return TLI->isTypeLegal(VT) &&
           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
  }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
    return true;
  }

  unsigned getFPOpCost(Type *Ty) {
    // Check whether FADD is available, as a proxy for floating-point in
    // general.
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(DL, Ty);
    if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
      return TargetTransformInfo::TCC_Basic;
    return TargetTransformInfo::TCC_Expensive;
  }

  unsigned getInliningThresholdMultiplier() { return 1; }

  int getInlinerVectorBonusPercent() { return 150; }

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP) {
    // This unrolling functionality is target independent, but to provide some
    // motivation for its intended use, for x86:

    // According to the Intel 64 and IA-32 Architectures Optimization Reference
    // Manual, Intel Core models and later have a loop stream detector (and
    // associated uop queue) that can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
    //    taken, and none of them may be calls.
    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

    // According to the Software Optimization Guide for AMD Family 15h
    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
    // and loop buffer which can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have fewer than 16 branches
    //  - The loop must have less than 40 uops in all executed loop branches

    // The number of taken branches in a loop is hard to estimate here, and
    // benchmarking has revealed that it is better not to be conservative when
    // estimating the branch count. As a result, we'll ignore the branch limits
    // until someone finds a case where it matters in practice.

    unsigned MaxOps;
    const TargetSubtargetInfo *ST = getST();
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    else
      return;

    // Scan the loop: don't unroll loops with calls.
    for (BasicBlock *BB : L->blocks()) {
      for (Instruction &I : *BB) {
        if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
          if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
            if (!thisT()->isLoweredToCall(F))
              continue;
          }

          return;
        }
      }
    }

    // Enable runtime and partial unrolling up to the specified size.
    // Enable using trip count upper bound to unroll loops.
    UP.Partial = UP.Runtime = UP.UpperBound = true;
    UP.PartialThreshold = MaxOps;

    // Avoid unrolling when optimizing for size.
    UP.OptSizeThreshold = 0;
    UP.PartialOptSizeThreshold = 0;

    // Set number of instructions optimized when "back edge"
    // becomes "fall through" to default value of 2.
    UP.BEInsns = 2;
  }
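
  // For example (illustrative): with a scheduling model reporting
  // LoopMicroOpBufferSize = 28, a call-free loop of 9 instructions gets
  // UP.PartialThreshold = 28, so partial unrolling is roughly capped at a
  // factor of 3 before the body exceeds the loop buffer.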

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP) {
    PP.PeelCount = 0;
    PP.AllowPeeling = true;
    PP.AllowLoopNestsPeeling = false;
    PP.PeelProfiledIterations = true;
  }

  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC,
                                TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) {
    return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
  }

  bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
                                   AssumptionCache &AC, TargetLibraryInfo *TLI,
                                   DominatorTree *DT,
                                   const LoopAccessInfo *LAI) {
    return BaseT::preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
  }

  bool emitGetActiveLaneMask() {
    return BaseT::emitGetActiveLaneMask();
  }

  int getInstructionLatency(const Instruction *I) {
    if (isa<LoadInst>(I))
      return getST()->getSchedModel().DefaultLoadLatency;

    return BaseT::getInstructionLatency(I);
  }

  virtual Optional<unsigned>
  getCacheSize(TargetTransformInfo::CacheLevel Level) const {
    return Optional<unsigned>(
        getST()->getCacheSize(static_cast<unsigned>(Level)));
  }

  virtual Optional<unsigned>
  getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
    Optional<unsigned> TargetResult =
        getST()->getCacheAssociativity(static_cast<unsigned>(Level));

    if (TargetResult)
      return TargetResult;

    return BaseT::getCacheAssociativity(Level);
  }

  virtual unsigned getCacheLineSize() const {
    return getST()->getCacheLineSize();
  }

  virtual unsigned getPrefetchDistance() const {
    return getST()->getPrefetchDistance();
  }

  virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                        unsigned NumStridedMemAccesses,
                                        unsigned NumPrefetches,
                                        bool HasCall) const {
    return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                         NumPrefetches, HasCall);
  }

  virtual unsigned getMaxPrefetchIterationsAhead() const {
    return getST()->getMaxPrefetchIterationsAhead();
  }

  virtual bool enableWritePrefetching() const {
    return getST()->enableWritePrefetching();
  }

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getRegisterBitWidth(bool Vector) const { return 32; }

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the demanded result elements need to be inserted and/or
  /// extracted from vectors.
  unsigned getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts,
                                    bool Insert, bool Extract) {
    /// FIXME: a bitfield is not a reasonable abstraction for talking about
    /// which elements are needed from a scalable vector
    auto *Ty = cast<FixedVectorType>(InTy);

    assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
           "Vector size mismatch");

    unsigned Cost = 0;

    for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
      if (!DemandedElts[i])
        continue;
      if (Insert)
        Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      if (Extract)
        Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }

    return Cost;
  }
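
  // For example (illustrative): on a <4 x float> with DemandedElts = 0110 and
  // only Insert set, just elements 1 and 2 are counted, i.e. the overhead is
  // two insertelement costs.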

  /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
  unsigned getScalarizationOverhead(VectorType *InTy, bool Insert,
                                    bool Extract) {
    auto *Ty = cast<FixedVectorType>(InTy);

    APInt DemandedElts = APInt::getAllOnesValue(Ty->getNumElements());
    return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
  }

  /// Estimate the overhead of scalarizing an instruction's unique
  /// non-constant operands. The types of the arguments are ordinarily
  /// scalar, in which case the costs are multiplied with VF.
  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) {
    unsigned Cost = 0;
    SmallPtrSet<const Value*, 4> UniqueOperands;
    for (const Value *A : Args) {
      if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
        auto *VecTy = dyn_cast<VectorType>(A->getType());
        if (VecTy) {
          // If A is a vector operand, VF should be 1 or correspond to A.
          assert((VF == 1 ||
                  VF == cast<FixedVectorType>(VecTy)->getNumElements()) &&
                 "Vector argument does not match VF");
        } else
          VecTy = FixedVectorType::get(A->getType(), VF);

        Cost += getScalarizationOverhead(VecTy, false, true);
      }
    }

    return Cost;
  }

  unsigned getScalarizationOverhead(VectorType *InTy,
                                    ArrayRef<const Value *> Args) {
    auto *Ty = cast<FixedVectorType>(InTy);

    unsigned Cost = 0;

    Cost += getScalarizationOverhead(Ty, true, false);
    if (!Args.empty())
      Cost += getOperandsScalarizationOverhead(Args, Ty->getNumElements());
    else
      // When no information on arguments is provided, we add the cost
      // associated with one argument as a heuristic.
      Cost += getScalarizationOverhead(Ty, false, true);

    return Cost;
  }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr) {
    // Check if any of the operands are vector operands.
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // TODO: Handle more cost kinds.
    if (CostKind != TTI::TCK_RecipThroughput)
      return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
                                           Opd1Info, Opd2Info,
                                           Opd1PropInfo, Opd2PropInfo,
                                           Args, CxtI);

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

    bool IsFloat = Ty->isFPOrFPVectorTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    unsigned OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      return LT.first * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2 * OpCost;
    }

    // Else, assume that we need to scalarize this op.
    // TODO: If one of the types get legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      unsigned Num = cast<FixedVectorType>(VTy)->getNumElements();
      unsigned Cost = thisT()->getArithmeticInstrCost(
          Opcode, VTy->getScalarType(), CostKind);
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(VTy, Args) + Num * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }
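
  // For example (illustrative): an fdiv on <8 x float> whose FDIV expands
  // falls into the scalarization case above, so the estimate is eight scalar
  // fdiv costs plus the insert/extract overhead, not LT.first * OpCost.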

  unsigned getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, int Index,
                          VectorType *SubTp) {
    switch (Kind) {
    case TTI::SK_Broadcast:
      return getBroadcastShuffleOverhead(cast<FixedVectorType>(Tp));
    case TTI::SK_Select:
    case TTI::SK_Reverse:
    case TTI::SK_Transpose:
    case TTI::SK_PermuteSingleSrc:
    case TTI::SK_PermuteTwoSrc:
      return getPermuteShuffleOverhead(cast<FixedVectorType>(Tp));
    case TTI::SK_ExtractSubvector:
      return getExtractSubvectorOverhead(cast<FixedVectorType>(Tp), Index,
                                         cast<FixedVectorType>(SubTp));
    case TTI::SK_InsertSubvector:
      return getInsertSubvectorOverhead(cast<FixedVectorType>(Tp), Index,
                                        cast<FixedVectorType>(SubTp));
    }
    llvm_unreachable("Unknown TTI::ShuffleKind");
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                            TTI::TargetCostKind CostKind,
                            const Instruction *I = nullptr) {
    if (BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I) == 0)
      return 0;

    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src);
    std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst);

    TypeSize SrcSize = SrcLT.second.getSizeInBits();
    TypeSize DstSize = DstLT.second.getSizeInBits();
    bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
    bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();

    switch (Opcode) {
    default:
      break;
    case Instruction::Trunc:
      // Check for NOOP conversions.
      if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
        return 0;
      LLVM_FALLTHROUGH;
    case Instruction::BitCast:
      // Bitcasts between types that are legalized to the same type are free,
      // and we assume int to/from ptr of the same size is also free.
      if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
          SrcSize == DstSize)
        return 0;
      break;
    case Instruction::FPExt:
      if (I && getTLI()->isExtFree(I))
        return 0;
      break;
    case Instruction::ZExt:
      if (TLI->isZExtFree(SrcLT.second, DstLT.second))
        return 0;
      LLVM_FALLTHROUGH;
    case Instruction::SExt:
      if (I && getTLI()->isExtFree(I))
        return 0;

      // If this is a zext/sext of a load, return 0 if the corresponding
      // extending load exists on target.
      if (I && isa<LoadInst>(I->getOperand(0))) {
        EVT ExtVT = EVT::getEVT(Dst);
        EVT LoadVT = EVT::getEVT(Src);
        unsigned LType =
            ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
        if (TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
          return 0;
      }
      break;
    case Instruction::AddrSpaceCast:
      if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
                                   Dst->getPointerAddressSpace()))
        return 0;
      break;
    }

    auto *SrcVTy = dyn_cast<VectorType>(Src);
    auto *DstVTy = dyn_cast<VectorType>(Dst);

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return SrcLT.first;

    // Handle scalar conversions.
    if (!SrcVTy && !DstVTy) {
      // Just check the op cost. If the operation is legal then assume it
      // costs 1.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return 1;

      // Assume that illegal scalar instructions are expensive.
      return 4;
    }

    // Check vector-to-vector casts.
    if (DstVTy && SrcVTy) {
      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first && SrcSize == DstSize) {

        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return SrcLT.first;

        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return SrcLT.first * 2;

        // Just check the op cost. If the operation is legal then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are legalizing by splitting, query the concrete TTI for the cost
      // of casting the original vector twice. We also need to factor in the
      // cost of the split itself. Count that as 1, to be consistent with
      // TLI->getTypeLegalizationCost().
      bool SplitSrc =
          TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
          TargetLowering::TypeSplitVector;
      bool SplitDst =
          TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
          TargetLowering::TypeSplitVector;
      if ((SplitSrc || SplitDst) &&
          cast<FixedVectorType>(SrcVTy)->getNumElements() > 1 &&
          cast<FixedVectorType>(DstVTy)->getNumElements() > 1) {
        Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
        Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
        T *TTI = static_cast<T *>(this);
        // If both types need to be split then the split is free.
        unsigned SplitCost =
            (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
        return SplitCost +
               (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy,
                                          CostKind, I));
      }

      // In other cases where the source or destination are illegal, assume
      // the operation will get scalarized.
      unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
      unsigned Cost = thisT()->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType(), CostKind, I);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(DstVTy, true, true) + Num * Cost;
    }

    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcast between vectors and scalars. We need to
    // assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast) {
      // Illegal bitcasts are done by storing and loading from a stack slot.
      return (SrcVTy ? getScalarizationOverhead(SrcVTy, false, true) : 0) +
             (DstVTy ? getScalarizationOverhead(DstVTy, true, false) : 0);
    }

    llvm_unreachable("Unhandled cast");
  }

  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                       Index) +
           thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
                                     TTI::TCK_RecipThroughput);
  }

  unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
    return BaseT::getCFInstrCost(Opcode, CostKind);
  }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                              TTI::TargetCostKind CostKind,
                              const Instruction *I = nullptr) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // TODO: Handle other cost kinds.
    if (CostKind != TTI::TCK_RecipThroughput)
      return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply
      // by the type-legalization overhead.
      return LT.first;
    }

    // Otherwise, assume that the cast is scalarized.
    // TODO: If one of the types get legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
      unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
      if (CondTy)
        CondTy = CondTy->getScalarType();
      unsigned Cost = thisT()->getCmpSelInstrCost(
          Opcode, ValVTy->getScalarType(), CondTy, CostKind, I);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(ValVTy, true, false) + Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    std::pair<unsigned, MVT> LT =
        getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());

    return LT.first;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                           unsigned AddressSpace,
                           TTI::TargetCostKind CostKind,
                           const Instruction *I = nullptr) {
    assert(!Src->isVoidTy() && "Invalid type");
    // Assume types, such as structs, are expensive.
    if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
      return 4;
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src);

    // Assuming that all loads of legal types cost 1.
    unsigned Cost = LT.first;
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost;

    if (Src->isVectorTy() &&
        Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store is
      // legal, then this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(DL, Src);
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is scalarized.
        // We must account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(cast<VectorType>(Src),
                                         Opcode != Instruction::Store,
                                         Opcode == Instruction::Store);
      }
    }

    return Cost;
  }

  unsigned getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) {
    auto *VT = cast<FixedVectorType>(VecTy);

    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");

    unsigned NumSubElts = NumElts / Factor;
    auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);

    // Firstly, the cost of load/store operation.
    unsigned Cost;
    if (UseMaskForCond || UseMaskForGaps)
      Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
                                            AddressSpace, CostKind);
    else
      Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
                                      CostKind);

    // Legalize the vector type, and get the legalized and unlegalized type
    // sizes.
    MVT VecTyLT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
    unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
    unsigned VecTyLTSize = VecTyLT.getStoreSize();

    // Return the ceiling of dividing A by B.
    auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };

    // Scale the cost of the memory operation by the fraction of legalized
    // instructions that will actually be used. We shouldn't account for the
    // cost of dead instructions since they will be removed.
    //
    // E.g., An interleaved load of factor 8:
    //       %vec = load <16 x i64>, <16 x i64>* %ptr
    //       %v0 = shufflevector %vec, undef, <0, 8>
    //
    // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
    // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
    // type). The other loads are unused.
    //
    // We only scale the cost of loads since interleaved store groups aren't
    // allowed to have gaps.
    if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) {
      // The number of loads of a legal type it will take to represent a load
      // of the unlegalized vector type.
      unsigned NumLegalInsts = ceil(VecTySize, VecTyLTSize);

      // The number of elements of the unlegalized type that correspond to a
      // single legal instruction.
      unsigned NumEltsPerLegalInst = ceil(NumElts, NumLegalInsts);

      // Determine which legal instructions will be used.
      BitVector UsedInsts(NumLegalInsts, false);
      for (unsigned Index : Indices)
        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);

      // Scale the cost of the load by the fraction of legal instructions that
      // will be used. Multiply first so the integer division doesn't truncate
      // the scaled cost to zero.
      Cost = Cost * UsedInsts.count() / NumLegalInsts;
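      // In the factor-8 example above (illustrative arithmetic): with
      // NumLegalInsts = 8 and only 2 of the legal loads used, a memory cost
      // of 8 scales to 8 * 2 / 8 = 2.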
    }

    // Then plus the cost of interleave operation.
    if (Opcode == Instruction::Load) {
      // The interleave cost is similar to extract sub vectors' elements
      // from the wide vector, and insert them into sub vectors.
      //
      // E.g. An interleaved load of factor 2 (with one member of index 0):
      //      %vec = load <8 x i32>, <8 x i32>* %ptr
      //      %v0 = shuffle %vec, undef, <0, 2, 4, 6>   ; Index 0
      // The cost is estimated as extract elements at 0, 2, 4, 6 from the
      // <8 x i32> vector and insert them into a <4 x i32> vector.

      assert(Indices.size() <= Factor &&
             "Interleaved memory op has too many members");

      for (unsigned Index : Indices) {
        assert(Index < Factor && "Invalid index for interleaved memory op");

        // Extract elements from loaded vector for each sub vector.
        for (unsigned i = 0; i < NumSubElts; i++)
          Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VT,
                                              Index + i * Factor);
      }

      unsigned InsSubCost = 0;
      for (unsigned i = 0; i < NumSubElts; i++)
        InsSubCost +=
            thisT()->getVectorInstrCost(Instruction::InsertElement, SubVT, i);

      Cost += Indices.size() * InsSubCost;
    } else {
      // The interleave cost is extract all elements from sub vectors, and
      // insert them into the wide vector.
      //
      // E.g. An interleaved store of factor 2:
      //      %v0_v1 = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>
      //      store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
      // The cost is estimated as extract all elements from both <4 x i32>
      // vectors and insert into the <8 x i32> vector.

      unsigned ExtSubCost = 0;
      for (unsigned i = 0; i < NumSubElts; i++)
        ExtSubCost +=
            thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVT, i);
      Cost += ExtSubCost * Factor;

      for (unsigned i = 0; i < NumElts; i++)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, VT, i);
    }

    if (!UseMaskForCond)
      return Cost;

    Type *I8Type = Type::getInt8Ty(VT->getContext());
    auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
    SubVT = FixedVectorType::get(I8Type, NumSubElts);

    // The Mask shuffling cost is extract all the elements of the Mask
    // and insert each of them Factor times into the wide vector:
    //
    // E.g. an interleaved group with factor 3:
    //    %mask = icmp ult <8 x i32> %vec1, %vec2
    //    %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
    //      <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
    // The cost is estimated as extract all mask elements from the <8xi1> mask
    // vector and insert them factor times into the <24xi1> shuffled mask
    // vector.
    for (unsigned i = 0; i < NumSubElts; i++)
      Cost +=
          thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVT, i);

    for (unsigned i = 0; i < NumElts; i++)
      Cost +=
          thisT()->getVectorInstrCost(Instruction::InsertElement, MaskVT, i);

    // The Gaps mask is invariant and created outside the loop, therefore the
    // cost of creating it is not accounted for here. However, if we have both
    // a MaskForGaps and some other mask that guards the execution of the
    // memory access, we need to account for the cost of And-ing the two masks
    // here.
    if (UseMaskForGaps)
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
                                              CostKind);

    return Cost;
  }

  /// Get intrinsic cost based on arguments.
  unsigned getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                 TTI::TargetCostKind CostKind) {
    Intrinsic::ID IID = ICA.getID();

    // Special case some scalar intrinsics.
    if (CostKind != TTI::TCK_RecipThroughput) {
      switch (IID) {
      default:
        break;
      case Intrinsic::cttz:
        if (getTLI()->isCheapToSpeculateCttz())
          return TargetTransformInfo::TCC_Basic;
        break;
      case Intrinsic::ctlz:
        if (getTLI()->isCheapToSpeculateCtlz())
          return TargetTransformInfo::TCC_Basic;
        break;
      case Intrinsic::memcpy:
        return thisT()->getMemcpyCost(ICA.getInst());
      // TODO: other libc intrinsics.
      }
      return BaseT::getIntrinsicInstrCost(ICA, CostKind);
    }

    if (BaseT::getIntrinsicInstrCost(ICA, CostKind) == 0)
      return 0;

    // TODO: Combine these two logic paths.
    if (ICA.isTypeBasedOnly())
      return getTypeBasedIntrinsicInstrCost(ICA, CostKind);

    Type *RetTy = ICA.getReturnType();
    unsigned VF = ICA.getVectorFactor();
    unsigned RetVF =
        (RetTy->isVectorTy() ? cast<FixedVectorType>(RetTy)->getNumElements()
                             : 1);
    assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type");
    const IntrinsicInst *I = ICA.getInst();
    const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
    FastMathFlags FMF = ICA.getFlags();

    switch (IID) {
    default: {
      // Assume that we need to scalarize this intrinsic.
      SmallVector<Type *, 4> Types;
      for (const Value *Op : Args) {
        Type *OpTy = Op->getType();
        assert(VF == 1 || !OpTy->isVectorTy());
        Types.push_back(VF == 1 ? OpTy : FixedVectorType::get(OpTy, VF));
      }

      if (VF > 1 && !RetTy->isVoidTy())
        RetTy = FixedVectorType::get(RetTy, VF);

      // Compute the scalarization overhead based on Args for a vector
      // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
      // CostModel will pass a vector RetTy and VF is 1.
      unsigned ScalarizationCost = std::numeric_limits<unsigned>::max();
      if (RetVF > 1 || VF > 1) {
        ScalarizationCost = 0;
        if (!RetTy->isVoidTy())
          ScalarizationCost +=
              getScalarizationOverhead(cast<VectorType>(RetTy), true, false);
        ScalarizationCost += getOperandsScalarizationOverhead(Args, VF);
      }

      IntrinsicCostAttributes Attrs(IID, RetTy, Types, FMF,
                                    ScalarizationCost, I);
      return thisT()->getIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::masked_scatter: {
      assert(VF == 1 && "Can't vectorize types here.");
      const Value *Mask = Args[3];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
      return thisT()->getGatherScatterOpCost(Instruction::Store,
                                             Args[0]->getType(), Args[1],
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::masked_gather: {
      assert(VF == 1 && "Can't vectorize types here.");
      const Value *Mask = Args[2];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
      return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::experimental_vector_reduce_add:
    case Intrinsic::experimental_vector_reduce_mul:
    case Intrinsic::experimental_vector_reduce_and:
    case Intrinsic::experimental_vector_reduce_or:
    case Intrinsic::experimental_vector_reduce_xor:
    case Intrinsic::experimental_vector_reduce_v2_fadd:
    case Intrinsic::experimental_vector_reduce_v2_fmul:
    case Intrinsic::experimental_vector_reduce_smax:
    case Intrinsic::experimental_vector_reduce_smin:
    case Intrinsic::experimental_vector_reduce_fmax:
    case Intrinsic::experimental_vector_reduce_fmin:
    case Intrinsic::experimental_vector_reduce_umax:
    case Intrinsic::experimental_vector_reduce_umin: {
      IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, 1, I);
      return getIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      const Value *X = Args[0];
      const Value *Y = Args[1];
      const Value *Z = Args[2];
      TTI::OperandValueProperties OpPropsX, OpPropsY, OpPropsZ, OpPropsBW;
      TTI::OperandValueKind OpKindX = TTI::getOperandInfo(X, OpPropsX);
      TTI::OperandValueKind OpKindY = TTI::getOperandInfo(Y, OpPropsY);
      TTI::OperandValueKind OpKindZ = TTI::getOperandInfo(Z, OpPropsZ);
      TTI::OperandValueKind OpKindBW = TTI::OK_UniformConstantValue;
      OpPropsBW = isPowerOf2_32(RetTy->getScalarSizeInBits()) ? TTI::OP_PowerOf2
                                                              : TTI::OP_None;
      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
      unsigned Cost = 0;
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::Shl, RetTy, CostKind, OpKindX, OpKindZ, OpPropsX);
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::LShr, RetTy, CostKind, OpKindY, OpKindZ, OpPropsY);
      // Non-constant shift amounts require a modulo.
      if (OpKindZ != TTI::OK_UniformConstantValue &&
          OpKindZ != TTI::OK_NonUniformConstantValue)
        Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
                                                CostKind, OpKindZ, OpKindBW,
                                                OpPropsZ, OpPropsBW);
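      // For example (illustrative): a variable-amount funnel shift of i32 is
      // costed as or + sub + shl + lshr + urem, plus the icmp/select pair
      // added below when this is not a rotate (X != Y).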
      // For non-rotates (X != Y) we must add shift-by-zero handling costs.
      if (X != Y) {
        Type *CondTy = RetTy->getWithNewBitWidth(1);
        Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                            CostKind);
        Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                            CondTy, CostKind);
      }
      return Cost;
    }
    }
  }

  /// Get intrinsic cost based on argument types.
  /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
  /// cost of scalarizing the arguments and the return value will be computed
  /// based on types.
  unsigned getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                          TTI::TargetCostKind CostKind) {
    Intrinsic::ID IID = ICA.getID();
    Type *RetTy = ICA.getReturnType();
    const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
    FastMathFlags FMF = ICA.getFlags();
    unsigned ScalarizationCostPassed = ICA.getScalarizationCost();
    bool SkipScalarizationCost = ICA.skipScalarizationCost();

    auto *VecOpTy = Tys.empty() ? nullptr : dyn_cast<VectorType>(Tys[0]);

    SmallVector<unsigned, 2> ISDs;
    unsigned SingleCallCost = 10; // Library call cost. Make it expensive.
    switch (IID) {
    default: {
      // Assume that we need to scalarize this intrinsic.
      unsigned ScalarizationCost = ScalarizationCostPassed;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
        if (!SkipScalarizationCost)
          ScalarizationCost = getScalarizationOverhead(RetVTy, true, false);
        ScalarCalls = std::max(ScalarCalls,
                               cast<FixedVectorType>(RetVTy)->getNumElements());
        ScalarRetTy = RetTy->getScalarType();
      }
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (auto *VTy = dyn_cast<VectorType>(Ty)) {
          if (!SkipScalarizationCost)
            ScalarizationCost += getScalarizationOverhead(VTy, false, true);
          ScalarCalls = std::max(ScalarCalls,
                                 cast<FixedVectorType>(VTy)->getNumElements());
          Ty = Ty->getScalarType();
        }
        ScalarTys.push_back(Ty);
      }
      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
      unsigned ScalarCost =
          thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }
    // Look for intrinsics that can be lowered directly or turned into a scalar
    // intrinsic call.
    case Intrinsic::sqrt:
      ISDs.push_back(ISD::FSQRT);
      break;
    case Intrinsic::sin:
      ISDs.push_back(ISD::FSIN);
      break;
    case Intrinsic::cos:
      ISDs.push_back(ISD::FCOS);
      break;
    case Intrinsic::exp:
      ISDs.push_back(ISD::FEXP);
      break;
    case Intrinsic::exp2:
      ISDs.push_back(ISD::FEXP2);
      break;
    case Intrinsic::log:
      ISDs.push_back(ISD::FLOG);
      break;
    case Intrinsic::log10:
      ISDs.push_back(ISD::FLOG10);
      break;
    case Intrinsic::log2:
      ISDs.push_back(ISD::FLOG2);
      break;
    case Intrinsic::fabs:
      ISDs.push_back(ISD::FABS);
      break;
    case Intrinsic::canonicalize:
      ISDs.push_back(ISD::FCANONICALIZE);
      break;
    case Intrinsic::minnum:
      ISDs.push_back(ISD::FMINNUM);
      if (FMF.noNaNs())
        ISDs.push_back(ISD::FMINIMUM);
      break;
    case Intrinsic::maxnum:
      ISDs.push_back(ISD::FMAXNUM);
      if (FMF.noNaNs())
        ISDs.push_back(ISD::FMAXIMUM);
      break;
    case Intrinsic::copysign:
      ISDs.push_back(ISD::FCOPYSIGN);
      break;
    case Intrinsic::floor:
      ISDs.push_back(ISD::FFLOOR);
      break;
    case Intrinsic::ceil:
      ISDs.push_back(ISD::FCEIL);
      break;
    case Intrinsic::trunc:
      ISDs.push_back(ISD::FTRUNC);
      break;
    case Intrinsic::nearbyint:
      ISDs.push_back(ISD::FNEARBYINT);
      break;
    case Intrinsic::rint:
      ISDs.push_back(ISD::FRINT);
      break;
    case Intrinsic::round:
      ISDs.push_back(ISD::FROUND);
      break;
    case Intrinsic::roundeven:
      ISDs.push_back(ISD::FROUNDEVEN);
      break;
    case Intrinsic::pow:
      ISDs.push_back(ISD::FPOW);
      break;
    case Intrinsic::fma:
      ISDs.push_back(ISD::FMA);
      break;
    case Intrinsic::fmuladd:
      ISDs.push_back(ISD::FMA);
      break;
    case Intrinsic::experimental_constrained_fmuladd:
      ISDs.push_back(ISD::STRICT_FMA);
      break;
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::sideeffect:
      return 0;
    case Intrinsic::masked_store: {
      Type *Ty = Tys[0];
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
                                            CostKind);
    }
    case Intrinsic::masked_load: {
      Type *Ty = RetTy;
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
                                            CostKind);
    }
    case Intrinsic::experimental_vector_reduce_add:
      return thisT()->getArithmeticReductionCost(Instruction::Add, VecOpTy,
                                                 /*IsPairwiseForm=*/false,
                                                 CostKind);
    case Intrinsic::experimental_vector_reduce_mul:
      return thisT()->getArithmeticReductionCost(Instruction::Mul, VecOpTy,
                                                 /*IsPairwiseForm=*/false,
                                                 CostKind);
    case Intrinsic::experimental_vector_reduce_and:
      return thisT()->getArithmeticReductionCost(Instruction::And, VecOpTy,
                                                 /*IsPairwiseForm=*/false,
                                                 CostKind);
    case Intrinsic::experimental_vector_reduce_or:
      return thisT()->getArithmeticReductionCost(Instruction::Or, VecOpTy,
                                                 /*IsPairwiseForm=*/false,
                                                 CostKind);
    case Intrinsic::experimental_vector_reduce_xor:
      return thisT()->getArithmeticReductionCost(Instruction::Xor, VecOpTy,
                                                 /*IsPairwiseForm=*/false,
                                                 CostKind);
    case Intrinsic::experimental_vector_reduce_v2_fadd:
      // FIXME: Add new flag for cost of strict reductions.
      return thisT()->getArithmeticReductionCost(Instruction::FAdd, VecOpTy,
                                                 /*IsPairwiseForm=*/false,
                                                 CostKind);
    case Intrinsic::experimental_vector_reduce_v2_fmul:
      // FIXME: Add new flag for cost of strict reductions.
      return thisT()->getArithmeticReductionCost(Instruction::FMul, VecOpTy,
                                                 /*IsPairwiseForm=*/false,
                                                 CostKind);
    case Intrinsic::experimental_vector_reduce_smax:
    case Intrinsic::experimental_vector_reduce_smin:
    case Intrinsic::experimental_vector_reduce_fmax:
    case Intrinsic::experimental_vector_reduce_fmin:
      return thisT()->getMinMaxReductionCost(
          VecOpTy, cast<VectorType>(CmpInst::makeCmpResultType(VecOpTy)),
          /*IsPairwiseForm=*/false,
          /*IsUnsigned=*/false, CostKind);
    case Intrinsic::experimental_vector_reduce_umax:
    case Intrinsic::experimental_vector_reduce_umin:
      return thisT()->getMinMaxReductionCost(
          VecOpTy, cast<VectorType>(CmpInst::makeCmpResultType(VecOpTy)),
          /*IsPairwiseForm=*/false,
          /*IsUnsigned=*/true, CostKind);
    case Intrinsic::sadd_sat:
    case Intrinsic::ssub_sat: {
      Type *CondTy = RetTy->getWithNewBitWidth(1);

      Type *OpTy = StructType::create({RetTy, CondTy});
      Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
                                     ? Intrinsic::sadd_with_overflow
                                     : Intrinsic::ssub_with_overflow;

      // SatMax -> Overflow && SumDiff < 0
      // SatMin -> Overflow && SumDiff >= 0
      unsigned Cost = 0;
      IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
                                    ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          CostKind);
      Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                              CondTy, CostKind);
      return Cost;
    }
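    // Illustrative breakdown of the cost above: one overflow intrinsic, one
    // icmp to pick between SatMax and SatMin, and two selects to choose
    // between the saturated and wrapped results.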
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat: {
      Type *CondTy = RetTy->getWithNewBitWidth(1);

      Type *OpTy = StructType::create({RetTy, CondTy});
      Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
                                     ? Intrinsic::uadd_with_overflow
                                     : Intrinsic::usub_with_overflow;

      unsigned Cost = 0;
      IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
                                    ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                          CostKind);
      return Cost;
    }
    case Intrinsic::smul_fix:
    case Intrinsic::umul_fix: {
      unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
      Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);

      unsigned ExtOp =
          IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;

      unsigned Cost = 0;
      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
                                            CostKind);
      Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
                                              CostKind, TTI::OK_AnyValue,
                                              TTI::OK_UniformConstantValue);
      Cost += thisT()->getArithmeticInstrCost(Instruction::Shl, RetTy, CostKind,
                                              TTI::OK_AnyValue,
                                              TTI::OK_UniformConstantValue);
      Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
      return Cost;
    }
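    // Illustrative breakdown of the cost above: widen both operands, do one
    // wide multiply, then recombine through two truncates and an
    // lshr/shl/or of the halves.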
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::ssub_with_overflow: {
      Type *SumTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned Opcode = IID == Intrinsic::sadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;

      //   LHSSign -> LHS >= 0
      //   RHSSign -> RHS >= 0
      //   SumSign -> Sum >= 0
      //
      //   Add:
      //   Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
      //   Sub:
      //   Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
      unsigned Cost = 0;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost += 3 * thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
                                              OverflowTy, CostKind);
      Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, OverflowTy,
                                              OverflowTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, OverflowTy,
                                              CostKind);
      return Cost;
    }
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::usub_with_overflow: {
      Type *SumTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned Opcode = IID == Intrinsic::uadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;

      unsigned Cost = 0;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,
                                          OverflowTy, CostKind);
      return Cost;
    }
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      Type *MulTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
      Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);

      unsigned ExtOp = IID == Intrinsic::smul_with_overflow
                           ? Instruction::SExt
                           : Instruction::ZExt;

      unsigned Cost = 0;
      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
                                            CostKind);
      Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, MulTy,
                                              CostKind, TTI::OK_AnyValue,
                                              TTI::OK_UniformConstantValue);

      if (IID == Intrinsic::smul_with_overflow)
        Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy,
                                                CostKind, TTI::OK_AnyValue,
                                                TTI::OK_UniformConstantValue);

      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, MulTy,
                                          OverflowTy, CostKind);
      return Cost;
    }
    case Intrinsic::ctpop:
      ISDs.push_back(ISD::CTPOP);
      // In case of legalization use TCC_Expensive. This is cheaper than a
      // library call but still not a cheap instruction.
      SingleCallCost = TargetTransformInfo::TCC_Expensive;
      break;
    // FIXME: ctlz, cttz, ...
    case Intrinsic::bswap:
      ISDs.push_back(ISD::BSWAP);
      break;
    case Intrinsic::bitreverse:
      ISDs.push_back(ISD::BITREVERSE);
      break;
    }

    const TargetLoweringBase *TLI = getTLI();
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);

    SmallVector<unsigned, 2> LegalCost;
    SmallVector<unsigned, 2> CustomCost;
    for (unsigned ISD : ISDs) {
      if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
        if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
            TLI->isFAbsFree(LT.second)) {
          return 0;
        }

        // The operation is legal. Assume it costs 1.
        // If the type is split to multiple registers, assume that there is some
        // overhead to this.
        // TODO: Once we have extract/insert subvector cost we need to use them.
        if (LT.first > 1)
          LegalCost.push_back(LT.first * 2);
        else
          LegalCost.push_back(LT.first * 1);
      } else if (!TLI->isOperationExpand(ISD, LT.second)) {
        // If the operation is custom lowered then assume
        // that the code is twice as expensive.
        CustomCost.push_back(LT.first * 2);
      }
    }

    auto MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end());
    if (MinLegalCostI != LegalCost.end())
      return *MinLegalCostI;

    auto MinCustomCostI =
        std::min_element(CustomCost.begin(), CustomCost.end());
    if (MinCustomCostI != CustomCost.end())
      return *MinCustomCostI;

    // If we can't lower fmuladd into an FMA estimate the cost as a floating
    // point mul followed by an add.
    if (IID == Intrinsic::fmuladd)
      return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
                                             CostKind) +
             thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
                                             CostKind);
    if (IID == Intrinsic::experimental_constrained_fmuladd) {
      IntrinsicCostAttributes FMulAttrs(
          Intrinsic::experimental_constrained_fmul, RetTy, Tys);
      IntrinsicCostAttributes FAddAttrs(
          Intrinsic::experimental_constrained_fadd, RetTy, Tys);
      return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
             thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
    }

    // Else, assume that we need to scalarize this intrinsic. For math builtins
    // this will emit a costly libcall, adding call overhead and spills. Make it
    // very expensive.
    if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
      unsigned ScalarizationCost = SkipScalarizationCost ?
          ScalarizationCostPassed : getScalarizationOverhead(RetVTy, true, false);

      unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
        ScalarTys.push_back(Ty);
      }
      IntrinsicCostAttributes Attrs(IID, RetTy->getScalarType(), ScalarTys, FMF);
      unsigned ScalarCost = thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (auto *VTy = dyn_cast<VectorType>(Tys[i])) {
          if (!ICA.skipScalarizationCost())
            ScalarizationCost += getScalarizationOverhead(VTy, false, true);
          ScalarCalls = std::max(ScalarCalls,
                                 cast<FixedVectorType>(VTy)->getNumElements());
        }
      }
      return ScalarCalls * ScalarCost + ScalarizationCost;
    }

    // This is going to be turned into a library call, make it expensive.
    return SingleCallCost;
  }

  /// Compute a cost of the given call instruction.
  ///
  /// Compute the cost of calling function F with return type RetTy and
  /// argument types Tys. F might be nullptr, in this case the cost of an
  /// arbitrary call with the specified signature will be returned.
  /// This is used, for instance, when we estimate call of a vector
  /// counterpart of the given function.
  /// \param F Called function, might be nullptr.
  /// \param RetTy Return value types.
  /// \param Tys Argument types.
  /// \returns The cost of Call instruction.
  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys,
                            TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) {
    return 10;
  }

  unsigned getNumberOfParts(Type *Tp) {
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp);
    return LT.first;
  }

  unsigned getAddressComputationCost(Type *Ty, ScalarEvolution *,
                                     const SCEV *) {
    return 0;
  }
  /// Try to calculate arithmetic and shuffle op costs for reduction operations.
  /// We're assuming that reduction operations are performed in the following way:
  /// 1. Non-pairwise reduction
  /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
  /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n-1, i32 undef, ..., i32 undef>
  ///            \----------------v-------------/      \----------v------------/
  ///                     n/2 elements                       n/2 elements
  /// %red1 = op <n x t> %val, <n x t> %val1
  /// After this operation we have a vector %red1 where only the first n/2
  /// elements are meaningful; the second n/2 elements are undefined and can be
  /// dropped. All subsequent operations are actually working with the vector of
  /// length n/2, not n, though the real vector length is still n.
  /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
  /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2 - 1, i32 undef, ..., i32 undef>
  ///            \----------------v-------------/          \----------v------------/
  ///                     n/4 elements                         3*n/4 elements
  /// %red2 = op <n x t> %red1, <n x t> %val2 - working with the vector of
  /// length n/2, the resulting vector has length n/4, etc.
  /// 2. Pairwise reduction:
  /// Everything is the same except for an additional shuffle operation which
  /// is used to produce operands for pairwise kind of reductions.
  /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
  /// <n x i32> <i32 0, i32 2, ..., i32 n-2, i32 undef, ..., i32 undef>
  ///            \-------------v----------/    \----------v------------/
  ///                  n/2 elements                 n/2 elements
  /// %val2 = shufflevector<n x t> %val, <n x t> %undef,
  /// <n x i32> <i32 1, i32 3, ..., i32 n-1, i32 undef, ..., i32 undef>
  ///            \-------------v----------/    \----------v------------/
  ///                  n/2 elements                 n/2 elements
  /// %red1 = op <n x t> %val1, <n x t> %val2
  /// Again, the operation is performed on an <n x t> vector, but the resulting
  /// vector %red1 is an <n/2 x t> vector.
  ///
  /// The cost model should take into account that the actual length of the
  /// vector is reduced on each iteration.
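  ///
  /// For example (illustrative), a full non-pairwise reduction of <4 x i32>
  /// with op = add costs two shuffle/op levels plus one extract:
  /// %r1 = shufflevector <4 x i32> %v, <4 x i32> undef,
  ///                     <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  /// %s1 = add <4 x i32> %v, %r1
  /// %r2 = shufflevector <4 x i32> %s1, <4 x i32> undef,
  ///                     <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  /// %s2 = add <4 x i32> %s1, %r2
  /// %res = extractelement <4 x i32> %s2, i32 0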
  unsigned getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                      bool IsPairwise,
                                      TTI::TargetCostKind CostKind) {
    Type *ScalarTy = Ty->getElementType();
    unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    unsigned ArithCost = 0;
    unsigned ShuffleCost = 0;
    std::pair<unsigned, MVT> LT =
        thisT()->getTLI()->getTypeLegalizationCost(DL, Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
      // Assume the pairwise shuffles add a cost.
      ShuffleCost +=
          (IsPairwise + 1) * thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
                                                     Ty, NumVecElts, SubTy);
      ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
      Ty = SubTy;
      ++LongVectorCount;
    }
    NumReduxLevels -= LongVectorCount;

    // The minimal length of the vector is limited by the real length of vector
    // operations performed on the current platform. That's why several final
    // reduction operations are performed on vectors with the same
    // architecture-dependent length.

    // Non-pairwise reductions need one shuffle per reduction level. Pairwise
    // reductions need two shuffles on every level but the last. On that
    // level one of the shuffles is <0, u, u, ...> which is identity.
    unsigned NumShuffles = NumReduxLevels;
    if (IsPairwise && NumReduxLevels >= 1)
      NumShuffles += NumReduxLevels - 1;
    ShuffleCost += NumShuffles *
                   thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, 0, Ty);
    ArithCost += NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty);
    return ShuffleCost + ArithCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
  }
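  // A rough illustration (assuming a hypothetical target whose widest legal
  // vector holds 4 of these elements): reducing <16 x float> first splits
  // 16 -> 8 -> 4 in the loop above (two subvector extracts plus two adds),
  // then charges log2(4) = 2 shuffle/add levels on the legal 4-wide type,
  // plus one final extractelement.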
  /// Try to calculate op costs for min/max reduction operations.
  /// \param CondTy Conditional type for the Select instruction.
  unsigned getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                  bool IsPairwise, bool IsUnsigned,
                                  TTI::TargetCostKind CostKind) {
    Type *ScalarTy = Ty->getElementType();
    Type *ScalarCondTy = CondTy->getElementType();
    unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    unsigned CmpOpcode;
    if (Ty->isFPOrFPVectorTy()) {
      CmpOpcode = Instruction::FCmp;
    } else {
      assert(Ty->isIntOrIntVectorTy() &&
             "expecting floating point or integer type for min/max reduction");
      CmpOpcode = Instruction::ICmp;
    }
    unsigned MinMaxCost = 0;
    unsigned ShuffleCost = 0;
    std::pair<unsigned, MVT> LT =
        thisT()->getTLI()->getTypeLegalizationCost(DL, Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
      CondTy = FixedVectorType::get(ScalarCondTy, NumVecElts);

      // Assume the pairwise shuffles add a cost.
      ShuffleCost +=
          (IsPairwise + 1) * thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
                                                     Ty, NumVecElts, SubTy);
      MinMaxCost +=
          thisT()->getCmpSelInstrCost(CmpOpcode, SubTy, CondTy, CostKind) +
          thisT()->getCmpSelInstrCost(Instruction::Select, SubTy, CondTy,
                                      CostKind);
      Ty = SubTy;
      ++LongVectorCount;
    }
    NumReduxLevels -= LongVectorCount;

    // The minimal length of the vector is limited by the real length of vector
    // operations performed on the current platform. That's why several final
    // reduction operations are performed on vectors with the same
    // architecture-dependent length.

    // Non-pairwise reductions need one shuffle per reduction level. Pairwise
    // reductions need two shuffles on every level but the last. On that
    // level one of the shuffles is <0, u, u, ...> which is identity.
    unsigned NumShuffles = NumReduxLevels;
    if (IsPairwise && NumReduxLevels >= 1)
      NumShuffles += NumReduxLevels - 1;
    ShuffleCost += NumShuffles *
                   thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, 0, Ty);
    MinMaxCost +=
        NumReduxLevels *
        (thisT()->getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CostKind) +
         thisT()->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
                                     CostKind));
    // The last min/max should be in vector registers and we counted it above.
    // So just need a single extractelement.
    return ShuffleCost + MinMaxCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
  }
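  // As with the arithmetic reduction above, each level is modeled as a
  // shuffle plus a cmp/select pair; e.g. (illustrative) a <4 x i32> smax
  // reduction is charged 2 shuffles, 2 icmp+select pairs, and 1
  // extractelement on a target where <4 x i32> is legal.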
  unsigned getVectorSplitCost() { return 1; }

  /// @}
};
/// Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
  using BaseT = BasicTTIImplBase<BasicTTIImpl>;

  friend class BasicTTIImplBase<BasicTTIImpl>;

  const TargetSubtargetInfo *ST;
  const TargetLoweringBase *TLI;

  const TargetSubtargetInfo *getST() const { return ST; }
  const TargetLoweringBase *getTLI() const { return TLI; }

public:
  explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
};
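// A minimal sketch (hypothetical names) of the same CRTP pattern with
// target-specific subtarget/lowering types, mirroring the concrete
// BasicTTIImpl above:
//
//   class MyTargetTTIImpl : public BasicTTIImplBase<MyTargetTTIImpl> {
//     friend class BasicTTIImplBase<MyTargetTTIImpl>;
//     const MyTargetSubtarget *ST;   // hypothetical subtarget class
//     const MyTargetLowering *TLI;   // hypothetical lowering class
//     const MyTargetSubtarget *getST() const { return ST; }
//     const MyTargetLowering *getTLI() const { return TLI; }
//   public:
//     // Override only the hooks whose generic cost model is wrong for
//     // the target, e.g.:
//     unsigned getVectorSplitCost() { return 2; }
//   };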
} // end namespace llvm

#endif // LLVM_CODEGEN_BASICTTIIMPL_H