//===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "ARMTargetTransformInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "armtti"
bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // To inline a callee, all features not in the whitelist must match exactly.
  bool MatchExact = (CallerBits & ~InlineFeatureWhitelist) ==
                    (CalleeBits & ~InlineFeatureWhitelist);
  // For features in the whitelist, the callee's features must be a subset of
  // the caller's.
  bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeatureWhitelist) ==
                     (CalleeBits & InlineFeatureWhitelist);
  return MatchExact && MatchSubset;
}
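
// Illustrative example: assuming the hardware-divide feature is on the
// whitelist, a caller built with {v7, hwdiv} may inline a callee built with
// plain {v7} (the callee's whitelisted features are a subset of the
// caller's), but a callee built with {v7, neon} cannot be inlined into a
// {v7}-only caller, because non-whitelisted features must match exactly.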

int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;

  int64_t SImmVal = Imm.getSExtValue();
  uint64_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1.
  if (SImmVal >= 0 && SImmVal < 256)
    return 1;
  if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}
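
// Worked example of the model above: in ARM mode, 0xFF00 is a valid so_imm
// (an 8-bit value rotated by an even amount), so it costs 1; a constant like
// 0x12345678 has no such encoding and costs a movw/movt pair (2) on v6T2+
// cores, or a constant-pool load (3) on older ones.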

// Constants smaller than 256 fit in the immediate field of
// Thumb1 instructions so we return a zero cost and 1 otherwise.
int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty) {
  if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
    return 0;

  return 1;
}

int ARMTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  // Division by a constant can be turned into multiplication, but only if we
  // know it's constant. So it's not so much that the immediate is cheap (it's
  // not), but that the alternative is worse.
  // FIXME: this is probably unneeded with GlobalISel.
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
       Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
      Idx == 1)
    return 0;

  if (Opcode == Instruction::And)
    // Conversion to BIC is free, and means we can use ~Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(~Imm, Ty));

  if (Opcode == Instruction::Add)
    // Conversion to SUB is free, and means we can use -Imm instead.
    return std::min(getIntImmCost(Imm, Ty), getIntImmCost(-Imm, Ty));

  if (Opcode == Instruction::ICmp && Imm.isNegative() &&
      Ty->getIntegerBitWidth() == 32) {
    int64_t NegImm = -Imm.getSExtValue();
    if (ST->isThumb2() && NegImm < 1<<12)
      // icmp X, #-C -> cmn X, #C
      return 0;
    if (ST->isThumb() && NegImm < 1<<8)
      // icmp X, #-C -> adds X, #C
      return 0;
  }

  return getIntImmCost(Imm, Ty);
}
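
// For instance, and'ing with 0xFFFFFF00 has no immediate encoding, but its
// complement 0xFF does, so the And case above prices the operation as a BIC
// with #0xFF; similarly, the Add case prices `add X, #-C` as `sub X, #C`.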

int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND,  MVT::v2f64, 2 },
    { ISD::FP_EXTEND, MVT::v2f32, 2 },
    { ISD::FP_EXTEND, MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
                                             ISD == ISD::FP_EXTEND)) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },

    { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
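
// Reading the tables above: a sext from v4i16 to v4i32 is free (cost 0)
// because the widening folds into the surrounding operations, while a sext
// from v8i8 to v8i64 costs 7, reflecting the chain of vmovl steps needed
// after legalization.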

int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
  if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if ((Opcode == Instruction::InsertElement ||
       Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (ValTy->getVectorElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross-class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only handle costs of reverse and alternate shuffles for now.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    static const CostTblEntry NEONShuffleTbl[] = {
        // A reverse shuffle costs one instruction if we are shuffling within a
        // double word (vrev) or two if we shuffle a quad word (vrev, vext).
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    if (const auto *Entry = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE,
                                            LT.second))
      return LT.first * Entry->Cost;

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  if (Kind == TTI::SK_Alternate) {
    static const CostTblEntry NEONAltShuffleTbl[] = {
        // Alt shuffle cost table for ARM. Cost is the number of instructions
        // required to create the shuffled vector.

        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(NEONAltShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
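
// Example of the reverse-shuffle costs above: reversing a v4i32 takes two
// instructions (a vrev64.32 of the quad register followed by a vext to swap
// the double words), hence the cost of 2 for the quad-word entries.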

int ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
  static const CostTblEntry CostTbl[] = {
    // Division.
    // These costs are somewhat random. Choose a cost of 20 to indicate that
    // vectorizing division (added function call) is going to be very expensive.
    // Double register types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
    // Quad register types.
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
    return LT.first * Entry->Cost;

  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);

  // This is somewhat of a hack. The problem that we are facing is that SROA
  // creates a sequence of shift, and, or instructions to construct values.
  // These sequences are recognized by the ISel and have zero cost. Not so for
  // the vectorized code. Because we have support for v2i64 but not i64 those
  // sequences look particularly beneficial to vectorize.
  // To work around this we increase the cost of v2i64 operations to make them
  // seem less beneficial.
  if (LT.second == MVT::v2i64 &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue)
    Cost += 4;

  return Cost;
}
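
// Under the v2i64 workaround above, e.g. a shift of v2i64 by a uniform
// constant is reported 4 units more expensive than the base estimate, which
// is usually enough to keep SROA-produced scalar sequences scalar.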

int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
    return LT.first * 4;
  }
  return LT.first;
}
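
// For example, a load or store of a double-element vector with an alignment
// other than 16 is charged 4 units per legalized access to reflect the vld1
// or vst1 expansion, versus 1 for the vldr/vstr path.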

int ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN don't support vector types with i64/f64 elements.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits) {
    unsigned NumElts = VecTy->getVectorNumElements();
    auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // vldN/vstN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one vldN/vstN instruction.
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}
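
// Example: an interleaved access of factor 2 over v8i16 splits into
// SubVecTy = v4i16, a legal 64-bit NEON type needing one access per leg,
// so the cost is Factor * 1 = 2, i.e. a single vld2.16/vst2.16.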

void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  // Only currently enable these preferences for M-Class cores.
  if (!ST->isMClass())
    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);

  // Disable loop unrolling for Oz and Os.
  UP.OptSizeThreshold = 0;
  UP.PartialOptSizeThreshold = 0;
  if (L->getHeader()->getParent()->optForSize())
    return;

  // Only enable on Thumb-2 targets.
  if (!ST->isThumb2())
    return;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  DEBUG(dbgs() << "Loop has:\n"
               << "Blocks: " << L->getNumBlocks() << "\n"
               << "Exit blocks: " << ExitingBlocks.size() << "\n");

  // Allow at most one exit other than the latch. This acts as an early exit
  // as it mirrors the profitability calculation of the runtime unroller.
  if (ExitingBlocks.size() > 2)
    return;

  // Limit the CFG of the loop body for targets with a branch predictor.
  // Allowing 4 blocks permits if-then-else diamonds in the body.
  if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
    return;

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining.
  unsigned Cost = 0;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }
      SmallVector<const Value*, 4> Operands(I.value_op_begin(),
                                            I.value_op_end());
      Cost += getUserCost(&I, Operands);
    }
  }

  DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");

  UP.Partial = true;
  UP.Runtime = true;
  UP.UnrollRemainder = true;
  UP.DefaultUnrollRuntimeCount = 4;

  // Force-unrolling small loops can be very useful because of the branch
  // taken cost of the backedge.
  if (Cost < 12)
    UP.Force = true;
}
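
// Net effect of these preferences: small Thumb-2 M-class loop bodies (cost
// below 12) are force-unrolled, with runtime unrolling defaulting to a count
// of 4 and the remainder loop itself unrolled.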