//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: they correspond to some
/// "generic" X86 CPU rather than a concrete CPU model. Usually the numbers
/// correspond to the CPU where the feature first appeared. For example, if we
/// do Subtarget.hasSSE42() in the lookups below, the cost is based on Nehalem
/// as that was the first CPU to support that feature level and thus most
/// likely has the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3 - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX - Sandy Bridge
///   AVX2 - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss     sqrtss    rsqrtss
///   Piledriver      9-24      13-15     5
///   Pentium II,III  18        30        2
///   Nehalem         7-14      7-18      3
///   Haswell         10-13     11        5
/// TODO: Develop and implement the target dependent cost model and specialize
/// cost numbers for different Cost Model Targets such as throughput, code
/// size, latency and uop count.
//===----------------------------------------------------------------------===//
#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"
//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//
TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
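// (For reference: with ST->hasPOPCNT(), an @llvm.ctpop.i32 call lowers to a
// single popcntl instruction, which is why PSK_FastHardware is reported above
// rather than a software expansion estimate.)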
unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}
unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512())
      return 512;
    if (ST->hasAVX())
      return 256;
    if (ST->hasSSE1())
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}
unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle the loop, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
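  // A note on the lookup pattern used throughout this function: LT.first is
  // the number of legal-width pieces the IR type splits into and LT.second is
  // the legalized machine type, so a table hit is scaled by the split factor.
  // As an illustrative (not exhaustive) example: on an SSE2-only target a
  // v8i32 multiply legalizes to two v4i32 halves, so LT = {2, v4i32} and the
  // final cost is 2 * (the cost of one v4i32 MUL from the matching table).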
  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,  MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,  MVT::v8i16, 2  }, // pmullw
    { ISD::MUL,  MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL, MVT::f64,   2  }, // mulsd
    { ISD::FMUL, MVT::v2f64, 4  }, // mulpd
    { ISD::FMUL, MVT::v4f32, 2  }, // mulps
    { ISD::FDIV, MVT::f32,   17 }, // divss
    { ISD::FDIV, MVT::v4f32, 39 }, // divps
    { ISD::FDIV, MVT::f64,   32 }, // divsd
    { ISD::FDIV, MVT::v2f64, 69 }, // divpd
    { ISD::FADD, MVT::v2f64, 2  }, // addpd
    { ISD::FSUB, MVT::v2f64, 2  }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long
    // multiplies(3), shifts(3) and adds(2).
    // slm muldq version throughput is 2
    { ISD::MUL,  MVT::v2i64, 11 },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
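  // (The SLM block above illustrates the shrink heuristic: if both v4i32
  // multiply operands are provably narrow, e.g. zero-extended 8-bit values,
  // the multiply is priced as a 16-bit pmullw sequence instead of the much
  // slower SLM pmulld. This reading is a sketch of the intent, not an exact
  // per-CPU instruction count.)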
  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }
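  // (A hedged illustration of that expansion: "x sdiv 4" on v4i32 becomes
  // roughly psrad $31 (sign), psrld $30 (bias), paddd, psrad $2, which is why
  // the cost above is two AShr plus one LShr plus one Add.)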
  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,  2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,  2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,  4 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,  2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,  2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,  4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,  4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,   4 }, // 2*(psllw + pand).
    { ISD::SRL,  MVT::v32i8,   4 }, // 2*(psrlw + pand).
    { ISD::SRA,  MVT::v32i8,   8 }, // 2*(psrlw, pand, pxor, psubb).

    { ISD::SDIV, MVT::v16i16, 12 }, // pmulhw sequence
    { ISD::SDIV, MVT::v8i16,   6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v16i16, 12 }, // pmulhuw sequence
    { ISD::UDIV, MVT::v8i16,   6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  38 }, // pmuludq sequence
    { ISD::SDIV, MVT::v4i32,  19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v8i32,  30 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32,  15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 30;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    if (const auto *Entry = CostTableLookup(SSE2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 1 }, // psllw.
    { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA, MVT::v16i16, 1 }, // psraw.
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v8i16, 1 }, // psllw.
    { ISD::SHL, MVT::v4i32, 1 }, // pslld.
    { ISD::SHL, MVT::v2i64, 1 }, // psllq.

    { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL, MVT::v4i32, 1 }, // psrld.
    { ISD::SRL, MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA, MVT::v8i16, 1 }, // psraw.
    { ISD::SRA, MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL, MVT::v2i64, 1 },
    { ISD::MUL, MVT::v4i64, 1 },
    { ISD::MUL, MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,  MVT::v32i16,  1 }, // vpsllvw
    { ISD::SRL,  MVT::v32i16,  1 }, // vpsrlvw
    { ISD::SRA,  MVT::v32i16,  1 }, // vpsravw

    { ISD::SHL,  MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,  24 }, // vpblendvb sequence.

    { ISD::MUL,  MVT::v64i8,  11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v32i8,   4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   4 }, // extend/pmullw/trunc sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v64i8,  64*20 },
    { ISD::SDIV, MVT::v32i16, 32*20 },
    { ISD::UDIV, MVT::v64i8,  64*20 },
    { ISD::UDIV, MVT::v32i16, 32*20 }
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },
    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::MUL,  MVT::v32i8,  13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   5 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i32,  1 }, // pmulld
    { ISD::MUL,  MVT::v8i64,   8 }, // 3*pmuludq/3*shift/2*add

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v16i32, 16*20 },
    { ISD::SDIV, MVT::v8i64,   8*20 },
    { ISD::UDIV, MVT::v16i32, 16*20 },
    { ISD::UDIV, MVT::v8i64,   8*20 }
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2, even though we declare them as
    // custom in order to detect the cases where the shift amount is a scalar.
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 1 },
    { ISD::SRA, MVT::v4i32, 1 },
    { ISD::SHL, MVT::v8i32, 1 },
    { ISD::SRL, MVT::v8i32, 1 },
    { ISD::SRA, MVT::v8i32, 1 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },
  };
  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
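  // (Sketch of the vpmullw trick above: shifting i16 lanes left by constants
  // <c0, c1, ...> is the same as multiplying by <1<<c0, 1<<c1, ...>, so the
  // whole shift folds into a single vector multiply, hence the bare LT.first
  // cost.)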
  static const CostTblEntry XOPShiftCostTable[] = {
    // 128-bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL, MVT::v16i8,  1 },
    { ISD::SRL, MVT::v16i8,  2 },
    { ISD::SRA, MVT::v16i8,  2 },
    { ISD::SHL, MVT::v8i16,  1 },
    { ISD::SRL, MVT::v8i16,  2 },
    { ISD::SRA, MVT::v8i16,  2 },
    { ISD::SHL, MVT::v4i32,  1 },
    { ISD::SRL, MVT::v4i32,  2 },
    { ISD::SRA, MVT::v4i32,  2 },
    { ISD::SHL, MVT::v2i64,  1 },
    { ISD::SRL, MVT::v2i64,  2 },
    { ISD::SRA, MVT::v2i64,  2 },
    // 256-bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8,  2 },
    { ISD::SRL, MVT::v32i8,  4 },
    { ISD::SRA, MVT::v32i8,  4 },
    { ISD::SHL, MVT::v16i16, 2 },
    { ISD::SRL, MVT::v16i16, 4 },
    { ISD::SRA, MVT::v16i16, 4 },
    { ISD::SHL, MVT::v8i32,  2 },
    { ISD::SRL, MVT::v8i32,  4 },
    { ISD::SRA, MVT::v8i32,  4 },
    { ISD::SHL, MVT::v4i64,  2 },
    { ISD::SRL, MVT::v4i64,  4 },
    { ISD::SRA, MVT::v4i64,  4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 2 }, // psllw.
    { ISD::SHL, MVT::v8i32,  2 }, // pslld.
    { ISD::SHL, MVT::v4i64,  2 }, // psllq.

    { ISD::SRL, MVT::v16i16, 2 }, // psrlw.
    { ISD::SRL, MVT::v8i32,  2 }, // psrld.
    { ISD::SRL, MVT::v4i64,  2 }, // psrlq.

    { ISD::SRA, MVT::v16i16, 2 }, // psraw.
    { ISD::SRA, MVT::v8i32,  2 }, // psrad.
    { ISD::SRA, MVT::v2i64,  4 }, // 2 x psrad + shuffle.
    { ISD::SRA, MVT::v4i64,  8 }, // 2 x psrad + shuffle.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered
    // into a vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }
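  // (Note the reassignment above: by rewriting ISD to MUL, the lookups in the
  // remaining tables price this shift as the vector multiply it lowers to,
  // e.g. a v4i32 shift-by-constants would be charged as a pmulld on SSE4.1.)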
  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,  24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   4 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v32i8,  17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   7 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   1 }, // pmulld
    { ISD::MUL,  MVT::v4i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v32i8,   4 },
    { ISD::ADD,  MVT::v32i8,   4 },
    { ISD::SUB,  MVT::v16i16,  4 },
    { ISD::ADD,  MVT::v16i16,  4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },

    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(3) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // extract+insert in the cost table. Therefore, the cost here is 18
    // instead of 8.
    { ISD::MUL,  MVT::v4i64,  18 },

    { ISD::MUL,  MVT::v32i8,  26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  44 }, // SNB from http://www.agner.org/

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL, MVT::v16i8,    11 }, // pblendvb sequence.
    { ISD::SHL, MVT::v32i8,  2*11 }, // pblendvb sequence.
    { ISD::SHL, MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SHL, MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SHL, MVT::v4i32,     4 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL, MVT::v8i32,   2*4 }, // pslld/paddd/cvttps2dq/pmulld

    { ISD::SRL, MVT::v16i8,    12 }, // pblendvb sequence.
    { ISD::SRL, MVT::v32i8,  2*12 }, // pblendvb sequence.
    { ISD::SRL, MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SRL, MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SRL, MVT::v4i32,    11 }, // Shift each lane + blend.
    { ISD::SRL, MVT::v8i32,  2*11 }, // Shift each lane + blend.

    { ISD::SRA, MVT::v16i8,    24 }, // pblendvb sequence.
    { ISD::SRA, MVT::v32i8,  2*24 }, // pblendvb sequence.
    { ISD::SRA, MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SRA, MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SRA, MVT::v4i32,    12 }, // Shift each lane + blend.
    { ISD::SRA, MVT::v8i32,  2*12 }, // Shift each lane + blend.

    { ISD::MUL, MVT::v4i32,     1 }  // pmulld
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v8i32, 2*2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12 }, // srl/xor/sub sequence.

    { ISD::MUL,  MVT::v16i8,    12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,     1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,     6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,     8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,      23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,    39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,      38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,    69 }, // Pentium IV from http://www.agner.org/

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyways so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
    // to hide "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
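  // (To make the deterrent concrete: a v4i32 SDIV hits the 4*20 entry above
  // and is priced at 80, assuming roughly "20 cycles" of work per lane, which
  // reliably steers the vectorizers away from vector division.)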
  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}
int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // For Broadcasts we are splatting the first element from the first input
  // register, so we only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;
  // We are going to permute multiple sources and the result will be in
  // multiple destinations. We provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.getVectorElementType().getSizeInBits() ==
            Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(Tp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      unsigned NumOfDests = LT.first;

      Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
                                         LegalVT.getVectorNumElements());

      unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles *
             getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
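  // (Worked example of the formula above: permuting a v16i32 on an AVX2
  // target legalizes to two v8i32 registers, so NumOfSrcs = NumOfDests = 2
  // and the cost is (2 - 1) * 2 = 2 two-source v8i32 shuffles.)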
  // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }
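  // (E.g. when each of the two inputs splits into two legal registers
  // (LT.first of 2), every destination may draw from any of the four halves:
  // 2 * (2*2 - 1) = 6 legal-width shuffles are charged by the lookups below.)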
  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
    { TTI::SK_Reverse,          MVT::v64i8, 1 }, // vpermb
    { TTI::SK_Reverse,          MVT::v32i8, 1 }, // vpermb

    { TTI::SK_PermuteSingleSrc, MVT::v64i8, 1 }, // vpermb
    { TTI::SK_PermuteSingleSrc, MVT::v32i8, 1 }, // vpermb

    { TTI::SK_PermuteTwoSrc,    MVT::v64i8, 1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8, 1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8, 1 }  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX512BWShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v32i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast,        MVT::v64i8,  1 }, // vpbroadcastb

    { TTI::SK_Reverse,          MVT::v32i16, 1 }, // vpermw
    { TTI::SK_Reverse,          MVT::v16i16, 1 }, // vpermw
    { TTI::SK_Reverse,          MVT::v64i8,  2 }, // pshufb + vshufi64x2

    { TTI::SK_PermuteSingleSrc, MVT::v32i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v8i16,  1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v64i8,  8 }, // extend to v32i16
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  3 }, // vpermw + zext/trunc

    { TTI::SK_PermuteTwoSrc,    MVT::v32i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v16i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  3 }, // zext + vpermt2w + trunc
    { TTI::SK_PermuteTwoSrc,    MVT::v64i8, 19 }, // 6 * v32i8 + 1
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  3 }  // zext + vpermt2w + trunc
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX512ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v8f64,  1 }, // vbroadcastpd
    { TTI::SK_Broadcast,        MVT::v16f32, 1 }, // vbroadcastps
    { TTI::SK_Broadcast,        MVT::v8i64,  1 }, // vpbroadcastq
    { TTI::SK_Broadcast,        MVT::v16i32, 1 }, // vpbroadcastd

    { TTI::SK_Reverse,          MVT::v8f64,  1 }, // vpermpd
    { TTI::SK_Reverse,          MVT::v16f32, 1 }, // vpermps
    { TTI::SK_Reverse,          MVT::v8i64,  1 }, // vpermq
    { TTI::SK_Reverse,          MVT::v16i32, 1 }, // vpermd

    { TTI::SK_PermuteSingleSrc, MVT::v8f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v2f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v16f32, 1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v4f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v8i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v2i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v16i32, 1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v4i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v16i8,  1 }, // pshufb

    { TTI::SK_PermuteTwoSrc,    MVT::v8f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc,    MVT::v16f32, 1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc,    MVT::v8i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc,    MVT::v16i32, 1 }, // vpermt2d
    { TTI::SK_PermuteTwoSrc,    MVT::v4f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc,    MVT::v8f32,  1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc,    MVT::v4i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc,    MVT::v8i32,  1 }, // vpermt2d
    { TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc,    MVT::v4f32,  1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc,    MVT::v4i32,  1 }  // vpermt2d
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX2ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f64,  1 }, // vbroadcastpd
    { TTI::SK_Broadcast, MVT::v8f32,  1 }, // vbroadcastps
    { TTI::SK_Broadcast, MVT::v4i64,  1 }, // vpbroadcastq
    { TTI::SK_Broadcast, MVT::v8i32,  1 }, // vpbroadcastd
    { TTI::SK_Broadcast, MVT::v16i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast, MVT::v32i8,  1 }, // vpbroadcastb

    { TTI::SK_Reverse,   MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_Reverse,   MVT::v8f32,  1 }, // vpermps
    { TTI::SK_Reverse,   MVT::v4i64,  1 }, // vpermq
    { TTI::SK_Reverse,   MVT::v8i32,  1 }, // vpermd
    { TTI::SK_Reverse,   MVT::v16i16, 2 }, // vperm2i128 + pshufb
    { TTI::SK_Reverse,   MVT::v32i8,  2 }, // vperm2i128 + pshufb

    { TTI::SK_Alternate, MVT::v16i16, 1 }, // vpblendw
    { TTI::SK_Alternate, MVT::v32i8,  1 }  // vpblendvb
  };

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX1ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Broadcast, MVT::v8f32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Broadcast, MVT::v4i64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Broadcast, MVT::v8i32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Broadcast, MVT::v16i16, 3 }, // vpshuflw + vpshufd + vinsertf128
    { TTI::SK_Broadcast, MVT::v32i8,  2 }, // vpshufb + vinsertf128

    { TTI::SK_Reverse,   MVT::v4f64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Reverse,   MVT::v8f32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Reverse,   MVT::v4i64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Reverse,   MVT::v8i32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Reverse,   MVT::v16i16, 4 }, // vextractf128 + 2*pshufb
                                           // + vinsertf128
    { TTI::SK_Reverse,   MVT::v32i8,  4 }, // vextractf128 + 2*pshufb
                                           // + vinsertf128

    { TTI::SK_Alternate, MVT::v4i64,  1 }, // vblendpd
    { TTI::SK_Alternate, MVT::v4f64,  1 }, // vblendpd
    { TTI::SK_Alternate, MVT::v8i32,  1 }, // vblendps
    { TTI::SK_Alternate, MVT::v8f32,  1 }, // vblendps
    { TTI::SK_Alternate, MVT::v16i16, 3 }, // vpand + vpandn + vpor
    { TTI::SK_Alternate, MVT::v32i8,  3 }  // vpand + vpandn + vpor
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE41ShuffleTbl[] = {
    { TTI::SK_Alternate, MVT::v2i64, 1 }, // pblendw
    { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd
    { TTI::SK_Alternate, MVT::v4i32, 1 }, // pblendw
    { TTI::SK_Alternate, MVT::v4f32, 1 }, // blendps
    { TTI::SK_Alternate, MVT::v8i16, 1 }, // pblendw
    { TTI::SK_Alternate, MVT::v16i8, 1 }  // pblendvb
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSSE3ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v8i16, 1 }, // pshufb
    { TTI::SK_Broadcast, MVT::v16i8, 1 }, // pshufb

    { TTI::SK_Reverse,   MVT::v8i16, 1 }, // pshufb
    { TTI::SK_Reverse,   MVT::v16i8, 1 }, // pshufb

    { TTI::SK_Alternate, MVT::v8i16, 3 }, // pshufb + pshufb + por
    { TTI::SK_Alternate, MVT::v16i8, 3 }  // pshufb + pshufb + por
  };

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE2ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v2f64, 1 }, // shufpd
    { TTI::SK_Broadcast, MVT::v2i64, 1 }, // pshufd
    { TTI::SK_Broadcast, MVT::v4i32, 1 }, // pshufd
    { TTI::SK_Broadcast, MVT::v8i16, 2 }, // pshuflw + pshufd
    { TTI::SK_Broadcast, MVT::v16i8, 3 }, // unpck + pshuflw + pshufd

    { TTI::SK_Reverse,   MVT::v2f64, 1 }, // shufpd
    { TTI::SK_Reverse,   MVT::v2i64, 1 }, // pshufd
    { TTI::SK_Reverse,   MVT::v4i32, 1 }, // pshufd
    { TTI::SK_Reverse,   MVT::v8i16, 3 }, // pshuflw + pshufhw + pshufd
    { TTI::SK_Reverse,   MVT::v16i8, 9 }, // 2*pshuflw + 2*pshufhw
                                          // + 2*pshufd + 2*unpck + packus

    { TTI::SK_Alternate, MVT::v2i64, 1 }, // movsd
    { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd
    { TTI::SK_Alternate, MVT::v4i32, 2 }, // 2*shufps
    { TTI::SK_Alternate, MVT::v8i16, 3 }, // pand + pandn + por
    { TTI::SK_Alternate, MVT::v16i8, 3 }  // pand + pandn + por
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE1ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps
    { TTI::SK_Reverse,   MVT::v4f32, 1 }, // shufps
    { TTI::SK_Alternate, MVT::v4f32, 2 }  // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // FIXME: Need a better design of the cost table to handle non-simple types
  // and the potentially massive number of combinations
  // (elem_num x src_type x dst_type).
  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },

    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
  };

  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.
  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,   MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,   MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,   MVT::v8f64,  1 },

    { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,   MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,   MVT::v8i64,  1 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },

    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i64, 26 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i64, 26 },

    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1,  3 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i8,   2 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8,  2 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i16,  5 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32,  MVT::v2i32,  2 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i32,  1 },
    { ISD::UINT_TO_FP, MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i64, 12 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i64, 26 },

    { ISD::FP_TO_UINT, MVT::v2i32,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT, MVT::v4i32,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT, MVT::v8i32,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
  };
  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };
  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 20 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 13 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 13 },

    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },

    { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
    { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
  };
  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
  };
  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },

    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   8 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  9 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  5 },

    { ISD::TRUNCATE, MVT::v4i8,   MVT::v4i16,   4 },
    { ISD::TRUNCATE, MVT::v8i8,   MVT::v8i16,   2 },
    { ISD::TRUNCATE, MVT::v16i8,  MVT::v16i16,  3 },
    { ISD::TRUNCATE, MVT::v4i8,   MVT::v4i32,   3 },
    { ISD::TRUNCATE, MVT::v4i16,  MVT::v4i32,   3 },
    { ISD::TRUNCATE, MVT::v8i8,   MVT::v8i32,   4 },
    { ISD::TRUNCATE, MVT::v16i8,  MVT::v16i32,  7 },
    { ISD::TRUNCATE, MVT::v8i16,  MVT::v8i32,   5 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
  };
  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return LTSrc.first * Entry->Cost;
  }
  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);
  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE41()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE2()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC, MVT::v2i64, 8 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };
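  // (A hedged note on the 4x entries above: with AVX1 a 256-bit integer
  // compare is assumed to split into two 128-bit pcmp* operations plus the
  // extract/insert needed to stitch the YMM result back together.)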
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };
  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64,  1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64,  1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };
  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Type *> Tys, FastMathFlags FMF) {
  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,  4 },
    { ISD::BITREVERSE, MVT::v8i32,  4 },
    { ISD::BITREVERSE, MVT::v16i16, 4 },
    { ISD::BITREVERSE, MVT::v32i8,  4 },
    { ISD::BITREVERSE, MVT::v2i64,  1 },
    { ISD::BITREVERSE, MVT::v4i32,  1 },
    { ISD::BITREVERSE, MVT::v8i16,  1 },
    { ISD::BITREVERSE, MVT::v16i8,  1 },
    { ISD::BITREVERSE, MVT::i64,    3 },
    { ISD::BITREVERSE, MVT::i32,    3 },
    { ISD::BITREVERSE, MVT::i16,    3 },
    { ISD::BITREVERSE, MVT::i8,     3 }
  };
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,   5 },
    { ISD::BITREVERSE, MVT::v8i32,   5 },
    { ISD::BITREVERSE, MVT::v16i16,  5 },
    { ISD::BITREVERSE, MVT::v32i8,   5 },
    { ISD::BSWAP,      MVT::v4i64,   1 },
    { ISD::BSWAP,      MVT::v8i32,   1 },
    { ISD::BSWAP,      MVT::v16i16,  1 },
    { ISD::CTLZ,       MVT::v4i64,  23 },
    { ISD::CTLZ,       MVT::v8i32,  18 },
    { ISD::CTLZ,       MVT::v16i16, 14 },
    { ISD::CTLZ,       MVT::v32i8,   9 },
    { ISD::CTPOP,      MVT::v4i64,   7 },
    { ISD::CTPOP,      MVT::v8i32,  11 },
    { ISD::CTPOP,      MVT::v16i16,  9 },
    { ISD::CTPOP,      MVT::v32i8,   6 },
    { ISD::CTTZ,       MVT::v4i64,  10 },
    { ISD::CTTZ,       MVT::v8i32,  14 },
    { ISD::CTTZ,       MVT::v16i16, 12 },
    { ISD::CTTZ,       MVT::v32i8,   9 },
    { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,  10 },
    { ISD::BITREVERSE, MVT::v8i32,  10 },
    { ISD::BITREVERSE, MVT::v16i16, 10 },
    { ISD::BITREVERSE, MVT::v32i8,  10 },
    { ISD::BSWAP,      MVT::v4i64,   4 },
    { ISD::BSWAP,      MVT::v8i32,   4 },
    { ISD::BSWAP,      MVT::v16i16,  4 },
    { ISD::CTLZ,       MVT::v4i64,  46 },
    { ISD::CTLZ,       MVT::v8i32,  36 },
    { ISD::CTLZ,       MVT::v16i16, 28 },
    { ISD::CTLZ,       MVT::v32i8,  18 },
    { ISD::CTPOP,      MVT::v4i64,  14 },
    { ISD::CTPOP,      MVT::v8i32,  22 },
    { ISD::CTPOP,      MVT::v16i16, 18 },
    { ISD::CTPOP,      MVT::v32i8,  12 },
    { ISD::CTTZ,       MVT::v4i64,  20 },
    { ISD::CTTZ,       MVT::v8i32,  28 },
    { ISD::CTTZ,       MVT::v16i16, 24 },
    { ISD::CTTZ,       MVT::v32i8,  18 },
    { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::FSQRT, MVT::f32,   18 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64,  5 },
    { ISD::BITREVERSE, MVT::v4i32,  5 },
    { ISD::BITREVERSE, MVT::v8i16,  5 },
    { ISD::BITREVERSE, MVT::v16i8,  5 },
    { ISD::BSWAP,      MVT::v2i64,  1 },
    { ISD::BSWAP,      MVT::v4i32,  1 },
    { ISD::BSWAP,      MVT::v8i16,  1 },
    { ISD::CTLZ,       MVT::v2i64, 23 },
    { ISD::CTLZ,       MVT::v4i32, 18 },
    { ISD::CTLZ,       MVT::v8i16, 14 },
    { ISD::CTLZ,       MVT::v16i8,  9 },
    { ISD::CTPOP,      MVT::v2i64,  7 },
    { ISD::CTPOP,      MVT::v4i32, 11 },
    { ISD::CTPOP,      MVT::v8i16,  9 },
    { ISD::CTPOP,      MVT::v16i8,  6 },
    { ISD::CTTZ,       MVT::v2i64, 10 },
    { ISD::CTTZ,       MVT::v4i32, 14 },
    { ISD::CTTZ,       MVT::v8i16, 12 },
    { ISD::CTTZ,       MVT::v16i8,  9 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::BSWAP, MVT::v2i64,  7 },
    { ISD::BSWAP, MVT::v4i32,  7 },
    { ISD::BSWAP, MVT::v8i16,  7 },
    { ISD::CTLZ,  MVT::v2i64, 25 },
    { ISD::CTLZ,  MVT::v4i32, 26 },
    { ISD::CTLZ,  MVT::v8i16, 20 },
    { ISD::CTLZ,  MVT::v16i8, 17 },
    { ISD::CTPOP, MVT::v2i64, 12 },
    { ISD::CTPOP, MVT::v4i32, 15 },
    { ISD::CTPOP, MVT::v8i16, 13 },
    { ISD::CTPOP, MVT::v16i8, 10 },
    { ISD::CTTZ,  MVT::v2i64, 14 },
    { ISD::CTTZ,  MVT::v4i32, 18 },
    { ISD::CTTZ,  MVT::v8i16, 16 },
    { ISD::CTTZ,  MVT::v16i8, 13 },
    { ISD::FSQRT, MVT::f64,   32 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::FSQRT, MVT::f32,   28 }, // Pentium III from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 56 }  // Pentium III from http://www.agner.org/
  };
  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  case Intrinsic::ctlz:
    ISD = ISD::CTLZ;
    break;
  case Intrinsic::ctpop:
    ISD = ISD::CTPOP;
    break;
  case Intrinsic::cttz:
    ISD = ISD::CTTZ;
    break;
  case Intrinsic::sqrt:
    ISD = ISD::FSQRT;
    break;
  }
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
  MVT MTy = LT.second;

  // Attempt to lookup cost.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF);
}
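
// Illustrative example (derived from the tables above): @llvm.ctpop.v8i32 on
// an AVX2 target legalizes to a single v8i32 (LT.first == 1), so the AVX2
// table entry gives a cost of 11. On an SSSE3-only target the same type is
// split into two v4i32 halves (LT.first == 2), so the SSSE3 entry yields
// 2 * 11 = 22.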
int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Value *> Args,
                                      FastMathFlags FMF) {
  return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF);
}
int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  Type *ScalarType = Val->getScalarType();

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (ScalarType->isFloatingPointTy() && Index == 0)
      return 0;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  int RegisterFileMoveCost = 0;
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost = 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}
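
// Illustrative example: extracting element 6 from an <8 x i32> on an SSE2
// target legalizes to two v4i32 registers, so the index is normalized to
// 6 % 4 == 2 within its half. Extracting element 0 of a <4 x float> is free,
// since the scalar already lives in index #0 of an XMM register.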
int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
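
// Illustrative example: for a <4 x i32> with both Insert and Extract set,
// this sums the per-lane cost of four InsertElement and four ExtractElement
// operations (eight getVectorInstrCost queries in total).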
int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
  // proxy for a double-pumped AVX memory interface such as on Sandybridge.
  if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
    Cost *= 2;

  return Cost;
}
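
// Illustrative example: a <3 x float> store costs a flat 3 (see above), while
// an <8 x float> access on an AVX target is one legal 32-byte operation
// (LT.first == 1) and is doubled to 2 only when unaligned 32-byte accesses
// are slow, as on Sandybridge.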
int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For a scalar, take the regular memory op cost, without a mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
    VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for the
    // mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}
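
// Illustrative example: a masked load of <8 x float> on an AVX target is
// legal and maps to a single 32-byte vmaskmov (LT.first == 1), so it costs
// 1 * 4 == 4; the same operation on an AVX-512 target costs just LT.first.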
int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  // Cost modeling of strided access computation is hidden by the indexing
  // modes of X86 regardless of the stride value. We don't believe that there
  // is a difference between constant strided access in general and a constant
  // stride value which is less than or equal to 64.
  // Even in the case of a (loop-invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // instruction.
  if (Ty->isVectorTy() && SE) {
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}
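
// Illustrative example: a vector access through a non-strided (gather-like)
// address is charged 10 to reflect the hidden micro-op overhead, while a
// strided access with an unknown but loop-invariant stride is charged a
// single extra instruction.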
int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.
  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 }
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 }
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }  // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 }
  };
  if (IsPairwise) {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}
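
// Illustrative example: a non-pairwise FADD reduction of <4 x float> costs 4
// on an SSE4.2 (Nehalem-class) target and 3 on an AVX (Sandybridge-class)
// target, per the IACA throughput measurements recorded in the tables above.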
/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}
int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // Fixme: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
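
// Illustrative example: an i128 constant whose low 64-bit chunk fits in 32
// bits and whose high chunk is zero costs max(1, TCC_Basic + TCC_Free) == 1,
// while a full 64-bit immediate that needs a movabsq is charged
// 2 * TCC_Basic.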
int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32-bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
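
// Illustrative example: for `icmp ugt i64 %x, 4294967295` the immediate is
// reported as TCC_Free, because codegen can lower the compare to a right
// shift by 32; hoisting the constant into a register would only obscure that
// pattern.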
int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
// Return an average cost of a Gather / Scatter instruction; maybe improved
// later.
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
                                unsigned Alignment, unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = SrcVTy->getVectorNumElements();

  // Try to reduce index size from 64 bit (default for GEP) to 32 bits. This is
  // essential for VF 16. If the index can't be reduced to 32 bits, the
  // operation will use 16 x 64 indices which do not fit in a zmm register and
  // need to be split. Also check that the base pointer is the same for all
  // lanes, and that there's at most one variable index.
  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (IndxTy->isVectorTy())
        IndxTy = IndxTy->getVectorElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Trying to reduce IndexSize to 32 bits for vector 16.
  // By default the IndexSize is equal to the pointer size.
  unsigned IndexSize = (VF >= 16) ? getIndexSizeInBits(Ptr, DL)
                                  : DL.getPointerSizeInBits();

  Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
                                                    IndexSize), VF);
  std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
  if (SplitFactor > 1) {
    // Handle splitting of vector of pointers.
    Type *SplitSrcTy =
        VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = 2;
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           Alignment, AddressSpace);
}
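
// Illustrative example: a gather of <16 x float> whose GEP has a single
// variable index that is a sign-extended 32-bit value can use 32-bit indices,
// so both the index vector (v16i32) and the data (v16f32) fit in one zmm
// register and no split is needed; the cost is then GSOverhead plus 16 times
// the scalar load cost.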
/// Return the cost of full scalarization of a gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - Address space of the pointer(s).
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, unsigned Alignment,
                                unsigned AddressSpace) {
  unsigned VF = SrcVTy->getVectorNumElements();

  int MaskUnpackCost = 0;
  if (VariableMask) {
    VectorType *MaskTy =
      VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost =
      getCmpSelInstrCost(Instruction::ICmp,
                         Type::getInt1Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          Alignment, AddressSpace);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
        getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
        getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}
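
// Illustrative example: a scalarized masked gather of <4 x float> pays for
// unpacking the i1 mask (four extracts plus four compares and branches), four
// scalar loads, and four InsertElement operations to rebuild the result
// vector.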
/// Calculate the cost of a Gather / Scatter operation.
int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
                                       Value *Ptr, bool VariableMask,
                                       unsigned Alignment) {
  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = SrcVTy->getVectorNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
    Scalarize = true;
  // Gather / Scatter for vector 2 is not profitable on KNL / SKX.
  // A vector-4 gather/scatter instruction does not exist on KNL. We could
  // extend it to 8 elements, but zeroing the upper bits of the mask vector
  // would add more instructions. Right now we give the scalar cost of vector-4
  // for KNL. TODO: Check, maybe the gather/scatter instruction is better in
  // the VariableMask case.
  if (VF == 2 || (VF == 4 && !ST->hasVLX()))
    Scalarize = true;

  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}
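
// Illustrative example: a gather of <4 x double> is costed as a hardware
// gather on a Skylake-AVX512 target (which has VLX), but falls back to the
// scalarized estimate on KNL, where no 4-element gather/scatter form exists.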
bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
         ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
}
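
// Illustrative example: a masked load of <8 x float> (32-bit elements) is
// legal with AVX, while <32 x i8> or <16 x i16> additionally require
// AVX-512BW.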
bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}
bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about the legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // scalar element.
  // Later on, the cost model will estimate usage of this intrinsic based on
  // the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  if (isa<VectorType>(DataTy) && !isPowerOf2_32(DataTy->getVectorNumElements()))
    return false;
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  // AVX-512 allows gather and scatter.
  return (DataWidth == 32 || DataWidth == 64) && ST->hasAVX512();
}
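
// Illustrative example: a gather of i32 or i64 elements (or pointers) is
// legal on any AVX-512 subtarget, but a <3 x i32> query from the Scalarizer
// is rejected for not being a power-of-2 vector.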
bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  return isLegalMaskedGather(DataType);
}
bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}
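
// Illustrative example: a callee compiled for "sse4.2" may be inlined into a
// caller compiled for "avx2", because the callee's feature bits are a subset
// of the caller's; the reverse direction is rejected.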
bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom() || ST->isSLM());
}
// Get an estimate for interleaved load/store operations and strided load.
// \p Indices contains the indices for a strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace) {

  // VecTy for an interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  if (Opcode == Instruction::Load) {
    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
                                     VecTy->getVectorNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at
    // all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its src operands.
    // When we have more than one destination, we need additional instructions
    // to keep sources.
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of its src operands.
  // We need additional instructions to keep sources.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}
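
// Illustrative example for the store path: interleaving 4 sources of
// <16 x i32> into <64 x i32> on AVX-512 legalizes to NumOfMemOps == 4 zmm
// stores; each store is preceded by NumOfShufflesPerStore == 3 two-source
// shuffles, plus NumOfMoves == 4 * 3 / 2 == 6 register copies to preserve the
// clobbered sources.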
int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool &RequiresBW) {
    RequiresBW = false;
    Type *EltTy = VecTy->getVectorElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8)) {
      RequiresBW = true;
      return true;
    }
    return false;
  };
  bool RequiresBW;
  bool HasAVX512Solution = isSupportedOnAVX512(VecTy, RequiresBW);
  if (ST->hasAVX512() && HasAVX512Solution && (!RequiresBW || ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace);
  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}