//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// About the cost model numbers used below it's necessary to say the
/// following: the numbers correspond to some "generic" X86 CPU instead of a
/// concrete CPU model. Usually the numbers correspond to the CPU where the
/// feature first appeared. For example, if we do Subtarget.hasSSE42() in the
/// lookups below, the cost is based on Nehalem as that was the first CPU to
/// support that feature level and thus most likely has the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss     sqrtss     rsqrtss
///   AMD K7          11-16     19         3
///   Piledriver      9-24      13-15      5
///   Jaguar          14        16         2
///   Pentium II,III  18        30         2
///   Nehalem         7-14      7-18       3
///   Haswell         10-13     11         5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
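
// Note (illustrative): with POPCNT available (Nehalem and later), a scalar
// ctpop lowers to a single popcnt instruction, hence PSK_FastHardware;
// without it, callers should expect a software bit-twiddling expansion.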

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll the loop instead, which saves the
  // overflow check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
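
// For example, a VF=4 loop on an AVX target is given interleave factor 4,
// allowing up to 16 lanes in flight across the pipelined vector units.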

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }
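  // Illustrative per-lane expansion for "x /s 8" with i32 elements:
  //   t0 = x >>s 31;  t1 = t0 >>u 29;  t2 = x + t1;  res = t2 >>s 3
  // i.e. the two arithmetic shifts, one logical shift and one add costed
  // above.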

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i16, 12 }, // pmulhw sequence
    { ISD::SDIV, MVT::v8i16,   6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v16i16, 12 }, // pmulhuw sequence
    { ISD::UDIV, MVT::v8i16,   6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  38 }, // pmuludq sequence
    { ISD::SDIV, MVT::v4i32,  19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v8i32,  30 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32,  15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 30;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    if (const auto *Entry = CostTableLookup(SSE2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
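
  // The pmulh(u)w/pmuludq sequences above implement division by a uniform
  // constant as a multiply by a fixed-point "magic" reciprocal plus shifts
  // and fix-ups, so e.g. a vector divide by 7 stays a short sequence of
  // cheap vector ops instead of being scalarized.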

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL, MVT::v2i64, 1 },
    { ISD::MUL, MVT::v4i64, 1 },
    { ISD::MUL, MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI()) {
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::MUL,  MVT::v64i8,     11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v32i8,      4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,      4 }, // extend/pmullw/trunc sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v64i8,  64*20 },
    { ISD::SDIV, MVT::v32i16, 32*20 },
    { ISD::SDIV, MVT::v16i32, 16*20 },
    { ISD::SDIV, MVT::v8i64,   8*20 },
    { ISD::UDIV, MVT::v64i8,  64*20 },
    { ISD::UDIV, MVT::v32i16, 32*20 },
    { ISD::UDIV, MVT::v16i32, 16*20 },
    { ISD::UDIV, MVT::v8i64,   8*20 },
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL, MVT::v16i32,  1 },
    { ISD::SRL, MVT::v16i32,  1 },
    { ISD::SRA, MVT::v16i32,  1 },
    { ISD::SHL, MVT::v8i64,   1 },
    { ISD::SRL, MVT::v8i64,   1 },
    { ISD::SRA, MVT::v8i64,   1 },

    { ISD::MUL, MVT::v32i8,  13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL, MVT::v16i8,   5 }, // extend/pmullw/trunc sequence.
  };

  if (ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them
    // custom in order to detect cases where the shift amount is a scalar.
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 1 },
    { ISD::SRA, MVT::v4i32, 1 },
    { ISD::SHL, MVT::v8i32, 1 },
    { ISD::SRL, MVT::v8i32, 1 },
    { ISD::SRA, MVT::v8i32, 1 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
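
  // Illustrative example: "shl <16 x i16> %x, <i16 1, i16 2, ...>" with a
  // constant build_vector is costed as LT.first above because AVX2 lowers it
  // to a single vpmullw by the matching powers of two.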

  static const CostTblEntry XOPCostTable[] = {
    // 128bit shifts take 1 cycle, but right shifts require negation beforehand.
    { ISD::SHL, MVT::v16i8,  1 },
    { ISD::SRL, MVT::v16i8,  2 },
    { ISD::SRA, MVT::v16i8,  2 },
    { ISD::SHL, MVT::v8i16,  1 },
    { ISD::SRL, MVT::v8i16,  2 },
    { ISD::SRA, MVT::v8i16,  2 },
    { ISD::SHL, MVT::v4i32,  1 },
    { ISD::SRL, MVT::v4i32,  2 },
    { ISD::SRA, MVT::v4i32,  2 },
    { ISD::SHL, MVT::v2i64,  1 },
    { ISD::SRL, MVT::v2i64,  2 },
    { ISD::SRA, MVT::v2i64,  2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8,  2 },
    { ISD::SRL, MVT::v32i8,  4 },
    { ISD::SRA, MVT::v32i8,  4 },
    { ISD::SHL, MVT::v16i16, 2 },
    { ISD::SRL, MVT::v16i16, 4 },
    { ISD::SRA, MVT::v16i16, 4 },
    { ISD::SHL, MVT::v8i32,  2 },
    { ISD::SRL, MVT::v8i32,  4 },
    { ISD::SRA, MVT::v8i32,  4 },
    { ISD::SHL, MVT::v4i64,  2 },
    { ISD::SRL, MVT::v4i64,  4 },
    { ISD::SRA, MVT::v4i64,  4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    if (const auto *Entry = CostTableLookup(XOPCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2CustomCostTable[] = {
    { ISD::SHL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,  24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   4 }, // srl/xor/sub sequence.

    { ISD::MUL,  MVT::v32i8,  17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   7 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2CustomCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVXCustomCostTable[] = {
    { ISD::MUL,  MVT::v32i8,  26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  44 }, // SNB from http://www.agner.org/

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  // Look for AVX lowering tricks for custom cases.
  if (ST->hasAVX()) {
    if (const auto *Entry = CostTableLookup(AVXCustomCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE42FloatCostTable[] = {
    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42()) {
    if (const auto *Entry = CostTableLookup(SSE42FloatCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i8,  1 }, // psllw.
    { ISD::SHL, MVT::v32i8,  2 }, // psllw.
    { ISD::SHL, MVT::v8i16,  1 }, // psllw.
    { ISD::SHL, MVT::v16i16, 2 }, // psllw.
    { ISD::SHL, MVT::v4i32,  1 }, // pslld.
    { ISD::SHL, MVT::v8i32,  2 }, // pslld.
    { ISD::SHL, MVT::v2i64,  1 }, // psllq.
    { ISD::SHL, MVT::v4i64,  2 }, // psllq.

    { ISD::SRL, MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL, MVT::v32i8,  2 }, // psrlw.
    { ISD::SRL, MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL, MVT::v16i16, 2 }, // psrlw.
    { ISD::SRL, MVT::v4i32,  1 }, // psrld.
    { ISD::SRL, MVT::v8i32,  2 }, // psrld.
    { ISD::SRL, MVT::v2i64,  1 }, // psrlq.
    { ISD::SRL, MVT::v4i64,  2 }, // psrlq.

    { ISD::SRA, MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA, MVT::v32i8,  8 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA, MVT::v8i16,  1 }, // psraw.
    { ISD::SRA, MVT::v16i16, 2 }, // psraw.
    { ISD::SRA, MVT::v4i32,  1 }, // psrad.
    { ISD::SRA, MVT::v8i32,  2 }, // psrad.
    { ISD::SRA, MVT::v2i64,  4 }, // 2 x psrad + shuffle.
    { ISD::SRA, MVT::v4i64,  8 }, // 2 x psrad + shuffle.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered into a
    // vector multiply (pmullw/pmulld).
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      return LT.first;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if ((VT == MVT::v8i32 || VT == MVT::v16i16) &&
        (ST->hasAVX() && !ST->hasAVX2()))
      ISD = ISD::MUL;

    // A vector shift left by a non-uniform constant is converted into a
    // vector multiply; the new multiply is eventually lowered into a
    // sequence of shuffles and 2 x pmuludq.
    if (VT == MVT::v4i32 && ST->hasSSE2())
      ISD = ISD::MUL;
  }
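
  // E.g. "shl <4 x i32> %x, <i32 2, i32 3, i32 4, i32 5>" is costed as a
  // v4i32 multiply: a single pmulld on SSE4.1, or the shuffle + 2x pmuludq
  // sequence on plain SSE2.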

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL, MVT::v16i8,    11 }, // pblendvb sequence.
    { ISD::SHL, MVT::v32i8,  2*11 }, // pblendvb sequence.
    { ISD::SHL, MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SHL, MVT::v16i16, 2*14 }, // pblendvb sequence.

    { ISD::SRL, MVT::v16i8,    12 }, // pblendvb sequence.
    { ISD::SRL, MVT::v32i8,  2*12 }, // pblendvb sequence.
    { ISD::SRL, MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SRL, MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SRL, MVT::v4i32,    11 }, // Shift each lane + blend.
    { ISD::SRL, MVT::v8i32,  2*11 }, // Shift each lane + blend.

    { ISD::SRA, MVT::v16i8,    24 }, // pblendvb sequence.
    { ISD::SRA, MVT::v32i8,  2*24 }, // pblendvb sequence.
    { ISD::SRA, MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SRA, MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SRA, MVT::v4i32,    12 }, // Shift each lane + blend.
    { ISD::SRA, MVT::v8i32,  2*12 }, // Shift each lane + blend.
  };

  if (ST->hasSSE41()) {
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,     26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v32i8,   2*26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v16i16,  2*32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,    2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v8i32,  2*2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,      4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,    2*4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,     26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v32i8,   2*26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v16i16,  2*32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,     16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,   2*16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,      4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,    2*4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,     54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v32i8,   2*54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v16i16,  2*32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,     16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,   2*16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,     12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   2*12 }, // srl/xor/sub sequence.

    { ISD::MUL,  MVT::v16i8,     12 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,       23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,     39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,       38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,     69 }, // Pentium IV from http://www.agner.org/

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up spilling regular registers. The
    // overhead of division is going to dominate most kernels anyway, so try
    // hard to prevent vectorization of division - it is generally a bad idea.
    // Assume somewhat arbitrarily that we have to be able to hide "20 cycles"
    // for each lane.
    { ISD::SDIV, MVT::v16i8,  16*20 },
    { ISD::SDIV, MVT::v8i16,   8*20 },
    { ISD::SDIV, MVT::v4i32,   4*20 },
    { ISD::SDIV, MVT::v2i64,   2*20 },
    { ISD::UDIV, MVT::v16i8,  16*20 },
    { ISD::UDIV, MVT::v8i16,   8*20 },
    { ISD::UDIV, MVT::v4i32,   4*20 },
    { ISD::UDIV, MVT::v2i64,   2*20 },
  };

  if (ST->hasSSE2()) {
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL, MVT::v16i16,  4 },
    { ISD::MUL, MVT::v8i32,   4 },
    { ISD::SUB, MVT::v32i8,   4 },
    { ISD::ADD, MVT::v32i8,   4 },
    { ISD::SUB, MVT::v16i16,  4 },
    { ISD::ADD, MVT::v16i16,  4 },
    { ISD::SUB, MVT::v8i32,   4 },
    { ISD::ADD, MVT::v8i32,   4 },
    { ISD::SUB, MVT::v4i64,   4 },
    { ISD::ADD, MVT::v4i64,   4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(3) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 16
    // instead of 8.
    { ISD::MUL, MVT::v4i64,  16 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, VT))
      return LT.first * Entry->Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(3) and adds(2).
    { ISD::MUL, MVT::v2i64, 8 },
    { ISD::MUL, MVT::v4i64, 8 },
    { ISD::MUL, MVT::v8i64, 8 }
  };
  if (const auto *Entry = CostTableLookup(CustomLowered, ISD, LT.second))
    return LT.first * Entry->Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  static const CostTblEntry SSE1FloatCostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1FloatCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  if (Kind == TTI::SK_Reverse) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    static const CostTblEntry AVX512VBMIShuffleTbl[] = {
      { ISD::VECTOR_SHUFFLE, MVT::v64i8, 1 }, // vpermb
      { ISD::VECTOR_SHUFFLE, MVT::v32i8, 1 }  // vpermb
    };

    if (ST->hasVBMI())
      if (const auto *Entry = CostTableLookup(AVX512VBMIShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry AVX512BWShuffleTbl[] = {
      { ISD::VECTOR_SHUFFLE, MVT::v32i16, 1 }, // vpermw
      { ISD::VECTOR_SHUFFLE, MVT::v16i16, 1 }, // vpermw
      { ISD::VECTOR_SHUFFLE, MVT::v64i8,  6 }  // vextracti64x4 + 2*vperm2i128
                                               // + 2*pshufb + vinserti64x4
    };

    if (ST->hasBWI())
      if (const auto *Entry = CostTableLookup(AVX512BWShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry AVX512ShuffleTbl[] = {
      { ISD::VECTOR_SHUFFLE, MVT::v8f64,  1 }, // vpermpd
      { ISD::VECTOR_SHUFFLE, MVT::v16f32, 1 }, // vpermps
      { ISD::VECTOR_SHUFFLE, MVT::v8i64,  1 }, // vpermq
      { ISD::VECTOR_SHUFFLE, MVT::v16i32, 1 }, // vpermd
    };

    if (ST->hasAVX512())
      if (const auto *Entry =
              CostTableLookup(AVX512ShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry AVX2ShuffleTbl[] = {
      { ISD::VECTOR_SHUFFLE, MVT::v4f64,  1 }, // vpermpd
      { ISD::VECTOR_SHUFFLE, MVT::v8f32,  1 }, // vpermps
      { ISD::VECTOR_SHUFFLE, MVT::v4i64,  1 }, // vpermq
      { ISD::VECTOR_SHUFFLE, MVT::v8i32,  1 }, // vpermd
      { ISD::VECTOR_SHUFFLE, MVT::v16i16, 2 }, // vperm2i128 + pshufb
      { ISD::VECTOR_SHUFFLE, MVT::v32i8,  2 }  // vperm2i128 + pshufb
    };

    if (ST->hasAVX2())
      if (const auto *Entry =
              CostTableLookup(AVX2ShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry AVX1ShuffleTbl[] = {
      { ISD::VECTOR_SHUFFLE, MVT::v4f64,  2 }, // vperm2f128 + vpermilpd
      { ISD::VECTOR_SHUFFLE, MVT::v8f32,  2 }, // vperm2f128 + vpermilps
      { ISD::VECTOR_SHUFFLE, MVT::v4i64,  2 }, // vperm2f128 + vpermilpd
      { ISD::VECTOR_SHUFFLE, MVT::v8i32,  2 }, // vperm2f128 + vpermilps
      { ISD::VECTOR_SHUFFLE, MVT::v16i16, 4 }, // vextractf128 + 2*pshufb
                                               // + vinsertf128
      { ISD::VECTOR_SHUFFLE, MVT::v32i8,  4 }  // vextractf128 + 2*pshufb
                                               // + vinsertf128
    };

    if (ST->hasAVX())
      if (const auto *Entry =
              CostTableLookup(AVX1ShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry SSSE3ShuffleTbl[] = {
      { ISD::VECTOR_SHUFFLE, MVT::v8i16, 1 }, // pshufb
      { ISD::VECTOR_SHUFFLE, MVT::v16i8, 1 }  // pshufb
    };

    if (ST->hasSSSE3())
      if (const auto *Entry =
              CostTableLookup(SSSE3ShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry SSE2ShuffleTbl[] = {
      { ISD::VECTOR_SHUFFLE, MVT::v2f64, 1 }, // shufpd
      { ISD::VECTOR_SHUFFLE, MVT::v2i64, 1 }, // pshufd
      { ISD::VECTOR_SHUFFLE, MVT::v4i32, 1 }, // pshufd
      { ISD::VECTOR_SHUFFLE, MVT::v8i16, 3 }, // pshuflw + pshufhw + pshufd
      { ISD::VECTOR_SHUFFLE, MVT::v16i8, 9 }  // 2*pshuflw + 2*pshufhw
                                              // + 2*pshufd + 2*unpck + packus
    };

    if (ST->hasSSE2())
      if (const auto *Entry =
              CostTableLookup(SSE2ShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry SSE1ShuffleTbl[] = {
      { ISD::VECTOR_SHUFFLE, MVT::v4f32, 1 } // shufps
    };

    if (ST->hasSSE1())
      if (const auto *Entry =
              CostTableLookup(SSE1ShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

  } else if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      // 2x vextractf128 , 2x vpblendw , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      // 2x vextractf128 , 4x vpshufb , 2x vpor , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVXAltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // Packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41AltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3AltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    if (const auto *Entry = CostTableLookup(SSEAltShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;

  } else if (Kind == TTI::SK_PermuteTwoSrc) {
    // We assume that source and destination have the same vector type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
    int NumOfShuffles = NumOfDests * NumOfShufflesPerDest;
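
    // E.g. (illustrative): a two-source shuffle of <32 x i16> on AVX512BW
    // legalizes whole (LT.first == 1), so NumOfShuffles == 1. If the type
    // splits in two (LT.first == 2), each of the 2 destinations is built from
    // the 4 legalized source halves with 2*2-1 = 3 shuffles: 6 in total.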

    static const CostTblEntry AVX512VBMIShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v64i8, 1}, // vpermt2b
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 1}, // vpermt2b
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}  // vpermt2b
    };

    if (ST->hasVBMI())
      if (const auto *Entry = CostTableLookup(AVX512VBMIShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return NumOfShuffles * Entry->Cost;

    static const CostTblEntry AVX512BWShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v32i16, 1},  // vpermt2w
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 1},  // vpermt2w
      {ISD::VECTOR_SHUFFLE, MVT::v8i16,  1},  // vpermt2w
      {ISD::VECTOR_SHUFFLE, MVT::v32i8,  3},  // zext + vpermt2w + trunc
      {ISD::VECTOR_SHUFFLE, MVT::v64i8, 19},  // 6 * v32i8 + 1
      {ISD::VECTOR_SHUFFLE, MVT::v16i8,  3}   // zext + vpermt2w + trunc
    };

    if (ST->hasBWI())
      if (const auto *Entry = CostTableLookup(AVX512BWShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return NumOfShuffles * Entry->Cost;

    static const CostTblEntry AVX512ShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v8f64,  1}, // vpermt2pd
      {ISD::VECTOR_SHUFFLE, MVT::v16f32, 1}, // vpermt2ps
      {ISD::VECTOR_SHUFFLE, MVT::v8i64,  1}, // vpermt2q
      {ISD::VECTOR_SHUFFLE, MVT::v16i32, 1}, // vpermt2d
      {ISD::VECTOR_SHUFFLE, MVT::v4f64,  1}, // vpermt2pd
      {ISD::VECTOR_SHUFFLE, MVT::v8f32,  1}, // vpermt2ps
      {ISD::VECTOR_SHUFFLE, MVT::v4i64,  1}, // vpermt2q
      {ISD::VECTOR_SHUFFLE, MVT::v8i32,  1}, // vpermt2d
      {ISD::VECTOR_SHUFFLE, MVT::v2f64,  1}, // vpermt2pd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32,  1}, // vpermt2ps
      {ISD::VECTOR_SHUFFLE, MVT::v2i64,  1}, // vpermt2q
      {ISD::VECTOR_SHUFFLE, MVT::v4i32,  1}  // vpermt2d
    };

    if (ST->hasAVX512())
      if (const auto *Entry =
              CostTableLookup(AVX512ShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return NumOfShuffles * Entry->Cost;

  } else if (Kind == TTI::SK_PermuteSingleSrc) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (LT.first == 1) {

      static const CostTblEntry AVX512VBMIShuffleTbl[] = {
        {ISD::VECTOR_SHUFFLE, MVT::v64i8, 1}, // vpermb
        {ISD::VECTOR_SHUFFLE, MVT::v32i8, 1}  // vpermb
      };

      if (ST->hasVBMI())
        if (const auto *Entry = CostTableLookup(AVX512VBMIShuffleTbl,
                                                ISD::VECTOR_SHUFFLE,
                                                LT.second))
          return LT.first * Entry->Cost;

      static const CostTblEntry AVX512BWShuffleTbl[] = {
        {ISD::VECTOR_SHUFFLE, MVT::v32i16, 1}, // vpermw
        {ISD::VECTOR_SHUFFLE, MVT::v16i16, 1}, // vpermw
        {ISD::VECTOR_SHUFFLE, MVT::v8i16,  1}, // vpermw
        {ISD::VECTOR_SHUFFLE, MVT::v64i8,  8}, // extend to v32i16
        {ISD::VECTOR_SHUFFLE, MVT::v32i8,  3}  // vpermw + zext/trunc
      };

      if (ST->hasBWI())
        if (const auto *Entry = CostTableLookup(AVX512BWShuffleTbl,
                                                ISD::VECTOR_SHUFFLE,
                                                LT.second))
          return LT.first * Entry->Cost;

      static const CostTblEntry AVX512ShuffleTbl[] = {
        {ISD::VECTOR_SHUFFLE, MVT::v8f64,  1}, // vpermpd
        {ISD::VECTOR_SHUFFLE, MVT::v4f64,  1}, // vpermpd
        {ISD::VECTOR_SHUFFLE, MVT::v2f64,  1}, // vpermpd
        {ISD::VECTOR_SHUFFLE, MVT::v16f32, 1}, // vpermps
        {ISD::VECTOR_SHUFFLE, MVT::v8f32,  1}, // vpermps
        {ISD::VECTOR_SHUFFLE, MVT::v4f32,  1}, // vpermps
        {ISD::VECTOR_SHUFFLE, MVT::v8i64,  1}, // vpermq
        {ISD::VECTOR_SHUFFLE, MVT::v4i64,  1}, // vpermq
        {ISD::VECTOR_SHUFFLE, MVT::v2i64,  1}, // vpermq
        {ISD::VECTOR_SHUFFLE, MVT::v16i32, 1}, // vpermd
        {ISD::VECTOR_SHUFFLE, MVT::v8i32,  1}, // vpermd
        {ISD::VECTOR_SHUFFLE, MVT::v4i32,  1}, // vpermd
        {ISD::VECTOR_SHUFFLE, MVT::v16i8,  1}  // pshufb
      };

      if (ST->hasAVX512())
        if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl,
                                                ISD::VECTOR_SHUFFLE,
                                                LT.second))
          return LT.first * Entry->Cost;

    } else {
      // We are going to permute multiple sources and the result will be in
      // multiple destinations. We provide an accurate cost only for splits
      // where the element type remains the same.

      MVT LegalVT = LT.second;
      if (LegalVT.getVectorElementType().getSizeInBits() ==
              Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
          LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {

        unsigned VecTySize = DL.getTypeStoreSize(Tp);
        unsigned LegalVTSize = LegalVT.getStoreSize();
        // Number of source vectors after legalization:
        unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
        // Number of destination vectors after legalization:
        unsigned NumOfDests = LT.first;

        Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
                                           LegalVT.getVectorNumElements());

        unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
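        // E.g. (illustrative): a single-source permute of <16 x i32> on AVX2
        // legalizes to two v8i32 halves, so NumOfSrcs == NumOfDests == 2 and
        // the cost is (2 - 1) * 2 = 2 two-source v8i32 permutes.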
        return NumOfShuffles *
               getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
      }
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // FIXME: Need a better design of the cost table to handle non-simple types
  // and the potentially massive number of combinations
  // (elem_num x src_type x dst_type).
  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
930 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
931 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
932 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
933 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
934 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
935 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
936 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
938 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
939 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
940 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
941 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
942 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
943 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
945 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 },
946 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
947 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
948 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
949 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
950 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },
952 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
953 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
954 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
955 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
956 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
957 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
960 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
961 // 256-bit wide vectors.

  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,   MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,   MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,   MVT::v8f64,  1 },

    { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,   MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,   MVT::v8i64,  1 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },

    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i64, 26 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i64, 26 },

    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1,  3 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i8,   2 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8,  2 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i16,  5 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32,  MVT::v2i32,  2 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i32,  1 },
    { ISD::UINT_TO_FP, MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i64, 12 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i64, 26 },

    { ISD::FP_TO_UINT, MVT::v2i32,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT, MVT::v4i32,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT, MVT::v8i32,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
  };

  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };

  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    // The generic code to compute the scalar overhead is currently broken.
    // Work around this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove this when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 20 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 13 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 13 },

    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic when estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflate the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },

    { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
    { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
  };

  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
  };

  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure that, once we take
    // legalization into account, the throughput is overestimated.
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },

    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   8 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  9 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  12 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  10 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  5 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  2 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 3 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  3 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 7 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 10 },
  };
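
  // E.g. "sitofp <2 x i64> %x to <2 x double>" has no SSE2 instruction and is
  // effectively scalarized, hence the 2*10 entry above; these IACA-derived
  // multipliers deliberately err on the expensive side.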

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return LTSrc.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE41()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE2()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC, MVT::v2i64,  8 },
    { ISD::SETCC, MVT::v4i32,  1 },
    { ISD::SETCC, MVT::v8i16,  1 },
    { ISD::SETCC, MVT::v16i8,  1 },
  };

  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64,  1 },
    { ISD::SETCC, MVT::v4f32,  1 },
    { ISD::SETCC, MVT::v2i64,  1 },
  };

  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64,  1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64,  1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };
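
  // The returned cost scales with the legalization split factor. E.g.
  // (illustrative) "icmp slt <8 x i64>" on an AVX1-only target legalizes to
  // 2 x v4i64, so it is costed as 2 * 4 = 8 via the AVX1 table above.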

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Type *> Tys, FastMathFlags FMF) {
  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,  4 },
    { ISD::BITREVERSE, MVT::v8i32,  4 },
    { ISD::BITREVERSE, MVT::v16i16, 4 },
    { ISD::BITREVERSE, MVT::v32i8,  4 },
    { ISD::BITREVERSE, MVT::v2i64,  1 },
    { ISD::BITREVERSE, MVT::v4i32,  1 },
    { ISD::BITREVERSE, MVT::v8i16,  1 },
    { ISD::BITREVERSE, MVT::v16i8,  1 },
    { ISD::BITREVERSE, MVT::i64,    3 },
    { ISD::BITREVERSE, MVT::i32,    3 },
    { ISD::BITREVERSE, MVT::i16,    3 },
    { ISD::BITREVERSE, MVT::i8,     3 }
  };
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,   5 },
    { ISD::BITREVERSE, MVT::v8i32,   5 },
    { ISD::BITREVERSE, MVT::v16i16,  5 },
    { ISD::BITREVERSE, MVT::v32i8,   5 },
    { ISD::BSWAP,      MVT::v4i64,   1 },
    { ISD::BSWAP,      MVT::v8i32,   1 },
    { ISD::BSWAP,      MVT::v16i16,  1 },
    { ISD::CTLZ,       MVT::v4i64,  23 },
    { ISD::CTLZ,       MVT::v8i32,  18 },
    { ISD::CTLZ,       MVT::v16i16, 14 },
    { ISD::CTLZ,       MVT::v32i8,   9 },
    { ISD::CTPOP,      MVT::v4i64,   7 },
    { ISD::CTPOP,      MVT::v8i32,  11 },
    { ISD::CTPOP,      MVT::v16i16,  9 },
    { ISD::CTPOP,      MVT::v32i8,   6 },
    { ISD::CTTZ,       MVT::v4i64,  10 },
    { ISD::CTTZ,       MVT::v8i32,  14 },
    { ISD::CTTZ,       MVT::v16i16, 12 },
    { ISD::CTTZ,       MVT::v32i8,   9 },
    { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  28 }  // Haswell from http://www.agner.org/
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,  10 },
    { ISD::BITREVERSE, MVT::v8i32,  10 },
    { ISD::BITREVERSE, MVT::v16i16, 10 },
    { ISD::BITREVERSE, MVT::v32i8,  10 },
    { ISD::BSWAP,      MVT::v4i64,   4 },
    { ISD::BSWAP,      MVT::v8i32,   4 },
    { ISD::BSWAP,      MVT::v16i16,  4 },
    { ISD::CTLZ,       MVT::v4i64,  46 },
    { ISD::CTLZ,       MVT::v8i32,  36 },
    { ISD::CTLZ,       MVT::v16i16, 28 },
    { ISD::CTLZ,       MVT::v32i8,  18 },
    { ISD::CTPOP,      MVT::v4i64,  14 },
    { ISD::CTPOP,      MVT::v8i32,  22 },
    { ISD::CTPOP,      MVT::v16i16, 18 },
    { ISD::CTPOP,      MVT::v32i8,  12 },
    { ISD::CTTZ,       MVT::v4i64,  20 },
    { ISD::CTTZ,       MVT::v8i32,  28 },
    { ISD::CTTZ,       MVT::v16i16, 24 },
    { ISD::CTTZ,       MVT::v32i8,  18 },
    { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  43 }  // SNB from http://www.agner.org/
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::FSQRT, MVT::f32,   18 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 18 }  // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64,  5 },
    { ISD::BITREVERSE, MVT::v4i32,  5 },
    { ISD::BITREVERSE, MVT::v8i16,  5 },
    { ISD::BITREVERSE, MVT::v16i8,  5 },
    { ISD::BSWAP,      MVT::v2i64,  1 },
    { ISD::BSWAP,      MVT::v4i32,  1 },
    { ISD::BSWAP,      MVT::v8i16,  1 },
    { ISD::CTLZ,       MVT::v2i64, 23 },
    { ISD::CTLZ,       MVT::v4i32, 18 },
    { ISD::CTLZ,       MVT::v8i16, 14 },
    { ISD::CTLZ,       MVT::v16i8,  9 },
    { ISD::CTPOP,      MVT::v2i64,  7 },
    { ISD::CTPOP,      MVT::v4i32, 11 },
    { ISD::CTPOP,      MVT::v8i16,  9 },
    { ISD::CTPOP,      MVT::v16i8,  6 },
    { ISD::CTTZ,       MVT::v2i64, 10 },
    { ISD::CTTZ,       MVT::v4i32, 14 },
    { ISD::CTTZ,       MVT::v8i16, 12 },
    { ISD::CTTZ,       MVT::v16i8,  9 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::BSWAP, MVT::v2i64,  7 },
    { ISD::BSWAP, MVT::v4i32,  7 },
    { ISD::BSWAP, MVT::v8i16,  7 },
    { ISD::CTLZ,  MVT::v2i64, 25 },
    { ISD::CTLZ,  MVT::v4i32, 26 },
    { ISD::CTLZ,  MVT::v8i16, 20 },
    { ISD::CTLZ,  MVT::v16i8, 17 },
    { ISD::CTPOP, MVT::v2i64, 12 },
    { ISD::CTPOP, MVT::v4i32, 15 },
    { ISD::CTPOP, MVT::v8i16, 13 },
    { ISD::CTPOP, MVT::v16i8, 10 },
    { ISD::CTTZ,  MVT::v2i64, 14 },
    { ISD::CTTZ,  MVT::v4i32, 18 },
    { ISD::CTTZ,  MVT::v8i16, 16 },
    { ISD::CTTZ,  MVT::v16i8, 13 },
    { ISD::FSQRT, MVT::f64,   32 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 32 }  // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::FSQRT, MVT::f32,   28 }, // Pentium III from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 56 }  // Pentium III from http://www.agner.org/
  };

  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  case Intrinsic::ctlz:
    ISD = ISD::CTLZ;
    break;
  case Intrinsic::ctpop:
    ISD = ISD::CTPOP;
    break;
  case Intrinsic::cttz:
    ISD = ISD::CTTZ;
    break;
  case Intrinsic::sqrt:
    ISD = ISD::FSQRT;
    break;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
  MVT MTy = LT.second;

  // Attempt to lookup cost.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;
  return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF);
}

int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Value *> Args, FastMathFlags FMF) {
  return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF);
}

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  Type *ScalarType = Val->getScalarType();

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;
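    // For example, extracting element 5 of a <8 x i32> that is split into two
    // <4 x i32> registers normalizes to Index = 5 % 4 = 1 within its half.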

    // Floating point scalars are already located in index #0.
    if (ScalarType->isFloatingPointTy() && Index == 0)
      return 0;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  int RegisterFileMoveCost = 0;
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost = 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}

int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;
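  // Sum the per-element cost; e.g. fully scalarizing a <4 x float> with both
  // Insert and Extract set charges four inserts plus four extracts.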
  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
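      // For example, a <5 x float> store becomes five scalar stores plus five
      // extractelement operations from the source vector.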
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
  // proxy for a double-pumped AVX memory interface such as on Sandybridge.
  if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
    Cost *= 2;

  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For a scalar, take the regular (unmasked) memory-op cost.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
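    // The scalarized form tests each mask lane and branches around the scalar
    // memory op, hence one compare and one branch per element above.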
    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires expand/truncate for data and a shuffle for mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);
  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }

  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}

int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.
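  // Note: a pairwise reduction combines adjacent lanes at each step (in the
  // style of haddps), while the non-pairwise form shuffles the high half onto
  // the low half and adds, halving the vector each step.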
  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD, MVT::v8i16, 5 },
  };
  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD, MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD, MVT::v8i16, 5 },
    { ISD::ADD, MVT::v8i32, 5 },
  };
  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };
  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD, MVT::v4i64, 3 },
    { ISD::ADD, MVT::v8i16, 4 },
    { ISD::ADD, MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
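  // For example, an i128 immediate whose two 64-bit halves both fall outside
  // the int32 range costs 2 * (2 * TCC_Basic) = 4.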
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32-bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    // Fallthrough
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

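  // When the immediate occupies the operand slot the instruction can encode
  // directly, cheap immediates are reported as free so that constant hoisting
  // leaves them in place; e.g. a 32-bit add immediate materializes for
  // TCC_Basic, which is within the NumConstants * TCC_Basic budget below.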
  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

// Return an average cost of Gather / Scatter instruction; it may be improved
// later.
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
                                unsigned Alignment, unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = SrcVTy->getVectorNumElements();

  // Try to reduce the index size from 64 bit (the default for GEP) to 32 bit.
  // This is essential for VF 16: with 16 x 64-bit indices the operation does
  // not fit in a zmm register and needs to be split. Also check that the base
  // pointer is the same for all lanes, and that there's at most one variable
  // index.
  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (IndxTy->isVectorTy())
        IndxTy = IndxTy->getVectorElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };
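  // For example, a VF-16 gather whose GEP index is a sign-extended i32 can use
  // a v16i32 index vector (one zmm) instead of splitting 16 x i64 indices
  // across two registers.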

  // Trying to reduce IndexSize to 32 bits for vector 16.
  // By default the IndexSize is equal to the pointer size.
  unsigned IndexSize = (VF >= 16) ? getIndexSizeInBits(Ptr, DL)
                                  : DL.getPointerSizeInBits();

  Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
                                                    IndexSize), VF);
  std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
  if (SplitFactor > 1) {
    // Handle splitting of vector of pointers.
    Type *SplitSrcTy =
        VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = 2;
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           Alignment, AddressSpace);
}

/// Return the cost of full scalarization of gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - pointer[s] address space.
///
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, unsigned Alignment,
                                unsigned AddressSpace) {
  unsigned VF = SrcVTy->getVectorNumElements();

  int MaskUnpackCost = 0;
  if (VariableMask) {
    VectorType *MaskTy =
        VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          Alignment, AddressSpace);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}

/// Calculate the cost of Gather / Scatter operation
int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
                                       Value *Ptr, bool VariableMask,
                                       unsigned Alignment) {
  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = SrcVTy->getVectorNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
    Scalarize = true;
  // Gather / Scatter for vector 2 is not profitable on KNL / SKX.
  // A vector-4 gather/scatter instruction does not exist on KNL. We could
  // extend it to 8 elements, but zeroing the upper bits of the mask vector
  // would add more instructions. Right now we give the scalar cost of
  // vector-4 for KNL. TODO: Check whether the gather/scatter instruction is
  // better in the VariableMask case.
  if (VF == 2 || (VF == 4 && !ST->hasVLX()))
    Scalarize = true;

  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();
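  // For example, a <8 x float> masked load/store is legal with AVX
  // (vmaskmovps); i8 and i16 element types additionally require AVX-512 BW.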
  return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
         ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about the legality of the feature, the
  // vectorization factor is not calculated yet. The Loop Vectorizer sends a
  // scalar type and the decision is based on the width of the loaded/stored
  // data. Later on, the cost model will estimate usage of this intrinsic
  // based on the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  if (isa<VectorType>(DataTy) && !isPowerOf2_32(DataTy->getVectorNumElements()))
    return false;
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  // AVX-512 allows gather and scatter.
  return (DataWidth == 32 || DataWidth == 64) && ST->hasAVX512();
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  return isLegalMaskedGather(DataType);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();
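  // Inlining is allowed only when the callee's feature set is a subset of the
  // caller's, i.e. the caller can execute any instruction the callee could.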
  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}

bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom() || ST->isSLM());
}

// Get estimation for interleaved load/store operations and strided load.
// \p Indices contains indices for strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace) {

  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps) required to
  // load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
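  // For example, VecTy = <12 x i32> (48 bytes) widened to a legal v16i32 on
  // AVX-512 gives NumOfMemOps = ceil(48 / 64) = 1.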

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  if (Opcode == Instruction::Load) {
    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
                                     VecTy->getVectorNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
    // When we have more than one destination, we need additional instructions
    // to keep sources.
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
  // We need additional instructions to keep sources.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}

int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool &RequiresBW) {
    RequiresBW = false;
    Type *EltTy = VecTy->getVectorElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8)) {
      RequiresBW = true;
      return true;
    }
    return false;
  };
  bool RequiresBW;
  bool HasAVX512Solution = isSupportedOnAVX512(VecTy, RequiresBW);
  if (ST->hasAVX512() && HasAVX512Solution && (!RequiresBW || ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace);
  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}