//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// About the Cost Model numbers used below it's necessary to say the following:
/// the numbers correspond to some "generic" X86 CPU instead of a concrete CPU
/// model. Usually the numbers correspond to the CPU where the feature first
/// appeared. For example, if we do Subtarget.hasSSE42() in the lookups below,
/// the cost is based on Nehalem as that was the first CPU to support that
/// feature level and thus most likely has the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                     divss     sqrtss    rsqrtss
///   Piledriver        9-24      13-15     5
///   Pentium II,III    18        30        2
///   Nehalem           7-14      7-18      3
///   Haswell           10-13     11        5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
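///
/// An illustrative reading of one entry (an interpretation, not normative):
/// { ISD::FDIV, MVT::v4f32, 14 } guarded by ST->hasSSE42() says "assume a
/// v4f32 fdiv costs about 14 on the oldest SSE4.2 CPU (Nehalem)"; after type
/// legalization the entry's cost is further scaled by LT.first, the number of
/// legal-type pieces the original vector splits into.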
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"
//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}
unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle it instead, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a constant power-of-two is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }
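  // For illustration (an assumed lowering sketch, not lifted verbatim from
  // the backend): X sdiv 4 on 32-bit lanes expands roughly to
  //   T = ashr X, 31    ; broadcast the sign bit
  //   T = lshr T, 30    ; keep the low log2(4) bits: 0 if X >= 0, 3 if X < 0
  //   T = add  X, T     ; bias negative values so the shift rounds to zero
  //   R = ashr T, 2     ; shift by log2(4)
  // which matches the 2 x AShr + LShr + Add costs accumulated above.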
  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i16, 12 }, // pmulhw sequence
    { ISD::SDIV, MVT::v8i16,   6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v16i16, 12 }, // pmulhuw sequence
    { ISD::UDIV, MVT::v8i16,   6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  38 }, // pmuludq sequence
    { ISD::SDIV, MVT::v4i32,  19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v8i32,  30 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32,  15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 30;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    if (const auto *Entry = CostTableLookup(SSE2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
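  // Background for the "pmulhw"/"pmuludq sequence" entries above (a sketch of
  // the standard magic-number transform, not the exact code the backend
  // emits): dividing by a uniform constant becomes a multiply-high by a
  // precomputed constant plus a few shift/add fixups, roughly
  // x /s 7 ~ (mulhi(x, magic) + fixups) >> shift, so these entries price a
  // short multiply-based sequence rather than a hardware divide.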
  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL,  MVT::v2i64, 1 },
    { ISD::MUL,  MVT::v4i64, 1 },
    { ISD::MUL,  MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::MUL,  MVT::v64i8,     11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v32i8,      4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,      4 }, // extend/pmullw/trunc sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v64i8,  64*20 },
    { ISD::SDIV, MVT::v32i16, 32*20 },
    { ISD::SDIV, MVT::v16i32, 16*20 },
    { ISD::SDIV, MVT::v8i64,   8*20 },
    { ISD::UDIV, MVT::v64i8,  64*20 },
    { ISD::UDIV, MVT::v32i16, 32*20 },
    { ISD::UDIV, MVT::v16i32, 16*20 },
    { ISD::UDIV, MVT::v8i64,   8*20 }
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },
    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::MUL,  MVT::v32i8,  13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   5 }, // extend/pmullw/trunc sequence.
  };

  if (ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2, even though we declare them
    // custom in order to detect cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32, 1 },
    { ISD::SRL,  MVT::v4i32, 1 },
    { ISD::SRA,  MVT::v4i32, 1 },
    { ISD::SHL,  MVT::v8i32, 1 },
    { ISD::SRL,  MVT::v8i32, 1 },
    { ISD::SRA,  MVT::v8i32, 1 },
    { ISD::SHL,  MVT::v2i64, 1 },
    { ISD::SRL,  MVT::v2i64, 1 },
    { ISD::SHL,  MVT::v4i64, 1 },
    { ISD::SRL,  MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry XOPCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,  MVT::v16i8,  1 },
    { ISD::SRL,  MVT::v16i8,  2 },
    { ISD::SRA,  MVT::v16i8,  2 },
    { ISD::SHL,  MVT::v8i16,  1 },
    { ISD::SRL,  MVT::v8i16,  2 },
    { ISD::SRA,  MVT::v8i16,  2 },
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  2 },
    { ISD::SRA,  MVT::v4i32,  2 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  2 },
    { ISD::SRA,  MVT::v2i64,  2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,  MVT::v32i8,  2 },
    { ISD::SRL,  MVT::v32i8,  4 },
    { ISD::SRA,  MVT::v32i8,  4 },
    { ISD::SHL,  MVT::v16i16, 2 },
    { ISD::SRL,  MVT::v16i16, 4 },
    { ISD::SRA,  MVT::v16i16, 4 },
    { ISD::SHL,  MVT::v8i32,  2 },
    { ISD::SRL,  MVT::v8i32,  4 },
    { ISD::SRA,  MVT::v8i32,  4 },
    { ISD::SHL,  MVT::v4i64,  2 },
    { ISD::SRL,  MVT::v4i64,  4 },
    { ISD::SRA,  MVT::v4i64,  4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX2CustomCostTable[] = {
    { ISD::SHL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,  24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   4 }, // srl/xor/sub sequence.

    { ISD::MUL,  MVT::v32i8,  17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   7 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2CustomCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVXCustomCostTable[] = {
    { ISD::MUL,  MVT::v32i8,  26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  44 }, // SNB from http://www.agner.org/

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  // Look for AVX lowering tricks for custom cases.
  if (ST->hasAVX()) {
    if (const auto *Entry = CostTableLookup(AVXCustomCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry SSE42FloatCostTable[] = {
    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42()) {
    if (const auto *Entry = CostTableLookup(SSE42FloatCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v32i8,  2 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v16i16, 2 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v8i32,  2 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.
    { ISD::SHL,  MVT::v4i64,  2 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v32i8,  2 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v16i16, 2 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v8i32,  2 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.
    { ISD::SRL,  MVT::v4i64,  2 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v32i8,  8 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v16i16, 2 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
    { ISD::SRA,  MVT::v8i32,  2 }, // psrad.
    { ISD::SRA,  MVT::v2i64,  4 }, // 2 x psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8 }, // 2 x psrad + shuffle.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered
    // into a vector multiply (pmullw/pmulld).
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      return LT.first;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if ((VT == MVT::v8i32 || VT == MVT::v16i16) &&
        (ST->hasAVX() && !ST->hasAVX2()))
      ISD = ISD::MUL;

    // A vector shift left by a non-uniform constant is converted
    // into a vector multiply; the new multiply is eventually
    // lowered into a sequence of shuffles and 2 x pmuludq.
    if (VT == MVT::v4i32 && ST->hasSSE2())
      ISD = ISD::MUL;
  }
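  // The shift-to-multiply conversions above rely on the identity
  // (x shl <c0, c1, ...>) == (x mul <1 << c0, 1 << c1, ...>); e.g. a v4i32
  // shift by <1, 2, 3, 4> is priced like a multiply by <2, 4, 8, 16>.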
  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,    11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v32i8,  2*11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 2*14 }, // pblendvb sequence.

    { ISD::SRL,  MVT::v16i8,    12 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v32i8,  2*12 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v4i32,    11 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*11 }, // Shift each lane + blend.

    { ISD::SRA,  MVT::v16i8,    24 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  2*24 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v4i32,    12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*12 }, // Shift each lane + blend.
  };

  if (ST->hasSSE41()) {
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v32i8,  2*26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v8i32, 2*2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v32i8,  2*26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v32i8,  2*54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12 }, // srl/xor/sub sequence.

    { ISD::MUL,  MVT::v16i8,    12 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,      23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,    39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,      38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,    69 }, // Pentium IV from http://www.agner.org/

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyways so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
    // to hide "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8,  16*20 },
    { ISD::SDIV, MVT::v8i16,   8*20 },
    { ISD::SDIV, MVT::v4i32,   4*20 },
    { ISD::SDIV, MVT::v2i64,   2*20 },
    { ISD::UDIV, MVT::v16i8,  16*20 },
    { ISD::UDIV, MVT::v8i16,   8*20 },
    { ISD::UDIV, MVT::v4i32,   4*20 },
    { ISD::UDIV, MVT::v2i64,   2*20 },
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
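  // Worked example of the heuristic above: a single v4i32 sdiv is priced at
  // 4 * 20 = 80, so any loop containing a vector division will see that
  // division dominate its cost estimate, (intentionally) steering the
  // vectorizer away from vectorizing it.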
  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16, 4 },
    { ISD::MUL,  MVT::v8i32,  4 },
    { ISD::SUB,  MVT::v32i8,  4 },
    { ISD::ADD,  MVT::v32i8,  4 },
    { ISD::SUB,  MVT::v16i16, 4 },
    { ISD::ADD,  MVT::v16i16, 4 },
    { ISD::SUB,  MVT::v8i32,  4 },
    { ISD::ADD,  MVT::v8i32,  4 },
    { ISD::SUB,  MVT::v4i64,  4 },
    { ISD::ADD,  MVT::v4i64,  4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(3) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 16
    // instead of 8.
    { ISD::MUL,  MVT::v4i64, 16 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, VT))
      return LT.first * Entry->Cost;
  }
  // Custom lowering of vectors.
  static const CostTblEntry CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(3) and adds(2).
    { ISD::MUL, MVT::v2i64, 8 },
    { ISD::MUL, MVT::v4i64, 8 },
    { ISD::MUL, MVT::v8i64, 8 }
  };
  if (const auto *Entry = CostTableLookup(CustomLowered, ISD, LT.second))
    return LT.first * Entry->Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;
  static const CostTblEntry SSE1FloatCostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1FloatCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}
int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  if (Kind == TTI::SK_Reverse || Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    static const CostTblEntry AVX512VBMIShuffleTbl[] = {
      { TTI::SK_Reverse, MVT::v64i8, 1 }, // vpermb
      { TTI::SK_Reverse, MVT::v32i8, 1 }  // vpermb
    };

    if (ST->hasVBMI())
      if (const auto *Entry =
              CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
        return LT.first * Entry->Cost;
    static const CostTblEntry AVX512BWShuffleTbl[] = {
      { TTI::SK_Reverse, MVT::v32i16, 1 }, // vpermw
      { TTI::SK_Reverse, MVT::v16i16, 1 }, // vpermw
      { TTI::SK_Reverse, MVT::v64i8,  6 }  // vextracti64x4 + 2*vperm2i128
                                           // + 2*pshufb + vinserti64x4
    };

    if (ST->hasBWI())
      if (const auto *Entry =
              CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
        return LT.first * Entry->Cost;
    static const CostTblEntry AVX512ShuffleTbl[] = {
      { TTI::SK_Reverse, MVT::v8f64,  1 }, // vpermpd
      { TTI::SK_Reverse, MVT::v16f32, 1 }, // vpermps
      { TTI::SK_Reverse, MVT::v8i64,  1 }, // vpermq
      { TTI::SK_Reverse, MVT::v16i32, 1 }  // vpermd
    };

    if (ST->hasAVX512())
      if (const auto *Entry =
              CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
        return LT.first * Entry->Cost;
    static const CostTblEntry AVX2ShuffleTbl[] = {
      { TTI::SK_Reverse,   MVT::v4f64,  1 }, // vpermpd
      { TTI::SK_Reverse,   MVT::v8f32,  1 }, // vpermps
      { TTI::SK_Reverse,   MVT::v4i64,  1 }, // vpermq
      { TTI::SK_Reverse,   MVT::v8i32,  1 }, // vpermd
      { TTI::SK_Reverse,   MVT::v16i16, 2 }, // vperm2i128 + pshufb
      { TTI::SK_Reverse,   MVT::v32i8,  2 }, // vperm2i128 + pshufb

      { TTI::SK_Alternate, MVT::v16i16, 1 }, // vpblendw
      { TTI::SK_Alternate, MVT::v32i8,  1 }  // vpblendvb
    };

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
        return LT.first * Entry->Cost;
    static const CostTblEntry AVX1ShuffleTbl[] = {
      { TTI::SK_Reverse,   MVT::v4f64,  2 }, // vperm2f128 + vpermilpd
      { TTI::SK_Reverse,   MVT::v8f32,  2 }, // vperm2f128 + vpermilps
      { TTI::SK_Reverse,   MVT::v4i64,  2 }, // vperm2f128 + vpermilpd
      { TTI::SK_Reverse,   MVT::v8i32,  2 }, // vperm2f128 + vpermilps
      { TTI::SK_Reverse,   MVT::v16i16, 4 }, // vextractf128 + 2*pshufb
                                             // + vinsertf128
      { TTI::SK_Reverse,   MVT::v32i8,  4 }, // vextractf128 + 2*pshufb
                                             // + vinsertf128

      { TTI::SK_Alternate, MVT::v4i64,  1 }, // vblendpd
      { TTI::SK_Alternate, MVT::v4f64,  1 }, // vblendpd
      { TTI::SK_Alternate, MVT::v8i32,  1 }, // vblendps
      { TTI::SK_Alternate, MVT::v8f32,  1 }, // vblendps
      { TTI::SK_Alternate, MVT::v16i16, 3 }, // vpand + vpandn + vpor
      { TTI::SK_Alternate, MVT::v32i8,  3 }  // vpand + vpandn + vpor
    };

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
        return LT.first * Entry->Cost;
    static const CostTblEntry SSE41ShuffleTbl[] = {
      { TTI::SK_Alternate, MVT::v2i64, 1 }, // pblendw
      { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd
      { TTI::SK_Alternate, MVT::v4i32, 1 }, // pblendw
      { TTI::SK_Alternate, MVT::v4f32, 1 }, // blendps
      { TTI::SK_Alternate, MVT::v8i16, 1 }, // pblendw
      { TTI::SK_Alternate, MVT::v16i8, 1 }  // pblendvb
    };

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
        return LT.first * Entry->Cost;
    static const CostTblEntry SSSE3ShuffleTbl[] = {
      { TTI::SK_Reverse,   MVT::v8i16, 1 }, // pshufb
      { TTI::SK_Reverse,   MVT::v16i8, 1 }, // pshufb

      { TTI::SK_Alternate, MVT::v8i16, 3 }, // pshufb + pshufb + por
      { TTI::SK_Alternate, MVT::v16i8, 3 }  // pshufb + pshufb + por
    };

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
        return LT.first * Entry->Cost;
    static const CostTblEntry SSE2ShuffleTbl[] = {
      { TTI::SK_Reverse,   MVT::v2f64, 1 }, // shufpd
      { TTI::SK_Reverse,   MVT::v2i64, 1 }, // pshufd
      { TTI::SK_Reverse,   MVT::v4i32, 1 }, // pshufd
      { TTI::SK_Reverse,   MVT::v8i16, 3 }, // pshuflw + pshufhw + pshufd
      { TTI::SK_Reverse,   MVT::v16i8, 9 }, // 2*pshuflw + 2*pshufhw
                                            // + 2*pshufd + 2*unpck + packus

      { TTI::SK_Alternate, MVT::v2i64, 1 }, // movsd
      { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd
      { TTI::SK_Alternate, MVT::v4i32, 2 }, // 2*shufps
      { TTI::SK_Alternate, MVT::v8i16, 3 }, // pand + pandn + por
      { TTI::SK_Alternate, MVT::v16i8, 3 }  // pand + pandn + por
    };

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
        return LT.first * Entry->Cost;
    static const CostTblEntry SSE1ShuffleTbl[] = {
      { TTI::SK_Reverse,   MVT::v4f32, 1 }, // shufps
      { TTI::SK_Alternate, MVT::v4f32, 2 }  // 2*shufps
    };

    if (ST->hasSSE1())
      if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
        return LT.first * Entry->Cost;

  } else if (Kind == TTI::SK_PermuteTwoSrc) {
    // We assume that source and destination have the same vector type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
    int NumOfShuffles = NumOfDests * NumOfShufflesPerDest;
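    // Illustrative arithmetic (exposition only): if the type legalizes into
    // two registers (LT.first == 2), there are 2 destination vectors, and each
    // destination may need lanes from any of the 2 + 2 legalized source
    // halves, i.e. 2 * 2 - 1 = 3 two-source shuffles per destination, 6 total.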
    static const CostTblEntry AVX512VBMIShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v64i8, 1}, // vpermt2b
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 1}, // vpermt2b
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}  // vpermt2b
    };

    if (ST->hasVBMI())
      if (const auto *Entry = CostTableLookup(AVX512VBMIShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return NumOfShuffles * Entry->Cost;
    static const CostTblEntry AVX512BWShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v32i16, 1}, // vpermt2w
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 1}, // vpermt2w
      {ISD::VECTOR_SHUFFLE, MVT::v8i16,  1}, // vpermt2w
      {ISD::VECTOR_SHUFFLE, MVT::v32i8,  3}, // zext + vpermt2w + trunc
      {ISD::VECTOR_SHUFFLE, MVT::v64i8, 19}, // 6 * v32i8 + 1
      {ISD::VECTOR_SHUFFLE, MVT::v16i8,  3}  // zext + vpermt2w + trunc
    };

    if (ST->hasBWI())
      if (const auto *Entry = CostTableLookup(AVX512BWShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return NumOfShuffles * Entry->Cost;
    static const CostTblEntry AVX512ShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v8f64,  1}, // vpermt2pd
      {ISD::VECTOR_SHUFFLE, MVT::v16f32, 1}, // vpermt2ps
      {ISD::VECTOR_SHUFFLE, MVT::v8i64,  1}, // vpermt2q
      {ISD::VECTOR_SHUFFLE, MVT::v16i32, 1}, // vpermt2d
      {ISD::VECTOR_SHUFFLE, MVT::v4f64,  1}, // vpermt2pd
      {ISD::VECTOR_SHUFFLE, MVT::v8f32,  1}, // vpermt2ps
      {ISD::VECTOR_SHUFFLE, MVT::v4i64,  1}, // vpermt2q
      {ISD::VECTOR_SHUFFLE, MVT::v8i32,  1}, // vpermt2d
      {ISD::VECTOR_SHUFFLE, MVT::v2f64,  1}, // vpermt2pd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32,  1}, // vpermt2ps
      {ISD::VECTOR_SHUFFLE, MVT::v2i64,  1}, // vpermt2q
      {ISD::VECTOR_SHUFFLE, MVT::v4i32,  1}  // vpermt2d
    };

    if (ST->hasAVX512())
      if (const auto *Entry =
              CostTableLookup(AVX512ShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return NumOfShuffles * Entry->Cost;

  } else if (Kind == TTI::SK_PermuteSingleSrc) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (LT.first == 1) {
      static const CostTblEntry AVX512VBMIShuffleTbl[] = {
        {ISD::VECTOR_SHUFFLE, MVT::v64i8, 1}, // vpermb
        {ISD::VECTOR_SHUFFLE, MVT::v32i8, 1}  // vpermb
      };

      if (ST->hasVBMI())
        if (const auto *Entry = CostTableLookup(AVX512VBMIShuffleTbl,
                                                ISD::VECTOR_SHUFFLE, LT.second))
          return Entry->Cost;
      static const CostTblEntry AVX512BWShuffleTbl[] = {
        {ISD::VECTOR_SHUFFLE, MVT::v32i16, 1}, // vpermw
        {ISD::VECTOR_SHUFFLE, MVT::v16i16, 1}, // vpermw
        {ISD::VECTOR_SHUFFLE, MVT::v8i16,  1}, // vpermw
        {ISD::VECTOR_SHUFFLE, MVT::v64i8,  8}, // extend to v32i16
        {ISD::VECTOR_SHUFFLE, MVT::v32i8,  3}  // vpermw + zext/trunc
      };

      if (ST->hasBWI())
        if (const auto *Entry = CostTableLookup(AVX512BWShuffleTbl,
                                                ISD::VECTOR_SHUFFLE, LT.second))
          return Entry->Cost;
      static const CostTblEntry AVX512ShuffleTbl[] = {
        {ISD::VECTOR_SHUFFLE, MVT::v8f64,  1}, // vpermpd
        {ISD::VECTOR_SHUFFLE, MVT::v4f64,  1}, // vpermpd
        {ISD::VECTOR_SHUFFLE, MVT::v2f64,  1}, // vpermpd
        {ISD::VECTOR_SHUFFLE, MVT::v16f32, 1}, // vpermps
        {ISD::VECTOR_SHUFFLE, MVT::v8f32,  1}, // vpermps
        {ISD::VECTOR_SHUFFLE, MVT::v4f32,  1}, // vpermps
        {ISD::VECTOR_SHUFFLE, MVT::v8i64,  1}, // vpermq
        {ISD::VECTOR_SHUFFLE, MVT::v4i64,  1}, // vpermq
        {ISD::VECTOR_SHUFFLE, MVT::v2i64,  1}, // vpermq
        {ISD::VECTOR_SHUFFLE, MVT::v16i32, 1}, // vpermd
        {ISD::VECTOR_SHUFFLE, MVT::v8i32,  1}, // vpermd
        {ISD::VECTOR_SHUFFLE, MVT::v4i32,  1}, // vpermd
        {ISD::VECTOR_SHUFFLE, MVT::v16i8,  1}  // pshufb
      };

      if (ST->hasAVX512())
        if (const auto *Entry =
                CostTableLookup(AVX512ShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second))
          return Entry->Cost;

    } else {
      // We are going to permute multiple sources and the result will be in
      // multiple destinations. We provide an accurate cost only for splits
      // where the element type remains the same.
      MVT LegalVT = LT.second;
      if (LegalVT.getVectorElementType().getSizeInBits() ==
              Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
          LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {

        unsigned VecTySize = DL.getTypeStoreSize(Tp);
        unsigned LegalVTSize = LegalVT.getStoreSize();
        // Number of source vectors after legalization:
        unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
        // Number of destination vectors after legalization:
        unsigned NumOfDests = LT.first;

        Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
                                           LegalVT.getVectorNumElements());

        unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
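        // Illustrative arithmetic under the assumptions above: permuting a
        // single-source v16i32 on an AVX2 target legalizes to v8i32 pieces
        // (LegalVT = v8i32), so NumOfSrcs = ceil(64 / 32) = 2 and
        // NumOfDests = LT.first = 2; each destination needs (2 - 1) two-source
        // shuffles, giving 2 v8i32 SK_PermuteTwoSrc operations below.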
        return NumOfShuffles *
               getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
      }
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // FIXME: Need a better design of the cost table to handle non-simple types
  // and the potentially massive combinations (elem_num x src_type x dst_type).

  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },

    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },

    { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f64,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f64,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f64,  1 },

    { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f64,  1 },
  };

  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.
  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  1 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  1 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },

    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64, 26 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64, 26 },

    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 12 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64, 26 },

    { ISD::FP_TO_UINT,  MVT::v2i32,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32, 1 },
  };
  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };
  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 20 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 13 },

    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },

    { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
    { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
  };
  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
  };
  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },

    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   8 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  9 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  5 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  2 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 3 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  3 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 7 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 10 },
  };

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return LTSrc.first * Entry->Cost;
  }
  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE41()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE2()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC,   MVT::v2i64,   8 },
    { ISD::SETCC,   MVT::v4i32,   1 },
    { ISD::SETCC,   MVT::v8i16,   1 },
    { ISD::SETCC,   MVT::v16i8,   1 },
  };

  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC,   MVT::v2f64,   1 },
    { ISD::SETCC,   MVT::v4f32,   1 },
    { ISD::SETCC,   MVT::v2i64,   1 },
  };

  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC,   MVT::v4f64,   1 },
    { ISD::SETCC,   MVT::v8f32,   1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC,   MVT::v4i64,   4 },
    { ISD::SETCC,   MVT::v8i32,   4 },
    { ISD::SETCC,   MVT::v16i16,  4 },
    { ISD::SETCC,   MVT::v32i8,   4 },
  };

  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC,   MVT::v4i64,   1 },
    { ISD::SETCC,   MVT::v8i32,   1 },
    { ISD::SETCC,   MVT::v16i16,  1 },
    { ISD::SETCC,   MVT::v32i8,   1 },
  };

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC,   MVT::v8i64,   1 },
    { ISD::SETCC,   MVT::v16i32,  1 },
    { ISD::SETCC,   MVT::v8f64,   1 },
    { ISD::SETCC,   MVT::v16f32,  1 },
  };
  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Type *> Tys, FastMathFlags FMF) {
  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,   4 },
    { ISD::BITREVERSE, MVT::v8i32,   4 },
    { ISD::BITREVERSE, MVT::v16i16,  4 },
    { ISD::BITREVERSE, MVT::v32i8,   4 },
    { ISD::BITREVERSE, MVT::v2i64,   1 },
    { ISD::BITREVERSE, MVT::v4i32,   1 },
    { ISD::BITREVERSE, MVT::v8i16,   1 },
    { ISD::BITREVERSE, MVT::v16i8,   1 },
    { ISD::BITREVERSE, MVT::i64,     3 },
    { ISD::BITREVERSE, MVT::i32,     3 },
    { ISD::BITREVERSE, MVT::i16,     3 },
    { ISD::BITREVERSE, MVT::i8,      3 }
  };
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,   5 },
    { ISD::BITREVERSE, MVT::v8i32,   5 },
    { ISD::BITREVERSE, MVT::v16i16,  5 },
    { ISD::BITREVERSE, MVT::v32i8,   5 },
    { ISD::BSWAP,      MVT::v4i64,   1 },
    { ISD::BSWAP,      MVT::v8i32,   1 },
    { ISD::BSWAP,      MVT::v16i16,  1 },
    { ISD::CTLZ,       MVT::v4i64,  23 },
    { ISD::CTLZ,       MVT::v8i32,  18 },
    { ISD::CTLZ,       MVT::v16i16, 14 },
    { ISD::CTLZ,       MVT::v32i8,   9 },
    { ISD::CTPOP,      MVT::v4i64,   7 },
    { ISD::CTPOP,      MVT::v8i32,  11 },
    { ISD::CTPOP,      MVT::v16i16,  9 },
    { ISD::CTPOP,      MVT::v32i8,   6 },
    { ISD::CTTZ,       MVT::v4i64,  10 },
    { ISD::CTTZ,       MVT::v8i32,  14 },
    { ISD::CTTZ,       MVT::v16i16, 12 },
    { ISD::CTTZ,       MVT::v32i8,   9 },
    { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,  10 },
    { ISD::BITREVERSE, MVT::v8i32,  10 },
    { ISD::BITREVERSE, MVT::v16i16, 10 },
    { ISD::BITREVERSE, MVT::v32i8,  10 },
    { ISD::BSWAP,      MVT::v4i64,   4 },
    { ISD::BSWAP,      MVT::v8i32,   4 },
    { ISD::BSWAP,      MVT::v16i16,  4 },
    { ISD::CTLZ,       MVT::v4i64,  46 },
    { ISD::CTLZ,       MVT::v8i32,  36 },
    { ISD::CTLZ,       MVT::v16i16, 28 },
    { ISD::CTLZ,       MVT::v32i8,  18 },
    { ISD::CTPOP,      MVT::v4i64,  14 },
    { ISD::CTPOP,      MVT::v8i32,  22 },
    { ISD::CTPOP,      MVT::v16i16, 18 },
    { ISD::CTPOP,      MVT::v32i8,  12 },
    { ISD::CTTZ,       MVT::v4i64,  20 },
    { ISD::CTTZ,       MVT::v8i32,  28 },
    { ISD::CTTZ,       MVT::v16i16, 24 },
    { ISD::CTTZ,       MVT::v32i8,  18 },
    { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::FSQRT, MVT::f32,   18 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64,  5 },
    { ISD::BITREVERSE, MVT::v4i32,  5 },
    { ISD::BITREVERSE, MVT::v8i16,  5 },
    { ISD::BITREVERSE, MVT::v16i8,  5 },
    { ISD::BSWAP,      MVT::v2i64,  1 },
    { ISD::BSWAP,      MVT::v4i32,  1 },
    { ISD::BSWAP,      MVT::v8i16,  1 },
    { ISD::CTLZ,       MVT::v2i64, 23 },
    { ISD::CTLZ,       MVT::v4i32, 18 },
    { ISD::CTLZ,       MVT::v8i16, 14 },
    { ISD::CTLZ,       MVT::v16i8,  9 },
    { ISD::CTPOP,      MVT::v2i64,  7 },
    { ISD::CTPOP,      MVT::v4i32, 11 },
    { ISD::CTPOP,      MVT::v8i16,  9 },
    { ISD::CTPOP,      MVT::v16i8,  6 },
    { ISD::CTTZ,       MVT::v2i64, 10 },
    { ISD::CTTZ,       MVT::v4i32, 14 },
    { ISD::CTTZ,       MVT::v8i16, 12 },
    { ISD::CTTZ,       MVT::v16i8,  9 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::BSWAP, MVT::v2i64,  7 },
    { ISD::BSWAP, MVT::v4i32,  7 },
    { ISD::BSWAP, MVT::v8i16,  7 },
    { ISD::CTLZ,  MVT::v2i64, 25 },
    { ISD::CTLZ,  MVT::v4i32, 26 },
    { ISD::CTLZ,  MVT::v8i16, 20 },
    { ISD::CTLZ,  MVT::v16i8, 17 },
    { ISD::CTPOP, MVT::v2i64, 12 },
    { ISD::CTPOP, MVT::v4i32, 15 },
    { ISD::CTPOP, MVT::v8i16, 13 },
    { ISD::CTPOP, MVT::v16i8, 10 },
    { ISD::CTTZ,  MVT::v2i64, 14 },
    { ISD::CTTZ,  MVT::v4i32, 18 },
    { ISD::CTTZ,  MVT::v8i16, 16 },
    { ISD::CTTZ,  MVT::v16i8, 13 },
    { ISD::FSQRT, MVT::f64,   32 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::FSQRT, MVT::f32,   28 }, // Pentium III from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
  };
  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  case Intrinsic::ctlz:
    ISD = ISD::CTLZ;
    break;
  case Intrinsic::ctpop:
    ISD = ISD::CTPOP;
    break;
  case Intrinsic::cttz:
    ISD = ISD::CTTZ;
    break;
  case Intrinsic::sqrt:
    ISD = ISD::FSQRT;
    break;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
  MVT MTy = LT.second;
  // Attempt to lookup cost.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF);
}
int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Value *> Args,
                                      FastMathFlags FMF) {
  return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF);
}
int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  Type *ScalarType = Val->getScalarType();

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;
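    // For example (illustrative): extracting lane 9 of a <16 x i32> on a
    // target whose widest legal vector is v4i32 leaves the value in the third
    // v4i32 piece, and the normalized in-register index is 9 % 4 == 1.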
    // Floating point scalars are already located in index #0.
    if (ScalarType->isFloatingPointTy() && Index == 0)
      return 0;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  int RegisterFileMoveCost = 0;
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost = 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}
int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
  // proxy for a double-pumped AVX memory interface such as on Sandybridge.
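  // For example (illustrative): a single <8 x float> access is one 32-byte
  // load/store unit, so it costs 1 here and 2 on subtargets where unaligned
  // 32-byte memory operations are slow.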
  if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
    Cost *= 2;

  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For a scalar, take the regular (unmasked) cost.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }
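
  // Illustrative sketch of the scalarized path above: an illegal masked load
  // of <8 x i32> pays for 8 scalar loads, 8 element inserts, 8 mask extracts,
  // and 8 compare+branch pairs to guard each lane.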
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires expand/truncate for data and a shuffle for mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }

  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}

int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that measurement as the cost.
  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },  // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,   5 },
  };
  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::FADD,  MVT::v4f64,   5 },
    { ISD::FADD,  MVT::v8f32,   7 },
    { ISD::ADD,   MVT::v2i64,   1 },  // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,   5 },  // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,   5 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };
  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },  // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,   4 },  // The data reported by the IACA tool is "4.3".
  };
  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   3 },
    { ISD::FADD,  MVT::v4f64,   3 },
    { ISD::FADD,  MVT::v8f32,   4 },
    { ISD::ADD,   MVT::v2i64,   1 },  // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "2.8".
    { ISD::ADD,   MVT::v4i64,   3 },
    { ISD::ADD,   MVT::v8i16,   4 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };
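
  // Worked example (illustrative): a non-pairwise FADD reduction of
  // <8 x float> on an AVX machine hits the AVX1 no-pairwise entry for v8f32
  // (cost 4); the type is already legal, so LT.first == 1 and the total is 4.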
  if (IsPairwise) {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

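// Illustrative buckets for the helper above: 0 is free (a zeroing idiom),
// values that fit in a sign-extended 32-bit immediate cost one basic
// instruction, and anything wider is assumed to need a full 64-bit move.
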
int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // Fixme: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

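// For example (an illustrative sketch): an i128 immediate whose two 64-bit
// chunks both fall outside the signed 32-bit range is charged
// 2 * (2 * TCC_Basic) = 4 by the loop above.
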
int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32-bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

// Return an average cost of a Gather / Scatter instruction; may be improved
// later.
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
                                unsigned Alignment, unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = SrcVTy->getVectorNumElements();

  // Try to reduce the index size from 64 bit (the default for GEP) to 32 bit.
  // This is essential for VF 16: if the index can't be reduced to 32 bits, the
  // operation needs 16 x 64-bit indices, which do not fit in a single zmm
  // register and force the operation to be split. Also check that the base
  // pointer is the same for all lanes, and that there's at most one variable
  // index.
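  // For scale (illustrative): with VF == 16 and 64-bit indices, the index
  // vector is <16 x i64>, which occupies two 512-bit registers, while 32-bit
  // indices fit a single <16 x i32> zmm register, so no split is needed.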
  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (IndxTy->isVectorTy())
        IndxTy = IndxTy->getVectorElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Try to reduce IndexSize to 32 bits for vectors of 16 elements.
  // By default the IndexSize is equal to the pointer size.
  unsigned IndexSize = (VF >= 16) ? getIndexSizeInBits(Ptr, DL)
                                  : DL.getPointerSizeInBits();

  Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
                                                    IndexSize), VF);
  std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
  if (SplitFactor > 1) {
    // Handle splitting of a vector of pointers.
    Type *SplitSrcTy =
        VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = 2;
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           Alignment, AddressSpace);
}

/// Return the cost of full scalarization of a gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - pointer[s] address space.
///
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, unsigned Alignment,
                                unsigned AddressSpace) {
  unsigned VF = SrcVTy->getVectorNumElements();

  int MaskUnpackCost = 0;
  if (VariableMask) {
    VectorType *MaskTy =
        VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          Alignment, AddressSpace);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}

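// Putting the pieces together (an illustrative sketch): a scalarized gather
// of <4 x double> with a variable mask pays for 4 scalar loads, 4 element
// inserts, the mask extraction overhead, and 4 compare+branch pairs.
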
/// Calculate the cost of a Gather / Scatter operation.
int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
                                       Value *Ptr, bool VariableMask,
                                       unsigned Alignment) {
  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = SrcVTy->getVectorNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
    Scalarize = true;
  // Gather / Scatter for vectors of 2 elements is not profitable on KNL / SKX.
  // A vector-4 gather/scatter instruction does not exist on KNL.
  // We could extend it to 8 elements, but zeroing the upper bits of
  // the mask vector would add more instructions. Right now we give the scalar
  // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter instruction
  // is better in the VariableMask case.
  if (VF == 2 || (VF == 4 && !ST->hasVLX()))
    Scalarize = true;

  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
         ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
}

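// For instance (illustrative): a masked load of <8 x float> is legal with
// plain AVX, while 8-bit and 16-bit elements need AVX-512 BW.
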
bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // scalar element.
  // Later on, the cost model will estimate usage of this intrinsic based on
  // the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  if (isa<VectorType>(DataTy) && !isPowerOf2_32(DataTy->getVectorNumElements()))
    return false;
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  // AVX-512 allows gather and scatter.
  return (DataWidth == 32 || DataWidth == 64) && ST->hasAVX512();
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  return isLegalMaskedGather(DataType);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}

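// Illustrative reading of the check above (assuming the usual X86 feature
// implications): a callee built for sse4.2 can be inlined into a caller
// built for avx2, whose feature set includes sse4.2, but a callee requiring
// avx512f cannot be inlined into that caller.
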
bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom() || ST->isSLM());
}

// Get an estimate for interleaved load/store operations and strided loads.
// \p Indices contains the indices for a strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace) {

  // VecTy for an interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  if (Opcode == Instruction::Load) {
    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
                                     VecTy->getVectorNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
    // When we have more than one destination, we need additional instructions
    // to keep sources.
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
  // We need additional instructions to keep sources.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}

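// Worked example for the store path above (illustrative): storing an
// interleaved <16 x i32> with Factor == 4 on AVX-512 legalizes to a single
// v16i32 memop (NumOfMemOps == 1), needs 3 two-source shuffles per store
// plus 1 move, so Cost = 1 * (MemOpCost + 3 * ShuffleCost) + 1.
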
int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool &RequiresBW) {
    RequiresBW = false;
    Type *EltTy = VecTy->getVectorElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8)) {
      RequiresBW = true;
      return true;
    }
    return false;
  };
  bool RequiresBW;
  bool HasAVX512Solution = isSupportedOnAVX512(VecTy, RequiresBW);
  if (ST->hasAVX512() && HasAVX512Solution && (!RequiresBW || ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace);
  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}