1 //===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// This file implements a TargetTransformInfo analysis pass specific to the
11 /// X86 target machine. It uses the target's detailed information to provide
12 /// more precise answers to certain TTI queries, while letting the target
13 /// independent and default TTI implementations handle the rest.
15 //===----------------------------------------------------------------------===//
16 /// About Cost Model numbers used below it's necessary to say the following:
17 /// the numbers correspond to some "generic" X86 CPU instead of usage of
18 /// concrete CPU model. Usually the numbers correspond to CPU where the feature
19 /// apeared at the first time. For example, if we do Subtarget.hasSSE42() in
20 /// the lookups below the cost is based on Nehalem as that was the first CPU
21 /// to support that feature level and thus has most likely the worst case cost.
22 /// Some examples of other technologies/CPUs:
23 /// SSE 3 - Pentium4 / Athlon64
26 /// AVX - Sandy Bridge
28 /// AVX-512 - Xeon Phi / Skylake
29 /// And some examples of instruction target dependent costs (latency)
30 /// divss sqrtss rsqrtss
32 /// Piledriver 9-24 13-15 5
34 /// Pentium II,III 18 30 2
35 /// Nehalem 7-14 7-18 3
36 /// Haswell 10-13 11 5
37 /// TODO: Develop and implement the target dependent cost model and
38 /// specialize cost numbers for different Cost Model Targets such as throughput,
39 /// code size, latency and uop count.
40 //===----------------------------------------------------------------------===//
42 #include "X86TargetTransformInfo.h"
43 #include "llvm/Analysis/TargetTransformInfo.h"
44 #include "llvm/CodeGen/BasicTTIImpl.h"
45 #include "llvm/CodeGen/CostTable.h"
46 #include "llvm/CodeGen/TargetLowering.h"
47 #include "llvm/IR/IntrinsicInst.h"
48 #include "llvm/Support/Debug.h"
52 #define DEBUG_TYPE "x86tti"
54 //===----------------------------------------------------------------------===//
58 //===----------------------------------------------------------------------===//
60 TargetTransformInfo::PopcntSupportKind
61 X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
62 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
63 // TODO: Currently the __builtin_popcount() implementation using SSE3
64 // instructions is inefficient. Once the problem is fixed, we should
65 // call ST->hasSSE3() instead of ST->hasPOPCNT().
66 return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
69 llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
70 TargetTransformInfo::CacheLevel Level) const {
72 case TargetTransformInfo::CacheLevel::L1D:
82 return 32 * 1024; // 32 KByte
83 case TargetTransformInfo::CacheLevel::L2D:
93 return 256 * 1024; // 256 KByte
96 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
99 llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
100 TargetTransformInfo::CacheLevel Level) const {
111 case TargetTransformInfo::CacheLevel::L1D:
113 case TargetTransformInfo::CacheLevel::L2D:
117 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
120 unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
121 if (Vector && !ST->hasSSE1())
125 if (Vector && ST->hasAVX512())
132 unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
133 unsigned PreferVectorWidth = ST->getPreferVectorWidth();
135 if (ST->hasAVX512() && PreferVectorWidth >= 512)
137 if (ST->hasAVX() && PreferVectorWidth >= 256)
139 if (ST->hasSSE1() && PreferVectorWidth >= 128)
150 unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
151 return getRegisterBitWidth(true);
154 unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
155 // If the loop will not be vectorized, don't interleave the loop.
156 // Let regular unroll to unroll the loop, which saves the overflow
157 // check and memory check cost.
164 // Sandybridge and Haswell have multiple execution ports and pipelined
172 int X86TTIImpl::getArithmeticInstrCost(
173 unsigned Opcode, Type *Ty,
174 TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
175 TTI::OperandValueProperties Opd1PropInfo,
176 TTI::OperandValueProperties Opd2PropInfo,
177 ArrayRef<const Value *> Args) {
178 // Legalize the type.
179 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
181 int ISD = TLI->InstructionOpcodeToISD(Opcode);
182 assert(ISD && "Invalid opcode");
184 static const CostTblEntry GLMCostTable[] = {
185 { ISD::FDIV, MVT::f32, 18 }, // divss
186 { ISD::FDIV, MVT::v4f32, 35 }, // divps
187 { ISD::FDIV, MVT::f64, 33 }, // divsd
188 { ISD::FDIV, MVT::v2f64, 65 }, // divpd
192 if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
194 return LT.first * Entry->Cost;
196 static const CostTblEntry SLMCostTable[] = {
197 { ISD::MUL, MVT::v4i32, 11 }, // pmulld
198 { ISD::MUL, MVT::v8i16, 2 }, // pmullw
199 { ISD::MUL, MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
200 { ISD::FMUL, MVT::f64, 2 }, // mulsd
201 { ISD::FMUL, MVT::v2f64, 4 }, // mulpd
202 { ISD::FMUL, MVT::v4f32, 2 }, // mulps
203 { ISD::FDIV, MVT::f32, 17 }, // divss
204 { ISD::FDIV, MVT::v4f32, 39 }, // divps
205 { ISD::FDIV, MVT::f64, 32 }, // divsd
206 { ISD::FDIV, MVT::v2f64, 69 }, // divpd
207 { ISD::FADD, MVT::v2f64, 2 }, // addpd
208 { ISD::FSUB, MVT::v2f64, 2 }, // subpd
209 // v2i64/v4i64 mul is custom lowered as a series of long:
210 // multiplies(3), shifts(3) and adds(2)
211 // slm muldq version throughput is 2 and addq throughput 4
212 // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
213 // 3X4 (addq throughput) = 17
214 { ISD::MUL, MVT::v2i64, 17 },
215 // slm addq\subq throughput is 4
216 { ISD::ADD, MVT::v2i64, 4 },
217 { ISD::SUB, MVT::v2i64, 4 },
221 if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
222 // Check if the operands can be shrinked into a smaller datatype.
223 bool Op1Signed = false;
224 unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
225 bool Op2Signed = false;
226 unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
228 bool signedMode = Op1Signed | Op2Signed;
229 unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
232 return LT.first * 3; // pmullw/sext
233 if (!signedMode && OpMinSize <= 8)
234 return LT.first * 3; // pmullw/zext
236 return LT.first * 5; // pmullw/pmulhw/pshuf
237 if (!signedMode && OpMinSize <= 16)
238 return LT.first * 5; // pmullw/pmulhw/pshuf
241 if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
243 return LT.first * Entry->Cost;
247 if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
249 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
250 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
251 Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
252 if (ISD == ISD::SDIV || ISD == ISD::SREM) {
253 // On X86, vector signed division by constants power-of-two are
254 // normally expanded to the sequence SRA + SRL + ADD + SRA.
255 // The OperandValue properties may not be the same as that of the previous
256 // operation; conservatively assume OP_None.
258 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info, Op2Info,
259 TargetTransformInfo::OP_None,
260 TargetTransformInfo::OP_None);
261 Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
262 TargetTransformInfo::OP_None,
263 TargetTransformInfo::OP_None);
264 Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
265 TargetTransformInfo::OP_None,
266 TargetTransformInfo::OP_None);
268 if (ISD == ISD::SREM) {
269 // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
270 Cost += getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info);
271 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Op1Info, Op2Info);
277 // Vector unsigned division/remainder will be simplified to shifts/masks.
278 if (ISD == ISD::UDIV)
279 return getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
280 TargetTransformInfo::OP_None,
281 TargetTransformInfo::OP_None);
283 if (ISD == ISD::UREM)
284 return getArithmeticInstrCost(Instruction::And, Ty, Op1Info, Op2Info,
285 TargetTransformInfo::OP_None,
286 TargetTransformInfo::OP_None);
289 static const CostTblEntry AVX512BWUniformConstCostTable[] = {
290 { ISD::SHL, MVT::v64i8, 2 }, // psllw + pand.
291 { ISD::SRL, MVT::v64i8, 2 }, // psrlw + pand.
292 { ISD::SRA, MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb.
295 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
297 if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
299 return LT.first * Entry->Cost;
302 static const CostTblEntry AVX512UniformConstCostTable[] = {
303 { ISD::SRA, MVT::v2i64, 1 },
304 { ISD::SRA, MVT::v4i64, 1 },
305 { ISD::SRA, MVT::v8i64, 1 },
308 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
310 if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
312 return LT.first * Entry->Cost;
315 static const CostTblEntry AVX2UniformConstCostTable[] = {
316 { ISD::SHL, MVT::v32i8, 2 }, // psllw + pand.
317 { ISD::SRL, MVT::v32i8, 2 }, // psrlw + pand.
318 { ISD::SRA, MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb.
320 { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle.
323 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
325 if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
327 return LT.first * Entry->Cost;
330 static const CostTblEntry SSE2UniformConstCostTable[] = {
331 { ISD::SHL, MVT::v16i8, 2 }, // psllw + pand.
332 { ISD::SRL, MVT::v16i8, 2 }, // psrlw + pand.
333 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb.
335 { ISD::SHL, MVT::v32i8, 4+2 }, // 2*(psllw + pand) + split.
336 { ISD::SRL, MVT::v32i8, 4+2 }, // 2*(psrlw + pand) + split.
337 { ISD::SRA, MVT::v32i8, 8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
340 // XOP has faster vXi8 shifts.
341 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
342 ST->hasSSE2() && !ST->hasXOP()) {
343 if (const auto *Entry =
344 CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
345 return LT.first * Entry->Cost;
348 static const CostTblEntry AVX512BWConstCostTable[] = {
349 { ISD::SDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence
350 { ISD::SREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
351 { ISD::UDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence
352 { ISD::UREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
353 { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence
354 { ISD::SREM, MVT::v32i16, 8 }, // vpmulhw+mul+sub sequence
355 { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence
356 { ISD::UREM, MVT::v32i16, 8 }, // vpmulhuw+mul+sub sequence
359 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
360 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
362 if (const auto *Entry =
363 CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
364 return LT.first * Entry->Cost;
367 static const CostTblEntry AVX512ConstCostTable[] = {
368 { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
369 { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
370 { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
371 { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
374 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
375 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
377 if (const auto *Entry =
378 CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
379 return LT.first * Entry->Cost;
382 static const CostTblEntry AVX2ConstCostTable[] = {
383 { ISD::SDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence
384 { ISD::SREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
385 { ISD::UDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence
386 { ISD::UREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
387 { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence
388 { ISD::SREM, MVT::v16i16, 8 }, // vpmulhw+mul+sub sequence
389 { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence
390 { ISD::UREM, MVT::v16i16, 8 }, // vpmulhuw+mul+sub sequence
391 { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence
392 { ISD::SREM, MVT::v8i32, 19 }, // vpmuldq+mul+sub sequence
393 { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence
394 { ISD::UREM, MVT::v8i32, 19 }, // vpmuludq+mul+sub sequence
397 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
398 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
400 if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
401 return LT.first * Entry->Cost;
404 static const CostTblEntry SSE2ConstCostTable[] = {
405 { ISD::SDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split.
406 { ISD::SREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
407 { ISD::SDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence
408 { ISD::SREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
409 { ISD::UDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split.
410 { ISD::UREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
411 { ISD::UDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence
412 { ISD::UREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
413 { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
414 { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
415 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
416 { ISD::SREM, MVT::v8i16, 8 }, // pmulhw+mul+sub sequence
417 { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
418 { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
419 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
420 { ISD::UREM, MVT::v8i16, 8 }, // pmulhuw+mul+sub sequence
421 { ISD::SDIV, MVT::v8i32, 38+2 }, // 2*pmuludq sequence + split.
422 { ISD::SREM, MVT::v8i32, 48+2 }, // 2*pmuludq+mul+sub sequence + split.
423 { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
424 { ISD::SREM, MVT::v4i32, 24 }, // pmuludq+mul+sub sequence
425 { ISD::UDIV, MVT::v8i32, 30+2 }, // 2*pmuludq sequence + split.
426 { ISD::UREM, MVT::v8i32, 40+2 }, // 2*pmuludq+mul+sub sequence + split.
427 { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
428 { ISD::UREM, MVT::v4i32, 20 }, // pmuludq+mul+sub sequence
431 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
432 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
435 if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
436 return LT.first * 32;
437 if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
438 return LT.first * 38;
439 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
440 return LT.first * 15;
441 if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
442 return LT.first * 20;
444 if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
445 return LT.first * Entry->Cost;
448 static const CostTblEntry AVX2UniformCostTable[] = {
449 // Uniform splats are cheaper for the following instructions.
450 { ISD::SHL, MVT::v16i16, 1 }, // psllw.
451 { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
452 { ISD::SRA, MVT::v16i16, 1 }, // psraw.
456 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
457 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
458 if (const auto *Entry =
459 CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
460 return LT.first * Entry->Cost;
463 static const CostTblEntry SSE2UniformCostTable[] = {
464 // Uniform splats are cheaper for the following instructions.
465 { ISD::SHL, MVT::v8i16, 1 }, // psllw.
466 { ISD::SHL, MVT::v4i32, 1 }, // pslld
467 { ISD::SHL, MVT::v2i64, 1 }, // psllq.
469 { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
470 { ISD::SRL, MVT::v4i32, 1 }, // psrld.
471 { ISD::SRL, MVT::v2i64, 1 }, // psrlq.
473 { ISD::SRA, MVT::v8i16, 1 }, // psraw.
474 { ISD::SRA, MVT::v4i32, 1 }, // psrad.
478 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
479 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
480 if (const auto *Entry =
481 CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
482 return LT.first * Entry->Cost;
485 static const CostTblEntry AVX512DQCostTable[] = {
486 { ISD::MUL, MVT::v2i64, 1 },
487 { ISD::MUL, MVT::v4i64, 1 },
488 { ISD::MUL, MVT::v8i64, 1 }
491 // Look for AVX512DQ lowering tricks for custom cases.
493 if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
494 return LT.first * Entry->Cost;
496 static const CostTblEntry AVX512BWCostTable[] = {
497 { ISD::SHL, MVT::v8i16, 1 }, // vpsllvw
498 { ISD::SRL, MVT::v8i16, 1 }, // vpsrlvw
499 { ISD::SRA, MVT::v8i16, 1 }, // vpsravw
501 { ISD::SHL, MVT::v16i16, 1 }, // vpsllvw
502 { ISD::SRL, MVT::v16i16, 1 }, // vpsrlvw
503 { ISD::SRA, MVT::v16i16, 1 }, // vpsravw
505 { ISD::SHL, MVT::v32i16, 1 }, // vpsllvw
506 { ISD::SRL, MVT::v32i16, 1 }, // vpsrlvw
507 { ISD::SRA, MVT::v32i16, 1 }, // vpsravw
509 { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence.
510 { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence.
511 { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence.
513 { ISD::MUL, MVT::v64i8, 11 }, // extend/pmullw/trunc sequence.
514 { ISD::MUL, MVT::v32i8, 4 }, // extend/pmullw/trunc sequence.
515 { ISD::MUL, MVT::v16i8, 4 }, // extend/pmullw/trunc sequence.
518 // Look for AVX512BW lowering tricks for custom cases.
520 if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
521 return LT.first * Entry->Cost;
523 static const CostTblEntry AVX512CostTable[] = {
524 { ISD::SHL, MVT::v16i32, 1 },
525 { ISD::SRL, MVT::v16i32, 1 },
526 { ISD::SRA, MVT::v16i32, 1 },
528 { ISD::SHL, MVT::v8i64, 1 },
529 { ISD::SRL, MVT::v8i64, 1 },
531 { ISD::SRA, MVT::v2i64, 1 },
532 { ISD::SRA, MVT::v4i64, 1 },
533 { ISD::SRA, MVT::v8i64, 1 },
535 { ISD::MUL, MVT::v32i8, 13 }, // extend/pmullw/trunc sequence.
536 { ISD::MUL, MVT::v16i8, 5 }, // extend/pmullw/trunc sequence.
537 { ISD::MUL, MVT::v16i32, 1 }, // pmulld (Skylake from agner.org)
538 { ISD::MUL, MVT::v8i32, 1 }, // pmulld (Skylake from agner.org)
539 { ISD::MUL, MVT::v4i32, 1 }, // pmulld (Skylake from agner.org)
540 { ISD::MUL, MVT::v8i64, 8 }, // 3*pmuludq/3*shift/2*add
542 { ISD::FADD, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
543 { ISD::FSUB, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
544 { ISD::FMUL, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
546 { ISD::FADD, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
547 { ISD::FSUB, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
548 { ISD::FMUL, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
552 if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
553 return LT.first * Entry->Cost;
555 static const CostTblEntry AVX2ShiftCostTable[] = {
556 // Shifts on v4i64/v8i32 on AVX2 is legal even though we declare to
557 // customize them to detect the cases where shift amount is a scalar one.
558 { ISD::SHL, MVT::v4i32, 1 },
559 { ISD::SRL, MVT::v4i32, 1 },
560 { ISD::SRA, MVT::v4i32, 1 },
561 { ISD::SHL, MVT::v8i32, 1 },
562 { ISD::SRL, MVT::v8i32, 1 },
563 { ISD::SRA, MVT::v8i32, 1 },
564 { ISD::SHL, MVT::v2i64, 1 },
565 { ISD::SRL, MVT::v2i64, 1 },
566 { ISD::SHL, MVT::v4i64, 1 },
567 { ISD::SRL, MVT::v4i64, 1 },
570 // Look for AVX2 lowering tricks.
572 if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
573 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
574 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
575 // On AVX2, a packed v16i16 shift left by a constant build_vector
576 // is lowered into a vector multiply (vpmullw).
577 return getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info,
578 TargetTransformInfo::OP_None,
579 TargetTransformInfo::OP_None);
581 if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
582 return LT.first * Entry->Cost;
585 static const CostTblEntry XOPShiftCostTable[] = {
586 // 128bit shifts take 1cy, but right shifts require negation beforehand.
587 { ISD::SHL, MVT::v16i8, 1 },
588 { ISD::SRL, MVT::v16i8, 2 },
589 { ISD::SRA, MVT::v16i8, 2 },
590 { ISD::SHL, MVT::v8i16, 1 },
591 { ISD::SRL, MVT::v8i16, 2 },
592 { ISD::SRA, MVT::v8i16, 2 },
593 { ISD::SHL, MVT::v4i32, 1 },
594 { ISD::SRL, MVT::v4i32, 2 },
595 { ISD::SRA, MVT::v4i32, 2 },
596 { ISD::SHL, MVT::v2i64, 1 },
597 { ISD::SRL, MVT::v2i64, 2 },
598 { ISD::SRA, MVT::v2i64, 2 },
599 // 256bit shifts require splitting if AVX2 didn't catch them above.
600 { ISD::SHL, MVT::v32i8, 2+2 },
601 { ISD::SRL, MVT::v32i8, 4+2 },
602 { ISD::SRA, MVT::v32i8, 4+2 },
603 { ISD::SHL, MVT::v16i16, 2+2 },
604 { ISD::SRL, MVT::v16i16, 4+2 },
605 { ISD::SRA, MVT::v16i16, 4+2 },
606 { ISD::SHL, MVT::v8i32, 2+2 },
607 { ISD::SRL, MVT::v8i32, 4+2 },
608 { ISD::SRA, MVT::v8i32, 4+2 },
609 { ISD::SHL, MVT::v4i64, 2+2 },
610 { ISD::SRL, MVT::v4i64, 4+2 },
611 { ISD::SRA, MVT::v4i64, 4+2 },
614 // Look for XOP lowering tricks.
616 // If the right shift is constant then we'll fold the negation so
617 // it's as cheap as a left shift.
619 if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
620 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
621 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
623 if (const auto *Entry =
624 CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
625 return LT.first * Entry->Cost;
628 static const CostTblEntry SSE2UniformShiftCostTable[] = {
629 // Uniform splats are cheaper for the following instructions.
630 { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split.
631 { ISD::SHL, MVT::v8i32, 2+2 }, // 2*pslld + split.
632 { ISD::SHL, MVT::v4i64, 2+2 }, // 2*psllq + split.
634 { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split.
635 { ISD::SRL, MVT::v8i32, 2+2 }, // 2*psrld + split.
636 { ISD::SRL, MVT::v4i64, 2+2 }, // 2*psrlq + split.
638 { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split.
639 { ISD::SRA, MVT::v8i32, 2+2 }, // 2*psrad + split.
640 { ISD::SRA, MVT::v2i64, 4 }, // 2*psrad + shuffle.
641 { ISD::SRA, MVT::v4i64, 8+2 }, // 2*(2*psrad + shuffle) + split.
645 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
646 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
648 // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
649 if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
650 return LT.first * 4; // 2*psrad + shuffle.
652 if (const auto *Entry =
653 CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
654 return LT.first * Entry->Cost;
657 if (ISD == ISD::SHL &&
658 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
660 // Vector shift left by non uniform constant can be lowered
661 // into vector multiply.
662 if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
663 ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
667 static const CostTblEntry AVX2CostTable[] = {
668 { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence.
669 { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
671 { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence.
672 { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
674 { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence.
675 { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
676 { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence.
677 { ISD::SRA, MVT::v4i64, 4 }, // srl/xor/sub sequence.
679 { ISD::SUB, MVT::v32i8, 1 }, // psubb
680 { ISD::ADD, MVT::v32i8, 1 }, // paddb
681 { ISD::SUB, MVT::v16i16, 1 }, // psubw
682 { ISD::ADD, MVT::v16i16, 1 }, // paddw
683 { ISD::SUB, MVT::v8i32, 1 }, // psubd
684 { ISD::ADD, MVT::v8i32, 1 }, // paddd
685 { ISD::SUB, MVT::v4i64, 1 }, // psubq
686 { ISD::ADD, MVT::v4i64, 1 }, // paddq
688 { ISD::MUL, MVT::v32i8, 17 }, // extend/pmullw/trunc sequence.
689 { ISD::MUL, MVT::v16i8, 7 }, // extend/pmullw/trunc sequence.
690 { ISD::MUL, MVT::v16i16, 1 }, // pmullw
691 { ISD::MUL, MVT::v8i32, 2 }, // pmulld (Haswell from agner.org)
692 { ISD::MUL, MVT::v4i64, 8 }, // 3*pmuludq/3*shift/2*add
694 { ISD::FADD, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
695 { ISD::FADD, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
696 { ISD::FSUB, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
697 { ISD::FSUB, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
698 { ISD::FMUL, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
699 { ISD::FMUL, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
701 { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/
702 { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
703 { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
704 { ISD::FDIV, MVT::f64, 14 }, // Haswell from http://www.agner.org/
705 { ISD::FDIV, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
706 { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
709 // Look for AVX2 lowering tricks for custom cases.
711 if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
712 return LT.first * Entry->Cost;
714 static const CostTblEntry AVX1CostTable[] = {
715 // We don't have to scalarize unsupported ops. We can issue two half-sized
716 // operations and we only need to extract the upper YMM half.
717 // Two ops + 1 extract + 1 insert = 4.
718 { ISD::MUL, MVT::v16i16, 4 },
719 { ISD::MUL, MVT::v8i32, 4 },
720 { ISD::SUB, MVT::v32i8, 4 },
721 { ISD::ADD, MVT::v32i8, 4 },
722 { ISD::SUB, MVT::v16i16, 4 },
723 { ISD::ADD, MVT::v16i16, 4 },
724 { ISD::SUB, MVT::v8i32, 4 },
725 { ISD::ADD, MVT::v8i32, 4 },
726 { ISD::SUB, MVT::v4i64, 4 },
727 { ISD::ADD, MVT::v4i64, 4 },
729 // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
730 // are lowered as a series of long multiplies(3), shifts(3) and adds(2)
731 // Because we believe v4i64 to be a legal type, we must also include the
732 // extract+insert in the cost table. Therefore, the cost here is 18
734 { ISD::MUL, MVT::v4i64, 18 },
736 { ISD::MUL, MVT::v32i8, 26 }, // extend/pmullw/trunc sequence.
738 { ISD::FDIV, MVT::f32, 14 }, // SNB from http://www.agner.org/
739 { ISD::FDIV, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
740 { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
741 { ISD::FDIV, MVT::f64, 22 }, // SNB from http://www.agner.org/
742 { ISD::FDIV, MVT::v2f64, 22 }, // SNB from http://www.agner.org/
743 { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/
747 if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
748 return LT.first * Entry->Cost;
750 static const CostTblEntry SSE42CostTable[] = {
751 { ISD::FADD, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
752 { ISD::FADD, MVT::f32, 1 }, // Nehalem from http://www.agner.org/
753 { ISD::FADD, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
754 { ISD::FADD, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
756 { ISD::FSUB, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
757 { ISD::FSUB, MVT::f32 , 1 }, // Nehalem from http://www.agner.org/
758 { ISD::FSUB, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
759 { ISD::FSUB, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
761 { ISD::FMUL, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
762 { ISD::FMUL, MVT::f32, 1 }, // Nehalem from http://www.agner.org/
763 { ISD::FMUL, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
764 { ISD::FMUL, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
766 { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/
767 { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
768 { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/
769 { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
773 if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
774 return LT.first * Entry->Cost;
776 static const CostTblEntry SSE41CostTable[] = {
777 { ISD::SHL, MVT::v16i8, 11 }, // pblendvb sequence.
778 { ISD::SHL, MVT::v32i8, 2*11+2 }, // pblendvb sequence + split.
779 { ISD::SHL, MVT::v8i16, 14 }, // pblendvb sequence.
780 { ISD::SHL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
781 { ISD::SHL, MVT::v4i32, 4 }, // pslld/paddd/cvttps2dq/pmulld
782 { ISD::SHL, MVT::v8i32, 2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split
784 { ISD::SRL, MVT::v16i8, 12 }, // pblendvb sequence.
785 { ISD::SRL, MVT::v32i8, 2*12+2 }, // pblendvb sequence + split.
786 { ISD::SRL, MVT::v8i16, 14 }, // pblendvb sequence.
787 { ISD::SRL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
788 { ISD::SRL, MVT::v4i32, 11 }, // Shift each lane + blend.
789 { ISD::SRL, MVT::v8i32, 2*11+2 }, // Shift each lane + blend + split.
791 { ISD::SRA, MVT::v16i8, 24 }, // pblendvb sequence.
792 { ISD::SRA, MVT::v32i8, 2*24+2 }, // pblendvb sequence + split.
793 { ISD::SRA, MVT::v8i16, 14 }, // pblendvb sequence.
794 { ISD::SRA, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
795 { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend.
796 { ISD::SRA, MVT::v8i32, 2*12+2 }, // Shift each lane + blend + split.
798 { ISD::MUL, MVT::v4i32, 2 } // pmulld (Nehalem from agner.org)
802 if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
803 return LT.first * Entry->Cost;
805 static const CostTblEntry SSE2CostTable[] = {
806 // We don't correctly identify costs of casts because they are marked as
808 { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence.
809 { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence.
810 { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
811 { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence.
812 { ISD::SHL, MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split.
814 { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence.
815 { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence.
816 { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend.
817 { ISD::SRL, MVT::v2i64, 4 }, // splat+shuffle sequence.
818 { ISD::SRL, MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split.
820 { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence.
821 { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence.
822 { ISD::SRA, MVT::v4i32, 16 }, // Shift each lane + blend.
823 { ISD::SRA, MVT::v2i64, 12 }, // srl/xor/sub sequence.
824 { ISD::SRA, MVT::v4i64, 2*12+2 }, // srl/xor/sub sequence+split.
826 { ISD::MUL, MVT::v16i8, 12 }, // extend/pmullw/trunc sequence.
827 { ISD::MUL, MVT::v8i16, 1 }, // pmullw
828 { ISD::MUL, MVT::v4i32, 6 }, // 3*pmuludq/4*shuffle
829 { ISD::MUL, MVT::v2i64, 8 }, // 3*pmuludq/3*shift/2*add
831 { ISD::FDIV, MVT::f32, 23 }, // Pentium IV from http://www.agner.org/
832 { ISD::FDIV, MVT::v4f32, 39 }, // Pentium IV from http://www.agner.org/
833 { ISD::FDIV, MVT::f64, 38 }, // Pentium IV from http://www.agner.org/
834 { ISD::FDIV, MVT::v2f64, 69 }, // Pentium IV from http://www.agner.org/
836 { ISD::FADD, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/
837 { ISD::FADD, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/
839 { ISD::FSUB, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/
840 { ISD::FSUB, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/
844 if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
845 return LT.first * Entry->Cost;
847 static const CostTblEntry SSE1CostTable[] = {
848 { ISD::FDIV, MVT::f32, 17 }, // Pentium III from http://www.agner.org/
849 { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
851 { ISD::FADD, MVT::f32, 1 }, // Pentium III from http://www.agner.org/
852 { ISD::FADD, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
854 { ISD::FSUB, MVT::f32, 1 }, // Pentium III from http://www.agner.org/
855 { ISD::FSUB, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
857 { ISD::ADD, MVT::i8, 1 }, // Pentium III from http://www.agner.org/
858 { ISD::ADD, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
859 { ISD::ADD, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
861 { ISD::SUB, MVT::i8, 1 }, // Pentium III from http://www.agner.org/
862 { ISD::SUB, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
863 { ISD::SUB, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
867 if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
868 return LT.first * Entry->Cost;
870 // It is not a good idea to vectorize division. We have to scalarize it and
871 // in the process we will often end up having to spilling regular
872 // registers. The overhead of division is going to dominate most kernels
873 // anyways so try hard to prevent vectorization of division - it is
874 // generally a bad idea. Assume somewhat arbitrarily that we have to be able
875 // to hide "20 cycles" for each lane.
876 if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
877 ISD == ISD::UDIV || ISD == ISD::UREM)) {
878 int ScalarCost = getArithmeticInstrCost(
879 Opcode, Ty->getScalarType(), Op1Info, Op2Info,
880 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
881 return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
884 // Fallback to the default implementation.
885 return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
888 int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
890 // 64-bit packed float vectors (v2f32) are widened to type v4f32.
891 // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
892 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
894 // Treat Transpose as 2-op shuffles - there's no difference in lowering.
895 if (Kind == TTI::SK_Transpose)
896 Kind = TTI::SK_PermuteTwoSrc;
898 // For Broadcasts we are splatting the first element from the first input
899 // register, so only need to reference that input and all the output
900 // registers are the same.
901 if (Kind == TTI::SK_Broadcast)
904 // Subvector extractions are free if they start at the beginning of a
905 // vector and cheap if the subvectors are aligned.
906 if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
907 int NumElts = LT.second.getVectorNumElements();
908 if ((Index % NumElts) == 0)
910 std::pair<int, MVT> SubLT = TLI->getTypeLegalizationCost(DL, SubTp);
911 if (SubLT.second.isVector()) {
912 int NumSubElts = SubLT.second.getVectorNumElements();
913 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
918 // We are going to permute multiple sources and the result will be in multiple
919 // destinations. Providing an accurate cost only for splits where the element
920 // type remains the same.
921 if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
922 MVT LegalVT = LT.second;
923 if (LegalVT.isVector() &&
924 LegalVT.getVectorElementType().getSizeInBits() ==
925 Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
926 LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {
928 unsigned VecTySize = DL.getTypeStoreSize(Tp);
929 unsigned LegalVTSize = LegalVT.getStoreSize();
930 // Number of source vectors after legalization:
931 unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
932 // Number of destination vectors after legalization:
933 unsigned NumOfDests = LT.first;
935 Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
936 LegalVT.getVectorNumElements());
938 unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
939 return NumOfShuffles *
940 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
943 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
946 // For 2-input shuffles, we must account for splitting the 2 inputs into many.
947 if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
948 // We assume that source and destination have the same vector type.
949 int NumOfDests = LT.first;
950 int NumOfShufflesPerDest = LT.first * 2 - 1;
951 LT.first = NumOfDests * NumOfShufflesPerDest;
954 static const CostTblEntry AVX512VBMIShuffleTbl[] = {
955 {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
956 {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
958 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
959 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
961 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 1}, // vpermt2b
962 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 1}, // vpermt2b
963 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1} // vpermt2b
967 if (const auto *Entry =
968 CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
969 return LT.first * Entry->Cost;
971 static const CostTblEntry AVX512BWShuffleTbl[] = {
972 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
973 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
975 {TTI::SK_Reverse, MVT::v32i16, 1}, // vpermw
976 {TTI::SK_Reverse, MVT::v16i16, 1}, // vpermw
977 {TTI::SK_Reverse, MVT::v64i8, 2}, // pshufb + vshufi64x2
979 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 1}, // vpermw
980 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 1}, // vpermw
981 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // vpermw
982 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8}, // extend to v32i16
983 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 3}, // vpermw + zext/trunc
985 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 1}, // vpermt2w
986 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 1}, // vpermt2w
987 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpermt2w
988 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 3}, // zext + vpermt2w + trunc
989 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
990 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3} // zext + vpermt2w + trunc
994 if (const auto *Entry =
995 CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
996 return LT.first * Entry->Cost;
998 static const CostTblEntry AVX512ShuffleTbl[] = {
999 {TTI::SK_Broadcast, MVT::v8f64, 1}, // vbroadcastpd
1000 {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1001 {TTI::SK_Broadcast, MVT::v8i64, 1}, // vpbroadcastq
1002 {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1004 {TTI::SK_Reverse, MVT::v8f64, 1}, // vpermpd
1005 {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1006 {TTI::SK_Reverse, MVT::v8i64, 1}, // vpermq
1007 {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1009 {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1}, // vpermpd
1010 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1011 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // vpermpd
1012 {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1013 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1014 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // vpermps
1015 {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1}, // vpermq
1016 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1017 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // vpermq
1018 {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1019 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1020 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // vpermd
1021 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1023 {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1}, // vpermt2pd
1024 {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1025 {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1}, // vpermt2q
1026 {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1027 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd
1028 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1}, // vpermt2ps
1029 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1}, // vpermt2q
1030 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1}, // vpermt2d
1031 {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // vpermt2pd
1032 {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1}, // vpermt2ps
1033 {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // vpermt2q
1034 {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1} // vpermt2d
1037 if (ST->hasAVX512())
1038 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1039 return LT.first * Entry->Cost;
1041 static const CostTblEntry AVX2ShuffleTbl[] = {
1042 {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd
1043 {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps
1044 {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq
1045 {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd
1046 {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1047 {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb
1049 {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd
1050 {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps
1051 {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq
1052 {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd
1053 {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1054 {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb
1056 {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1057 {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb
1059 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1060 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1061 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1062 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1063 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1065 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb
1068 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd
1069 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3}, // 2*vpermps + vblendps
1070 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd
1071 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd
1072 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1074 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb
1079 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1080 return LT.first * Entry->Cost;
1082 static const CostTblEntry XOPShuffleTbl[] = {
1083 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd
1084 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps
1085 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd
1086 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps
1087 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1089 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm
1092 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1094 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm
1095 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm
1097 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm
1101 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1102 return LT.first * Entry->Cost;
1104 static const CostTblEntry AVX1ShuffleTbl[] = {
1105 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1106 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1107 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1108 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1109 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1110 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128
1112 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1113 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1114 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1115 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1116 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1118 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb
1121 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd
1122 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd
1123 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps
1124 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps
1125 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1126 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor
1128 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd
1129 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd
1130 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1131 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1132 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1133 // + 2*por + vinsertf128
1134 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb
1135 // + 2*por + vinsertf128
1137 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd
1138 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd
1139 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1140 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1141 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1142 // + 4*por + vinsertf128
1143 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb
1144 // + 4*por + vinsertf128
1148 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1149 return LT.first * Entry->Cost;
1151 static const CostTblEntry SSE41ShuffleTbl[] = {
1152 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1153 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1154 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1155 {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1156 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1157 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb
1161 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1162 return LT.first * Entry->Cost;
1164 static const CostTblEntry SSSE3ShuffleTbl[] = {
1165 {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1166 {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1168 {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1169 {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1171 {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1172 {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1174 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
1175 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1177 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
1178 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
1182 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
1183 return LT.first * Entry->Cost;
1185 static const CostTblEntry SSE2ShuffleTbl[] = {
1186 {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
1187 {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
1188 {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
1189 {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
1190 {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
1192 {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
1193 {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
1194 {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
1195 {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
1196 {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
1197 // + 2*pshufd + 2*unpck + packus
1199 {TTI::SK_Select, MVT::v2i64, 1}, // movsd
1200 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1201 {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
1202 {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
1203 {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
1205 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
1206 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
1207 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
1208 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
1210 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
1211 // + 2*pshufd + 2*unpck + 2*packus
1213 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd
1214 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd
1215 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd}
1216 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute
1217 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute
1221 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
1222 return LT.first * Entry->Cost;
1224 static const CostTblEntry SSE1ShuffleTbl[] = {
1225 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps
1226 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps
1227 { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps
1228 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
1229 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps
1233 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
1234 return LT.first * Entry->Cost;
1236 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
1239 int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1240 const Instruction *I) {
1241 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1242 assert(ISD && "Invalid opcode");
1244 // FIXME: Need a better design of the cost table to handle non-simple types of
1245 // potential massive combinations (elem_num x src_type x dst_type).
1247 static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
1248 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1249 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1251 // Mask sign extend has an instruction.
1252 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
1253 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
1254 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1255 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
1256 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
1257 { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },
1259 // Mask zero extend is a load + broadcast.
1260 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
1261 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
1262 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1263 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
1264 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
1265 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 },
1268 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
1269 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
1270 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1271 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
1272 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
1273 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
1274 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
1276 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
1277 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1278 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
1279 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
1280 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
1281 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
1283 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 },
1284 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
1285 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
1286 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
1287 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
1288 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },
1290 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
1291 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
1292 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
1293 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
1294 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
1295 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
1298 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
1299 // 256-bit wide vectors.
1301 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
1302 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
1303 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
1304 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },
1306 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 1 },
1307 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 1 },
1308 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 1 },
1309 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 },
1311 // v16i1 -> v16i32 - load + broadcast
1312 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
1313 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
1314 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
1315 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
1316 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1317 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1318 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
1319 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
1320 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
1321 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
1323 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
1324 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
1325 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
1326 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
1327 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
1328 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
1329 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
1330 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
1332 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
1333 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
1334 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 },
1335 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
1336 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 },
1337 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
1338 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
1339 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 },
1340 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
1341 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
1342 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
1343 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
1344 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
1345 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
1346 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1347 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1348 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1349 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
1350 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
1351 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
1352 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
1353 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
1354 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 },
1355 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 },
1357 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 },
1359 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
1360 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
1361 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 },
1362 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
1363 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 2 },
1364 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 2 },
1365 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
1366 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 2 },
1367 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 2 },
1370 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
1371 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
1372 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
1373 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
1374 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
1375 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
1376 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
1377 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
1378 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
1379 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
1380 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
1381 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
1382 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
1383 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
1384 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
1385 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
1386 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
1388 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 },
1389 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 },
1390 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
1391 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 },
1392 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
1393 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 4 },
1395 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
1396 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },
1398 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
1401 static const TypeConversionCostTblEntry AVXConversionTbl[] = {
1402 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
1403 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
1404 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
1405 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
1406 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 },
1407 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1408 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 },
1409 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
1410 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1411 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1412 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
1413 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
1414 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1415 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1416 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
1417 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
1419 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
1420 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
1421 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
1422 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
1423 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
1424 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 },
1425 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 },
1427 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
1428 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
1429 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
1430 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
1431 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
1432 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
1433 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
1434 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
1435 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
1436 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1437 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1438 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1440 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
1441 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
1442 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
1443 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
1444 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
1445 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
1446 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
1447 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
1448 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
1449 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 },
1450 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
1451 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
1452 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
1453 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
1454 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 6 },
1455 // The generic code to compute the scalar overhead is currently broken.
1456 // Workaround this limitation by estimating the scalarization overhead
1457 // here. We have roughly 10 instructions per scalar element.
1458 // Multiply that by the vector width.
1459 // FIXME: remove that when PR19268 is fixed.
1460 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
1461 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
1463 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 },
1464 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 },
1465 // This node is expanded into scalarized operations but BasicTTI is overly
1466 // optimistic estimating its cost. It computes 3 per element (one
1467 // vector-extract, one scalar conversion and one vector-insert). The
1468 // problem is that the inserts form a read-modify-write chain so latency
1469 // should be factored in too. Inflating the cost per element by 1.
1470 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
1471 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },
1473 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
1474 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
1477 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
1478 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
1479 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
1480 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
1481 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
1482 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1483 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1485 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
1486 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 },
1487 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
1488 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
1489 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1490 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1491 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
1492 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
1493 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1494 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1495 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
1496 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
1497 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1498 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1499 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1500 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1501 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1502 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1504 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 },
1505 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 },
1506 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 },
1507 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
1508 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
1509 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 },
1510 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
1512 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
1515 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
1516 // These are somewhat magic numbers justified by looking at the output of
1517 // Intel's IACA, running some kernels and making sure when we take
1518 // legalization into account the throughput will be overestimated.
1519 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
1520 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
1521 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
1522 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
1523 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
1524 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
1525 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
1526 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
1528 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
1529 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
1530 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
1531 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
1532 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
1533 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
1534 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 6 },
1535 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
1537 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 },
1539 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 6 },
1541 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
1542 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 },
1543 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
1544 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 },
1545 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1546 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 },
1547 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1548 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 },
1549 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
1550 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
1551 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
1552 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1553 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 },
1554 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
1555 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1556 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 },
1557 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
1558 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 },
1559 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
1560 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1561 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
1562 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
1563 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
1564 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 },
1566 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 },
1567 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 },
1568 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
1569 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 },
1570 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 },
1571 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
1572 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
1573 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
1574 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
1577 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
1578 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);
1580 if (ST->hasSSE2() && !ST->hasAVX()) {
1581 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
1582 LTDest.second, LTSrc.second))
1583 return LTSrc.first * Entry->Cost;
1586 EVT SrcTy = TLI->getValueType(DL, Src);
1587 EVT DstTy = TLI->getValueType(DL, Dst);
1589 // The function getSimpleVT only handles simple value types.
1590 if (!SrcTy.isSimple() || !DstTy.isSimple())
1591 return BaseT::getCastInstrCost(Opcode, Dst, Src);
1593 MVT SimpleSrcTy = SrcTy.getSimpleVT();
1594 MVT SimpleDstTy = DstTy.getSimpleVT();
1596 // Make sure that neither type is going to be split before using the
1597 // AVX512 tables. This handles -mprefer-vector-width=256
1598 // with -min-legal-vector-width<=256
1599 if (TLI->getTypeAction(SimpleSrcTy) != TargetLowering::TypeSplitVector &&
1600 TLI->getTypeAction(SimpleDstTy) != TargetLowering::TypeSplitVector) {
1602 if (const auto *Entry = ConvertCostTableLookup(AVX512BWConversionTbl, ISD,
1603 SimpleDstTy, SimpleSrcTy))
1607 if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
1608 SimpleDstTy, SimpleSrcTy))
1611 if (ST->hasAVX512())
1612 if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
1613 SimpleDstTy, SimpleSrcTy))
1617 if (ST->hasAVX2()) {
1618 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
1619 SimpleDstTy, SimpleSrcTy))
1624 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
1625 SimpleDstTy, SimpleSrcTy))
1629 if (ST->hasSSE41()) {
1630 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
1631 SimpleDstTy, SimpleSrcTy))
1635 if (ST->hasSSE2()) {
1636 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
1637 SimpleDstTy, SimpleSrcTy))
1641 return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
1644 int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
1645 const Instruction *I) {
1646 // Legalize the type.
1647 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1649 MVT MTy = LT.second;
1651 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1652 assert(ISD && "Invalid opcode");
1654 static const CostTblEntry SSE2CostTbl[] = {
1655 { ISD::SETCC, MVT::v2i64, 8 },
1656 { ISD::SETCC, MVT::v4i32, 1 },
1657 { ISD::SETCC, MVT::v8i16, 1 },
1658 { ISD::SETCC, MVT::v16i8, 1 },
1661 static const CostTblEntry SSE42CostTbl[] = {
1662 { ISD::SETCC, MVT::v2f64, 1 },
1663 { ISD::SETCC, MVT::v4f32, 1 },
1664 { ISD::SETCC, MVT::v2i64, 1 },
1667 static const CostTblEntry AVX1CostTbl[] = {
1668 { ISD::SETCC, MVT::v4f64, 1 },
1669 { ISD::SETCC, MVT::v8f32, 1 },
1670 // AVX1 does not support 8-wide integer compare.
1671 { ISD::SETCC, MVT::v4i64, 4 },
1672 { ISD::SETCC, MVT::v8i32, 4 },
1673 { ISD::SETCC, MVT::v16i16, 4 },
1674 { ISD::SETCC, MVT::v32i8, 4 },
1677 static const CostTblEntry AVX2CostTbl[] = {
1678 { ISD::SETCC, MVT::v4i64, 1 },
1679 { ISD::SETCC, MVT::v8i32, 1 },
1680 { ISD::SETCC, MVT::v16i16, 1 },
1681 { ISD::SETCC, MVT::v32i8, 1 },
1684 static const CostTblEntry AVX512CostTbl[] = {
1685 { ISD::SETCC, MVT::v8i64, 1 },
1686 { ISD::SETCC, MVT::v16i32, 1 },
1687 { ISD::SETCC, MVT::v8f64, 1 },
1688 { ISD::SETCC, MVT::v16f32, 1 },
1691 static const CostTblEntry AVX512BWCostTbl[] = {
1692 { ISD::SETCC, MVT::v32i16, 1 },
1693 { ISD::SETCC, MVT::v64i8, 1 },
1697 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
1698 return LT.first * Entry->Cost;
1700 if (ST->hasAVX512())
1701 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
1702 return LT.first * Entry->Cost;
1705 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
1706 return LT.first * Entry->Cost;
1709 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
1710 return LT.first * Entry->Cost;
1713 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
1714 return LT.first * Entry->Cost;
1717 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
1718 return LT.first * Entry->Cost;
1720 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
1723 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
1725 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
1726 ArrayRef<Type *> Tys, FastMathFlags FMF,
1727 unsigned ScalarizationCostPassed) {
1728 // Costs should match the codegen from:
1729 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
1730 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
1731 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
1732 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
1733 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
1734 static const CostTblEntry AVX512CDCostTbl[] = {
1735 { ISD::CTLZ, MVT::v8i64, 1 },
1736 { ISD::CTLZ, MVT::v16i32, 1 },
1737 { ISD::CTLZ, MVT::v32i16, 8 },
1738 { ISD::CTLZ, MVT::v64i8, 20 },
1739 { ISD::CTLZ, MVT::v4i64, 1 },
1740 { ISD::CTLZ, MVT::v8i32, 1 },
1741 { ISD::CTLZ, MVT::v16i16, 4 },
1742 { ISD::CTLZ, MVT::v32i8, 10 },
1743 { ISD::CTLZ, MVT::v2i64, 1 },
1744 { ISD::CTLZ, MVT::v4i32, 1 },
1745 { ISD::CTLZ, MVT::v8i16, 4 },
1746 { ISD::CTLZ, MVT::v16i8, 4 },
1748 static const CostTblEntry AVX512BWCostTbl[] = {
1749 { ISD::BITREVERSE, MVT::v8i64, 5 },
1750 { ISD::BITREVERSE, MVT::v16i32, 5 },
1751 { ISD::BITREVERSE, MVT::v32i16, 5 },
1752 { ISD::BITREVERSE, MVT::v64i8, 5 },
1753 { ISD::CTLZ, MVT::v8i64, 23 },
1754 { ISD::CTLZ, MVT::v16i32, 22 },
1755 { ISD::CTLZ, MVT::v32i16, 18 },
1756 { ISD::CTLZ, MVT::v64i8, 17 },
1757 { ISD::CTPOP, MVT::v8i64, 7 },
1758 { ISD::CTPOP, MVT::v16i32, 11 },
1759 { ISD::CTPOP, MVT::v32i16, 9 },
1760 { ISD::CTPOP, MVT::v64i8, 6 },
1761 { ISD::CTTZ, MVT::v8i64, 10 },
1762 { ISD::CTTZ, MVT::v16i32, 14 },
1763 { ISD::CTTZ, MVT::v32i16, 12 },
1764 { ISD::CTTZ, MVT::v64i8, 9 },
1765 { ISD::SADDSAT, MVT::v32i16, 1 },
1766 { ISD::SADDSAT, MVT::v64i8, 1 },
1767 { ISD::SSUBSAT, MVT::v32i16, 1 },
1768 { ISD::SSUBSAT, MVT::v64i8, 1 },
1769 { ISD::UADDSAT, MVT::v32i16, 1 },
1770 { ISD::UADDSAT, MVT::v64i8, 1 },
1771 { ISD::USUBSAT, MVT::v32i16, 1 },
1772 { ISD::USUBSAT, MVT::v64i8, 1 },
1774 static const CostTblEntry AVX512CostTbl[] = {
1775 { ISD::BITREVERSE, MVT::v8i64, 36 },
1776 { ISD::BITREVERSE, MVT::v16i32, 24 },
1777 { ISD::CTLZ, MVT::v8i64, 29 },
1778 { ISD::CTLZ, MVT::v16i32, 35 },
1779 { ISD::CTPOP, MVT::v8i64, 16 },
1780 { ISD::CTPOP, MVT::v16i32, 24 },
1781 { ISD::CTTZ, MVT::v8i64, 20 },
1782 { ISD::CTTZ, MVT::v16i32, 28 },
1783 { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd
1784 { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq
1785 { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq
1786 { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq
1788 static const CostTblEntry XOPCostTbl[] = {
1789 { ISD::BITREVERSE, MVT::v4i64, 4 },
1790 { ISD::BITREVERSE, MVT::v8i32, 4 },
1791 { ISD::BITREVERSE, MVT::v16i16, 4 },
1792 { ISD::BITREVERSE, MVT::v32i8, 4 },
1793 { ISD::BITREVERSE, MVT::v2i64, 1 },
1794 { ISD::BITREVERSE, MVT::v4i32, 1 },
1795 { ISD::BITREVERSE, MVT::v8i16, 1 },
1796 { ISD::BITREVERSE, MVT::v16i8, 1 },
1797 { ISD::BITREVERSE, MVT::i64, 3 },
1798 { ISD::BITREVERSE, MVT::i32, 3 },
1799 { ISD::BITREVERSE, MVT::i16, 3 },
1800 { ISD::BITREVERSE, MVT::i8, 3 }
1802 static const CostTblEntry AVX2CostTbl[] = {
1803 { ISD::BITREVERSE, MVT::v4i64, 5 },
1804 { ISD::BITREVERSE, MVT::v8i32, 5 },
1805 { ISD::BITREVERSE, MVT::v16i16, 5 },
1806 { ISD::BITREVERSE, MVT::v32i8, 5 },
1807 { ISD::BSWAP, MVT::v4i64, 1 },
1808 { ISD::BSWAP, MVT::v8i32, 1 },
1809 { ISD::BSWAP, MVT::v16i16, 1 },
1810 { ISD::CTLZ, MVT::v4i64, 23 },
1811 { ISD::CTLZ, MVT::v8i32, 18 },
1812 { ISD::CTLZ, MVT::v16i16, 14 },
1813 { ISD::CTLZ, MVT::v32i8, 9 },
1814 { ISD::CTPOP, MVT::v4i64, 7 },
1815 { ISD::CTPOP, MVT::v8i32, 11 },
1816 { ISD::CTPOP, MVT::v16i16, 9 },
1817 { ISD::CTPOP, MVT::v32i8, 6 },
1818 { ISD::CTTZ, MVT::v4i64, 10 },
1819 { ISD::CTTZ, MVT::v8i32, 14 },
1820 { ISD::CTTZ, MVT::v16i16, 12 },
1821 { ISD::CTTZ, MVT::v32i8, 9 },
1822 { ISD::SADDSAT, MVT::v16i16, 1 },
1823 { ISD::SADDSAT, MVT::v32i8, 1 },
1824 { ISD::SSUBSAT, MVT::v16i16, 1 },
1825 { ISD::SSUBSAT, MVT::v32i8, 1 },
1826 { ISD::UADDSAT, MVT::v16i16, 1 },
1827 { ISD::UADDSAT, MVT::v32i8, 1 },
1828 { ISD::USUBSAT, MVT::v16i16, 1 },
1829 { ISD::USUBSAT, MVT::v32i8, 1 },
1830 { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd
1831 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/
1832 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
1833 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
1834 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/
1835 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
1836 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
1838 static const CostTblEntry AVX1CostTbl[] = {
1839 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert
1840 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert
1841 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
1842 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert
1843 { ISD::BSWAP, MVT::v4i64, 4 },
1844 { ISD::BSWAP, MVT::v8i32, 4 },
1845 { ISD::BSWAP, MVT::v16i16, 4 },
1846 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert
1847 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert
1848 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
1849 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
1850 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
1851 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
1852 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
1853 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
1854 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert
1855 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert
1856 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
1857 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
1858 { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
1859 { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
1860 { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
1861 { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
1862 { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
1863 { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
1864 { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
1865 { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
1866 { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert
1867 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
1868 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
1869 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
1870 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/
1871 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/
1872 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/
1874 static const CostTblEntry GLMCostTbl[] = {
1875 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss
1876 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
1877 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd
1878 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
1880 static const CostTblEntry SLMCostTbl[] = {
1881 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss
1882 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
1883 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd
1884 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
1886 static const CostTblEntry SSE42CostTbl[] = {
1887 { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd
1888 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
1889 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
1891 static const CostTblEntry SSSE3CostTbl[] = {
1892 { ISD::BITREVERSE, MVT::v2i64, 5 },
1893 { ISD::BITREVERSE, MVT::v4i32, 5 },
1894 { ISD::BITREVERSE, MVT::v8i16, 5 },
1895 { ISD::BITREVERSE, MVT::v16i8, 5 },
1896 { ISD::BSWAP, MVT::v2i64, 1 },
1897 { ISD::BSWAP, MVT::v4i32, 1 },
1898 { ISD::BSWAP, MVT::v8i16, 1 },
1899 { ISD::CTLZ, MVT::v2i64, 23 },
1900 { ISD::CTLZ, MVT::v4i32, 18 },
1901 { ISD::CTLZ, MVT::v8i16, 14 },
1902 { ISD::CTLZ, MVT::v16i8, 9 },
1903 { ISD::CTPOP, MVT::v2i64, 7 },
1904 { ISD::CTPOP, MVT::v4i32, 11 },
1905 { ISD::CTPOP, MVT::v8i16, 9 },
1906 { ISD::CTPOP, MVT::v16i8, 6 },
1907 { ISD::CTTZ, MVT::v2i64, 10 },
1908 { ISD::CTTZ, MVT::v4i32, 14 },
1909 { ISD::CTTZ, MVT::v8i16, 12 },
1910 { ISD::CTTZ, MVT::v16i8, 9 }
1912 static const CostTblEntry SSE2CostTbl[] = {
1913 { ISD::BITREVERSE, MVT::v2i64, 29 },
1914 { ISD::BITREVERSE, MVT::v4i32, 27 },
1915 { ISD::BITREVERSE, MVT::v8i16, 27 },
1916 { ISD::BITREVERSE, MVT::v16i8, 20 },
1917 { ISD::BSWAP, MVT::v2i64, 7 },
1918 { ISD::BSWAP, MVT::v4i32, 7 },
1919 { ISD::BSWAP, MVT::v8i16, 7 },
1920 { ISD::CTLZ, MVT::v2i64, 25 },
1921 { ISD::CTLZ, MVT::v4i32, 26 },
1922 { ISD::CTLZ, MVT::v8i16, 20 },
1923 { ISD::CTLZ, MVT::v16i8, 17 },
1924 { ISD::CTPOP, MVT::v2i64, 12 },
1925 { ISD::CTPOP, MVT::v4i32, 15 },
1926 { ISD::CTPOP, MVT::v8i16, 13 },
1927 { ISD::CTPOP, MVT::v16i8, 10 },
1928 { ISD::CTTZ, MVT::v2i64, 14 },
1929 { ISD::CTTZ, MVT::v4i32, 18 },
1930 { ISD::CTTZ, MVT::v8i16, 16 },
1931 { ISD::CTTZ, MVT::v16i8, 13 },
1932 { ISD::SADDSAT, MVT::v8i16, 1 },
1933 { ISD::SADDSAT, MVT::v16i8, 1 },
1934 { ISD::SSUBSAT, MVT::v8i16, 1 },
1935 { ISD::SSUBSAT, MVT::v16i8, 1 },
1936 { ISD::UADDSAT, MVT::v8i16, 1 },
1937 { ISD::UADDSAT, MVT::v16i8, 1 },
1938 { ISD::USUBSAT, MVT::v8i16, 1 },
1939 { ISD::USUBSAT, MVT::v16i8, 1 },
1940 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/
1941 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
1943 static const CostTblEntry SSE1CostTbl[] = {
1944 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/
1945 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
1947 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
1948 { ISD::BITREVERSE, MVT::i64, 14 }
1950 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
1951 { ISD::BITREVERSE, MVT::i32, 14 },
1952 { ISD::BITREVERSE, MVT::i16, 14 },
1953 { ISD::BITREVERSE, MVT::i8, 11 }
1956 unsigned ISD = ISD::DELETED_NODE;
1960 case Intrinsic::bitreverse:
1961 ISD = ISD::BITREVERSE;
1963 case Intrinsic::bswap:
1966 case Intrinsic::ctlz:
1969 case Intrinsic::ctpop:
1972 case Intrinsic::cttz:
1975 case Intrinsic::sadd_sat:
1978 case Intrinsic::ssub_sat:
1981 case Intrinsic::uadd_sat:
1984 case Intrinsic::usub_sat:
1987 case Intrinsic::sqrt:
1992 if (ISD != ISD::DELETED_NODE) {
1993 // Legalize the type.
1994 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
1995 MVT MTy = LT.second;
1997 // Attempt to lookup cost.
1999 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
2000 return LT.first * Entry->Cost;
2003 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2004 return LT.first * Entry->Cost;
2007 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
2008 return LT.first * Entry->Cost;
2011 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2012 return LT.first * Entry->Cost;
2014 if (ST->hasAVX512())
2015 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2016 return LT.first * Entry->Cost;
2019 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2020 return LT.first * Entry->Cost;
2023 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2024 return LT.first * Entry->Cost;
2027 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2028 return LT.first * Entry->Cost;
2031 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2032 return LT.first * Entry->Cost;
2035 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
2036 return LT.first * Entry->Cost;
2039 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2040 return LT.first * Entry->Cost;
2043 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2044 return LT.first * Entry->Cost;
2047 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
2048 return LT.first * Entry->Cost;
2050 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
2051 return LT.first * Entry->Cost;
2054 return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF, ScalarizationCostPassed);
2057 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
2058 ArrayRef<Value *> Args, FastMathFlags FMF,
2060 static const CostTblEntry AVX512CostTbl[] = {
2061 { ISD::ROTL, MVT::v8i64, 1 },
2062 { ISD::ROTL, MVT::v4i64, 1 },
2063 { ISD::ROTL, MVT::v2i64, 1 },
2064 { ISD::ROTL, MVT::v16i32, 1 },
2065 { ISD::ROTL, MVT::v8i32, 1 },
2066 { ISD::ROTL, MVT::v4i32, 1 },
2067 { ISD::ROTR, MVT::v8i64, 1 },
2068 { ISD::ROTR, MVT::v4i64, 1 },
2069 { ISD::ROTR, MVT::v2i64, 1 },
2070 { ISD::ROTR, MVT::v16i32, 1 },
2071 { ISD::ROTR, MVT::v8i32, 1 },
2072 { ISD::ROTR, MVT::v4i32, 1 }
2074 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
2075 static const CostTblEntry XOPCostTbl[] = {
2076 { ISD::ROTL, MVT::v4i64, 4 },
2077 { ISD::ROTL, MVT::v8i32, 4 },
2078 { ISD::ROTL, MVT::v16i16, 4 },
2079 { ISD::ROTL, MVT::v32i8, 4 },
2080 { ISD::ROTL, MVT::v2i64, 1 },
2081 { ISD::ROTL, MVT::v4i32, 1 },
2082 { ISD::ROTL, MVT::v8i16, 1 },
2083 { ISD::ROTL, MVT::v16i8, 1 },
2084 { ISD::ROTR, MVT::v4i64, 6 },
2085 { ISD::ROTR, MVT::v8i32, 6 },
2086 { ISD::ROTR, MVT::v16i16, 6 },
2087 { ISD::ROTR, MVT::v32i8, 6 },
2088 { ISD::ROTR, MVT::v2i64, 2 },
2089 { ISD::ROTR, MVT::v4i32, 2 },
2090 { ISD::ROTR, MVT::v8i16, 2 },
2091 { ISD::ROTR, MVT::v16i8, 2 }
2093 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2094 { ISD::ROTL, MVT::i64, 1 },
2095 { ISD::ROTR, MVT::i64, 1 },
2096 { ISD::FSHL, MVT::i64, 4 }
2098 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2099 { ISD::ROTL, MVT::i32, 1 },
2100 { ISD::ROTL, MVT::i16, 1 },
2101 { ISD::ROTL, MVT::i8, 1 },
2102 { ISD::ROTR, MVT::i32, 1 },
2103 { ISD::ROTR, MVT::i16, 1 },
2104 { ISD::ROTR, MVT::i8, 1 },
2105 { ISD::FSHL, MVT::i32, 4 },
2106 { ISD::FSHL, MVT::i16, 4 },
2107 { ISD::FSHL, MVT::i8, 4 }
2110 unsigned ISD = ISD::DELETED_NODE;
2114 case Intrinsic::fshl:
2116 if (Args[0] == Args[1])
2119 case Intrinsic::fshr:
2120 // FSHR has same costs so don't duplicate.
2122 if (Args[0] == Args[1])
2127 if (ISD != ISD::DELETED_NODE) {
2128 // Legalize the type.
2129 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
2130 MVT MTy = LT.second;
2132 // Attempt to lookup cost.
2133 if (ST->hasAVX512())
2134 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2135 return LT.first * Entry->Cost;
2138 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2139 return LT.first * Entry->Cost;
2142 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
2143 return LT.first * Entry->Cost;
2145 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
2146 return LT.first * Entry->Cost;
2149 return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF, VF);
2152 int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
2153 assert(Val->isVectorTy() && "This must be a vector type");
2155 Type *ScalarType = Val->getScalarType();
2158 // Legalize the type.
2159 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
2161 // This type is legalized to a scalar type.
2162 if (!LT.second.isVector())
2165 // The type may be split. Normalize the index to the new type.
2166 unsigned Width = LT.second.getVectorNumElements();
2167 Index = Index % Width;
2169 // Floating point scalars are already located in index #0.
2170 if (ScalarType->isFloatingPointTy() && Index == 0)
2174 // Add to the base cost if we know that the extracted element of a vector is
2175 // destined to be moved to and used in the integer register file.
2176 int RegisterFileMoveCost = 0;
2177 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
2178 RegisterFileMoveCost = 1;
2180 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
2183 int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
2184 unsigned AddressSpace, const Instruction *I) {
2185 // Handle non-power-of-two vectors such as <3 x float>
2186 if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
2187 unsigned NumElem = VTy->getVectorNumElements();
2189 // Handle a few common cases:
2191 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
2192 // Cost = 64 bit store + extract + 32 bit store.
2196 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
2197 // Cost = 128 bit store + unpack + 64 bit store.
2200 // Assume that all other non-power-of-two numbers are scalarized.
2201 if (!isPowerOf2_32(NumElem)) {
2202 int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
2204 int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
2205 Opcode == Instruction::Store);
2206 return NumElem * Cost + SplitCost;
2210 // Legalize the type.
2211 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
2212 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
2215 // Each load/store unit costs 1.
2216 int Cost = LT.first * 1;
2218 // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
2219 // proxy for a double-pumped AVX memory interface such as on Sandybridge.
2220 if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
2226 int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
2228 unsigned AddressSpace) {
2229 VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
2231 // To calculate scalar take the regular cost, without mask
2232 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);
2234 unsigned NumElem = SrcVTy->getVectorNumElements();
2235 VectorType *MaskTy =
2236 VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
2237 if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
2238 (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
2239 !isPowerOf2_32(NumElem)) {
2241 int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
2242 int ScalarCompareCost = getCmpSelInstrCost(
2243 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
2244 int BranchCost = getCFInstrCost(Instruction::Br);
2245 int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
2247 int ValueSplitCost = getScalarizationOverhead(
2248 SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
2250 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
2251 Alignment, AddressSpace);
2252 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
2255 // Legalize the type.
2256 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
2257 auto VT = TLI->getValueType(DL, SrcVTy);
2259 if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
2260 LT.second.getVectorNumElements() == NumElem)
2261 // Promotion requires expand/truncate for data and a shuffle for mask.
2262 Cost += getShuffleCost(TTI::SK_Select, SrcVTy, 0, nullptr) +
2263 getShuffleCost(TTI::SK_Select, MaskTy, 0, nullptr);
2265 else if (LT.second.getVectorNumElements() > NumElem) {
2266 VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
2267 LT.second.getVectorNumElements());
2268 // Expanding requires fill mask with zeroes
2269 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
2271 if (!ST->hasAVX512())
2272 return Cost + LT.first*4; // Each maskmov costs 4
2274 // AVX-512 masked load/store is cheapper
2275 return Cost+LT.first;
2278 int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
2280 // Address computations in vectorized code with non-consecutive addresses will
2281 // likely result in more instructions compared to scalar code where the
2282 // computation can more often be merged into the index mode. The resulting
2283 // extra micro-ops can significantly decrease throughput.
2284 unsigned NumVectorInstToHideOverhead = 10;
2286 // Cost modeling of Strided Access Computation is hidden by the indexing
2287 // modes of X86 regardless of the stride value. We dont believe that there
2288 // is a difference between constant strided access in gerenal and constant
2289 // strided value which is less than or equal to 64.
2290 // Even in the case of (loop invariant) stride whose value is not known at
2291 // compile time, the address computation will not incur more than one extra
2293 if (Ty->isVectorTy() && SE) {
2294 if (!BaseT::isStridedAccess(Ptr))
2295 return NumVectorInstToHideOverhead;
2296 if (!BaseT::getConstantStrideStep(SE, Ptr))
2300 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
2303 int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *ValTy,
2306 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2308 MVT MTy = LT.second;
2310 int ISD = TLI->InstructionOpcodeToISD(Opcode);
2311 assert(ISD && "Invalid opcode");
2313 // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput
2314 // and make it as the cost.
2316 static const CostTblEntry SSE42CostTblPairWise[] = {
2317 { ISD::FADD, MVT::v2f64, 2 },
2318 { ISD::FADD, MVT::v4f32, 4 },
2319 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
2320 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
2321 { ISD::ADD, MVT::v8i16, 5 },
2324 static const CostTblEntry AVX1CostTblPairWise[] = {
2325 { ISD::FADD, MVT::v4f32, 4 },
2326 { ISD::FADD, MVT::v4f64, 5 },
2327 { ISD::FADD, MVT::v8f32, 7 },
2328 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
2329 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
2330 { ISD::ADD, MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
2331 { ISD::ADD, MVT::v8i16, 5 },
2332 { ISD::ADD, MVT::v8i32, 5 },
2335 static const CostTblEntry SSE42CostTblNoPairWise[] = {
2336 { ISD::FADD, MVT::v2f64, 2 },
2337 { ISD::FADD, MVT::v4f32, 4 },
2338 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
2339 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
2340 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
2343 static const CostTblEntry AVX1CostTblNoPairWise[] = {
2344 { ISD::FADD, MVT::v4f32, 3 },
2345 { ISD::FADD, MVT::v4f64, 3 },
2346 { ISD::FADD, MVT::v8f32, 4 },
2347 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
2348 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
2349 { ISD::ADD, MVT::v4i64, 3 },
2350 { ISD::ADD, MVT::v8i16, 4 },
2351 { ISD::ADD, MVT::v8i32, 5 },
2356 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
2357 return LT.first * Entry->Cost;
2360 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
2361 return LT.first * Entry->Cost;
2364 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
2365 return LT.first * Entry->Cost;
2368 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
2369 return LT.first * Entry->Cost;
2372 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise);
2375 int X86TTIImpl::getMinMaxReductionCost(Type *ValTy, Type *CondTy,
2376 bool IsPairwise, bool IsUnsigned) {
2377 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2379 MVT MTy = LT.second;
2382 if (ValTy->isIntOrIntVectorTy()) {
2383 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
2385 assert(ValTy->isFPOrFPVectorTy() &&
2386 "Expected float point or integer vector type.");
2390 // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput
2391 // and make it as the cost.
2393 static const CostTblEntry SSE42CostTblPairWise[] = {
2394 {ISD::FMINNUM, MVT::v2f64, 3},
2395 {ISD::FMINNUM, MVT::v4f32, 2},
2396 {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8"
2397 {ISD::UMIN, MVT::v2i64, 8}, // The data reported by the IACA is "8.6"
2398 {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5"
2399 {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8"
2400 {ISD::SMIN, MVT::v8i16, 2},
2401 {ISD::UMIN, MVT::v8i16, 2},
2404 static const CostTblEntry AVX1CostTblPairWise[] = {
2405 {ISD::FMINNUM, MVT::v4f32, 1},
2406 {ISD::FMINNUM, MVT::v4f64, 1},
2407 {ISD::FMINNUM, MVT::v8f32, 2},
2408 {ISD::SMIN, MVT::v2i64, 3},
2409 {ISD::UMIN, MVT::v2i64, 3},
2410 {ISD::SMIN, MVT::v4i32, 1},
2411 {ISD::UMIN, MVT::v4i32, 1},
2412 {ISD::SMIN, MVT::v8i16, 1},
2413 {ISD::UMIN, MVT::v8i16, 1},
2414 {ISD::SMIN, MVT::v8i32, 3},
2415 {ISD::UMIN, MVT::v8i32, 3},
2418 static const CostTblEntry AVX2CostTblPairWise[] = {
2419 {ISD::SMIN, MVT::v4i64, 2},
2420 {ISD::UMIN, MVT::v4i64, 2},
2421 {ISD::SMIN, MVT::v8i32, 1},
2422 {ISD::UMIN, MVT::v8i32, 1},
2423 {ISD::SMIN, MVT::v16i16, 1},
2424 {ISD::UMIN, MVT::v16i16, 1},
2425 {ISD::SMIN, MVT::v32i8, 2},
2426 {ISD::UMIN, MVT::v32i8, 2},
2429 static const CostTblEntry AVX512CostTblPairWise[] = {
2430 {ISD::FMINNUM, MVT::v8f64, 1},
2431 {ISD::FMINNUM, MVT::v16f32, 2},
2432 {ISD::SMIN, MVT::v8i64, 2},
2433 {ISD::UMIN, MVT::v8i64, 2},
2434 {ISD::SMIN, MVT::v16i32, 1},
2435 {ISD::UMIN, MVT::v16i32, 1},
2438 static const CostTblEntry SSE42CostTblNoPairWise[] = {
2439 {ISD::FMINNUM, MVT::v2f64, 3},
2440 {ISD::FMINNUM, MVT::v4f32, 3},
2441 {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8"
2442 {ISD::UMIN, MVT::v2i64, 9}, // The data reported by the IACA is "8.6"
2443 {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5"
2444 {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8"
2445 {ISD::SMIN, MVT::v8i16, 1}, // The data reported by the IACA is "1.5"
2446 {ISD::UMIN, MVT::v8i16, 2}, // The data reported by the IACA is "1.8"
2449 static const CostTblEntry AVX1CostTblNoPairWise[] = {
2450 {ISD::FMINNUM, MVT::v4f32, 1},
2451 {ISD::FMINNUM, MVT::v4f64, 1},
2452 {ISD::FMINNUM, MVT::v8f32, 1},
2453 {ISD::SMIN, MVT::v2i64, 3},
2454 {ISD::UMIN, MVT::v2i64, 3},
2455 {ISD::SMIN, MVT::v4i32, 1},
2456 {ISD::UMIN, MVT::v4i32, 1},
2457 {ISD::SMIN, MVT::v8i16, 1},
2458 {ISD::UMIN, MVT::v8i16, 1},
2459 {ISD::SMIN, MVT::v8i32, 2},
2460 {ISD::UMIN, MVT::v8i32, 2},
2463 static const CostTblEntry AVX2CostTblNoPairWise[] = {
2464 {ISD::SMIN, MVT::v4i64, 1},
2465 {ISD::UMIN, MVT::v4i64, 1},
2466 {ISD::SMIN, MVT::v8i32, 1},
2467 {ISD::UMIN, MVT::v8i32, 1},
2468 {ISD::SMIN, MVT::v16i16, 1},
2469 {ISD::UMIN, MVT::v16i16, 1},
2470 {ISD::SMIN, MVT::v32i8, 1},
2471 {ISD::UMIN, MVT::v32i8, 1},
2474 static const CostTblEntry AVX512CostTblNoPairWise[] = {
2475 {ISD::FMINNUM, MVT::v8f64, 1},
2476 {ISD::FMINNUM, MVT::v16f32, 2},
2477 {ISD::SMIN, MVT::v8i64, 1},
2478 {ISD::UMIN, MVT::v8i64, 1},
2479 {ISD::SMIN, MVT::v16i32, 1},
2480 {ISD::UMIN, MVT::v16i32, 1},
2484 if (ST->hasAVX512())
2485 if (const auto *Entry = CostTableLookup(AVX512CostTblPairWise, ISD, MTy))
2486 return LT.first * Entry->Cost;
2489 if (const auto *Entry = CostTableLookup(AVX2CostTblPairWise, ISD, MTy))
2490 return LT.first * Entry->Cost;
2493 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
2494 return LT.first * Entry->Cost;
2497 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
2498 return LT.first * Entry->Cost;
2500 if (ST->hasAVX512())
2501 if (const auto *Entry =
2502 CostTableLookup(AVX512CostTblNoPairWise, ISD, MTy))
2503 return LT.first * Entry->Cost;
2506 if (const auto *Entry = CostTableLookup(AVX2CostTblNoPairWise, ISD, MTy))
2507 return LT.first * Entry->Cost;
2510 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
2511 return LT.first * Entry->Cost;
2514 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
2515 return LT.first * Entry->Cost;
2518 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned);
2521 /// Calculate the cost of materializing a 64-bit value. This helper
2522 /// method might only calculate a fraction of a larger immediate. Therefore it
2523 /// is valid to return a cost of ZERO.
2524 int X86TTIImpl::getIntImmCost(int64_t Val) {
2526 return TTI::TCC_Free;
2529 return TTI::TCC_Basic;
2531 return 2 * TTI::TCC_Basic;
2534 int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
2535 assert(Ty->isIntegerTy());
2537 unsigned BitSize = Ty->getPrimitiveSizeInBits();
2541 // Never hoist constants larger than 128bit, because this might lead to
2542 // incorrect code generation or assertions in codegen.
2543 // Fixme: Create a cost model for types larger than i128 once the codegen
2544 // issues have been fixed.
2546 return TTI::TCC_Free;
2549 return TTI::TCC_Free;
2551 // Sign-extend all constants to a multiple of 64-bit.
2553 if (BitSize % 64 != 0)
2554 ImmVal = Imm.sext(alignTo(BitSize, 64));
2556 // Split the constant into 64-bit chunks and calculate the cost for each
2559 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
2560 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
2561 int64_t Val = Tmp.getSExtValue();
2562 Cost += getIntImmCost(Val);
2564 // We need at least one instruction to materialize the constant.
2565 return std::max(1, Cost);
2568 int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
2570 assert(Ty->isIntegerTy());
2572 unsigned BitSize = Ty->getPrimitiveSizeInBits();
2573 // There is no cost model for constants with a bit size of 0. Return TCC_Free
2574 // here, so that constant hoisting will ignore this constant.
2576 return TTI::TCC_Free;
2578 unsigned ImmIdx = ~0U;
2581 return TTI::TCC_Free;
2582 case Instruction::GetElementPtr:
2583 // Always hoist the base address of a GetElementPtr. This prevents the
2584 // creation of new constants for every base constant that gets constant
2585 // folded with the offset.
2587 return 2 * TTI::TCC_Basic;
2588 return TTI::TCC_Free;
2589 case Instruction::Store:
2592 case Instruction::ICmp:
2593 // This is an imperfect hack to prevent constant hoisting of
2594 // compares that might be trying to check if a 64-bit value fits in
2595 // 32-bits. The backend can optimize these cases using a right shift by 32.
2596 // Ideally we would check the compare predicate here. There also other
2597 // similar immediates the backend can use shifts for.
2598 if (Idx == 1 && Imm.getBitWidth() == 64) {
2599 uint64_t ImmVal = Imm.getZExtValue();
2600 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
2601 return TTI::TCC_Free;
2605 case Instruction::And:
2606 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
2607 // by using a 32-bit operation with implicit zero extension. Detect such
2608 // immediates here as the normal path expects bit 31 to be sign extended.
2609 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
2610 return TTI::TCC_Free;
2613 case Instruction::Add:
2614 case Instruction::Sub:
2615 // For add/sub, we can use the opposite instruction for INT32_MIN.
2616 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
2617 return TTI::TCC_Free;
2620 case Instruction::UDiv:
2621 case Instruction::SDiv:
2622 case Instruction::URem:
2623 case Instruction::SRem:
2624 // Division by constant is typically expanded later into a different
2625 // instruction sequence. This completely changes the constants.
2626 // Report them as "free" to stop ConstantHoist from marking them as opaque.
2627 return TTI::TCC_Free;
2628 case Instruction::Mul:
2629 case Instruction::Or:
2630 case Instruction::Xor:
2633 // Always return TCC_Free for the shift value of a shift instruction.
2634 case Instruction::Shl:
2635 case Instruction::LShr:
2636 case Instruction::AShr:
2638 return TTI::TCC_Free;
2640 case Instruction::Trunc:
2641 case Instruction::ZExt:
2642 case Instruction::SExt:
2643 case Instruction::IntToPtr:
2644 case Instruction::PtrToInt:
2645 case Instruction::BitCast:
2646 case Instruction::PHI:
2647 case Instruction::Call:
2648 case Instruction::Select:
2649 case Instruction::Ret:
2650 case Instruction::Load:
2654 if (Idx == ImmIdx) {
2655 int NumConstants = divideCeil(BitSize, 64);
2656 int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
2657 return (Cost <= NumConstants * TTI::TCC_Basic)
2658 ? static_cast<int>(TTI::TCC_Free)
2662 return X86TTIImpl::getIntImmCost(Imm, Ty);
2665 int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
2667 assert(Ty->isIntegerTy());
2669 unsigned BitSize = Ty->getPrimitiveSizeInBits();
2670 // There is no cost model for constants with a bit size of 0. Return TCC_Free
2671 // here, so that constant hoisting will ignore this constant.
2673 return TTI::TCC_Free;
2677 return TTI::TCC_Free;
2678 case Intrinsic::sadd_with_overflow:
2679 case Intrinsic::uadd_with_overflow:
2680 case Intrinsic::ssub_with_overflow:
2681 case Intrinsic::usub_with_overflow:
2682 case Intrinsic::smul_with_overflow:
2683 case Intrinsic::umul_with_overflow:
2684 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
2685 return TTI::TCC_Free;
2687 case Intrinsic::experimental_stackmap:
2688 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
2689 return TTI::TCC_Free;
2691 case Intrinsic::experimental_patchpoint_void:
2692 case Intrinsic::experimental_patchpoint_i64:
2693 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
2694 return TTI::TCC_Free;
2697 return X86TTIImpl::getIntImmCost(Imm, Ty);
2700 unsigned X86TTIImpl::getUserCost(const User *U,
2701 ArrayRef<const Value *> Operands) {
2702 if (isa<StoreInst>(U)) {
2703 Value *Ptr = U->getOperand(1);
2704 // Store instruction with index and scale costs 2 Uops.
2705 // Check the preceding GEP to identify non-const indices.
2706 if (auto GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
2707 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
2708 return TTI::TCC_Basic * 2;
2710 return TTI::TCC_Basic;
2712 return BaseT::getUserCost(U, Operands);
2715 // Return an average cost of Gather / Scatter instruction, maybe improved later
2716 int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
2717 unsigned Alignment, unsigned AddressSpace) {
2719 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
2720 unsigned VF = SrcVTy->getVectorNumElements();
2722 // Try to reduce index size from 64 bit (default for GEP)
2723 // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
2724 // operation will use 16 x 64 indices which do not fit in a zmm and needs
2725 // to split. Also check that the base pointer is the same for all lanes,
2726 // and that there's at most one variable index.
2727 auto getIndexSizeInBits = [](Value *Ptr, const DataLayout& DL) {
2728 unsigned IndexSize = DL.getPointerSizeInBits();
2729 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2730 if (IndexSize < 64 || !GEP)
2733 unsigned NumOfVarIndices = 0;
2734 Value *Ptrs = GEP->getPointerOperand();
2735 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
2737 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
2738 if (isa<Constant>(GEP->getOperand(i)))
2740 Type *IndxTy = GEP->getOperand(i)->getType();
2741 if (IndxTy->isVectorTy())
2742 IndxTy = IndxTy->getVectorElementType();
2743 if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
2744 !isa<SExtInst>(GEP->getOperand(i))) ||
2745 ++NumOfVarIndices > 1)
2746 return IndexSize; // 64
2748 return (unsigned)32;
2752 // Trying to reduce IndexSize to 32 bits for vector 16.
2753 // By default the IndexSize is equal to pointer size.
2754 unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
2755 ? getIndexSizeInBits(Ptr, DL)
2756 : DL.getPointerSizeInBits();
2758 Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
2760 std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
2761 std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
2762 int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
2763 if (SplitFactor > 1) {
2764 // Handle splitting of vector of pointers
2765 Type *SplitSrcTy = VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
2766 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
2770 // The gather / scatter cost is given by Intel architects. It is a rough
2771 // number since we are looking at one instruction in a time.
2772 const int GSOverhead = (Opcode == Instruction::Load)
2773 ? ST->getGatherOverhead()
2774 : ST->getScatterOverhead();
2775 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
2776 Alignment, AddressSpace);
2779 /// Return the cost of full scalarization of gather / scatter operation.
2781 /// Opcode - Load or Store instruction.
2782 /// SrcVTy - The type of the data vector that should be gathered or scattered.
2783 /// VariableMask - The mask is non-constant at compile time.
2784 /// Alignment - Alignment for one element.
2785 /// AddressSpace - pointer[s] address space.
2787 int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
2788 bool VariableMask, unsigned Alignment,
2789 unsigned AddressSpace) {
2790 unsigned VF = SrcVTy->getVectorNumElements();
2792 int MaskUnpackCost = 0;
2794 VectorType *MaskTy =
2795 VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
2796 MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
2797 int ScalarCompareCost =
2798 getCmpSelInstrCost(Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()),
2800 int BranchCost = getCFInstrCost(Instruction::Br);
2801 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
2804 // The cost of the scalar loads/stores.
2805 int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
2806 Alignment, AddressSpace);
2808 int InsertExtractCost = 0;
2809 if (Opcode == Instruction::Load)
2810 for (unsigned i = 0; i < VF; ++i)
2811 // Add the cost of inserting each scalar load into the vector
2812 InsertExtractCost +=
2813 getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
2815 for (unsigned i = 0; i < VF; ++i)
2816 // Add the cost of extracting each element out of the data vector
2817 InsertExtractCost +=
2818 getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);
2820 return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
2823 /// Calculate the cost of Gather / Scatter operation
2824 int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
2825 Value *Ptr, bool VariableMask,
2826 unsigned Alignment) {
2827 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
2828 unsigned VF = SrcVTy->getVectorNumElements();
2829 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2830 if (!PtrTy && Ptr->getType()->isVectorTy())
2831 PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
2832 assert(PtrTy && "Unexpected type for Ptr argument");
2833 unsigned AddressSpace = PtrTy->getAddressSpace();
2835 bool Scalarize = false;
2836 if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
2837 (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
2839 // Gather / Scatter for vector 2 is not profitable on KNL / SKX
2840 // Vector-4 of gather/scatter instruction does not exist on KNL.
2841 // We can extend it to 8 elements, but zeroing upper bits of
2842 // the mask vector will add more instructions. Right now we give the scalar
2843 // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter instruction
2844 // is better in the VariableMask case.
2845 if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX())))
2849 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
2852 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
2855 bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
2856 TargetTransformInfo::LSRCost &C2) {
2857 // X86 specific here are "instruction number 1st priority".
2858 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
2859 C1.NumIVMuls, C1.NumBaseAdds,
2860 C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
2861 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
2862 C2.NumIVMuls, C2.NumBaseAdds,
2863 C2.ScaleCost, C2.ImmCost, C2.SetupCost);
2866 bool X86TTIImpl::canMacroFuseCmp() {
2867 return ST->hasMacroFusion();
2870 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
2871 // The backend can't handle a single element vector.
2872 if (isa<VectorType>(DataTy) && DataTy->getVectorNumElements() == 1)
2874 Type *ScalarTy = DataTy->getScalarType();
2875 int DataWidth = isa<PointerType>(ScalarTy) ?
2876 DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();
2878 return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
2879 ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
2882 bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
2883 return isLegalMaskedLoad(DataType);
2886 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
2887 // This function is called now in two cases: from the Loop Vectorizer
2888 // and from the Scalarizer.
2889 // When the Loop Vectorizer asks about legality of the feature,
2890 // the vectorization factor is not calculated yet. The Loop Vectorizer
2891 // sends a scalar type and the decision is based on the width of the
2893 // Later on, the cost model will estimate usage this intrinsic based on
2895 // The Scalarizer asks again about legality. It sends a vector type.
2896 // In this case we can reject non-power-of-2 vectors.
2897 // We also reject single element vectors as the type legalizer can't
2899 if (isa<VectorType>(DataTy)) {
2900 unsigned NumElts = DataTy->getVectorNumElements();
2901 if (NumElts == 1 || !isPowerOf2_32(NumElts))
2904 Type *ScalarTy = DataTy->getScalarType();
2905 int DataWidth = isa<PointerType>(ScalarTy) ?
2906 DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();
2908 // Some CPUs have better gather performance than others.
2909 // TODO: Remove the explicit ST->hasAVX512()?, That would mean we would only
2910 // enable gather with a -march.
2911 return (DataWidth == 32 || DataWidth == 64) &&
2912 (ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2()));
2915 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
2916 // AVX2 doesn't support scatter
2917 if (!ST->hasAVX512())
2919 return isLegalMaskedGather(DataType);
2922 bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
2923 EVT VT = TLI->getValueType(DL, DataType);
2924 return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
2927 bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
2931 bool X86TTIImpl::areInlineCompatible(const Function *Caller,
2932 const Function *Callee) const {
2933 const TargetMachine &TM = getTLI()->getTargetMachine();
2935 // Work this as a subsetting of subtarget features.
2936 const FeatureBitset &CallerBits =
2937 TM.getSubtargetImpl(*Caller)->getFeatureBits();
2938 const FeatureBitset &CalleeBits =
2939 TM.getSubtargetImpl(*Callee)->getFeatureBits();
2941 // FIXME: This is likely too limiting as it will include subtarget features
2942 // that we might not care about for inlining, but it is conservatively
2944 return (CallerBits & CalleeBits) == CalleeBits;
2947 const X86TTIImpl::TTI::MemCmpExpansionOptions *
2948 X86TTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
2949 // Only enable vector loads for equality comparison.
2950 // Right now the vector version is not as fast, see #33329.
2951 static const auto ThreeWayOptions = [this]() {
2952 TTI::MemCmpExpansionOptions Options;
2953 if (ST->is64Bit()) {
2954 Options.LoadSizes.push_back(8);
2956 Options.LoadSizes.push_back(4);
2957 Options.LoadSizes.push_back(2);
2958 Options.LoadSizes.push_back(1);
2961 static const auto EqZeroOptions = [this]() {
2962 TTI::MemCmpExpansionOptions Options;
2963 // TODO: enable AVX512 when the DAG is ready.
2964 // if (ST->hasAVX512()) Options.LoadSizes.push_back(64);
2965 if (ST->hasAVX2()) Options.LoadSizes.push_back(32);
2966 if (ST->hasSSE2()) Options.LoadSizes.push_back(16);
2967 if (ST->is64Bit()) {
2968 Options.LoadSizes.push_back(8);
2970 Options.LoadSizes.push_back(4);
2971 Options.LoadSizes.push_back(2);
2972 Options.LoadSizes.push_back(1);
2973 // All GPR and vector loads can be unaligned. SIMD compare requires integer
2974 // vectors (SSE2/AVX2).
2975 Options.AllowOverlappingLoads = true;
2978 return IsZeroCmp ? &EqZeroOptions : &ThreeWayOptions;
2981 bool X86TTIImpl::enableInterleavedAccessVectorization() {
2982 // TODO: We expect this to be beneficial regardless of arch,
2983 // but there are currently some unexplained performance artifacts on Atom.
2984 // As a temporary solution, disable on Atom.
2985 return !(ST->isAtom());
2988 // Get estimation for interleaved load/store operations for AVX2.
2989 // \p Factor is the interleaved-access factor (stride) - number of
2990 // (interleaved) elements in the group.
2991 // \p Indices contains the indices for a strided load: when the
2992 // interleaved load has gaps they indicate which elements are used.
2993 // If Indices is empty (or if the number of indices is equal to the size
2994 // of the interleaved-access as given in \p Factor) the access has no gaps.
2996 // As opposed to AVX-512, AVX2 does not have generic shuffles that allow
2997 // computing the cost using a generic formula as a function of generic
2998 // shuffles. We therefore use a lookup table instead, filled according to
2999 // the instruction sequences that codegen currently generates.
3000 int X86TTIImpl::getInterleavedMemoryOpCostAVX2(unsigned Opcode, Type *VecTy,
3002 ArrayRef<unsigned> Indices,
3004 unsigned AddressSpace,
3005 bool UseMaskForCond,
3006 bool UseMaskForGaps) {
3008 if (UseMaskForCond || UseMaskForGaps)
3009 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
3010 Alignment, AddressSpace,
3011 UseMaskForCond, UseMaskForGaps);
3013 // We currently Support only fully-interleaved groups, with no gaps.
3014 // TODO: Support also strided loads (interleaved-groups with gaps).
3015 if (Indices.size() && Indices.size() != Factor)
3016 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
3017 Alignment, AddressSpace);
3019 // VecTy for interleave memop is <VF*Factor x Elt>.
3020 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
3021 // VecTy = <12 x i32>.
3022 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
3024 // This function can be called with VecTy=<6xi128>, Factor=3, in which case
3025 // the VF=2, while v2i128 is an unsupported MVT vector type
3026 // (see MachineValueType.h::getVectorVT()).
3027 if (!LegalVT.isVector())
3028 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
3029 Alignment, AddressSpace);
3031 unsigned VF = VecTy->getVectorNumElements() / Factor;
3032 Type *ScalarTy = VecTy->getVectorElementType();
3034 // Calculate the number of memory operations (NumOfMemOps), required
3035 // for load/store the VecTy.
3036 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
3037 unsigned LegalVTSize = LegalVT.getStoreSize();
3038 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
3040 // Get the cost of one memory operation.
3041 Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
3042 LegalVT.getVectorNumElements());
3043 unsigned MemOpCost =
3044 getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);
3046 VectorType *VT = VectorType::get(ScalarTy, VF);
3047 EVT ETy = TLI->getValueType(DL, VT);
3048 if (!ETy.isSimple())
3049 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
3050 Alignment, AddressSpace);
3052 // TODO: Complete for other data-types and strides.
3053 // Each combination of Stride, ElementTy and VF results in a different
3054 // sequence; The cost tables are therefore accessed with:
3055 // Factor (stride) and VectorType=VFxElemType.
3056 // The Cost accounts only for the shuffle sequence;
3057 // The cost of the loads/stores is accounted for separately.
3059 static const CostTblEntry AVX2InterleavedLoadTbl[] = {
3060 { 2, MVT::v4i64, 6 }, //(load 8i64 and) deinterleave into 2 x 4i64
3061 { 2, MVT::v4f64, 6 }, //(load 8f64 and) deinterleave into 2 x 4f64
3063 { 3, MVT::v2i8, 10 }, //(load 6i8 and) deinterleave into 3 x 2i8
3064 { 3, MVT::v4i8, 4 }, //(load 12i8 and) deinterleave into 3 x 4i8
3065 { 3, MVT::v8i8, 9 }, //(load 24i8 and) deinterleave into 3 x 8i8
3066 { 3, MVT::v16i8, 11}, //(load 48i8 and) deinterleave into 3 x 16i8
3067 { 3, MVT::v32i8, 13}, //(load 96i8 and) deinterleave into 3 x 32i8
3068 { 3, MVT::v8f32, 17 }, //(load 24f32 and)deinterleave into 3 x 8f32
3070 { 4, MVT::v2i8, 12 }, //(load 8i8 and) deinterleave into 4 x 2i8
3071 { 4, MVT::v4i8, 4 }, //(load 16i8 and) deinterleave into 4 x 4i8
3072 { 4, MVT::v8i8, 20 }, //(load 32i8 and) deinterleave into 4 x 8i8
3073 { 4, MVT::v16i8, 39 }, //(load 64i8 and) deinterleave into 4 x 16i8
3074 { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8
3076 { 8, MVT::v8f32, 40 } //(load 64f32 and)deinterleave into 8 x 8f32
3079 static const CostTblEntry AVX2InterleavedStoreTbl[] = {
3080 { 2, MVT::v4i64, 6 }, //interleave into 2 x 4i64 into 8i64 (and store)
3081 { 2, MVT::v4f64, 6 }, //interleave into 2 x 4f64 into 8f64 (and store)
3083 { 3, MVT::v2i8, 7 }, //interleave 3 x 2i8 into 6i8 (and store)
3084 { 3, MVT::v4i8, 8 }, //interleave 3 x 4i8 into 12i8 (and store)
3085 { 3, MVT::v8i8, 11 }, //interleave 3 x 8i8 into 24i8 (and store)
3086 { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store)
3087 { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store)
3089 { 4, MVT::v2i8, 12 }, //interleave 4 x 2i8 into 8i8 (and store)
3090 { 4, MVT::v4i8, 9 }, //interleave 4 x 4i8 into 16i8 (and store)
3091 { 4, MVT::v8i8, 10 }, //interleave 4 x 8i8 into 32i8 (and store)
3092 { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store)
3093 { 4, MVT::v32i8, 12 } //interleave 4 x 32i8 into 128i8 (and store)
3096 if (Opcode == Instruction::Load) {
3097 if (const auto *Entry =
3098 CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
3099 return NumOfMemOps * MemOpCost + Entry->Cost;
3101 assert(Opcode == Instruction::Store &&
3102 "Expected Store Instruction at this point");
3103 if (const auto *Entry =
3104 CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
3105 return NumOfMemOps * MemOpCost + Entry->Cost;
3108 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
3109 Alignment, AddressSpace);
3112 // Get estimation for interleaved load/store operations and strided load.
3113 // \p Indices contains indices for strided load.
3114 // \p Factor - the factor of interleaving.
3115 // AVX-512 provides 3-src shuffles that significantly reduces the cost.
3116 int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
3118 ArrayRef<unsigned> Indices,
3120 unsigned AddressSpace,
3121 bool UseMaskForCond,
3122 bool UseMaskForGaps) {
3124 if (UseMaskForCond || UseMaskForGaps)
3125 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
3126 Alignment, AddressSpace,
3127 UseMaskForCond, UseMaskForGaps);
3129 // VecTy for interleave memop is <VF*Factor x Elt>.
3130 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
3131 // VecTy = <12 x i32>.
3133 // Calculate the number of memory operations (NumOfMemOps), required
3134 // for load/store the VecTy.
3135 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
3136 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
3137 unsigned LegalVTSize = LegalVT.getStoreSize();
3138 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
3140 // Get the cost of one memory operation.
3141 Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
3142 LegalVT.getVectorNumElements());
3143 unsigned MemOpCost =
3144 getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);
3146 unsigned VF = VecTy->getVectorNumElements() / Factor;
3147 MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
3149 if (Opcode == Instruction::Load) {
3150 // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
3151 // contain the cost of the optimized shuffle sequence that the
3152 // X86InterleavedAccess pass will generate.
3153 // The cost of loads and stores are computed separately from the table.
3155 // X86InterleavedAccess support only the following interleaved-access group.
3156 static const CostTblEntry AVX512InterleavedLoadTbl[] = {
3157 {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
3158 {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
3159 {3, MVT::v64i8, 22}, //(load 96i8 and) deinterleave into 3 x 32i8
3162 if (const auto *Entry =
3163 CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
3164 return NumOfMemOps * MemOpCost + Entry->Cost;
3165 //If an entry does not exist, fallback to the default implementation.
3167 // Kind of shuffle depends on number of loaded values.
3168 // If we load the entire data in one register, we can use a 1-src shuffle.
3169 // Otherwise, we'll merge 2 sources in each operation.
3170 TTI::ShuffleKind ShuffleKind =
3171 (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
3173 unsigned ShuffleCost =
3174 getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);
3176 unsigned NumOfLoadsInInterleaveGrp =
3177 Indices.size() ? Indices.size() : Factor;
3178 Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
3179 VecTy->getVectorNumElements() / Factor);
3180 unsigned NumOfResults =
3181 getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
3182 NumOfLoadsInInterleaveGrp;
3184 // About a half of the loads may be folded in shuffles when we have only
3185 // one result. If we have more than one result, we do not fold loads at all.
3186 unsigned NumOfUnfoldedLoads =
3187 NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
3189 // Get a number of shuffle operations per result.
3190 unsigned NumOfShufflesPerResult =
3191 std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
3193 // The SK_MergeTwoSrc shuffle clobbers one of src operands.
3194 // When we have more than one destination, we need additional instructions
3196 unsigned NumOfMoves = 0;
3197 if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
3198 NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
3200 int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
3201 NumOfUnfoldedLoads * MemOpCost + NumOfMoves;
3207 assert(Opcode == Instruction::Store &&
3208 "Expected Store Instruction at this point");
3209 // X86InterleavedAccess support only the following interleaved-access group.
3210 static const CostTblEntry AVX512InterleavedStoreTbl[] = {
3211 {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
3212 {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
3213 {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 96i8 (and store)
3215 {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store)
3216 {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
3217 {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
3218 {4, MVT::v64i8, 24} // interleave 4 x 32i8 into 256i8 (and store)
3221 if (const auto *Entry =
3222 CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
3223 return NumOfMemOps * MemOpCost + Entry->Cost;
3224 //If an entry does not exist, fallback to the default implementation.
3226 // There is no strided stores meanwhile. And store can't be folded in
3228 unsigned NumOfSources = Factor; // The number of values to be merged.
3229 unsigned ShuffleCost =
3230 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
3231 unsigned NumOfShufflesPerStore = NumOfSources - 1;
3233 // The SK_MergeTwoSrc shuffle clobbers one of src operands.
3234 // We need additional instructions to keep sources.
3235 unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
3236 int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
3241 int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
3243 ArrayRef<unsigned> Indices,
3245 unsigned AddressSpace,
3246 bool UseMaskForCond,
3247 bool UseMaskForGaps) {
3248 auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
3249 Type *EltTy = VecTy->getVectorElementType();
3250 if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
3251 EltTy->isIntegerTy(32) || EltTy->isPointerTy())
3253 if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
3257 if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
3258 return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
3259 Alignment, AddressSpace,
3260 UseMaskForCond, UseMaskForGaps);
3262 return getInterleavedMemoryOpCostAVX2(Opcode, VecTy, Factor, Indices,
3263 Alignment, AddressSpace,
3264 UseMaskForCond, UseMaskForGaps);
3266 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
3267 Alignment, AddressSpace,
3268 UseMaskForCond, UseMaskForGaps);