//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// About Cost Model numbers used below it's necessary to say the following:
/// the numbers correspond to some "generic" X86 CPU instead of a concrete CPU
/// model. Usually the numbers correspond to the CPU where the feature first
/// appeared. For example, if we do Subtarget.hasSSE42() in the lookups below
/// the cost is based on Nehalem as that was the first CPU to support that
/// feature level and thus has most likely the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss     sqrtss     rsqrtss
///   Piledriver      9-24      13-15      5
///   Pentium II,III  18        30         2
///   Nehalem         7-14      7-18       3
///   Haswell         10-13     11         5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
  TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    return 32 * 1024;  //  32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
  TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    LLVM_FALLTHROUGH;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAVX512())
      return 512;
    if (ST->hasAVX())
      return 256;
    if (ST->hasSSE1())
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(true);
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle the loop, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
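  // LT.first is the number of legal-type pieces the IR type splits into; most
  // paths below scale a per-piece table cost by it (e.g. a v8i32 op on an
  // SSE2-only target legalizes to 2 x v4i32, so LT.first == 2).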

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,   MVT::v8i16,  2 }, // pmullw
    { ISD::MUL,   MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL,  MVT::f64,    2 }, // mulsd
    { ISD::FMUL,  MVT::v2f64,  4 }, // mulpd
    { ISD::FMUL,  MVT::v4f32,  2 }, // mulps
    { ISD::FDIV,  MVT::f32,   17 }, // divss
    { ISD::FDIV,  MVT::v4f32, 39 }, // divps
    { ISD::FDIV,  MVT::f64,   32 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
    { ISD::FADD,  MVT::v2f64,  2 }, // addpd
    { ISD::FSUB,  MVT::v2f64,  2 }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long:
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 2X4 (addq throughput) = 17
    { ISD::MUL,   MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,   MVT::v2i64,  4 },
    { ISD::SUB,   MVT::v2i64,  4 },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by constants power-of-two are
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as that of the
    // previous operation; conservatively assume OP_None.
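    // For example, "sdiv <4 x i32> %x, 4" expands to, roughly:
    //   sra(x, 31), srl(_, 30), add(x, _), sra(_, 2).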
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,     2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,     2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,     4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,   4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8,   4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8,   8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.

    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    // XOP has faster vXi8 shifts.
    if ((ISD != ISD::SHL && ISD != ISD::SRL && ISD != ISD::SRA) ||
        !ST->hasXOP())
      if (const auto *Entry =
              CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
        return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 1 }, // psllw.
    { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA, MVT::v16i16, 1 }, // psraw.
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v8i16, 1 }, // psllw.
    { ISD::SHL, MVT::v4i32, 1 }, // pslld
    { ISD::SHL, MVT::v2i64, 1 }, // psllq.

    { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL, MVT::v4i32, 1 }, // psrld.
    { ISD::SRL, MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA, MVT::v8i16, 1 }, // psraw.
    { ISD::SRA, MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL,  MVT::v2i64, 1 },
    { ISD::MUL,  MVT::v4i64, 1 },
    { ISD::MUL,  MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,   MVT::v8i16,      1 }, // vpsllvw
    { ISD::SRL,   MVT::v8i16,      1 }, // vpsrlvw
    { ISD::SRA,   MVT::v8i16,      1 }, // vpsravw

    { ISD::SHL,   MVT::v16i16,     1 }, // vpsllvw
    { ISD::SRL,   MVT::v16i16,     1 }, // vpsrlvw
    { ISD::SRA,   MVT::v16i16,     1 }, // vpsravw

    { ISD::SHL,   MVT::v32i16,     1 }, // vpsllvw
    { ISD::SRL,   MVT::v32i16,     1 }, // vpsrlvw
    { ISD::SRA,   MVT::v32i16,     1 }, // vpsravw

    { ISD::SHL,   MVT::v64i8,     11 }, // vpblendvb sequence.
    { ISD::SRL,   MVT::v64i8,     11 }, // vpblendvb sequence.
    { ISD::SRA,   MVT::v64i8,     24 }, // vpblendvb sequence.

    { ISD::MUL,   MVT::v64i8,     11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,   MVT::v32i8,      4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,   MVT::v16i8,      4 }, // extend/pmullw/trunc sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV,  MVT::v64i8,  64*20 },
    { ISD::SDIV,  MVT::v32i16, 32*20 },
    { ISD::UDIV,  MVT::v64i8,  64*20 },
    { ISD::UDIV,  MVT::v32i16, 32*20 }
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,     1 },
    { ISD::SRL,  MVT::v16i32,     1 },
    { ISD::SRA,  MVT::v16i32,     1 },

    { ISD::SHL,  MVT::v8i64,      1 },
    { ISD::SRL,  MVT::v8i64,      1 },

    { ISD::SRA,  MVT::v2i64,      1 },
    { ISD::SRA,  MVT::v4i64,      1 },
    { ISD::SRA,  MVT::v8i64,      1 },

    { ISD::MUL,  MVT::v32i8,     13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,      5 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i32,     1 }, // pmulld
    { ISD::MUL,  MVT::v8i64,      8 }, // 3*pmuludq/3*shift/2*add

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v16i32, 16*20 },
    { ISD::SDIV, MVT::v8i64,   8*20 },
    { ISD::UDIV, MVT::v16i32, 16*20 },
    { ISD::UDIV, MVT::v8i64,   8*20 }
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them
    // custom so we can detect the cases where the shift amount is a scalar.
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 1 },
    { ISD::SRA, MVT::v4i32, 1 },
    { ISD::SHL, MVT::v8i32, 1 },
    { ISD::SRL, MVT::v8i32, 1 },
    { ISD::SRA, MVT::v8i32, 1 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
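      // (e.g. a constant shl of <16 x i16> by <1,1,...> is the same as a
      // multiply by <2,2,...>, which pmullw handles in one instruction.)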
      return getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
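    // (XOP variable shifts interpret the per-lane count as signed, so a
    // right shift is emitted as a left shift by the negated amount.)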
    { ISD::SHL, MVT::v16i8,    1 },
    { ISD::SRL, MVT::v16i8,    2 },
    { ISD::SRA, MVT::v16i8,    2 },
    { ISD::SHL, MVT::v8i16,    1 },
    { ISD::SRL, MVT::v8i16,    2 },
    { ISD::SRA, MVT::v8i16,    2 },
    { ISD::SHL, MVT::v4i32,    1 },
    { ISD::SRL, MVT::v4i32,    2 },
    { ISD::SRA, MVT::v4i32,    2 },
    { ISD::SHL, MVT::v2i64,    1 },
    { ISD::SRL, MVT::v2i64,    2 },
    { ISD::SRA, MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8,  2+2 },
    { ISD::SRL, MVT::v32i8,  4+2 },
    { ISD::SRA, MVT::v32i8,  4+2 },
    { ISD::SHL, MVT::v16i16, 2+2 },
    { ISD::SRL, MVT::v16i16, 4+2 },
    { ISD::SRA, MVT::v16i16, 4+2 },
    { ISD::SHL, MVT::v8i32,  2+2 },
    { ISD::SRL, MVT::v8i32,  4+2 },
    { ISD::SRA, MVT::v8i32,  4+2 },
    { ISD::SHL, MVT::v4i64,  2+2 },
    { ISD::SRL, MVT::v4i64,  4+2 },
    { ISD::SRA, MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL, MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL, MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL, MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL, MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA, MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA, MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA, MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // Vector shift left by non-uniform constant can be lowered
    // into vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,     24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16,    10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,      4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,      4 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,      1 }, // psubb
    { ISD::ADD,  MVT::v32i8,      1 }, // paddb
    { ISD::SUB,  MVT::v16i16,     1 }, // psubw
    { ISD::ADD,  MVT::v16i16,     1 }, // paddw
    { ISD::SUB,  MVT::v8i32,      1 }, // psubd
    { ISD::ADD,  MVT::v8i32,      1 }, // paddd
    { ISD::SUB,  MVT::v4i64,      1 }, // psubq
    { ISD::ADD,  MVT::v4i64,      1 }, // paddq

    { ISD::MUL,  MVT::v32i8,     17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,      7 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i16,     1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,      1 }, // pmulld
    { ISD::MUL,  MVT::v4i64,      8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,        7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,      7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,     14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,       14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,     14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,     28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,    4 },
    { ISD::MUL,  MVT::v8i32,     4 },
    { ISD::SUB,  MVT::v32i8,     4 },
    { ISD::ADD,  MVT::v32i8,     4 },
    { ISD::SUB,  MVT::v16i16,    4 },
    { ISD::ADD,  MVT::v16i16,    4 },
    { ISD::SUB,  MVT::v8i32,     4 },
    { ISD::ADD,  MVT::v8i32,     4 },
    { ISD::SUB,  MVT::v4i64,     4 },
    { ISD::ADD,  MVT::v4i64,     4 },

    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(3) and adds(2)
    // Because we believe v4i64 to be a legal type, we must also include the
    // extract+insert in the cost table. Therefore, the cost here is 18
    // instead of 8.
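    // (That is: two v2i64 multiplies at 8 each, plus one extract and one
    // insert, gives 16 + 1 + 1 = 18.)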
    { ISD::MUL,  MVT::v4i64,    18 },

    { ISD::MUL,  MVT::v32i8,    26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,      14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,    28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,      22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,    44 }, // SNB from http://www.agner.org/

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,      11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v32i8,  2*11+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v4i32,       4 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,  MVT::v8i32,   2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split

    { ISD::SRL,  MVT::v16i8,      12 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v32i8,  2*12+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v4i32,      11 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*11+2 }, // Shift each lane + blend + split.

    { ISD::SRA,  MVT::v16i8,      24 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  2*24+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v4i32,      12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*12+2 }, // Shift each lane + blend + split.

    { ISD::MUL,  MVT::v4i32,       1 }  // pmulld
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,      26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,     2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,   2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRL,  MVT::v16i8,      26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,      16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,   2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRA,  MVT::v16i8,      54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,      16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,      12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12+2 }, // srl/xor/sub sequence+split.

    { ISD::MUL,  MVT::v16i8,      12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,       1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,       6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,       8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,        23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,      39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,        38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,      69 }, // Pentium IV from http://www.agner.org/

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyways so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
    // to hide "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8,   16*20 },
    { ISD::SDIV, MVT::v8i16,    8*20 },
    { ISD::SDIV, MVT::v4i32,    4*20 },
    { ISD::SDIV, MVT::v2i64,    2*20 },
    { ISD::UDIV, MVT::v16i8,   16*20 },
    { ISD::UDIV, MVT::v8i16,    8*20 },
    { ISD::UDIV, MVT::v4i32,    4*20 },
    { ISD::UDIV, MVT::v2i64,    2*20 },
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // For Broadcasts we are splatting the first element from the first input
  // register, so only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // We are going to permute multiple sources and the result will be in multiple
  // destinations. We provide an accurate cost only for splits where the
  // element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.isVector() &&
        LegalVT.getVectorElementType().getSizeInBits() ==
            Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(Tp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      unsigned NumOfDests = LT.first;

      Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
                                         LegalVT.getVectorNumElements());

      unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
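      // Example: a v16i32 single-source permute with AVX2 (legal type v8i32)
      // has NumOfSrcs = 2 and NumOfDests = 2, so it is costed as
      // (2 - 1) * 2 = 2 two-source v8i32 shuffles.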
      return NumOfShuffles *
             getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
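    // Example: with a split factor of 2 we get 2 destinations, each needing
    // 2 * 2 - 1 = 3 two-source shuffles, i.e. 6 shuffles in total.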
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }

  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
    { TTI::SK_Reverse,          MVT::v64i8,  1 }, // vpermb
    { TTI::SK_Reverse,          MVT::v32i8,  1 }, // vpermb

    { TTI::SK_PermuteSingleSrc, MVT::v64i8,  1 }, // vpermb
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  1 }, // vpermb

    { TTI::SK_PermuteTwoSrc,    MVT::v64i8,  1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  1 }  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v32i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast,        MVT::v64i8,  1 }, // vpbroadcastb

    { TTI::SK_Reverse,          MVT::v32i16, 1 }, // vpermw
    { TTI::SK_Reverse,          MVT::v16i16, 1 }, // vpermw
    { TTI::SK_Reverse,          MVT::v64i8,  2 }, // pshufb + vshufi64x2

    { TTI::SK_PermuteSingleSrc, MVT::v32i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v8i16,  1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v64i8,  8 }, // extend to v32i16
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  3 }, // vpermw + zext/trunc

    { TTI::SK_PermuteTwoSrc,    MVT::v32i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v16i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  3 }, // zext + vpermt2w + trunc
    { TTI::SK_PermuteTwoSrc,    MVT::v64i8, 19 }, // 6 * v32i8 + 1
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  3 }  // zext + vpermt2w + trunc
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v8f64,  1 }, // vbroadcastpd
    { TTI::SK_Broadcast,        MVT::v16f32, 1 }, // vbroadcastps
    { TTI::SK_Broadcast,        MVT::v8i64,  1 }, // vpbroadcastq
    { TTI::SK_Broadcast,        MVT::v16i32, 1 }, // vpbroadcastd

    { TTI::SK_Reverse,          MVT::v8f64,  1 }, // vpermpd
    { TTI::SK_Reverse,          MVT::v16f32, 1 }, // vpermps
    { TTI::SK_Reverse,          MVT::v8i64,  1 }, // vpermq
    { TTI::SK_Reverse,          MVT::v16i32, 1 }, // vpermd

    { TTI::SK_PermuteSingleSrc, MVT::v8f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v2f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v16f32, 1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v4f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v8i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v2i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v16i32, 1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v4i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v16i8,  1 }, // pshufb

    { TTI::SK_PermuteTwoSrc,    MVT::v8f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc,    MVT::v16f32, 1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc,    MVT::v8i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc,    MVT::v16i32, 1 }, // vpermt2d
    { TTI::SK_PermuteTwoSrc,    MVT::v4f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc,    MVT::v8f32,  1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc,    MVT::v4i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc,    MVT::v8i32,  1 }, // vpermt2d
    { TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc,    MVT::v4f32,  1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc,    MVT::v4i32,  1 }  // vpermt2d
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v4f64,  1 }, // vbroadcastpd
    { TTI::SK_Broadcast,        MVT::v8f32,  1 }, // vbroadcastps
    { TTI::SK_Broadcast,        MVT::v4i64,  1 }, // vpbroadcastq
    { TTI::SK_Broadcast,        MVT::v8i32,  1 }, // vpbroadcastd
    { TTI::SK_Broadcast,        MVT::v16i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast,        MVT::v32i8,  1 }, // vpbroadcastb

    { TTI::SK_Reverse,          MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_Reverse,          MVT::v8f32,  1 }, // vpermps
    { TTI::SK_Reverse,          MVT::v4i64,  1 }, // vpermq
    { TTI::SK_Reverse,          MVT::v8i32,  1 }, // vpermd
    { TTI::SK_Reverse,          MVT::v16i16, 2 }, // vperm2i128 + pshufb
    { TTI::SK_Reverse,          MVT::v32i8,  2 }, // vperm2i128 + pshufb

    { TTI::SK_Alternate,        MVT::v16i16, 1 }, // vpblendw
    { TTI::SK_Alternate,        MVT::v32i8,  1 }, // vpblendvb

    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 4 }, // vperm2i128 + 2*vpshufb
                                                  // + vpblendvb
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  4 }, // vperm2i128 + 2*vpshufb
                                                  // + vpblendvb

    { TTI::SK_PermuteTwoSrc,    MVT::v4f64,  3 }, // 2*vpermpd + vblendpd
    { TTI::SK_PermuteTwoSrc,    MVT::v8f32,  3 }, // 2*vpermps + vblendps
    { TTI::SK_PermuteTwoSrc,    MVT::v4i64,  3 }, // 2*vpermq + vpblendd
    { TTI::SK_PermuteTwoSrc,    MVT::v8i32,  3 }, // 2*vpermd + vpblendd
    { TTI::SK_PermuteTwoSrc,    MVT::v16i16, 7 }, // 2*vperm2i128 + 4*vpshufb
                                                  // + vpblendvb
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  7 }, // 2*vperm2i128 + 4*vpshufb
                                                  // + vpblendvb
  };

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry XOPShuffleTbl[] = {
    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  2 }, // vperm2f128 + vpermil2pd
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  2 }, // vperm2f128 + vpermil2ps
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  2 }, // vperm2f128 + vpermil2pd
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  2 }, // vperm2f128 + vpermil2ps
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 4 }, // vextractf128 + 2*vpperm
                                                  // + vinsertf128
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  4 }, // vextractf128 + 2*vpperm
                                                  // + vinsertf128

    { TTI::SK_PermuteTwoSrc,    MVT::v16i16, 9 }, // 2*vextractf128 + 6*vpperm
                                                  // + vinsertf128
    { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  1 }, // vpperm
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  9 }, // 2*vextractf128 + 6*vpperm
                                                  // + vinsertf128
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  1 }, // vpperm
  };

  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v4f64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Broadcast,        MVT::v8f32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Broadcast,        MVT::v4i64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Broadcast,        MVT::v8i32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Broadcast,        MVT::v16i16, 3 }, // vpshuflw + vpshufd + vinsertf128
    { TTI::SK_Broadcast,        MVT::v32i8,  2 }, // vpshufb + vinsertf128

    { TTI::SK_Reverse,          MVT::v4f64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Reverse,          MVT::v8f32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Reverse,          MVT::v4i64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Reverse,          MVT::v8i32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Reverse,          MVT::v16i16, 4 }, // vextractf128 + 2*pshufb
                                                  // + vinsertf128
    { TTI::SK_Reverse,          MVT::v32i8,  4 }, // vextractf128 + 2*pshufb
                                                  // + vinsertf128

    { TTI::SK_Alternate,        MVT::v4i64,  1 }, // vblendpd
    { TTI::SK_Alternate,        MVT::v4f64,  1 }, // vblendpd
    { TTI::SK_Alternate,        MVT::v8i32,  1 }, // vblendps
    { TTI::SK_Alternate,        MVT::v8f32,  1 }, // vblendps
    { TTI::SK_Alternate,        MVT::v16i16, 3 }, // vpand + vpandn + vpor
    { TTI::SK_Alternate,        MVT::v32i8,  3 }, // vpand + vpandn + vpor

    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  3 }, // 2*vperm2f128 + vshufpd
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  3 }, // 2*vperm2f128 + vshufpd
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  4 }, // 2*vperm2f128 + 2*vshufps
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  4 }, // 2*vperm2f128 + 2*vshufps
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 8 }, // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  8 }, // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128

    { TTI::SK_PermuteTwoSrc,    MVT::v4f64,   4 }, // 2*vperm2f128 + 2*vshufpd
    { TTI::SK_PermuteTwoSrc,    MVT::v8f32,   4 }, // 2*vperm2f128 + 2*vshufps
    { TTI::SK_PermuteTwoSrc,    MVT::v4i64,   4 }, // 2*vperm2f128 + 2*vshufpd
    { TTI::SK_PermuteTwoSrc,    MVT::v8i32,   4 }, // 2*vperm2f128 + 2*vshufps
    { TTI::SK_PermuteTwoSrc,    MVT::v16i16, 15 }, // 2*vextractf128 + 8*pshufb
                                                   // + 4*por + vinsertf128
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  15 }, // 2*vextractf128 + 8*pshufb
                                                   // + 4*por + vinsertf128
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41ShuffleTbl[] = {
    { TTI::SK_Alternate, MVT::v2i64,  1 }, // pblendw
    { TTI::SK_Alternate, MVT::v2f64,  1 }, // movsd
    { TTI::SK_Alternate, MVT::v4i32,  1 }, // pblendw
    { TTI::SK_Alternate, MVT::v4f32,  1 }, // blendps
    { TTI::SK_Alternate, MVT::v8i16,  1 }, // pblendw
    { TTI::SK_Alternate, MVT::v16i8,  1 }  // pblendvb
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSSE3ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v8i16, 1 }, // pshufb
    { TTI::SK_Broadcast,        MVT::v16i8, 1 }, // pshufb

    { TTI::SK_Reverse,          MVT::v8i16, 1 }, // pshufb
    { TTI::SK_Reverse,          MVT::v16i8, 1 }, // pshufb

    { TTI::SK_Alternate,        MVT::v8i16, 3 }, // 2*pshufb + por
    { TTI::SK_Alternate,        MVT::v16i8, 3 }, // 2*pshufb + por

    { TTI::SK_PermuteSingleSrc, MVT::v8i16, 1 }, // pshufb
    { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 }, // pshufb

    { TTI::SK_PermuteTwoSrc,    MVT::v8i16, 3 }, // 2*pshufb + por
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8, 3 }, // 2*pshufb + por
  };

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v2f64,  1 }, // shufpd
    { TTI::SK_Broadcast,        MVT::v2i64,  1 }, // pshufd
    { TTI::SK_Broadcast,        MVT::v4i32,  1 }, // pshufd
    { TTI::SK_Broadcast,        MVT::v8i16,  2 }, // pshuflw + pshufd
    { TTI::SK_Broadcast,        MVT::v16i8,  3 }, // unpck + pshuflw + pshufd

    { TTI::SK_Reverse,          MVT::v2f64,  1 }, // shufpd
    { TTI::SK_Reverse,          MVT::v2i64,  1 }, // pshufd
    { TTI::SK_Reverse,          MVT::v4i32,  1 }, // pshufd
    { TTI::SK_Reverse,          MVT::v8i16,  3 }, // pshuflw + pshufhw + pshufd
    { TTI::SK_Reverse,          MVT::v16i8,  9 }, // 2*pshuflw + 2*pshufhw
                                                  // + 2*pshufd + 2*unpck + packus

    { TTI::SK_Alternate,        MVT::v2i64,  1 }, // movsd
    { TTI::SK_Alternate,        MVT::v2f64,  1 }, // movsd
    { TTI::SK_Alternate,        MVT::v4i32,  2 }, // 2*shufps
    { TTI::SK_Alternate,        MVT::v8i16,  3 }, // pand + pandn + por
    { TTI::SK_Alternate,        MVT::v16i8,  3 }, // pand + pandn + por

    { TTI::SK_PermuteSingleSrc, MVT::v2f64,  1 }, // shufpd
    { TTI::SK_PermuteSingleSrc, MVT::v2i64,  1 }, // pshufd
    { TTI::SK_PermuteSingleSrc, MVT::v4i32,  1 }, // pshufd
    { TTI::SK_PermuteSingleSrc, MVT::v8i16,  5 }, // 2*pshuflw + 2*pshufhw
                                                  // + pshufd/unpck
    { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
                                                  // + 2*pshufd + 2*unpck + 2*packus

    { TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1 }, // shufpd
    { TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1 }, // shufpd
    { TTI::SK_PermuteTwoSrc,    MVT::v4i32,  2 }, // 2*{unpck,movsd,pshufd}
    { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  8 }, // blend+permute
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8, 13 }, // blend+permute
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v4f32, 1 }, // shufps
    { TTI::SK_Reverse,          MVT::v4f32, 1 }, // shufps
    { TTI::SK_Alternate,        MVT::v4f32, 2 }, // 2*shufps
    { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
    { TTI::SK_PermuteTwoSrc,    MVT::v4f32, 2 }, // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // FIXME: Need a better design of the cost table to handle non-simple types
  // and the potentially massive number of combinations
  // (elem_num x src_type x dst_type).

  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },

    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },

    { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f64,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f64,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f64,  1 },

    { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f64,  1 },
  };

  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.

  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,   1 },
    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v16f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,   1 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32,  1 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32,  1 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i64,   1 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,   1 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,   2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,   2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,   1 },

    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,    4 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i8,    2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,   2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,   2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,   1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  26 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  26 },

    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i1,    4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i1,   3 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i8,    2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,    2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,    2 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i8,    2 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i16,   5 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i16,   2 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,   2 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,   1 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,   1 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,   1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,   1 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,   1 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,   5 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,   5 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  12 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  26 },

    { ISD::FP_TO_UINT,  MVT::v2i32,  MVT::v2f32,   1 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,   1 },
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,   1 },
    { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f64,   2 },
    { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f64,   2 },
    { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32,  2 },
    { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v16f32,  2 },
  };

  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };

  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 20 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 13 },

    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
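    // (Hence 4 per element below: v8i32 <- v8f32 is 8 * 4 = 32 and
    // v4i32 <- v4f64 is 4 * 4 = 16.)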
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },

    { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
    { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
  };

  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
  };

  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },

    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   8 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  9 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  5 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  2 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 3 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  3 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 7 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 10 },
  };

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return LTSrc.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE41()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE2()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
}

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC,   MVT::v2i64,   8 },
    { ISD::SETCC,   MVT::v4i32,   1 },
    { ISD::SETCC,   MVT::v8i16,   1 },
    { ISD::SETCC,   MVT::v16i8,   1 },
  };
1461 static const CostTblEntry SSE42CostTbl[] = {
1462 { ISD::SETCC, MVT::v2f64, 1 },
1463 { ISD::SETCC, MVT::v4f32, 1 },
1464 { ISD::SETCC, MVT::v2i64, 1 },
1467 static const CostTblEntry AVX1CostTbl[] = {
1468 { ISD::SETCC, MVT::v4f64, 1 },
1469 { ISD::SETCC, MVT::v8f32, 1 },
1470 // AVX1 does not support 8-wide integer compare.
1471 { ISD::SETCC, MVT::v4i64, 4 },
1472 { ISD::SETCC, MVT::v8i32, 4 },
1473 { ISD::SETCC, MVT::v16i16, 4 },
1474 { ISD::SETCC, MVT::v32i8, 4 },
1477 static const CostTblEntry AVX2CostTbl[] = {
1478 { ISD::SETCC, MVT::v4i64, 1 },
1479 { ISD::SETCC, MVT::v8i32, 1 },
1480 { ISD::SETCC, MVT::v16i16, 1 },
1481 { ISD::SETCC, MVT::v32i8, 1 },
1484 static const CostTblEntry AVX512CostTbl[] = {
1485 { ISD::SETCC, MVT::v8i64, 1 },
1486 { ISD::SETCC, MVT::v16i32, 1 },
1487 { ISD::SETCC, MVT::v8f64, 1 },
1488 { ISD::SETCC, MVT::v16f32, 1 },
1491 if (ST->hasAVX512())
1492 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
1493 return LT.first * Entry->Cost;
1495 if (ST->hasAVX2())
1496 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
1497 return LT.first * Entry->Cost;
1499 if (ST->hasAVX())
1500 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
1501 return LT.first * Entry->Cost;
1503 if (ST->hasSSE42())
1504 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
1505 return LT.first * Entry->Cost;
1507 if (ST->hasSSE2())
1508 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
1509 return LT.first * Entry->Cost;
1511 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
1512 }
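// Worked example (illustrative): 'icmp sgt <2 x i64> %a, %b' legalizes to a
// single v2i64 (LT.first == 1). Plain SSE2 has no native 64-bit element
// compare, so its table charges 8; with SSE4.2's PCMPGTQ the same compare
// costs 1.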
1514 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
1516 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
1517 ArrayRef<Type *> Tys, FastMathFlags FMF,
1518 unsigned ScalarizationCostPassed) {
1519 // Costs should match the codegen from:
1520 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
1521 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
1522 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
1523 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
1524 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
1525 static const CostTblEntry AVX512CDCostTbl[] = {
1526 { ISD::CTLZ, MVT::v8i64, 1 },
1527 { ISD::CTLZ, MVT::v16i32, 1 },
1528 { ISD::CTLZ, MVT::v32i16, 8 },
1529 { ISD::CTLZ, MVT::v64i8, 20 },
1530 { ISD::CTLZ, MVT::v4i64, 1 },
1531 { ISD::CTLZ, MVT::v8i32, 1 },
1532 { ISD::CTLZ, MVT::v16i16, 4 },
1533 { ISD::CTLZ, MVT::v32i8, 10 },
1534 { ISD::CTLZ, MVT::v2i64, 1 },
1535 { ISD::CTLZ, MVT::v4i32, 1 },
1536 { ISD::CTLZ, MVT::v8i16, 4 },
1537 { ISD::CTLZ, MVT::v16i8, 4 },
1539 static const CostTblEntry AVX512BWCostTbl[] = {
1540 { ISD::BITREVERSE, MVT::v8i64, 5 },
1541 { ISD::BITREVERSE, MVT::v16i32, 5 },
1542 { ISD::BITREVERSE, MVT::v32i16, 5 },
1543 { ISD::BITREVERSE, MVT::v64i8, 5 },
1544 { ISD::CTLZ, MVT::v8i64, 23 },
1545 { ISD::CTLZ, MVT::v16i32, 22 },
1546 { ISD::CTLZ, MVT::v32i16, 18 },
1547 { ISD::CTLZ, MVT::v64i8, 17 },
1548 { ISD::CTPOP, MVT::v8i64, 7 },
1549 { ISD::CTPOP, MVT::v16i32, 11 },
1550 { ISD::CTPOP, MVT::v32i16, 9 },
1551 { ISD::CTPOP, MVT::v64i8, 6 },
1552 { ISD::CTTZ, MVT::v8i64, 10 },
1553 { ISD::CTTZ, MVT::v16i32, 14 },
1554 { ISD::CTTZ, MVT::v32i16, 12 },
1555 { ISD::CTTZ, MVT::v64i8, 9 },
1557 static const CostTblEntry AVX512CostTbl[] = {
1558 { ISD::BITREVERSE, MVT::v8i64, 36 },
1559 { ISD::BITREVERSE, MVT::v16i32, 24 },
1560 { ISD::CTLZ, MVT::v8i64, 29 },
1561 { ISD::CTLZ, MVT::v16i32, 35 },
1562 { ISD::CTPOP, MVT::v8i64, 16 },
1563 { ISD::CTPOP, MVT::v16i32, 24 },
1564 { ISD::CTTZ, MVT::v8i64, 20 },
1565 { ISD::CTTZ, MVT::v16i32, 28 },
1567 static const CostTblEntry XOPCostTbl[] = {
1568 { ISD::BITREVERSE, MVT::v4i64, 4 },
1569 { ISD::BITREVERSE, MVT::v8i32, 4 },
1570 { ISD::BITREVERSE, MVT::v16i16, 4 },
1571 { ISD::BITREVERSE, MVT::v32i8, 4 },
1572 { ISD::BITREVERSE, MVT::v2i64, 1 },
1573 { ISD::BITREVERSE, MVT::v4i32, 1 },
1574 { ISD::BITREVERSE, MVT::v8i16, 1 },
1575 { ISD::BITREVERSE, MVT::v16i8, 1 },
1576 { ISD::BITREVERSE, MVT::i64, 3 },
1577 { ISD::BITREVERSE, MVT::i32, 3 },
1578 { ISD::BITREVERSE, MVT::i16, 3 },
1579 { ISD::BITREVERSE, MVT::i8, 3 }
1581 static const CostTblEntry AVX2CostTbl[] = {
1582 { ISD::BITREVERSE, MVT::v4i64, 5 },
1583 { ISD::BITREVERSE, MVT::v8i32, 5 },
1584 { ISD::BITREVERSE, MVT::v16i16, 5 },
1585 { ISD::BITREVERSE, MVT::v32i8, 5 },
1586 { ISD::BSWAP, MVT::v4i64, 1 },
1587 { ISD::BSWAP, MVT::v8i32, 1 },
1588 { ISD::BSWAP, MVT::v16i16, 1 },
1589 { ISD::CTLZ, MVT::v4i64, 23 },
1590 { ISD::CTLZ, MVT::v8i32, 18 },
1591 { ISD::CTLZ, MVT::v16i16, 14 },
1592 { ISD::CTLZ, MVT::v32i8, 9 },
1593 { ISD::CTPOP, MVT::v4i64, 7 },
1594 { ISD::CTPOP, MVT::v8i32, 11 },
1595 { ISD::CTPOP, MVT::v16i16, 9 },
1596 { ISD::CTPOP, MVT::v32i8, 6 },
1597 { ISD::CTTZ, MVT::v4i64, 10 },
1598 { ISD::CTTZ, MVT::v8i32, 14 },
1599 { ISD::CTTZ, MVT::v16i16, 12 },
1600 { ISD::CTTZ, MVT::v32i8, 9 },
1601 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/
1602 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
1603 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
1604 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/
1605 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
1606 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
1608 static const CostTblEntry AVX1CostTbl[] = {
1609 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert
1610 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert
1611 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
1612 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert
1613 { ISD::BSWAP, MVT::v4i64, 4 },
1614 { ISD::BSWAP, MVT::v8i32, 4 },
1615 { ISD::BSWAP, MVT::v16i16, 4 },
1616 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert
1617 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert
1618 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
1619 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
1620 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
1621 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
1622 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
1623 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
1624 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert
1625 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert
1626 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
1627 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
1628 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
1629 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
1630 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
1631 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/
1632 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/
1633 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/
1635 static const CostTblEntry SSE42CostTbl[] = {
1636 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
1637 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
1639 static const CostTblEntry SSSE3CostTbl[] = {
1640 { ISD::BITREVERSE, MVT::v2i64, 5 },
1641 { ISD::BITREVERSE, MVT::v4i32, 5 },
1642 { ISD::BITREVERSE, MVT::v8i16, 5 },
1643 { ISD::BITREVERSE, MVT::v16i8, 5 },
1644 { ISD::BSWAP, MVT::v2i64, 1 },
1645 { ISD::BSWAP, MVT::v4i32, 1 },
1646 { ISD::BSWAP, MVT::v8i16, 1 },
1647 { ISD::CTLZ, MVT::v2i64, 23 },
1648 { ISD::CTLZ, MVT::v4i32, 18 },
1649 { ISD::CTLZ, MVT::v8i16, 14 },
1650 { ISD::CTLZ, MVT::v16i8, 9 },
1651 { ISD::CTPOP, MVT::v2i64, 7 },
1652 { ISD::CTPOP, MVT::v4i32, 11 },
1653 { ISD::CTPOP, MVT::v8i16, 9 },
1654 { ISD::CTPOP, MVT::v16i8, 6 },
1655 { ISD::CTTZ, MVT::v2i64, 10 },
1656 { ISD::CTTZ, MVT::v4i32, 14 },
1657 { ISD::CTTZ, MVT::v8i16, 12 },
1658 { ISD::CTTZ, MVT::v16i8, 9 }
1660 static const CostTblEntry SSE2CostTbl[] = {
1661 { ISD::BITREVERSE, MVT::v2i64, 29 },
1662 { ISD::BITREVERSE, MVT::v4i32, 27 },
1663 { ISD::BITREVERSE, MVT::v8i16, 27 },
1664 { ISD::BITREVERSE, MVT::v16i8, 20 },
1665 { ISD::BSWAP, MVT::v2i64, 7 },
1666 { ISD::BSWAP, MVT::v4i32, 7 },
1667 { ISD::BSWAP, MVT::v8i16, 7 },
1668 { ISD::CTLZ, MVT::v2i64, 25 },
1669 { ISD::CTLZ, MVT::v4i32, 26 },
1670 { ISD::CTLZ, MVT::v8i16, 20 },
1671 { ISD::CTLZ, MVT::v16i8, 17 },
1672 { ISD::CTPOP, MVT::v2i64, 12 },
1673 { ISD::CTPOP, MVT::v4i32, 15 },
1674 { ISD::CTPOP, MVT::v8i16, 13 },
1675 { ISD::CTPOP, MVT::v16i8, 10 },
1676 { ISD::CTTZ, MVT::v2i64, 14 },
1677 { ISD::CTTZ, MVT::v4i32, 18 },
1678 { ISD::CTTZ, MVT::v8i16, 16 },
1679 { ISD::CTTZ, MVT::v16i8, 13 },
1680 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/
1681 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
1683 static const CostTblEntry SSE1CostTbl[] = {
1684 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/
1685 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
1687 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
1688 { ISD::BITREVERSE, MVT::i64, 14 }
1690 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
1691 { ISD::BITREVERSE, MVT::i32, 14 },
1692 { ISD::BITREVERSE, MVT::i16, 14 },
1693 { ISD::BITREVERSE, MVT::i8, 11 }
1696 unsigned ISD = ISD::DELETED_NODE;
1697 switch (IID) {
1698 default:
1699 break;
1700 case Intrinsic::bitreverse:
1701 ISD = ISD::BITREVERSE;
1702 break;
1703 case Intrinsic::bswap:
1704 ISD = ISD::BSWAP;
1705 break;
1706 case Intrinsic::ctlz:
1707 ISD = ISD::CTLZ;
1708 break;
1709 case Intrinsic::ctpop:
1710 ISD = ISD::CTPOP;
1711 break;
1712 case Intrinsic::cttz:
1713 ISD = ISD::CTTZ;
1714 break;
1715 case Intrinsic::sqrt:
1716 ISD = ISD::FSQRT;
1717 break;
1718 }
1720 // Legalize the type.
1721 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
1722 MVT MTy = LT.second;
1724 // Attempt to lookup cost.
1725 if (ST->hasCDI())
1726 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
1727 return LT.first * Entry->Cost;
1729 if (ST->hasBWI())
1730 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
1731 return LT.first * Entry->Cost;
1733 if (ST->hasAVX512())
1734 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
1735 return LT.first * Entry->Cost;
1737 if (ST->hasXOP())
1738 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
1739 return LT.first * Entry->Cost;
1741 if (ST->hasAVX2())
1742 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
1743 return LT.first * Entry->Cost;
1745 if (ST->hasAVX())
1746 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
1747 return LT.first * Entry->Cost;
1749 if (ST->hasSSE42())
1750 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
1751 return LT.first * Entry->Cost;
1753 if (ST->hasSSSE3())
1754 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
1755 return LT.first * Entry->Cost;
1757 if (ST->hasSSE2())
1758 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
1759 return LT.first * Entry->Cost;
1761 if (ST->hasSSE1())
1762 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
1763 return LT.first * Entry->Cost;
1765 if (ST->is64Bit())
1766 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
1767 return LT.first * Entry->Cost;
1769 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
1770 return LT.first * Entry->Cost;
1772 return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF, ScalarizationCostPassed);
1773 }
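// Worked example (illustrative): '@llvm.ctpop.v8i32' on an AVX2 target
// legalizes to one v8i32 and hits { ISD::CTPOP, MVT::v8i32, 11 }, so the
// cost is 11. On an SSE2-only target the type splits into two v4i32 halves
// (LT.first == 2), so the v4i32 entry cost of 15 is doubled to 30.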
1775 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
1776 ArrayRef<Value *> Args, FastMathFlags FMF, unsigned VF) {
1777 return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF, VF);
1780 int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
1781 assert(Val->isVectorTy() && "This must be a vector type");
1783 Type *ScalarType = Val->getScalarType();
1786 // Legalize the type.
1787 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
1789 // This type is legalized to a scalar type.
1790 if (!LT.second.isVector())
1791 return 0;
1793 // The type may be split. Normalize the index to the new type.
1794 unsigned Width = LT.second.getVectorNumElements();
1795 Index = Index % Width;
1797 // Floating point scalars are already located in index #0.
1798 if (ScalarType->isFloatingPointTy() && Index == 0)
1799 return 0;
1802 // Add to the base cost if we know that the extracted element of a vector is
1803 // destined to be moved to and used in the integer register file.
1804 int RegisterFileMoveCost = 0;
1805 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
1806 RegisterFileMoveCost = 1;
1808 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
1809 }
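// Worked example (illustrative): 'extractelement <8 x i64> %v, i32 5' on an
// SSE2 target legalizes <8 x i64> into four v2i64 registers, so Width == 2
// and the query is costed as extracting element 5 % 2 == 1 from a legal
// v2i64.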
1811 int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
1812 unsigned AddressSpace, const Instruction *I) {
1813 // Handle non-power-of-two vectors such as <3 x float>
1814 if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
1815 unsigned NumElem = VTy->getVectorNumElements();
1817 // Handle a few common cases:
1819 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
1820 // Cost = 64 bit store + extract + 32 bit store.
1821 return 3;
1824 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
1825 // Cost = 128 bit store + unpack + 64 bit store.
1826 return 3;
1828 // Assume that all other non-power-of-two numbers are scalarized.
1829 if (!isPowerOf2_32(NumElem)) {
1830 int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
1831 AddressSpace);
1832 int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
1833 Opcode == Instruction::Store);
1834 return NumElem * Cost + SplitCost;
1835 }
1836 }
1838 // Legalize the type.
1839 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
1840 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
1841 "Invalid Opcode");
1843 // Each load/store unit costs 1.
1844 int Cost = LT.first * 1;
1846 // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
1847 // proxy for a double-pumped AVX memory interface such as on Sandy Bridge.
1848 if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
1849 Cost *= 2;
1851 return Cost;
1852 }
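// Worked example (illustrative): in getMemoryOpCost above, an unaligned
// store of <8 x float> (a single 32-byte AVX register) is charged 2 instead
// of 1 when isUnalignedMem32Slow() holds, e.g. on Sandy Bridge.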
1854 int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
1855 unsigned Alignment,
1856 unsigned AddressSpace) {
1857 VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
1858 if (!SrcVTy)
1859 // For a scalar type, use the regular memory-op cost without a mask.
1860 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);
1862 unsigned NumElem = SrcVTy->getVectorNumElements();
1863 VectorType *MaskTy =
1864 VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
1865 if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
1866 (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
1867 !isPowerOf2_32(NumElem)) {
1869 int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
1870 int ScalarCompareCost = getCmpSelInstrCost(
1871 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
1872 int BranchCost = getCFInstrCost(Instruction::Br);
1873 int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
1875 int ValueSplitCost = getScalarizationOverhead(
1876 SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
1877 int MemopCost =
1878 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
1879 Alignment, AddressSpace);
1880 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
1881 }
1883 // Legalize the type.
1884 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
1885 auto VT = TLI->getValueType(DL, SrcVTy);
1886 int Cost = 0;
1887 if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
1888 LT.second.getVectorNumElements() == NumElem)
1889 // Promotion requires an expand/truncate for the data and a shuffle for the mask.
1890 Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
1891 getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);
1893 else if (LT.second.getVectorNumElements() > NumElem) {
1894 VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
1895 LT.second.getVectorNumElements());
1896 // Expanding requires filling the mask with zeroes.
1897 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
1899 if (!ST->hasAVX512())
1900 return Cost + LT.first*4; // Each maskmov costs 4
1902 // AVX-512 masked load/store is cheaper.
1903 return Cost + LT.first;
1906 int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
1907 const SCEV *Ptr) {
1908 // Address computations in vectorized code with non-consecutive addresses will
1909 // likely result in more instructions compared to scalar code where the
1910 // computation can more often be merged into the index mode. The resulting
1911 // extra micro-ops can significantly decrease throughput.
1912 unsigned NumVectorInstToHideOverhead = 10;
1914 // Cost modeling of strided access computation is hidden by the addressing
1915 // modes of X86 regardless of the stride value. We don't believe that there
1916 // is a difference between constant strided access in general and a constant
1917 // stride whose value is less than or equal to 64.
1918 // Even in the case of a (loop-invariant) stride whose value is not known at
1919 // compile time, the address computation will not incur more than one extra
1920 // ADD instruction.
1921 if (Ty->isVectorTy() && SE) {
1922 if (!BaseT::isStridedAccess(Ptr))
1923 return NumVectorInstToHideOverhead;
1924 if (!BaseT::getConstantStrideStep(SE, Ptr))
1925 return 1;
1926 }
1928 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
1929 }
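// Worked example (illustrative): a vectorized access whose pointer is not a
// strided SCEV (e.g. a gather through loaded indices, A[B[i]]) is charged
// NumVectorInstToHideOverhead == 10 per address computation above, while a
// constant-stride access is charged only 1.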
1931 int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *ValTy,
1932 bool IsPairwise) {
1934 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1936 MVT MTy = LT.second;
1938 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1939 assert(ISD && "Invalid opcode");
1941 // We use the Intel Architecture Code Analyzer (IACA) to measure the
1942 // throughput and use that as the cost.
1944 static const CostTblEntry SSE42CostTblPairWise[] = {
1945 { ISD::FADD, MVT::v2f64, 2 },
1946 { ISD::FADD, MVT::v4f32, 4 },
1947 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
1948 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
1949 { ISD::ADD, MVT::v8i16, 5 },
1952 static const CostTblEntry AVX1CostTblPairWise[] = {
1953 { ISD::FADD, MVT::v4f32, 4 },
1954 { ISD::FADD, MVT::v4f64, 5 },
1955 { ISD::FADD, MVT::v8f32, 7 },
1956 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
1957 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
1958 { ISD::ADD, MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
1959 { ISD::ADD, MVT::v8i16, 5 },
1960 { ISD::ADD, MVT::v8i32, 5 },
1963 static const CostTblEntry SSE42CostTblNoPairWise[] = {
1964 { ISD::FADD, MVT::v2f64, 2 },
1965 { ISD::FADD, MVT::v4f32, 4 },
1966 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
1967 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
1968 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
1971 static const CostTblEntry AVX1CostTblNoPairWise[] = {
1972 { ISD::FADD, MVT::v4f32, 3 },
1973 { ISD::FADD, MVT::v4f64, 3 },
1974 { ISD::FADD, MVT::v8f32, 4 },
1975 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
1976 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
1977 { ISD::ADD, MVT::v4i64, 3 },
1978 { ISD::ADD, MVT::v8i16, 4 },
1979 { ISD::ADD, MVT::v8i32, 5 },
1980 };
1982 if (IsPairwise) {
1983 if (ST->hasAVX())
1984 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
1985 return LT.first * Entry->Cost;
1987 if (ST->hasSSE42())
1988 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
1989 return LT.first * Entry->Cost;
1990 } else {
1991 if (ST->hasAVX())
1992 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
1993 return LT.first * Entry->Cost;
1995 if (ST->hasSSE42())
1996 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
1997 return LT.first * Entry->Cost;
1998 }
2000 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise);
2001 }
2003 int X86TTIImpl::getMinMaxReductionCost(Type *ValTy, Type *CondTy,
2004 bool IsPairwise, bool IsUnsigned) {
2005 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2007 MVT MTy = LT.second;
2009 int ISD;
2010 if (ValTy->isIntOrIntVectorTy()) {
2011 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
2012 } else {
2013 assert(ValTy->isFPOrFPVectorTy() &&
2014 "Expected floating-point or integer vector type.");
2015 ISD = ISD::FMINNUM;
2016 }
2018 // We use the Intel Architecture Code Analyzer (IACA) to measure the
2019 // throughput and use that as the cost.
2021 static const CostTblEntry SSE42CostTblPairWise[] = {
2022 {ISD::FMINNUM, MVT::v2f64, 3},
2023 {ISD::FMINNUM, MVT::v4f32, 2},
2024 {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8"
2025 {ISD::UMIN, MVT::v2i64, 8}, // The data reported by the IACA is "8.6"
2026 {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5"
2027 {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8"
2028 {ISD::SMIN, MVT::v8i16, 2},
2029 {ISD::UMIN, MVT::v8i16, 2},
2032 static const CostTblEntry AVX1CostTblPairWise[] = {
2033 {ISD::FMINNUM, MVT::v4f32, 1},
2034 {ISD::FMINNUM, MVT::v4f64, 1},
2035 {ISD::FMINNUM, MVT::v8f32, 2},
2036 {ISD::SMIN, MVT::v2i64, 3},
2037 {ISD::UMIN, MVT::v2i64, 3},
2038 {ISD::SMIN, MVT::v4i32, 1},
2039 {ISD::UMIN, MVT::v4i32, 1},
2040 {ISD::SMIN, MVT::v8i16, 1},
2041 {ISD::UMIN, MVT::v8i16, 1},
2042 {ISD::SMIN, MVT::v8i32, 3},
2043 {ISD::UMIN, MVT::v8i32, 3},
2046 static const CostTblEntry AVX2CostTblPairWise[] = {
2047 {ISD::SMIN, MVT::v4i64, 2},
2048 {ISD::UMIN, MVT::v4i64, 2},
2049 {ISD::SMIN, MVT::v8i32, 1},
2050 {ISD::UMIN, MVT::v8i32, 1},
2051 {ISD::SMIN, MVT::v16i16, 1},
2052 {ISD::UMIN, MVT::v16i16, 1},
2053 {ISD::SMIN, MVT::v32i8, 2},
2054 {ISD::UMIN, MVT::v32i8, 2},
2057 static const CostTblEntry AVX512CostTblPairWise[] = {
2058 {ISD::FMINNUM, MVT::v8f64, 1},
2059 {ISD::FMINNUM, MVT::v16f32, 2},
2060 {ISD::SMIN, MVT::v8i64, 2},
2061 {ISD::UMIN, MVT::v8i64, 2},
2062 {ISD::SMIN, MVT::v16i32, 1},
2063 {ISD::UMIN, MVT::v16i32, 1},
2066 static const CostTblEntry SSE42CostTblNoPairWise[] = {
2067 {ISD::FMINNUM, MVT::v2f64, 3},
2068 {ISD::FMINNUM, MVT::v4f32, 3},
2069 {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8"
2070 {ISD::UMIN, MVT::v2i64, 9}, // The data reported by the IACA is "8.6"
2071 {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5"
2072 {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8"
2073 {ISD::SMIN, MVT::v8i16, 1}, // The data reported by the IACA is "1.5"
2074 {ISD::UMIN, MVT::v8i16, 2}, // The data reported by the IACA is "1.8"
2077 static const CostTblEntry AVX1CostTblNoPairWise[] = {
2078 {ISD::FMINNUM, MVT::v4f32, 1},
2079 {ISD::FMINNUM, MVT::v4f64, 1},
2080 {ISD::FMINNUM, MVT::v8f32, 1},
2081 {ISD::SMIN, MVT::v2i64, 3},
2082 {ISD::UMIN, MVT::v2i64, 3},
2083 {ISD::SMIN, MVT::v4i32, 1},
2084 {ISD::UMIN, MVT::v4i32, 1},
2085 {ISD::SMIN, MVT::v8i16, 1},
2086 {ISD::UMIN, MVT::v8i16, 1},
2087 {ISD::SMIN, MVT::v8i32, 2},
2088 {ISD::UMIN, MVT::v8i32, 2},
2091 static const CostTblEntry AVX2CostTblNoPairWise[] = {
2092 {ISD::SMIN, MVT::v4i64, 1},
2093 {ISD::UMIN, MVT::v4i64, 1},
2094 {ISD::SMIN, MVT::v8i32, 1},
2095 {ISD::UMIN, MVT::v8i32, 1},
2096 {ISD::SMIN, MVT::v16i16, 1},
2097 {ISD::UMIN, MVT::v16i16, 1},
2098 {ISD::SMIN, MVT::v32i8, 1},
2099 {ISD::UMIN, MVT::v32i8, 1},
2102 static const CostTblEntry AVX512CostTblNoPairWise[] = {
2103 {ISD::FMINNUM, MVT::v8f64, 1},
2104 {ISD::FMINNUM, MVT::v16f32, 2},
2105 {ISD::SMIN, MVT::v8i64, 1},
2106 {ISD::UMIN, MVT::v8i64, 1},
2107 {ISD::SMIN, MVT::v16i32, 1},
2108 {ISD::UMIN, MVT::v16i32, 1},
2109 };
2111 if (IsPairwise) {
2112 if (ST->hasAVX512())
2113 if (const auto *Entry = CostTableLookup(AVX512CostTblPairWise, ISD, MTy))
2114 return LT.first * Entry->Cost;
2116 if (ST->hasAVX2())
2117 if (const auto *Entry = CostTableLookup(AVX2CostTblPairWise, ISD, MTy))
2118 return LT.first * Entry->Cost;
2120 if (ST->hasAVX())
2121 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
2122 return LT.first * Entry->Cost;
2124 if (ST->hasSSE42())
2125 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
2126 return LT.first * Entry->Cost;
2127 } else {
2128 if (ST->hasAVX512())
2129 if (const auto *Entry =
2130 CostTableLookup(AVX512CostTblNoPairWise, ISD, MTy))
2131 return LT.first * Entry->Cost;
2133 if (ST->hasAVX2())
2134 if (const auto *Entry = CostTableLookup(AVX2CostTblNoPairWise, ISD, MTy))
2135 return LT.first * Entry->Cost;
2137 if (ST->hasAVX())
2138 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
2139 return LT.first * Entry->Cost;
2141 if (ST->hasSSE42())
2142 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
2143 return LT.first * Entry->Cost;
2144 }
2146 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned);
2147 }
2149 /// \brief Calculate the cost of materializing a 64-bit value. This helper
2150 /// method might only calculate a fraction of a larger immediate. Therefore it
2151 /// is valid to return a cost of ZERO.
2152 int X86TTIImpl::getIntImmCost(int64_t Val) {
2153 if (Val == 0)
2154 return TTI::TCC_Free;
2156 if (isInt<32>(Val))
2157 return TTI::TCC_Basic;
2159 return 2 * TTI::TCC_Basic;
2160 }
2162 int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
2163 assert(Ty->isIntegerTy());
2165 unsigned BitSize = Ty->getPrimitiveSizeInBits();
2166 if (BitSize == 0)
2167 return ~0U;
2169 // Never hoist constants larger than 128 bits, because this might lead to
2170 // incorrect code generation or assertions in codegen.
2171 // FIXME: Create a cost model for types larger than i128 once the codegen
2172 // issues have been fixed.
2173 if (BitSize > 128)
2174 return TTI::TCC_Free;
2176 if (Imm == 0)
2177 return TTI::TCC_Free;
2179 // Sign-extend all constants to a multiple of 64-bit.
2180 APInt ImmVal = Imm;
2181 if (BitSize % 64 != 0)
2182 ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
2184 // Split the constant into 64-bit chunks and calculate the cost for each
2185 // chunk.
2186 int Cost = 0;
2187 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
2188 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
2189 int64_t Val = Tmp.getSExtValue();
2190 Cost += getIntImmCost(Val);
2192 // We need at least one instruction to materialize the constant.
2193 return std::max(1, Cost);
2194 }
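// Worked example (illustrative): an i128 immediate whose low 64-bit chunk is
// zero (TCC_Free) and whose high chunk fits in 32 bits (TCC_Basic) costs
// max(1, 0 + 1) == 1 to materialize.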
2196 int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
2198 assert(Ty->isIntegerTy());
2200 unsigned BitSize = Ty->getPrimitiveSizeInBits();
2201 // There is no cost model for constants with a bit size of 0. Return TCC_Free
2202 // here, so that constant hoisting will ignore this constant.
2203 if (BitSize == 0)
2204 return TTI::TCC_Free;
2206 unsigned ImmIdx = ~0U;
2207 switch (Opcode) {
2208 default:
2209 return TTI::TCC_Free;
2210 case Instruction::GetElementPtr:
2211 // Always hoist the base address of a GetElementPtr. This prevents the
2212 // creation of new constants for every base constant that gets constant
2213 // folded with the offset.
2214 if (Idx == 0)
2215 return 2 * TTI::TCC_Basic;
2216 return TTI::TCC_Free;
2217 case Instruction::Store:
2218 ImmIdx = 0;
2219 break;
2220 case Instruction::ICmp:
2221 // This is an imperfect hack to prevent constant hoisting of
2222 // compares that might be trying to check if a 64-bit value fits in
2223 // 32-bits. The backend can optimize these cases using a right shift by 32.
2224 // Ideally we would check the compare predicate here. There are also other
2225 // similar immediates the backend can use shifts for.
2226 if (Idx == 1 && Imm.getBitWidth() == 64) {
2227 uint64_t ImmVal = Imm.getZExtValue();
2228 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
2229 return TTI::TCC_Free;
2230 }
2231 ImmIdx = 1;
2232 break;
2233 case Instruction::And:
2234 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
2235 // by using a 32-bit operation with implicit zero extension. Detect such
2236 // immediates here as the normal path expects bit 31 to be sign extended.
2237 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
2238 return TTI::TCC_Free;
2239 LLVM_FALLTHROUGH;
2240 case Instruction::Add:
2241 case Instruction::Sub:
2242 case Instruction::Mul:
2243 case Instruction::UDiv:
2244 case Instruction::SDiv:
2245 case Instruction::URem:
2246 case Instruction::SRem:
2247 case Instruction::Or:
2248 case Instruction::Xor:
2249 ImmIdx = 1;
2250 break;
2251 // Always return TCC_Free for the shift value of a shift instruction.
2252 case Instruction::Shl:
2253 case Instruction::LShr:
2254 case Instruction::AShr:
2255 if (Idx == 1)
2256 return TTI::TCC_Free;
2257 break;
2258 case Instruction::Trunc:
2259 case Instruction::ZExt:
2260 case Instruction::SExt:
2261 case Instruction::IntToPtr:
2262 case Instruction::PtrToInt:
2263 case Instruction::BitCast:
2264 case Instruction::PHI:
2265 case Instruction::Call:
2266 case Instruction::Select:
2267 case Instruction::Ret:
2268 case Instruction::Load:
2269 break;
2270 }
2272 if (Idx == ImmIdx) {
2273 int NumConstants = (BitSize + 63) / 64;
2274 int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
2275 return (Cost <= NumConstants * TTI::TCC_Basic)
2276 ? static_cast<int>(TTI::TCC_Free)
2277 : Cost;
2278 }
2280 return X86TTIImpl::getIntImmCost(Imm, Ty);
2281 }
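// Worked example (illustrative): for 'add i64 %x, C' the immediate sits at
// ImmIdx == 1. If C fits in 32 bits its materialization cost (1) does not
// exceed NumConstants * TCC_Basic, so TCC_Free is returned and the constant
// is not hoisted; a full 64-bit C costs 2 and is reported as such, making it
// a profitable hoisting candidate.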
2283 int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
2285 assert(Ty->isIntegerTy());
2287 unsigned BitSize = Ty->getPrimitiveSizeInBits();
2288 // There is no cost model for constants with a bit size of 0. Return TCC_Free
2289 // here, so that constant hoisting will ignore this constant.
2290 if (BitSize == 0)
2291 return TTI::TCC_Free;
2293 switch (IID) {
2294 default:
2295 return TTI::TCC_Free;
2296 case Intrinsic::sadd_with_overflow:
2297 case Intrinsic::uadd_with_overflow:
2298 case Intrinsic::ssub_with_overflow:
2299 case Intrinsic::usub_with_overflow:
2300 case Intrinsic::smul_with_overflow:
2301 case Intrinsic::umul_with_overflow:
2302 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
2303 return TTI::TCC_Free;
2304 break;
2305 case Intrinsic::experimental_stackmap:
2306 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
2307 return TTI::TCC_Free;
2308 break;
2309 case Intrinsic::experimental_patchpoint_void:
2310 case Intrinsic::experimental_patchpoint_i64:
2311 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
2312 return TTI::TCC_Free;
2313 break;
2314 }
2315 return X86TTIImpl::getIntImmCost(Imm, Ty);
2318 unsigned X86TTIImpl::getUserCost(const User *U,
2319 ArrayRef<const Value *> Operands) {
2320 if (isa<StoreInst>(U)) {
2321 Value *Ptr = U->getOperand(1);
2322 // A store instruction with index and scale addressing costs 2 uops.
2323 // Check the preceding GEP to identify non-constant indices.
2324 if (auto GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
2325 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
2326 return TTI::TCC_Basic * 2;
2327 }
2328 return TTI::TCC_Basic;
2329 }
2330 return BaseT::getUserCost(U, Operands);
2333 // Return the average cost of a gather / scatter instruction; may be improved later.
2334 int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
2335 unsigned Alignment, unsigned AddressSpace) {
2337 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
2338 unsigned VF = SrcVTy->getVectorNumElements();
2340 // Try to reduce the index size from 64 bits (the default for GEP) to 32
2341 // bits. This is essential for VF 16. If the index can't be reduced to 32
2342 // bits, the operation will use 16 x 64-bit indices, which do not fit in a
2343 // zmm register and need to be split. Also check that the base pointer is
2344 // the same for all lanes, and that there's at most one variable index.
2345 auto getIndexSizeInBits = [](Value *Ptr, const DataLayout& DL) {
2346 unsigned IndexSize = DL.getPointerSizeInBits();
2347 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2348 if (IndexSize < 64 || !GEP)
2349 return IndexSize;
2351 unsigned NumOfVarIndices = 0;
2352 Value *Ptrs = GEP->getPointerOperand();
2353 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
2354 return IndexSize;
2355 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
2356 if (isa<Constant>(GEP->getOperand(i)))
2357 continue;
2358 Type *IndxTy = GEP->getOperand(i)->getType();
2359 if (IndxTy->isVectorTy())
2360 IndxTy = IndxTy->getVectorElementType();
2361 if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
2362 !isa<SExtInst>(GEP->getOperand(i))) ||
2363 ++NumOfVarIndices > 1)
2364 return IndexSize; // 64
2365 }
2366 return (unsigned)32;
2367 };
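// Worked example (illustrative): a VF-16 gather whose GEP has a splat base
// pointer and a single variable index that is sign-extended from i32 gets
// IndexSize == 32 from the lambda above, so all 16 indices fit in one zmm
// register and no splitting is needed.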
2370 // Try to reduce the index size to 32 bits for 16-element vectors.
2371 // By default the index size equals the pointer size.
2372 unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
2373 ? getIndexSizeInBits(Ptr, DL)
2374 : DL.getPointerSizeInBits();
2376 Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
2377 IndexSize), VF);
2378 std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
2379 std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
2380 int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
2381 if (SplitFactor > 1) {
2382 // Handle splitting of vector of pointers
2383 Type *SplitSrcTy = VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
2384 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
2385 AddressSpace);
2386 }
2388 // The gather / scatter cost is given by Intel architects. It is a rough
2389 // number since we are looking at one instruction at a time.
2390 const int GSOverhead = (Opcode == Instruction::Load)
2391 ? ST->getGatherOverhead()
2392 : ST->getScatterOverhead();
2393 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
2394 Alignment, AddressSpace);
2395 }
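// Worked example (illustrative): a gather of <16 x float> with 32-bit
// indices on AVX-512 needs no splitting (SplitFactor == 1), so the cost is
// getGatherOverhead() plus 16 times the scalar f32 load cost.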
2397 /// Return the cost of full scalarization of a gather / scatter operation.
2399 /// Opcode - Load or Store instruction.
2400 /// SrcVTy - The type of the data vector that should be gathered or scattered.
2401 /// VariableMask - The mask is non-constant at compile time.
2402 /// Alignment - Alignment for one element.
2403 /// AddressSpace - pointer[s] address space.
2405 int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
2406 bool VariableMask, unsigned Alignment,
2407 unsigned AddressSpace) {
2408 unsigned VF = SrcVTy->getVectorNumElements();
2410 int MaskUnpackCost = 0;
2411 if (VariableMask) {
2412 VectorType *MaskTy =
2413 VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
2414 MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
2415 int ScalarCompareCost =
2416 getCmpSelInstrCost(Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()),
2417 nullptr);
2418 int BranchCost = getCFInstrCost(Instruction::Br);
2419 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
2420 }
2422 // The cost of the scalar loads/stores.
2423 int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
2424 Alignment, AddressSpace);
2426 int InsertExtractCost = 0;
2427 if (Opcode == Instruction::Load)
2428 for (unsigned i = 0; i < VF; ++i)
2429 // Add the cost of inserting each scalar load into the vector
2430 InsertExtractCost +=
2431 getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
2432 else
2433 for (unsigned i = 0; i < VF; ++i)
2434 // Add the cost of extracting each element out of the data vector
2435 InsertExtractCost +=
2436 getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);
2438 return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
2439 }
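// Worked example (illustrative): scalarizing a variable-mask gather of
// <4 x double> pays for unpacking the <4 x i1> mask, four compare+branch
// pairs, four scalar f64 loads, and four insertelement operations to rebuild
// the result vector; these are exactly the terms summed above.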
2441 /// Calculate the cost of Gather / Scatter operation
2442 int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
2443 Value *Ptr, bool VariableMask,
2444 unsigned Alignment) {
2445 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
2446 unsigned VF = SrcVTy->getVectorNumElements();
2447 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2448 if (!PtrTy && Ptr->getType()->isVectorTy())
2449 PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
2450 assert(PtrTy && "Unexpected type for Ptr argument");
2451 unsigned AddressSpace = PtrTy->getAddressSpace();
2453 bool Scalarize = false;
2454 if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
2455 (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
2456 Scalarize = true;
2457 // Gather / scatter on 2-element vectors is not profitable on KNL / SKX.
2458 // A 4-element gather/scatter instruction does not exist on KNL.
2459 // We could extend it to 8 elements, but zeroing the upper bits of
2460 // the mask vector will add more instructions. Right now we give the scalar
2461 // cost of vector-4 for KNL. TODO: Check whether the gather/scatter
2462 // instruction is better in the VariableMask case.
2463 if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX())))
2464 Scalarize = true;
2466 if (Scalarize)
2467 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
2468 AddressSpace);
2470 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
2471 }
2473 bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
2474 TargetTransformInfo::LSRCost &C2) {
2475 // X86-specific here: the instruction count has first priority.
2476 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
2477 C1.NumIVMuls, C1.NumBaseAdds,
2478 C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
2479 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
2480 C2.NumIVMuls, C2.NumBaseAdds,
2481 C2.ScaleCost, C2.ImmCost, C2.SetupCost);
2484 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
2485 // The backend can't handle a single element vector.
2486 if (isa<VectorType>(DataTy) && DataTy->getVectorNumElements() == 1)
2487 return false;
2488 Type *ScalarTy = DataTy->getScalarType();
2489 int DataWidth = isa<PointerType>(ScalarTy) ?
2490 DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();
2492 return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
2493 ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
2496 bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
2497 return isLegalMaskedLoad(DataType);
2500 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
2501 // This function is called now in two cases: from the Loop Vectorizer
2502 // and from the Scalarizer.
2503 // When the Loop Vectorizer asks about the legality of the feature,
2504 // the vectorization factor is not calculated yet. The Loop Vectorizer
2505 // sends a scalar type and the decision is based on the width of the
2506 // scalar element.
2507 // Later on, the cost model will estimate the usage of this intrinsic
2508 // based on the vector type.
2509 // The Scalarizer asks again about legality. It sends a vector type.
2510 // In this case we can reject non-power-of-2 vectors.
2511 // We also reject single element vectors as the type legalizer can't
2512 // scalarize them.
2513 if (isa<VectorType>(DataTy)) {
2514 unsigned NumElts = DataTy->getVectorNumElements();
2515 if (NumElts == 1 || !isPowerOf2_32(NumElts))
2516 return false;
2517 }
2518 Type *ScalarTy = DataTy->getScalarType();
2519 int DataWidth = isa<PointerType>(ScalarTy) ?
2520 DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();
2522 // Some CPUs have better gather performance than others.
2523 // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
2524 // enable gather with a -march.
2525 return (DataWidth == 32 || DataWidth == 64) &&
2526 (ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2()));
2529 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
2530 // AVX2 doesn't support scatter
2531 if (!ST->hasAVX512())
2532 return false;
2533 return isLegalMaskedGather(DataType);
2536 bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
2537 EVT VT = TLI->getValueType(DL, DataType);
2538 return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
2541 bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
2545 bool X86TTIImpl::areInlineCompatible(const Function *Caller,
2546 const Function *Callee) const {
2547 const TargetMachine &TM = getTLI()->getTargetMachine();
2549 // Treat this as a subset check on the subtarget features.
2550 const FeatureBitset &CallerBits =
2551 TM.getSubtargetImpl(*Caller)->getFeatureBits();
2552 const FeatureBitset &CalleeBits =
2553 TM.getSubtargetImpl(*Callee)->getFeatureBits();
2555 // FIXME: This is likely too limiting as it will include subtarget features
2556 // that we might not care about for inlining, but it is conservatively
2557 // correct.
2558 return (CallerBits & CalleeBits) == CalleeBits;
2561 const X86TTIImpl::TTI::MemCmpExpansionOptions *
2562 X86TTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
2563 // Only enable vector loads for equality comparison.
2564 // Right now the vector version is not as fast, see #33329.
2565 static const auto ThreeWayOptions = [this]() {
2566 TTI::MemCmpExpansionOptions Options;
2567 if (ST->is64Bit()) {
2568 Options.LoadSizes.push_back(8);
2569 }
2570 Options.LoadSizes.push_back(4);
2571 Options.LoadSizes.push_back(2);
2572 Options.LoadSizes.push_back(1);
2573 return Options;
2574 }();
2575 static const auto EqZeroOptions = [this]() {
2576 TTI::MemCmpExpansionOptions Options;
2577 // TODO: enable AVX512 when the DAG is ready.
2578 // if (ST->hasAVX512()) Options.LoadSizes.push_back(64);
2579 if (ST->hasAVX2()) Options.LoadSizes.push_back(32);
2580 if (ST->hasSSE2()) Options.LoadSizes.push_back(16);
2581 if (ST->is64Bit()) {
2582 Options.LoadSizes.push_back(8);
2583 }
2584 Options.LoadSizes.push_back(4);
2585 Options.LoadSizes.push_back(2);
2586 Options.LoadSizes.push_back(1);
2587 return Options;
2588 }();
2589 return IsZeroCmp ? &EqZeroOptions : &ThreeWayOptions;
2592 bool X86TTIImpl::enableInterleavedAccessVectorization() {
2593 // TODO: We expect this to be beneficial regardless of arch,
2594 // but there are currently some unexplained performance artifacts on Atom.
2595 // As a temporary solution, disable on Atom.
2596 return !(ST->isAtom());
2599 // Get an estimate for interleaved load/store operations for AVX2.
2600 // \p Factor is the interleaved-access factor (stride) - number of
2601 // (interleaved) elements in the group.
2602 // \p Indices contains the indices for a strided load: when the
2603 // interleaved load has gaps they indicate which elements are used.
2604 // If Indices is empty (or if the number of indices is equal to the size
2605 // of the interleaved-access as given in \p Factor) the access has no gaps.
2607 // As opposed to AVX-512, AVX2 does not have generic shuffles that allow
2608 // computing the cost using a generic formula as a function of generic
2609 // shuffles. We therefore use a lookup table instead, filled according to
2610 // the instruction sequences that codegen currently generates.
2611 int X86TTIImpl::getInterleavedMemoryOpCostAVX2(unsigned Opcode, Type *VecTy,
2612 unsigned Factor,
2613 ArrayRef<unsigned> Indices,
2614 unsigned Alignment,
2615 unsigned AddressSpace) {
2617 // We currently support only fully-interleaved groups, with no gaps.
2618 // TODO: Support also strided loads (interleaved-groups with gaps).
2619 if (Indices.size() && Indices.size() != Factor)
2620 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
2621 Alignment, AddressSpace);
2623 // VecTy for interleave memop is <VF*Factor x Elt>.
2624 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
2625 // VecTy = <12 x i32>.
2626 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
2628 // This function can be called with VecTy=<6xi128>, Factor=3, in which case
2629 // the VF=2, while v2i128 is an unsupported MVT vector type
2630 // (see MachineValueType.h::getVectorVT()).
2631 if (!LegalVT.isVector())
2632 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
2633 Alignment, AddressSpace);
2635 unsigned VF = VecTy->getVectorNumElements() / Factor;
2636 Type *ScalarTy = VecTy->getVectorElementType();
2638 // Calculate the number of memory operations (NumOfMemOps) required
2639 // to load/store the VecTy.
2640 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
2641 unsigned LegalVTSize = LegalVT.getStoreSize();
2642 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
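// Worked example (illustrative): an interleaved group with Factor == 2 over
// <8 x i64> has VecTySize == 64 bytes; on AVX2 the legal type is v4i64
// (32 bytes), so NumOfMemOps == (64 + 31) / 32 == 2.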
2644 // Get the cost of one memory operation.
2645 Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
2646 LegalVT.getVectorNumElements());
2647 unsigned MemOpCost =
2648 getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);
2650 VectorType *VT = VectorType::get(ScalarTy, VF);
2651 EVT ETy = TLI->getValueType(DL, VT);
2652 if (!ETy.isSimple())
2653 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
2654 Alignment, AddressSpace);
2656 // TODO: Complete for other data types and strides.
2657 // Each combination of Stride, ElementTy and VF results in a different
2658 // sequence; the cost tables are therefore accessed with:
2659 // Factor (stride) and VectorType = VF x ElemType.
2660 // The Cost accounts only for the shuffle sequence;
2661 // the cost of the loads/stores is accounted for separately.
2663 static const CostTblEntry AVX2InterleavedLoadTbl[] = {
2664 { 2, MVT::v4i64, 6 }, //(load 8i64 and) deinterleave into 2 x 4i64
2665 { 2, MVT::v4f64, 6 }, //(load 8f64 and) deinterleave into 2 x 4f64
2667 { 3, MVT::v2i8, 10 }, //(load 6i8 and) deinterleave into 3 x 2i8
2668 { 3, MVT::v4i8, 4 }, //(load 12i8 and) deinterleave into 3 x 4i8
2669 { 3, MVT::v8i8, 9 }, //(load 24i8 and) deinterleave into 3 x 8i8
2670 { 3, MVT::v16i8, 11}, //(load 48i8 and) deinterleave into 3 x 16i8
2671 { 3, MVT::v32i8, 13}, //(load 96i8 and) deinterleave into 3 x 32i8
2672 { 3, MVT::v8f32, 17 }, //(load 24f32 and) deinterleave into 3 x 8f32
2674 { 4, MVT::v2i8, 12 }, //(load 8i8 and) deinterleave into 4 x 2i8
2675 { 4, MVT::v4i8, 4 }, //(load 16i8 and) deinterleave into 4 x 4i8
2676 { 4, MVT::v8i8, 20 }, //(load 32i8 and) deinterleave into 4 x 8i8
2677 { 4, MVT::v16i8, 39 }, //(load 64i8 and) deinterleave into 4 x 16i8
2678 { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8
2680 { 8, MVT::v8f32, 40 } //(load 64f32 and) deinterleave into 8 x 8f32
2683 static const CostTblEntry AVX2InterleavedStoreTbl[] = {
2684 { 2, MVT::v4i64, 6 }, //interleave into 2 x 4i64 into 8i64 (and store)
2685 { 2, MVT::v4f64, 6 }, //interleave into 2 x 4f64 into 8f64 (and store)
2687 { 3, MVT::v2i8, 7 }, //interleave 3 x 2i8 into 6i8 (and store)
2688 { 3, MVT::v4i8, 8 }, //interleave 3 x 4i8 into 12i8 (and store)
2689 { 3, MVT::v8i8, 11 }, //interleave 3 x 8i8 into 24i8 (and store)
2690 { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store)
2691 { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store)
2693 { 4, MVT::v2i8, 12 }, //interleave 4 x 2i8 into 8i8 (and store)
2694 { 4, MVT::v4i8, 9 }, //interleave 4 x 4i8 into 16i8 (and store)
2695 { 4, MVT::v8i8, 10 }, //interleave 4 x 8i8 into 32i8 (and store)
2696 { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store)
2697 { 4, MVT::v32i8, 12 } //interleave 4 x 32i8 into 128i8 (and store)
2698 };
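// Continuing the worked example (illustrative): for the Factor == 2 load of
// <8 x i64>, ETy is v4i64, so { 2, MVT::v4i64, 6 } applies and the returned
// cost is NumOfMemOps * MemOpCost + 6 == 2 * MemOpCost + 6.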
2700 if (Opcode == Instruction::Load) {
2701 if (const auto *Entry =
2702 CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
2703 return NumOfMemOps * MemOpCost + Entry->Cost;
2704 } else {
2705 assert(Opcode == Instruction::Store &&
2706 "Expected Store Instruction at this point");
2707 if (const auto *Entry =
2708 CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
2709 return NumOfMemOps * MemOpCost + Entry->Cost;
2710 }
2712 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
2713 Alignment, AddressSpace);
2716 // Get an estimate for interleaved load/store operations and strided loads.
2717 // \p Indices contains the indices for a strided load.
2718 // \p Factor - the factor of interleaving.
2719 // AVX-512 provides 3-source shuffles that significantly reduce the cost.
2720 int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
2721 unsigned Factor,
2722 ArrayRef<unsigned> Indices,
2723 unsigned Alignment,
2724 unsigned AddressSpace) {
2726 // VecTy for interleave memop is <VF*Factor x Elt>.
2727 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
2728 // VecTy = <12 x i32>.
2730 // Calculate the number of memory operations (NumOfMemOps) required
2731 // to load/store the VecTy.
2732 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
2733 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
2734 unsigned LegalVTSize = LegalVT.getStoreSize();
2735 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
2737 // Get the cost of one memory operation.
2738 Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
2739 LegalVT.getVectorNumElements());
2740 unsigned MemOpCost =
2741 getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);
2743 unsigned VF = VecTy->getVectorNumElements() / Factor;
2744 MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
2746 if (Opcode == Instruction::Load) {
2747 // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
2748 // contain the cost of the optimized shuffle sequence that the
2749 // X86InterleavedAccess pass will generate.
2750 // The cost of loads and stores are computed separately from the table.
2752 // X86InterleavedAccess supports only the following interleaved-access groups.
2753 static const CostTblEntry AVX512InterleavedLoadTbl[] = {
2754 {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
2755 {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
2756 {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
2759 if (const auto *Entry =
2760 CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
2761 return NumOfMemOps * MemOpCost + Entry->Cost;
2762 // If an entry does not exist, fall back to the default implementation.
2764 // The kind of shuffle depends on the number of loaded values.
2765 // If we load the entire data in one register, we can use a 1-src shuffle.
2766 // Otherwise, we'll merge 2 sources in each operation.
2767 TTI::ShuffleKind ShuffleKind =
2768 (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
2770 unsigned ShuffleCost =
2771 getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);
2773 unsigned NumOfLoadsInInterleaveGrp =
2774 Indices.size() ? Indices.size() : Factor;
2775 Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
2776 VecTy->getVectorNumElements() / Factor);
2777 unsigned NumOfResults =
2778 getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
2779 NumOfLoadsInInterleaveGrp;
2781 // About half of the loads may be folded into the shuffles when we have
2782 // only one result. If we have more than one result, we do not fold loads at all.
2783 unsigned NumOfUnfoldedLoads =
2784 NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
2786 // Get the number of shuffle operations per result.
2787 unsigned NumOfShufflesPerResult =
2788 std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
2790 // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
2791 // When we have more than one destination, we need additional instructions
2792 // to keep the sources intact.
2793 unsigned NumOfMoves = 0;
2794 if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
2795 NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
2797 int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
2798 NumOfUnfoldedLoads * MemOpCost + NumOfMoves;
2799 return Cost;
2800 }
2804 assert(Opcode == Instruction::Store &&
2805 "Expected Store Instruction at this point");
2806 // X86InterleavedAccess supports only the following interleaved-access groups.
2807 static const CostTblEntry AVX512InterleavedStoreTbl[] = {
2808 {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
2809 {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
2810 {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 96i8 (and store)
2812 {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store)
2813 {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
2814 {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
2815 {4, MVT::v64i8, 24} // interleave 4 x 32i8 into 256i8 (and store)
2818 if (const auto *Entry =
2819 CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
2820 return NumOfMemOps * MemOpCost + Entry->Cost;
2821 // If an entry does not exist, fall back to the default implementation.
2823 // There are no strided stores at the moment, and a store can't be folded
2824 // into any shuffle.
2825 unsigned NumOfSources = Factor; // The number of values to be merged.
2826 unsigned ShuffleCost =
2827 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
2828 unsigned NumOfShufflesPerStore = NumOfSources - 1;
2830 // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
2831 // We need additional instructions to keep the sources intact.
2832 unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
2833 int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
2834 NumOfMoves;
2835 return Cost;
2836 }
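// Worked example (illustrative): a Factor == 4 store merges four sources
// with NumOfShufflesPerStore == 3 two-source shuffles per memory operation,
// plus NumOfMemOps * 3 / 2 extra moves to preserve the clobbered sources.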
2838 int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
2839 unsigned Factor,
2840 ArrayRef<unsigned> Indices,
2841 unsigned Alignment,
2842 unsigned AddressSpace) {
2843 auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
2844 Type *EltTy = VecTy->getVectorElementType();
2845 if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
2846 EltTy->isIntegerTy(32) || EltTy->isPointerTy())
2847 return true;
2848 if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
2849 return HasBW;
2850 return false;
2851 };
2852 if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
2853 return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
2854 Alignment, AddressSpace);
2855 if (ST->hasAVX2())
2856 return getInterleavedMemoryOpCostAVX2(Opcode, VecTy, Factor, Indices,
2857 Alignment, AddressSpace);
2859 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
2860 Alignment, AddressSpace);
2861 }