//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A few notes about the cost model numbers used below: the numbers
/// correspond to some "generic" X86 CPU rather than to a concrete CPU model.
/// Usually the numbers correspond to the CPU where the feature first
/// appeared. For example, if we do Subtarget.hasSSE42() in
/// the lookups below the cost is based on Nehalem as that was the first CPU
/// to support that feature level and thus has most likely the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3 - Pentium4 / Athlon64
///   SSE 4.1, 4.2 - Nehalem
///   AVX - Sandy Bridge
///   AVX2 - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss   sqrtss   rsqrtss
///   Piledriver      9-24    13-15    5
///   Pentium II,III  18      30       2
///   Nehalem         7-14    7-18     3
///   Haswell         10-13   11       5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
//===----------------------------------------------------------------------===//
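//
// Most of the cost tables below are keyed on the legalized type: a query first
// legalizes its IR type into LT = (LT.first, LT.second), where LT.first is the
// number of legal registers the value splits into and LT.second is the legal
// MVT, and then returns LT.first * Entry->Cost for the matching table entry.
//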
#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"
//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//
TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

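// (Passes such as LoopIdiomRecognize consult this hook before rewriting a
// manual bit-counting loop into an @llvm.ctpop intrinsic.)
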
unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAVX512())
      return 512;
    if (ST->hasAVX())
      return 256;
    if (ST->hasSSE1())
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}
unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(true);
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle the loop instead, which saves the
  // overflow check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL, MVT::v4i32, 11 }, // pmulld
    { ISD::MUL, MVT::v8i16, 2 }, // pmullw
    { ISD::MUL, MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL, MVT::f64, 2 }, // mulsd
    { ISD::FMUL, MVT::v2f64, 4 }, // mulpd
    { ISD::FMUL, MVT::v4f32, 2 }, // mulps
    { ISD::FDIV, MVT::f32, 17 }, // divss
    { ISD::FDIV, MVT::v4f32, 39 }, // divps
    { ISD::FDIV, MVT::f64, 32 }, // divsd
    { ISD::FDIV, MVT::v2f64, 69 }, // divpd
    { ISD::FADD, MVT::v2f64, 2 }, // addpd
    { ISD::FSUB, MVT::v2f64, 2 }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long
    // multiplies(3), shifts(3) and adds(2).
    // slm muldq version throughput is 2
    { ISD::MUL, MVT::v2i64, 11 },
  };

  if (ST->isSLM()) {
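    // Silvermont's pmulld is expensive (cost 11 in the table above); when both
    // operands are known to be narrow, the cheaper pmullw/pmulhw-based
    // sequences below can be used instead.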
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by constants power-of-two are
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL, MVT::v64i8, 2 }, // psllw + pand.
    { ISD::SRL, MVT::v64i8, 2 }, // psrlw + pand.
    { ISD::SRA, MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA, MVT::v2i64, 1 },
    { ISD::SRA, MVT::v4i64, 1 },
    { ISD::SRA, MVT::v8i64, 1 },

    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL, MVT::v32i8, 2 }, // psllw + pand.
    { ISD::SRL, MVT::v32i8, 2 }, // psrlw + pand.
    { ISD::SRA, MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL, MVT::v16i8, 2 }, // psllw + pand.
    { ISD::SRL, MVT::v16i8, 2 }, // psrlw + pand.
    { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL, MVT::v32i8, 4 }, // 2*(psllw + pand).
    { ISD::SRL, MVT::v32i8, 4 }, // 2*(psrlw + pand).
    { ISD::SRA, MVT::v32i8, 8 }, // 2*(psrlw, pand, pxor, psubb).

    { ISD::SDIV, MVT::v16i16, 12 }, // pmulhw sequence
    { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v16i16, 12 }, // pmulhuw sequence
    { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v8i32, 38 }, // pmuludq sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v8i32, 30 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 30;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    if (const auto *Entry = CostTableLookup(SSE2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 1 }, // psllw.
    { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA, MVT::v16i16, 1 }, // psraw.
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v8i16, 1 }, // psllw.
    { ISD::SHL, MVT::v4i32, 1 }, // pslld.
    { ISD::SHL, MVT::v2i64, 1 }, // psllq.

    { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL, MVT::v4i32, 1 }, // psrld.
    { ISD::SRL, MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA, MVT::v8i16, 1 }, // psraw.
    { ISD::SRA, MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL, MVT::v2i64, 1 },
    { ISD::MUL, MVT::v4i64, 1 },
    { ISD::MUL, MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL, MVT::v8i16, 1 }, // vpsllvw
    { ISD::SRL, MVT::v8i16, 1 }, // vpsrlvw
    { ISD::SRA, MVT::v8i16, 1 }, // vpsravw

    { ISD::SHL, MVT::v16i16, 1 }, // vpsllvw
    { ISD::SRL, MVT::v16i16, 1 }, // vpsrlvw
    { ISD::SRA, MVT::v16i16, 1 }, // vpsravw

    { ISD::SHL, MVT::v32i16, 1 }, // vpsllvw
    { ISD::SRL, MVT::v32i16, 1 }, // vpsrlvw
    { ISD::SRA, MVT::v32i16, 1 }, // vpsravw

    { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence.
    { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence.
    { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence.

    { ISD::MUL, MVT::v64i8, 11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL, MVT::v32i8, 4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL, MVT::v16i8, 4 }, // extend/pmullw/trunc sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v64i8, 64*20 },
    { ISD::SDIV, MVT::v32i16, 32*20 },
    { ISD::UDIV, MVT::v64i8, 64*20 },
    { ISD::UDIV, MVT::v32i16, 32*20 }
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL, MVT::v16i32, 1 },
    { ISD::SRL, MVT::v16i32, 1 },
    { ISD::SRA, MVT::v16i32, 1 },

    { ISD::SHL, MVT::v8i64, 1 },
    { ISD::SRL, MVT::v8i64, 1 },

    { ISD::SRA, MVT::v2i64, 1 },
    { ISD::SRA, MVT::v4i64, 1 },
    { ISD::SRA, MVT::v8i64, 1 },

    { ISD::MUL, MVT::v32i8, 13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL, MVT::v16i8, 5 }, // extend/pmullw/trunc sequence.
    { ISD::MUL, MVT::v16i32, 1 }, // pmulld
    { ISD::MUL, MVT::v8i64, 8 }, // 3*pmuludq/3*shift/2*add

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v16i32, 16*20 },
    { ISD::SDIV, MVT::v8i64, 8*20 },
    { ISD::UDIV, MVT::v16i32, 16*20 },
    { ISD::UDIV, MVT::v8i64, 8*20 }
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we mark them as
    // custom; the custom lowering exists to detect cases where the shift
    // amount is a scalar.
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 1 },
    { ISD::SRA, MVT::v4i32, 1 },
    { ISD::SHL, MVT::v8i32, 1 },
    { ISD::SRL, MVT::v8i32, 1 },
    { ISD::SRA, MVT::v8i32, 1 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL, MVT::v16i8, 1 },
    { ISD::SRL, MVT::v16i8, 2 },
    { ISD::SRA, MVT::v16i8, 2 },
    { ISD::SHL, MVT::v8i16, 1 },
    { ISD::SRL, MVT::v8i16, 2 },
    { ISD::SRA, MVT::v8i16, 2 },
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 2 },
    { ISD::SRA, MVT::v4i32, 2 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 2 },
    { ISD::SRA, MVT::v2i64, 2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8, 2 },
    { ISD::SRL, MVT::v32i8, 4 },
    { ISD::SRA, MVT::v32i8, 4 },
    { ISD::SHL, MVT::v16i16, 2 },
    { ISD::SRL, MVT::v16i16, 4 },
    { ISD::SRA, MVT::v16i16, 4 },
    { ISD::SHL, MVT::v8i32, 2 },
    { ISD::SRL, MVT::v8i32, 4 },
    { ISD::SRA, MVT::v8i32, 4 },
    { ISD::SHL, MVT::v4i64, 2 },
    { ISD::SRL, MVT::v4i64, 4 },
    { ISD::SRA, MVT::v4i64, 4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 2 }, // psllw.
    { ISD::SHL, MVT::v8i32, 2 }, // pslld.
    { ISD::SHL, MVT::v4i64, 2 }, // psllq.

    { ISD::SRL, MVT::v16i16, 2 }, // psrlw.
    { ISD::SRL, MVT::v8i32, 2 }, // psrld.
    { ISD::SRL, MVT::v4i64, 2 }, // psrlq.

    { ISD::SRA, MVT::v16i16, 2 }, // psraw.
    { ISD::SRA, MVT::v8i32, 2 }, // psrad.
    { ISD::SRA, MVT::v2i64, 4 }, // 2 x psrad + shuffle.
    { ISD::SRA, MVT::v4i64, 8 }, // 2 x psrad + shuffle.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // Vector shift left by non uniform constant can be lowered
    // into vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence.
    { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence.
    { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence.
    { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence.
    { ISD::SRA, MVT::v4i64, 4 }, // srl/xor/sub sequence.

    { ISD::SUB, MVT::v32i8, 1 }, // psubb
    { ISD::ADD, MVT::v32i8, 1 }, // paddb
    { ISD::SUB, MVT::v16i16, 1 }, // psubw
    { ISD::ADD, MVT::v16i16, 1 }, // paddw
    { ISD::SUB, MVT::v8i32, 1 }, // psubd
    { ISD::ADD, MVT::v8i32, 1 }, // paddd
    { ISD::SUB, MVT::v4i64, 1 }, // psubq
    { ISD::ADD, MVT::v4i64, 1 }, // paddq

    { ISD::MUL, MVT::v32i8, 17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL, MVT::v16i8, 7 }, // extend/pmullw/trunc sequence.
    { ISD::MUL, MVT::v16i16, 1 }, // pmullw
    { ISD::MUL, MVT::v8i32, 1 }, // pmulld
    { ISD::MUL, MVT::v4i64, 8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64, 14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL, MVT::v16i16, 4 },
    { ISD::MUL, MVT::v8i32, 4 },
    { ISD::SUB, MVT::v32i8, 4 },
    { ISD::ADD, MVT::v32i8, 4 },
    { ISD::SUB, MVT::v16i16, 4 },
    { ISD::ADD, MVT::v16i16, 4 },
    { ISD::SUB, MVT::v8i32, 4 },
    { ISD::ADD, MVT::v8i32, 4 },
    { ISD::SUB, MVT::v4i64, 4 },
    { ISD::ADD, MVT::v4i64, 4 },

    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(3) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // extract+insert in the cost table. Therefore, the cost here is 18
    // instead of 8.
    { ISD::MUL, MVT::v4i64, 18 },

    { ISD::MUL, MVT::v32i8, 26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32, 14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64, 22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8, 32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32, 8*20 },
    { ISD::SDIV, MVT::v4i64, 4*20 },
    { ISD::UDIV, MVT::v32i8, 32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32, 8*20 },
    { ISD::UDIV, MVT::v4i64, 4*20 },
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL, MVT::v16i8, 11 }, // pblendvb sequence.
    { ISD::SHL, MVT::v32i8, 2*11 }, // pblendvb sequence.
    { ISD::SHL, MVT::v8i16, 14 }, // pblendvb sequence.
    { ISD::SHL, MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SHL, MVT::v4i32, 4 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL, MVT::v8i32, 2*4 }, // pslld/paddd/cvttps2dq/pmulld

    { ISD::SRL, MVT::v16i8, 12 }, // pblendvb sequence.
    { ISD::SRL, MVT::v32i8, 2*12 }, // pblendvb sequence.
    { ISD::SRL, MVT::v8i16, 14 }, // pblendvb sequence.
    { ISD::SRL, MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SRL, MVT::v4i32, 11 }, // Shift each lane + blend.
    { ISD::SRL, MVT::v8i32, 2*11 }, // Shift each lane + blend.

    { ISD::SRA, MVT::v16i8, 24 }, // pblendvb sequence.
    { ISD::SRA, MVT::v32i8, 2*24 }, // pblendvb sequence.
    { ISD::SRA, MVT::v8i16, 14 }, // pblendvb sequence.
    { ISD::SRA, MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend.
    { ISD::SRA, MVT::v8i32, 2*12 }, // Shift each lane + blend.

    { ISD::MUL, MVT::v4i32, 1 } // pmulld
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence.
    { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence.
    { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
    { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence.
    { ISD::SHL, MVT::v4i64, 2*4 }, // splat+shuffle sequence.

    { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence.
    { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence.
    { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend.
    { ISD::SRL, MVT::v2i64, 4 }, // splat+shuffle sequence.
    { ISD::SRL, MVT::v4i64, 2*4 }, // splat+shuffle sequence.

    { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence.
    { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence.
    { ISD::SRA, MVT::v4i32, 16 }, // Shift each lane + blend.
    { ISD::SRA, MVT::v2i64, 12 }, // srl/xor/sub sequence.
    { ISD::SRA, MVT::v4i64, 2*12 }, // srl/xor/sub sequence.

    { ISD::MUL, MVT::v16i8, 12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL, MVT::v8i16, 1 }, // pmullw
    { ISD::MUL, MVT::v4i32, 6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL, MVT::v2i64, 8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32, 23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64, 38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 69 }, // Pentium IV from http://www.agner.org/

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyway, so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
    // to hide "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16, 8*20 },
    { ISD::SDIV, MVT::v4i32, 4*20 },
    { ISD::SDIV, MVT::v2i64, 2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16, 8*20 },
    { ISD::UDIV, MVT::v4i32, 4*20 },
    { ISD::UDIV, MVT::v2i64, 2*20 },
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32, 17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

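// An illustrative query (hypothetical caller): the cost of an FDiv on
// <4 x float> for an AVX2 (Haswell-class) target resolves through the AVX2
// table above to 7, i.e. one divps on a single legal 128-bit register.
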
int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // For Broadcasts we are splatting the first element from the first input
  // register, so we only need to reference that input; all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // We are going to permute multiple sources and the result will be in
  // multiple destinations. Provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.getVectorElementType().getSizeInBits() ==
            Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(Tp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      unsigned NumOfDests = LT.first;

      Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
                                         LegalVT.getVectorNumElements());

      unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles *
             getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }

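  // For example, a two-source shuffle of <32 x i8> on a bare SSE2 target
  // legalizes to two v16i8 registers (LT.first == 2), so the factor becomes
  // 2 * (2 * 2 - 1) = 6 partial two-source shuffles.
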
  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
    { TTI::SK_Reverse, MVT::v64i8, 1 }, // vpermb
    { TTI::SK_Reverse, MVT::v32i8, 1 }, // vpermb

    { TTI::SK_PermuteSingleSrc, MVT::v64i8, 1 }, // vpermb
    { TTI::SK_PermuteSingleSrc, MVT::v32i8, 1 }, // vpermb

    { TTI::SK_PermuteTwoSrc, MVT::v64i8, 1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc, MVT::v32i8, 1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc, MVT::v16i8, 1 } // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v32i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast, MVT::v64i8, 1 }, // vpbroadcastb

    { TTI::SK_Reverse, MVT::v32i16, 1 }, // vpermw
    { TTI::SK_Reverse, MVT::v16i16, 1 }, // vpermw
    { TTI::SK_Reverse, MVT::v64i8, 2 }, // pshufb + vshufi64x2

    { TTI::SK_PermuteSingleSrc, MVT::v32i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v8i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v64i8, 8 }, // extend to v32i16
    { TTI::SK_PermuteSingleSrc, MVT::v32i8, 3 }, // vpermw + zext/trunc

    { TTI::SK_PermuteTwoSrc, MVT::v32i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc, MVT::v16i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc, MVT::v8i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc, MVT::v32i8, 3 }, // zext + vpermt2w + trunc
    { TTI::SK_PermuteTwoSrc, MVT::v64i8, 19 }, // 6 * v32i8 + 1
    { TTI::SK_PermuteTwoSrc, MVT::v16i8, 3 } // zext + vpermt2w + trunc
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v8f64, 1 }, // vbroadcastpd
    { TTI::SK_Broadcast, MVT::v16f32, 1 }, // vbroadcastps
    { TTI::SK_Broadcast, MVT::v8i64, 1 }, // vpbroadcastq
    { TTI::SK_Broadcast, MVT::v16i32, 1 }, // vpbroadcastd

    { TTI::SK_Reverse, MVT::v8f64, 1 }, // vpermpd
    { TTI::SK_Reverse, MVT::v16f32, 1 }, // vpermps
    { TTI::SK_Reverse, MVT::v8i64, 1 }, // vpermq
    { TTI::SK_Reverse, MVT::v16i32, 1 }, // vpermd

    { TTI::SK_PermuteSingleSrc, MVT::v8f64, 1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v4f64, 1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v16f32, 1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v8f32, 1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v8i64, 1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v4i64, 1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v16i32, 1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v8i32, 1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v4i32, 1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 }, // pshufb

    { TTI::SK_PermuteTwoSrc, MVT::v8f64, 1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc, MVT::v16f32, 1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc, MVT::v8i64, 1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc, MVT::v16i32, 1 }, // vpermt2d
    { TTI::SK_PermuteTwoSrc, MVT::v4f64, 1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc, MVT::v8f32, 1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc, MVT::v4i64, 1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc, MVT::v8i32, 1 }, // vpermt2d
    { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc, MVT::v4f32, 1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc, MVT::v4i32, 1 } // vpermt2d
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f64, 1 }, // vbroadcastpd
    { TTI::SK_Broadcast, MVT::v8f32, 1 }, // vbroadcastps
    { TTI::SK_Broadcast, MVT::v4i64, 1 }, // vpbroadcastq
    { TTI::SK_Broadcast, MVT::v8i32, 1 }, // vpbroadcastd
    { TTI::SK_Broadcast, MVT::v16i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast, MVT::v32i8, 1 }, // vpbroadcastb

    { TTI::SK_Reverse, MVT::v4f64, 1 }, // vpermpd
    { TTI::SK_Reverse, MVT::v8f32, 1 }, // vpermps
    { TTI::SK_Reverse, MVT::v4i64, 1 }, // vpermq
    { TTI::SK_Reverse, MVT::v8i32, 1 }, // vpermd
    { TTI::SK_Reverse, MVT::v16i16, 2 }, // vperm2i128 + pshufb
    { TTI::SK_Reverse, MVT::v32i8, 2 }, // vperm2i128 + pshufb

    { TTI::SK_Alternate, MVT::v16i16, 1 }, // vpblendw
    { TTI::SK_Alternate, MVT::v32i8, 1 }, // vpblendvb

    { TTI::SK_PermuteSingleSrc, MVT::v4i64, 1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v8i32, 1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 4 }, // vperm2i128 + 2 * vpshufb
                                                  // + vpblendvb
    { TTI::SK_PermuteSingleSrc, MVT::v32i8, 4 }   // vperm2i128 + 2 * vpshufb
                                                  // + vpblendvb
  };

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Broadcast, MVT::v8f32, 2 }, // vperm2f128 + vpermilps
    { TTI::SK_Broadcast, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Broadcast, MVT::v8i32, 2 }, // vperm2f128 + vpermilps
    { TTI::SK_Broadcast, MVT::v16i16, 3 }, // vpshuflw + vpshufd + vinsertf128
    { TTI::SK_Broadcast, MVT::v32i8, 2 }, // vpshufb + vinsertf128

    { TTI::SK_Reverse, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Reverse, MVT::v8f32, 2 }, // vperm2f128 + vpermilps
    { TTI::SK_Reverse, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Reverse, MVT::v8i32, 2 }, // vperm2f128 + vpermilps
    { TTI::SK_Reverse, MVT::v16i16, 4 }, // vextractf128 + 2*pshufb
                                         // + vinsertf128
    { TTI::SK_Reverse, MVT::v32i8, 4 },  // vextractf128 + 2*pshufb
                                         // + vinsertf128

    { TTI::SK_Alternate, MVT::v4i64, 1 }, // vblendpd
    { TTI::SK_Alternate, MVT::v4f64, 1 }, // vblendpd
    { TTI::SK_Alternate, MVT::v8i32, 1 }, // vblendps
    { TTI::SK_Alternate, MVT::v8f32, 1 }, // vblendps
    { TTI::SK_Alternate, MVT::v16i16, 3 }, // vpand + vpandn + vpor
    { TTI::SK_Alternate, MVT::v32i8, 3 } // vpand + vpandn + vpor
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41ShuffleTbl[] = {
    { TTI::SK_Alternate, MVT::v2i64, 1 }, // pblendw
    { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd
    { TTI::SK_Alternate, MVT::v4i32, 1 }, // pblendw
    { TTI::SK_Alternate, MVT::v4f32, 1 }, // blendps
    { TTI::SK_Alternate, MVT::v8i16, 1 }, // pblendw
    { TTI::SK_Alternate, MVT::v16i8, 1 } // pblendvb
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSSE3ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v8i16, 1 }, // pshufb
    { TTI::SK_Broadcast, MVT::v16i8, 1 }, // pshufb

    { TTI::SK_Reverse, MVT::v8i16, 1 }, // pshufb
    { TTI::SK_Reverse, MVT::v16i8, 1 }, // pshufb

    { TTI::SK_Alternate, MVT::v8i16, 3 }, // pshufb + pshufb + por
    { TTI::SK_Alternate, MVT::v16i8, 3 }, // pshufb + pshufb + por

    { TTI::SK_PermuteSingleSrc, MVT::v8i16, 1 }, // pshufb
    { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 } // pshufb
  };

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v2f64, 1 }, // shufpd
    { TTI::SK_Broadcast, MVT::v2i64, 1 }, // pshufd
    { TTI::SK_Broadcast, MVT::v4i32, 1 }, // pshufd
    { TTI::SK_Broadcast, MVT::v8i16, 2 }, // pshuflw + pshufd
    { TTI::SK_Broadcast, MVT::v16i8, 3 }, // unpck + pshuflw + pshufd

    { TTI::SK_Reverse, MVT::v2f64, 1 }, // shufpd
    { TTI::SK_Reverse, MVT::v2i64, 1 }, // pshufd
    { TTI::SK_Reverse, MVT::v4i32, 1 }, // pshufd
    { TTI::SK_Reverse, MVT::v8i16, 3 }, // pshuflw + pshufhw + pshufd
    { TTI::SK_Reverse, MVT::v16i8, 9 }, // 2*pshuflw + 2*pshufhw
                                        // + 2*pshufd + 2*unpck + packus

    { TTI::SK_Alternate, MVT::v2i64, 1 }, // movsd
    { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd
    { TTI::SK_Alternate, MVT::v4i32, 2 }, // 2*shufps
    { TTI::SK_Alternate, MVT::v8i16, 3 }, // pand + pandn + por
    { TTI::SK_Alternate, MVT::v16i8, 3 }, // pand + pandn + por

    { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // pshufd
    { TTI::SK_PermuteSingleSrc, MVT::v4i32, 1 } // pshufd
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps
    { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps
    { TTI::SK_Alternate, MVT::v4f32, 2 } // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

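// As a worked example of the tables above: reversing a <32 x i8> vector costs
// 2 on AVX2 (vperm2i128 + pshufb on one 256-bit register), but 2 * 9 on plain
// SSE2, where the type splits into two v16i8 registers and each requires the
// nine-instruction pshuflw/pshufhw/pshufd/unpck/packus sequence.
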
int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // FIXME: Need a better design of the cost table to handle non-simple types
  // and the potentially massive number of combinations
  // (elem_num x src_type x dst_type).
  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },

    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
  };

  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.
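  // (The cost-1 entries above reflect AVX512DQ's direct vcvtqq2ps/vcvtqq2pd
  // and vcvttps2qq/vcvttpd2qq instructions; without DQ, i64 <-> fp conversions
  // require much longer multi-instruction sequences.)
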
  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
    { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
    { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 1 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 1 },
    { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },

    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 },

    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 12 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 },

    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
  };

  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },

    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
    { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 4 },

    { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
    { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },

    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
  };

  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 },

    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },

    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 10 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 20 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },

    { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic when estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },

    { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
    { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
  };

  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },

    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 },
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
  };

  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },

    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 },

    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 },

    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 },
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 },
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
  };

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return LTSrc.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE41()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE2()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC, MVT::v2i64, 8 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },
  };

  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
  };

  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64, 1 },
    { ISD::SETCC, MVT::v8f32, 1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64, 4 },
    { ISD::SETCC, MVT::v8i32, 4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8, 4 },
  };

  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64, 1 },
    { ISD::SETCC, MVT::v8i32, 1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8, 1 },
  };

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64, 1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64, 1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Type *> Tys, FastMathFlags FMF,
                                      unsigned ScalarizationCostPassed) {
  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64, 4 },
    { ISD::BITREVERSE, MVT::v8i32, 4 },
    { ISD::BITREVERSE, MVT::v16i16, 4 },
    { ISD::BITREVERSE, MVT::v32i8, 4 },
    { ISD::BITREVERSE, MVT::v2i64, 1 },
    { ISD::BITREVERSE, MVT::v4i32, 1 },
    { ISD::BITREVERSE, MVT::v8i16, 1 },
    { ISD::BITREVERSE, MVT::v16i8, 1 },
    { ISD::BITREVERSE, MVT::i64, 3 },
    { ISD::BITREVERSE, MVT::i32, 3 },
    { ISD::BITREVERSE, MVT::i16, 3 },
    { ISD::BITREVERSE, MVT::i8, 3 }
  };
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64, 5 },
    { ISD::BITREVERSE, MVT::v8i32, 5 },
    { ISD::BITREVERSE, MVT::v16i16, 5 },
    { ISD::BITREVERSE, MVT::v32i8, 5 },
    { ISD::BSWAP, MVT::v4i64, 1 },
    { ISD::BSWAP, MVT::v8i32, 1 },
    { ISD::BSWAP, MVT::v16i16, 1 },
    { ISD::CTLZ, MVT::v4i64, 23 },
    { ISD::CTLZ, MVT::v8i32, 18 },
    { ISD::CTLZ, MVT::v16i16, 14 },
    { ISD::CTLZ, MVT::v32i8, 9 },
    { ISD::CTPOP, MVT::v4i64, 7 },
    { ISD::CTPOP, MVT::v8i32, 11 },
    { ISD::CTPOP, MVT::v16i16, 9 },
    { ISD::CTPOP, MVT::v32i8, 6 },
    { ISD::CTTZ, MVT::v4i64, 10 },
    { ISD::CTTZ, MVT::v8i32, 14 },
    { ISD::CTTZ, MVT::v16i16, 12 },
    { ISD::CTTZ, MVT::v32i8, 9 },
    { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64, 10 },
    { ISD::BITREVERSE, MVT::v8i32, 10 },
    { ISD::BITREVERSE, MVT::v16i16, 10 },
    { ISD::BITREVERSE, MVT::v32i8, 10 },
    { ISD::BSWAP, MVT::v4i64, 4 },
    { ISD::BSWAP, MVT::v8i32, 4 },
    { ISD::BSWAP, MVT::v16i16, 4 },
    { ISD::CTLZ, MVT::v4i64, 46 },
    { ISD::CTLZ, MVT::v8i32, 36 },
    { ISD::CTLZ, MVT::v16i16, 28 },
    { ISD::CTLZ, MVT::v32i8, 18 },
    { ISD::CTPOP, MVT::v4i64, 14 },
    { ISD::CTPOP, MVT::v8i32, 22 },
    { ISD::CTPOP, MVT::v16i16, 18 },
    { ISD::CTPOP, MVT::v32i8, 12 },
    { ISD::CTTZ, MVT::v4i64, 20 },
    { ISD::CTTZ, MVT::v8i32, 28 },
    { ISD::CTTZ, MVT::v16i16, 24 },
    { ISD::CTTZ, MVT::v32i8, 18 },
    { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
  };
1459 static const CostTblEntry SSSE3CostTbl[] = {
1460 { ISD::BITREVERSE, MVT::v2i64, 5 },
1461 { ISD::BITREVERSE, MVT::v4i32, 5 },
1462 { ISD::BITREVERSE, MVT::v8i16, 5 },
1463 { ISD::BITREVERSE, MVT::v16i8, 5 },
1464 { ISD::BSWAP, MVT::v2i64, 1 },
1465 { ISD::BSWAP, MVT::v4i32, 1 },
1466 { ISD::BSWAP, MVT::v8i16, 1 },
1467 { ISD::CTLZ, MVT::v2i64, 23 },
1468 { ISD::CTLZ, MVT::v4i32, 18 },
1469 { ISD::CTLZ, MVT::v8i16, 14 },
1470 { ISD::CTLZ, MVT::v16i8, 9 },
1471 { ISD::CTPOP, MVT::v2i64, 7 },
1472 { ISD::CTPOP, MVT::v4i32, 11 },
1473 { ISD::CTPOP, MVT::v8i16, 9 },
1474 { ISD::CTPOP, MVT::v16i8, 6 },
1475 { ISD::CTTZ, MVT::v2i64, 10 },
1476 { ISD::CTTZ, MVT::v4i32, 14 },
1477 { ISD::CTTZ, MVT::v8i16, 12 },
1478 { ISD::CTTZ, MVT::v16i8, 9 }
1480 static const CostTblEntry SSE2CostTbl[] = {
1481 { ISD::BITREVERSE, MVT::v2i64, 29 },
1482 { ISD::BITREVERSE, MVT::v4i32, 27 },
1483 { ISD::BITREVERSE, MVT::v8i16, 27 },
1484 { ISD::BITREVERSE, MVT::v16i8, 20 },
1485 { ISD::BSWAP, MVT::v2i64, 7 },
1486 { ISD::BSWAP, MVT::v4i32, 7 },
1487 { ISD::BSWAP, MVT::v8i16, 7 },
1488 { ISD::CTLZ, MVT::v2i64, 25 },
1489 { ISD::CTLZ, MVT::v4i32, 26 },
1490 { ISD::CTLZ, MVT::v8i16, 20 },
1491 { ISD::CTLZ, MVT::v16i8, 17 },
1492 { ISD::CTPOP, MVT::v2i64, 12 },
1493 { ISD::CTPOP, MVT::v4i32, 15 },
1494 { ISD::CTPOP, MVT::v8i16, 13 },
1495 { ISD::CTPOP, MVT::v16i8, 10 },
1496 { ISD::CTTZ, MVT::v2i64, 14 },
1497 { ISD::CTTZ, MVT::v4i32, 18 },
1498 { ISD::CTTZ, MVT::v8i16, 16 },
1499 { ISD::CTTZ, MVT::v16i8, 13 },
1500 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/
1501 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
1503 static const CostTblEntry SSE1CostTbl[] = {
1504 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/
1505 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
1507 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
1508 { ISD::BITREVERSE, MVT::i64, 14 }
1510 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
1511 { ISD::BITREVERSE, MVT::i32, 14 },
1512 { ISD::BITREVERSE, MVT::i16, 14 },
1513 { ISD::BITREVERSE, MVT::i8, 11 }
  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  case Intrinsic::ctlz:
    ISD = ISD::CTLZ;
    break;
  case Intrinsic::ctpop:
    ISD = ISD::CTPOP;
    break;
  case Intrinsic::cttz:
    ISD = ISD::CTTZ;
    break;
  case Intrinsic::sqrt:
    ISD = ISD::FSQRT;
    break;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
  MVT MTy = LT.second;

  // Attempt to lookup cost.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->is64Bit())
    if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
    return LT.first * Entry->Cost;

  return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF,
                                      ScalarizationCostPassed);
}

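// Worked example (illustrative): a ctpop over <8 x i32> on an AVX2 target
// legalizes to a single v8i32 (LT.first == 1) and hits the AVX2 table entry
// of 11, so the returned cost is 11. On an SSE2-only target the same type is
// split into two v4i32 halves (LT.first == 2) at a table cost of 15 each:
//
//   getIntrinsicInstrCost(Intrinsic::ctpop, <8 x i32>, ...) == 2 * 15 == 30
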
int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Value *> Args,
                                      FastMathFlags FMF, unsigned VF) {
  return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF, VF);
}

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  Type *ScalarType = Val->getScalarType();

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (ScalarType->isFloatingPointTy() && Index == 0)
      return 0;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  int RegisterFileMoveCost = 0;
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost = 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}

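// Worked example (illustrative): extracting element 5 from <8 x i32> on an
// SSE2 target normalizes the index against the legal v4i32 type, so the base
// cost is queried for index 5 % 4 == 1. Extracting element 0 of <4 x float>
// is free, since a floating-point scalar already lives in lane #0.
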
int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
  // proxy for a double-pumped AVX memory interface such as on Sandybridge.
  if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
    Cost *= 2;

  return Cost;
}

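// Worked example (illustrative): storing <3 x float> returns the fixed cost
// of 3 from the special case above. A load of <8 x i32> on an SSE2 target
// legalizes to two 128-bit operations (LT.first == 2), so it costs 2; on a
// target where unaligned 32-byte accesses are slow, a single 256-bit access
// is likewise doubled to 2.
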
int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // To calculate scalar take the regular cost, without mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for
    // the mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

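// Worked example (illustrative): a legal masked load of <8 x i32> on an AVX2
// target has LT.first == 1 and no promotion or expansion cost, so it returns
// 1 * 4 == 4 (one maskmov); on an AVX-512 target the same query returns 1.
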
int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  // The cost of a strided access computation is hidden by the indexing modes
  // of X86 regardless of the stride value. We don't believe that there is a
  // difference between a constant stride in general and a constant stride
  // whose value is less than or equal to 64.
  // Even in the case of a (loop invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE) {
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

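// Worked example (illustrative): a vector access with no recognizable stride
// returns 10 to reflect the hidden micro-op overhead; a strided access with a
// loop-invariant but non-constant stride returns 1 (one extra ADD); a
// constant-stride access falls through to the base implementation.
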
int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD, MVT::v8i16, 5 },
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD, MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD, MVT::v8i16, 5 },
    { ISD::ADD, MVT::v8i32, 5 },
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD, MVT::v4i64, 3 },
    { ISD::ADD, MVT::v8i16, 4 },
    { ISD::ADD, MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

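// Worked example (illustrative): a non-pairwise FADD reduction over v8f32 on
// an AVX target hits the AVX1CostTblNoPairWise entry of 4, while the pairwise
// form of the same reduction costs 7 from AVX1CostTblPairWise.
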
/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

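// Worked example (illustrative): materializing 0 is free, any value that fits
// in a sign-extended 32-bit immediate costs TCC_Basic, and a full 64-bit
// constant such as (1LL << 40) costs 2 * TCC_Basic (a movabs).
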
int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

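// Worked example (illustrative): the i128 constant 5 splits into a low chunk
// of 5 (TCC_Basic) and a high chunk of 0 (TCC_Free), giving
// std::max(1, 1 + 0) == 1; an i128 whose low chunk is (1LL << 40) costs
// std::max(1, 2 + 0) == 2.
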
int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32-bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

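// Worked example (illustrative): for an "icmp eq i64 %x, 4294967295" the
// immediate is reported as TCC_Free because the backend can lower the check
// with a shift, so constant hoisting leaves it alone. Likewise, a 64-bit
// "and" with a zero-extended 32-bit immediate stays free.
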
int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

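// Worked example (illustrative): the first two operands of an
// llvm.experimental.stackmap call (Idx < 2) are always free so they stay as
// immediates, and a 32-bit addend of llvm.sadd.with.overflow folds into the
// instruction, so it is free as well.
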
// Return an average cost of a Gather / Scatter instruction, possibly refined
// later.
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
                                unsigned Alignment, unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = SrcVTy->getVectorNumElements();

  // Try to reduce index size from 64 bit (default for GEP)
  // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
  // operation will use 16 x 64 indices which do not fit in a zmm and need
  // to be split. Also check that the base pointer is the same for all lanes,
  // and that there's at most one variable index.
  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (IndxTy->isVectorTy())
        IndxTy = IndxTy->getVectorElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Trying to reduce IndexSize to 32 bits for vector 16.
  // By default the IndexSize is equal to pointer size.
  unsigned IndexSize = (VF >= 16) ? getIndexSizeInBits(Ptr, DL)
                                  : DL.getPointerSizeInBits();

  Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
                                                    IndexSize), VF);
  std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
  if (SplitFactor > 1) {
    // Handle splitting of vector of pointers.
    Type *SplitSrcTy =
        VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = 2;
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           Alignment, AddressSpace);
}

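// Worked example (illustrative): a gather of <16 x float> whose GEP index is
// a sign-extended i32 can use a v16i32 index vector, which fits a single zmm,
// so SplitFactor == 1 and the cost is 2 + 16 * (scalar load cost), i.e. 18
// assuming a scalar load cost of 1. If the index must stay 64 bit, the
// v16i64 indices force a split into two half-width gathers.
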
/// Return the cost of full scalarization of gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - pointer[s] address space.
///
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, unsigned Alignment,
                                unsigned AddressSpace) {
  unsigned VF = SrcVTy->getVectorNumElements();

  int MaskUnpackCost = 0;
  if (VariableMask) {
    VectorType *MaskTy =
        VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          Alignment, AddressSpace);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}

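// Worked example (illustrative): scalarizing a variable-mask gather of
// <4 x double> pays for unpacking the i1 mask, four compare-and-branch
// sequences, four scalar loads, and four insertelements to rebuild the
// result vector; the three terms above sum exactly these pieces.
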
/// Calculate the cost of Gather / Scatter operation.
int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
                                       Value *Ptr, bool VariableMask,
                                       unsigned Alignment) {
  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = SrcVTy->getVectorNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
    Scalarize = true;
  // Gather / Scatter for vector 2 is not profitable on KNL / SKX.
  // The vector-4 variant of the gather/scatter instruction does not exist on
  // KNL. We can extend it to 8 elements, but zeroing the upper bits of
  // the mask vector will add more instructions. Right now we give the scalar
  // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter
  // instruction is better in the VariableMask case.
  if (VF == 2 || (VF == 4 && !ST->hasVLX()))
    Scalarize = true;

  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

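// Worked example (illustrative): a <2 x double> gather is always costed as
// scalar code, and a <4 x double> gather is scalarized on KNL (no VLX) but
// costed as a real gather instruction on SKX, which has VLX.
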
bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
         ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
}

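// Worked example (illustrative): masked loads of 32- and 64-bit elements are
// legal from AVX onwards (e.g. vmaskmovps), while 8- and 16-bit elements
// additionally require AVX-512 BW.
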
bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is called now in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // scalar element.
  // Later on, the cost model will estimate usage of this intrinsic based on
  // the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  if (isa<VectorType>(DataTy) && !isPowerOf2_32(DataTy->getVectorNumElements()))
    return false;
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  // AVX-512 allows gather and scatter.
  return (DataWidth == 32 || DataWidth == 64) && ST->hasAVX512();
}

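// Worked example (illustrative): the Loop Vectorizer asking about a scalar
// i32 gets true on any AVX-512 target, while the Scalarizer asking about
// <3 x i32> gets false because the element count is not a power of two.
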
bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  return isLegalMaskedGather(DataType);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Treat this as a subset check on subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}

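// Worked example (illustrative): a callee compiled for sse4.2 may be inlined
// into a caller compiled for avx2, since the callee's feature bits are a
// subset of the caller's, but an avx512f callee cannot be inlined into an
// avx2-only caller.
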
bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom());
}

// Get estimation for interleaved load/store operations and strided load.
// \p Indices contains indices for strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace) {

  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps), required
  // for load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  if (Opcode == Instruction::Load) {
    // Kind of shuffle depends on number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
                                     VecTy->getVectorNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About a half of the loads may be folded in shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at
    // all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get a number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
    // When we have more than one destination, we need additional instructions
    // to keep sources.
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
  // We need additional instructions to keep sources.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;

  return Cost;
}

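// Worked example (illustrative): loading an interleave group with VF=4,
// Factor=3 and i32 elements (all indices used) means VecTy = <12 x i32>,
// which fits one zmm, so NumOfMemOps == 1. Each of the three <4 x i32>
// results then needs one single-source shuffle, giving a total cost of
// 3 * ShuffleCost + 1 * MemOpCost.
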
int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool &RequiresBW) {
    RequiresBW = false;
    Type *EltTy = VecTy->getVectorElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8)) {
      RequiresBW = true;
      return true;
    }
    return false;
  };
  bool RequiresBW;
  bool HasAVX512Solution = isSupportedOnAVX512(VecTy, RequiresBW);
  if (ST->hasAVX512() && HasAVX512Solution && (!RequiresBW || ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace);
  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}