1 //===-- X86InstrAVX512.td - AVX512 Instruction Set ---------*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 AVX512 instruction set, defining the
11 // instructions, and properties of the instructions which are needed for code
12 // generation, machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
// Group template arguments that can be derived from the vector type (EltNum x
// EltVT). These are things like the register class for the writemask, etc.
// The idea is to pass one of these as the template argument rather than the
// individual arguments.
// The template is also used for scalar types, in this case numelts is 1.
class X86VectorVTInfo<int numelts, ValueType eltvt, RegisterClass rc,
  RegisterClass RC = rc;
  ValueType EltVT = eltvt;
  int NumElts = numelts;

  // Corresponding mask register class.
  RegisterClass KRC = !cast<RegisterClass>("VK" # NumElts);

  // Corresponding write-mask register class.
  RegisterClass KRCWM = !cast<RegisterClass>("VK" # NumElts # "WM");

  // Mask value type: i1 for the scalar case, vNi1 for vectors.
  ValueType KVT = !cast<ValueType>(!if (!eq (NumElts, 1), "i1",
                                        "v" # NumElts # "i1"));

  // The GPR register class that can hold the write mask. Use GR8 for fewer
  // than 8 elements. Use shift-right and equal to work around the lack of
                        !cast<RegisterClass>("GR" #
                            !if (!eq (!srl(NumElts, 3), 0), 8, NumElts));

  // Suffix used in the instruction mnemonic.
  string Suffix = suffix;

  // VTName is a string name for vector VT. For vector types it will be
  // v # NumElts # EltVT, so for vector of 8 elements of i32 it will be v8i32
  // It is a little bit complex for scalar types, where NumElts = 1.
  // In this case we build v4f32 or v2f64
  string VTName = "v" # !if (!eq (NumElts, 1),
                        !if (!eq (EltVT.Size, 32), 4,
                        !if (!eq (EltVT.Size, 64), 2, NumElts)), NumElts) # EltVT;

  // The vector VT, materialized from VTName.
  ValueType VT = !cast<ValueType>(VTName);

  string EltTypeName = !cast<string>(EltVT);
  // Size of the element type in bits, e.g. 32 for v16i32.
  string EltSizeName = !subst("i", "", !subst("f", "", EltTypeName));
  int EltSize = EltVT.Size;

  // "i" for integer types and "f" for floating-point types
  string TypeVariantName = !subst(EltSizeName, "", EltTypeName);

  // Size of RC in bits, e.g. 512 for VR512.

  // The corresponding memory operand, e.g. i512mem for VR512.
  X86MemOperand MemOp = !cast<X86MemOperand>(TypeVariantName # Size # "mem");
  X86MemOperand ScalarMemOp = !cast<X86MemOperand>(EltVT # "mem");

  // Load fragment for the full vector.
  // Note: For 128/256-bit integer VT we choose loadv2i64/loadv4i64
  // due to load promotion during legalization
  PatFrag LdFrag = !cast<PatFrag>("load" #
                                  !if (!eq (TypeVariantName, "i"),
                                       !if (!eq (Size, 128), "v2i64",
                                       !if (!eq (Size, 256), "v4i64",
                                       !if (!eq (Size, 512), "v8i64",

  // Aligned-load fragment; same integer-promotion note as LdFrag applies.
  PatFrag AlignedLdFrag = !cast<PatFrag>("alignedload" #
                                         !if (!eq (TypeVariantName, "i"),
                                              !if (!eq (Size, 128), "v2i64",
                                              !if (!eq (Size, 256), "v4i64",
                                              !if (!eq (Size, 512), "v8i64",

  PatFrag ScalarLdFrag = !cast<PatFrag>("load" # EltVT);

  // The corresponding float type, e.g. v16f32 for v16i32
  // Note: For EltSize < 32, FloatVT is illegal and TableGen
  //       fails to compile, so we choose FloatVT = VT
  ValueType FloatVT = !cast<ValueType>(
                        !if (!eq (!srl(EltSize,5),0),
                             !if (!eq(TypeVariantName, "i"),
                                  "v" # NumElts # "f" # EltSize,

  // The corresponding integer type, with the same EltSize caveat as FloatVT.
  ValueType IntVT = !cast<ValueType>(
                        !if (!eq (!srl(EltSize,5),0),
                             !if (!eq(TypeVariantName, "f"),
                                  "v" # NumElts # "i" # EltSize,

  // The string to specify embedded broadcast in assembly.
  string BroadcastStr = "{1to" # NumElts # "}";

  // 8-bit compressed displacement tuple/subvector format. This is only
  // defined for NumElts <= 8.
  CD8VForm CD8TupleForm = !if (!eq (!srl(NumElts, 4), 0),
                               !cast<CD8VForm>("CD8VT" # NumElts), ?);

  // Subregister index for extracting the low 128/256 bits of a wider register.
  SubRegIndex SubRegIdx = !if (!eq (Size, 128), sub_xmm,
                          !if (!eq (Size, 256), sub_ymm, ?));

  // Execution domain, keyed off the element type name.
  Domain ExeDomain = !if (!eq (EltTypeName, "f32"), SSEPackedSingle,
                     !if (!eq (EltTypeName, "f64"), SSEPackedDouble,

  RegisterClass FRC = !if (!eq (EltTypeName, "f32"), FR32X, FR64X);

  // A vector type of the same width with element type i64. This is used to
  // create patterns for logic ops.
  ValueType i64VT = !cast<ValueType>("v" # !srl(Size, 6) # "i64");

  // A vector type of the same width with element type i32. This is used to
  // create the canonical constant zero node ImmAllZerosV.
  ValueType i32VT = !cast<ValueType>("v" # !srl(Size, 5) # "i32");
  dag ImmAllZerosV = (VT (bitconvert (i32VT immAllZerosV)));

  // Instruction-name suffix selecting the EVEX vector length variant.
  string ZSuffix = !if (!eq (Size, 128), "Z128",
                   !if (!eq (Size, 256), "Z256", "Z"));
// 512-bit vector type descriptors.
def v64i8_info  : X86VectorVTInfo<64,  i8, VR512, "b">;
def v32i16_info : X86VectorVTInfo<32, i16, VR512, "w">;
def v16i32_info : X86VectorVTInfo<16, i32, VR512, "d">;
def v8i64_info  : X86VectorVTInfo<8,  i64, VR512, "q">;
def v16f32_info : X86VectorVTInfo<16, f32, VR512, "ps">;
def v8f64_info  : X86VectorVTInfo<8,  f64, VR512, "pd">;

// 256-bit vector type descriptors.
// "x" in v32i8x_info means RC = VR256X
def v32i8x_info  : X86VectorVTInfo<32,  i8, VR256X, "b">;
def v16i16x_info : X86VectorVTInfo<16, i16, VR256X, "w">;
def v8i32x_info  : X86VectorVTInfo<8,  i32, VR256X, "d">;
def v4i64x_info  : X86VectorVTInfo<4,  i64, VR256X, "q">;
def v8f32x_info  : X86VectorVTInfo<8,  f32, VR256X, "ps">;
def v4f64x_info  : X86VectorVTInfo<4,  f64, VR256X, "pd">;

// 128-bit vector type descriptors.
def v16i8x_info  : X86VectorVTInfo<16,  i8, VR128X, "b">;
def v8i16x_info  : X86VectorVTInfo<8,  i16, VR128X, "w">;
def v4i32x_info  : X86VectorVTInfo<4,  i32, VR128X, "d">;
def v2i64x_info  : X86VectorVTInfo<2,  i64, VR128X, "q">;
def v4f32x_info  : X86VectorVTInfo<4,  f32, VR128X, "ps">;
def v2f64x_info  : X86VectorVTInfo<2,  f64, VR128X, "pd">;

// We map scalar types to the smallest (128-bit) vector type
// with the appropriate element type. This allows to use the same masking logic.
def i32x_info    : X86VectorVTInfo<1,  i32, GR32, "si">;
def i64x_info    : X86VectorVTInfo<1,  i64, GR64, "sq">;
def f32x_info    : X86VectorVTInfo<1,  f32, VR128X, "ss">;
def f64x_info    : X86VectorVTInfo<1,  f64, VR128X, "sd">;
// Bundles the 512-, 256- and 128-bit descriptors for one element type, so a
// single template argument can drive all three EVEX vector-length variants.
class AVX512VLVectorVTInfo<X86VectorVTInfo i512, X86VectorVTInfo i256,
                           X86VectorVTInfo i128> {
  X86VectorVTInfo info512 = i512;
  X86VectorVTInfo info256 = i256;
  X86VectorVTInfo info128 = i128;

// Per-element-type bundles of the 512/256/128-bit descriptors above.
def avx512vl_i8_info  : AVX512VLVectorVTInfo<v64i8_info, v32i8x_info,
def avx512vl_i16_info : AVX512VLVectorVTInfo<v32i16_info, v16i16x_info,
def avx512vl_i32_info : AVX512VLVectorVTInfo<v16i32_info, v8i32x_info,
def avx512vl_i64_info : AVX512VLVectorVTInfo<v8i64_info, v4i64x_info,
def avx512vl_f32_info : AVX512VLVectorVTInfo<v16f32_info, v8f32x_info,
def avx512vl_f64_info : AVX512VLVectorVTInfo<v8f64_info, v4f64x_info,
// This multiclass generates the masking variants from the non-masking
// variant. It only provides the assembly pieces for the masking variants.
// It assumes custom ISel patterns for masking which can be provided as
// template arguments.
// Emits three records: NAME (unmasked), NAME#k (merge-masking, "{k}") and
// NAME#kz (zero-masking, "{k}{z}").
multiclass AVX512_maskable_custom<bits<8> O, Format F,
                                  dag Ins, dag MaskingIns, dag ZeroMaskingIns,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  list<dag> MaskingPattern,
                                  list<dag> ZeroMaskingPattern,
                                  string MaskingConstraint = "",
                                  InstrItinClass itin = NoItinerary,
                                  bit IsCommutable = 0,
                                  bit IsKCommutable = 0> {
  let isCommutable = IsCommutable in
    def NAME: AVX512<O, F, Outs, Ins,
                       OpcodeStr#"\t{"#AttSrcAsm#", $dst|"#
                                     "$dst, "#IntelSrcAsm#"}",

  // Prefer over VMOV*rrk Pat<>
  let AddedComplexity = 20, isCommutable = IsKCommutable in
    def NAME#k: AVX512<O, F, Outs, MaskingIns,
                       OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}}|"#
                                     "$dst {${mask}}, "#IntelSrcAsm#"}",
                       MaskingPattern, itin>,
              // In case of the 3src subclass this is overridden with a let.
              string Constraints = MaskingConstraint;

  // Zero mask does not add any restrictions to commute operands transformation.
  // So, it is Ok to use IsCommutable instead of IsKCommutable.
  let AddedComplexity = 30, isCommutable = IsCommutable in // Prefer over VMOV*rrkz Pat<>
    def NAME#kz: AVX512<O, F, Outs, ZeroMaskingIns,
                       OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}} {z}|"#
                                     "$dst {${mask}} {z}, "#IntelSrcAsm#"}",
// Common base class of AVX512_maskable and AVX512_maskable_3src.
// Builds the three result patterns (unmasked / merge-masked / zero-masked)
// from RHS and MaskingRHS and forwards everything else to
// AVX512_maskable_custom.
multiclass AVX512_maskable_common<bits<8> O, Format F, X86VectorVTInfo _,
                                  dag Ins, dag MaskingIns, dag ZeroMaskingIns,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  dag RHS, dag MaskingRHS,
                                  SDNode Select = vselect,
                                  string MaskingConstraint = "",
                                  InstrItinClass itin = NoItinerary,
                                  bit IsCommutable = 0,
                                  bit IsKCommutable = 0> :
  AVX512_maskable_custom<O, F, Outs, Ins, MaskingIns, ZeroMaskingIns, OpcodeStr,
                         AttSrcAsm, IntelSrcAsm,
                         [(set _.RC:$dst, RHS)],
                         [(set _.RC:$dst, MaskingRHS)],
                                  (Select _.KRCWM:$mask, RHS, _.ImmAllZerosV))],
                         MaskingConstraint, NoItinerary, IsCommutable,
// This multiclass generates the unconditional/non-masking, the masking and
// the zero-masking variant of the vector instruction. In the masking case, the
// preserved vector elements come from a new dummy input operand tied to $dst.
multiclass AVX512_maskable<bits<8> O, Format F, X86VectorVTInfo _,
                           dag Outs, dag Ins, string OpcodeStr,
                           string AttSrcAsm, string IntelSrcAsm,
                           InstrItinClass itin = NoItinerary,
                           bit IsCommutable = 0, bit IsKCommutable = 0,
                           SDNode Select = vselect> :
  AVX512_maskable_common<O, F, _, Outs, Ins,
                         // $src0 is the tied pass-through operand for merging.
                         !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                         !con((ins _.KRCWM:$mask), Ins),
                         OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                         (Select _.KRCWM:$mask, RHS, _.RC:$src0), Select,
                         "$src0 = $dst", itin, IsCommutable, IsKCommutable>;
// This multiclass generates the unconditional/non-masking, the masking and
// the zero-masking variant of the scalar instruction.
// Scalar selects use X86selects (only element 0 is masked) rather than
// the vector-wide vselect.
multiclass AVX512_maskable_scalar<bits<8> O, Format F, X86VectorVTInfo _,
                           dag Outs, dag Ins, string OpcodeStr,
                           string AttSrcAsm, string IntelSrcAsm,
                           InstrItinClass itin = NoItinerary,
                           bit IsCommutable = 0> :
  AVX512_maskable_common<O, F, _, Outs, Ins,
                         !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                         !con((ins _.KRCWM:$mask), Ins),
                         OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                         (X86selects _.KRCWM:$mask, RHS, _.RC:$src0),
                         X86selects, "$src0 = $dst", itin, IsCommutable>;
// Similar to AVX512_maskable but in this case one of the source operands
// ($src1) is already tied to $dst so we just use that for the preserved
// vector elements. NOTE that the NonTiedIns (the ins dag) should exclude
multiclass AVX512_maskable_3src<bits<8> O, Format F, X86VectorVTInfo _,
                                dag Outs, dag NonTiedIns, string OpcodeStr,
                                string AttSrcAsm, string IntelSrcAsm,
                                dag RHS, bit IsCommutable = 0,
                                bit IsKCommutable = 0> :
  AVX512_maskable_common<O, F, _, Outs,
                         // $src1 doubles as the merge source; no extra $src0.
                         !con((ins _.RC:$src1), NonTiedIns),
                         !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
                         !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
                         OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                         (vselect _.KRCWM:$mask, RHS, _.RC:$src1),
                         vselect, "", NoItinerary, IsCommutable, IsKCommutable>;
// Scalar counterpart of AVX512_maskable_3src: $src1 is tied to $dst and
// provides the preserved elements; masking selects with X86selects.
multiclass AVX512_maskable_3src_scalar<bits<8> O, Format F, X86VectorVTInfo _,
                                     dag Outs, dag NonTiedIns, string OpcodeStr,
                                     string AttSrcAsm, string IntelSrcAsm,
                                     dag RHS, bit IsCommutable = 0,
                                     bit IsKCommutable = 0> :
  AVX512_maskable_common<O, F, _, Outs,
                         !con((ins _.RC:$src1), NonTiedIns),
                         !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
                         !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
                         OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                         (X86selects _.KRCWM:$mask, RHS, _.RC:$src1),
                         X86selects, "", NoItinerary, IsCommutable,
// Assembly-only masking variants: provides the {k}/{k}{z} syntax but takes
// the (unmasked) selection patterns verbatim and no masking patterns.
multiclass AVX512_maskable_in_asm<bits<8> O, Format F, X86VectorVTInfo _,
                                  string AttSrcAsm, string IntelSrcAsm,
  AVX512_maskable_custom<O, F, Outs, Ins,
                         !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                         !con((ins _.KRCWM:$mask), Ins),
                         OpcodeStr, AttSrcAsm, IntelSrcAsm, Pattern, [], [],
// Instruction with mask that puts result in mask register,
// like "compare" and "vptest".
// Only two variants here: unmasked and merge-masked (no zero-masking, since
// the destination is a mask register).
multiclass AVX512_maskable_custom_cmp<bits<8> O, Format F,
                                  dag Ins, dag MaskingIns,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  list<dag> MaskingPattern,
                                  bit IsCommutable = 0> {
  let isCommutable = IsCommutable in
    def NAME: AVX512<O, F, Outs, Ins,
                       OpcodeStr#"\t{"#AttSrcAsm#", $dst|"#
                                     "$dst, "#IntelSrcAsm#"}",
                       Pattern, NoItinerary>;

    def NAME#k: AVX512<O, F, Outs, MaskingIns,
                       OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}}|"#
                                     "$dst {${mask}}, "#IntelSrcAsm#"}",
                       MaskingPattern, NoItinerary>, EVEX_K;
// Builds the mask-register set patterns from RHS/MaskingRHS and forwards to
// AVX512_maskable_custom_cmp.
multiclass AVX512_maskable_common_cmp<bits<8> O, Format F, X86VectorVTInfo _,
                                  dag Ins, dag MaskingIns,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  dag RHS, dag MaskingRHS,
                                  bit IsCommutable = 0> :
  AVX512_maskable_custom_cmp<O, F, Outs, Ins, MaskingIns, OpcodeStr,
                         AttSrcAsm, IntelSrcAsm,
                         [(set _.KRC:$dst, RHS)],
                         [(set _.KRC:$dst, MaskingRHS)], IsCommutable>;
// Standard masked compare: the masked result is the unmasked result ANDed
// with the write mask.
multiclass AVX512_maskable_cmp<bits<8> O, Format F, X86VectorVTInfo _,
                           dag Outs, dag Ins, string OpcodeStr,
                           string AttSrcAsm, string IntelSrcAsm,
                           dag RHS, bit IsCommutable = 0> :
  AVX512_maskable_common_cmp<O, F, _, Outs, Ins,
                         !con((ins _.KRCWM:$mask), Ins),
                         OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                         (and _.KRCWM:$mask, RHS), IsCommutable>;
// Assembly-only compare variant: emits the masked/unmasked syntax with no
// selection patterns at all.
multiclass AVX512_maskable_cmp_alt<bits<8> O, Format F, X86VectorVTInfo _,
                           dag Outs, dag Ins, string OpcodeStr,
                           string AttSrcAsm, string IntelSrcAsm> :
  AVX512_maskable_custom_cmp<O, F, Outs,
                             Ins, !con((ins _.KRCWM:$mask),Ins), OpcodeStr,
                             AttSrcAsm, IntelSrcAsm, [],[]>;
// This multiclass generates the unconditional/non-masking, the masking and
// the zero-masking variant of the vector instruction. In the masking case, the
// preserved vector elements come from a new dummy input operand tied to $dst.
// Unlike AVX512_maskable it takes a separate MaskedRHS, because logic ops are
// pattern-matched on a bitcasted type for the masked forms.
multiclass AVX512_maskable_logic<bits<8> O, Format F, X86VectorVTInfo _,
                                 dag Outs, dag Ins, string OpcodeStr,
                                 string AttSrcAsm, string IntelSrcAsm,
                                 dag RHS, dag MaskedRHS,
                                 InstrItinClass itin = NoItinerary,
                                 bit IsCommutable = 0, SDNode Select = vselect> :
  AVX512_maskable_custom<O, F, Outs, Ins,
                         !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                         !con((ins _.KRCWM:$mask), Ins),
                         OpcodeStr, AttSrcAsm, IntelSrcAsm,
                         [(set _.RC:$dst, RHS)],
                                 (Select _.KRCWM:$mask, MaskedRHS, _.RC:$src0))],
                                 (Select _.KRCWM:$mask, MaskedRHS,
                         "$src0 = $dst", itin, IsCommutable>;
// Bitcasts between 512-bit vector types. Return the original type since
// no instruction is needed for the conversion.
// Each destination type gets one pattern per distinct source type; the
// previously duplicated (v32i16 (bitconvert v16f32)) entry has been removed.
def : Pat<(v8f64  (bitconvert (v8i64 VR512:$src))),  (v8f64 VR512:$src)>;
def : Pat<(v8f64  (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v8f64  (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v8f64  (bitconvert (v64i8 VR512:$src))),  (v8f64 VR512:$src)>;
def : Pat<(v8f64  (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))),  (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))),  (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))),  (v16f32 VR512:$src)>;
def : Pat<(v8i64  (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v8i64  (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v8i64  (bitconvert (v64i8 VR512:$src))),  (v8i64 VR512:$src)>;
def : Pat<(v8i64  (bitconvert (v8f64 VR512:$src))),  (v8i64 VR512:$src)>;
def : Pat<(v8i64  (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))),  (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))),  (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))),  (v16i32 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))),  (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))),  (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))),  (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v64i8  (bitconvert (v8i64 VR512:$src))),  (v64i8 VR512:$src)>;
def : Pat<(v64i8  (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
def : Pat<(v64i8  (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
def : Pat<(v64i8  (bitconvert (v8f64 VR512:$src))),  (v64i8 VR512:$src)>;
def : Pat<(v64i8  (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;
// Alias instruction that maps zero vector to pxor / xorp* for AVX-512.
// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
// swizzled by ExecutionDepsFix to pxor.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX512], SchedRW = [WriteZero] in {
def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
               [(set VR512:$dst, (v16i32 immAllZerosV))]>;
def AVX512_512_SETALLONES : I<0, Pseudo, (outs VR512:$dst), (ins), "",
               [(set VR512:$dst, (v16i32 immAllOnesV))]>;

// 128/256-bit zero pseudos require VLX for the EVEX-only register classes.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasVLX], SchedRW = [WriteZero] in {
def AVX512_128_SET0 : I<0, Pseudo, (outs VR128X:$dst), (ins), "",
               [(set VR128X:$dst, (v4i32 immAllZerosV))]>;
def AVX512_256_SET0 : I<0, Pseudo, (outs VR256X:$dst), (ins), "",
               [(set VR256X:$dst, (v8i32 immAllZerosV))]>;
// Alias instructions that map fld0 to xorps for sse or vxorps for avx.
// This is expanded by ExpandPostRAPseudos.
// Scalar FP zero pseudos over the EVEX scalar register classes.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero], Predicates = [HasVLX, HasDQI] in {
  def AVX512_FsFLD0SS : I<0, Pseudo, (outs FR32X:$dst), (ins), "",
                          [(set FR32X:$dst, fp32imm0)]>;
  def AVX512_FsFLD0SD : I<0, Pseudo, (outs FR64X:$dst), (ins), "",
                          [(set FR64X:$dst, fpimm0)]>;
//===----------------------------------------------------------------------===//
// AVX-512 - VECTOR INSERT
// Defines the register (rr) and memory (rm) forms of one VINSERT variant,
// with full masking support via AVX512_maskable.
multiclass vinsert_for_size<int Opcode, X86VectorVTInfo From, X86VectorVTInfo To,
                            PatFrag vinsert_insert> {
  let ExeDomain = To.ExeDomain in {
    defm rr : AVX512_maskable<Opcode, MRMSrcReg, To, (outs To.RC:$dst),
                   (ins To.RC:$src1, From.RC:$src2, i32u8imm:$src3),
                   "vinsert" # From.EltTypeName # "x" # From.NumElts,
                   "$src3, $src2, $src1", "$src1, $src2, $src3",
                   (vinsert_insert:$src3 (To.VT To.RC:$src1),
                                         (From.VT From.RC:$src2),
                                         (iPTR imm))>, AVX512AIi8Base, EVEX_4V;

    defm rm : AVX512_maskable<Opcode, MRMSrcMem, To, (outs To.RC:$dst),
                   (ins To.RC:$src1, From.MemOp:$src2, i32u8imm:$src3),
                   "vinsert" # From.EltTypeName # "x" # From.NumElts,
                   "$src3, $src2, $src1", "$src1, $src2, $src3",
                   (vinsert_insert:$src3 (To.VT To.RC:$src1),
                                         (From.VT (bitconvert (From.LdFrag addr:$src2))),
                                         (iPTR imm))>, AVX512AIi8Base, EVEX_4V,
                   EVEX_CD8<From.EltSize, From.CD8TupleForm>;
// Codegen-only patterns that lower an insert_subvector of alternative types
// onto an already-defined VINSERT instruction (selected by InstrStr).
multiclass vinsert_for_size_lowering<string InstrStr, X86VectorVTInfo From,
                                     X86VectorVTInfo To, PatFrag vinsert_insert,
                                     SDNodeXForm INSERT_get_vinsert_imm , list<Predicate> p> {
  let Predicates = p in {
    // Register form.
    def : Pat<(vinsert_insert:$ins
                     (To.VT To.RC:$src1), (From.VT From.RC:$src2), (iPTR imm)),
              (To.VT (!cast<Instruction>(InstrStr#"rr")
                     To.RC:$src1, From.RC:$src2,
                     (INSERT_get_vinsert_imm To.RC:$ins)))>;

    // Memory form (folded load).
    def : Pat<(vinsert_insert:$ins
                     (From.VT (bitconvert (From.LdFrag addr:$src2))),
              (To.VT (!cast<Instruction>(InstrStr#"rm")
                     To.RC:$src1, addr:$src2,
                     (INSERT_get_vinsert_imm To.RC:$ins)))>;
// Instantiates all VINSERT size/length variants for one f32/f64 (or i32/i64)
// pair, gated on the feature predicates each encoding requires.
multiclass vinsert_for_type<ValueType EltVT32, int Opcode128,
                            ValueType EltVT64, int Opcode256> {
  // 32x4 into 256-bit requires VLX.
  let Predicates = [HasVLX] in
    defm NAME # "32x4Z256" : vinsert_for_size<Opcode128,
                                   X86VectorVTInfo< 4, EltVT32, VR128X>,
                                   X86VectorVTInfo< 8, EltVT32, VR256X>,
                                   vinsert128_insert>, EVEX_V256;

  defm NAME # "32x4Z" : vinsert_for_size<Opcode128,
                                 X86VectorVTInfo< 4, EltVT32, VR128X>,
                                 X86VectorVTInfo<16, EltVT32, VR512>,
                                 vinsert128_insert>, EVEX_V512;

  defm NAME # "64x4Z" : vinsert_for_size<Opcode256,
                                 X86VectorVTInfo< 4, EltVT64, VR256X>,
                                 X86VectorVTInfo< 8, EltVT64, VR512>,
                                 vinsert256_insert>, VEX_W, EVEX_V512;

  // 64x2 and 32x8 element-size variants are DQI-only encodings.
  let Predicates = [HasVLX, HasDQI] in
    defm NAME # "64x2Z256" : vinsert_for_size<Opcode128,
                                   X86VectorVTInfo< 2, EltVT64, VR128X>,
                                   X86VectorVTInfo< 4, EltVT64, VR256X>,
                                   vinsert128_insert>, VEX_W, EVEX_V256;

  let Predicates = [HasDQI] in {
    defm NAME # "64x2Z" : vinsert_for_size<Opcode128,
                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
                                 X86VectorVTInfo< 8, EltVT64, VR512>,
                                 vinsert128_insert>, VEX_W, EVEX_V512;

    defm NAME # "32x8Z" : vinsert_for_size<Opcode256,
                                 X86VectorVTInfo< 8, EltVT32, VR256X>,
                                 X86VectorVTInfo<16, EltVT32, VR512>,
                                 vinsert256_insert>, EVEX_V512;
// Concrete VINSERTF*/VINSERTI* instruction families.
defm VINSERTF : vinsert_for_type<f32, 0x18, f64, 0x1a>;
defm VINSERTI : vinsert_for_type<i32, 0x38, i64, 0x3a>;

// Codegen pattern with the alternative types,
// Only add this if 64x2 and its friends are not supported natively via AVX512DQ.
defm : vinsert_for_size_lowering<"VINSERTF32x4Z256", v2f64x_info, v4f64x_info,
              vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX, NoDQI]>;
defm : vinsert_for_size_lowering<"VINSERTI32x4Z256", v2i64x_info, v4i64x_info,
              vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX, NoDQI]>;

defm : vinsert_for_size_lowering<"VINSERTF32x4Z", v2f64x_info, v8f64_info,
              vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512, NoDQI]>;
defm : vinsert_for_size_lowering<"VINSERTI32x4Z", v2i64x_info, v8i64_info,
              vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512, NoDQI]>;

defm : vinsert_for_size_lowering<"VINSERTF64x4Z", v8f32x_info, v16f32_info,
              vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512, NoDQI]>;
defm : vinsert_for_size_lowering<"VINSERTI64x4Z", v8i32x_info, v16i32_info,
              vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512, NoDQI]>;

// Codegen pattern with the alternative types insert VEC128 into VEC256
defm : vinsert_for_size_lowering<"VINSERTI32x4Z256", v8i16x_info, v16i16x_info,
              vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX]>;
defm : vinsert_for_size_lowering<"VINSERTI32x4Z256", v16i8x_info, v32i8x_info,
              vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX]>;
// Codegen pattern with the alternative types insert VEC128 into VEC512
defm : vinsert_for_size_lowering<"VINSERTI32x4Z", v8i16x_info, v32i16_info,
              vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512]>;
defm : vinsert_for_size_lowering<"VINSERTI32x4Z", v16i8x_info, v64i8_info,
              vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512]>;
// Codegen pattern with the alternative types insert VEC256 into VEC512
defm : vinsert_for_size_lowering<"VINSERTI64x4Z", v16i16x_info, v32i16_info,
              vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512]>;
defm : vinsert_for_size_lowering<"VINSERTI64x4Z", v32i8x_info, v64i8_info,
              vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512]>;
// vinsertps - insert f32 to XMM
let ExeDomain = SSEPackedSingle in {
// Register form.
def VINSERTPSZrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
      (ins VR128X:$src1, VR128X:$src2, u8imm:$src3),
      "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,

// Memory form: the f32 operand is loaded and widened to a v4f32 vector.
def VINSERTPSZrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
      (ins VR128X:$src1, f32mem:$src2, u8imm:$src3),
      "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      [(set VR128X:$dst, (X86insertps VR128X:$src1,
                          (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                          imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
//===----------------------------------------------------------------------===//
// AVX-512 VECTOR EXTRACT
// Defines the register (rr), store (mr) and masked-store (mrk) forms of one
// VEXTRACT variant, plus masked-select patterns mapping onto the k/kz forms.
multiclass vextract_for_size<int Opcode,
                             X86VectorVTInfo From, X86VectorVTInfo To,
                             PatFrag vextract_extract,
                             SDNodeXForm EXTRACT_get_vextract_imm> {

  let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
    // use AVX512_maskable_in_asm (AVX512_maskable can't be used due to
    // vextract_extract), we are interested only in patterns without mask,
    // intrinsic pattern matches are generated below.
    defm rr : AVX512_maskable_in_asm<Opcode, MRMDestReg, To, (outs To.RC:$dst),
                (ins From.RC:$src1, i32u8imm:$idx),
                "vextract" # To.EltTypeName # "x" # To.NumElts,
                "$idx, $src1", "$src1, $idx",
                [(set To.RC:$dst, (vextract_extract:$idx (From.VT From.RC:$src1),
              AVX512AIi8Base, EVEX;
    def mr  : AVX512AIi8<Opcode, MRMDestMem, (outs),
                    (ins To.MemOp:$dst, From.RC:$src1, i32u8imm:$idx),
                    "vextract" # To.EltTypeName # "x" # To.NumElts #
                        "\t{$idx, $src1, $dst|$dst, $src1, $idx}",
                    [(store (To.VT (vextract_extract:$idx
                                    (From.VT From.RC:$src1), (iPTR imm))),

    // Masked store form: assembly-only (hasSideEffects = 0, no pattern).
    let mayStore = 1, hasSideEffects = 0 in
    def mrk : AVX512AIi8<Opcode, MRMDestMem, (outs),
                    (ins To.MemOp:$dst, To.KRCWM:$mask,
                                        From.RC:$src1, i32u8imm:$idx),
                    "vextract" # To.EltTypeName # "x" # To.NumElts #
                        "\t{$idx, $src1, $dst {${mask}}|"
                        "$dst {${mask}}, $src1, $idx}",

  // Merge-masked extract -> rrk form.
  def : Pat<(To.VT (vselect To.KRCWM:$mask,
                            (vextract_extract:$ext (From.VT From.RC:$src1),
            (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts #
                                From.ZSuffix # "rrk")
                To.RC:$src0, To.KRCWM:$mask, From.RC:$src1,
                (EXTRACT_get_vextract_imm To.RC:$ext))>;

  // Zero-masked extract -> rrkz form.
  def : Pat<(To.VT (vselect To.KRCWM:$mask,
                            (vextract_extract:$ext (From.VT From.RC:$src1),
            (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts #
                                From.ZSuffix # "rrkz")
                To.KRCWM:$mask, From.RC:$src1,
                (EXTRACT_get_vextract_imm To.RC:$ext))>;
// Codegen pattern for the alternative types: lowers extract_subvector of an
// alternative type pair onto an existing VEXTRACT instruction (InstrStr).
multiclass vextract_for_size_lowering<string InstrStr, X86VectorVTInfo From,
                                      X86VectorVTInfo To, PatFrag vextract_extract,
                                      SDNodeXForm EXTRACT_get_vextract_imm, list<Predicate> p> {
  let Predicates = p in {
    // Register form.
    def : Pat<(vextract_extract:$ext (From.VT From.RC:$src1), (iPTR imm)),
              (To.VT (!cast<Instruction>(InstrStr#"rr")
                        (EXTRACT_get_vextract_imm To.RC:$ext)))>;
    // Store form (extract folded into the store).
    def : Pat<(store (To.VT (vextract_extract:$ext (From.VT From.RC:$src1),
                            (iPTR imm))), addr:$dst),
              (!cast<Instruction>(InstrStr#"mr") addr:$dst, From.RC:$src1,
                (EXTRACT_get_vextract_imm To.RC:$ext))>;
// Instantiates all VEXTRACT size/length variants for one f32/f64 (or i32/i64)
// pair, gated on the feature predicates each encoding requires.
multiclass vextract_for_type<ValueType EltVT32, int Opcode128,
                             ValueType EltVT64, int Opcode256> {
  defm NAME # "32x4Z" : vextract_for_size<Opcode128,
                                 X86VectorVTInfo<16, EltVT32, VR512>,
                                 X86VectorVTInfo< 4, EltVT32, VR128X>,
                                 EXTRACT_get_vextract128_imm>,
                                     EVEX_V512, EVEX_CD8<32, CD8VT4>;
  defm NAME # "64x4Z" : vextract_for_size<Opcode256,
                                 X86VectorVTInfo< 8, EltVT64, VR512>,
                                 X86VectorVTInfo< 4, EltVT64, VR256X>,
                                 EXTRACT_get_vextract256_imm>,
                                     VEX_W, EVEX_V512, EVEX_CD8<64, CD8VT4>;
  // 32x4 from 256-bit requires VLX.
  let Predicates = [HasVLX] in
    defm NAME # "32x4Z256" : vextract_for_size<Opcode128,
                                 X86VectorVTInfo< 8, EltVT32, VR256X>,
                                 X86VectorVTInfo< 4, EltVT32, VR128X>,
                                 EXTRACT_get_vextract128_imm>,
                                     EVEX_V256, EVEX_CD8<32, CD8VT4>;
  // 64x2 and 32x8 element-size variants are DQI-only encodings.
  let Predicates = [HasVLX, HasDQI] in
    defm NAME # "64x2Z256" : vextract_for_size<Opcode128,
                                 X86VectorVTInfo< 4, EltVT64, VR256X>,
                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
                                 EXTRACT_get_vextract128_imm>,
                                     VEX_W, EVEX_V256, EVEX_CD8<64, CD8VT2>;
  let Predicates = [HasDQI] in {
    defm NAME # "64x2Z" : vextract_for_size<Opcode128,
                                 X86VectorVTInfo< 8, EltVT64, VR512>,
                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
                                 EXTRACT_get_vextract128_imm>,
                                     VEX_W, EVEX_V512, EVEX_CD8<64, CD8VT2>;
    defm NAME # "32x8Z" : vextract_for_size<Opcode256,
                                 X86VectorVTInfo<16, EltVT32, VR512>,
                                 X86VectorVTInfo< 8, EltVT32, VR256X>,
                                 EXTRACT_get_vextract256_imm>,
                                     EVEX_V512, EVEX_CD8<32, CD8VT8>;
// Concrete VEXTRACTF*/VEXTRACTI* instruction families.
defm VEXTRACTF : vextract_for_type<f32, 0x19, f64, 0x1b>;
defm VEXTRACTI : vextract_for_type<i32, 0x39, i64, 0x3b>;

// extract_subvector codegen patterns with the alternative types.
// Only add this if 64x2 and its friends are not supported natively via AVX512DQ.
defm : vextract_for_size_lowering<"VEXTRACTF32x4Z", v8f64_info, v2f64x_info,
          vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512, NoDQI]>;
defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v8i64_info, v2i64x_info,
          vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512, NoDQI]>;

defm : vextract_for_size_lowering<"VEXTRACTF64x4Z", v16f32_info, v8f32x_info,
          vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512, NoDQI]>;
defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v16i32_info, v8i32x_info,
          vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512, NoDQI]>;

defm : vextract_for_size_lowering<"VEXTRACTF32x4Z256", v4f64x_info, v2f64x_info,
          vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX, NoDQI]>;
defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v4i64x_info, v2i64x_info,
          vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX, NoDQI]>;

// Codegen pattern with the alternative types extract VEC128 from VEC256
defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v16i16x_info, v8i16x_info,
          vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX]>;
defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v32i8x_info, v16i8x_info,
          vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX]>;

// Codegen pattern with the alternative types extract VEC128 from VEC512
defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v32i16_info, v8i16x_info,
          vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512]>;
defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v64i8_info, v16i8x_info,
          vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512]>;
// Codegen pattern with the alternative types extract VEC256 from VEC512
defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v32i16_info, v16i16x_info,
          vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;
defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v64i8_info, v32i8x_info,
          vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;
// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
          (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
          (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
          (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
          (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
def : Pat<(v8i16 (extract_subvector (v32i16 VR512:$src), (iPTR 0))),
          (v8i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v64i8 VR512:$src), (iPTR 0))),
          (v16i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_xmm))>;

// A 256-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
          (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>;
def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
          (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>;
def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
          (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>;
def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
          (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>;
def : Pat<(v16i16 (extract_subvector (v32i16 VR512:$src), (iPTR 0))),
          (v16i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_ymm))>;
def : Pat<(v32i8 (extract_subvector (v64i8 VR512:$src), (iPTR 0))),
          (v32i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_ymm))>;
// Inserting into the low lanes of an undef 512-bit vector is likewise just a
// subregister copy; IMPLICIT_DEF stands in for the undefined upper lanes.
782 let AddedComplexity = 25 in { // to give priority over vinsertf128rm
783 // A 128-bit subvector insert to the first 512-bit vector position
784 // is a subregister copy that needs no instruction.
785 def : Pat<(v8i64 (insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0))),
786 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
787 def : Pat<(v8f64 (insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0))),
788 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
789 def : Pat<(v16i32 (insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0))),
790 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
791 def : Pat<(v16f32 (insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0))),
792 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
793 def : Pat<(v32i16 (insert_subvector undef, (v8i16 VR128X:$src), (iPTR 0))),
794 (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
795 def : Pat<(v64i8 (insert_subvector undef, (v16i8 VR128X:$src), (iPTR 0))),
796 (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
798 // A 256-bit subvector insert to the first 512-bit vector position
799 // is a subregister copy that needs no instruction.
800 def : Pat<(v8i64 (insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0))),
801 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
802 def : Pat<(v8f64 (insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0))),
803 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
804 def : Pat<(v16i32 (insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0))),
805 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
806 def : Pat<(v16f32 (insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0))),
807 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
808 def : Pat<(v32i16 (insert_subvector undef, (v16i16 VR256X:$src), (iPTR 0))),
809 (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
810 def : Pat<(v64i8 (insert_subvector undef, (v32i8 VR256X:$src), (iPTR 0))),
811 (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
814 // vextractps - extract 32 bits from XMM
// Register form: selects one 32-bit element of $src1 (index in imm) into GR32.
815 def VEXTRACTPSZrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
816 (ins VR128X:$src1, u8imm:$src2),
817 "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
818 [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
// Memory form: same extract, stored directly to a 32-bit memory operand.
821 def VEXTRACTPSZmr : AVX512AIi8<0x17, MRMDestMem, (outs),
822 (ins f32mem:$dst, VR128X:$src1, u8imm:$src2),
823 "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
824 [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
825 addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;
827 //===---------------------------------------------------------------------===//
830 // broadcast with a scalar argument.
// Codegen-only broadcast-from-FP-scalar-register forms: plain (r_s),
// merge-masked (rk_s, ties $src0 to $dst), and zero-masked (rkz_s).
831 multiclass avx512_broadcast_scalar<bits<8> opc, string OpcodeStr,
832 X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
834 let isCodeGenOnly = 1 in {
835 def r_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
836 (ins SrcInfo.FRC:$src), OpcodeStr#"\t{$src, $dst|$dst, $src}",
837 [(set DestInfo.RC:$dst, (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)))]>,
838 Requires<[HasAVX512]>, T8PD, EVEX;
// Merge-masking: lanes with a clear mask bit keep the value from $src0.
840 let Constraints = "$src0 = $dst" in
841 def rk_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
842 (ins DestInfo.RC:$src0, DestInfo.KRCWM:$mask, SrcInfo.FRC:$src),
843 OpcodeStr#"\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
844 [(set DestInfo.RC:$dst,
845 (vselect DestInfo.KRCWM:$mask,
846 (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)),
847 DestInfo.RC:$src0))]>,
848 Requires<[HasAVX512]>, T8PD, EVEX, EVEX_K;
// Zero-masking: lanes with a clear mask bit become zero.
850 def rkz_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
851 (ins DestInfo.KRCWM:$mask, SrcInfo.FRC:$src),
852 OpcodeStr#"\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
853 [(set DestInfo.RC:$dst,
854 (vselect DestInfo.KRCWM:$mask,
855 (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)),
856 DestInfo.ImmAllZerosV))]>,
857 Requires<[HasAVX512]>, T8PD, EVEX, EVEX_KZ;
858 } // let isCodeGenOnly = 1 in
// Broadcast from a vector register (r) or from a scalar memory operand (m),
// via AVX512_maskable so plain/merge/zero-masked variants are all generated.
// The trailing patterns map broadcast-of-scalar_to_vector-load onto the
// memory forms (plain, then masked merge/zero with rising AddedComplexity).
861 multiclass avx512_broadcast_rm<bits<8> opc, string OpcodeStr,
862 X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
863 let ExeDomain = DestInfo.ExeDomain in {
864 defm r : AVX512_maskable<opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
865 (ins SrcInfo.RC:$src), OpcodeStr, "$src", "$src",
866 (DestInfo.VT (X86VBroadcast (SrcInfo.VT SrcInfo.RC:$src)))>,
868 defm m : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
869 (ins SrcInfo.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
870 (DestInfo.VT (X86VBroadcast
871 (SrcInfo.ScalarLdFrag addr:$src)))>,
872 T8PD, EVEX, EVEX_CD8<SrcInfo.EltSize, CD8VT1>;
875 def : Pat<(DestInfo.VT (X86VBroadcast
876 (SrcInfo.VT (scalar_to_vector
877 (SrcInfo.ScalarLdFrag addr:$src))))),
878 (!cast<Instruction>(NAME#DestInfo.ZSuffix#m) addr:$src)>;
879 let AddedComplexity = 20 in
880 def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask,
882 (SrcInfo.VT (scalar_to_vector
883 (SrcInfo.ScalarLdFrag addr:$src)))),
885 (!cast<Instruction>(NAME#DestInfo.ZSuffix#mk)
886 DestInfo.RC:$src0, DestInfo.KRCWM:$mask, addr:$src)>;
887 let AddedComplexity = 30 in
888 def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask,
890 (SrcInfo.VT (scalar_to_vector
891 (SrcInfo.ScalarLdFrag addr:$src)))),
892 DestInfo.ImmAllZerosV)),
893 (!cast<Instruction>(NAME#DestInfo.ZSuffix#mkz)
894 DestInfo.KRCWM:$mask, addr:$src)>;
// FP broadcast wrappers: Z (512-bit) under HasAVX512, Z256/Z128 under HasVLX.
// The _sd variant has no Z128 form (vbroadcastsd needs at least 256 bits).
897 multiclass avx512_fp_broadcast_sd<bits<8> opc, string OpcodeStr,
898 AVX512VLVectorVTInfo _> {
899 let Predicates = [HasAVX512] in
900 defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
901 avx512_broadcast_scalar<opc, OpcodeStr, _.info512, _.info128>,
904 let Predicates = [HasVLX] in {
905 defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _.info256, _.info128>,
906 avx512_broadcast_scalar<opc, OpcodeStr, _.info256, _.info128>,
911 multiclass avx512_fp_broadcast_ss<bits<8> opc, string OpcodeStr,
912 AVX512VLVectorVTInfo _> {
913 let Predicates = [HasAVX512] in
914 defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
915 avx512_broadcast_scalar<opc, OpcodeStr, _.info512, _.info128>,
918 let Predicates = [HasVLX] in {
919 defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _.info256, _.info128>,
920 avx512_broadcast_scalar<opc, OpcodeStr, _.info256, _.info128>,
922 defm Z128 : avx512_broadcast_rm<opc, OpcodeStr, _.info128, _.info128>,
923 avx512_broadcast_scalar<opc, OpcodeStr, _.info128, _.info128>,
927 defm VBROADCASTSS : avx512_fp_broadcast_ss<0x18, "vbroadcastss",
929 defm VBROADCASTSD : avx512_fp_broadcast_sd<0x19, "vbroadcastsd",
930 avx512vl_f64_info>, VEX_W;
// Map the 512-bit broadcast intrinsics directly onto the memory forms.
932 def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
933 (VBROADCASTSSZm addr:$src)>;
934 def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
935 (VBROADCASTSDZm addr:$src)>;
// Integer broadcast from a GPR source. The _vl wrapper instantiates the
// 512-bit form under `prd` and the 256/128-bit forms under `prd`+HasVLX.
937 multiclass avx512_int_broadcast_reg<bits<8> opc, X86VectorVTInfo _,
938 RegisterClass SrcRC> {
939 defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
941 "vpbroadcast"##_.Suffix, "$src", "$src",
942 (_.VT (X86VBroadcast SrcRC:$src))>, T8PD, EVEX;
945 multiclass avx512_int_broadcast_reg_vl<bits<8> opc, AVX512VLVectorVTInfo _,
946 RegisterClass SrcRC, Predicate prd> {
947 let Predicates = [prd] in
948 defm Z : avx512_int_broadcast_reg<opc, _.info512, SrcRC>, EVEX_V512;
949 let Predicates = [prd, HasVLX] in {
950 defm Z256 : avx512_int_broadcast_reg<opc, _.info256, SrcRC>, EVEX_V256;
951 defm Z128 : avx512_int_broadcast_reg<opc, _.info128, SrcRC>, EVEX_V128;
// GR8/GR16 sources exist only for codegen; the assembler uses GR32 aliases.
955 let isCodeGenOnly = 1 in {
956 defm VPBROADCASTBr : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info, GR8,
958 defm VPBROADCASTWr : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info, GR16,
961 let isAsmParserOnly = 1 in {
962 defm VPBROADCASTBr_Alt : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info,
964 defm VPBROADCASTWr_Alt : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info,
967 defm VPBROADCASTDr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i32_info, GR32,
969 defm VPBROADCASTQr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i64_info, GR64,
// Zero-extend a mask to a vector by broadcasting 1 under a zeroing mask.
972 def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
973 (VPBROADCASTDrZrkz VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
974 def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
975 (VPBROADCASTQrZrkz VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
977 // Provide aliases for broadcast from the same register class that
978 // automatically does the extract.
// Lowers a broadcast whose source is a wider vector by first extracting its
// low 128 bits (sub_xmm) and feeding that to the register broadcast form.
979 multiclass avx512_int_broadcast_rm_lowering<X86VectorVTInfo DestInfo,
980 X86VectorVTInfo SrcInfo> {
981 def : Pat<(DestInfo.VT (X86VBroadcast (SrcInfo.VT SrcInfo.RC:$src))),
982 (!cast<Instruction>(NAME#DestInfo.ZSuffix#"r")
983 (EXTRACT_SUBREG (SrcInfo.VT SrcInfo.RC:$src), sub_xmm))>;
986 multiclass avx512_int_broadcast_rm_vl<bits<8> opc, string OpcodeStr,
987 AVX512VLVectorVTInfo _, Predicate prd> {
988 let Predicates = [prd] in {
989 defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
990 avx512_int_broadcast_rm_lowering<_.info512, _.info256>,
992 // Defined separately to avoid redefinition.
993 defm Z_Alt : avx512_int_broadcast_rm_lowering<_.info512, _.info512>;
995 let Predicates = [prd, HasVLX] in {
996 defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _.info256, _.info128>,
997 avx512_int_broadcast_rm_lowering<_.info256, _.info256>,
999 defm Z128 : avx512_broadcast_rm<opc, OpcodeStr, _.info128, _.info128>,
// b/w forms gate on HasBWI; d/q only need HasAVX512.
1004 defm VPBROADCASTB : avx512_int_broadcast_rm_vl<0x78, "vpbroadcastb",
1005 avx512vl_i8_info, HasBWI>;
1006 defm VPBROADCASTW : avx512_int_broadcast_rm_vl<0x79, "vpbroadcastw",
1007 avx512vl_i16_info, HasBWI>;
1008 defm VPBROADCASTD : avx512_int_broadcast_rm_vl<0x58, "vpbroadcastd",
1009 avx512vl_i32_info, HasAVX512>;
1010 defm VPBROADCASTQ : avx512_int_broadcast_rm_vl<0x59, "vpbroadcastq",
1011 avx512vl_i64_info, HasAVX512>, VEX_W;
// Broadcast a whole subvector (e.g. 128 bits) from memory into every
// corresponding slice of the destination; memory-only (X86SubVBroadcast).
1013 multiclass avx512_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
1014 X86VectorVTInfo _Dst, X86VectorVTInfo _Src> {
1015 defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
1016 (ins _Src.MemOp:$src), OpcodeStr, "$src", "$src",
1017 (_Dst.VT (X86SubVBroadcast
1018 (_Src.VT (bitconvert (_Src.LdFrag addr:$src)))))>,
1022 let Predicates = [HasVLX, HasBWI] in {
1023 // loadi16 is tricky to fold, because !isTypeDesirableForOp, justifiably.
1024 // This means we'll encounter truncated i32 loads; match that here.
1025 def : Pat<(v8i16 (X86VBroadcast (i16 (trunc (i32 (load addr:$src)))))),
1026 (VPBROADCASTWZ128m addr:$src)>;
1027 def : Pat<(v16i16 (X86VBroadcast (i16 (trunc (i32 (load addr:$src)))))),
1028 (VPBROADCASTWZ256m addr:$src)>;
1029 def : Pat<(v8i16 (X86VBroadcast
1030 (i16 (trunc (i32 (zextloadi16 addr:$src)))))),
1031 (VPBROADCASTWZ128m addr:$src)>;
1032 def : Pat<(v16i16 (X86VBroadcast
1033 (i16 (trunc (i32 (zextloadi16 addr:$src)))))),
1034 (VPBROADCASTWZ256m addr:$src)>;
1037 //===----------------------------------------------------------------------===//
1038 // AVX-512 BROADCAST SUBVECTORS
// 512-bit destinations: 32x4/64x4 subvector broadcasts from memory.
1041 defm VBROADCASTI32X4 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
1042 v16i32_info, v4i32x_info>,
1043 EVEX_V512, EVEX_CD8<32, CD8VT4>;
1044 defm VBROADCASTF32X4 : avx512_subvec_broadcast_rm<0x1a, "vbroadcastf32x4",
1045 v16f32_info, v4f32x_info>,
1046 EVEX_V512, EVEX_CD8<32, CD8VT4>;
1047 defm VBROADCASTI64X4 : avx512_subvec_broadcast_rm<0x5b, "vbroadcasti64x4",
1048 v8i64_info, v4i64x_info>, VEX_W,
1049 EVEX_V512, EVEX_CD8<64, CD8VT4>;
1050 defm VBROADCASTF64X4 : avx512_subvec_broadcast_rm<0x1b, "vbroadcastf64x4",
1051 v8f64_info, v4f64x_info>, VEX_W,
1052 EVEX_V512, EVEX_CD8<64, CD8VT4>;
1054 let Predicates = [HasAVX512] in {
// i16/i8 subvector broadcasts reuse the 64x4 (or 32x4) encodings via bitcast.
1055 def : Pat<(v32i16 (X86SubVBroadcast (bc_v16i16 (loadv4i64 addr:$src)))),
1056 (VBROADCASTI64X4rm addr:$src)>;
1057 def : Pat<(v64i8 (X86SubVBroadcast (bc_v32i8 (loadv4i64 addr:$src)))),
1058 (VBROADCASTI64X4rm addr:$src)>;
1060 // Provide fallback in case the load node that is used in the patterns above
1061 // is used by additional users, which prevents the pattern selection.
// Register-source fallback: insert the 256-bit value into both halves.
1062 def : Pat<(v16f32 (X86SubVBroadcast (v8f32 VR256X:$src))),
1063 (VINSERTF64x4Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1064 (v8f32 VR256X:$src), 1)>;
1065 def : Pat<(v8f64 (X86SubVBroadcast (v4f64 VR256X:$src))),
1066 (VINSERTF64x4Zrr (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1067 (v4f64 VR256X:$src), 1)>;
1068 def : Pat<(v8i64 (X86SubVBroadcast (v4i64 VR256X:$src))),
1069 (VINSERTI64x4Zrr (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1070 (v4i64 VR256X:$src), 1)>;
1071 def : Pat<(v16i32 (X86SubVBroadcast (v8i32 VR256X:$src))),
1072 (VINSERTI64x4Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1073 (v8i32 VR256X:$src), 1)>;
1074 def : Pat<(v32i16 (X86SubVBroadcast (v16i16 VR256X:$src))),
1075 (VINSERTI64x4Zrr (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1076 (v16i16 VR256X:$src), 1)>;
1077 def : Pat<(v64i8 (X86SubVBroadcast (v32i8 VR256X:$src))),
1078 (VINSERTI64x4Zrr (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1079 (v32i8 VR256X:$src), 1)>;
1081 def : Pat<(v32i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
1082 (VBROADCASTI32X4rm addr:$src)>;
1083 def : Pat<(v64i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
1084 (VBROADCASTI32X4rm addr:$src)>;
1086 // Provide fallback in case the load node that is used in the patterns above
1087 // is used by additional users, which prevents the pattern selection.
// 128-bit register source: build a 256-bit pair, then duplicate into 512.
1088 def : Pat<(v8f64 (X86SubVBroadcast (v2f64 VR128X:$src))),
1090 (VINSERTF32x4Zrr (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
1091 VR128X:$src, sub_xmm),
1094 (v8f64 (VINSERTF32x4Zrr (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
1095 VR128X:$src, sub_xmm),
1096 VR128X:$src, 1)), sub_ymm), 1)>;
1097 def : Pat<(v8i64 (X86SubVBroadcast (v2i64 VR128X:$src))),
1099 (VINSERTI32x4Zrr (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
1100 VR128X:$src, sub_xmm),
1103 (v8i64 (VINSERTI32x4Zrr (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
1104 VR128X:$src, sub_xmm),
1105 VR128X:$src, 1)), sub_ymm), 1)>;
1107 def : Pat<(v32i16 (X86SubVBroadcast (v8i16 VR128X:$src))),
1109 (VINSERTI32x4Zrr (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)),
1110 VR128X:$src, sub_xmm),
1113 (v32i16 (VINSERTI32x4Zrr (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)),
1114 VR128X:$src, sub_xmm),
1115 VR128X:$src, 1)), sub_ymm), 1)>;
1116 def : Pat<(v64i8 (X86SubVBroadcast (v16i8 VR128X:$src))),
1118 (VINSERTI32x4Zrr (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)),
1119 VR128X:$src, sub_xmm),
1122 (v64i8 (VINSERTI32x4Zrr (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)),
1123 VR128X:$src, sub_xmm),
1124 VR128X:$src, 1)), sub_ymm), 1)>;
// 256-bit destinations under VLX; DQI adds the 64x2 element forms, and the
// NoDQI block falls back to the 32x4 encodings for 64-bit element types.
1127 let Predicates = [HasVLX] in {
1128 defm VBROADCASTI32X4Z256 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
1129 v8i32x_info, v4i32x_info>,
1130 EVEX_V256, EVEX_CD8<32, CD8VT4>;
1131 defm VBROADCASTF32X4Z256 : avx512_subvec_broadcast_rm<0x1a, "vbroadcastf32x4",
1132 v8f32x_info, v4f32x_info>,
1133 EVEX_V256, EVEX_CD8<32, CD8VT4>;
1135 def : Pat<(v16i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
1136 (VBROADCASTI32X4Z256rm addr:$src)>;
1137 def : Pat<(v32i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
1138 (VBROADCASTI32X4Z256rm addr:$src)>;
1140 // Provide fallback in case the load node that is used in the patterns above
1141 // is used by additional users, which prevents the pattern selection.
1142 def : Pat<(v8f32 (X86SubVBroadcast (v4f32 VR128X:$src))),
1143 (VINSERTF32x4Z256rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1144 (v4f32 VR128X:$src), 1)>;
1145 def : Pat<(v8i32 (X86SubVBroadcast (v4i32 VR128X:$src))),
1146 (VINSERTI32x4Z256rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1147 (v4i32 VR128X:$src), 1)>;
1148 def : Pat<(v16i16 (X86SubVBroadcast (v8i16 VR128X:$src))),
1149 (VINSERTI32x4Z256rr (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1150 (v8i16 VR128X:$src), 1)>;
1151 def : Pat<(v32i8 (X86SubVBroadcast (v16i8 VR128X:$src))),
1152 (VINSERTI32x4Z256rr (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1153 (v16i8 VR128X:$src), 1)>;
// NOTE(review): these defm names end in Z128 but carry EVEX_V256 and 256-bit
// destination types — presumably a historical naming quirk; confirm callers.
1156 let Predicates = [HasVLX, HasDQI] in {
1157 defm VBROADCASTI64X2Z128 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti64x2",
1158 v4i64x_info, v2i64x_info>, VEX_W,
1159 EVEX_V256, EVEX_CD8<64, CD8VT2>;
1160 defm VBROADCASTF64X2Z128 : avx512_subvec_broadcast_rm<0x1a, "vbroadcastf64x2",
1161 v4f64x_info, v2f64x_info>, VEX_W,
1162 EVEX_V256, EVEX_CD8<64, CD8VT2>;
1164 // Provide fallback in case the load node that is used in the patterns above
1165 // is used by additional users, which prevents the pattern selection.
1166 def : Pat<(v4f64 (X86SubVBroadcast (v2f64 VR128X:$src))),
1167 (VINSERTF64x2Z256rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1168 (v2f64 VR128X:$src), 1)>;
1169 def : Pat<(v4i64 (X86SubVBroadcast (v2i64 VR128X:$src))),
1170 (VINSERTI64x2Z256rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1171 (v2i64 VR128X:$src), 1)>;
1174 let Predicates = [HasVLX, NoDQI] in {
1175 def : Pat<(v4f64 (X86SubVBroadcast (loadv2f64 addr:$src))),
1176 (VBROADCASTF32X4Z256rm addr:$src)>;
1177 def : Pat<(v4i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
1178 (VBROADCASTI32X4Z256rm addr:$src)>;
1180 // Provide fallback in case the load node that is used in the patterns above
1181 // is used by additional users, which prevents the pattern selection.
1182 def : Pat<(v4f64 (X86SubVBroadcast (v2f64 VR128X:$src))),
1183 (VINSERTF32x4Z256rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1184 (v2f64 VR128X:$src), 1)>;
1185 def : Pat<(v4i64 (X86SubVBroadcast (v2i64 VR128X:$src))),
1186 (VINSERTI32x4Z256rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1187 (v2i64 VR128X:$src), 1)>;
// Without DQI, the 64-bit-element and 32x8 broadcasts are emulated with the
// 32x4/64x4 encodings; with DQI, the dedicated 64x2/32x8 forms are defined.
1190 let Predicates = [HasAVX512, NoDQI] in {
1191 def : Pat<(v8f64 (X86SubVBroadcast (loadv2f64 addr:$src))),
1192 (VBROADCASTF32X4rm addr:$src)>;
1193 def : Pat<(v8i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
1194 (VBROADCASTI32X4rm addr:$src)>;
1196 def : Pat<(v16f32 (X86SubVBroadcast (v4f32 VR128X:$src))),
1198 (VINSERTF32x4Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
1199 VR128X:$src, sub_xmm),
1202 (v16f32 (VINSERTF32x4Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
1203 VR128X:$src, sub_xmm),
1204 VR128X:$src, 1)), sub_ymm), 1)>;
1205 def : Pat<(v16i32 (X86SubVBroadcast (v4i32 VR128X:$src))),
1207 (VINSERTI32x4Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
1208 VR128X:$src, sub_xmm),
1211 (v16i32 (VINSERTI32x4Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
1212 VR128X:$src, sub_xmm),
1213 VR128X:$src, 1)), sub_ymm), 1)>;
1215 def : Pat<(v16f32 (X86SubVBroadcast (loadv8f32 addr:$src))),
1216 (VBROADCASTF64X4rm addr:$src)>;
1217 def : Pat<(v16i32 (X86SubVBroadcast (bc_v8i32 (loadv4i64 addr:$src)))),
1218 (VBROADCASTI64X4rm addr:$src)>;
1220 // Provide fallback in case the load node that is used in the patterns above
1221 // is used by additional users, which prevents the pattern selection.
1222 def : Pat<(v16f32 (X86SubVBroadcast (v8f32 VR256X:$src))),
1223 (VINSERTF64x4Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1224 (v8f32 VR256X:$src), 1)>;
1225 def : Pat<(v16i32 (X86SubVBroadcast (v8i32 VR256X:$src))),
1226 (VINSERTI64x4Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1227 (v8i32 VR256X:$src), 1)>;
1230 let Predicates = [HasDQI] in {
1231 defm VBROADCASTI64X2 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti64x2",
1232 v8i64_info, v2i64x_info>, VEX_W,
1233 EVEX_V512, EVEX_CD8<64, CD8VT2>;
1234 defm VBROADCASTI32X8 : avx512_subvec_broadcast_rm<0x5b, "vbroadcasti32x8",
1235 v16i32_info, v8i32x_info>,
1236 EVEX_V512, EVEX_CD8<32, CD8VT8>;
1237 defm VBROADCASTF64X2 : avx512_subvec_broadcast_rm<0x1a, "vbroadcastf64x2",
1238 v8f64_info, v2f64x_info>, VEX_W,
1239 EVEX_V512, EVEX_CD8<64, CD8VT2>;
1240 defm VBROADCASTF32X8 : avx512_subvec_broadcast_rm<0x1b, "vbroadcastf32x8",
1241 v16f32_info, v8f32x_info>,
1242 EVEX_V512, EVEX_CD8<32, CD8VT8>;
1244 // Provide fallback in case the load node that is used in the patterns above
1245 // is used by additional users, which prevents the pattern selection.
1246 def : Pat<(v16f32 (X86SubVBroadcast (v8f32 VR256X:$src))),
1247 (VINSERTF32x8Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1248 (v8f32 VR256X:$src), 1)>;
1249 def : Pat<(v16i32 (X86SubVBroadcast (v8i32 VR256X:$src))),
1250 (VINSERTI32x8Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1251 (v8i32 VR256X:$src), 1)>;
1253 def : Pat<(v16f32 (X86SubVBroadcast (v4f32 VR128X:$src))),
1255 (VINSERTF32x4Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
1256 VR128X:$src, sub_xmm),
1259 (v16f32 (VINSERTF32x4Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
1260 VR128X:$src, sub_xmm),
1261 VR128X:$src, 1)), sub_ymm), 1)>;
1262 def : Pat<(v16i32 (X86SubVBroadcast (v4i32 VR128X:$src))),
1264 (VINSERTI32x4Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
1265 VR128X:$src, sub_xmm),
1268 (v16i32 (VINSERTI32x4Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
1269 VR128X:$src, sub_xmm),
1270 VR128X:$src, 1)), sub_ymm), 1)>;
// 32x2 broadcasts (DQI): the i32x2 variant additionally gets a Z128 form.
1273 multiclass avx512_common_broadcast_32x2<bits<8> opc, string OpcodeStr,
1274 AVX512VLVectorVTInfo _Dst, AVX512VLVectorVTInfo _Src> {
1275 let Predicates = [HasDQI] in
1276 defm Z : avx512_broadcast_rm<opc, OpcodeStr, _Dst.info512, _Src.info128>,
1278 let Predicates = [HasDQI, HasVLX] in
1279 defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _Dst.info256, _Src.info128>,
1283 multiclass avx512_common_broadcast_i32x2<bits<8> opc, string OpcodeStr,
1284 AVX512VLVectorVTInfo _Dst, AVX512VLVectorVTInfo _Src> :
1285 avx512_common_broadcast_32x2<opc, OpcodeStr, _Dst, _Src> {
1287 let Predicates = [HasDQI, HasVLX] in
1288 defm Z128 : avx512_broadcast_rm<opc, OpcodeStr, _Dst.info128, _Src.info128>,
1292 defm VBROADCASTI32X2 : avx512_common_broadcast_i32x2<0x59, "vbroadcasti32x2",
1293 avx512vl_i32_info, avx512vl_i64_info>;
1294 defm VBROADCASTF32X2 : avx512_common_broadcast_32x2<0x19, "vbroadcastf32x2",
1295 avx512vl_f32_info, avx512vl_f64_info>;
// Broadcast of a wider FP vector: extract the low xmm and use the reg form.
1297 def : Pat<(v16f32 (X86VBroadcast (v16f32 VR512:$src))),
1298 (VBROADCASTSSZr (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
1299 def : Pat<(v16f32 (X86VBroadcast (v8f32 VR256X:$src))),
1300 (VBROADCASTSSZr (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm))>;
1302 def : Pat<(v8f64 (X86VBroadcast (v8f64 VR512:$src))),
1303 (VBROADCASTSDZr (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
1304 def : Pat<(v8f64 (X86VBroadcast (v4f64 VR256X:$src))),
1305 (VBROADCASTSDZr (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm))>;
1307 //===----------------------------------------------------------------------===//
1308 // AVX-512 BROADCAST MASK TO VECTOR REGISTER
// vpbroadcastm{w2d,b2q}: replicate a mask register into every vector element
// (CDI feature; VLX adds the 256/128-bit variants).
1310 multiclass avx512_mask_broadcastm<bits<8> opc, string OpcodeStr,
1311 X86VectorVTInfo _, RegisterClass KRC> {
1312 def rr : AVX512XS8I<opc, MRMSrcReg, (outs _.RC:$dst), (ins KRC:$src),
1313 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
1314 [(set _.RC:$dst, (_.VT (X86VBroadcastm KRC:$src)))]>, EVEX;
1317 multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
1318 AVX512VLVectorVTInfo VTInfo, RegisterClass KRC> {
1319 let Predicates = [HasCDI] in
1320 defm Z : avx512_mask_broadcastm<opc, OpcodeStr, VTInfo.info512, KRC>, EVEX_V512;
1321 let Predicates = [HasCDI, HasVLX] in {
1322 defm Z256 : avx512_mask_broadcastm<opc, OpcodeStr, VTInfo.info256, KRC>, EVEX_V256;
1323 defm Z128 : avx512_mask_broadcastm<opc, OpcodeStr, VTInfo.info128, KRC>, EVEX_V128;
1327 defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d",
1328 avx512vl_i32_info, VK16>;
1329 defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q",
1330 avx512vl_i64_info, VK8>, VEX_W;
1332 //===----------------------------------------------------------------------===//
1333 // -- VPERMI2 - 3 source operands form --
// VPERMI2*: the tied $src1 operand is the index vector and is overwritten
// with the permuted result of $src2/$src3.
1334 multiclass avx512_perm_i<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
1335 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
1336 // The index operand in the pattern should really be an integer type. However,
1337 // if we do that and it happens to come from a bitcast, then it becomes
1338 // difficult to find the bitcast needed to convert the index to the
1339 // destination type for the passthru since it will be folded with the bitcast
1340 // of the index operand.
1341 defm rr: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
1342 (ins _.RC:$src2, _.RC:$src3),
1343 OpcodeStr, "$src3, $src2", "$src2, $src3",
1344 (_.VT (X86VPermi2X _.RC:$src1, _.RC:$src2, _.RC:$src3)), 1>, EVEX_4V,
1347 defm rm: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
1348 (ins _.RC:$src2, _.MemOp:$src3),
1349 OpcodeStr, "$src3, $src2", "$src2, $src3",
1350 (_.VT (X86VPermi2X _.RC:$src1, _.RC:$src2,
1351 (_.VT (bitconvert (_.LdFrag addr:$src3))))), 1>,
1352 EVEX_4V, AVX5128IBase;
// Broadcast-memory form: ${src3} is a single scalar splat across the vector.
1355 multiclass avx512_perm_i_mb<bits<8> opc, string OpcodeStr,
1356 X86VectorVTInfo _> {
1357 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in
1358 defm rmb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
1359 (ins _.RC:$src2, _.ScalarMemOp:$src3),
1360 OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
1361 !strconcat("$src2, ${src3}", _.BroadcastStr ),
1362 (_.VT (X86VPermi2X _.RC:$src1,
1363 _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))),
1364 1>, AVX5128IBase, EVEX_4V, EVEX_B;
// Size wrappers: 512-bit always; 128/256-bit under VLX. The _bw variant
// gates everything on an extra predicate (BWI/VBMI) and has no rmb form.
1367 multiclass avx512_perm_i_sizes<bits<8> opc, string OpcodeStr,
1368 AVX512VLVectorVTInfo VTInfo> {
1369 defm NAME: avx512_perm_i<opc, OpcodeStr, VTInfo.info512>,
1370 avx512_perm_i_mb<opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
1371 let Predicates = [HasVLX] in {
1372 defm NAME#128: avx512_perm_i<opc, OpcodeStr, VTInfo.info128>,
1373 avx512_perm_i_mb<opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
1374 defm NAME#256: avx512_perm_i<opc, OpcodeStr, VTInfo.info256>,
1375 avx512_perm_i_mb<opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
1379 multiclass avx512_perm_i_sizes_bw<bits<8> opc, string OpcodeStr,
1380 AVX512VLVectorVTInfo VTInfo,
1382 let Predicates = [Prd] in
1383 defm NAME: avx512_perm_i<opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
1384 let Predicates = [Prd, HasVLX] in {
1385 defm NAME#128: avx512_perm_i<opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
1386 defm NAME#256: avx512_perm_i<opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
1390 defm VPERMI2D : avx512_perm_i_sizes<0x76, "vpermi2d",
1391 avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
1392 defm VPERMI2Q : avx512_perm_i_sizes<0x76, "vpermi2q",
1393 avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
1394 defm VPERMI2W : avx512_perm_i_sizes_bw<0x75, "vpermi2w",
1395 avx512vl_i16_info, HasBWI>,
1396 VEX_W, EVEX_CD8<16, CD8VF>;
1397 defm VPERMI2B : avx512_perm_i_sizes_bw<0x75, "vpermi2b",
1398 avx512vl_i8_info, HasVBMI>,
1400 defm VPERMI2PS : avx512_perm_i_sizes<0x77, "vpermi2ps",
1401 avx512vl_f32_info>, EVEX_CD8<32, CD8VF>;
1402 defm VPERMI2PD : avx512_perm_i_sizes<0x77, "vpermi2pd",
1403 avx512vl_f64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
// VPERMT2*: like VPERMI2 but the tied $src1 operand is a data (table)
// vector; the index vector comes in separately as $src2 (IdxVT).
1406 multiclass avx512_perm_t<bits<8> opc, string OpcodeStr,
1407 X86VectorVTInfo _, X86VectorVTInfo IdxVT> {
1408 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
1409 defm rr: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
1410 (ins IdxVT.RC:$src2, _.RC:$src3),
1411 OpcodeStr, "$src3, $src2", "$src2, $src3",
1412 (_.VT (X86VPermt2 _.RC:$src1, IdxVT.RC:$src2, _.RC:$src3)), 1>,
1413 EVEX_4V, AVX5128IBase;
1415 defm rm: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
1416 (ins IdxVT.RC:$src2, _.MemOp:$src3),
1417 OpcodeStr, "$src3, $src2", "$src2, $src3",
1418 (_.VT (X86VPermt2 _.RC:$src1, IdxVT.RC:$src2,
1419 (bitconvert (_.LdFrag addr:$src3)))), 1>,
1420 EVEX_4V, AVX5128IBase;
// Broadcast-memory form of VPERMT2*.
1423 multiclass avx512_perm_t_mb<bits<8> opc, string OpcodeStr,
1424 X86VectorVTInfo _, X86VectorVTInfo IdxVT> {
1425 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in
1426 defm rmb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
1427 (ins IdxVT.RC:$src2, _.ScalarMemOp:$src3),
1428 OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
1429 !strconcat("$src2, ${src3}", _.BroadcastStr ),
1430 (_.VT (X86VPermt2 _.RC:$src1,
1431 IdxVT.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))),
1432 1>, AVX5128IBase, EVEX_4V, EVEX_B;
// Size wrappers mirroring avx512_perm_i_sizes/_bw, with a separate
// ShuffleMask/Idx VT-info so FP data can use integer index vectors.
1435 multiclass avx512_perm_t_sizes<bits<8> opc, string OpcodeStr,
1436 AVX512VLVectorVTInfo VTInfo,
1437 AVX512VLVectorVTInfo ShuffleMask> {
1438 defm NAME: avx512_perm_t<opc, OpcodeStr, VTInfo.info512,
1439 ShuffleMask.info512>,
1440 avx512_perm_t_mb<opc, OpcodeStr, VTInfo.info512,
1441 ShuffleMask.info512>, EVEX_V512;
1442 let Predicates = [HasVLX] in {
1443 defm NAME#128: avx512_perm_t<opc, OpcodeStr, VTInfo.info128,
1444 ShuffleMask.info128>,
1445 avx512_perm_t_mb<opc, OpcodeStr, VTInfo.info128,
1446 ShuffleMask.info128>, EVEX_V128;
1447 defm NAME#256: avx512_perm_t<opc, OpcodeStr, VTInfo.info256,
1448 ShuffleMask.info256>,
1449 avx512_perm_t_mb<opc, OpcodeStr, VTInfo.info256,
1450 ShuffleMask.info256>, EVEX_V256;
1454 multiclass avx512_perm_t_sizes_bw<bits<8> opc, string OpcodeStr,
1455 AVX512VLVectorVTInfo VTInfo,
1456 AVX512VLVectorVTInfo Idx,
1458 let Predicates = [Prd] in
1459 defm NAME: avx512_perm_t<opc, OpcodeStr, VTInfo.info512,
1460 Idx.info512>, EVEX_V512;
1461 let Predicates = [Prd, HasVLX] in {
1462 defm NAME#128: avx512_perm_t<opc, OpcodeStr, VTInfo.info128,
1463 Idx.info128>, EVEX_V128;
1464 defm NAME#256: avx512_perm_t<opc, OpcodeStr, VTInfo.info256,
1465 Idx.info256>, EVEX_V256;
1469 defm VPERMT2D : avx512_perm_t_sizes<0x7E, "vpermt2d",
1470 avx512vl_i32_info, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
1471 defm VPERMT2Q : avx512_perm_t_sizes<0x7E, "vpermt2q",
1472 avx512vl_i64_info, avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
1473 defm VPERMT2W : avx512_perm_t_sizes_bw<0x7D, "vpermt2w",
1474 avx512vl_i16_info, avx512vl_i16_info, HasBWI>,
1475 VEX_W, EVEX_CD8<16, CD8VF>;
1476 defm VPERMT2B : avx512_perm_t_sizes_bw<0x7D, "vpermt2b",
1477 avx512vl_i8_info, avx512vl_i8_info, HasVBMI>,
1479 defm VPERMT2PS : avx512_perm_t_sizes<0x7F, "vpermt2ps",
1480 avx512vl_f32_info, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
1481 defm VPERMT2PD : avx512_perm_t_sizes<0x7F, "vpermt2pd",
1482 avx512vl_f64_info, avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
1484 //===----------------------------------------------------------------------===//
1485 // AVX-512 - BLEND using mask
// Masked blend: merge-masked forms (rrk/rmk) carry the vselect pattern;
// the unmasked and zero-masked forms are pattern-less (hasSideEffects = 0)
// and exist for encoding/assembly only.
1487 multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
1488 let ExeDomain = _.ExeDomain in {
1489 let hasSideEffects = 0 in
1490 def rr : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
1491 (ins _.RC:$src1, _.RC:$src2),
1492 !strconcat(OpcodeStr,
1493 "\t{$src2, $src1, ${dst}|${dst}, $src1, $src2}"),
1495 def rrk : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
1496 (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
1497 !strconcat(OpcodeStr,
1498 "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
1499 [(set _.RC:$dst, (vselect _.KRCWM:$mask,
1501 (_.VT _.RC:$src1)))]>, EVEX_4V, EVEX_K;
1502 let hasSideEffects = 0 in
1503 def rrkz : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
1504 (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
1505 !strconcat(OpcodeStr,
1506 "\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
1507 []>, EVEX_4V, EVEX_KZ;
1508 let mayLoad = 1, hasSideEffects = 0 in
1509 def rm : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1510 (ins _.RC:$src1, _.MemOp:$src2),
1511 !strconcat(OpcodeStr,
1512 "\t{$src2, $src1, ${dst}|${dst}, $src1, $src2}"),
1513 []>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
1514 def rmk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1515 (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
1516 !strconcat(OpcodeStr,
1517 "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
1518 [(set _.RC:$dst, (vselect _.KRCWM:$mask,
1519 (_.VT (bitconvert (_.LdFrag addr:$src2))),
1520 (_.VT _.RC:$src1)))]>,
1521 EVEX_4V, EVEX_K, EVEX_CD8<_.EltSize, CD8VF>;
1522 let mayLoad = 1, hasSideEffects = 0 in
1523 def rmkz : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1524 (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
1525 !strconcat(OpcodeStr,
1526 "\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
1527 []>, EVEX_4V, EVEX_KZ, EVEX_CD8<_.EltSize, CD8VF>;
// Broadcast-memory forms of the masked blend (EVEX.b): the scalar memory
// operand is splat via X86VBroadcast before the vselect. rmbk is the masked
// form with a pattern; rmb is encoding-only (no pattern, mayLoad).
1530 multiclass avx512_blendmask_rmb<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
1532 def rmbk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1533 (ins _.KRCWM:$mask, _.RC:$src1, _.ScalarMemOp:$src2),
1534 !strconcat(OpcodeStr,
1535 "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
1536 "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
1537 [(set _.RC:$dst,(vselect _.KRCWM:$mask,
1538 (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
1539 (_.VT _.RC:$src1)))]>,
1540 EVEX_4V, EVEX_K, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
1542 let mayLoad = 1, hasSideEffects = 0 in
1543 def rmb : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1544 (ins _.RC:$src1, _.ScalarMemOp:$src2),
1545 !strconcat(OpcodeStr,
1546 "\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
1547 "$dst, $src1, ${src2}", _.BroadcastStr, "}"),
1548 []>, EVEX_4V, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
// Dword/qword (and ps/pd) blend sizes: 512-bit always, including broadcast
// forms; 128/256-bit gated on HasVLX.
1552 multiclass blendmask_dq <bits<8> opc, string OpcodeStr,
1553 AVX512VLVectorVTInfo VTInfo> {
1554 defm Z : avx512_blendmask <opc, OpcodeStr, VTInfo.info512>,
1555 avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
1557 let Predicates = [HasVLX] in {
1558 defm Z256 : avx512_blendmask<opc, OpcodeStr, VTInfo.info256>,
1559 avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
1560 defm Z128 : avx512_blendmask<opc, OpcodeStr, VTInfo.info128>,
1561 avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
// Byte/word blend sizes: require HasBWI; no broadcast forms (byte/word
// elements have no embedded-broadcast encoding). VLX additionally gates the
// 128/256-bit variants.
1565 multiclass blendmask_bw <bits<8> opc, string OpcodeStr,
1566 AVX512VLVectorVTInfo VTInfo> {
1567 let Predicates = [HasBWI] in
1568 defm Z : avx512_blendmask <opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
1570 let Predicates = [HasBWI, HasVLX] in {
1571 defm Z256 : avx512_blendmask <opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
1572 defm Z128 : avx512_blendmask <opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
// Blend instantiations: FP and dword/qword integer via blendmask_dq;
// byte/word integer via blendmask_bw. VEX_W selects the 64-bit element forms.
1577 defm VBLENDMPS : blendmask_dq <0x65, "vblendmps", avx512vl_f32_info>;
1578 defm VBLENDMPD : blendmask_dq <0x65, "vblendmpd", avx512vl_f64_info>, VEX_W;
1579 defm VPBLENDMD : blendmask_dq <0x64, "vpblendmd", avx512vl_i32_info>;
1580 defm VPBLENDMQ : blendmask_dq <0x64, "vpblendmq", avx512vl_i64_info>, VEX_W;
1581 defm VPBLENDMB : blendmask_bw <0x66, "vpblendmb", avx512vl_i8_info>;
1582 defm VPBLENDMW : blendmask_bw <0x66, "vpblendmw", avx512vl_i16_info>, VEX_W;
// Without VLX the 256-bit masked instructions are unavailable, so select
// 256-bit vselects by widening both operands into a 512-bit register
// (INSERT_SUBREG into IMPLICIT_DEF), using the 512-bit masked blend, and
// extracting the ymm subregister. The 8-bit mask is re-classed to VK16WM.
1585 let Predicates = [HasAVX512, NoVLX] in {
1586 def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
1587 (v8f32 VR256X:$src2))),
1589 (v16f32 (VBLENDMPSZrrk (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
1590 (v16f32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm)),
1591 (v16f32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)))), sub_ymm)>;
1593 def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
1594 (v8i32 VR256X:$src2))),
1596 (v16i32 (VPBLENDMDZrrk (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
1597 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm)),
1598 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)))), sub_ymm)>;
1600 //===----------------------------------------------------------------------===//
1601 // Compare Instructions
1602 //===----------------------------------------------------------------------===//
1604 // avx512_cmp_scalar - AVX512 CMPSS and CMPSD
// Scalar FP compare to mask (VCMPSS/VCMPSD). Three groups of defs:
//  * rr_Int/rm_Int/rrb_Int — intrinsic (vector-in-xmm) forms; rrb_Int is the
//    suppress-all-exceptions {sae} form using OpNodeRnd with FROUND_NO_EXC.
//  * *_alt — assembler-only aliases taking an explicit u8imm condition code
//    instead of the mnemonic-embedded ${cc}.
//  * rr/rm — isCodeGenOnly forms operating on scalar FRC registers.
// NOTE(review): several pattern-operand lines are missing from this dump
// (e.g. the second operand of rr_Int/rrb_Int and rr).
1606 multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeRnd>{
1608 defm rr_Int : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
1610 (ins _.RC:$src1, _.RC:$src2, AVXCC:$cc),
1611 "vcmp${cc}"#_.Suffix,
1612 "$src2, $src1", "$src1, $src2",
1613 (OpNode (_.VT _.RC:$src1),
1616 defm rm_Int : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
1618 (ins _.RC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
1619 "vcmp${cc}"#_.Suffix,
1620 "$src2, $src1", "$src1, $src2",
1621 (OpNode (_.VT _.RC:$src1),
1622 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
1623 imm:$cc)>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
1625 defm rrb_Int : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
1627 (ins _.RC:$src1, _.RC:$src2, AVXCC:$cc),
1628 "vcmp${cc}"#_.Suffix,
1629 "{sae}, $src2, $src1", "$src1, $src2, {sae}",
1630 (OpNodeRnd (_.VT _.RC:$src1),
1633 (i32 FROUND_NO_EXC))>, EVEX_4V, EVEX_B;
1634 // Accept explicit immediate argument form instead of comparison code.
1635 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1636 defm rri_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcReg, _,
1638 (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
1640 "$cc, $src2, $src1", "$src1, $src2, $cc">, EVEX_4V;
1641 defm rmi_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcMem, _,
1643 (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$cc),
1645 "$cc, $src2, $src1", "$src1, $src2, $cc">,
1646 EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
1648 defm rrb_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcReg, _,
1650 (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
1652 "$cc, {sae}, $src2, $src1","$src1, $src2, {sae}, $cc">,
1654 }// let isAsmParserOnly = 1, hasSideEffects = 0
1656 let isCodeGenOnly = 1 in {
1657 let isCommutable = 1 in
1658 def rr : AVX512Ii8<0xC2, MRMSrcReg,
1659 (outs _.KRC:$dst), (ins _.FRC:$src1, _.FRC:$src2, AVXCC:$cc),
1660 !strconcat("vcmp${cc}", _.Suffix,
1661 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1662 [(set _.KRC:$dst, (OpNode _.FRC:$src1,
1665 IIC_SSE_ALU_F32S_RR>, EVEX_4V;
1666 def rm : AVX512Ii8<0xC2, MRMSrcMem,
1668 (ins _.FRC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
1669 !strconcat("vcmp${cc}", _.Suffix,
1670 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1671 [(set _.KRC:$dst, (OpNode _.FRC:$src1,
1672 (_.ScalarLdFrag addr:$src2),
1674 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
// Instantiate the scalar compares for f32 (SS) and f64 (SD, VEX_W).
// NOTE(review): VCMPSSZ's trailing attribute line (original 1680) is missing.
1678 let Predicates = [HasAVX512] in {
1679 defm VCMPSSZ : avx512_cmp_scalar<f32x_info, X86cmpms, X86cmpmsRnd>,
1681 defm VCMPSDZ : avx512_cmp_scalar<f64x_info, X86cmpms, X86cmpmsRnd>,
1682 AVX512XDIi8Base, VEX_W;
// Packed integer compare producing a k-mask (VPCMPEQ*/VPCMPGT*): rr/rm are
// the unmasked forms; rrk/rmk AND the result into the write-mask (EVEX_K).
// IsCommutable is set only on the rr form (equality compares).
1685 multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
1686 X86VectorVTInfo _, bit IsCommutable> {
1687 let isCommutable = IsCommutable in
1688 def rr : AVX512BI<opc, MRMSrcReg,
1689 (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2),
1690 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1691 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2)))],
1692 IIC_SSE_ALU_F32P_RR>, EVEX_4V;
1693 def rm : AVX512BI<opc, MRMSrcMem,
1694 (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2),
1695 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1696 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
1697 (_.VT (bitconvert (_.LdFrag addr:$src2)))))],
1698 IIC_SSE_ALU_F32P_RM>, EVEX_4V;
1699 def rrk : AVX512BI<opc, MRMSrcReg,
1700 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
1701 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
1702 "$dst {${mask}}, $src1, $src2}"),
1703 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1704 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))))],
1705 IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
// NOTE(review): extraction gap — the bitconvert wrapper line of this rmk
// pattern (original 1712) is missing in this dump.
1706 def rmk : AVX512BI<opc, MRMSrcMem,
1707 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
1708 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
1709 "$dst {${mask}}, $src1, $src2}"),
1710 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1711 (OpNode (_.VT _.RC:$src1),
1713 (_.LdFrag addr:$src2))))))],
1714 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
// Extends avx512_icmp_packed with embedded-broadcast memory forms (EVEX.b):
// rmb compares against a splat scalar load, rmbk is its write-masked variant.
// Only dword/qword element sizes instantiate this (broadcast not encodable
// for byte/word).
1717 multiclass avx512_icmp_packed_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
1718 X86VectorVTInfo _, bit IsCommutable> :
1719 avx512_icmp_packed<opc, OpcodeStr, OpNode, _, IsCommutable> {
1720 def rmb : AVX512BI<opc, MRMSrcMem,
1721 (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2),
1722 !strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr, ", $src1, $dst",
1723 "|$dst, $src1, ${src2}", _.BroadcastStr, "}"),
1724 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
1725 (X86VBroadcast (_.ScalarLdFrag addr:$src2))))],
1726 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
// NOTE(review): extraction gap — the X86VBroadcast line of this rmbk pattern
// (original 1735) is missing in this dump.
1727 def rmbk : AVX512BI<opc, MRMSrcMem,
1728 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
1729 _.ScalarMemOp:$src2),
1730 !strconcat(OpcodeStr,
1731 "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
1732 "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
1733 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1734 (OpNode (_.VT _.RC:$src1),
1736 (_.ScalarLdFrag addr:$src2)))))],
1737 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
// Vector-length wrapper: 512-bit under prd; 128/256-bit additionally
// require HasVLX.
1740 multiclass avx512_icmp_packed_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
1741 AVX512VLVectorVTInfo VTInfo, Predicate prd,
1742 bit IsCommutable = 0> {
1743 let Predicates = [prd] in
1744 defm Z : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info512,
1745 IsCommutable>, EVEX_V512;
1747 let Predicates = [prd, HasVLX] in {
1748 defm Z256 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info256,
1749 IsCommutable>, EVEX_V256;
1750 defm Z128 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info128,
1751 IsCommutable>, EVEX_V128;
// Same vector-length wrapper as avx512_icmp_packed_vl, but for the
// broadcast-capable (rmb) compare multiclass.
1755 multiclass avx512_icmp_packed_rmb_vl<bits<8> opc, string OpcodeStr,
1756 SDNode OpNode, AVX512VLVectorVTInfo VTInfo,
1757 Predicate prd, bit IsCommutable = 0> {
1758 let Predicates = [prd] in
1759 defm Z : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info512,
1760 IsCommutable>, EVEX_V512;
1762 let Predicates = [prd, HasVLX] in {
1763 defm Z256 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info256,
1764 IsCommutable>, EVEX_V256;
1765 defm Z128 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info128,
1766 IsCommutable>, EVEX_V128;
// VPCMPEQ* (commutable, IsCommutable = 1) and VPCMPGT* instantiations.
// b/w need BWI and have no broadcast forms; d/q use the rmb variant.
// NOTE(review): the trailing attribute lines of VPCMPEQB and VPCMPGTB
// (original 1772/1788) are missing from this dump.
1770 defm VPCMPEQB : avx512_icmp_packed_vl<0x74, "vpcmpeqb", X86pcmpeqm,
1771 avx512vl_i8_info, HasBWI, 1>,
1774 defm VPCMPEQW : avx512_icmp_packed_vl<0x75, "vpcmpeqw", X86pcmpeqm,
1775 avx512vl_i16_info, HasBWI, 1>,
1776 EVEX_CD8<16, CD8VF>;
1778 defm VPCMPEQD : avx512_icmp_packed_rmb_vl<0x76, "vpcmpeqd", X86pcmpeqm,
1779 avx512vl_i32_info, HasAVX512, 1>,
1780 EVEX_CD8<32, CD8VF>;
1782 defm VPCMPEQQ : avx512_icmp_packed_rmb_vl<0x29, "vpcmpeqq", X86pcmpeqm,
1783 avx512vl_i64_info, HasAVX512, 1>,
1784 T8PD, VEX_W, EVEX_CD8<64, CD8VF>;
1786 defm VPCMPGTB : avx512_icmp_packed_vl<0x64, "vpcmpgtb", X86pcmpgtm,
1787 avx512vl_i8_info, HasBWI>,
1790 defm VPCMPGTW : avx512_icmp_packed_vl<0x65, "vpcmpgtw", X86pcmpgtm,
1791 avx512vl_i16_info, HasBWI>,
1792 EVEX_CD8<16, CD8VF>;
1794 defm VPCMPGTD : avx512_icmp_packed_rmb_vl<0x66, "vpcmpgtd", X86pcmpgtm,
1795 avx512vl_i32_info, HasAVX512>,
1796 EVEX_CD8<32, CD8VF>;
1798 defm VPCMPGTQ : avx512_icmp_packed_rmb_vl<0x37, "vpcmpgtq", X86pcmpgtm,
1799 avx512vl_i64_info, HasAVX512>,
1800 T8PD, VEX_W, EVEX_CD8<64, CD8VF>;
// Without VLX, select 256-bit integer compares by widening both sources to
// zmm, using the 512-bit compare, and re-classing the resulting mask to VK8.
// The upper (undef) lanes' compare results are discarded by the VK8 copy.
1802 let Predicates = [HasAVX512, NoVLX] in {
1803 def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
1804 (COPY_TO_REGCLASS (VPCMPGTDZrr
1805 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
1806 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm))), VK8)>;
1808 def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
1809 (COPY_TO_REGCLASS (VPCMPEQDZrr
1810 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
1811 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm))), VK8)>;
// VPCMP with explicit condition code (signed/unsigned integer compare to
// mask). rri/rmi are unmasked; rrik/rmik AND into the write-mask. The *_alt
// defs are assembler-only aliases taking a raw u8imm condition code.
// NOTE(review): several pattern-operand/ins lines are missing in this dump
// (e.g. imm:$cc operands and AVX512ICC:$cc lines of the masked forms).
1814 multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
1815 X86VectorVTInfo _> {
1816 let isCommutable = 1 in
1817 def rri : AVX512AIi8<opc, MRMSrcReg,
1818 (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, AVX512ICC:$cc),
1819 !strconcat("vpcmp${cc}", Suffix,
1820 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1821 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
1823 IIC_SSE_ALU_F32P_RR>, EVEX_4V;
1824 def rmi : AVX512AIi8<opc, MRMSrcMem,
1825 (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, AVX512ICC:$cc),
1826 !strconcat("vpcmp${cc}", Suffix,
1827 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1828 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
1829 (_.VT (bitconvert (_.LdFrag addr:$src2))),
1831 IIC_SSE_ALU_F32P_RM>, EVEX_4V;
1832 def rrik : AVX512AIi8<opc, MRMSrcReg,
1833 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
1835 !strconcat("vpcmp${cc}", Suffix,
1836 "\t{$src2, $src1, $dst {${mask}}|",
1837 "$dst {${mask}}, $src1, $src2}"),
1838 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1839 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
1841 IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
1842 def rmik : AVX512AIi8<opc, MRMSrcMem,
1843 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
1845 !strconcat("vpcmp${cc}", Suffix,
1846 "\t{$src2, $src1, $dst {${mask}}|",
1847 "$dst {${mask}}, $src1, $src2}"),
1848 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1849 (OpNode (_.VT _.RC:$src1),
1850 (_.VT (bitconvert (_.LdFrag addr:$src2))),
1852 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
1854 // Accept explicit immediate argument form instead of comparison code.
1855 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1856 def rri_alt : AVX512AIi8<opc, MRMSrcReg,
1857 (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
1858 !strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
1859 "$dst, $src1, $src2, $cc}"),
1860 [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
1862 def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
1863 (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc),
1864 !strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
1865 "$dst, $src1, $src2, $cc}"),
1866 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
1867 def rrik_alt : AVX512AIi8<opc, MRMSrcReg,
1868 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
1870 !strconcat("vpcmp", Suffix,
1871 "\t{$cc, $src2, $src1, $dst {${mask}}|",
1872 "$dst {${mask}}, $src1, $src2, $cc}"),
1873 [], IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
1875 def rmik_alt : AVX512AIi8<opc, MRMSrcMem,
1876 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
1878 !strconcat("vpcmp", Suffix,
1879 "\t{$cc, $src2, $src1, $dst {${mask}}|",
1880 "$dst {${mask}}, $src1, $src2, $cc}"),
1881 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
// Extends avx512_icmp_cc with embedded-broadcast memory forms (EVEX.b):
// rmib/rmibk compare against a splat scalar load; *_alt are the
// assembler-only explicit-immediate aliases.
1885 multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, SDNode OpNode,
1886 X86VectorVTInfo _> :
1887 avx512_icmp_cc<opc, Suffix, OpNode, _> {
1888 def rmib : AVX512AIi8<opc, MRMSrcMem,
1889 (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
1891 !strconcat("vpcmp${cc}", Suffix,
1892 "\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
1893 "$dst, $src1, ${src2}", _.BroadcastStr, "}"),
1894 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
1895 (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
1897 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
1898 def rmibk : AVX512AIi8<opc, MRMSrcMem,
1899 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
1900 _.ScalarMemOp:$src2, AVX512ICC:$cc),
1901 !strconcat("vpcmp${cc}", Suffix,
1902 "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
1903 "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
1904 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1905 (OpNode (_.VT _.RC:$src1),
1906 (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
1908 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
1910 // Accept explicit immediate argument form instead of comparison code.
1911 let isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 1 in {
1912 def rmib_alt : AVX512AIi8<opc, MRMSrcMem,
1913 (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
1915 !strconcat("vpcmp", Suffix,
1916 "\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst|",
1917 "$dst, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
1918 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
1919 def rmibk_alt : AVX512AIi8<opc, MRMSrcMem,
1920 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
1921 _.ScalarMemOp:$src2, u8imm:$cc),
1922 !strconcat("vpcmp", Suffix,
1923 "\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
1924 "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
1925 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
// Vector-length wrapper for avx512_icmp_cc: 512-bit under prd, 128/256-bit
// under [prd, HasVLX].
1929 multiclass avx512_icmp_cc_vl<bits<8> opc, string Suffix, SDNode OpNode,
1930 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
1931 let Predicates = [prd] in
1932 defm Z : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info512>, EVEX_V512;
1934 let Predicates = [prd, HasVLX] in {
1935 defm Z256 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info256>, EVEX_V256;
1936 defm Z128 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info128>, EVEX_V128;
// Vector-length wrapper for the broadcast-capable VPCMP variant.
// NOTE(review): the EVEX_V512/V256/V128 attribute lines appear to be on
// lines missing from this dump.
1940 multiclass avx512_icmp_cc_rmb_vl<bits<8> opc, string Suffix, SDNode OpNode,
1941 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
1942 let Predicates = [prd] in
1943 defm Z : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info512>,
1946 let Predicates = [prd, HasVLX] in {
1947 defm Z256 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info256>,
1949 defm Z128 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info128>,
// VPCMP[U]{B,W,D,Q} instantiations: X86cmpm = signed compare, X86cmpmu =
// unsigned ("u" suffix). b/w need BWI; d/q get broadcast forms (rmb_vl).
1954 defm VPCMPB : avx512_icmp_cc_vl<0x3F, "b", X86cmpm, avx512vl_i8_info,
1955 HasBWI>, EVEX_CD8<8, CD8VF>;
1956 defm VPCMPUB : avx512_icmp_cc_vl<0x3E, "ub", X86cmpmu, avx512vl_i8_info,
1957 HasBWI>, EVEX_CD8<8, CD8VF>;
1959 defm VPCMPW : avx512_icmp_cc_vl<0x3F, "w", X86cmpm, avx512vl_i16_info,
1960 HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;
1961 defm VPCMPUW : avx512_icmp_cc_vl<0x3E, "uw", X86cmpmu, avx512vl_i16_info,
1962 HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;
1964 defm VPCMPD : avx512_icmp_cc_rmb_vl<0x1F, "d", X86cmpm, avx512vl_i32_info,
1965 HasAVX512>, EVEX_CD8<32, CD8VF>;
1966 defm VPCMPUD : avx512_icmp_cc_rmb_vl<0x1E, "ud", X86cmpmu, avx512vl_i32_info,
1967 HasAVX512>, EVEX_CD8<32, CD8VF>;
1969 defm VPCMPQ : avx512_icmp_cc_rmb_vl<0x1F, "q", X86cmpm, avx512vl_i64_info,
1970 HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
1971 defm VPCMPUQ : avx512_icmp_cc_rmb_vl<0x1E, "uq", X86cmpmu, avx512vl_i64_info,
1972 HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
// Packed FP compare to mask (VCMPPS/VCMPPD): register, full-memory and
// broadcast-memory forms, each selecting X86cmpm; *_alt are assembler-only
// explicit-immediate aliases.
// NOTE(review): several operand/imm lines of the patterns are missing from
// this dump.
1974 multiclass avx512_vcmp_common<X86VectorVTInfo _> {
1976 defm rri : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
1977 (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2,AVXCC:$cc),
1978 "vcmp${cc}"#_.Suffix,
1979 "$src2, $src1", "$src1, $src2",
1980 (X86cmpm (_.VT _.RC:$src1),
1984 defm rmi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
1985 (outs _.KRC:$dst),(ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
1986 "vcmp${cc}"#_.Suffix,
1987 "$src2, $src1", "$src1, $src2",
1988 (X86cmpm (_.VT _.RC:$src1),
1989 (_.VT (bitconvert (_.LdFrag addr:$src2))),
1992 defm rmbi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
1994 (ins _.RC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
1995 "vcmp${cc}"#_.Suffix,
1996 "${src2}"##_.BroadcastStr##", $src1",
1997 "$src1, ${src2}"##_.BroadcastStr,
1998 (X86cmpm (_.VT _.RC:$src1),
1999 (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
2001 // Accept explicit immediate argument form instead of comparison code.
2002 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2003 defm rri_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcReg, _,
2005 (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
2007 "$cc, $src2, $src1", "$src1, $src2, $cc">;
2009 let mayLoad = 1 in {
2010 defm rmi_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcMem, _,
2012 (ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc),
2014 "$cc, $src2, $src1", "$src1, $src2, $cc">;
2016 defm rmbi_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcMem, _,
2018 (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$cc),
2020 "$cc, ${src2}"##_.BroadcastStr##", $src1",
2021 "$src1, ${src2}"##_.BroadcastStr##", $cc">,EVEX_B;
// {sae} (suppress-all-exceptions) form of the packed FP compare, selected
// via X86cmpmRnd with FROUND_NO_EXC; only instantiated for the 512-bit size.
2026 multiclass avx512_vcmp_sae<X86VectorVTInfo _> {
2027 // comparison code form (VCMP[EQ/LT/LE/...])
2028 defm rrib : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
2029 (outs _.KRC:$dst),(ins _.RC:$src1, _.RC:$src2, AVXCC:$cc),
2030 "vcmp${cc}"#_.Suffix,
2031 "{sae}, $src2, $src1", "$src1, $src2, {sae}",
2032 (X86cmpmRnd (_.VT _.RC:$src1),
2035 (i32 FROUND_NO_EXC))>, EVEX_B;
// Assembler-only alias taking an explicit immediate condition code.
2037 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2038 defm rrib_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcReg, _,
2040 (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
2042 "$cc, {sae}, $src2, $src1",
2043 "$src1, $src2, {sae}, $cc">, EVEX_B;
// Top-level VCMP wrapper: 512-bit (plus the {sae} form) under HasAVX512;
// 128/256-bit under [HasAVX512, HasVLX].
2047 multiclass avx512_vcmp<AVX512VLVectorVTInfo _> {
2048 let Predicates = [HasAVX512] in {
2049 defm Z : avx512_vcmp_common<_.info512>,
2050 avx512_vcmp_sae<_.info512>, EVEX_V512;
2053 let Predicates = [HasAVX512,HasVLX] in {
2054 defm Z128 : avx512_vcmp_common<_.info128>, EVEX_V128;
2055 defm Z256 : avx512_vcmp_common<_.info256>, EVEX_V256;
// Packed FP compare instantiations (PD has VEX_W for 64-bit elements).
2059 defm VCMPPD : avx512_vcmp<avx512vl_f64_info>,
2060 AVX512PDIi8Base, EVEX_4V, EVEX_CD8<64, CD8VF>, VEX_W;
2061 defm VCMPPS : avx512_vcmp<avx512vl_f32_info>,
2062 AVX512PSIi8Base, EVEX_4V, EVEX_CD8<32, CD8VF>;
// Select 256-bit compares by widening to the 512-bit instruction and
// narrowing the mask to VK8. NOTE(review): in the original file these
// patterns presumably sit inside a [HasAVX512, NoVLX] predicate block and
// carry a trailing imm:$cc operand — both on lines missing from this dump;
// confirm against upstream X86InstrAVX512.td.
2064 def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
2065 (COPY_TO_REGCLASS (VCMPPSZrri
2066 (v16f32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
2067 (v16f32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm)),
2069 def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
2070 (COPY_TO_REGCLASS (VPCMPDZrri
2071 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
2072 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm)),
2074 def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
2075 (COPY_TO_REGCLASS (VPCMPUDZrri
2076 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
2077 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm)),
2080 // ----------------------------------------------------------------
2082 //handle fpclass instruction mask = op(reg_scalar,imm)
2083 // op(mem_scalar,imm)
// Scalar VFPCLASS: classify a scalar FP value against an immediate category
// mask, producing a k-mask bit. rr/rm are unmasked; rrk/rmk OR the result
// into the write-mask (note: `or`, not `and` — unlike the compare patterns
// above).
2084 multiclass avx512_scalar_fpclass<bits<8> opc, string OpcodeStr, SDNode OpNode,
2085 X86VectorVTInfo _, Predicate prd> {
2086 let Predicates = [prd] in {
2087 def rr : AVX512<opc, MRMSrcReg, (outs _.KRC:$dst),
2088 (ins _.RC:$src1, i32u8imm:$src2),
2089 OpcodeStr##_.Suffix#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2090 [(set _.KRC:$dst,(OpNode (_.VT _.RC:$src1),
2091 (i32 imm:$src2)))], NoItinerary>;
2092 def rrk : AVX512<opc, MRMSrcReg, (outs _.KRC:$dst),
2093 (ins _.KRCWM:$mask, _.RC:$src1, i32u8imm:$src2),
2094 OpcodeStr##_.Suffix#
2095 "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
2096 [(set _.KRC:$dst,(or _.KRCWM:$mask,
2097 (OpNode (_.VT _.RC:$src1),
2098 (i32 imm:$src2))))], NoItinerary>, EVEX_K;
2099 let AddedComplexity = 20 in {
2100 def rm : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
2101 (ins _.MemOp:$src1, i32u8imm:$src2),
2102 OpcodeStr##_.Suffix##
2103 "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2105 (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
2106 (i32 imm:$src2)))], NoItinerary>;
2107 def rmk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
2108 (ins _.KRCWM:$mask, _.MemOp:$src1, i32u8imm:$src2),
2109 OpcodeStr##_.Suffix##
2110 "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
2111 [(set _.KRC:$dst,(or _.KRCWM:$mask,
2112 (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
2113 (i32 imm:$src2))))], NoItinerary>, EVEX_K;
2118 //handle fpclass instruction mask = fpclass(reg_vec, reg_vec, imm)
2119 // fpclass(reg_vec, mem_vec, imm)
2120 // fpclass(reg_vec, broadcast(eltVt), imm)
// Vector VFPCLASS: per-element FP classification against an immediate
// category mask. The `mem`/`broadcast` string parameters are spliced into
// the mnemonic to disambiguate operand size for the assembler ({x}/{y}/{z},
// {l}/{q}). Masked forms OR into the write-mask; rmb/rmbk are the
// embedded-broadcast variants (EVEX.b).
2121 multiclass avx512_vector_fpclass<bits<8> opc, string OpcodeStr, SDNode OpNode,
2122 X86VectorVTInfo _, string mem, string broadcast>{
2123 def rr : AVX512<opc, MRMSrcReg, (outs _.KRC:$dst),
2124 (ins _.RC:$src1, i32u8imm:$src2),
2125 OpcodeStr##_.Suffix#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2126 [(set _.KRC:$dst,(OpNode (_.VT _.RC:$src1),
2127 (i32 imm:$src2)))], NoItinerary>;
2128 def rrk : AVX512<opc, MRMSrcReg, (outs _.KRC:$dst),
2129 (ins _.KRCWM:$mask, _.RC:$src1, i32u8imm:$src2),
2130 OpcodeStr##_.Suffix#
2131 "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
2132 [(set _.KRC:$dst,(or _.KRCWM:$mask,
2133 (OpNode (_.VT _.RC:$src1),
2134 (i32 imm:$src2))))], NoItinerary>, EVEX_K;
2135 def rm : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
2136 (ins _.MemOp:$src1, i32u8imm:$src2),
2137 OpcodeStr##_.Suffix##mem#
2138 "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2139 [(set _.KRC:$dst,(OpNode
2140 (_.VT (bitconvert (_.LdFrag addr:$src1))),
2141 (i32 imm:$src2)))], NoItinerary>;
2142 def rmk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
2143 (ins _.KRCWM:$mask, _.MemOp:$src1, i32u8imm:$src2),
2144 OpcodeStr##_.Suffix##mem#
2145 "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
2146 [(set _.KRC:$dst, (or _.KRCWM:$mask, (OpNode
2147 (_.VT (bitconvert (_.LdFrag addr:$src1))),
2148 (i32 imm:$src2))))], NoItinerary>, EVEX_K;
2149 def rmb : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
2150 (ins _.ScalarMemOp:$src1, i32u8imm:$src2),
2151 OpcodeStr##_.Suffix##broadcast##"\t{$src2, ${src1}"##
2152 _.BroadcastStr##", $dst|$dst, ${src1}"
2153 ##_.BroadcastStr##", $src2}",
2154 [(set _.KRC:$dst,(OpNode
2155 (_.VT (X86VBroadcast
2156 (_.ScalarLdFrag addr:$src1))),
2157 (i32 imm:$src2)))], NoItinerary>,EVEX_B;
2158 def rmbk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
2159 (ins _.KRCWM:$mask, _.ScalarMemOp:$src1, i32u8imm:$src2),
2160 OpcodeStr##_.Suffix##broadcast##"\t{$src2, ${src1}"##
2161 _.BroadcastStr##", $dst {${mask}}|$dst {${mask}}, ${src1}"##
2162 _.BroadcastStr##", $src2}",
2163 [(set _.KRC:$dst,(or _.KRCWM:$mask, (OpNode
2164 (_.VT (X86VBroadcast
2165 (_.ScalarLdFrag addr:$src1))),
2166 (i32 imm:$src2))))], NoItinerary>,
// Vector-length wrapper for vector VFPCLASS; the "{z}"/"{x}"/"{y}" strings
// become the memory-form mnemonic size suffixes for each vector width.
2170 multiclass avx512_vector_fpclass_all<string OpcodeStr,
2171 AVX512VLVectorVTInfo _, bits<8> opc, SDNode OpNode, Predicate prd,
2173 let Predicates = [prd] in {
2174 defm Z : avx512_vector_fpclass<opc, OpcodeStr, OpNode, _.info512, "{z}",
2175 broadcast>, EVEX_V512;
2177 let Predicates = [prd, HasVLX] in {
2178 defm Z128 : avx512_vector_fpclass<opc, OpcodeStr, OpNode, _.info128, "{x}",
2179 broadcast>, EVEX_V128;
2180 defm Z256 : avx512_vector_fpclass<opc, OpcodeStr, OpNode, _.info256, "{y}",
2181 broadcast>, EVEX_V256;
// Combines the four VFPCLASS flavors: packed PS/PD (vector opcode, with
// "{l}"/"{q}" broadcast suffixes) and scalar SS/SD (scalar opcode).
2185 multiclass avx512_fp_fpclass_all<string OpcodeStr, bits<8> opcVec,
2186 bits<8> opcScalar, SDNode VecOpNode, SDNode ScalarOpNode, Predicate prd>{
2187 defm PS : avx512_vector_fpclass_all<OpcodeStr, avx512vl_f32_info, opcVec,
2188 VecOpNode, prd, "{l}">, EVEX_CD8<32, CD8VF>;
2189 defm PD : avx512_vector_fpclass_all<OpcodeStr, avx512vl_f64_info, opcVec,
2190 VecOpNode, prd, "{q}">,EVEX_CD8<64, CD8VF> , VEX_W;
2191 defm SS : avx512_scalar_fpclass<opcScalar, OpcodeStr, ScalarOpNode,
2192 f32x_info, prd>, EVEX_CD8<32, CD8VT1>;
2193 defm SD : avx512_scalar_fpclass<opcScalar, OpcodeStr, ScalarOpNode,
2194 f64x_info, prd>, EVEX_CD8<64, CD8VT1>, VEX_W;
// VFPCLASS instantiation (requires DQI): 0x66 = vector opcode, 0x67 = scalar.
2197 defm VFPCLASS : avx512_fp_fpclass_all<"vfpclass", 0x66, 0x67, X86Vfpclass,
2198 X86Vfpclasss, HasDQI>, AVX512AIi8Base,EVEX;
2200 //-----------------------------------------------------------------
2201 // Mask register copy, including
2202 // - copy between mask registers
2203 // - load/store mask registers
2204 // - copy from GPR to mask register and vice versa
// KMOV family: kk = mask-to-mask register copy (no pattern, side-effect
// free), km = load mask from memory, mk = store mask to memory.
2206 multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
2207 string OpcodeStr, RegisterClass KRC,
2208 ValueType vvt, X86MemOperand x86memop> {
2209 let hasSideEffects = 0 in
2210 def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
2211 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
2212 def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
2213 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2214 [(set KRC:$dst, (vvt (load addr:$src)))]>;
2215 def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
2216 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2217 [(store KRC:$src, addr:$dst)]>;
// KMOV GPR forms: kr = GPR-to-mask, rk = mask-to-GPR. Pattern-free
// (selection uses COPY_TO_REGCLASS / the Pats below instead).
2220 multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
2222 RegisterClass KRC, RegisterClass GRC> {
2223 let hasSideEffects = 0 in {
2224 def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
2225 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
2226 def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
2227 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
// KMOVB (DQI), KMOVW (AVX512F), KMOVD/KMOVQ (BWI) instantiations. The GPR
// forms of B/W/D use GR32; only KMOVQ uses GR64.
// NOTE(review): the encoding-prefix attribute lines (PD/XD/etc.) appear to
// be on lines missing from this dump.
2231 let Predicates = [HasDQI] in
2232 defm KMOVB : avx512_mask_mov<0x90, 0x90, 0x91, "kmovb", VK8, v8i1, i8mem>,
2233 avx512_mask_mov_gpr<0x92, 0x93, "kmovb", VK8, GR32>,
2236 let Predicates = [HasAVX512] in
2237 defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
2238 avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
2241 let Predicates = [HasBWI] in {
2242 defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1,i32mem>,
2244 defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>,
2246 defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64mem>,
2248 defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>,
2252 // GR from/to mask register
// iN <-> vNi1 bitconverts are register-class copies; zext/anyext of the
// narrowed integer either use KMOVWrk/KMOVBrk or widen via INSERT_SUBREG.
// The two v8i1-zext patterns are mutually exclusive via NoDQI/HasDQI
// (KMOVBrk needs DQI).
2253 def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
2254 (COPY_TO_REGCLASS GR16:$src, VK16)>;
2255 def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
2256 (COPY_TO_REGCLASS VK16:$src, GR16)>;
2258 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
2259 (COPY_TO_REGCLASS GR8:$src, VK8)>;
2260 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
2261 (COPY_TO_REGCLASS VK8:$src, GR8)>;
2263 def : Pat<(i32 (zext (i16 (bitconvert (v16i1 VK16:$src))))),
2264 (KMOVWrk VK16:$src)>;
2265 def : Pat<(i32 (anyext (i16 (bitconvert (v16i1 VK16:$src))))),
2266 (i32 (INSERT_SUBREG (IMPLICIT_DEF),
2267 (i16 (COPY_TO_REGCLASS VK16:$src, GR16)), sub_16bit))>;
2269 def : Pat<(i32 (zext (i8 (bitconvert (v8i1 VK8:$src))))),
2270 (MOVZX32rr8 (COPY_TO_REGCLASS VK8:$src, GR8))>, Requires<[NoDQI]>;
2271 def : Pat<(i32 (zext (i8 (bitconvert (v8i1 VK8:$src))))),
2272 (KMOVBrk VK8:$src)>, Requires<[HasDQI]>;
2273 def : Pat<(i32 (anyext (i8 (bitconvert (v8i1 VK8:$src))))),
2274 (i32 (INSERT_SUBREG (IMPLICIT_DEF),
2275 (i8 (COPY_TO_REGCLASS VK8:$src, GR8)), sub_8bit))>;
2277 def : Pat<(v32i1 (bitconvert (i32 GR32:$src))),
2278 (COPY_TO_REGCLASS GR32:$src, VK32)>;
2279 def : Pat<(i32 (bitconvert (v32i1 VK32:$src))),
2280 (COPY_TO_REGCLASS VK32:$src, GR32)>;
2281 def : Pat<(v64i1 (bitconvert (i64 GR64:$src))),
2282 (COPY_TO_REGCLASS GR64:$src, VK64)>;
2283 def : Pat<(i64 (bitconvert (v64i1 VK64:$src))),
2284 (COPY_TO_REGCLASS VK64:$src, GR64)>;
// With DQI, KMOVB handles 8-bit (and narrower, via VK8 re-class) mask
// loads/stores directly.
2287 let Predicates = [HasDQI] in {
2288 def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
2289 (KMOVBmk addr:$dst, VK8:$src)>;
2290 def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
2291 (KMOVBkm addr:$src)>;
2293 def : Pat<(store VK4:$src, addr:$dst),
2294 (KMOVBmk addr:$dst, (COPY_TO_REGCLASS VK4:$src, VK8))>;
2295 def : Pat<(store VK2:$src, addr:$dst),
2296 (KMOVBmk addr:$dst, (COPY_TO_REGCLASS VK2:$src, VK8))>;
2297 def : Pat<(store VK1:$src, addr:$dst),
2298 (KMOVBmk addr:$dst, (COPY_TO_REGCLASS VK1:$src, VK8))>;
2300 def : Pat<(v2i1 (load addr:$src)),
2301 (COPY_TO_REGCLASS (KMOVBkm addr:$src), VK2)>;
2302 def : Pat<(v4i1 (load addr:$src)),
2303 (COPY_TO_REGCLASS (KMOVBkm addr:$src), VK4)>;
// Without DQI there is no byte-granular KMOV: narrow mask stores go through
// KMOVWrk (mask -> GR32) + a subreg store; narrow loads zero-extend a byte
// load (MOVZX32rm8) and re-class into the mask register file.
// NOTE(review): the store-instruction lines of the store patterns (the
// MOV8mk/extract destinations) are on lines missing from this dump.
2305 let Predicates = [HasAVX512, NoDQI] in {
2306 def : Pat<(store VK1:$src, addr:$dst),
2308 (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)),
2310 def : Pat<(store VK2:$src, addr:$dst),
2312 (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK2:$src, VK16)),
2314 def : Pat<(store VK4:$src, addr:$dst),
2316 (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK4:$src, VK16)),
2318 def : Pat<(store VK8:$src, addr:$dst),
2320 (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
2323 def : Pat<(v8i1 (load addr:$src)),
2324 (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK8)>;
2325 def : Pat<(v2i1 (load addr:$src)),
2326 (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK2)>;
2327 def : Pat<(v4i1 (load addr:$src)),
2328 (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK4)>;
// Base AVX-512: 16-bit mask load/store via KMOVW; an i1 load masks the
// loaded byte down to bit 0 before copying into VK1.
2331 let Predicates = [HasAVX512] in {
2332 def : Pat<(store (i16 (bitconvert (v16i1 VK16:$src))), addr:$dst),
2333 (KMOVWmk addr:$dst, VK16:$src)>;
2334 def : Pat<(i1 (load addr:$src)),
2335 (COPY_TO_REGCLASS (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1)), VK1)>;
2336 def : Pat<(v16i1 (bitconvert (i16 (load addr:$src)))),
2337 (KMOVWkm addr:$src)>;
// BWI adds 32-/64-bit mask registers: KMOVD/KMOVQ handle their load/store.
2339 let Predicates = [HasBWI] in {
2340 def : Pat<(store (i32 (bitconvert (v32i1 VK32:$src))), addr:$dst),
2341 (KMOVDmk addr:$dst, VK32:$src)>;
2342 def : Pat<(v32i1 (bitconvert (i32 (load addr:$src)))),
2343 (KMOVDkm addr:$src)>;
2344 def : Pat<(store (i64 (bitconvert (v64i1 VK64:$src))), addr:$dst),
2345 (KMOVQmk addr:$dst, VK64:$src)>;
2346 def : Pat<(v64i1 (bitconvert (i64 (load addr:$src)))),
2347 (KMOVQkm addr:$src)>;
// Conversions between i1 (held in VK1) and GPR types: trunc masks the GPR
// down to bit 0 and moves it into a mask register with KMOVWkr; zext goes
// back through KMOVWrk plus an AND with 1; anyext is a bare class copy.
// NOTE(review): some continuation lines (original 2353, 2365-2366, 2380-2381,
// 2398-2399) were lost in extraction of this chunk — several defs below are
// visibly truncated; confirm against the full file before editing.
2350 let Predicates = [HasAVX512] in {
2351 def : Pat<(i1 (trunc (i64 GR64:$src))),
2352 (COPY_TO_REGCLASS (KMOVWkr (AND32ri8 (EXTRACT_SUBREG $src, sub_32bit),
2355 def : Pat<(i1 (trunc (i32 GR32:$src))),
2356 (COPY_TO_REGCLASS (KMOVWkr (AND32ri8 $src, (i32 1))), VK1)>;
// If the value is already known zero-extended from i1, no AND is needed.
2358 def : Pat<(i1 (trunc (i32 (assertzext_i1 GR32:$src)))),
2359 (COPY_TO_REGCLASS GR32:$src, VK1)>;
2361 def : Pat<(i1 (trunc (i8 GR8:$src))),
2363 (KMOVWkr (AND32ri8 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
2364 GR8:$src, sub_8bit), (i32 1))),
2367 def : Pat<(i1 (trunc (i16 GR16:$src))),
2369 (KMOVWkr (AND32ri8 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
2370 GR16:$src, sub_16bit), (i32 1))),
// zext: KMOVWrk may bring along garbage from the wider mask, so AND with 1.
2373 def : Pat<(i32 (zext VK1:$src)),
2374 (AND32ri8 (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>;
2376 def : Pat<(i32 (anyext VK1:$src)),
2377 (COPY_TO_REGCLASS VK1:$src, GR32)>;
2379 def : Pat<(i8 (zext VK1:$src)),
2382 (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>;
2384 def : Pat<(i8 (anyext VK1:$src)),
2385 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS VK1:$src, GR32)), sub_8bit)>;
2387 def : Pat<(i64 (zext VK1:$src)),
2388 (AND64ri8 (SUBREG_TO_REG (i64 0),
2389 (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>;
2391 def : Pat<(i64 (anyext VK1:$src)),
2392 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2393 (i32 (COPY_TO_REGCLASS VK1:$src, GR32)), sub_32bit)>;
2395 def : Pat<(i16 (zext VK1:$src)),
2397 (AND32ri8 (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)),
2400 def : Pat<(i16 (anyext VK1:$src)),
2401 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS VK1:$src, GR32)), sub_16bit)>;
// scalar_to_vector of an i1 is just a register-class widening.
2403 def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
2404 (COPY_TO_REGCLASS VK1:$src, VK16)>;
2405 def : Pat<(v8i1 (scalar_to_vector VK1:$src)),
2406 (COPY_TO_REGCLASS VK1:$src, VK8)>;
2407 def : Pat<(v4i1 (scalar_to_vector VK1:$src)),
2408 (COPY_TO_REGCLASS VK1:$src, VK4)>;
2409 def : Pat<(v2i1 (scalar_to_vector VK1:$src)),
2410 (COPY_TO_REGCLASS VK1:$src, VK2)>;
2411 def : Pat<(v32i1 (scalar_to_vector VK1:$src)),
2412 (COPY_TO_REGCLASS VK1:$src, VK32)>;
2413 def : Pat<(v64i1 (scalar_to_vector VK1:$src)),
2414 (COPY_TO_REGCLASS VK1:$src, VK64)>;
// Immediate i1 stores: both 1 and -1 are a true i1, stored as byte 1.
2416 def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
2417 def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
2418 def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;
// Extracting element 0 of any mask vector is a register-class narrowing.
2420 def : Pat<(i1 (X86Vextract VK64:$src, (iPTR 0))), (COPY_TO_REGCLASS VK64:$src, VK1)>;
2421 def : Pat<(i1 (X86Vextract VK32:$src, (iPTR 0))), (COPY_TO_REGCLASS VK32:$src, VK1)>;
2422 def : Pat<(i1 (X86Vextract VK16:$src, (iPTR 0))), (COPY_TO_REGCLASS VK16:$src, VK1)>;
2423 def : Pat<(i1 (X86Vextract VK8:$src, (iPTR 0))), (COPY_TO_REGCLASS VK8:$src, VK1)>;
2424 def : Pat<(i1 (X86Vextract VK4:$src, (iPTR 0))), (COPY_TO_REGCLASS VK4:$src, VK1)>;
2425 def : Pat<(i1 (X86Vextract VK2:$src, (iPTR 0))), (COPY_TO_REGCLASS VK2:$src, VK1)>;
2427 // Mask unary operation
// Emits one reg-reg mask unary instruction (e.g. KNOT) for a given mask
// register class, gated on the required feature predicate.
2429 multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
2430 RegisterClass KRC, SDPatternOperator OpNode,
2432 let Predicates = [prd] in
2433 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
2434 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2435 [(set KRC:$dst, (OpNode KRC:$src))]>;
// Instantiates the B/W/D/Q width variants: B needs DQI, W only AVX512F,
// D/Q need BWI; prefixes/VEX_W select the encoding per width.
2438 multiclass avx512_mask_unop_all<bits<8> opc, string OpcodeStr,
2439 SDPatternOperator OpNode> {
2440 defm B : avx512_mask_unop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
2442 defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
2443 HasAVX512>, VEX, PS;
2444 defm D : avx512_mask_unop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
2445 HasBWI>, VEX, PD, VEX_W;
2446 defm Q : avx512_mask_unop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
2447 HasBWI>, VEX, PS, VEX_W;
// KNOT = bitwise NOT of a mask register, all widths.
2450 defm KNOT : avx512_mask_unop_all<0x44, "knot", vnot>;
// Map the legacy knot intrinsic (GR16 in/out) onto KNOTWrr via mask copies.
2452 multiclass avx512_mask_unop_int<string IntName, string InstName> {
2453 let Predicates = [HasAVX512] in
2454 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
2456 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
2457 (v16i1 (COPY_TO_REGCLASS GR16:$src, VK16))), GR16)>;
2459 defm : avx512_mask_unop_int<"knot", "KNOT">;
2461 // KNL does not support KMOVB, 8-bit mask is promoted to 16-bit
2462 let Predicates = [HasAVX512, NoDQI] in
2463 def : Pat<(vnot VK8:$src),
2464 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
// Sub-8-bit masks always go through KNOTW via VK16 (no narrower KNOT exists).
2466 def : Pat<(vnot VK4:$src),
2467 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK4:$src, VK16)), VK4)>;
2468 def : Pat<(vnot VK2:$src),
2469 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK2:$src, VK16)), VK2)>;
2471 // Mask binary operation
2472 // - KAND, KANDN, KOR, KXNOR, KXOR
// Emits one reg-reg-reg mask binary instruction for a given mask register
// class; IsCommutable lets the two-address pass swap operands.
2473 multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
2474 RegisterClass KRC, SDPatternOperator OpNode,
2475 Predicate prd, bit IsCommutable> {
2476 let Predicates = [prd], isCommutable = IsCommutable in
2477 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
2478 !strconcat(OpcodeStr,
2479 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2480 [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
// B/W/D/Q width variants; the W predicate is overridable (KADD needs DQI).
2483 multiclass avx512_mask_binop_all<bits<8> opc, string OpcodeStr,
2484 SDPatternOperator OpNode, bit IsCommutable,
2485 Predicate prdW = HasAVX512> {
2486 defm B : avx512_mask_binop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
2487 HasDQI, IsCommutable>, VEX_4V, VEX_L, PD;
2488 defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
2489 prdW, IsCommutable>, VEX_4V, VEX_L, PS;
2490 defm D : avx512_mask_binop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
2491 HasBWI, IsCommutable>, VEX_4V, VEX_L, VEX_W, PD;
2492 defm Q : avx512_mask_binop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
2493 HasBWI, IsCommutable>, VEX_4V, VEX_L, VEX_W, PS;
// Composite pattern fragments for KANDN/KXNOR; the 'v' variants use vnot so
// they also match vector (mask-vector) operands.
2496 def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
2497 def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
2498 // These nodes use 'vnot' instead of 'not' to support vectors.
2499 def vandn : PatFrag<(ops node:$i0, node:$i1), (and (vnot node:$i0), node:$i1)>;
2500 def vxnor : PatFrag<(ops node:$i0, node:$i1), (vnot (xor node:$i0, node:$i1))>;
// The mask logic instructions; KANDN is the only non-commutable one.
2502 defm KAND : avx512_mask_binop_all<0x41, "kand", and, 1>;
2503 defm KOR : avx512_mask_binop_all<0x45, "kor", or, 1>;
2504 defm KXNOR : avx512_mask_binop_all<0x46, "kxnor", vxnor, 1>;
2505 defm KXOR : avx512_mask_binop_all<0x47, "kxor", xor, 1>;
2506 defm KANDN : avx512_mask_binop_all<0x42, "kandn", vandn, 0>;
2507 defm KADD : avx512_mask_binop_all<0x4A, "kadd", add, 1, HasDQI>;
// Map the legacy GR16 mask-logic intrinsics onto the W-form instructions.
2509 multiclass avx512_mask_binop_int<string IntName, string InstName> {
2510 let Predicates = [HasAVX512] in
2511 def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
2512 (i16 GR16:$src1), (i16 GR16:$src2)),
2513 (COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
2514 (v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
2515 (v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
2518 defm : avx512_mask_binop_int<"kand", "KAND">;
2519 defm : avx512_mask_binop_int<"kandn", "KANDN">;
2520 defm : avx512_mask_binop_int<"kor", "KOR">;
2521 defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
2522 defm : avx512_mask_binop_int<"kxor", "KXOR">;
// Lowers narrow-mask logic ops by widening both operands to VK16, running
// the W-form instruction ('Inst'), and narrowing the result back.
// NOTE(review): the second header line (original 2525, declaring the 'Inst'
// parameter) was lost in extraction of this chunk — confirm against the
// full file before editing this multiclass.
2524 multiclass avx512_binop_pat<SDPatternOperator VOpNode, SDPatternOperator OpNode,
2526 // With AVX512F, 8-bit mask is promoted to 16-bit mask,
2527 // for the DQI set, this type is legal and KxxxB instruction is used
2528 let Predicates = [NoDQI] in
2529 def : Pat<(VOpNode VK8:$src1, VK8:$src2),
2531 (Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
2532 (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
2534 // All types smaller than 8 bits require conversion anyway
2535 def : Pat<(OpNode VK1:$src1, VK1:$src2),
2536 (COPY_TO_REGCLASS (Inst
2537 (COPY_TO_REGCLASS VK1:$src1, VK16),
2538 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
// Widen v2i1/v4i1 operands to VK16, apply the W-form instruction, then
// narrow the result. The result COPY_TO_REGCLASS must target the class of
// the value being produced (VK2 for v2i1, VK4 for v4i1); the previous code
// copy-pasted VK1 from the scalar pattern above, giving the result the
// wrong register class.
2539 def : Pat<(VOpNode VK2:$src1, VK2:$src2),
2540 (COPY_TO_REGCLASS (Inst
2541 (COPY_TO_REGCLASS VK2:$src1, VK16),
2542 (COPY_TO_REGCLASS VK2:$src2, VK16)), VK2)>;
2543 def : Pat<(VOpNode VK4:$src1, VK4:$src2),
2544 (COPY_TO_REGCLASS (Inst
2545 (COPY_TO_REGCLASS VK4:$src1, VK16),
2546 (COPY_TO_REGCLASS VK4:$src2, VK16)), VK4)>;
// Instantiate the narrow-mask lowering for each logic op; the scalar (i1)
// form uses the plain scalar node, the vector form the vnot-based fragment.
2549 defm : avx512_binop_pat<and, and, KANDWrr>;
2550 defm : avx512_binop_pat<vandn, andn, KANDNWrr>;
2551 defm : avx512_binop_pat<or, or, KORWrr>;
2552 defm : avx512_binop_pat<vxnor, xnor, KXNORWrr>;
2553 defm : avx512_binop_pat<xor, xor, KXORWrr>;
// Mask unpack (KUNPCK*): concatenates two half-width masks into one.
// Note the operand swap in the pattern: src2 becomes the low half.
2556 multiclass avx512_mask_unpck<string Suffix,RegisterClass KRC, ValueType VT,
2557 RegisterClass KRCSrc, Predicate prd> {
2558 let Predicates = [prd] in {
2559 let hasSideEffects = 0 in
2560 def rr : I<0x4b, MRMSrcReg, (outs KRC:$dst),
2561 (ins KRC:$src1, KRC:$src2),
2562 "kunpck"#Suffix#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2565 def : Pat<(VT (concat_vectors KRCSrc:$src1, KRCSrc:$src2)),
2566 (!cast<Instruction>(NAME##rr)
2567 (COPY_TO_REGCLASS KRCSrc:$src2, KRC),
2568 (COPY_TO_REGCLASS KRCSrc:$src1, KRC))>;
2572 defm KUNPCKBW : avx512_mask_unpck<"bw", VK16, v16i1, VK8, HasAVX512>, PD;
2573 defm KUNPCKWD : avx512_mask_unpck<"wd", VK32, v32i1, VK16, HasBWI>, PS;
2574 defm KUNPCKDQ : avx512_mask_unpck<"dq", VK64, v64i1, VK32, HasBWI>, PS, VEX_W;
// Mask test instructions (KORTEST/KTEST): no result register, only EFLAGS.
2577 multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
2578 SDNode OpNode, Predicate prd> {
2579 let Predicates = [prd], Defs = [EFLAGS] in
2580 def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
2581 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2582 [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
// Width variants; the W predicate is overridable since KTEST requires DQI.
2585 multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode,
2586 Predicate prdW = HasAVX512> {
2587 defm B : avx512_mask_testop<opc, OpcodeStr#"b", VK8, OpNode, HasDQI>,
2589 defm W : avx512_mask_testop<opc, OpcodeStr#"w", VK16, OpNode, prdW>,
2591 defm Q : avx512_mask_testop<opc, OpcodeStr#"q", VK64, OpNode, HasBWI>,
2593 defm D : avx512_mask_testop<opc, OpcodeStr#"d", VK32, OpNode, HasBWI>,
2597 defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
2598 defm KTEST : avx512_mask_testop_w<0x99, "ktest", X86ktest, HasDQI>;
// Mask shift-by-immediate instructions (KSHIFTL/KSHIFTR).
2601 multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
2603 let Predicates = [HasAVX512] in
2604 def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, u8imm:$imm),
2605 !strconcat(OpcodeStr,
2606 "\t{$imm, $src, $dst|$dst, $src, $imm}"),
2607 [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
// Width variants; note B shares opc1 with W while D/Q use opc2.
2610 multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
2612 defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
2614 let Predicates = [HasDQI] in
2615 defm B : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "b"), VK8, OpNode>,
2617 let Predicates = [HasBWI] in {
2618 defm Q : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "q"), VK64, OpNode>,
2620 defm D : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "d"), VK32, OpNode>,
2625 defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86vshli>;
2626 defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", X86vsrli>;
2628 // Mask setting all 0s or 1s
// Pseudos (expanded post-RA) materializing an all-zeros or all-ones mask.
2629 multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
2630 let Predicates = [HasAVX512] in
2631 let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
2632 def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
2633 [(set KRC:$dst, (VT Val))]>;
2636 multiclass avx512_mask_setop_w<PatFrag Val> {
2637 defm B : avx512_mask_setop<VK8, v8i1, Val>;
2638 defm W : avx512_mask_setop<VK16, v16i1, Val>;
2639 defm D : avx512_mask_setop<VK32, v32i1, Val>;
2640 defm Q : avx512_mask_setop<VK64, v64i1, Val>;
2643 defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
2644 defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
2646 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
2647 let Predicates = [HasAVX512] in {
2648 def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
2649 def : Pat<(v4i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK4)>;
2650 def : Pat<(v2i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK2)>;
2651 def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
2652 def : Pat<(v4i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK4)>;
2653 def : Pat<(v2i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK2)>;
2654 def : Pat<(i1 0), (COPY_TO_REGCLASS (KSET0W), VK1)>;
// i1 true: set all 16 bits then shift right by 15 so only bit 0 is set.
2655 def : Pat<(i1 1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
2656 def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
2659 // Patterns for kmask insert_subvector/extract_subvector to/from index=0
// Index-0 sub/insert of mask vectors is just a register-class change; the
// upper bits are undef for insert_subvector with an undef destination.
2660 multiclass operation_subvector_mask_lowering<RegisterClass subRC, ValueType subVT,
2661 RegisterClass RC, ValueType VT> {
2662 def : Pat<(subVT (extract_subvector (VT RC:$src), (iPTR 0))),
2663 (subVT (COPY_TO_REGCLASS RC:$src, subRC))>;
2665 def : Pat<(VT (insert_subvector undef, subRC:$src, (iPTR 0))),
2666 (VT (COPY_TO_REGCLASS subRC:$src, RC))>;
// All (narrow, wide) mask-class pairs.
2669 defm : operation_subvector_mask_lowering<VK2, v2i1, VK4, v4i1>;
2670 defm : operation_subvector_mask_lowering<VK2, v2i1, VK8, v8i1>;
2671 defm : operation_subvector_mask_lowering<VK2, v2i1, VK16, v16i1>;
2672 defm : operation_subvector_mask_lowering<VK2, v2i1, VK32, v32i1>;
2673 defm : operation_subvector_mask_lowering<VK2, v2i1, VK64, v64i1>;
2675 defm : operation_subvector_mask_lowering<VK4, v4i1, VK8, v8i1>;
2676 defm : operation_subvector_mask_lowering<VK4, v4i1, VK16, v16i1>;
2677 defm : operation_subvector_mask_lowering<VK4, v4i1, VK32, v32i1>;
2678 defm : operation_subvector_mask_lowering<VK4, v4i1, VK64, v64i1>;
2680 defm : operation_subvector_mask_lowering<VK8, v8i1, VK16, v16i1>;
2681 defm : operation_subvector_mask_lowering<VK8, v8i1, VK32, v32i1>;
2682 defm : operation_subvector_mask_lowering<VK8, v8i1, VK64, v64i1>;
2684 defm : operation_subvector_mask_lowering<VK16, v16i1, VK32, v32i1>;
2685 defm : operation_subvector_mask_lowering<VK16, v16i1, VK64, v64i1>;
2687 defm : operation_subvector_mask_lowering<VK32, v32i1, VK64, v64i1>;
// Extracting the upper half of a mask vector: shift right by half the
// width (via KSHIFTRW through VK16 for the narrow types), then narrow.
2689 def : Pat<(v2i1 (extract_subvector (v4i1 VK4:$src), (iPTR 2))),
2690 (v2i1 (COPY_TO_REGCLASS
2691 (KSHIFTRWri (COPY_TO_REGCLASS VK4:$src, VK16), (i8 2)),
2693 def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 4))),
2694 (v4i1 (COPY_TO_REGCLASS
2695 (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (i8 4)),
2697 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
2698 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
2699 def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
2700 (v16i1 (COPY_TO_REGCLASS (KSHIFTRDri VK32:$src, (i8 16)), VK16))>;
2701 def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
2702 (v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
2705 // Patterns for kmask shift
// Narrow-mask shifts: widen to VK16, use the W-form KSHIFT, narrow back.
2706 multiclass mask_shift_lowering<RegisterClass RC, ValueType VT> {
2707 def : Pat<(VT (X86vshli RC:$src, (i8 imm:$imm))),
2708 (VT (COPY_TO_REGCLASS
2709 (KSHIFTLWri (COPY_TO_REGCLASS RC:$src, VK16),
2712 def : Pat<(VT (X86vsrli RC:$src, (i8 imm:$imm))),
2713 (VT (COPY_TO_REGCLASS
2714 (KSHIFTRWri (COPY_TO_REGCLASS RC:$src, VK16),
// v8i1 only needs this without DQI (DQI has a native byte KSHIFT).
2719 defm : mask_shift_lowering<VK8, v8i1>, Requires<[HasAVX512, NoDQI]>;
2720 defm : mask_shift_lowering<VK4, v4i1>, Requires<[HasAVX512]>;
2721 defm : mask_shift_lowering<VK2, v2i1>, Requires<[HasAVX512]>;
2722 //===----------------------------------------------------------------------===//
2723 // AVX-512 - Aligned and unaligned load and store
// One load opcode at a fixed vector width: plain (rr/rm), merge-masked
// (rrk/rmk, tied to $src0), and zero-masked (rrkz/rmkz) forms, plus
// masked_load patterns mapping onto the masked memory forms.
2727 multiclass avx512_load<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
2728 PatFrag ld_frag, PatFrag mload,
2729 SDPatternOperator SelectOprr = vselect> {
2730 let hasSideEffects = 0 in {
2731 def rr : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst), (ins _.RC:$src),
2732 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
// Zero-masking reg-reg form: elements with a clear mask bit become zero.
2734 def rrkz : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst),
2735 (ins _.KRCWM:$mask, _.RC:$src),
2736 !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
2737 "${dst} {${mask}} {z}, $src}"),
2738 [(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask,
2740 _.ImmAllZerosV)))], _.ExeDomain>,
2743 let canFoldAsLoad = 1, isReMaterializable = 1,
2744 SchedRW = [WriteLoad] in
2745 def rm : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst), (ins _.MemOp:$src),
2746 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2747 [(set _.RC:$dst, (_.VT (bitconvert (ld_frag addr:$src))))],
// Merge-masking forms: $src0 provides the pass-through elements, so the
// destination is tied to it.
2750 let Constraints = "$src0 = $dst" in {
2751 def rrk : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst),
2752 (ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src1),
2753 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
2754 "${dst} {${mask}}, $src1}"),
2755 [(set _.RC:$dst, (_.VT (SelectOprr _.KRCWM:$mask,
2757 (_.VT _.RC:$src0))))], _.ExeDomain>,
2759 let SchedRW = [WriteLoad] in
2760 def rmk : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst),
2761 (ins _.RC:$src0, _.KRCWM:$mask, _.MemOp:$src1),
2762 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
2763 "${dst} {${mask}}, $src1}"),
2764 [(set _.RC:$dst, (_.VT
2765 (vselect _.KRCWM:$mask,
2766 (_.VT (bitconvert (ld_frag addr:$src1))),
2767 (_.VT _.RC:$src0))))], _.ExeDomain>, EVEX, EVEX_K;
2769 let SchedRW = [WriteLoad] in
2770 def rmkz : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst),
2771 (ins _.KRCWM:$mask, _.MemOp:$src),
2772 OpcodeStr #"\t{$src, ${dst} {${mask}} {z}|"#
2773 "${dst} {${mask}} {z}, $src}",
2774 [(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask,
2775 (_.VT (bitconvert (ld_frag addr:$src))), _.ImmAllZerosV)))],
2776 _.ExeDomain>, EVEX, EVEX_KZ;
// masked_load with an undef or zero pass-through maps to the zero-masking
// form; with a register pass-through, to the merge-masking form.
2778 def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, undef)),
2779 (!cast<Instruction>(NAME#_.ZSuffix##rmkz) _.KRCWM:$mask, addr:$ptr)>;
2781 def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, _.ImmAllZerosV)),
2782 (!cast<Instruction>(NAME#_.ZSuffix##rmkz) _.KRCWM:$mask, addr:$ptr)>;
2784 def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, (_.VT _.RC:$src0))),
2785 (!cast<Instruction>(NAME#_.ZSuffix##rmk) _.RC:$src0,
2786 _.KRCWM:$mask, addr:$ptr)>;
// Aligned load at 512 bits plus, under VLX, the 256/128-bit variants.
2789 multiclass avx512_alignedload_vl<bits<8> opc, string OpcodeStr,
2790 AVX512VLVectorVTInfo _,
2792 let Predicates = [prd] in
2793 defm Z : avx512_load<opc, OpcodeStr, _.info512, _.info512.AlignedLdFrag,
2794 masked_load_aligned512>, EVEX_V512;
2796 let Predicates = [prd, HasVLX] in {
2797 defm Z256 : avx512_load<opc, OpcodeStr, _.info256, _.info256.AlignedLdFrag,
2798 masked_load_aligned256>, EVEX_V256;
2799 defm Z128 : avx512_load<opc, OpcodeStr, _.info128, _.info128.AlignedLdFrag,
2800 masked_load_aligned128>, EVEX_V128;
// Unaligned counterpart; SelectOprr lets callers customize the reg-reg
// masked select node.
2804 multiclass avx512_load_vl<bits<8> opc, string OpcodeStr,
2805 AVX512VLVectorVTInfo _,
2807 SDPatternOperator SelectOprr = vselect> {
2808 let Predicates = [prd] in
2809 defm Z : avx512_load<opc, OpcodeStr, _.info512, _.info512.LdFrag,
2810 masked_load_unaligned, SelectOprr>, EVEX_V512;
2812 let Predicates = [prd, HasVLX] in {
2813 defm Z256 : avx512_load<opc, OpcodeStr, _.info256, _.info256.LdFrag,
2814 masked_load_unaligned, SelectOprr>, EVEX_V256;
2815 defm Z128 : avx512_load<opc, OpcodeStr, _.info128, _.info128.LdFrag,
2816 masked_load_unaligned, SelectOprr>, EVEX_V128;
// One store opcode at a fixed vector width: reversed-encoding reg-reg forms
// (.s mnemonic, no patterns), plain and masked memory stores, and a
// masked_store pattern mapping onto the masked form.
2820 multiclass avx512_store<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
2821 PatFrag st_frag, PatFrag mstore> {
2823 let hasSideEffects = 0 in {
2824 def rr_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst), (ins _.RC:$src),
2825 OpcodeStr # ".s\t{$src, $dst|$dst, $src}",
2826 [], _.ExeDomain>, EVEX;
2827 def rrk_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst),
2828 (ins _.KRCWM:$mask, _.RC:$src),
2829 OpcodeStr # ".s\t{$src, ${dst} {${mask}}|"#
2830 "${dst} {${mask}}, $src}",
2831 [], _.ExeDomain>, EVEX, EVEX_K;
2832 def rrkz_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst),
2833 (ins _.KRCWM:$mask, _.RC:$src),
2834 OpcodeStr # ".s\t{$src, ${dst} {${mask}} {z}|" #
2835 "${dst} {${mask}} {z}, $src}",
2836 [], _.ExeDomain>, EVEX, EVEX_KZ;
2839 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins _.MemOp:$dst, _.RC:$src),
2840 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2841 [(st_frag (_.VT _.RC:$src), addr:$dst)], _.ExeDomain>, EVEX;
// Masked store is selected via the explicit pattern below, not via mrk's
// own (empty) pattern list.
2842 def mrk : AVX512PI<opc, MRMDestMem, (outs),
2843 (ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
2844 OpcodeStr # "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}",
2845 [], _.ExeDomain>, EVEX, EVEX_K;
2847 def: Pat<(mstore addr:$ptr, _.KRCWM:$mask, (_.VT _.RC:$src)),
2848 (!cast<Instruction>(NAME#_.ZSuffix##mrk) addr:$ptr,
2849 _.KRCWM:$mask, _.RC:$src)>;
// Unaligned store at 512 bits plus, under VLX, the 256/128-bit variants.
2853 multiclass avx512_store_vl< bits<8> opc, string OpcodeStr,
2854 AVX512VLVectorVTInfo _, Predicate prd> {
2855 let Predicates = [prd] in
2856 defm Z : avx512_store<opc, OpcodeStr, _.info512, store,
2857 masked_store_unaligned>, EVEX_V512;
2859 let Predicates = [prd, HasVLX] in {
2860 defm Z256 : avx512_store<opc, OpcodeStr, _.info256, store,
2861 masked_store_unaligned>, EVEX_V256;
2862 defm Z128 : avx512_store<opc, OpcodeStr, _.info128, store,
2863 masked_store_unaligned>, EVEX_V128;
// Aligned counterpart: uses the alignment-checked store fragments per width.
2867 multiclass avx512_alignedstore_vl<bits<8> opc, string OpcodeStr,
2868 AVX512VLVectorVTInfo _, Predicate prd> {
2869 let Predicates = [prd] in
2870 defm Z : avx512_store<opc, OpcodeStr, _.info512, alignedstore512,
2871 masked_store_aligned512>, EVEX_V512;
2873 let Predicates = [prd, HasVLX] in {
2874 defm Z256 : avx512_store<opc, OpcodeStr, _.info256, alignedstore256,
2875 masked_store_aligned256>, EVEX_V256;
2876 defm Z128 : avx512_store<opc, OpcodeStr, _.info128, alignedstore,
2877 masked_store_aligned128>, EVEX_V128;
// Concrete move instructions: aligned/unaligned FP moves (VMOVAPS/VMOVAPD,
// VMOVUPS/VMOVUPD) and integer moves (VMOVDQA32/64, VMOVDQU8/16/32/64);
// byte/word forms require BWI.
2881 defm VMOVAPS : avx512_alignedload_vl<0x28, "vmovaps", avx512vl_f32_info,
2883 avx512_alignedstore_vl<0x29, "vmovaps", avx512vl_f32_info,
2884 HasAVX512>, PS, EVEX_CD8<32, CD8VF>;
2886 defm VMOVAPD : avx512_alignedload_vl<0x28, "vmovapd", avx512vl_f64_info,
2888 avx512_alignedstore_vl<0x29, "vmovapd", avx512vl_f64_info,
2889 HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2891 defm VMOVUPS : avx512_load_vl<0x10, "vmovups", avx512vl_f32_info, HasAVX512,
2893 avx512_store_vl<0x11, "vmovups", avx512vl_f32_info, HasAVX512>,
2894 PS, EVEX_CD8<32, CD8VF>;
2896 defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", avx512vl_f64_info, HasAVX512,
2898 avx512_store_vl<0x11, "vmovupd", avx512vl_f64_info, HasAVX512>,
2899 PD, VEX_W, EVEX_CD8<64, CD8VF>;
2901 defm VMOVDQA32 : avx512_alignedload_vl<0x6F, "vmovdqa32", avx512vl_i32_info,
2903 avx512_alignedstore_vl<0x7F, "vmovdqa32", avx512vl_i32_info,
2904 HasAVX512>, PD, EVEX_CD8<32, CD8VF>;
2906 defm VMOVDQA64 : avx512_alignedload_vl<0x6F, "vmovdqa64", avx512vl_i64_info,
2908 avx512_alignedstore_vl<0x7F, "vmovdqa64", avx512vl_i64_info,
2909 HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;
2911 defm VMOVDQU8 : avx512_load_vl<0x6F, "vmovdqu8", avx512vl_i8_info, HasBWI>,
2912 avx512_store_vl<0x7F, "vmovdqu8", avx512vl_i8_info,
2913 HasBWI>, XD, EVEX_CD8<8, CD8VF>;
2915 defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", avx512vl_i16_info, HasBWI>,
2916 avx512_store_vl<0x7F, "vmovdqu16", avx512vl_i16_info,
2917 HasBWI>, XD, VEX_W, EVEX_CD8<16, CD8VF>;
2919 defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", avx512vl_i32_info, HasAVX512,
2921 avx512_store_vl<0x7F, "vmovdqu32", avx512vl_i32_info,
2922 HasAVX512>, XS, EVEX_CD8<32, CD8VF>;
2924 defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", avx512vl_i64_info, HasAVX512,
2926 avx512_store_vl<0x7F, "vmovdqu64", avx512vl_i64_info,
2927 HasAVX512>, XS, VEX_W, EVEX_CD8<64, CD8VF>;
2929 // Special instructions to help with spilling when we don't have VLX. We need
2930 // to load or store from a ZMM register instead. These are converted in
2931 // expandPostRAPseudos.
2932 let isReMaterializable = 1, canFoldAsLoad = 1,
2933 isPseudo = 1, SchedRW = [WriteLoad], mayLoad = 1, hasSideEffects = 0 in {
2934 def VMOVAPSZ128rm_NOVLX : I<0, Pseudo, (outs VR128X:$dst), (ins f128mem:$src),
2936 def VMOVAPSZ256rm_NOVLX : I<0, Pseudo, (outs VR256X:$dst), (ins f256mem:$src),
2938 def VMOVUPSZ128rm_NOVLX : I<0, Pseudo, (outs VR128X:$dst), (ins f128mem:$src),
2940 def VMOVUPSZ256rm_NOVLX : I<0, Pseudo, (outs VR256X:$dst), (ins f256mem:$src),
// Matching store pseudos for the same no-VLX spill case.
2944 let isPseudo = 1, mayStore = 1, hasSideEffects = 0 in {
2945 def VMOVAPSZ128mr_NOVLX : I<0, Pseudo, (outs), (ins f128mem:$dst, VR128X:$src),
2947 def VMOVAPSZ256mr_NOVLX : I<0, Pseudo, (outs), (ins f256mem:$dst, VR256X:$src),
2949 def VMOVUPSZ128mr_NOVLX : I<0, Pseudo, (outs), (ins f128mem:$dst, VR128X:$src),
2951 def VMOVUPSZ256mr_NOVLX : I<0, Pseudo, (outs), (ins f256mem:$dst, VR256X:$src),
// vselect with a zero vector in the true position: invert the mask with
// KNOT and use a zero-masking move so the selected lanes come from $src.
2955 def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
2956 (v8i64 VR512:$src))),
2957 (VMOVDQA64Zrrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
2960 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
2961 (v16i32 VR512:$src))),
2962 (VMOVDQA32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
2964 // These patterns exist to prevent the above patterns from introducing a second
2965 // mask inversion when one already exists.
2966 def : Pat<(v8i64 (vselect (xor VK8:$mask, (v8i1 immAllOnesV)),
2967 (bc_v8i64 (v16i32 immAllZerosV)),
2968 (v8i64 VR512:$src))),
2969 (VMOVDQA64Zrrkz VK8:$mask, VR512:$src)>;
2970 def : Pat<(v16i32 (vselect (xor VK16:$mask, (v16i1 immAllOnesV)),
2971 (v16i32 immAllZerosV),
2972 (v16i32 VR512:$src))),
// NOTE(review): source binds VK16:$mask but the result uses VK16WM:$mask;
// TableGen matches operands by name, so this resolves, but confirm the
// class mix is intentional before touching it.
2973 (VMOVDQA32Zrrkz VK16WM:$mask, VR512:$src)>;
// Without BWI there are no vmovdqu8/16, so byte/word vectors are moved with
// the dword instructions (bit pattern is identical for full stores/loads).
2975 let Predicates = [HasVLX, NoBWI] in {
2976 // 128-bit load/store without BWI.
2977 def : Pat<(alignedstore (v8i16 VR128X:$src), addr:$dst),
2978 (VMOVDQA32Z128mr addr:$dst, VR128X:$src)>;
2979 def : Pat<(alignedstore (v16i8 VR128X:$src), addr:$dst),
2980 (VMOVDQA32Z128mr addr:$dst, VR128X:$src)>;
2981 def : Pat<(store (v8i16 VR128X:$src), addr:$dst),
2982 (VMOVDQU32Z128mr addr:$dst, VR128X:$src)>;
2983 def : Pat<(store (v16i8 VR128X:$src), addr:$dst),
2984 (VMOVDQU32Z128mr addr:$dst, VR128X:$src)>;
2986 // 256-bit load/store without BWI.
2987 def : Pat<(alignedstore256 (v16i16 VR256X:$src), addr:$dst),
2988 (VMOVDQA32Z256mr addr:$dst, VR256X:$src)>;
2989 def : Pat<(alignedstore256 (v32i8 VR256X:$src), addr:$dst),
2990 (VMOVDQA32Z256mr addr:$dst, VR256X:$src)>;
2991 def : Pat<(store (v16i16 VR256X:$src), addr:$dst),
2992 (VMOVDQU32Z256mr addr:$dst, VR256X:$src)>;
2993 def : Pat<(store (v32i8 VR256X:$src), addr:$dst),
2994 (VMOVDQU32Z256mr addr:$dst, VR256X:$src)>;
2997 let Predicates = [HasVLX] in {
2998 // Special patterns for storing subvector extracts of lower 128-bits of 256.
2999 // Its cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
// Aligned variants: low xmm of a ymm stored with the 128-bit aligned move.
3000 def : Pat<(alignedstore (v2f64 (extract_subvector
3001 (v4f64 VR256X:$src), (iPTR 0))), addr:$dst),
3002 (VMOVAPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
3003 def : Pat<(alignedstore (v4f32 (extract_subvector
3004 (v8f32 VR256X:$src), (iPTR 0))), addr:$dst),
3005 (VMOVAPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
3006 def : Pat<(alignedstore (v2i64 (extract_subvector
3007 (v4i64 VR256X:$src), (iPTR 0))), addr:$dst),
3008 (VMOVDQA64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
3009 def : Pat<(alignedstore (v4i32 (extract_subvector
3010 (v8i32 VR256X:$src), (iPTR 0))), addr:$dst),
3011 (VMOVDQA32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
3012 def : Pat<(alignedstore (v8i16 (extract_subvector
3013 (v16i16 VR256X:$src), (iPTR 0))), addr:$dst),
3014 (VMOVDQA32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
3015 def : Pat<(alignedstore (v16i8 (extract_subvector
3016 (v32i8 VR256X:$src), (iPTR 0))), addr:$dst),
3017 (VMOVDQA32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
// Unaligned variants of the same extract-then-store patterns.
3019 def : Pat<(store (v2f64 (extract_subvector
3020 (v4f64 VR256X:$src), (iPTR 0))), addr:$dst),
3021 (VMOVUPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
3022 def : Pat<(store (v4f32 (extract_subvector
3023 (v8f32 VR256X:$src), (iPTR 0))), addr:$dst),
3024 (VMOVUPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
3025 def : Pat<(store (v2i64 (extract_subvector
3026 (v4i64 VR256X:$src), (iPTR 0))), addr:$dst),
3027 (VMOVDQU64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
3028 def : Pat<(store (v4i32 (extract_subvector
3029 (v8i32 VR256X:$src), (iPTR 0))), addr:$dst),
3030 (VMOVDQU32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
3031 def : Pat<(store (v8i16 (extract_subvector
3032 (v16i16 VR256X:$src), (iPTR 0))), addr:$dst),
3033 (VMOVDQU32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
3034 def : Pat<(store (v16i8 (extract_subvector
3035 (v32i8 VR256X:$src), (iPTR 0))), addr:$dst),
3036 (VMOVDQU32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
3038 // Special patterns for storing subvector extracts of lower 128-bits of 512.
3039 // Its cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
// Aligned variants: low xmm of a zmm stored with the 128-bit aligned move.
3040 def : Pat<(alignedstore (v2f64 (extract_subvector
3041 (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
3042 (VMOVAPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
3043 def : Pat<(alignedstore (v4f32 (extract_subvector
3044 (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
3045 (VMOVAPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
3046 def : Pat<(alignedstore (v2i64 (extract_subvector
3047 (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
3048 (VMOVDQA64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
3049 def : Pat<(alignedstore (v4i32 (extract_subvector
3050 (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
3051 (VMOVDQA32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
3052 def : Pat<(alignedstore (v8i16 (extract_subvector
3053 (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
3054 (VMOVDQA32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
3055 def : Pat<(alignedstore (v16i8 (extract_subvector
3056 (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
3057 (VMOVDQA32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
// Unaligned variants of the same extract-then-store patterns.
3059 def : Pat<(store (v2f64 (extract_subvector
3060 (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
3061 (VMOVUPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
3062 def : Pat<(store (v4f32 (extract_subvector
3063 (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
3064 (VMOVUPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
3065 def : Pat<(store (v2i64 (extract_subvector
3066 (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
3067 (VMOVDQU64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
3068 def : Pat<(store (v4i32 (extract_subvector
3069 (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
3070 (VMOVDQU32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
3071 def : Pat<(store (v8i16 (extract_subvector
3072 (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
3073 (VMOVDQU32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
3074 def : Pat<(store (v16i8 (extract_subvector
3075 (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
3076 (VMOVDQU32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
3078 // Special patterns for storing subvector extracts of lower 256-bits of 512.
3079 // Its cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
// Aligned 256-bit stores must use the `alignedstore256` predicate (>= 32-byte
// alignment): VMOV*A*Z256mr faults (#GP) on merely 16-byte-aligned addresses.
// FIX(review): the v8f32 pattern below previously used plain `alignedstore`
// (16-byte predicate), inconsistent with its five siblings, which could select
// VMOVAPSZ256mr for a 16-byte-aligned store and fault at run time.
3080 def : Pat<(alignedstore256 (v4f64 (extract_subvector
3081 (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
3082 (VMOVAPDZ256mr addr:$dst, (v4f64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3083 def : Pat<(alignedstore256 (v8f32 (extract_subvector
3084 (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
3085 (VMOVAPSZ256mr addr:$dst, (v8f32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3086 def : Pat<(alignedstore256 (v4i64 (extract_subvector
3087 (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
3088 (VMOVDQA64Z256mr addr:$dst, (v4i64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3089 def : Pat<(alignedstore256 (v8i32 (extract_subvector
3090 (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
3091 (VMOVDQA32Z256mr addr:$dst, (v8i32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3092 def : Pat<(alignedstore256 (v16i16 (extract_subvector
3093 (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
3094 (VMOVDQA32Z256mr addr:$dst, (v16i16 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3095 def : Pat<(alignedstore256 (v32i8 (extract_subvector
3096 (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
3097 (VMOVDQA32Z256mr addr:$dst, (v32i8 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
// Unaligned counterparts: plain `store` selects the VMOV*U* forms, which have
// no alignment requirement.
3099 def : Pat<(store (v4f64 (extract_subvector
3100 (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
3101 (VMOVUPDZ256mr addr:$dst, (v4f64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3102 def : Pat<(store (v8f32 (extract_subvector
3103 (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
3104 (VMOVUPSZ256mr addr:$dst, (v8f32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3105 def : Pat<(store (v4i64 (extract_subvector
3106 (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
3107 (VMOVDQU64Z256mr addr:$dst, (v4i64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3108 def : Pat<(store (v8i32 (extract_subvector
3109 (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
3110 (VMOVDQU32Z256mr addr:$dst, (v8i32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3111 def : Pat<(store (v16i16 (extract_subvector
3112 (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
3113 (VMOVDQU32Z256mr addr:$dst, (v16i16 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3114 def : Pat<(store (v32i8 (extract_subvector
3115 (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
3116 (VMOVDQU32Z256mr addr:$dst, (v32i8 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3120 // Move Int Doubleword to Packed Double Int

// GPR <-> XMM moves (vmovd / vmovq). NOTE(review): gaps in the embedded
// numbering (3121, 3127, 3130, 3135, 3140, 3157) show this pasted region
// dropped lines — e.g. the trailing `EVEX;` of VMOVDI2PDIZrr, the
// `[(set VR128X:$dst,` operand lines, the `(ins i64mem:$src),` line of
// VMOV64toPQIZrm, and a closing brace — restore from upstream before use.
3122 let ExeDomain = SSEPackedInt in {

3123 def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),

3124 "vmovd\t{$src, $dst|$dst, $src}",

3125 [(set VR128X:$dst,

3126 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
3128 def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
3129 "vmovd\t{$src, $dst|$dst, $src}",
3131 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
3132 IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>;
3133 def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
3134 "vmovq\t{$src, $dst|$dst, $src}",
3136 (v2i64 (scalar_to_vector GR64:$src)))],
3137 IIC_SSE_MOVDQ>, EVEX, VEX_W;
// Load form exists only for the disassembler; no ISel pattern.
3138 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
3139 def VMOV64toPQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
3141 "vmovq\t{$src, $dst|$dst, $src}", []>,
3142 EVEX, VEX_W, EVEX_CD8<64, CD8VT1>;
// GR64 <-> FR64X bitcasts, used internally by codegen only.
3143 let isCodeGenOnly = 1 in {
3144 def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64X:$dst), (ins GR64:$src),
3145 "vmovq\t{$src, $dst|$dst, $src}",
3146 [(set FR64X:$dst, (bitconvert GR64:$src))],
3147 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
3148 def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64X:$src),
3149 "vmovq\t{$src, $dst|$dst, $src}",
3150 [(set GR64:$dst, (bitconvert FR64X:$src))],
3151 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
3152 def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64X:$src),
3153 "vmovq\t{$src, $dst|$dst, $src}",
3154 [(store (i64 (bitconvert FR64X:$src)), addr:$dst)],
3155 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,

3156 EVEX_CD8<64, CD8VT1>;

3158 } // ExeDomain = SSEPackedInt
\r
3160 // Move Int Doubleword to Single Scalar

// GR32 -> FR32X bitcast moves; codegen-only (no user-visible mnemonic form).
3162 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {

3163 def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),

3164 "vmovd\t{$src, $dst|$dst, $src}",

3165 [(set FR32X:$dst, (bitconvert GR32:$src))],

3166 IIC_SSE_MOVDQ>, EVEX;
// Memory form: load an i32 and reinterpret it as f32.
3168 def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
3169 "vmovd\t{$src, $dst|$dst, $src}",

3170 [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],

3171 IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>;

3172 } // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
\r
3174 // Move doubleword from xmm register to r/m32

// Extract element 0 of a v4i32 to a GPR / to memory (vmovd).
// NOTE(review): embedded-numbering gap at 3181 — the trailing `EVEX;` line of
// VMOVPDI2DIZrr appears to have been dropped from this paste.
3176 let ExeDomain = SSEPackedInt in {

3177 def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),

3178 "vmovd\t{$src, $dst|$dst, $src}",

3179 [(set GR32:$dst, (extractelt (v4i32 VR128X:$src),

3180 (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
3182 def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
3183 (ins i32mem:$dst, VR128X:$src),
3184 "vmovd\t{$src, $dst|$dst, $src}",
3185 [(store (i32 (extractelt (v4i32 VR128X:$src),

3186 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,

3187 EVEX, EVEX_CD8<32, CD8VT1>;

3188 } // ExeDomain = SSEPackedInt
\r
3190 // Move quadword from xmm1 register to r/m64

// Extract element 0 of a v2i64 to a GPR64 / to memory (vmovq); 64-bit-mode
// only. NOTE(review): numbering gaps (3196, 3218) indicate dropped lines —
// the `(iPTR 0)))],` operand line of VMOVPQIto64Zrr and the trailing
// `EVEX, VEX_W;` of VMOVPQI2QIZrr.
3192 let ExeDomain = SSEPackedInt in {

3193 def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),

3194 "vmovq\t{$src, $dst|$dst, $src}",

3195 [(set GR64:$dst, (extractelt (v2i64 VR128X:$src),

3197 IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_W,
3198 Requires<[HasAVX512, In64BitMode]>;
// Store form exists only for the disassembler; no ISel pattern.
3200 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
3201 def VMOVPQIto64Zmr : I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128X:$src),
3202 "vmovq\t{$src, $dst|$dst, $src}",
3203 [], IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_W,
3204 Requires<[HasAVX512, In64BitMode]>;
// 0xD6 encoding of vmovq store.
3206 def VMOVPQI2QIZmr : I<0xD6, MRMDestMem, (outs),
3207 (ins i64mem:$dst, VR128X:$src),
3208 "vmovq\t{$src, $dst|$dst, $src}",
3209 [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
3210 addr:$dst)], IIC_SSE_MOVDQ>,
3211 EVEX, PD, VEX_W, EVEX_CD8<64, CD8VT1>,
3212 Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
// Register-to-register alternate encoding, printed as "vmovq.s".
3214 let hasSideEffects = 0 in
3215 def VMOVPQI2QIZrr : AVX512BI<0xD6, MRMDestReg, (outs VR128X:$dst),
3216 (ins VR128X:$src),

3217 "vmovq.s\t{$src, $dst|$dst, $src}",[]>,

3219 } // ExeDomain = SSEPackedInt
\r
3221 // Move Scalar Single to Double Int

// FR32X -> GR32 bitcast moves (and store form); codegen-only.
// NOTE(review): numbering gap at 3225 — the `(ins FR32X:$src),` line of
// VMOVSS2DIZrr appears to have been dropped from this paste.
3223 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {

3224 def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),

3226 "vmovd\t{$src, $dst|$dst, $src}",

3227 [(set GR32:$dst, (bitconvert FR32X:$src))],
3228 IIC_SSE_MOVD_ToGP>, EVEX;
3229 def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
3230 (ins i32mem:$dst, FR32X:$src),
3231 "vmovd\t{$src, $dst|$dst, $src}",

3232 [(store (i32 (bitconvert FR32X:$src)), addr:$dst)],

3233 IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>;

3234 } // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
\r
3236 // Move Quadword Int to Packed Quadword Int

// Load a single i64 into the low lane of an xmm register (vmovq load).
// NOTE(review): EVEX_CD8<8, CD8VT8> yields the same disp8 scale (8 bytes)
// as EVEX_CD8<64, CD8VT1>; the latter would be the conventional spelling —
// confirm against upstream.
3238 let ExeDomain = SSEPackedInt in {

3239 def VMOVQI2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),

3240 (ins i64mem:$src),

3241 "vmovq\t{$src, $dst|$dst, $src}",

3242 [(set VR128X:$dst,

3243 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,

3244 EVEX, VEX_W, EVEX_CD8<8, CD8VT8>;

3245 } // ExeDomain = SSEPackedInt
\r
3247 //===----------------------------------------------------------------------===//
\r
3248 // AVX-512 MOVSS, MOVSD
\r
3249 //===----------------------------------------------------------------------===//
// Defines the EVEX scalar-move instruction family (vmovss/vmovsd): plain,
// masked (rrk), zero-masked (rrkz), scalar load (rm/rmk/rmkz) and scalar
// store (mr/mrk) forms. NOTE(review): numbering gaps (3265, 3293, 3297,
// 3303) show dropped lines in this paste — at least the zero operand of the
// rrkz pattern, the `EVEX;` of `mr`, and the closing braces of the
// mayLoad group and the multiclass. Restore from upstream before use.
3251 multiclass avx512_move_scalar<string asm, SDNode OpNode,
3252 X86VectorVTInfo _> {
3253 def rr : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
3254 (ins _.RC:$src1, _.FRC:$src2),
3255 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3256 [(set _.RC:$dst, (_.VT (OpNode _.RC:$src1,
3257 (scalar_to_vector _.FRC:$src2))))],
3258 _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V;
// Zero-masked register form: elements not selected by $mask become zero.
3259 def rrkz : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
3260 (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
3261 !strconcat(asm, "\t{$src2, $src1, $dst {${mask}} {z}|",
3262 "$dst {${mask}} {z}, $src1, $src2}"),
3263 [(set _.RC:$dst, (_.VT (X86selects _.KRCWM:$mask,
3264 (_.VT (OpNode _.RC:$src1, _.RC:$src2)),
3266 _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V, EVEX_KZ;
// Merge-masked register form: unselected elements come from $src0.
3267 let Constraints = "$src0 = $dst" in
3268 def rrk : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
3269 (ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
3270 !strconcat(asm, "\t{$src2, $src1, $dst {${mask}}|",
3271 "$dst {${mask}}, $src1, $src2}"),
3272 [(set _.RC:$dst, (_.VT (X86selects _.KRCWM:$mask,
3273 (_.VT (OpNode _.RC:$src1, _.RC:$src2)),
3274 (_.VT _.RC:$src0))))],
3275 _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V, EVEX_K;
// Plain scalar load into an FR register.
3276 let canFoldAsLoad = 1, isReMaterializable = 1 in
3277 def rm : AVX512PI<0x10, MRMSrcMem, (outs _.FRC:$dst), (ins _.ScalarMemOp:$src),
3278 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
3279 [(set _.FRC:$dst, (_.ScalarLdFrag addr:$src))],
3280 _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX;
// Masked loads carry no ISel pattern; they are selected manually by the
// lowering patterns further below.
3281 let mayLoad = 1, hasSideEffects = 0 in {
3282 let Constraints = "$src0 = $dst" in
3283 def rmk : AVX512PI<0x10, MRMSrcMem, (outs _.RC:$dst),
3284 (ins _.RC:$src0, _.KRCWM:$mask, _.ScalarMemOp:$src),
3285 !strconcat(asm, "\t{$src, $dst {${mask}}|",
3286 "$dst {${mask}}, $src}"),
3287 [], _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX, EVEX_K;
3288 def rmkz : AVX512PI<0x10, MRMSrcMem, (outs _.RC:$dst),
3289 (ins _.KRCWM:$mask, _.ScalarMemOp:$src),
3290 !strconcat(asm, "\t{$src, $dst {${mask}} {z}|",
3291 "$dst {${mask}} {z}, $src}"),
3292 [], _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX, EVEX_KZ;
// Scalar store; masked store (mrk) again has no pattern.
3294 def mr: AVX512PI<0x11, MRMDestMem, (outs), (ins _.ScalarMemOp:$dst, _.FRC:$src),
3295 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
3296 [(store _.FRC:$src, addr:$dst)], _.ExeDomain, IIC_SSE_MOV_S_MR>,
3298 let mayStore = 1, hasSideEffects = 0 in
3299 def mrk: AVX512PI<0x11, MRMDestMem, (outs),
3300 (ins _.ScalarMemOp:$dst, VK1WM:$mask, _.FRC:$src),
3301 !strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
3302 [], _.ExeDomain, IIC_SSE_MOV_S_MR>, EVEX, EVEX_K;
// Instantiate the scalar-move family for f32 (vmovss, XS prefix) and
// f64 (vmovsd, XD prefix + VEX_W).
3305 defm VMOVSSZ : avx512_move_scalar<"vmovss", X86Movss, f32x_info>,
3306 VEX_LIG, XS, EVEX_CD8<32, CD8VT1>;
3308 defm VMOVSDZ : avx512_move_scalar<"vmovsd", X86Movsd, f64x_info>,
3309 VEX_LIG, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
// Folds a GPR-based scalar select feeding a scalar move into the masked
// rrk/rrkz forms (the i1 mask is a truncated GR32). NOTE(review): numbering
// gaps (3323, 3325-3326, 3334, 3336+) show dropped lines — the `$src0`
// operand and `_.RC)>;` terminators of both patterns, and the multiclass
// closing brace. Restore from upstream before use.
3312 multiclass avx512_move_scalar_lowering<string InstrStr, SDNode OpNode,
3313 PatLeaf ZeroFP, X86VectorVTInfo _> {
3315 def : Pat<(_.VT (OpNode _.RC:$src0,
3316 (_.VT (scalar_to_vector
3317 (_.EltVT (X86selects (i1 (trunc GR32:$mask)),
3318 (_.EltVT _.FRC:$src1),
3319 (_.EltVT _.FRC:$src2))))))),
3320 (COPY_TO_REGCLASS (!cast<Instruction>(InstrStr#rrk)
3321 (COPY_TO_REGCLASS _.FRC:$src2, _.RC),
3322 (COPY_TO_REGCLASS GR32:$mask, VK1WM),
3324 (COPY_TO_REGCLASS _.FRC:$src1, _.RC)),
// Zero-masked variant: the false operand is the floating-point zero
// PatLeaf, so the rrkz form is used instead of rrk.
3327 def : Pat<(_.VT (OpNode _.RC:$src0,
3328 (_.VT (scalar_to_vector
3329 (_.EltVT (X86selects (i1 (trunc GR32:$mask)),
3330 (_.EltVT _.FRC:$src1),
3331 (_.EltVT ZeroFP))))))),
3332 (COPY_TO_REGCLASS (!cast<Instruction>(InstrStr#rrkz)
3333 (COPY_TO_REGCLASS GR32:$mask, VK1WM),
3335 (COPY_TO_REGCLASS _.FRC:$src1, _.RC)),
// Selects the masked scalar store (mrk) for a masked_store of a 128-bit
// value widened (via insert_subvector chains) to 512 bits. NOTE(review):
// numbering gaps (3347-3348, 3352+) show the two `(iPTR 0)))` lines of the
// pattern and the multiclass closing brace were dropped from this paste.
3340 multiclass avx512_store_scalar_lowering<string InstrStr, AVX512VLVectorVTInfo _,
3341 dag Mask, RegisterClass MaskRC> {
3343 def : Pat<(masked_store addr:$dst, Mask,
3344 (_.info512.VT (insert_subvector undef,
3345 (_.info256.VT (insert_subvector undef,
3346 (_.info128.VT _.info128.RC:$src),
3349 (!cast<Instruction>(InstrStr#mrk) addr:$dst,
3350 (i1 (COPY_TO_REGCLASS MaskRC:$mask, VK1WM)),
3351 (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>;
// Selects the masked scalar loads: zero-masked (rmkz) when the passthru is
// an all-zeros vector, merge-masked (rmk) when the passthru is a
// zero-extended 128-bit value. NOTE(review): numbering gaps (3362,
// 3365-3366, 3372-3374, 3377+) show dropped lines — `(iPTR 0))` closers,
// the `addr:$srcAddr)>;` terminators and the multiclass closing brace.
3355 multiclass avx512_load_scalar_lowering<string InstrStr, AVX512VLVectorVTInfo _,
3356 dag Mask, RegisterClass MaskRC> {
3358 def : Pat<(_.info128.VT (extract_subvector
3359 (_.info512.VT (masked_load addr:$srcAddr, Mask,
3360 (_.info512.VT (bitconvert
3361 (v16i32 immAllZerosV))))),
3363 (!cast<Instruction>(InstrStr#rmkz)
3364 (i1 (COPY_TO_REGCLASS MaskRC:$mask, VK1WM)),
3367 def : Pat<(_.info128.VT (extract_subvector
3368 (_.info512.VT (masked_load addr:$srcAddr, Mask,
3369 (_.info512.VT (insert_subvector undef,
3370 (_.info256.VT (insert_subvector undef,
3371 (_.info128.VT (X86vzmovl _.info128.RC:$src)),
3375 (!cast<Instruction>(InstrStr#rmk) _.info128.RC:$src,
3376 (i1 (COPY_TO_REGCLASS MaskRC:$mask, VK1WM)),
// Instantiate the lowering patterns above for vmovss/vmovsd. The three
// store/load variants differ only in how the i1 mask is produced: from a
// truncated GR32, a GR16, or a GR8 source.
3381 defm : avx512_move_scalar_lowering<"VMOVSSZ", X86Movss, fp32imm0, v4f32x_info>;
3382 defm : avx512_move_scalar_lowering<"VMOVSDZ", X86Movsd, fp64imm0, v2f64x_info>;
3384 defm : avx512_store_scalar_lowering<"VMOVSSZ", avx512vl_f32_info,
3385 (v16i1 (bitconvert (i16 (trunc (and GR32:$mask, (i32 1)))))), GR32>;
3386 defm : avx512_store_scalar_lowering<"VMOVSSZ", avx512vl_f32_info,
3387 (v16i1 (bitconvert (i16 (and GR16:$mask, (i16 1))))), GR16>;
3388 defm : avx512_store_scalar_lowering<"VMOVSDZ", avx512vl_f64_info,
3389 (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))), GR8>;
3391 defm : avx512_load_scalar_lowering<"VMOVSSZ", avx512vl_f32_info,
3392 (v16i1 (bitconvert (i16 (trunc (and GR32:$mask, (i32 1)))))), GR32>;
3393 defm : avx512_load_scalar_lowering<"VMOVSSZ", avx512vl_f32_info,
3394 (v16i1 (bitconvert (i16 (and GR16:$mask, (i16 1))))), GR16>;
3395 defm : avx512_load_scalar_lowering<"VMOVSDZ", avx512vl_f64_info,
3396 (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))), GR8>;
// Lower a scalar X86selects on FR32X/FR64X via the merge-masked vmovss/
// vmovsd register forms (values are shuttled through VR128X; the $src0
// merge operand is IMPLICIT_DEF since only lane 0 is observed).
3398 def : Pat<(f32 (X86selects VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
3399 (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
3400 VK1WM:$mask, (v4f32 (IMPLICIT_DEF)),(COPY_TO_REGCLASS FR32X:$src1, VR128X)), FR32X)>;
3402 def : Pat<(f64 (X86selects VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
3403 (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
3404 VK1WM:$mask, (v2f64 (IMPLICIT_DEF)), (COPY_TO_REGCLASS FR64X:$src1, VR128X)), FR64X)>;
// Lower the mask_store_ss intrinsic directly to the masked scalar store.
3406 def : Pat<(int_x86_avx512_mask_store_ss addr:$dst, VR128X:$src, GR8:$mask),
3407 (VMOVSSZmrk addr:$dst, (i1 (COPY_TO_REGCLASS GR8:$mask, VK1WM)),
3408 (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
// Reversed-operand (MRMDestReg) encodings, printed with a ".s" suffix so
// the assembler can round-trip both encodings. Assembly-only (no patterns).
3410 let hasSideEffects = 0 in
3411 defm VMOVSSZrr_REV : AVX512_maskable_in_asm<0x11, MRMDestReg, f32x_info,
3412 (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2),
3413 "vmovss.s", "$src2, $src1", "$src1, $src2", []>,
3414 XS, EVEX_4V, VEX_LIG;
// NOTE(review): "VMOVSSDrr_REV" looks like a typo for "VMOVSDZrr_REV"
// (this is the f64/vmovsd variant) — confirm against upstream naming.
3416 let hasSideEffects = 0 in
3417 defm VMOVSSDrr_REV : AVX512_maskable_in_asm<0x11, MRMDestReg, f64x_info,
3418 (outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2),
3419 "vmovsd.s", "$src2, $src1", "$src1, $src2", []>,
3420 XD, EVEX_4V, VEX_LIG, VEX_W;
// Selection patterns for vmovss/vmovsd under plain AVX-512: zero-extending
// scalar moves, zero-extended loads, shuffle lowerings (X86Movss/X86Movsd/
// X86Movlps/X86Movlpd), and 256/512-bit variants that operate on the low
// xmm subregister and re-wrap with SUBREG_TO_REG.
// NOTE(review): gaps in the embedded numbering (3434-3435, 3453, 3480,
// 3496, 3542, 3558, 3563, 3575, 3580, 3585, 3595) show this paste dropped
// lines — several `sub_xmm)>;` pattern terminators, an `addr:$dst),` line,
// comment lines, and the closing braces of the AddedComplexity and
// Predicates scopes. Restore from upstream before use.
3422 let Predicates = [HasAVX512] in {
3423 let AddedComplexity = 15 in {
3424 // Move scalar to XMM zero-extended, zeroing a VR128X then do a
3425 // MOVS{S,D} to the lower bits.
3426 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
3427 (VMOVSSZrr (v4f32 (V_SET0)), FR32X:$src)>;
3428 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
3429 (VMOVSSZrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
3430 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
3431 (VMOVSSZrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
3432 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))),
3433 (VMOVSDZrr (v2f64 (V_SET0)), FR64X:$src)>;
3436 // Move low f32 and clear high bits.
3437 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
3438 (SUBREG_TO_REG (i32 0),
3439 (VMOVSSZrr (v4f32 (V_SET0)),
3440 (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
3441 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
3442 (SUBREG_TO_REG (i32 0),
3443 (VMOVSSZrr (v4i32 (V_SET0)),
3444 (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
3445 def : Pat<(v16f32 (X86vzmovl (v16f32 VR512:$src))),
3446 (SUBREG_TO_REG (i32 0),
3447 (VMOVSSZrr (v4f32 (V_SET0)),
3448 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm)), sub_xmm)>;
3449 def : Pat<(v16i32 (X86vzmovl (v16i32 VR512:$src))),
3450 (SUBREG_TO_REG (i32 0),
3451 (VMOVSSZrr (v4i32 (V_SET0)),
3452 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm)), sub_xmm)>;
3454 let AddedComplexity = 20 in {
3455 // MOVSSrm zeros the high parts of the register; represent this
3456 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
3457 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
3458 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
3459 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
3460 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
3461 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
3462 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
3463 def : Pat<(v4f32 (X86vzload addr:$src)),
3464 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
3466 // MOVSDrm zeros the high parts of the register; represent this
3467 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
3468 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
3469 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
3470 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
3471 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
3472 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
3473 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
3474 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
3475 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
3476 def : Pat<(v2f64 (X86vzload addr:$src)),
3477 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
3479 // Represent the same patterns above but in the form they appear for
3481 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
3482 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
3483 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
3484 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
3485 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
3486 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
3487 def : Pat<(v8f32 (X86vzload addr:$src)),
3488 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
3489 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
3490 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
3491 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
3492 def : Pat<(v4f64 (X86vzload addr:$src)),
3493 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
3495 // Represent the same patterns above but in the form they appear for
3497 def : Pat<(v16i32 (X86vzmovl (insert_subvector undef,
3498 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
3499 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
3500 def : Pat<(v16f32 (X86vzmovl (insert_subvector undef,
3501 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
3502 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
3503 def : Pat<(v16f32 (X86vzload addr:$src)),
3504 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
3505 def : Pat<(v8f64 (X86vzmovl (insert_subvector undef,
3506 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
3507 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
3508 def : Pat<(v8f64 (X86vzload addr:$src)),
3509 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
3511 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
3512 (v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
3513 (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSZrr (v4f32 (V_SET0)),
3514 FR32X:$src)), sub_xmm)>;
3515 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
3516 (v2f64 (scalar_to_vector FR64X:$src)), (iPTR 0)))),
3517 (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDZrr (v2f64 (V_SET0)),
3518 FR64X:$src)), sub_xmm)>;
3519 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
3520 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
3521 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
3523 // Move low f64 and clear high bits.
3524 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
3525 (SUBREG_TO_REG (i32 0),
3526 (VMOVSDZrr (v2f64 (V_SET0)),
3527 (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;
3528 def : Pat<(v8f64 (X86vzmovl (v8f64 VR512:$src))),
3529 (SUBREG_TO_REG (i32 0),
3530 (VMOVSDZrr (v2f64 (V_SET0)),
3531 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)), sub_xmm)>;
3533 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
3534 (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (V_SET0)),
3535 (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;
3536 def : Pat<(v8i64 (X86vzmovl (v8i64 VR512:$src))),
3537 (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (V_SET0)),
3538 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm)), sub_xmm)>;
3540 // Extract and store.
3541 def : Pat<(store (f32 (extractelt (v4f32 VR128X:$src), (iPTR 0))),
3543 (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
3545 // Shuffle with VMOVSS
3546 def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
3547 (VMOVSSZrr (v4i32 VR128X:$src1),
3548 (COPY_TO_REGCLASS (v4i32 VR128X:$src2), FR32X))>;
3549 def : Pat<(v4f32 (X86Movss VR128X:$src1, VR128X:$src2)),
3550 (VMOVSSZrr (v4f32 VR128X:$src1),
3551 (COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>;
3554 def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)),
3555 (SUBREG_TO_REG (i32 0),
3556 (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm),
3557 (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)),
3559 def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)),
3560 (SUBREG_TO_REG (i32 0),
3561 (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm),
3562 (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)),
3565 // Shuffle with VMOVSD
3566 def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)),
3567 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
3568 def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)),
3569 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
3570 def : Pat<(v4f32 (X86Movsd VR128X:$src1, VR128X:$src2)),
3571 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
3572 def : Pat<(v4i32 (X86Movsd VR128X:$src1, VR128X:$src2)),
3573 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
3576 def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)),
3577 (SUBREG_TO_REG (i32 0),
3578 (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm),
3579 (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)),
3581 def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)),
3582 (SUBREG_TO_REG (i32 0),
3583 (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm),
3584 (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)),
3587 def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
3588 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
3589 def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
3590 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
3591 def : Pat<(v4f32 (X86Movlps VR128X:$src1, VR128X:$src2)),
3592 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
3593 def : Pat<(v4i32 (X86Movlps VR128X:$src1, VR128X:$src2)),
3594 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
// vmovq xmm,xmm: copies the low i64 lane and zeroes the upper lane
// (matches the X86vzmovl node on v2i64).
3597 let AddedComplexity = 15 in
3598 def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
3600 "vmovq\t{$src, $dst|$dst, $src}",
3601 [(set VR128X:$dst, (v2i64 (X86vzmovl
3602 (v2i64 VR128X:$src))))],
3603 IIC_SSE_MOVQ_RR>, EVEX, VEX_W;
// Selection patterns for GPR->vector zero-extending moves (vmovd/vmovq),
// their load forms, and X86Vinsert into a zero/undef vector.
// NOTE(review): embedded-numbering gaps (3641-3642, 3656-3657) show dropped
// closing braces for the AddedComplexity and Predicates scopes — the
// X86Vinsert patterns at 3658+ sat outside the Predicates block upstream.
3605 let Predicates = [HasAVX512] in {
3606 let AddedComplexity = 15 in {
3607 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
3608 (VMOVDI2PDIZrr GR32:$src)>;
3610 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
3611 (VMOV64toPQIZrr GR64:$src)>;
3613 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
3614 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
3615 (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
3617 def : Pat<(v8i64 (X86vzmovl (insert_subvector undef,
3618 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
3619 (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
3621 // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
3622 let AddedComplexity = 20 in {
3623 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
3624 (VMOVDI2PDIZrm addr:$src)>;
3625 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3626 (VMOVDI2PDIZrm addr:$src)>;
3627 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3628 (VMOVDI2PDIZrm addr:$src)>;
3629 def : Pat<(v4i32 (X86vzload addr:$src)),
3630 (VMOVDI2PDIZrm addr:$src)>;
3631 def : Pat<(v8i32 (X86vzload addr:$src)),
3632 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
3633 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3634 (VMOVQI2PQIZrm addr:$src)>;
3635 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
3636 (VMOVZPQILo2PQIZrr VR128X:$src)>;
3637 def : Pat<(v2i64 (X86vzload addr:$src)),
3638 (VMOVQI2PQIZrm addr:$src)>;
3639 def : Pat<(v4i64 (X86vzload addr:$src)),
3640 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
3643 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
3644 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
3645 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
3646 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
3647 def : Pat<(v16i32 (X86vzmovl (insert_subvector undef,
3648 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
3649 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
3651 // Use regular 128-bit instructions to match 512-bit scalar_to_vec+zext.
3652 def : Pat<(v16i32 (X86vzload addr:$src)),
3653 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
3654 def : Pat<(v8i64 (X86vzload addr:$src)),
3655 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
// Insert a GPR into element 0 of a zero or undef 512-bit vector.
3658 def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))),
3659 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
3661 def : Pat<(v8i64 (X86Vinsert (bc_v8i64 (v16i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
3662 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
3664 def : Pat<(v16i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
3665 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;
3667 def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
3668 (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
3670 //===----------------------------------------------------------------------===//
3671 // AVX-512 - Non-temporals
3672 //===----------------------------------------------------------------------===//
// Non-temporal loads (vmovntdqa) at 512/256/128 bits, selected via the
// corresponding intrinsics. NOTE(review): numbering gaps (3682, 3689,
// 3694-3695) show the `(ins i256mem:$src)` / `(ins i128mem:$src)` lines and
// the closing braces of the HasVLX and SchedRW scopes were dropped here.
3673 let SchedRW = [WriteLoad] in {
3674 def VMOVNTDQAZrm : AVX512PI<0x2A, MRMSrcMem, (outs VR512:$dst),
3675 (ins i512mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}",
3676 [(set VR512:$dst, (int_x86_avx512_movntdqa addr:$src))],
3677 SSEPackedInt>, EVEX, T8PD, EVEX_V512,
3678 EVEX_CD8<64, CD8VF>;
3680 let Predicates = [HasVLX] in {
3681 def VMOVNTDQAZ256rm : AVX512PI<0x2A, MRMSrcMem, (outs VR256X:$dst),
3683 "vmovntdqa\t{$src, $dst|$dst, $src}",
3684 [(set VR256X:$dst, (int_x86_avx2_movntdqa addr:$src))],
3685 SSEPackedInt>, EVEX, T8PD, EVEX_V256,
3686 EVEX_CD8<64, CD8VF>;
3688 def VMOVNTDQAZ128rm : AVX512PI<0x2A, MRMSrcMem, (outs VR128X:$dst),
3690 "vmovntdqa\t{$src, $dst|$dst, $src}",
3691 [(set VR128X:$dst, (int_x86_sse41_movntdqa addr:$src))],
3692 SSEPackedInt>, EVEX, T8PD, EVEX_V128,
3693 EVEX_CD8<64, CD8VF>;
// One non-temporal store instruction per vector size; high AddedComplexity
// so the NT patterns win over ordinary aligned stores.
// NOTE(review): numbering gaps (3705-3706, 3715-3716) show the closing
// braces of both multiclasses were dropped from this paste.
3697 multiclass avx512_movnt<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
3698 PatFrag st_frag = alignednontemporalstore,
3699 InstrItinClass itin = IIC_SSE_MOVNT> {
3700 let SchedRW = [WriteStore], AddedComplexity = 400 in
3701 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins _.MemOp:$dst, _.RC:$src),
3702 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3703 [(st_frag (_.VT _.RC:$src), addr:$dst)],
3704 _.ExeDomain, itin>, EVEX, EVEX_CD8<_.EltSize, CD8VF>;
// Instantiates the store at 512 bits always, and at 256/128 bits under VLX.
3707 multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr,
3708 AVX512VLVectorVTInfo VTInfo> {
3709 let Predicates = [HasAVX512] in
3710 defm Z : avx512_movnt<opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
3712 let Predicates = [HasAVX512, HasVLX] in {
3713 defm Z256 : avx512_movnt<opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
3714 defm Z128 : avx512_movnt<opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
// Instantiate vmovntdq / vmovntpd / vmovntps across all vector widths.
3718 defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", avx512vl_i64_info>, PD;
3719 defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", avx512vl_f64_info>, PD, VEX_W;
3720 defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", avx512vl_f32_info>, PS;
// Extra 512-bit patterns: other element types reuse VMOVNTDQZmr for stores,
// and all 512-bit non-temporal loads map to VMOVNTDQAZrm.
3722 let Predicates = [HasAVX512], AddedComplexity = 400 in {
3723 def : Pat<(alignednontemporalstore (v16i32 VR512:$src), addr:$dst),
3724 (VMOVNTDQZmr addr:$dst, VR512:$src)>;
3725 def : Pat<(alignednontemporalstore (v32i16 VR512:$src), addr:$dst),
3726 (VMOVNTDQZmr addr:$dst, VR512:$src)>;
3727 def : Pat<(alignednontemporalstore (v64i8 VR512:$src), addr:$dst),
3728 (VMOVNTDQZmr addr:$dst, VR512:$src)>;
3730 def : Pat<(v8f64 (alignednontemporalload addr:$src)),
3731 (VMOVNTDQAZrm addr:$src)>;
3732 def : Pat<(v16f32 (alignednontemporalload addr:$src)),
3733 (VMOVNTDQAZrm addr:$src)>;
3734 def : Pat<(v8i64 (alignednontemporalload addr:$src)),
3735 (VMOVNTDQAZrm addr:$src)>;
3736 def : Pat<(v16i32 (bitconvert (v8i64 (alignednontemporalload addr:$src)))),
3737 (VMOVNTDQAZrm addr:$src)>;
3738 def : Pat<(v32i16 (bitconvert (v8i64 (alignednontemporalload addr:$src)))),
3739 (VMOVNTDQAZrm addr:$src)>;
3740 def : Pat<(v64i8 (bitconvert (v8i64 (alignednontemporalload addr:$src)))),
3741 (VMOVNTDQAZrm addr:$src)>;
<!-- placeholder -->
3786 //===----------------------------------------------------------------------===//
3787 // AVX-512 - Integer arithmetic
// Base multiclass for integer binops: register-register and
// register-memory forms, both maskable. NOTE(review): numbering gaps
// (3798, 3804, 3806) show dropped lines — the `[(set ...` itinerary line
// `itins.rm>,` of the rm form and the multiclass closing brace.
3789 multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
3790 X86VectorVTInfo _, OpndItins itins,
3791 bit IsCommutable = 0> {
3792 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
3793 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
3794 "$src2, $src1", "$src1, $src2",
3795 (_.VT (OpNode _.RC:$src1, _.RC:$src2)),
3796 itins.rr, IsCommutable>,
3797 AVX512BIBase, EVEX_4V;
3799 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
3800 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
3801 "$src2, $src1", "$src1, $src2",
3802 (_.VT (OpNode _.RC:$src1,
3803 (bitconvert (_.LdFrag addr:$src2)))),
3805 AVX512BIBase, EVEX_4V;
// Extends avx512_binop_rm (inherited via the ':' base list) with the embedded
// broadcast form 'rmb': a single scalar element is loaded and broadcast to
// the full vector ({1toN} syntax via _.BroadcastStr), encoded with EVEX_B.
3808 multiclass avx512_binop_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
3809 X86VectorVTInfo _, OpndItins itins,
3810 bit IsCommutable = 0> :
3811 avx512_binop_rm<opc, OpcodeStr, OpNode, _, itins, IsCommutable> {
3812 defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
3813 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
3814 "${src2}"##_.BroadcastStr##", $src1",
3815 "$src1, ${src2}"##_.BroadcastStr,
3816 (_.VT (OpNode _.RC:$src1,
3818 (_.ScalarLdFrag addr:$src2)))),
3820 AVX512BIBase, EVEX_4V, EVEX_B;
// Vector-length multiclasses: instantiate the op at 512-bit unconditionally
// under 'prd', and at 256/128-bit only when AVX512VL is also available.
3823 multiclass avx512_binop_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
3824 AVX512VLVectorVTInfo VTInfo, OpndItins itins,
3825 Predicate prd, bit IsCommutable = 0> {
3826 let Predicates = [prd] in
3827 defm Z : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
3828 IsCommutable>, EVEX_V512;
3830 let Predicates = [prd, HasVLX] in {
3831 defm Z256 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
3832 IsCommutable>, EVEX_V256;
3833 defm Z128 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
3834 IsCommutable>, EVEX_V128;
// Same as avx512_binop_rm_vl but also produces the broadcast ('rmb') forms.
3838 multiclass avx512_binop_rmb_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
3839 AVX512VLVectorVTInfo VTInfo, OpndItins itins,
3840 Predicate prd, bit IsCommutable = 0> {
3841 let Predicates = [prd] in
3842 defm Z : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
3843 IsCommutable>, EVEX_V512;
3845 let Predicates = [prd, HasVLX] in {
3846 defm Z256 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
3847 IsCommutable>, EVEX_V256;
3848 defm Z128 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
3849 IsCommutable>, EVEX_V128;
// Per-element-size helpers. q/d use the broadcast-capable _rmb_vl (broadcast
// exists only for 32/64-bit elements); w/b use plain _rm_vl. Each fixes the
// VT-info family, EVEX_CD8 compressed-displacement scale, and VEX_W for q.
3853 multiclass avx512_binop_rm_vl_q<bits<8> opc, string OpcodeStr, SDNode OpNode,
3854 OpndItins itins, Predicate prd,
3855 bit IsCommutable = 0> {
3856 defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i64_info,
3857 itins, prd, IsCommutable>,
3858 VEX_W, EVEX_CD8<64, CD8VF>;
3861 multiclass avx512_binop_rm_vl_d<bits<8> opc, string OpcodeStr, SDNode OpNode,
3862 OpndItins itins, Predicate prd,
3863 bit IsCommutable = 0> {
3864 defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i32_info,
3865 itins, prd, IsCommutable>, EVEX_CD8<32, CD8VF>;
3868 multiclass avx512_binop_rm_vl_w<bits<8> opc, string OpcodeStr, SDNode OpNode,
3869 OpndItins itins, Predicate prd,
3870 bit IsCommutable = 0> {
3871 defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i16_info,
3872 itins, prd, IsCommutable>, EVEX_CD8<16, CD8VF>;
3875 multiclass avx512_binop_rm_vl_b<bits<8> opc, string OpcodeStr, SDNode OpNode,
3876 OpndItins itins, Predicate prd,
3877 bit IsCommutable = 0> {
3878 defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i8_info,
3879 itins, prd, IsCommutable>, EVEX_CD8<8, CD8VF>;
// Combiners: _dq emits D+Q variants, _bw emits B+W variants, and _all emits
// all four sizes — D/Q gated on HasAVX512, B/W gated on HasBWI.
// The "d"/"q"/"b"/"w" suffix is appended to the mnemonic here.
3882 multiclass avx512_binop_rm_vl_dq<bits<8> opc_d, bits<8> opc_q, string OpcodeStr,
3883 SDNode OpNode, OpndItins itins, Predicate prd,
3884 bit IsCommutable = 0> {
3885 defm Q : avx512_binop_rm_vl_q<opc_q, OpcodeStr#"q", OpNode, itins, prd,
3888 defm D : avx512_binop_rm_vl_d<opc_d, OpcodeStr#"d", OpNode, itins, prd,
3892 multiclass avx512_binop_rm_vl_bw<bits<8> opc_b, bits<8> opc_w, string OpcodeStr,
3893 SDNode OpNode, OpndItins itins, Predicate prd,
3894 bit IsCommutable = 0> {
3895 defm W : avx512_binop_rm_vl_w<opc_w, OpcodeStr#"w", OpNode, itins, prd,
3898 defm B : avx512_binop_rm_vl_b<opc_b, OpcodeStr#"b", OpNode, itins, prd,
3902 multiclass avx512_binop_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
3903 bits<8> opc_d, bits<8> opc_q,
3904 string OpcodeStr, SDNode OpNode,
3905 OpndItins itins, bit IsCommutable = 0> {
3906 defm NAME : avx512_binop_rm_vl_dq<opc_d, opc_q, OpcodeStr, OpNode,
3907 itins, HasAVX512, IsCommutable>,
3908 avx512_binop_rm_vl_bw<opc_b, opc_w, OpcodeStr, OpNode,
3909 itins, HasBWI, IsCommutable>;
// Binary op whose source and destination vector types differ (e.g. vpmuldq:
// i32 sources, i64 result). _Brdct describes the type used for the embedded
// broadcast memory form, which can differ from the source type.
3912 multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, OpndItins itins,
3913 SDNode OpNode,X86VectorVTInfo _Src,
3914 X86VectorVTInfo _Dst, X86VectorVTInfo _Brdct,
3915 bit IsCommutable = 0> {
3916 defm rr : AVX512_maskable<opc, MRMSrcReg, _Dst, (outs _Dst.RC:$dst),
3917 (ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
3918 "$src2, $src1","$src1, $src2",
3920 (_Src.VT _Src.RC:$src1),
3921 (_Src.VT _Src.RC:$src2))),
3922 itins.rr, IsCommutable>,
3923 AVX512BIBase, EVEX_4V;
3924 defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
3925 (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
3926 "$src2, $src1", "$src1, $src2",
3927 (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
3928 (bitconvert (_Src.LdFrag addr:$src2)))),
3930 AVX512BIBase, EVEX_4V;
// Broadcast form: scalar is loaded/broadcast as _Brdct's element type, then
// bitconverted to the source vector type before the op.
3932 defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
3933 (ins _Src.RC:$src1, _Brdct.ScalarMemOp:$src2),
3935 "${src2}"##_Brdct.BroadcastStr##", $src1",
3936 "$src1, ${src2}"##_Brdct.BroadcastStr,
3937 (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
3938 (_Brdct.VT (X86VBroadcast
3939 (_Brdct.ScalarLdFrag addr:$src2)))))),
3941 AVX512BIBase, EVEX_4V, EVEX_B;
// Instantiations of the integer add/sub/mul/avg families. The trailing 1/0
// argument is IsCommutable; saturating adds/subs are B/W-only (HasBWI).
3944 defm VPADD : avx512_binop_rm_vl_all<0xFC, 0xFD, 0xFE, 0xD4, "vpadd", add,
3945 SSE_INTALU_ITINS_P, 1>;
3946 defm VPSUB : avx512_binop_rm_vl_all<0xF8, 0xF9, 0xFA, 0xFB, "vpsub", sub,
3947 SSE_INTALU_ITINS_P, 0>;
3948 defm VPADDS : avx512_binop_rm_vl_bw<0xEC, 0xED, "vpadds", X86adds,
3949 SSE_INTALU_ITINS_P, HasBWI, 1>;
3950 defm VPSUBS : avx512_binop_rm_vl_bw<0xE8, 0xE9, "vpsubs", X86subs,
3951 SSE_INTALU_ITINS_P, HasBWI, 0>;
3952 defm VPADDUS : avx512_binop_rm_vl_bw<0xDC, 0xDD, "vpaddus", X86addus,
3953 SSE_INTALU_ITINS_P, HasBWI, 1>;
3954 defm VPSUBUS : avx512_binop_rm_vl_bw<0xD8, 0xD9, "vpsubus", X86subus,
3955 SSE_INTALU_ITINS_P, HasBWI, 0>;
// Full multiplies: VPMULLQ additionally requires AVX512DQ.
3956 defm VPMULLD : avx512_binop_rm_vl_d<0x40, "vpmulld", mul,
3957 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
3958 defm VPMULLW : avx512_binop_rm_vl_w<0xD5, "vpmullw", mul,
3959 SSE_INTALU_ITINS_P, HasBWI, 1>;
3960 defm VPMULLQ : avx512_binop_rm_vl_q<0x40, "vpmullq", mul,
3961 SSE_INTALU_ITINS_P, HasDQI, 1>, T8PD;
3962 defm VPMULHW : avx512_binop_rm_vl_w<0xE5, "vpmulhw", mulhs, SSE_INTALU_ITINS_P,
3964 defm VPMULHUW : avx512_binop_rm_vl_w<0xE4, "vpmulhuw", mulhu, SSE_INTMUL_ITINS_P,
3966 defm VPMULHRSW : avx512_binop_rm_vl_w<0x0B, "vpmulhrsw", X86mulhrs, SSE_INTMUL_ITINS_P,
3968 defm VPAVG : avx512_binop_rm_vl_bw<0xE0, 0xE3, "vpavg", X86avg,
3969 SSE_INTALU_ITINS_P, HasBWI, 1>;
// All-width instantiation of a src!=dst binary op (avx512_binop_rm2). The
// broadcast-type argument is the matching-width i64 info at every width,
// and all forms carry a 64-bit CD8 scale and VEX_W.
3971 multiclass avx512_binop_all<bits<8> opc, string OpcodeStr, OpndItins itins,
3972 AVX512VLVectorVTInfo _SrcVTInfo, AVX512VLVectorVTInfo _DstVTInfo,
3973 SDNode OpNode, Predicate prd, bit IsCommutable = 0> {
3974 let Predicates = [prd] in
3975 defm NAME#Z : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
3976 _SrcVTInfo.info512, _DstVTInfo.info512,
3977 v8i64_info, IsCommutable>,
3978 EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
3979 let Predicates = [HasVLX, prd] in {
3980 defm NAME#Z256 : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
3981 _SrcVTInfo.info256, _DstVTInfo.info256,
3982 v4i64x_info, IsCommutable>,
3983 EVEX_V256, EVEX_CD8<64, CD8VF>, VEX_W;
3984 defm NAME#Z128 : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
3985 _SrcVTInfo.info128, _DstVTInfo.info128,
3986 v2i64x_info, IsCommutable>,
3987 EVEX_V128, EVEX_CD8<64, CD8VF>, VEX_W;
// Widening multiplies (i32 sources -> i64 result) and VPMULTISHIFTQB
// (byte-granular, AVX512VBMI only).
3991 defm VPMULDQ : avx512_binop_all<0x28, "vpmuldq", SSE_INTALU_ITINS_P,
3992 avx512vl_i32_info, avx512vl_i64_info,
3993 X86pmuldq, HasAVX512, 1>,T8PD;
3994 defm VPMULUDQ : avx512_binop_all<0xF4, "vpmuludq", SSE_INTMUL_ITINS_P,
3995 avx512vl_i32_info, avx512vl_i64_info,
3996 X86pmuludq, HasAVX512, 1>;
3997 defm VPMULTISHIFTQB : avx512_binop_all<0x83, "vpmultishiftqb", SSE_INTALU_ITINS_P,
3998 avx512vl_i8_info, avx512vl_i8_info,
3999 X86multishift, HasVBMI, 0>, T8PD;
// Broadcast ('rmb') form for pack-style ops with src!=dst types; the scalar
// is broadcast in the *source* element type (unlike avx512_binop_rm2 which
// broadcasts in a separate _Brdct type).
4001 multiclass avx512_packs_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
4002 X86VectorVTInfo _Src, X86VectorVTInfo _Dst> {
4003 defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
4004 (ins _Src.RC:$src1, _Src.ScalarMemOp:$src2),
4006 "${src2}"##_Src.BroadcastStr##", $src1",
4007 "$src1, ${src2}"##_Src.BroadcastStr,
4008 (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
4009 (_Src.VT (X86VBroadcast
4010 (_Src.ScalarLdFrag addr:$src2))))))>,
4011 EVEX_4V, EVEX_B, EVEX_CD8<_Src.EltSize, CD8VF>;
// rr/rm forms for pack-style ops (different source and destination types);
// no itinerary is modeled here (NoItinerary on the rr form).
4014 multiclass avx512_packs_rm<bits<8> opc, string OpcodeStr,
4015 SDNode OpNode,X86VectorVTInfo _Src,
4016 X86VectorVTInfo _Dst, bit IsCommutable = 0> {
4017 defm rr : AVX512_maskable<opc, MRMSrcReg, _Dst, (outs _Dst.RC:$dst),
4018 (ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
4019 "$src2, $src1","$src1, $src2",
4021 (_Src.VT _Src.RC:$src1),
4022 (_Src.VT _Src.RC:$src2))),
4023 NoItinerary, IsCommutable>,
4024 EVEX_CD8<_Src.EltSize, CD8VF>, EVEX_4V;
4025 defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
4026 (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
4027 "$src2, $src1", "$src1, $src2",
4028 (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
4029 (bitconvert (_Src.LdFrag addr:$src2))))>,
4030 EVEX_4V, EVEX_CD8<_Src.EltSize, CD8VF>;
// All-width instantiators for pack and multiply-add families, gated on
// HasBWI (512-bit) plus HasVLX (256/128-bit).
// i32->i16 packs get the broadcast form too (32-bit elements support {1toN}).
4033 multiclass avx512_packs_all_i32_i16<bits<8> opc, string OpcodeStr,
4035 let Predicates = [HasBWI] in
4036 defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, v16i32_info,
4038 avx512_packs_rmb<opc, OpcodeStr, OpNode, v16i32_info,
4039 v32i16_info>, EVEX_V512;
4040 let Predicates = [HasBWI, HasVLX] in {
4041 defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, v8i32x_info,
4043 avx512_packs_rmb<opc, OpcodeStr, OpNode, v8i32x_info,
4044 v16i16x_info>, EVEX_V256;
4045 defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, v4i32x_info,
4047 avx512_packs_rmb<opc, OpcodeStr, OpNode, v4i32x_info,
4048 v8i16x_info>, EVEX_V128;
// i16->i8 packs: no broadcast form (16-bit elements have no embedded
// broadcast encoding).
4051 multiclass avx512_packs_all_i16_i8<bits<8> opc, string OpcodeStr,
4053 let Predicates = [HasBWI] in
4054 defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, v32i16_info,
4055 v64i8_info>, EVEX_V512;
4056 let Predicates = [HasBWI, HasVLX] in {
4057 defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, v16i16x_info,
4058 v32i8x_info>, EVEX_V256;
4059 defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, v8i16x_info,
4060 v16i8x_info>, EVEX_V128;
// Generic src/dst-typed variant used for VPMADDUBSW / VPMADDWD.
4064 multiclass avx512_vpmadd<bits<8> opc, string OpcodeStr,
4065 SDNode OpNode, AVX512VLVectorVTInfo _Src,
4066 AVX512VLVectorVTInfo _Dst, bit IsCommutable = 0> {
4067 let Predicates = [HasBWI] in
4068 defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, _Src.info512,
4069 _Dst.info512, IsCommutable>, EVEX_V512;
4070 let Predicates = [HasBWI, HasVLX] in {
4071 defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, _Src.info256,
4072 _Dst.info256, IsCommutable>, EVEX_V256;
4073 defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, _Src.info128,
4074 _Dst.info128, IsCommutable>, EVEX_V128;
// Pack, multiply-add, and min/max instantiations. Note VPACKUSDW uses the
// 0F38 map (AVX5128IBase) while the others use the 0F map (AVX512BIBase).
4078 defm VPACKSSDW : avx512_packs_all_i32_i16<0x6B, "vpackssdw", X86Packss>, AVX512BIBase;
4079 defm VPACKUSDW : avx512_packs_all_i32_i16<0x2b, "vpackusdw", X86Packus>, AVX5128IBase;
4080 defm VPACKSSWB : avx512_packs_all_i16_i8 <0x63, "vpacksswb", X86Packss>, AVX512BIBase;
4081 defm VPACKUSWB : avx512_packs_all_i16_i8 <0x67, "vpackuswb", X86Packus>, AVX512BIBase;
4083 defm VPMADDUBSW : avx512_vpmadd<0x04, "vpmaddubsw", X86vpmaddubsw,
4084 avx512vl_i8_info, avx512vl_i16_info>, AVX512BIBase, T8PD;
4085 defm VPMADDWD : avx512_vpmadd<0xF5, "vpmaddwd", X86vpmaddwd,
4086 avx512vl_i16_info, avx512vl_i32_info, 1>, AVX512BIBase;
// Signed/unsigned min/max: B/W forms need BWI, D/Q forms need base AVX512.
4088 defm VPMAXSB : avx512_binop_rm_vl_b<0x3C, "vpmaxsb", smax,
4089 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
4090 defm VPMAXSW : avx512_binop_rm_vl_w<0xEE, "vpmaxsw", smax,
4091 SSE_INTALU_ITINS_P, HasBWI, 1>;
4092 defm VPMAXS : avx512_binop_rm_vl_dq<0x3D, 0x3D, "vpmaxs", smax,
4093 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
4095 defm VPMAXUB : avx512_binop_rm_vl_b<0xDE, "vpmaxub", umax,
4096 SSE_INTALU_ITINS_P, HasBWI, 1>;
4097 defm VPMAXUW : avx512_binop_rm_vl_w<0x3E, "vpmaxuw", umax,
4098 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
4099 defm VPMAXU : avx512_binop_rm_vl_dq<0x3F, 0x3F, "vpmaxu", umax,
4100 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
4102 defm VPMINSB : avx512_binop_rm_vl_b<0x38, "vpminsb", smin,
4103 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
4104 defm VPMINSW : avx512_binop_rm_vl_w<0xEA, "vpminsw", smin,
4105 SSE_INTALU_ITINS_P, HasBWI, 1>;
4106 defm VPMINS : avx512_binop_rm_vl_dq<0x39, 0x39, "vpmins", smin,
4107 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
4109 defm VPMINUB : avx512_binop_rm_vl_b<0xDA, "vpminub", umin,
4110 SSE_INTALU_ITINS_P, HasBWI, 1>;
4111 defm VPMINUW : avx512_binop_rm_vl_w<0x3A, "vpminuw", umin,
4112 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
4113 defm VPMINU : avx512_binop_rm_vl_dq<0x3B, 0x3B, "vpminu", umin,
4114 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
// NOTE(review): the instruction side of these Pats (orig lines 4119-4124,
// 4126-4131 — presumably EXTRACT_SUBREG around VPMULLQZrr) was dropped by
// this extraction; consult the full file before editing.
4116 // PMULLQ: Use 512bit version to implement 128/256 bit in case NoVLX.
4117 let Predicates = [HasDQI, NoVLX] in {
4118 def : Pat<(v4i64 (mul (v4i64 VR256X:$src1), (v4i64 VR256X:$src2))),
4121 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src1, sub_ymm),
4122 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src2, sub_ymm)),
4125 def : Pat<(v2i64 (mul (v2i64 VR128X:$src1), (v2i64 VR128X:$src2))),
4128 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src1, sub_xmm),
4129 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src2, sub_xmm)),
4133 //===----------------------------------------------------------------------===//
4134 // AVX-512 Logical Instructions
4135 //===----------------------------------------------------------------------===//
// Bitwise logic ops. The operation is performed in the type's i64 vector
// form (_.i64VT) with bitconverts on the operands/result, so one pattern
// serves both integer and FP-typed registers; AVX512_maskable_logic takes
// both the unmasked and the masked (bitconverted-back) patterns.
4137 multiclass avx512_logic_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4138 X86VectorVTInfo _, OpndItins itins,
4139 bit IsCommutable = 0> {
4140 defm rr : AVX512_maskable_logic<opc, MRMSrcReg, _, (outs _.RC:$dst),
4141 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4142 "$src2, $src1", "$src1, $src2",
4143 (_.i64VT (OpNode (bitconvert (_.VT _.RC:$src1)),
4144 (bitconvert (_.VT _.RC:$src2)))),
4145 (_.VT (bitconvert (_.i64VT (OpNode _.RC:$src1,
4147 itins.rr, IsCommutable>,
4148 AVX512BIBase, EVEX_4V;
4150 defm rm : AVX512_maskable_logic<opc, MRMSrcMem, _, (outs _.RC:$dst),
4151 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
4152 "$src2, $src1", "$src1, $src2",
4153 (_.i64VT (OpNode (bitconvert (_.VT _.RC:$src1)),
4154 (bitconvert (_.LdFrag addr:$src2)))),
4155 (_.VT (bitconvert (_.i64VT (OpNode _.RC:$src1,
4156 (bitconvert (_.LdFrag addr:$src2)))))),
4158 AVX512BIBase, EVEX_4V;
// Adds the embedded-broadcast form to avx512_logic_rm (inherited base).
4161 multiclass avx512_logic_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
4162 X86VectorVTInfo _, OpndItins itins,
4163 bit IsCommutable = 0> :
4164 avx512_logic_rm<opc, OpcodeStr, OpNode, _, itins, IsCommutable> {
4165 defm rmb : AVX512_maskable_logic<opc, MRMSrcMem, _, (outs _.RC:$dst),
4166 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
4167 "${src2}"##_.BroadcastStr##", $src1",
4168 "$src1, ${src2}"##_.BroadcastStr,
4169 (_.i64VT (OpNode _.RC:$src1,
4171 (_.VT (X86VBroadcast
4172 (_.ScalarLdFrag addr:$src2)))))),
4173 (_.VT (bitconvert (_.i64VT (OpNode _.RC:$src1,
4175 (_.VT (X86VBroadcast
4176 (_.ScalarLdFrag addr:$src2)))))))),
4178 AVX512BIBase, EVEX_4V, EVEX_B;
// Vector-length and element-size wrappers for the logic ops, mirroring the
// integer-arith wrappers above (512-bit under 'prd', 256/128 under VLX;
// _d = 32-bit elements, _q = 64-bit + VEX_W, _dq emits both).
4181 multiclass avx512_logic_rmb_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
4182 AVX512VLVectorVTInfo VTInfo, OpndItins itins,
4183 Predicate prd, bit IsCommutable = 0> {
4184 let Predicates = [prd] in
4185 defm Z : avx512_logic_rmb<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
4186 IsCommutable>, EVEX_V512;
4188 let Predicates = [prd, HasVLX] in {
4189 defm Z256 : avx512_logic_rmb<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
4190 IsCommutable>, EVEX_V256;
4191 defm Z128 : avx512_logic_rmb<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
4192 IsCommutable>, EVEX_V128;
4196 multiclass avx512_logic_rm_vl_d<bits<8> opc, string OpcodeStr, SDNode OpNode,
4197 OpndItins itins, Predicate prd,
4198 bit IsCommutable = 0> {
4199 defm NAME : avx512_logic_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i32_info,
4200 itins, prd, IsCommutable>, EVEX_CD8<32, CD8VF>;
4203 multiclass avx512_logic_rm_vl_q<bits<8> opc, string OpcodeStr, SDNode OpNode,
4204 OpndItins itins, Predicate prd,
4205 bit IsCommutable = 0> {
4206 defm NAME : avx512_logic_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i64_info,
4207 itins, prd, IsCommutable>,
4208 VEX_W, EVEX_CD8<64, CD8VF>;
4211 multiclass avx512_logic_rm_vl_dq<bits<8> opc_d, bits<8> opc_q, string OpcodeStr,
4212 SDNode OpNode, OpndItins itins, Predicate prd,
4213 bit IsCommutable = 0> {
4214 defm Q : avx512_logic_rm_vl_q<opc_q, OpcodeStr#"q", OpNode, itins, prd,
4217 defm D : avx512_logic_rm_vl_d<opc_d, OpcodeStr#"d", OpNode, itins, prd,
// Logic instantiations; and/or/xor are commutable, andn (x & ~y) is not.
4221 defm VPAND : avx512_logic_rm_vl_dq<0xDB, 0xDB, "vpand", and,
4222 SSE_INTALU_ITINS_P, HasAVX512, 1>;
4223 defm VPOR : avx512_logic_rm_vl_dq<0xEB, 0xEB, "vpor", or,
4224 SSE_INTALU_ITINS_P, HasAVX512, 1>;
4225 defm VPXOR : avx512_logic_rm_vl_dq<0xEF, 0xEF, "vpxor", xor,
4226 SSE_INTALU_ITINS_P, HasAVX512, 1>;
4227 defm VPANDN : avx512_logic_rm_vl_dq<0xDF, 0xDF, "vpandn", X86andnp,
4228 SSE_INTALU_ITINS_P, HasAVX512, 0>;
4230 //===----------------------------------------------------------------------===//
4231 // AVX-512 FP arithmetic
4232 //===----------------------------------------------------------------------===//
// Scalar FP binary op. The _Int forms operate on the full XMM register
// through VecNode with a FROUND_CURRENT rounding operand; the isCodeGenOnly
// rr/rm forms use the scalar FRC register class and plain OpNode, and are
// what the compiler normally selects for scalar math.
4233 multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
4234 SDNode OpNode, SDNode VecNode, OpndItins itins,
4236 let ExeDomain = _.ExeDomain in {
4237 defm rr_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
4238 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4239 "$src2, $src1", "$src1, $src2",
4240 (VecNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
4241 (i32 FROUND_CURRENT)),
4244 defm rm_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
4245 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
4246 "$src2, $src1", "$src1, $src2",
4247 (VecNode (_.VT _.RC:$src1),
4248 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
4249 (i32 FROUND_CURRENT)),
4251 let isCodeGenOnly = 1, Predicates = [HasAVX512] in {
4252 def rr : I< opc, MRMSrcReg, (outs _.FRC:$dst),
4253 (ins _.FRC:$src1, _.FRC:$src2),
4254 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4255 [(set _.FRC:$dst, (OpNode _.FRC:$src1, _.FRC:$src2))],
4257 let isCommutable = IsCommutable;
4259 def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
4260 (ins _.FRC:$src1, _.ScalarMemOp:$src2),
4261 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4262 [(set _.FRC:$dst, (OpNode _.FRC:$src1,
4263 (_.ScalarLdFrag addr:$src2)))], itins.rm>;
// Static-rounding ('rrb') scalar variant: an explicit $rc rounding-control
// operand is encoded via EVEX.b (EVEX_B + EVEX_RC on the base, per
// AVX512_maskable_scalar usage elsewhere in the file).
4268 multiclass avx512_fp_scalar_round<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
4269 SDNode VecNode, OpndItins itins, bit IsCommutable = 0> {
4270 let ExeDomain = _.ExeDomain in
4271 defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
4272 (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr,
4273 "$rc, $src2, $src1", "$src1, $src2, $rc",
4274 (VecNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
4275 (i32 imm:$rc)), itins.rr, IsCommutable>,
// Suppress-all-exceptions ('{sae}') variant: no rounding-control operand,
// FROUND_NO_EXC is hard-wired and flagged with EVEX_B.
4278 multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
4279 SDNode VecNode, OpndItins itins, bit IsCommutable> {
4280 let ExeDomain = _.ExeDomain in
4281 defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
4282 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4283 "{sae}, $src2, $src1", "$src1, $src2, {sae}",
4284 (VecNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
4285 (i32 FROUND_NO_EXC))>, EVEX_B;
// SS/SD instantiators: combine the base scalar forms with either the
// rounding ('_round', for add/mul/sub/div) or SAE ('_sae', for min/max)
// variant, fixing the ss (XS, f32) and sd (XD+VEX_W, f64) encodings.
4288 multiclass avx512_binop_s_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
4290 SizeItins itins, bit IsCommutable> {
4291 defm SSZ : avx512_fp_scalar<opc, OpcodeStr#"ss", f32x_info, OpNode, VecNode,
4292 itins.s, IsCommutable>,
4293 avx512_fp_scalar_round<opc, OpcodeStr#"ss", f32x_info, VecNode,
4294 itins.s, IsCommutable>,
4295 XS, EVEX_4V, VEX_LIG, EVEX_CD8<32, CD8VT1>;
4296 defm SDZ : avx512_fp_scalar<opc, OpcodeStr#"sd", f64x_info, OpNode, VecNode,
4297 itins.d, IsCommutable>,
4298 avx512_fp_scalar_round<opc, OpcodeStr#"sd", f64x_info, VecNode,
4299 itins.d, IsCommutable>,
4300 XD, VEX_W, EVEX_4V, VEX_LIG, EVEX_CD8<64, CD8VT1>;
4303 multiclass avx512_binop_s_sae<bits<8> opc, string OpcodeStr, SDNode OpNode,
4305 SizeItins itins, bit IsCommutable> {
4306 defm SSZ : avx512_fp_scalar<opc, OpcodeStr#"ss", f32x_info, OpNode, VecNode,
4307 itins.s, IsCommutable>,
4308 avx512_fp_scalar_sae<opc, OpcodeStr#"ss", f32x_info, VecNode,
4309 itins.s, IsCommutable>,
4310 XS, EVEX_4V, VEX_LIG, EVEX_CD8<32, CD8VT1>;
4311 defm SDZ : avx512_fp_scalar<opc, OpcodeStr#"sd", f64x_info, OpNode, VecNode,
4312 itins.d, IsCommutable>,
4313 avx512_fp_scalar_sae<opc, OpcodeStr#"sd", f64x_info, VecNode,
4314 itins.d, IsCommutable>,
4315 XD, VEX_W, EVEX_4V, VEX_LIG, EVEX_CD8<64, CD8VT1>;
// Scalar FP arithmetic. add/mul commutable; sub/div not; min/max use SAE
// forms because IEEE min/max are not commutable (NaN/-0.0 asymmetry).
4317 defm VADD : avx512_binop_s_round<0x58, "vadd", fadd, X86faddRnd, SSE_ALU_ITINS_S, 1>;
4318 defm VMUL : avx512_binop_s_round<0x59, "vmul", fmul, X86fmulRnd, SSE_MUL_ITINS_S, 1>;
4319 defm VSUB : avx512_binop_s_round<0x5C, "vsub", fsub, X86fsubRnd, SSE_ALU_ITINS_S, 0>;
4320 defm VDIV : avx512_binop_s_round<0x5E, "vdiv", fdiv, X86fdivRnd, SSE_DIV_ITINS_S, 0>;
4321 defm VMIN : avx512_binop_s_sae <0x5D, "vmin", X86fmin, X86fminRnd, SSE_ALU_ITINS_S, 0>;
4322 defm VMAX : avx512_binop_s_sae <0x5F, "vmax", X86fmax, X86fmaxRnd, SSE_ALU_ITINS_S, 0>;
4324 // MIN/MAX nodes are commutable under "unsafe-fp-math". In this case we use
4325 // X86fminc and X86fmaxc instead of X86fmin and X86fmax
// CodeGen-only scalar rr/rm defs with isCommutable hard-wired to 1; the
// mnemonic stays vminss/vmaxss etc. so assembly output is unchanged.
4326 multiclass avx512_comutable_binop_s<bits<8> opc, string OpcodeStr,
4327 X86VectorVTInfo _, SDNode OpNode, OpndItins itins> {
4328 let isCodeGenOnly = 1, Predicates = [HasAVX512] in {
4329 def rr : I< opc, MRMSrcReg, (outs _.FRC:$dst),
4330 (ins _.FRC:$src1, _.FRC:$src2),
4331 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4332 [(set _.FRC:$dst, (OpNode _.FRC:$src1, _.FRC:$src2))],
4334 let isCommutable = 1;
4336 def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
4337 (ins _.FRC:$src1, _.ScalarMemOp:$src2),
4338 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4339 [(set _.FRC:$dst, (OpNode _.FRC:$src1,
4340 (_.ScalarLdFrag addr:$src2)))], itins.rm>;
// Commutable min/max instantiations (selected for X86fminc/X86fmaxc).
4343 defm VMINCSSZ : avx512_comutable_binop_s<0x5D, "vminss", f32x_info, X86fminc,
4344 SSE_ALU_ITINS_S.s>, XS, EVEX_4V, VEX_LIG,
4345 EVEX_CD8<32, CD8VT1>;
4347 defm VMINCSDZ : avx512_comutable_binop_s<0x5D, "vminsd", f64x_info, X86fminc,
4348 SSE_ALU_ITINS_S.d>, XD, VEX_W, EVEX_4V, VEX_LIG,
4349 EVEX_CD8<64, CD8VT1>;
4351 defm VMAXCSSZ : avx512_comutable_binop_s<0x5F, "vmaxss", f32x_info, X86fmaxc,
4352 SSE_ALU_ITINS_S.s>, XS, EVEX_4V, VEX_LIG,
4353 EVEX_CD8<32, CD8VT1>;
4355 defm VMAXCSDZ : avx512_comutable_binop_s<0x5F, "vmaxsd", f64x_info, X86fmaxc,
4356 SSE_ALU_ITINS_S.d>, XD, VEX_W, EVEX_4V, VEX_LIG,
4357 EVEX_CD8<64, CD8VT1>;
// Packed FP binary op: rr, rm, and embedded-broadcast rmb forms for one
// vector width. The mnemonic suffix (ps/pd) comes from _.Suffix.
// hasSideEffects = 0 permits OpNode to be null_frag (see VAND etc. below).
4359 multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
4360 X86VectorVTInfo _, OpndItins itins,
4362 let ExeDomain = _.ExeDomain, hasSideEffects = 0 in {
4363 defm rr: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4364 (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
4365 "$src2, $src1", "$src1, $src2",
4366 (_.VT (OpNode _.RC:$src1, _.RC:$src2)), itins.rr,
4367 IsCommutable>, EVEX_4V;
4368 let mayLoad = 1 in {
4369 defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4370 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
4371 "$src2, $src1", "$src1, $src2",
4372 (OpNode _.RC:$src1, (_.LdFrag addr:$src2)), itins.rm>,
4374 defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4375 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
4376 "${src2}"##_.BroadcastStr##", $src1",
4377 "$src1, ${src2}"##_.BroadcastStr,
4378 (OpNode _.RC:$src1, (_.VT (X86VBroadcast
4379 (_.ScalarLdFrag addr:$src2)))),
4380 itins.rm>, EVEX_4V, EVEX_B;
// Packed static-rounding ('rb', explicit $rc, EVEX_B+EVEX_RC) and SAE
// ('{sae}', FROUND_NO_EXC) variants — 512-bit only, as EVEX.b repurposes
// the broadcast bit for rounding control on register forms.
4385 multiclass avx512_fp_round_packed<bits<8> opc, string OpcodeStr, SDPatternOperator OpNodeRnd,
4386 X86VectorVTInfo _> {
4387 let ExeDomain = _.ExeDomain in
4388 defm rb: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4389 (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr##_.Suffix,
4390 "$rc, $src2, $src1", "$src1, $src2, $rc",
4391 (_.VT (OpNodeRnd _.RC:$src1, _.RC:$src2, (i32 imm:$rc)))>,
4392 EVEX_4V, EVEX_B, EVEX_RC;
4396 multiclass avx512_fp_sae_packed<bits<8> opc, string OpcodeStr, SDPatternOperator OpNodeRnd,
4397 X86VectorVTInfo _> {
4398 let ExeDomain = _.ExeDomain in
4399 defm rb: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4400 (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
4401 "{sae}, $src2, $src1", "$src1, $src2, {sae}",
4402 (_.VT (OpNodeRnd _.RC:$src1, _.RC:$src2, (i32 FROUND_NO_EXC)))>,
// Full PS/PD instantiator across vector lengths: 512-bit under 'prd',
// 128/256-bit additionally gated on HasVLX. PS forms use the PS prefix with
// 32-bit CD8 scale; PD forms use PD + VEX_W with 64-bit CD8 scale.
4406 multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
4407 Predicate prd, SizeItins itins,
4408 bit IsCommutable = 0> {
4409 let Predicates = [prd] in {
4410 defm PSZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v16f32_info,
4411 itins.s, IsCommutable>, EVEX_V512, PS,
4412 EVEX_CD8<32, CD8VF>;
4413 defm PDZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v8f64_info,
4414 itins.d, IsCommutable>, EVEX_V512, PD, VEX_W,
4415 EVEX_CD8<64, CD8VF>;
4418 // Define only if AVX512VL feature is present.
4419 let Predicates = [prd, HasVLX] in {
4420 defm PSZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, v4f32x_info,
4421 itins.s, IsCommutable>, EVEX_V128, PS,
4422 EVEX_CD8<32, CD8VF>;
4423 defm PSZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, v8f32x_info,
4424 itins.s, IsCommutable>, EVEX_V256, PS,
4425 EVEX_CD8<32, CD8VF>;
4426 defm PDZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, v2f64x_info,
4427 itins.d, IsCommutable>, EVEX_V128, PD, VEX_W,
4428 EVEX_CD8<64, CD8VF>;
4429 defm PDZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, v4f64x_info,
4430 itins.d, IsCommutable>, EVEX_V256, PD, VEX_W,
4431 EVEX_CD8<64, CD8VF>;
// 512-bit-only PS/PD instantiators for the rounding and SAE variants.
4435 multiclass avx512_fp_binop_p_round<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd> {
4436 defm PSZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, v16f32_info>,
4437 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
4438 defm PDZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, v8f64_info>,
4439 EVEX_V512, PD, VEX_W,EVEX_CD8<64, CD8VF>;
4442 multiclass avx512_fp_binop_p_sae<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd> {
4443 defm PSZ : avx512_fp_sae_packed<opc, OpcodeStr, OpNodeRnd, v16f32_info>,
4444 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
4445 defm PDZ : avx512_fp_sae_packed<opc, OpcodeStr, OpNodeRnd, v8f64_info>,
4446 EVEX_V512, PD, VEX_W,EVEX_CD8<64, CD8VF>;
// Packed FP arithmetic. The commutable VMINC/VMAXC variants are
// isCodeGenOnly; the FP logic ops (VAND..VXOR, AVX512DQ) pass null_frag —
// their selection patterns are supplied separately below.
4449 defm VADD : avx512_fp_binop_p<0x58, "vadd", fadd, HasAVX512,
4450 SSE_ALU_ITINS_P, 1>,
4451 avx512_fp_binop_p_round<0x58, "vadd", X86faddRnd>;
4452 defm VMUL : avx512_fp_binop_p<0x59, "vmul", fmul, HasAVX512,
4453 SSE_MUL_ITINS_P, 1>,
4454 avx512_fp_binop_p_round<0x59, "vmul", X86fmulRnd>;
4455 defm VSUB : avx512_fp_binop_p<0x5C, "vsub", fsub, HasAVX512, SSE_ALU_ITINS_P>,
4456 avx512_fp_binop_p_round<0x5C, "vsub", X86fsubRnd>;
4457 defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv, HasAVX512, SSE_DIV_ITINS_P>,
4458 avx512_fp_binop_p_round<0x5E, "vdiv", X86fdivRnd>;
4459 defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, HasAVX512,
4460 SSE_ALU_ITINS_P, 0>,
4461 avx512_fp_binop_p_sae<0x5D, "vmin", X86fminRnd>;
4462 defm VMAX : avx512_fp_binop_p<0x5F, "vmax", X86fmax, HasAVX512,
4463 SSE_ALU_ITINS_P, 0>,
4464 avx512_fp_binop_p_sae<0x5F, "vmax", X86fmaxRnd>;
4465 let isCodeGenOnly = 1 in {
4466 defm VMINC : avx512_fp_binop_p<0x5D, "vmin", X86fminc, HasAVX512,
4467 SSE_ALU_ITINS_P, 1>;
4468 defm VMAXC : avx512_fp_binop_p<0x5F, "vmax", X86fmaxc, HasAVX512,
4469 SSE_ALU_ITINS_P, 1>;
4471 defm VAND : avx512_fp_binop_p<0x54, "vand", null_frag, HasDQI,
4472 SSE_ALU_ITINS_P, 1>;
4473 defm VANDN : avx512_fp_binop_p<0x55, "vandn", null_frag, HasDQI,
4474 SSE_ALU_ITINS_P, 0>;
4475 defm VOR : avx512_fp_binop_p<0x56, "vor", null_frag, HasDQI,
4476 SSE_ALU_ITINS_P, 1>;
4477 defm VXOR : avx512_fp_binop_p<0x57, "vxor", null_frag, HasDQI,
4478 SSE_ALU_ITINS_P, 1>;
4480 // Patterns catch floating point selects with bitcasted integer logic ops.
// Maps masked vselect-over-bitcast-integer-logic onto the masked integer
// logic instructions (VPAND etc. k / kz / mk / mb variants).
// NOTE(review): this extraction drops several operand lines (orig 4487,
// 4492, 4499, 4504, 4513-4527) — the vselect $src0 / zero operands and the
// broadcast subtrees are incomplete here; consult the full file.
4481 multiclass avx512_fp_logical_lowering<string InstrStr, SDNode OpNode,
4482 X86VectorVTInfo _, Predicate prd> {
4483 let Predicates = [prd] in {
4484 // Masked register-register logical operations.
4485 def : Pat<(_.VT (vselect _.KRCWM:$mask,
4486 (bitconvert (_.i64VT (OpNode _.RC:$src1, _.RC:$src2))),
4488 (!cast<Instruction>(InstrStr#rrk) _.RC:$src0, _.KRCWM:$mask,
4489 _.RC:$src1, _.RC:$src2)>;
4490 def : Pat<(_.VT (vselect _.KRCWM:$mask,
4491 (bitconvert (_.i64VT (OpNode _.RC:$src1, _.RC:$src2))),
4493 (!cast<Instruction>(InstrStr#rrkz) _.KRCWM:$mask, _.RC:$src1,
4495 // Masked register-memory logical operations.
4496 def : Pat<(_.VT (vselect _.KRCWM:$mask,
4497 (bitconvert (_.i64VT (OpNode _.RC:$src1,
4498 (load addr:$src2)))),
4500 (!cast<Instruction>(InstrStr#rmk) _.RC:$src0, _.KRCWM:$mask,
4501 _.RC:$src1, addr:$src2)>;
4502 def : Pat<(_.VT (vselect _.KRCWM:$mask,
4503 (bitconvert (_.i64VT (OpNode _.RC:$src1, (load addr:$src2)))),
4505 (!cast<Instruction>(InstrStr#rmkz) _.KRCWM:$mask, _.RC:$src1,
4507 // Register-broadcast logical operations.
4508 def : Pat<(_.i64VT (OpNode _.RC:$src1,
4509 (bitconvert (_.VT (X86VBroadcast
4510 (_.ScalarLdFrag addr:$src2)))))),
4511 (!cast<Instruction>(InstrStr#rmb) _.RC:$src1, addr:$src2)>;
4512 def : Pat<(_.VT (vselect _.KRCWM:$mask,
4514 (_.i64VT (OpNode _.RC:$src1,
4517 (_.ScalarLdFrag addr:$src2))))))),
4519 (!cast<Instruction>(InstrStr#rmbk) _.RC:$src0, _.KRCWM:$mask,
4520 _.RC:$src1, addr:$src2)>;
4521 def : Pat<(_.VT (vselect _.KRCWM:$mask,
4523 (_.i64VT (OpNode _.RC:$src1,
4526 (_.ScalarLdFrag addr:$src2))))))),
4528 (!cast<Instruction>(InstrStr#rmbkz) _.KRCWM:$mask,
4529 _.RC:$src1, addr:$src2)>;
// Instantiate the FP-select lowering for every width of each integer logic
// instruction family (D = 32-bit-element forms, Q = 64-bit-element forms).
4533 multiclass avx512_fp_logical_lowering_sizes<string InstrStr, SDNode OpNode> {
4534 defm : avx512_fp_logical_lowering<InstrStr#DZ128, OpNode, v4f32x_info, HasVLX>;
4535 defm : avx512_fp_logical_lowering<InstrStr#QZ128, OpNode, v2f64x_info, HasVLX>;
4536 defm : avx512_fp_logical_lowering<InstrStr#DZ256, OpNode, v8f32x_info, HasVLX>;
4537 defm : avx512_fp_logical_lowering<InstrStr#QZ256, OpNode, v4f64x_info, HasVLX>;
4538 defm : avx512_fp_logical_lowering<InstrStr#DZ, OpNode, v16f32_info, HasAVX512>;
4539 defm : avx512_fp_logical_lowering<InstrStr#QZ, OpNode, v8f64_info, HasAVX512>;
4542 defm : avx512_fp_logical_lowering_sizes<"VPAND", and>;
4543 defm : avx512_fp_logical_lowering_sizes<"VPOR", or>;
4544 defm : avx512_fp_logical_lowering_sizes<"VPXOR", xor>;
4545 defm : avx512_fp_logical_lowering_sizes<"VPANDN", X86andnp>;
// Scalar f32/f64 logic ops have no dedicated instruction: copy the scalars
// into XMM, run the 128-bit packed DQ logic op, and copy the result back.
4547 let Predicates = [HasVLX,HasDQI] in {
4548 // Use packed logical operations for scalar ops.
4549 def : Pat<(f64 (X86fand FR64X:$src1, FR64X:$src2)),
4550 (COPY_TO_REGCLASS (VANDPDZ128rr
4551 (COPY_TO_REGCLASS FR64X:$src1, VR128X),
4552 (COPY_TO_REGCLASS FR64X:$src2, VR128X)), FR64X)>;
4553 def : Pat<(f64 (X86for FR64X:$src1, FR64X:$src2)),
4554 (COPY_TO_REGCLASS (VORPDZ128rr
4555 (COPY_TO_REGCLASS FR64X:$src1, VR128X),
4556 (COPY_TO_REGCLASS FR64X:$src2, VR128X)), FR64X)>;
4557 def : Pat<(f64 (X86fxor FR64X:$src1, FR64X:$src2)),
4558 (COPY_TO_REGCLASS (VXORPDZ128rr
4559 (COPY_TO_REGCLASS FR64X:$src1, VR128X),
4560 (COPY_TO_REGCLASS FR64X:$src2, VR128X)), FR64X)>;
4561 def : Pat<(f64 (X86fandn FR64X:$src1, FR64X:$src2)),
4562 (COPY_TO_REGCLASS (VANDNPDZ128rr
4563 (COPY_TO_REGCLASS FR64X:$src1, VR128X),
4564 (COPY_TO_REGCLASS FR64X:$src2, VR128X)), FR64X)>;
// Same four patterns for f32.
4566 def : Pat<(f32 (X86fand FR32X:$src1, FR32X:$src2)),
4567 (COPY_TO_REGCLASS (VANDPSZ128rr
4568 (COPY_TO_REGCLASS FR32X:$src1, VR128X),
4569 (COPY_TO_REGCLASS FR32X:$src2, VR128X)), FR32X)>;
4570 def : Pat<(f32 (X86for FR32X:$src1, FR32X:$src2)),
4571 (COPY_TO_REGCLASS (VORPSZ128rr
4572 (COPY_TO_REGCLASS FR32X:$src1, VR128X),
4573 (COPY_TO_REGCLASS FR32X:$src2, VR128X)), FR32X)>;
4574 def : Pat<(f32 (X86fxor FR32X:$src1, FR32X:$src2)),
4575 (COPY_TO_REGCLASS (VXORPSZ128rr
4576 (COPY_TO_REGCLASS FR32X:$src1, VR128X),
4577 (COPY_TO_REGCLASS FR32X:$src2, VR128X)), FR32X)>;
4578 def : Pat<(f32 (X86fandn FR32X:$src1, FR32X:$src2)),
4579 (COPY_TO_REGCLASS (VANDNPSZ128rr
4580 (COPY_TO_REGCLASS FR32X:$src1, VR128X),
4581 (COPY_TO_REGCLASS FR32X:$src2, VR128X)), FR32X)>;
// Packed VSCALEF forms: reg-reg, reg-mem, and broadcast-mem variants, all
// masked (AVX512_maskable) and emitted with the current rounding mode
// (FROUND_CURRENT).
4584 multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
4585 X86VectorVTInfo _> {
4586 defm rr: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4587 (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
4588 "$src2, $src1", "$src1, $src2",
4589 (_.VT (OpNode _.RC:$src1, _.RC:$src2, (i32 FROUND_CURRENT)))>, EVEX_4V;
4590 defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4591 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
4592 "$src2, $src1", "$src1, $src2",
4593 (OpNode _.RC:$src1, (_.LdFrag addr:$src2), (i32 FROUND_CURRENT))>, EVEX_4V;
4594 defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4595 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
4596 "${src2}"##_.BroadcastStr##", $src1",
4597 "$src1, ${src2}"##_.BroadcastStr,
4598 (OpNode _.RC:$src1, (_.VT (X86VBroadcast
4599 (_.ScalarLdFrag addr:$src2))), (i32 FROUND_CURRENT))>,
// Scalar VSCALEFSS/SD forms (reg-reg and reg-mem), masked via
// AVX512_maskable_scalar; the memory form wraps the load in scalar_to_vector.
4603 multiclass avx512_fp_scalef_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
4604 X86VectorVTInfo _> {
4605 defm rr: AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
4606 (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
4607 "$src2, $src1", "$src1, $src2",
4608 (_.VT (OpNode _.RC:$src1, _.RC:$src2, (i32 FROUND_CURRENT)))>;
4609 defm rm: AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
4610 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
4611 "$src2, $src1", "$src1, $src2",
4613 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
4614 (i32 FROUND_CURRENT))>;
// Instantiate every VSCALEF size: 512-bit packed (with explicit rounding
// forms), scalar SS/SD (with explicit rounding), and 128/256-bit packed
// gated on AVX512VL.
4617 multiclass avx512_fp_scalef_all<bits<8> opc, bits<8> opcScaler, string OpcodeStr, SDNode OpNode, SDNode OpNodeScal> {
4618 defm PSZ : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v16f32_info>,
4619 avx512_fp_round_packed<opc, OpcodeStr, OpNode, v16f32_info>,
4620 EVEX_V512, EVEX_CD8<32, CD8VF>;
4621 defm PDZ : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v8f64_info>,
4622 avx512_fp_round_packed<opc, OpcodeStr, OpNode, v8f64_info>,
4623 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
4624 defm SSZ128 : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, OpNodeScal, f32x_info>,
4625 avx512_fp_scalar_round<opcScaler, OpcodeStr##"ss", f32x_info, OpNodeScal, SSE_ALU_ITINS_S.s>,
4626 EVEX_4V,EVEX_CD8<32, CD8VT1>;
4627 defm SDZ128 : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, OpNodeScal, f64x_info>,
4628 avx512_fp_scalar_round<opcScaler, OpcodeStr##"sd", f64x_info, OpNodeScal, SSE_ALU_ITINS_S.d>,
4629 EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
4631 // Define only if AVX512VL feature is present.
4632 let Predicates = [HasVLX] in {
4633 defm PSZ128 : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v4f32x_info>,
4634 EVEX_V128, EVEX_CD8<32, CD8VF>;
4635 defm PSZ256 : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v8f32x_info>,
4636 EVEX_V256, EVEX_CD8<32, CD8VF>;
4637 defm PDZ128 : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v2f64x_info>,
4638 EVEX_V128, VEX_W, EVEX_CD8<64, CD8VF>;
4639 defm PDZ256 : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v4f64x_info>,
4640 EVEX_V256, VEX_W, EVEX_CD8<64, CD8VF>;
// VSCALEFPS/PD use opcode 0x2C, VSCALEFSS/SD use 0x2D, both in the T8PD map.
4643 defm VSCALEF : avx512_fp_scalef_all<0x2C, 0x2D, "vscalef", X86scalef, X86scalefs>, T8PD;
4645 //===----------------------------------------------------------------------===//
4646 // AVX-512 VPTESTM instructions
4647 //===----------------------------------------------------------------------===//
// VPTESTM/VPTESTNM reg-reg and reg-mem forms. These are compare-style
// instructions: they write a mask register (_.KRC:$dst), so they use
// AVX512_maskable_cmp. The rr form is commutable.
4649 multiclass avx512_vptest<bits<8> opc, string OpcodeStr, SDNode OpNode,
4650 X86VectorVTInfo _> {
4651 let isCommutable = 1 in
4652 defm rr : AVX512_maskable_cmp<opc, MRMSrcReg, _, (outs _.KRC:$dst),
4653 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4654 "$src2, $src1", "$src1, $src2",
4655 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))>,
4657 defm rm : AVX512_maskable_cmp<opc, MRMSrcMem, _, (outs _.KRC:$dst),
4658 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
4659 "$src2, $src1", "$src1, $src2",
4660 (OpNode (_.VT _.RC:$src1),
4661 (_.VT (bitconvert (_.LdFrag addr:$src2))))>,
4663 EVEX_CD8<_.EltSize, CD8VF>;
// Broadcast-memory (rmb) form of VPTESTM/VPTESTNM (EVEX_B).
4666 multiclass avx512_vptest_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
4667 X86VectorVTInfo _> {
4668 defm rmb : AVX512_maskable_cmp<opc, MRMSrcMem, _, (outs _.KRC:$dst),
4669 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
4670 "${src2}"##_.BroadcastStr##", $src1",
4671 "$src1, ${src2}"##_.BroadcastStr,
4672 (OpNode (_.VT _.RC:$src1), (_.VT (X86VBroadcast
4673 (_.ScalarLdFrag addr:$src2))))>,
4674 EVEX_B, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
4677 // Use 512bit version to implement 128/256 bit in case NoVLX.
// Widens the 128/256-bit operands into a 512-bit register via INSERT_SUBREG
// on IMPLICIT_DEF, runs the Z (512-bit) instruction, then copies the mask
// result back to the narrow mask class.
4678 multiclass avx512_vptest_lowering<SDNode OpNode, X86VectorVTInfo ExtendInfo,
4679 X86VectorVTInfo _, string Suffix> {
4680 def : Pat<(_.KVT (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))),
4681 (_.KVT (COPY_TO_REGCLASS
4682 (!cast<Instruction>(NAME # Suffix # "Zrr")
4683 (INSERT_SUBREG (ExtendInfo.VT (IMPLICIT_DEF)),
4684 _.RC:$src1, _.SubRegIdx),
4685 (INSERT_SUBREG (ExtendInfo.VT (IMPLICIT_DEF)),
4686 _.RC:$src2, _.SubRegIdx)),
// D/Q element sizes: 512-bit under AVX512F, 128/256-bit under VL, and the
// widening fallback patterns when VL is absent.
4690 multiclass avx512_vptest_dq_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
4691 AVX512VLVectorVTInfo _, string Suffix> {
4692 let Predicates = [HasAVX512] in
4693 defm Z : avx512_vptest<opc, OpcodeStr, OpNode, _.info512>,
4694 avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
4696 let Predicates = [HasAVX512, HasVLX] in {
4697 defm Z256 : avx512_vptest<opc, OpcodeStr, OpNode, _.info256>,
4698 avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
4699 defm Z128 : avx512_vptest<opc, OpcodeStr, OpNode, _.info128>,
4700 avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info128>, EVEX_V128;
4702 let Predicates = [HasAVX512, NoVLX] in {
4703 defm Z256_Alt : avx512_vptest_lowering< OpNode, _.info512, _.info256, Suffix>;
4704 defm Z128_Alt : avx512_vptest_lowering< OpNode, _.info512, _.info128, Suffix>;
4708 multiclass avx512_vptest_dq<bits<8> opc, string OpcodeStr, SDNode OpNode> {
4709 defm D : avx512_vptest_dq_sizes<opc, OpcodeStr#"d", OpNode,
4710 avx512vl_i32_info, "D">;
4711 defm Q : avx512_vptest_dq_sizes<opc, OpcodeStr#"q", OpNode,
4712 avx512vl_i64_info, "Q">, VEX_W;
// B/W element sizes require BWI; the narrow forms additionally need VL,
// otherwise the widening fallback is used.
4715 multiclass avx512_vptest_wb<bits<8> opc, string OpcodeStr,
4717 let Predicates = [HasBWI] in {
4718 defm WZ: avx512_vptest<opc, OpcodeStr#"w", OpNode, v32i16_info>,
4720 defm BZ: avx512_vptest<opc, OpcodeStr#"b", OpNode, v64i8_info>,
4723 let Predicates = [HasVLX, HasBWI] in {
4725 defm WZ256: avx512_vptest<opc, OpcodeStr#"w", OpNode, v16i16x_info>,
4727 defm WZ128: avx512_vptest<opc, OpcodeStr#"w", OpNode, v8i16x_info>,
4729 defm BZ256: avx512_vptest<opc, OpcodeStr#"b", OpNode, v32i8x_info>,
4731 defm BZ128: avx512_vptest<opc, OpcodeStr#"b", OpNode, v16i8x_info>,
4735 let Predicates = [HasAVX512, NoVLX] in {
4736 defm BZ256_Alt : avx512_vptest_lowering< OpNode, v64i8_info, v32i8x_info, "B">;
4737 defm BZ128_Alt : avx512_vptest_lowering< OpNode, v64i8_info, v16i8x_info, "B">;
4738 defm WZ256_Alt : avx512_vptest_lowering< OpNode, v32i16_info, v16i16x_info, "W">;
4739 defm WZ128_Alt : avx512_vptest_lowering< OpNode, v32i16_info, v8i16x_info, "W">;
4744 multiclass avx512_vptest_all_forms<bits<8> opc_wb, bits<8> opc_dq, string OpcodeStr,
4746 avx512_vptest_wb <opc_wb, OpcodeStr, OpNode>,
4747 avx512_vptest_dq<opc_dq, OpcodeStr, OpNode>;
// VPTESTM and VPTESTNM share opcodes 0x26/0x27; the prefix map distinguishes
// them (T8PD vs T8XS).
4749 defm VPTESTM : avx512_vptest_all_forms<0x26, 0x27, "vptestm", X86testm>, T8PD;
4750 defm VPTESTNM : avx512_vptest_all_forms<0x26, 0x27, "vptestnm", X86testnm>, T8XS;
4753 //===----------------------------------------------------------------------===//
4754 // AVX-512 Shift instructions
4755 //===----------------------------------------------------------------------===//
// Shift-by-immediate forms: register (ri) and full-vector memory (mi)
// operands with an 8-bit immediate shift count.
4756 multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
4757 string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
4758 let ExeDomain = _.ExeDomain in {
4759 defm ri : AVX512_maskable<opc, ImmFormR, _, (outs _.RC:$dst),
4760 (ins _.RC:$src1, u8imm:$src2), OpcodeStr,
4761 "$src2, $src1", "$src1, $src2",
4762 (_.VT (OpNode _.RC:$src1, (i8 imm:$src2))),
4763 SSE_INTSHIFT_ITINS_P.rr>;
4764 defm mi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
4765 (ins _.MemOp:$src1, u8imm:$src2), OpcodeStr,
4766 "$src2, $src1", "$src1, $src2",
4767 (_.VT (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
4769 SSE_INTSHIFT_ITINS_P.rm>;
// Shift-by-immediate with a broadcast memory source (mbi, EVEX_B).
4773 multiclass avx512_shift_rmbi<bits<8> opc, Format ImmFormM,
4774 string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
4775 let ExeDomain = _.ExeDomain in
4776 defm mbi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
4777 (ins _.ScalarMemOp:$src1, u8imm:$src2), OpcodeStr,
4778 "$src2, ${src1}"##_.BroadcastStr, "${src1}"##_.BroadcastStr##", $src2",
4779 (_.VT (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src1)), (i8 imm:$src2))),
4780 SSE_INTSHIFT_ITINS_P.rm>, EVEX_B;
// Shift-by-register forms: the count comes from the low element of an XMM
// register (always 128-bit, regardless of the destination width).
4783 multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4784 ValueType SrcVT, PatFrag bc_frag, X86VectorVTInfo _> {
4785 // src2 is always 128-bit
4786 let ExeDomain = _.ExeDomain in {
4787 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4788 (ins _.RC:$src1, VR128X:$src2), OpcodeStr,
4789 "$src2, $src1", "$src1, $src2",
4790 (_.VT (OpNode _.RC:$src1, (SrcVT VR128X:$src2))),
4791 SSE_INTSHIFT_ITINS_P.rr>, AVX512BIBase, EVEX_4V;
4792 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4793 (ins _.RC:$src1, i128mem:$src2), OpcodeStr,
4794 "$src2, $src1", "$src1, $src2",
4795 (_.VT (OpNode _.RC:$src1, (bc_frag (loadv2i64 addr:$src2)))),
4796 SSE_INTSHIFT_ITINS_P.rm>, AVX512BIBase,
// Per-size instantiation of the shift-by-register forms. Note the CD8
// tuple differs by width (VQ/VH/VF) because the count operand is a fixed
// 128-bit memory access.
4801 multiclass avx512_shift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
4802 ValueType SrcVT, PatFrag bc_frag,
4803 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
4804 let Predicates = [prd] in
4805 defm Z : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag,
4806 VTInfo.info512>, EVEX_V512,
4807 EVEX_CD8<VTInfo.info512.EltSize, CD8VQ> ;
4808 let Predicates = [prd, HasVLX] in {
4809 defm Z256 : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag,
4810 VTInfo.info256>, EVEX_V256,
4811 EVEX_CD8<VTInfo.info256.EltSize, CD8VH>;
4812 defm Z128 : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag,
4813 VTInfo.info128>, EVEX_V128,
4814 EVEX_CD8<VTInfo.info128.EltSize, CD8VF>;
// D/Q need only AVX512F; W needs BWI.
4818 multiclass avx512_shift_types<bits<8> opcd, bits<8> opcq, bits<8> opcw,
4819 string OpcodeStr, SDNode OpNode> {
4820 defm D : avx512_shift_sizes<opcd, OpcodeStr#"d", OpNode, v4i32, bc_v4i32,
4821 avx512vl_i32_info, HasAVX512>;
4822 defm Q : avx512_shift_sizes<opcq, OpcodeStr#"q", OpNode, v2i64, bc_v2i64,
4823 avx512vl_i64_info, HasAVX512>, VEX_W;
4824 defm W : avx512_shift_sizes<opcw, OpcodeStr#"w", OpNode, v8i16, bc_v8i16,
4825 avx512vl_i16_info, HasBWI>;
// Per-size instantiation of the immediate-shift forms (including broadcast).
4828 multiclass avx512_shift_rmi_sizes<bits<8> opc, Format ImmFormR, Format ImmFormM,
4829 string OpcodeStr, SDNode OpNode,
4830 AVX512VLVectorVTInfo VTInfo> {
4831 let Predicates = [HasAVX512] in
4832 defm Z: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
4834 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
4835 VTInfo.info512>, EVEX_V512;
4836 let Predicates = [HasAVX512, HasVLX] in {
4837 defm Z256: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
4839 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
4840 VTInfo.info256>, EVEX_V256;
4841 defm Z128: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
4843 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
4844 VTInfo.info128>, EVEX_V128;
// Word-element immediate shifts (BWI); no broadcast form for words.
4848 multiclass avx512_shift_rmi_w<bits<8> opcw,
4849 Format ImmFormR, Format ImmFormM,
4850 string OpcodeStr, SDNode OpNode> {
4851 let Predicates = [HasBWI] in
4852 defm WZ: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
4853 v32i16_info>, EVEX_V512;
4854 let Predicates = [HasVLX, HasBWI] in {
4855 defm WZ256: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
4856 v16i16x_info>, EVEX_V256;
4857 defm WZ128: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
4858 v8i16x_info>, EVEX_V128;
4862 multiclass avx512_shift_rmi_dq<bits<8> opcd, bits<8> opcq,
4863 Format ImmFormR, Format ImmFormM,
4864 string OpcodeStr, SDNode OpNode> {
4865 defm D: avx512_shift_rmi_sizes<opcd, ImmFormR, ImmFormM, OpcodeStr#"d", OpNode,
4866 avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
4867 defm Q: avx512_shift_rmi_sizes<opcq, ImmFormR, ImmFormM, OpcodeStr#"q", OpNode,
4868 avx512vl_i64_info>, EVEX_CD8<64, CD8VF>, VEX_W;
// Immediate shifts and rotates: the opcode-group field (MRM0r..MRM6r) selects
// the operation within shared opcodes 0x71/0x72/0x73.
4871 defm VPSRL : avx512_shift_rmi_dq<0x72, 0x73, MRM2r, MRM2m, "vpsrl", X86vsrli>,
4872 avx512_shift_rmi_w<0x71, MRM2r, MRM2m, "vpsrlw", X86vsrli>, AVX512BIi8Base, EVEX_4V;
4874 defm VPSLL : avx512_shift_rmi_dq<0x72, 0x73, MRM6r, MRM6m, "vpsll", X86vshli>,
4875 avx512_shift_rmi_w<0x71, MRM6r, MRM6m, "vpsllw", X86vshli>, AVX512BIi8Base, EVEX_4V;
4877 defm VPSRA : avx512_shift_rmi_dq<0x72, 0x72, MRM4r, MRM4m, "vpsra", X86vsrai>,
4878 avx512_shift_rmi_w<0x71, MRM4r, MRM4m, "vpsraw", X86vsrai>, AVX512BIi8Base, EVEX_4V;
4880 defm VPROR : avx512_shift_rmi_dq<0x72, 0x72, MRM0r, MRM0m, "vpror", X86vrotri>, AVX512BIi8Base, EVEX_4V;
4881 defm VPROL : avx512_shift_rmi_dq<0x72, 0x72, MRM1r, MRM1m, "vprol", X86vrotli>, AVX512BIi8Base, EVEX_4V;
// Shift-by-XMM-count instantiations.
4883 defm VPSLL : avx512_shift_types<0xF2, 0xF3, 0xF1, "vpsll", X86vshl>;
4884 defm VPSRA : avx512_shift_types<0xE2, 0xE2, 0xE1, "vpsra", X86vsra>;
4885 defm VPSRL : avx512_shift_types<0xD2, 0xD3, 0xD1, "vpsrl", X86vsrl>;
4887 //===-------------------------------------------------------------------===//
4888 // Variable Bit Shifts
4889 //===-------------------------------------------------------------------===//
// Variable (per-element count) shift: reg-reg and reg-mem forms.
4890 multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
4891 X86VectorVTInfo _> {
4892 let ExeDomain = _.ExeDomain in {
4893 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4894 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4895 "$src2, $src1", "$src1, $src2",
4896 (_.VT (OpNode _.RC:$src1, (_.VT _.RC:$src2))),
4897 SSE_INTSHIFT_ITINS_P.rr>, AVX5128IBase, EVEX_4V;
4898 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4899 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
4900 "$src2, $src1", "$src1, $src2",
4901 (_.VT (OpNode _.RC:$src1,
4902 (_.VT (bitconvert (_.LdFrag addr:$src2))))),
4903 SSE_INTSHIFT_ITINS_P.rm>, AVX5128IBase, EVEX_4V,
4904 EVEX_CD8<_.EltSize, CD8VF>;
// Variable shift with broadcast memory count (rmb, EVEX_B).
4908 multiclass avx512_var_shift_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
4909 X86VectorVTInfo _> {
4910 let ExeDomain = _.ExeDomain in
4911 defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4912 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
4913 "${src2}"##_.BroadcastStr##", $src1",
4914 "$src1, ${src2}"##_.BroadcastStr,
4915 (_.VT (OpNode _.RC:$src1, (_.VT (X86VBroadcast
4916 (_.ScalarLdFrag addr:$src2))))),
4917 SSE_INTSHIFT_ITINS_P.rm>, AVX5128IBase, EVEX_B,
4918 EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
4920 multiclass avx512_var_shift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
4921 AVX512VLVectorVTInfo _> {
4922 let Predicates = [HasAVX512] in
4923 defm Z : avx512_var_shift<opc, OpcodeStr, OpNode, _.info512>,
4924 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
4926 let Predicates = [HasAVX512, HasVLX] in {
4927 defm Z256 : avx512_var_shift<opc, OpcodeStr, OpNode, _.info256>,
4928 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
4929 defm Z128 : avx512_var_shift<opc, OpcodeStr, OpNode, _.info128>,
4930 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info128>, EVEX_V128;
4934 multiclass avx512_var_shift_types<bits<8> opc, string OpcodeStr,
4936 defm D : avx512_var_shift_sizes<opc, OpcodeStr#"d", OpNode,
4938 defm Q : avx512_var_shift_sizes<opc, OpcodeStr#"q", OpNode,
4939 avx512vl_i64_info>, VEX_W;
4942 // Use 512bit version to implement 128/256 bit in case NoVLX.
// With BWI but no VL, the 16-bit variable shifts exist only at 512 bits:
// widen the operands with INSERT_SUBREG into ZMM, run the WZ instruction,
// and the result is extracted back (extraction lines elided here).
4943 multiclass avx512_var_shift_w_lowering<AVX512VLVectorVTInfo _, SDNode OpNode> {
4944 let Predicates = [HasBWI, NoVLX] in {
4945 def : Pat<(_.info256.VT (OpNode (_.info256.VT _.info256.RC:$src1),
4946 (_.info256.VT _.info256.RC:$src2))),
4948 (!cast<Instruction>(NAME#"WZrr")
4949 (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR256X:$src1, sub_ymm),
4950 (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR256X:$src2, sub_ymm)),
4953 def : Pat<(_.info128.VT (OpNode (_.info128.VT _.info128.RC:$src1),
4954 (_.info128.VT _.info128.RC:$src2))),
4956 (!cast<Instruction>(NAME#"WZrr")
4957 (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR128X:$src1, sub_xmm),
4958 (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR128X:$src2, sub_xmm)),
// Word-element variable shifts (BWI, plus VL for the narrow widths).
4963 multiclass avx512_var_shift_w<bits<8> opc, string OpcodeStr,
4965 let Predicates = [HasBWI] in
4966 defm WZ: avx512_var_shift<opc, OpcodeStr, OpNode, v32i16_info>,
4968 let Predicates = [HasVLX, HasBWI] in {
4970 defm WZ256: avx512_var_shift<opc, OpcodeStr, OpNode, v16i16x_info>,
4972 defm WZ128: avx512_var_shift<opc, OpcodeStr, OpNode, v8i16x_info>,
// Variable shifts and rotates map directly onto the generic shl/sra/srl/
// rotl/rotr SDNodes.
4977 defm VPSLLV : avx512_var_shift_types<0x47, "vpsllv", shl>,
4978 avx512_var_shift_w<0x12, "vpsllvw", shl>,
4979 avx512_var_shift_w_lowering<avx512vl_i16_info, shl>;
4981 defm VPSRAV : avx512_var_shift_types<0x46, "vpsrav", sra>,
4982 avx512_var_shift_w<0x11, "vpsravw", sra>,
4983 avx512_var_shift_w_lowering<avx512vl_i16_info, sra>;
4985 defm VPSRLV : avx512_var_shift_types<0x45, "vpsrlv", srl>,
4986 avx512_var_shift_w<0x10, "vpsrlvw", srl>,
4987 avx512_var_shift_w_lowering<avx512vl_i16_info, srl>;
4988 defm VPRORV : avx512_var_shift_types<0x14, "vprorv", rotr>;
4989 defm VPROLV : avx512_var_shift_types<0x15, "vprolv", rotl>;
4991 // Special handling for VPSRAV intrinsics.
// Select the X86vsrav intrinsic node onto VPSRAV* instructions, covering the
// plain, merge-masked (vselect with $src0) and zero-masked (vselect with
// ImmAllZerosV) forms, for both register and memory operands.
4992 multiclass avx512_var_shift_int_lowering<string InstrStr, X86VectorVTInfo _,
4993 list<Predicate> p> {
4994 let Predicates = p in {
4995 def : Pat<(_.VT (X86vsrav _.RC:$src1, _.RC:$src2)),
4996 (!cast<Instruction>(InstrStr#_.ZSuffix#rr) _.RC:$src1,
4998 def : Pat<(_.VT (X86vsrav _.RC:$src1, (bitconvert (_.LdFrag addr:$src2)))),
4999 (!cast<Instruction>(InstrStr#_.ZSuffix##rm)
5000 _.RC:$src1, addr:$src2)>;
// Masked patterns get higher AddedComplexity so they win over the unmasked
// ones; zero-masking (30) beats merge-masking (20).
5001 let AddedComplexity = 20 in {
5002 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5003 (X86vsrav _.RC:$src1, _.RC:$src2), _.RC:$src0)),
5004 (!cast<Instruction>(InstrStr#_.ZSuffix#rrk) _.RC:$src0,
5005 _.KRC:$mask, _.RC:$src1, _.RC:$src2)>;
5006 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5007 (X86vsrav _.RC:$src1, (bitconvert (_.LdFrag addr:$src2))),
5009 (!cast<Instruction>(InstrStr#_.ZSuffix##rmk) _.RC:$src0,
5010 _.KRC:$mask, _.RC:$src1, addr:$src2)>;
5012 let AddedComplexity = 30 in {
5013 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5014 (X86vsrav _.RC:$src1, _.RC:$src2), _.ImmAllZerosV)),
5015 (!cast<Instruction>(InstrStr#_.ZSuffix#rrkz) _.KRC:$mask,
5016 _.RC:$src1, _.RC:$src2)>;
5017 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5018 (X86vsrav _.RC:$src1, (bitconvert (_.LdFrag addr:$src2))),
5020 (!cast<Instruction>(InstrStr#_.ZSuffix##rmkz) _.KRC:$mask,
5021 _.RC:$src1, addr:$src2)>;
// Extension of the above that also covers the broadcast-memory (rmb*) forms;
// used for the D/Q element sizes which support embedded broadcast.
5026 multiclass avx512_var_shift_int_lowering_mb<string InstrStr, X86VectorVTInfo _,
5027 list<Predicate> p> :
5028 avx512_var_shift_int_lowering<InstrStr, _, p> {
5029 let Predicates = p in {
5030 def : Pat<(_.VT (X86vsrav _.RC:$src1,
5031 (X86VBroadcast (_.ScalarLdFrag addr:$src2)))),
5032 (!cast<Instruction>(InstrStr#_.ZSuffix##rmb)
5033 _.RC:$src1, addr:$src2)>;
5034 let AddedComplexity = 20 in
5035 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5036 (X86vsrav _.RC:$src1,
5037 (X86VBroadcast (_.ScalarLdFrag addr:$src2))),
5039 (!cast<Instruction>(InstrStr#_.ZSuffix##rmbk) _.RC:$src0,
5040 _.KRC:$mask, _.RC:$src1, addr:$src2)>;
5041 let AddedComplexity = 30 in
5042 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5043 (X86vsrav _.RC:$src1,
5044 (X86VBroadcast (_.ScalarLdFrag addr:$src2))),
5046 (!cast<Instruction>(InstrStr#_.ZSuffix##rmbkz) _.KRC:$mask,
5047 _.RC:$src1, addr:$src2)>;
// W has no broadcast form; D/Q use the _mb variant.
5051 defm : avx512_var_shift_int_lowering<"VPSRAVW", v8i16x_info, [HasVLX, HasBWI]>;
5052 defm : avx512_var_shift_int_lowering<"VPSRAVW", v16i16x_info, [HasVLX, HasBWI]>;
5053 defm : avx512_var_shift_int_lowering<"VPSRAVW", v32i16_info, [HasBWI]>;
5054 defm : avx512_var_shift_int_lowering_mb<"VPSRAVD", v4i32x_info, [HasVLX]>;
5055 defm : avx512_var_shift_int_lowering_mb<"VPSRAVD", v8i32x_info, [HasVLX]>;
5056 defm : avx512_var_shift_int_lowering_mb<"VPSRAVD", v16i32_info, [HasAVX512]>;
5057 defm : avx512_var_shift_int_lowering_mb<"VPSRAVQ", v2i64x_info, [HasVLX]>;
5058 defm : avx512_var_shift_int_lowering_mb<"VPSRAVQ", v4i64x_info, [HasVLX]>;
5059 defm : avx512_var_shift_int_lowering_mb<"VPSRAVQ", v8i64_info, [HasAVX512]>;
5061 //===-------------------------------------------------------------------===//
5062 // 1-src variable permutation VPERMW/D/Q
5063 //===-------------------------------------------------------------------===//
// Variable-control VPERMD/Q/PS/PD. Reuses the variable-shift multiclasses
// (same two-operand encoding shape). D/Q cross-lane permutes have no
// 128-bit form, so only Z and Z256 are defined.
5064 multiclass avx512_vperm_dq_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
5065 AVX512VLVectorVTInfo _> {
5066 let Predicates = [HasAVX512] in
5067 defm Z : avx512_var_shift<opc, OpcodeStr, OpNode, _.info512>,
5068 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
5070 let Predicates = [HasAVX512, HasVLX] in
5071 defm Z256 : avx512_var_shift<opc, OpcodeStr, OpNode, _.info256>,
5072 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
// Immediate-control VPERMQ/VPERMPD (reuses the imm-shift multiclasses);
// likewise no 128-bit form.
5075 multiclass avx512_vpermi_dq_sizes<bits<8> opc, Format ImmFormR, Format ImmFormM,
5076 string OpcodeStr, SDNode OpNode,
5077 AVX512VLVectorVTInfo VTInfo> {
5078 let Predicates = [HasAVX512] in
5079 defm Z: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
5081 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
5082 VTInfo.info512>, EVEX_V512;
5083 let Predicates = [HasAVX512, HasVLX] in
5084 defm Z256: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
5086 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
5087 VTInfo.info256>, EVEX_V256;
// Byte/word permutes (VPERMB/VPERMW), gated on the given predicate
// (VBMI for bytes, BWI for words).
5090 multiclass avx512_vperm_bw<bits<8> opc, string OpcodeStr,
5091 Predicate prd, SDNode OpNode,
5092 AVX512VLVectorVTInfo _> {
5093 let Predicates = [prd] in
5094 defm Z: avx512_var_shift<opc, OpcodeStr, OpNode, _.info512>,
5096 let Predicates = [HasVLX, prd] in {
5097 defm Z256: avx512_var_shift<opc, OpcodeStr, OpNode, _.info256>,
5099 defm Z128: avx512_var_shift<opc, OpcodeStr, OpNode, _.info128>,
5104 defm VPERMW : avx512_vperm_bw<0x8D, "vpermw", HasBWI, X86VPermv,
5105 avx512vl_i16_info>, VEX_W;
5106 defm VPERMB : avx512_vperm_bw<0x8D, "vpermb", HasVBMI, X86VPermv,
5109 defm VPERMD : avx512_vperm_dq_sizes<0x36, "vpermd", X86VPermv,
5111 defm VPERMQ : avx512_vperm_dq_sizes<0x36, "vpermq", X86VPermv,
5112 avx512vl_i64_info>, VEX_W;
5113 defm VPERMPS : avx512_vperm_dq_sizes<0x16, "vpermps", X86VPermv,
5115 defm VPERMPD : avx512_vperm_dq_sizes<0x16, "vpermpd", X86VPermv,
5116 avx512vl_f64_info>, VEX_W;
// Immediate-control variants of VPERMQ/VPERMPD.
5118 defm VPERMQ : avx512_vpermi_dq_sizes<0x00, MRMSrcReg, MRMSrcMem, "vpermq",
5119 X86VPermi, avx512vl_i64_info>,
5120 EVEX, AVX512AIi8Base, EVEX_CD8<64, CD8VF>, VEX_W;
5121 defm VPERMPD : avx512_vpermi_dq_sizes<0x01, MRMSrcReg, MRMSrcMem, "vpermpd",
5122 X86VPermi, avx512vl_f64_info>,
5123 EVEX, AVX512AIi8Base, EVEX_CD8<64, CD8VF>, VEX_W;
5124 //===----------------------------------------------------------------------===//
5125 // AVX-512 - VPERMIL
5126 //===----------------------------------------------------------------------===//
// Variable-control VPERMILPS/PD: the control vector is an integer type
// (Ctrl) distinct from the data type (_); rr, rm and broadcast rmb forms.
5128 multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode,
5129 X86VectorVTInfo _, X86VectorVTInfo Ctrl> {
5130 defm rr: AVX512_maskable<OpcVar, MRMSrcReg, _, (outs _.RC:$dst),
5131 (ins _.RC:$src1, Ctrl.RC:$src2), OpcodeStr,
5132 "$src2, $src1", "$src1, $src2",
5133 (_.VT (OpNode _.RC:$src1,
5134 (Ctrl.VT Ctrl.RC:$src2)))>,
5136 defm rm: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
5137 (ins _.RC:$src1, Ctrl.MemOp:$src2), OpcodeStr,
5138 "$src2, $src1", "$src1, $src2",
5141 (Ctrl.VT (bitconvert(Ctrl.LdFrag addr:$src2)))))>,
5142 T8PD, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
5143 defm rmb: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
5144 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
5145 "${src2}"##_.BroadcastStr##", $src1",
5146 "$src1, ${src2}"##_.BroadcastStr,
5149 (Ctrl.VT (X86VBroadcast
5150 (Ctrl.ScalarLdFrag addr:$src2)))))>,
5151 T8PD, EVEX_4V, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
// All three widths of the variable-control form (512 under AVX512F,
// 128/256 under VL).
5154 multiclass avx512_permil_vec_common<string OpcodeStr, bits<8> OpcVar,
5155 AVX512VLVectorVTInfo _, AVX512VLVectorVTInfo Ctrl>{
5156 let Predicates = [HasAVX512] in {
5157 defm Z : avx512_permil_vec<OpcVar, OpcodeStr, X86VPermilpv, _.info512,
5158 Ctrl.info512>, EVEX_V512;
5160 let Predicates = [HasAVX512, HasVLX] in {
5161 defm Z128 : avx512_permil_vec<OpcVar, OpcodeStr, X86VPermilpv, _.info128,
5162 Ctrl.info128>, EVEX_V128;
5163 defm Z256 : avx512_permil_vec<OpcVar, OpcodeStr, X86VPermilpv, _.info256,
5164 Ctrl.info256>, EVEX_V256;
// Combines the variable-control (OpcVar) and immediate-control (OpcImm,
// via the imm-shift multiclass) forms under one mnemonic.
5168 multiclass avx512_permil<string OpcodeStr, bits<8> OpcImm, bits<8> OpcVar,
5169 AVX512VLVectorVTInfo _, AVX512VLVectorVTInfo Ctrl>{
5171 defm NAME: avx512_permil_vec_common<OpcodeStr, OpcVar, _, Ctrl>;
5172 defm NAME: avx512_shift_rmi_sizes<OpcImm, MRMSrcReg, MRMSrcMem, OpcodeStr,
5174 EVEX, AVX512AIi8Base, EVEX_CD8<_.info128.EltSize, CD8VF>;
5177 let ExeDomain = SSEPackedSingle in
5178 defm VPERMILPS : avx512_permil<"vpermilps", 0x04, 0x0C, avx512vl_f32_info,
5180 let ExeDomain = SSEPackedDouble in
5181 defm VPERMILPD : avx512_permil<"vpermilpd", 0x05, 0x0D, avx512vl_f64_info,
5182 avx512vl_i64_info>, VEX_W;
5183 //===----------------------------------------------------------------------===//
5184 // AVX-512 - VPSHUFD, VPSHUFLW, VPSHUFHW
5185 //===----------------------------------------------------------------------===//
// VPSHUFD (imm control, dword) and VPSHUFHW/VPSHUFLW (imm control, word)
// reuse the immediate-shift multiclasses; prefix maps differ (PD/XS/XD).
5187 defm VPSHUFD : avx512_shift_rmi_sizes<0x70, MRMSrcReg, MRMSrcMem, "vpshufd",
5188 X86PShufd, avx512vl_i32_info>,
5189 EVEX, AVX512BIi8Base, EVEX_CD8<32, CD8VF>;
5190 defm VPSHUFH : avx512_shift_rmi_w<0x70, MRMSrcReg, MRMSrcMem, "vpshufhw",
5191 X86PShufhw>, EVEX, AVX512XSIi8Base;
5192 defm VPSHUFL : avx512_shift_rmi_w<0x70, MRMSrcReg, MRMSrcMem, "vpshuflw",
5193 X86PShuflw>, EVEX, AVX512XDIi8Base;
// VPSHUFB (variable byte shuffle) reuses the variable-shift multiclass;
// BWI required, VL for the narrow widths.
5195 multiclass avx512_pshufb_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode> {
5196 let Predicates = [HasBWI] in
5197 defm Z: avx512_var_shift<opc, OpcodeStr, OpNode, v64i8_info>, EVEX_V512;
5199 let Predicates = [HasVLX, HasBWI] in {
5200 defm Z256: avx512_var_shift<opc, OpcodeStr, OpNode, v32i8x_info>, EVEX_V256;
5201 defm Z128: avx512_var_shift<opc, OpcodeStr, OpNode, v16i8x_info>, EVEX_V128;
5205 defm VPSHUFB: avx512_pshufb_sizes<0x00, "vpshufb", X86pshufb>;
5207 //===----------------------------------------------------------------------===//
5208 // Move Low to High and High to Low packed FP Instructions
5209 //===----------------------------------------------------------------------===//
// EVEX-encoded VMOVLHPS/VMOVHLPS (register forms only).
5210 def VMOVLHPSZrr : AVX512PSI<0x16, MRMSrcReg, (outs VR128X:$dst),
5211 (ins VR128X:$src1, VR128X:$src2),
5212 "vmovlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5213 [(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))],
5214 IIC_SSE_MOV_LH>, EVEX_4V;
5215 def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
5216 (ins VR128X:$src1, VR128X:$src2),
5217 "vmovhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5218 [(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))],
5219 IIC_SSE_MOV_LH>, EVEX_4V;
// Integer-typed Movlhps/Movhlps nodes select the same FP instructions.
5221 let Predicates = [HasAVX512] in {
5223 def : Pat<(v4i32 (X86Movlhps VR128X:$src1, VR128X:$src2)),
5224 (VMOVLHPSZrr VR128X:$src1, VR128X:$src2)>;
5225 def : Pat<(v2i64 (X86Movlhps VR128X:$src1, VR128X:$src2)),
5226 (VMOVLHPSZrr (v2i64 VR128X:$src1), VR128X:$src2)>;
5229 def : Pat<(v4i32 (X86Movhlps VR128X:$src1, VR128X:$src2)),
5230 (VMOVHLPSZrr VR128X:$src1, VR128X:$src2)>;
5233 //===----------------------------------------------------------------------===//
5234 // VMOVHPS/PD VMOVLPS Instructions
5235 // All patterns were taken from the SSE implementation.
5236 //===----------------------------------------------------------------------===//
// Load form of VMOVHPS/PD and VMOVLPS/PD: merges a 64-bit memory operand
// into the high or low half of an XMM register (OpNode selects which).
5237 multiclass avx512_mov_hilo_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
5238 X86VectorVTInfo _> {
5239 def rm : AVX512<opc, MRMSrcMem, (outs _.RC:$dst),
5240 (ins _.RC:$src1, f64mem:$src2),
5241 !strconcat(OpcodeStr,
5242 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5246 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))))))],
5247 IIC_SSE_MOV_LH>, EVEX_4V;
5250 defm VMOVHPSZ128 : avx512_mov_hilo_packed<0x16, "vmovhps", X86Movlhps,
5251 v4f32x_info>, EVEX_CD8<32, CD8VT2>, PS;
5252 defm VMOVHPDZ128 : avx512_mov_hilo_packed<0x16, "vmovhpd", X86Movlhpd,
5253 v2f64x_info>, EVEX_CD8<64, CD8VT1>, PD, VEX_W;
5254 defm VMOVLPSZ128 : avx512_mov_hilo_packed<0x12, "vmovlps", X86Movlps,
5255 v4f32x_info>, EVEX_CD8<32, CD8VT2>, PS;
5256 defm VMOVLPDZ128 : avx512_mov_hilo_packed<0x12, "vmovlpd", X86Movlpd,
5257 v2f64x_info>, EVEX_CD8<64, CD8VT1>, PD, VEX_W;
// Additional selection patterns that fold various load / bitcast /
// scalar_to_vector shapes into the rm forms above.
5259 let Predicates = [HasAVX512] in {
5261 def : Pat<(X86Movlhps VR128X:$src1,
5262 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
5263 (VMOVHPSZ128rm VR128X:$src1, addr:$src2)>;
5264 def : Pat<(X86Movlhps VR128X:$src1,
5265 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
5266 (VMOVHPSZ128rm VR128X:$src1, addr:$src2)>;
5268 def : Pat<(v2f64 (X86Unpckl VR128X:$src1,
5269 (scalar_to_vector (loadf64 addr:$src2)))),
5270 (VMOVHPDZ128rm VR128X:$src1, addr:$src2)>;
5271 def : Pat<(v2f64 (X86Unpckl VR128X:$src1,
5272 (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
5273 (VMOVHPDZ128rm VR128X:$src1, addr:$src2)>;
5275 def : Pat<(v4f32 (X86Movlps VR128X:$src1, (load addr:$src2))),
5276 (VMOVLPSZ128rm VR128X:$src1, addr:$src2)>;
5277 def : Pat<(v4i32 (X86Movlps VR128X:$src1, (load addr:$src2))),
5278 (VMOVLPSZ128rm VR128X:$src1, addr:$src2)>;
5280 def : Pat<(v2f64 (X86Movlpd VR128X:$src1, (load addr:$src2))),
5281 (VMOVLPDZ128rm VR128X:$src1, addr:$src2)>;
5282 def : Pat<(v2i64 (X86Movlpd VR128X:$src1, (load addr:$src2))),
5283 (VMOVLPDZ128rm VR128X:$src1, addr:$src2)>;
5284 def : Pat<(v2f64 (X86Movsd VR128X:$src1,
5285 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5286 (VMOVLPDZ128rm VR128X:$src1, addr:$src2)>;
// Store forms: VMOVHPS/PD store the high 64 bits (extracted via X86Unpckh),
// VMOVLPS/PD store the low 64 bits (extractelt at index 0).
5289 def VMOVHPSZ128mr : AVX512PSI<0x17, MRMDestMem, (outs),
5290 (ins f64mem:$dst, VR128X:$src),
5291 "vmovhps\t{$src, $dst|$dst, $src}",
5292 [(store (f64 (extractelt
5293 (X86Unpckh (bc_v2f64 (v4f32 VR128X:$src)),
5294 (bc_v2f64 (v4f32 VR128X:$src))),
5295 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>,
5296 EVEX, EVEX_CD8<32, CD8VT2>;
5297 def VMOVHPDZ128mr : AVX512PDI<0x17, MRMDestMem, (outs),
5298 (ins f64mem:$dst, VR128X:$src),
5299 "vmovhpd\t{$src, $dst|$dst, $src}",
5300 [(store (f64 (extractelt
5301 (v2f64 (X86Unpckh VR128X:$src, VR128X:$src)),
5302 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>,
5303 EVEX, EVEX_CD8<64, CD8VT1>, VEX_W;
5304 def VMOVLPSZ128mr : AVX512PSI<0x13, MRMDestMem, (outs),
5305 (ins f64mem:$dst, VR128X:$src),
5306 "vmovlps\t{$src, $dst|$dst, $src}",
5307 [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128X:$src)),
5308 (iPTR 0))), addr:$dst)],
5310 EVEX, EVEX_CD8<32, CD8VT2>;
5311 def VMOVLPDZ128mr : AVX512PDI<0x13, MRMDestMem, (outs),
5312 (ins f64mem:$dst, VR128X:$src),
5313 "vmovlpd\t{$src, $dst|$dst, $src}",
5314 [(store (f64 (extractelt (v2f64 VR128X:$src),
5315 (iPTR 0))), addr:$dst)],
5317 EVEX, EVEX_CD8<64, CD8VT1>, VEX_W;
// Fold alternative DAG shapes (VPermilpi high-lane extract, Movlps/Movlpd
// merge-and-store) into the store instructions above.
5319 let Predicates = [HasAVX512] in {
5321 def : Pat<(store (f64 (extractelt
5322 (v2f64 (X86VPermilpi VR128X:$src, (i8 1))),
5323 (iPTR 0))), addr:$dst),
5324 (VMOVHPDZ128mr addr:$dst, VR128X:$src)>;
5326 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128X:$src2)),
5328 (VMOVLPSZ128mr addr:$src1, VR128X:$src2)>;
5329 def : Pat<(store (v4i32 (X86Movlps
5330 (bc_v4i32 (loadv2i64 addr:$src1)), VR128X:$src2)), addr:$src1),
5331 (VMOVLPSZ128mr addr:$src1, VR128X:$src2)>;
5333 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128X:$src2)),
5335 (VMOVLPDZ128mr addr:$src1, VR128X:$src2)>;
5336 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128X:$src2)),
5338 (VMOVLPDZ128mr addr:$src1, VR128X:$src2)>;
5340 //===----------------------------------------------------------------------===//
5341 // FMA - Fused Multiply Operations
5344 multiclass avx512_fma3p_213_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5345 X86VectorVTInfo _, string Suff> {
5346 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
5347 defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5348 (ins _.RC:$src2, _.RC:$src3),
5349 OpcodeStr, "$src3, $src2", "$src2, $src3",
5350 (_.VT (OpNode _.RC:$src2, _.RC:$src1, _.RC:$src3)), 1, 1>,
5353 defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5354 (ins _.RC:$src2, _.MemOp:$src3),
5355 OpcodeStr, "$src3, $src2", "$src2, $src3",
5356 (_.VT (OpNode _.RC:$src2, _.RC:$src1, (_.LdFrag addr:$src3))), 1, 0>,
5359 defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5360 (ins _.RC:$src2, _.ScalarMemOp:$src3),
5361 OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
5362 !strconcat("$src2, ${src3}", _.BroadcastStr ),
5364 _.RC:$src1,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3)))), 1, 0>,
5365 AVX512FMA3Base, EVEX_B;
5368 // Additional pattern for folding broadcast nodes in other orders.
5369 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5370 (OpNode _.RC:$src1, _.RC:$src2,
5371 (X86VBroadcast (_.ScalarLdFrag addr:$src3))),
5373 (!cast<Instruction>(NAME#Suff#_.ZSuffix#mbk) _.RC:$src1,
5374 _.KRCWM:$mask, _.RC:$src2, addr:$src3)>;
5377 multiclass avx512_fma3_213_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
5378 X86VectorVTInfo _, string Suff> {
5379 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in
5380 defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5381 (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
5382 OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc",
5383 (_.VT ( OpNode _.RC:$src2, _.RC:$src1, _.RC:$src3, (i32 imm:$rc))), 1, 1>,
5384 AVX512FMA3Base, EVEX_B, EVEX_RC;
5387 multiclass avx512_fma3p_213_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
5388 SDNode OpNodeRnd, AVX512VLVectorVTInfo _,
5390 let Predicates = [HasAVX512] in {
5391 defm Z : avx512_fma3p_213_rm<opc, OpcodeStr, OpNode, _.info512, Suff>,
5392 avx512_fma3_213_round<opc, OpcodeStr, OpNodeRnd, _.info512,
5393 Suff>, EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>;
5395 let Predicates = [HasVLX, HasAVX512] in {
5396 defm Z256 : avx512_fma3p_213_rm<opc, OpcodeStr, OpNode, _.info256, Suff>,
5397 EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>;
5398 defm Z128 : avx512_fma3p_213_rm<opc, OpcodeStr, OpNode, _.info128, Suff>,
5399 EVEX_V128, EVEX_CD8<_.info128.EltSize, CD8VF>;
5403 multiclass avx512_fma3p_213_f<bits<8> opc, string OpcodeStr, SDNode OpNode,
5404 SDNode OpNodeRnd > {
5405 defm PS : avx512_fma3p_213_common<opc, OpcodeStr#"ps", OpNode, OpNodeRnd,
5406 avx512vl_f32_info, "PS">;
5407 defm PD : avx512_fma3p_213_common<opc, OpcodeStr#"pd", OpNode, OpNodeRnd,
5408 avx512vl_f64_info, "PD">, VEX_W;
5411 defm VFMADD213 : avx512_fma3p_213_f<0xA8, "vfmadd213", X86Fmadd, X86FmaddRnd>;
5412 defm VFMSUB213 : avx512_fma3p_213_f<0xAA, "vfmsub213", X86Fmsub, X86FmsubRnd>;
5413 defm VFMADDSUB213 : avx512_fma3p_213_f<0xA6, "vfmaddsub213", X86Fmaddsub, X86FmaddsubRnd>;
5414 defm VFMSUBADD213 : avx512_fma3p_213_f<0xA7, "vfmsubadd213", X86Fmsubadd, X86FmsubaddRnd>;
5415 defm VFNMADD213 : avx512_fma3p_213_f<0xAC, "vfnmadd213", X86Fnmadd, X86FnmaddRnd>;
5416 defm VFNMSUB213 : avx512_fma3p_213_f<0xAE, "vfnmsub213", X86Fnmsub, X86FnmsubRnd>;
5419 multiclass avx512_fma3p_231_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5420 X86VectorVTInfo _, string Suff> {
5421 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
5422 defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5423 (ins _.RC:$src2, _.RC:$src3),
5424 OpcodeStr, "$src3, $src2", "$src2, $src3",
5425 (_.VT (OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1)), 1, 1>,
5428 defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5429 (ins _.RC:$src2, _.MemOp:$src3),
5430 OpcodeStr, "$src3, $src2", "$src2, $src3",
5431 (_.VT (OpNode _.RC:$src2, (_.LdFrag addr:$src3), _.RC:$src1)), 1, 0>,
5434 defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5435 (ins _.RC:$src2, _.ScalarMemOp:$src3),
5436 OpcodeStr, "${src3}"##_.BroadcastStr##", $src2",
5437 "$src2, ${src3}"##_.BroadcastStr,
5438 (_.VT (OpNode _.RC:$src2,
5439 (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
5440 _.RC:$src1)), 1, 0>, AVX512FMA3Base, EVEX_B;
5443 // Additional patterns for folding broadcast nodes in other orders.
5444 def : Pat<(_.VT (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
5445 _.RC:$src2, _.RC:$src1)),
5446 (!cast<Instruction>(NAME#Suff#_.ZSuffix#mb) _.RC:$src1,
5447 _.RC:$src2, addr:$src3)>;
5448 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5449 (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
5450 _.RC:$src2, _.RC:$src1),
5452 (!cast<Instruction>(NAME#Suff#_.ZSuffix#mbk) _.RC:$src1,
5453 _.KRCWM:$mask, _.RC:$src2, addr:$src3)>;
5454 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5455 (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
5456 _.RC:$src2, _.RC:$src1),
5458 (!cast<Instruction>(NAME#Suff#_.ZSuffix#mbkz) _.RC:$src1,
5459 _.KRCWM:$mask, _.RC:$src2, addr:$src3)>;
5462 multiclass avx512_fma3_231_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
5463 X86VectorVTInfo _, string Suff> {
5464 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in
5465 defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5466 (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
5467 OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc",
5468 (_.VT ( OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1, (i32 imm:$rc))), 1, 1>,
5469 AVX512FMA3Base, EVEX_B, EVEX_RC;
5472 multiclass avx512_fma3p_231_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
5473 SDNode OpNodeRnd, AVX512VLVectorVTInfo _,
5475 let Predicates = [HasAVX512] in {
5476 defm Z : avx512_fma3p_231_rm<opc, OpcodeStr, OpNode, _.info512, Suff>,
5477 avx512_fma3_231_round<opc, OpcodeStr, OpNodeRnd, _.info512,
5478 Suff>, EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>;
5480 let Predicates = [HasVLX, HasAVX512] in {
5481 defm Z256 : avx512_fma3p_231_rm<opc, OpcodeStr, OpNode, _.info256, Suff>,
5482 EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>;
5483 defm Z128 : avx512_fma3p_231_rm<opc, OpcodeStr, OpNode, _.info128, Suff>,
5484 EVEX_V128, EVEX_CD8<_.info128.EltSize, CD8VF>;
5488 multiclass avx512_fma3p_231_f<bits<8> opc, string OpcodeStr, SDNode OpNode,
5489 SDNode OpNodeRnd > {
5490 defm PS : avx512_fma3p_231_common<opc, OpcodeStr#"ps", OpNode, OpNodeRnd,
5491 avx512vl_f32_info, "PS">;
5492 defm PD : avx512_fma3p_231_common<opc, OpcodeStr#"pd", OpNode, OpNodeRnd,
5493 avx512vl_f64_info, "PD">, VEX_W;
5496 defm VFMADD231 : avx512_fma3p_231_f<0xB8, "vfmadd231", X86Fmadd, X86FmaddRnd>;
5497 defm VFMSUB231 : avx512_fma3p_231_f<0xBA, "vfmsub231", X86Fmsub, X86FmsubRnd>;
5498 defm VFMADDSUB231 : avx512_fma3p_231_f<0xB6, "vfmaddsub231", X86Fmaddsub, X86FmaddsubRnd>;
5499 defm VFMSUBADD231 : avx512_fma3p_231_f<0xB7, "vfmsubadd231", X86Fmsubadd, X86FmsubaddRnd>;
5500 defm VFNMADD231 : avx512_fma3p_231_f<0xBC, "vfnmadd231", X86Fnmadd, X86FnmaddRnd>;
5501 defm VFNMSUB231 : avx512_fma3p_231_f<0xBE, "vfnmsub231", X86Fnmsub, X86FnmsubRnd>;
5503 multiclass avx512_fma3p_132_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5504 X86VectorVTInfo _, string Suff> {
5505 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
5506 defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5507 (ins _.RC:$src2, _.RC:$src3),
5508 OpcodeStr, "$src3, $src2", "$src2, $src3",
5509 (_.VT (OpNode _.RC:$src1, _.RC:$src3, _.RC:$src2)), 1, 1>,
5512 defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5513 (ins _.RC:$src2, _.MemOp:$src3),
5514 OpcodeStr, "$src3, $src2", "$src2, $src3",
5515 (_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src3), _.RC:$src2)), 1, 0>,
5518 defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5519 (ins _.RC:$src2, _.ScalarMemOp:$src3),
5520 OpcodeStr, "${src3}"##_.BroadcastStr##", $src2",
5521 "$src2, ${src3}"##_.BroadcastStr,
5522 (_.VT (OpNode _.RC:$src1,
5523 (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
5524 _.RC:$src2)), 1, 0>, AVX512FMA3Base, EVEX_B;
5527 // Additional patterns for folding broadcast nodes in other orders.
5528 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5529 (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
5530 _.RC:$src1, _.RC:$src2),
5532 (!cast<Instruction>(NAME#Suff#_.ZSuffix#mbk) _.RC:$src1,
5533 _.KRCWM:$mask, _.RC:$src2, addr:$src3)>;
5536 multiclass avx512_fma3_132_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
5537 X86VectorVTInfo _, string Suff> {
5538 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in
5539 defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5540 (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
5541 OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc",
5542 (_.VT ( OpNode _.RC:$src1, _.RC:$src3, _.RC:$src2, (i32 imm:$rc))), 1, 1>,
5543 AVX512FMA3Base, EVEX_B, EVEX_RC;
5546 multiclass avx512_fma3p_132_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
5547 SDNode OpNodeRnd, AVX512VLVectorVTInfo _,
5549 let Predicates = [HasAVX512] in {
5550 defm Z : avx512_fma3p_132_rm<opc, OpcodeStr, OpNode, _.info512, Suff>,
5551 avx512_fma3_132_round<opc, OpcodeStr, OpNodeRnd, _.info512,
5552 Suff>, EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>;
5554 let Predicates = [HasVLX, HasAVX512] in {
5555 defm Z256 : avx512_fma3p_132_rm<opc, OpcodeStr, OpNode, _.info256, Suff>,
5556 EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>;
5557 defm Z128 : avx512_fma3p_132_rm<opc, OpcodeStr, OpNode, _.info128, Suff>,
5558 EVEX_V128, EVEX_CD8<_.info128.EltSize, CD8VF>;
5562 multiclass avx512_fma3p_132_f<bits<8> opc, string OpcodeStr, SDNode OpNode,
5563 SDNode OpNodeRnd > {
5564 defm PS : avx512_fma3p_132_common<opc, OpcodeStr#"ps", OpNode, OpNodeRnd,
5565 avx512vl_f32_info, "PS">;
5566 defm PD : avx512_fma3p_132_common<opc, OpcodeStr#"pd", OpNode, OpNodeRnd,
5567 avx512vl_f64_info, "PD">, VEX_W;
5570 defm VFMADD132 : avx512_fma3p_132_f<0x98, "vfmadd132", X86Fmadd, X86FmaddRnd>;
5571 defm VFMSUB132 : avx512_fma3p_132_f<0x9A, "vfmsub132", X86Fmsub, X86FmsubRnd>;
5572 defm VFMADDSUB132 : avx512_fma3p_132_f<0x96, "vfmaddsub132", X86Fmaddsub, X86FmaddsubRnd>;
5573 defm VFMSUBADD132 : avx512_fma3p_132_f<0x97, "vfmsubadd132", X86Fmsubadd, X86FmsubaddRnd>;
5574 defm VFNMADD132 : avx512_fma3p_132_f<0x9C, "vfnmadd132", X86Fnmadd, X86FnmaddRnd>;
5575 defm VFNMSUB132 : avx512_fma3p_132_f<0x9E, "vfnmsub132", X86Fnmsub, X86FnmsubRnd>;
5578 let Constraints = "$src1 = $dst" in {
5579 multiclass avx512_fma3s_common<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
5580 dag RHS_VEC_r, dag RHS_VEC_m, dag RHS_VEC_rb,
5581 dag RHS_r, dag RHS_m > {
5582 defm r_Int: AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
5583 (ins _.RC:$src2, _.RC:$src3), OpcodeStr,
5584 "$src3, $src2", "$src2, $src3", RHS_VEC_r, 1, 1>, AVX512FMA3Base;
5586 defm m_Int: AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
5587 (ins _.RC:$src2, _.ScalarMemOp:$src3), OpcodeStr,
5588 "$src3, $src2", "$src2, $src3", RHS_VEC_m, 1, 1>, AVX512FMA3Base;
5590 defm rb_Int: AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
5591 (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
5592 OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc", RHS_VEC_rb, 1, 1>,
5593 AVX512FMA3Base, EVEX_B, EVEX_RC;
5595 let isCodeGenOnly = 1, isCommutable = 1 in {
5596 def r : AVX512FMA3<opc, MRMSrcReg, (outs _.FRC:$dst),
5597 (ins _.FRC:$src1, _.FRC:$src2, _.FRC:$src3),
5598 !strconcat(OpcodeStr,
5599 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5601 def m : AVX512FMA3<opc, MRMSrcMem, (outs _.FRC:$dst),
5602 (ins _.FRC:$src1, _.FRC:$src2, _.ScalarMemOp:$src3),
5603 !strconcat(OpcodeStr,
5604 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5606 }// isCodeGenOnly = 1
5608 }// Constraints = "$src1 = $dst"
5610 multiclass avx512_fma3s_all<bits<8> opc213, bits<8> opc231, bits<8> opc132,
5611 string OpcodeStr, SDNode OpNode, SDNode OpNodeRnds1,
5612 SDNode OpNodeRnds3, X86VectorVTInfo _ , string SUFF> {
5614 defm NAME#213#SUFF#Z: avx512_fma3s_common<opc213, OpcodeStr#"213"#_.Suffix , _ ,
5615 // Operands for intrinsic are in 123 order to preserve passthu
5617 (_.VT (OpNodeRnds1 _.RC:$src1, _.RC:$src2, _.RC:$src3, (i32 FROUND_CURRENT))),
5618 (_.VT (OpNodeRnds1 _.RC:$src1, _.RC:$src2,
5619 (_.VT (scalar_to_vector(_.ScalarLdFrag addr:$src3))), (i32 FROUND_CURRENT))),
5620 (_.VT (OpNodeRnds1 _.RC:$src1, _.RC:$src2, _.RC:$src3,
5622 (set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src2, _.FRC:$src1,
5624 (set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src2, _.FRC:$src1,
5625 (_.ScalarLdFrag addr:$src3))))>;
5627 defm NAME#231#SUFF#Z: avx512_fma3s_common<opc231, OpcodeStr#"231"#_.Suffix , _ ,
5628 (_.VT (OpNodeRnds3 _.RC:$src2, _.RC:$src3, _.RC:$src1, (i32 FROUND_CURRENT))),
5629 (_.VT (OpNodeRnds3 _.RC:$src2,
5630 (_.VT (scalar_to_vector(_.ScalarLdFrag addr:$src3))),
5631 _.RC:$src1, (i32 FROUND_CURRENT))),
5632 (_.VT ( OpNodeRnds3 _.RC:$src2, _.RC:$src3, _.RC:$src1,
5634 (set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src2, _.FRC:$src3,
5636 (set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src2,
5637 (_.ScalarLdFrag addr:$src3), _.FRC:$src1)))>;
5639 defm NAME#132#SUFF#Z: avx512_fma3s_common<opc132, OpcodeStr#"132"#_.Suffix , _ ,
5640 (_.VT (OpNodeRnds1 _.RC:$src1, _.RC:$src3, _.RC:$src2, (i32 FROUND_CURRENT))),
5641 (_.VT (OpNodeRnds1 _.RC:$src1,
5642 (_.VT (scalar_to_vector(_.ScalarLdFrag addr:$src3))),
5643 _.RC:$src2, (i32 FROUND_CURRENT))),
5644 (_.VT (OpNodeRnds1 _.RC:$src1, _.RC:$src3, _.RC:$src2,
5646 (set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src1, _.FRC:$src3,
5648 (set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src1,
5649 (_.ScalarLdFrag addr:$src3), _.FRC:$src2)))>;
5652 multiclass avx512_fma3s<bits<8> opc213, bits<8> opc231, bits<8> opc132,
5653 string OpcodeStr, SDNode OpNode, SDNode OpNodeRnds1,
5654 SDNode OpNodeRnds3> {
5655 let Predicates = [HasAVX512] in {
5656 defm NAME : avx512_fma3s_all<opc213, opc231, opc132, OpcodeStr, OpNode,
5657 OpNodeRnds1, OpNodeRnds3, f32x_info, "SS">,
5658 EVEX_CD8<32, CD8VT1>, VEX_LIG;
5659 defm NAME : avx512_fma3s_all<opc213, opc231, opc132, OpcodeStr, OpNode,
5660 OpNodeRnds1, OpNodeRnds3, f64x_info, "SD">,
5661 EVEX_CD8<64, CD8VT1>, VEX_LIG, VEX_W;
5665 defm VFMADD : avx512_fma3s<0xA9, 0xB9, 0x99, "vfmadd", X86Fmadd, X86FmaddRnds1,
5667 defm VFMSUB : avx512_fma3s<0xAB, 0xBB, 0x9B, "vfmsub", X86Fmsub, X86FmsubRnds1,
5669 defm VFNMADD : avx512_fma3s<0xAD, 0xBD, 0x9D, "vfnmadd", X86Fnmadd,
5670 X86FnmaddRnds1, X86FnmaddRnds3>;
5671 defm VFNMSUB : avx512_fma3s<0xAF, 0xBF, 0x9F, "vfnmsub", X86Fnmsub,
5672 X86FnmsubRnds1, X86FnmsubRnds3>;
5674 //===----------------------------------------------------------------------===//
5675 // AVX-512 Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit IFMA
5676 //===----------------------------------------------------------------------===//
5677 let Constraints = "$src1 = $dst" in {
5678 multiclass avx512_pmadd52_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5679 X86VectorVTInfo _> {
5680 defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5681 (ins _.RC:$src2, _.RC:$src3),
5682 OpcodeStr, "$src3, $src2", "$src2, $src3",
5683 (_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>,
5686 defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5687 (ins _.RC:$src2, _.MemOp:$src3),
5688 OpcodeStr, "$src3, $src2", "$src2, $src3",
5689 (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>,
5692 defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5693 (ins _.RC:$src2, _.ScalarMemOp:$src3),
5694 OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
5695 !strconcat("$src2, ${src3}", _.BroadcastStr ),
5697 _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
5698 AVX512FMA3Base, EVEX_B;
5700 } // Constraints = "$src1 = $dst"
5702 multiclass avx512_pmadd52_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
5703 AVX512VLVectorVTInfo _> {
5704 let Predicates = [HasIFMA] in {
5705 defm Z : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info512>,
5706 EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>;
5708 let Predicates = [HasVLX, HasIFMA] in {
5709 defm Z256 : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info256>,
5710 EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>;
5711 defm Z128 : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info128>,
5712 EVEX_V128, EVEX_CD8<_.info128.EltSize, CD8VF>;
5716 defm VPMADD52LUQ : avx512_pmadd52_common<0xb4, "vpmadd52luq", x86vpmadd52l,
5717 avx512vl_i64_info>, VEX_W;
5718 defm VPMADD52HUQ : avx512_pmadd52_common<0xb5, "vpmadd52huq", x86vpmadd52h,
5719 avx512vl_i64_info>, VEX_W;
5721 //===----------------------------------------------------------------------===//
5722 // AVX-512 Scalar convert from sign integer to float/double
5723 //===----------------------------------------------------------------------===//
5725 multiclass avx512_vcvtsi<bits<8> opc, SDNode OpNode, RegisterClass SrcRC,
5726 X86VectorVTInfo DstVT, X86MemOperand x86memop,
5727 PatFrag ld_frag, string asm> {
5728 let hasSideEffects = 0 in {
5729 def rr : SI<opc, MRMSrcReg, (outs DstVT.FRC:$dst),
5730 (ins DstVT.FRC:$src1, SrcRC:$src),
5731 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
5734 def rm : SI<opc, MRMSrcMem, (outs DstVT.FRC:$dst),
5735 (ins DstVT.FRC:$src1, x86memop:$src),
5736 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
5738 } // hasSideEffects = 0
5739 let isCodeGenOnly = 1 in {
5740 def rr_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst),
5741 (ins DstVT.RC:$src1, SrcRC:$src2),
5742 !strconcat(asm,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5743 [(set DstVT.RC:$dst,
5744 (OpNode (DstVT.VT DstVT.RC:$src1),
5746 (i32 FROUND_CURRENT)))]>, EVEX_4V;
5748 def rm_Int : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst),
5749 (ins DstVT.RC:$src1, x86memop:$src2),
5750 !strconcat(asm,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5751 [(set DstVT.RC:$dst,
5752 (OpNode (DstVT.VT DstVT.RC:$src1),
5753 (ld_frag addr:$src2),
5754 (i32 FROUND_CURRENT)))]>, EVEX_4V;
5755 }//isCodeGenOnly = 1
5758 multiclass avx512_vcvtsi_round<bits<8> opc, SDNode OpNode, RegisterClass SrcRC,
5759 X86VectorVTInfo DstVT, string asm> {
5760 def rrb_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst),
5761 (ins DstVT.RC:$src1, SrcRC:$src2, AVX512RC:$rc),
5763 "\t{$src2, $rc, $src1, $dst|$dst, $src1, $rc, $src2}"),
5764 [(set DstVT.RC:$dst,
5765 (OpNode (DstVT.VT DstVT.RC:$src1),
5767 (i32 imm:$rc)))]>, EVEX_4V, EVEX_B, EVEX_RC;
5770 multiclass avx512_vcvtsi_common<bits<8> opc, SDNode OpNode, RegisterClass SrcRC,
5771 X86VectorVTInfo DstVT, X86MemOperand x86memop,
5772 PatFrag ld_frag, string asm> {
5773 defm NAME : avx512_vcvtsi_round<opc, OpNode, SrcRC, DstVT, asm>,
5774 avx512_vcvtsi<opc, OpNode, SrcRC, DstVT, x86memop, ld_frag, asm>,
5778 let Predicates = [HasAVX512] in {
5779 defm VCVTSI2SSZ : avx512_vcvtsi_common<0x2A, X86SintToFpRnd, GR32,
5780 v4f32x_info, i32mem, loadi32, "cvtsi2ss{l}">,
5781 XS, EVEX_CD8<32, CD8VT1>;
5782 defm VCVTSI642SSZ: avx512_vcvtsi_common<0x2A, X86SintToFpRnd, GR64,
5783 v4f32x_info, i64mem, loadi64, "cvtsi2ss{q}">,
5784 XS, VEX_W, EVEX_CD8<64, CD8VT1>;
5785 defm VCVTSI2SDZ : avx512_vcvtsi_common<0x2A, X86SintToFpRnd, GR32,
5786 v2f64x_info, i32mem, loadi32, "cvtsi2sd{l}">,
5787 XD, EVEX_CD8<32, CD8VT1>;
5788 defm VCVTSI642SDZ: avx512_vcvtsi_common<0x2A, X86SintToFpRnd, GR64,
5789 v2f64x_info, i64mem, loadi64, "cvtsi2sd{q}">,
5790 XD, VEX_W, EVEX_CD8<64, CD8VT1>;
5792 def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
5793 (VCVTSI2SSZrm FR64X:$dst, FR64X:$src1, i32mem:$src), 0>;
5794 def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
5795 (VCVTSI2SDZrm FR64X:$dst, FR64X:$src1, i32mem:$src), 0>;
5797 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
5798 (VCVTSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
5799 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
5800 (VCVTSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
5801 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
5802 (VCVTSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
5803 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
5804 (VCVTSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
5806 def : Pat<(f32 (sint_to_fp GR32:$src)),
5807 (VCVTSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
5808 def : Pat<(f32 (sint_to_fp GR64:$src)),
5809 (VCVTSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
5810 def : Pat<(f64 (sint_to_fp GR32:$src)),
5811 (VCVTSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
5812 def : Pat<(f64 (sint_to_fp GR64:$src)),
5813 (VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
5815 defm VCVTUSI2SSZ : avx512_vcvtsi_common<0x7B, X86UintToFpRnd, GR32,
5816 v4f32x_info, i32mem, loadi32,
5817 "cvtusi2ss{l}">, XS, EVEX_CD8<32, CD8VT1>;
5818 defm VCVTUSI642SSZ : avx512_vcvtsi_common<0x7B, X86UintToFpRnd, GR64,
5819 v4f32x_info, i64mem, loadi64, "cvtusi2ss{q}">,
5820 XS, VEX_W, EVEX_CD8<64, CD8VT1>;
5821 defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, X86UintToFpRnd, GR32, v2f64x_info,
5822 i32mem, loadi32, "cvtusi2sd{l}">,
5823 XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
5824 defm VCVTUSI642SDZ : avx512_vcvtsi_common<0x7B, X86UintToFpRnd, GR64,
5825 v2f64x_info, i64mem, loadi64, "cvtusi2sd{q}">,
5826 XD, VEX_W, EVEX_CD8<64, CD8VT1>;
5828 def : InstAlias<"vcvtusi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
5829 (VCVTUSI2SSZrm FR64X:$dst, FR64X:$src1, i32mem:$src), 0>;
5830 def : InstAlias<"vcvtusi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
5831 (VCVTUSI2SDZrm FR64X:$dst, FR64X:$src1, i32mem:$src), 0>;
5833 def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))),
5834 (VCVTUSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
5835 def : Pat<(f32 (uint_to_fp (loadi64 addr:$src))),
5836 (VCVTUSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
5837 def : Pat<(f64 (uint_to_fp (loadi32 addr:$src))),
5838 (VCVTUSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
5839 def : Pat<(f64 (uint_to_fp (loadi64 addr:$src))),
5840 (VCVTUSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
5842 def : Pat<(f32 (uint_to_fp GR32:$src)),
5843 (VCVTUSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
5844 def : Pat<(f32 (uint_to_fp GR64:$src)),
5845 (VCVTUSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
5846 def : Pat<(f64 (uint_to_fp GR32:$src)),
5847 (VCVTUSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
5848 def : Pat<(f64 (uint_to_fp GR64:$src)),
5849 (VCVTUSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
5852 //===----------------------------------------------------------------------===//
5853 // AVX-512 Scalar convert from float/double to integer
5854 //===----------------------------------------------------------------------===//
5855 multiclass avx512_cvt_s_int_round<bits<8> opc, X86VectorVTInfo SrcVT ,
5856 X86VectorVTInfo DstVT, SDNode OpNode, string asm> {
5857 let Predicates = [HasAVX512] in {
5858 def rr : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src),
5859 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
5860 [(set DstVT.RC:$dst, (OpNode (SrcVT.VT SrcVT.RC:$src),(i32 FROUND_CURRENT)))]>,
5862 def rb : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src, AVX512RC:$rc),
5863 !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
5864 [(set DstVT.RC:$dst, (OpNode (SrcVT.VT SrcVT.RC:$src),(i32 imm:$rc)))]>,
5865 EVEX, VEX_LIG, EVEX_B, EVEX_RC;
5866 def rm : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst), (ins SrcVT.ScalarMemOp:$src),
5867 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
5868 [(set DstVT.RC:$dst, (OpNode
5869 (SrcVT.VT (scalar_to_vector (SrcVT.ScalarLdFrag addr:$src))),
5870 (i32 FROUND_CURRENT)))]>,
5872 } // Predicates = [HasAVX512]
5875 // Convert float/double to signed/unsigned int 32/64
5876 defm VCVTSS2SIZ: avx512_cvt_s_int_round<0x2D, f32x_info, i32x_info,
5877 X86cvts2si, "cvtss2si">,
5878 XS, EVEX_CD8<32, CD8VT1>;
5879 defm VCVTSS2SI64Z: avx512_cvt_s_int_round<0x2D, f32x_info, i64x_info,
5880 X86cvts2si, "cvtss2si">,
5881 XS, VEX_W, EVEX_CD8<32, CD8VT1>;
5882 defm VCVTSS2USIZ: avx512_cvt_s_int_round<0x79, f32x_info, i32x_info,
5883 X86cvts2usi, "cvtss2usi">,
5884 XS, EVEX_CD8<32, CD8VT1>;
5885 defm VCVTSS2USI64Z: avx512_cvt_s_int_round<0x79, f32x_info, i64x_info,
5886 X86cvts2usi, "cvtss2usi">, XS, VEX_W,
5887 EVEX_CD8<32, CD8VT1>;
5888 defm VCVTSD2SIZ: avx512_cvt_s_int_round<0x2D, f64x_info, i32x_info,
5889 X86cvts2si, "cvtsd2si">,
5890 XD, EVEX_CD8<64, CD8VT1>;
5891 defm VCVTSD2SI64Z: avx512_cvt_s_int_round<0x2D, f64x_info, i64x_info,
5892 X86cvts2si, "cvtsd2si">,
5893 XD, VEX_W, EVEX_CD8<64, CD8VT1>;
5894 defm VCVTSD2USIZ: avx512_cvt_s_int_round<0x79, f64x_info, i32x_info,
5895 X86cvts2usi, "cvtsd2usi">,
5896 XD, EVEX_CD8<64, CD8VT1>;
5897 defm VCVTSD2USI64Z: avx512_cvt_s_int_round<0x79, f64x_info, i64x_info,
5898 X86cvts2usi, "cvtsd2usi">, XD, VEX_W,
5899 EVEX_CD8<64, CD8VT1>;
5901 // The SSE version of these instructions are disabled for AVX512.
5902 // Therefore, the SSE intrinsics are mapped to the AVX512 instructions.
5903 let Predicates = [HasAVX512] in {
5904 def : Pat<(i32 (int_x86_sse_cvtss2si (v4f32 VR128X:$src))),
5905 (VCVTSS2SIZrr VR128X:$src)>;
5906 def : Pat<(i32 (int_x86_sse_cvtss2si (sse_load_f32 addr:$src))),
5907 (VCVTSS2SIZrm addr:$src)>;
5908 def : Pat<(i64 (int_x86_sse_cvtss2si64 (v4f32 VR128X:$src))),
5909 (VCVTSS2SI64Zrr VR128X:$src)>;
5910 def : Pat<(i64 (int_x86_sse_cvtss2si64 (sse_load_f32 addr:$src))),
5911 (VCVTSS2SI64Zrm addr:$src)>;
5912 def : Pat<(i32 (int_x86_sse2_cvtsd2si (v2f64 VR128X:$src))),
5913 (VCVTSD2SIZrr VR128X:$src)>;
5914 def : Pat<(i32 (int_x86_sse2_cvtsd2si (sse_load_f64 addr:$src))),
5915 (VCVTSD2SIZrm addr:$src)>;
5916 def : Pat<(i64 (int_x86_sse2_cvtsd2si64 (v2f64 VR128X:$src))),
5917 (VCVTSD2SI64Zrr VR128X:$src)>;
5918 def : Pat<(i64 (int_x86_sse2_cvtsd2si64 (sse_load_f64 addr:$src))),
5919 (VCVTSD2SI64Zrm addr:$src)>;
5922 let Predicates = [HasAVX512] in {
5923 def : Pat<(int_x86_sse_cvtsi2ss VR128X:$src1, GR32:$src2),
5924 (VCVTSI2SSZrr_Int VR128X:$src1, GR32:$src2)>;
5925 def : Pat<(int_x86_sse_cvtsi2ss VR128X:$src1, (loadi32 addr:$src2)),
5926 (VCVTSI2SSZrm_Int VR128X:$src1, addr:$src2)>;
5927 def : Pat<(int_x86_sse_cvtsi642ss VR128X:$src1, GR64:$src2),
5928 (VCVTSI642SSZrr_Int VR128X:$src1, GR64:$src2)>;
5929 def : Pat<(int_x86_sse_cvtsi642ss VR128X:$src1, (loadi64 addr:$src2)),
5930 (VCVTSI642SSZrm_Int VR128X:$src1, addr:$src2)>;
5931 def : Pat<(int_x86_sse2_cvtsi2sd VR128X:$src1, GR32:$src2),
5932 (VCVTSI2SDZrr_Int VR128X:$src1, GR32:$src2)>;
5933 def : Pat<(int_x86_sse2_cvtsi2sd VR128X:$src1, (loadi32 addr:$src2)),
5934 (VCVTSI2SDZrm_Int VR128X:$src1, addr:$src2)>;
5935 def : Pat<(int_x86_sse2_cvtsi642sd VR128X:$src1, GR64:$src2),
5936 (VCVTSI642SDZrr_Int VR128X:$src1, GR64:$src2)>;
5937 def : Pat<(int_x86_sse2_cvtsi642sd VR128X:$src1, (loadi64 addr:$src2)),
5938 (VCVTSI642SDZrm_Int VR128X:$src1, addr:$src2)>;
5939 def : Pat<(int_x86_avx512_cvtusi2sd VR128X:$src1, GR32:$src2),
5940 (VCVTUSI2SDZrr_Int VR128X:$src1, GR32:$src2)>;
5941 def : Pat<(int_x86_avx512_cvtusi2sd VR128X:$src1, (loadi32 addr:$src2)),
5942 (VCVTUSI2SDZrm_Int VR128X:$src1, addr:$src2)>;
5943 } // Predicates = [HasAVX512]
5945 // Convert float/double to signed/unsigned int 32/64 with truncation
5946 multiclass avx512_cvt_s_all<bits<8> opc, string asm, X86VectorVTInfo _SrcRC,
5947 X86VectorVTInfo _DstRC, SDNode OpNode,
5948 SDNode OpNodeRnd, string aliasStr>{
5949 let Predicates = [HasAVX512] in {
5950 def rr : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.FRC:$src),
5951 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
5952 [(set _DstRC.RC:$dst, (OpNode _SrcRC.FRC:$src))]>, EVEX;
5953 let hasSideEffects = 0 in
5954 def rb : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.FRC:$src),
5955 !strconcat(asm,"\t{{sae}, $src, $dst|$dst, $src, {sae}}"),
5957 def rm : AVX512<opc, MRMSrcMem, (outs _DstRC.RC:$dst), (ins _SrcRC.ScalarMemOp:$src),
5958 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
5959 [(set _DstRC.RC:$dst, (OpNode (_SrcRC.ScalarLdFrag addr:$src)))]>,
5962 def : InstAlias<asm # aliasStr # "\t{$src, $dst|$dst, $src}",
5963 (!cast<Instruction>(NAME # "rr") _DstRC.RC:$dst, _SrcRC.FRC:$src), 0>;
5964 def : InstAlias<asm # aliasStr # "\t\t{{sae}, $src, $dst|$dst, $src, {sae}}",
5965 (!cast<Instruction>(NAME # "rb") _DstRC.RC:$dst, _SrcRC.FRC:$src), 0>;
5966 def : InstAlias<asm # aliasStr # "\t{$src, $dst|$dst, $src}",
5967 (!cast<Instruction>(NAME # "rm") _DstRC.RC:$dst,
5968 _SrcRC.ScalarMemOp:$src), 0>;
5970 let isCodeGenOnly = 1 in {
5971 def rr_Int : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.RC:$src),
5972 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
5973 [(set _DstRC.RC:$dst, (OpNodeRnd (_SrcRC.VT _SrcRC.RC:$src),
5974 (i32 FROUND_CURRENT)))]>, EVEX, VEX_LIG;
5975 def rb_Int : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.RC:$src),
5976 !strconcat(asm,"\t{{sae}, $src, $dst|$dst, $src, {sae}}"),
5977 [(set _DstRC.RC:$dst, (OpNodeRnd (_SrcRC.VT _SrcRC.RC:$src),
5978 (i32 FROUND_NO_EXC)))]>,
5979 EVEX,VEX_LIG , EVEX_B;
5980 let mayLoad = 1, hasSideEffects = 0 in
5981 def rm_Int : AVX512<opc, MRMSrcMem, (outs _DstRC.RC:$dst),
5982 (ins _SrcRC.MemOp:$src),
5983 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
5986 } // isCodeGenOnly = 1
5991 defm VCVTTSS2SIZ: avx512_cvt_s_all<0x2C, "vcvttss2si", f32x_info, i32x_info,
5992 fp_to_sint, X86cvtts2IntRnd, "{l}">,
5993 XS, EVEX_CD8<32, CD8VT1>;
5994 defm VCVTTSS2SI64Z: avx512_cvt_s_all<0x2C, "vcvttss2si", f32x_info, i64x_info,
5995 fp_to_sint, X86cvtts2IntRnd, "{q}">,
5996 VEX_W, XS, EVEX_CD8<32, CD8VT1>;
5997 defm VCVTTSD2SIZ: avx512_cvt_s_all<0x2C, "vcvttsd2si", f64x_info, i32x_info,
5998 fp_to_sint, X86cvtts2IntRnd, "{l}">,
5999 XD, EVEX_CD8<64, CD8VT1>;
6000 defm VCVTTSD2SI64Z: avx512_cvt_s_all<0x2C, "vcvttsd2si", f64x_info, i64x_info,
6001 fp_to_sint, X86cvtts2IntRnd, "{q}">,
6002 VEX_W, XD, EVEX_CD8<64, CD8VT1>;
6004 defm VCVTTSS2USIZ: avx512_cvt_s_all<0x78, "vcvttss2usi", f32x_info, i32x_info,
6005 fp_to_uint, X86cvtts2UIntRnd, "{l}">,
6006 XS, EVEX_CD8<32, CD8VT1>;
6007 defm VCVTTSS2USI64Z: avx512_cvt_s_all<0x78, "vcvttss2usi", f32x_info, i64x_info,
6008 fp_to_uint, X86cvtts2UIntRnd, "{q}">,
6009 XS,VEX_W, EVEX_CD8<32, CD8VT1>;
6010 defm VCVTTSD2USIZ: avx512_cvt_s_all<0x78, "vcvttsd2usi", f64x_info, i32x_info,
6011 fp_to_uint, X86cvtts2UIntRnd, "{l}">,
6012 XD, EVEX_CD8<64, CD8VT1>;
6013 defm VCVTTSD2USI64Z: avx512_cvt_s_all<0x78, "vcvttsd2usi", f64x_info, i64x_info,
6014 fp_to_uint, X86cvtts2UIntRnd, "{q}">,
6015 XD, VEX_W, EVEX_CD8<64, CD8VT1>;
6016 let Predicates = [HasAVX512] in {
6017 def : Pat<(i32 (int_x86_sse_cvttss2si (v4f32 VR128X:$src))),
6018 (VCVTTSS2SIZrr_Int VR128X:$src)>;
6019 def : Pat<(i32 (int_x86_sse_cvttss2si (sse_load_f32 addr:$src))),
6020 (VCVTTSS2SIZrm_Int addr:$src)>;
6021 def : Pat<(i64 (int_x86_sse_cvttss2si64 (v4f32 VR128X:$src))),
6022 (VCVTTSS2SI64Zrr_Int VR128X:$src)>;
6023 def : Pat<(i64 (int_x86_sse_cvttss2si64 (sse_load_f32 addr:$src))),
6024 (VCVTTSS2SI64Zrm_Int addr:$src)>;
6025 def : Pat<(i32 (int_x86_sse2_cvttsd2si (v2f64 VR128X:$src))),
6026 (VCVTTSD2SIZrr_Int VR128X:$src)>;
6027 def : Pat<(i32 (int_x86_sse2_cvttsd2si (sse_load_f64 addr:$src))),
6028 (VCVTTSD2SIZrm_Int addr:$src)>;
6029 def : Pat<(i64 (int_x86_sse2_cvttsd2si64 (v2f64 VR128X:$src))),
6030 (VCVTTSD2SI64Zrr_Int VR128X:$src)>;
6031 def : Pat<(i64 (int_x86_sse2_cvttsd2si64 (sse_load_f64 addr:$src))),
6032 (VCVTTSD2SI64Zrm_Int addr:$src)>;
6034 //===----------------------------------------------------------------------===//
6035 // AVX-512 Convert form float to double and back
6036 //===----------------------------------------------------------------------===//
// Scalar FP-to-FP conversion (e.g. cvtsd2ss/cvtss2sd): converts the low
// element of $src2 and merges it into $src1; register and scalar-memory
// forms. The operation node takes FROUND_CURRENT, i.e. rounds per MXCSR.
6037 multiclass avx512_cvt_fp_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6038 X86VectorVTInfo _Src, SDNode OpNode> {
6039 defm rr : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
6040 (ins _.RC:$src1, _Src.RC:$src2), OpcodeStr,
6041 "$src2, $src1", "$src1, $src2",
6042 (_.VT (OpNode (_.VT _.RC:$src1),
6043 (_Src.VT _Src.RC:$src2),
6044 (i32 FROUND_CURRENT)))>,
6045 EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
6046 defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
6047 (ins _Src.RC:$src1, _Src.ScalarMemOp:$src2), OpcodeStr,
6048 "$src2, $src1", "$src1, $src2",
6049 (_.VT (OpNode (_.VT _.RC:$src1),
6050 (_Src.VT (scalar_to_vector
6051 (_Src.ScalarLdFrag addr:$src2))),
6052 (i32 FROUND_CURRENT)))>,
6053 EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>;
6056 // Scalar Conversion with SAE - suppress all exceptions
// Register-only form with EVEX_B set: FROUND_NO_EXC requests the
// suppress-all-exceptions ({sae}) behavior.
6057 multiclass avx512_cvt_fp_sae_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6058 X86VectorVTInfo _Src, SDNode OpNodeRnd> {
6059 defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
6060 (ins _.RC:$src1, _Src.RC:$src2), OpcodeStr,
6061 "{sae}, $src2, $src1", "$src1, $src2, {sae}",
6062 (_.VT (OpNodeRnd (_.VT _.RC:$src1),
6063 (_Src.VT _Src.RC:$src2),
6064 (i32 FROUND_NO_EXC)))>,
6065 EVEX_4V, VEX_LIG, EVEX_B;
6068 // Scalar Conversion with rounding control (RC)
// Register-only form taking an explicit AVX512RC rounding-mode immediate.
// NOTE(review): this register form uses the load scheduling class
// (WriteCvtF2FLd, ReadAfterLd) — looks copied from an rm form; confirm
// the intended sched class.
6069 multiclass avx512_cvt_fp_rc_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6070 X86VectorVTInfo _Src, SDNode OpNodeRnd> {
6071 defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
6072 (ins _.RC:$src1, _Src.RC:$src2, AVX512RC:$rc), OpcodeStr,
6073 "$rc, $src2, $src1", "$src1, $src2, $rc",
6074 (_.VT (OpNodeRnd (_.VT _.RC:$src1),
6075 (_Src.VT _Src.RC:$src2), (i32 imm:$rc)))>,
6076 EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
// Instantiation helper for double -> single scalar conversion (cvtsd2ss):
// combines the MXCSR-rounded form with the explicit rounding-control form.
6079 multiclass avx512_cvt_fp_scalar_sd2ss<bits<8> opc, string OpcodeStr,
6080 SDNode OpNodeRnd, X86VectorVTInfo _src,
6081 X86VectorVTInfo _dst> {
6082 let Predicates = [HasAVX512] in {
6083 defm Z : avx512_cvt_fp_scalar<opc, OpcodeStr, _dst, _src, OpNodeRnd>,
6084 avx512_cvt_fp_rc_scalar<opc, OpcodeStr, _dst, _src,
6085 OpNodeRnd>, VEX_W, EVEX_CD8<64, CD8VT1>, XD;
// Instantiation helper for single -> double scalar conversion (cvtss2sd):
// widening is exact, so it pairs with the SAE form rather than RC.
6089 multiclass avx512_cvt_fp_scalar_ss2sd<bits<8> opc, string OpcodeStr,
6090 SDNode OpNodeRnd, X86VectorVTInfo _src,
6091 X86VectorVTInfo _dst> {
6092 let Predicates = [HasAVX512] in {
6093 defm Z : avx512_cvt_fp_scalar<opc, OpcodeStr, _dst, _src, OpNodeRnd>,
6094 avx512_cvt_fp_sae_scalar<opc, OpcodeStr, _dst, _src, OpNodeRnd>,
6095 EVEX_CD8<32, CD8VT1>, XS;
// EVEX-encoded scalar cvtsd2ss / cvtss2sd instantiations.
6098 defm VCVTSD2SS : avx512_cvt_fp_scalar_sd2ss<0x5A, "vcvtsd2ss",
6099 X86froundRnd, f64x_info, f32x_info>;
6100 defm VCVTSS2SD : avx512_cvt_fp_scalar_ss2sd<0x5A, "vcvtss2sd",
6101 X86fpextRnd,f32x_info, f64x_info >;
// Lower scalar fpextend/fpround between FR32X/FR64X through the vector
// cvt instructions; COPY_TO_REGCLASS moves values across FR/VR classes.
// The extloadf32 pattern has a load-folding (OptForSize) and a
// register (OptForSpeed) variant.
6103 def : Pat<(f64 (fpextend FR32X:$src)),
6104 (COPY_TO_REGCLASS (VCVTSS2SDZrr (COPY_TO_REGCLASS FR32X:$src, VR128X),
6105 (COPY_TO_REGCLASS FR32X:$src, VR128X)), VR128X)>,
6106 Requires<[HasAVX512]>;
6107 def : Pat<(f64 (fpextend (loadf32 addr:$src))),
6108 (COPY_TO_REGCLASS (VCVTSS2SDZrm (v4f32 (IMPLICIT_DEF)), addr:$src), VR128X)>,
6109 Requires<[HasAVX512]>;
6111 def : Pat<(f64 (extloadf32 addr:$src)),
6112 (COPY_TO_REGCLASS (VCVTSS2SDZrm (v4f32 (IMPLICIT_DEF)), addr:$src), VR128X)>,
6113 Requires<[HasAVX512, OptForSize]>;
6115 def : Pat<(f64 (extloadf32 addr:$src)),
6116 (COPY_TO_REGCLASS (VCVTSS2SDZrr (v4f32 (IMPLICIT_DEF)),
6117 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)), VR128X)>,
6118 Requires<[HasAVX512, OptForSpeed]>;
6120 def : Pat<(f32 (fpround FR64X:$src)),
6121 (COPY_TO_REGCLASS (VCVTSD2SSZrr (COPY_TO_REGCLASS FR64X:$src, VR128X),
6122 (COPY_TO_REGCLASS FR64X:$src, VR128X)), VR128X)>,
6123 Requires<[HasAVX512]>;
6124 //===----------------------------------------------------------------------===//
6125 // AVX-512 Vector convert from signed/unsigned integer to float/double
6126 // and from float/double to signed/unsigned integer
6127 //===----------------------------------------------------------------------===//
// Generic vector conversion: register, full-vector memory, and
// broadcast-from-scalar-memory forms. Broadcast/Alias/MemOp are
// overridable for the cases where source and dest element counts differ.
6129 multiclass avx512_vcvt_fp<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6130 X86VectorVTInfo _Src, SDNode OpNode,
6131 string Broadcast = _.BroadcastStr,
6132 string Alias = "", X86MemOperand MemOp = _Src.MemOp> {
6134 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
6135 (ins _Src.RC:$src), OpcodeStr, "$src", "$src",
6136 (_.VT (OpNode (_Src.VT _Src.RC:$src)))>, EVEX;
6138 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
6139 (ins MemOp:$src), OpcodeStr#Alias, "$src", "$src",
6140 (_.VT (OpNode (_Src.VT
6141 (bitconvert (_Src.LdFrag addr:$src)))))>, EVEX;
6143 defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
6144 (ins _Src.ScalarMemOp:$src), OpcodeStr,
6145 "${src}"##Broadcast, "${src}"##Broadcast,
6146 (_.VT (OpNode (_Src.VT
6147 (X86VBroadcast (_Src.ScalarLdFrag addr:$src)))
6150 // Conversion with SAE - suppress all exceptions
6151 multiclass avx512_vcvt_fp_sae<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6152 X86VectorVTInfo _Src, SDNode OpNodeRnd> {
6153 defm rrb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
6154 (ins _Src.RC:$src), OpcodeStr,
6155 "{sae}, $src", "$src, {sae}",
6156 (_.VT (OpNodeRnd (_Src.VT _Src.RC:$src),
6157 (i32 FROUND_NO_EXC)))>,
6161 // Conversion with rounding control (RC)
// EVEX_B + EVEX_RC encode the static rounding mode from the $rc immediate.
6162 multiclass avx512_vcvt_fp_rc<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6163 X86VectorVTInfo _Src, SDNode OpNodeRnd> {
6164 defm rrb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
6165 (ins _Src.RC:$src, AVX512RC:$rc), OpcodeStr,
6166 "$rc, $src", "$src, $rc",
6167 (_.VT (OpNodeRnd (_Src.VT _Src.RC:$src), (i32 imm:$rc)))>,
6168 EVEX, EVEX_B, EVEX_RC;
6171 // Extend Float to Double
// 512-bit form gets SAE; the 128-bit form converts only 2 of the 4 source
// floats, hence the explicit "{1to2}" broadcast string and f64mem operand.
6172 multiclass avx512_cvtps2pd<bits<8> opc, string OpcodeStr> {
6173 let Predicates = [HasAVX512] in {
6174 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f64_info, v8f32x_info, fpextend>,
6175 avx512_vcvt_fp_sae<opc, OpcodeStr, v8f64_info, v8f32x_info,
6176 X86vfpextRnd>, EVEX_V512;
6178 let Predicates = [HasVLX] in {
6179 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2f64x_info, v4f32x_info,
6180 X86vfpext, "{1to2}", "", f64mem>, EVEX_V128;
6181 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f64x_info, v4f32x_info, fpextend>,
6186 // Truncate Double to Float
// "x"/"y" mnemonic suffixes disambiguate the 128- vs 256-bit memory forms
// (both produce a 128-bit result); InstAliases accept the suffixed spelling.
6187 multiclass avx512_cvtpd2ps<bits<8> opc, string OpcodeStr> {
6188 let Predicates = [HasAVX512] in {
6189 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f32x_info, v8f64_info, fpround>,
6190 avx512_vcvt_fp_rc<opc, OpcodeStr, v8f32x_info, v8f64_info,
6191 X86vfproundRnd>, EVEX_V512;
6193 let Predicates = [HasVLX] in {
6194 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v2f64x_info,
6195 X86vfpround, "{1to2}", "{x}">, EVEX_V128;
6196 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v4f64x_info, fpround,
6197 "{1to4}", "{y}">, EVEX_V256;
6199 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6200 (!cast<Instruction>(NAME # "Z128rr") VR128X:$dst, VR128X:$src), 0>;
6201 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6202 (!cast<Instruction>(NAME # "Z128rm") VR128X:$dst, f128mem:$src), 0>;
6203 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6204 (!cast<Instruction>(NAME # "Z256rr") VR128X:$dst, VR256X:$src), 0>;
6205 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6206 (!cast<Instruction>(NAME # "Z256rm") VR128X:$dst, f256mem:$src), 0>;
// Instantiate vcvtpd2ps / vcvtps2pd, plus a 512-bit extending-load fold.
6210 defm VCVTPD2PS : avx512_cvtpd2ps<0x5A, "vcvtpd2ps">,
6211 VEX_W, PD, EVEX_CD8<64, CD8VF>;
6212 defm VCVTPS2PD : avx512_cvtps2pd<0x5A, "vcvtps2pd">,
6213 PS, EVEX_CD8<32, CD8VH>;
6215 def : Pat<(v8f64 (extloadv8f32 addr:$src)),
6216 (VCVTPS2PDZrm addr:$src)>;
// VLX-only folds: zero-extended low-half pd2ps result, and extending
// loads for the 128-/256-bit ps2pd forms.
6218 let Predicates = [HasVLX] in {
6219 let AddedComplexity = 15 in
6220 def : Pat<(X86vzmovl (v2f64 (bitconvert
6221 (v4f32 (X86vfpround (v2f64 VR128X:$src)))))),
6222 (VCVTPD2PSZ128rr VR128X:$src)>;
6223 def : Pat<(v2f64 (extloadv2f32 addr:$src)),
6224 (VCVTPS2PDZ128rm addr:$src)>;
6225 def : Pat<(v4f64 (extloadv4f32 addr:$src)),
6226 (VCVTPS2PDZ256rm addr:$src)>;
6229 // Convert Signed/Unsigned Doubleword to Double
// Widening conversion, so no rounding-control variant is defined.
// OpNode128 handles the 128-bit case, which reads only the low 2 of the
// 4 source dwords (hence "{1to2}" and i64mem).
6230 multiclass avx512_cvtdq2pd<bits<8> opc, string OpcodeStr, SDNode OpNode,
6232 // No rounding in this op
6233 let Predicates = [HasAVX512] in
6234 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f64_info, v8i32x_info, OpNode>,
6237 let Predicates = [HasVLX] in {
6238 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2f64x_info, v4i32x_info,
6239 OpNode128, "{1to2}", "", i64mem>, EVEX_V128;
6240 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f64x_info, v4i32x_info, OpNode>,
6245 // Convert Signed/Unsigned Doubleword to Float
// Same-width conversion: the 512-bit form additionally gets static
// rounding control via OpNodeRnd.
6246 multiclass avx512_cvtdq2ps<bits<8> opc, string OpcodeStr, SDNode OpNode,
6248 let Predicates = [HasAVX512] in
6249 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v16f32_info, v16i32_info, OpNode>,
6250 avx512_vcvt_fp_rc<opc, OpcodeStr, v16f32_info, v16i32_info,
6251 OpNodeRnd>, EVEX_V512;
6253 let Predicates = [HasVLX] in {
6254 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v4i32x_info, OpNode>,
6256 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v8f32x_info, v8i32x_info, OpNode>,
6261 // Convert Float to Signed/Unsigned Doubleword with truncation
// Truncating conversions pair with the SAE form (no rounding choice).
6262 multiclass avx512_cvttps2dq<bits<8> opc, string OpcodeStr,
6263 SDNode OpNode, SDNode OpNodeRnd> {
6264 let Predicates = [HasAVX512] in {
6265 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v16i32_info, v16f32_info, OpNode>,
6266 avx512_vcvt_fp_sae<opc, OpcodeStr, v16i32_info, v16f32_info,
6267 OpNodeRnd>, EVEX_V512;
6269 let Predicates = [HasVLX] in {
6270 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v4f32x_info, OpNode>,
6272 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v8i32x_info, v8f32x_info, OpNode>,
6277 // Convert Float to Signed/Unsigned Doubleword
// Non-truncating variant: pairs with the rounding-control form.
6278 multiclass avx512_cvtps2dq<bits<8> opc, string OpcodeStr,
6279 SDNode OpNode, SDNode OpNodeRnd> {
6280 let Predicates = [HasAVX512] in {
6281 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v16i32_info, v16f32_info, OpNode>,
6282 avx512_vcvt_fp_rc<opc, OpcodeStr, v16i32_info, v16f32_info,
6283 OpNodeRnd>, EVEX_V512;
6285 let Predicates = [HasVLX] in {
6286 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v4f32x_info, OpNode>,
6288 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v8i32x_info, v8f32x_info, OpNode>,
6293 // Convert Double to Signed/Unsigned Doubleword with truncation
6294 multiclass avx512_cvttpd2dq<bits<8> opc, string OpcodeStr, SDNode OpNode,
6295 SDNode OpNode128, SDNode OpNodeRnd> {
6296 let Predicates = [HasAVX512] in {
6297 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i32x_info, v8f64_info, OpNode>,
6298 avx512_vcvt_fp_sae<opc, OpcodeStr, v8i32x_info, v8f64_info,
6299 OpNodeRnd>, EVEX_V512;
6301 let Predicates = [HasVLX] in {
6302 // we need "x"/"y" suffixes in order to distinguish between 128 and 256
6303 // memory forms of these instructions in Asm Parser. They have the same
6304 // dest type - 'v4i32x_info'. We also specify the broadcast string explicitly
6305 // due to the same reason.
6306 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v2f64x_info,
6307 OpNode128, "{1to2}", "{x}">, EVEX_V128;
6308 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v4f64x_info, OpNode,
6309 "{1to4}", "{y}">, EVEX_V256;
6311 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6312 (!cast<Instruction>(NAME # "Z128rr") VR128X:$dst, VR128X:$src), 0>;
6313 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6314 (!cast<Instruction>(NAME # "Z128rm") VR128X:$dst, i128mem:$src), 0>;
6315 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6316 (!cast<Instruction>(NAME # "Z256rr") VR128X:$dst, VR256X:$src), 0>;
6317 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6318 (!cast<Instruction>(NAME # "Z256rm") VR128X:$dst, i256mem:$src), 0>;
6322 // Convert Double to Signed/Unsigned Doubleword
6323 multiclass avx512_cvtpd2dq<bits<8> opc, string OpcodeStr,
6324 SDNode OpNode, SDNode OpNodeRnd> {
6325 let Predicates = [HasAVX512] in {
6326 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i32x_info, v8f64_info, OpNode>,
6327 avx512_vcvt_fp_rc<opc, OpcodeStr, v8i32x_info, v8f64_info,
6328 OpNodeRnd>, EVEX_V512;
6330 let Predicates = [HasVLX] in {
6331 // we need "x"/"y" suffixes in order to distinguish between 128 and 256
6332 // memory forms of these instructions in Asm Parser. They have the same
6333 // dest type - 'v4i32x_info'. We also specify the broadcast string explicitly
6334 // due to the same reason.
6335 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v2f64x_info, OpNode,
6336 "{1to2}", "{x}">, EVEX_V128;
6337 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v4f64x_info, OpNode,
6338 "{1to4}", "{y}">, EVEX_V256;
6340 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6341 (!cast<Instruction>(NAME # "Z128rr") VR128X:$dst, VR128X:$src), 0>;
6342 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6343 (!cast<Instruction>(NAME # "Z128rm") VR128X:$dst, f128mem:$src), 0>;
6344 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6345 (!cast<Instruction>(NAME # "Z256rr") VR128X:$dst, VR256X:$src), 0>;
6346 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6347 (!cast<Instruction>(NAME # "Z256rm") VR128X:$dst, f256mem:$src), 0>;
6351 // Convert Double to Signed/Unsigned Quadword
// Quadword integer conversions require AVX512DQ.
6352 multiclass avx512_cvtpd2qq<bits<8> opc, string OpcodeStr,
6353 SDNode OpNode, SDNode OpNodeRnd> {
6354 let Predicates = [HasDQI] in {
6355 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i64_info, v8f64_info, OpNode>,
6356 avx512_vcvt_fp_rc<opc, OpcodeStr, v8i64_info, v8f64_info,
6357 OpNodeRnd>, EVEX_V512;
6359 let Predicates = [HasDQI, HasVLX] in {
6360 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2i64x_info, v2f64x_info, OpNode>,
6362 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i64x_info, v4f64x_info, OpNode>,
6367 // Convert Double to Signed/Unsigned Quadword with truncation
6368 multiclass avx512_cvttpd2qq<bits<8> opc, string OpcodeStr,
6369 SDNode OpNode, SDNode OpNodeRnd> {
6370 let Predicates = [HasDQI] in {
6371 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i64_info, v8f64_info, OpNode>,
6372 avx512_vcvt_fp_sae<opc, OpcodeStr, v8i64_info, v8f64_info,
6373 OpNodeRnd>, EVEX_V512;
6375 let Predicates = [HasDQI, HasVLX] in {
6376 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2i64x_info, v2f64x_info, OpNode>,
6378 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i64x_info, v4f64x_info, OpNode>,
6383 // Convert Signed/Unsigned Quadword to Double
6384 multiclass avx512_cvtqq2pd<bits<8> opc, string OpcodeStr,
6385 SDNode OpNode, SDNode OpNodeRnd> {
6386 let Predicates = [HasDQI] in {
6387 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f64_info, v8i64_info, OpNode>,
6388 avx512_vcvt_fp_rc<opc, OpcodeStr, v8f64_info, v8i64_info,
6389 OpNodeRnd>, EVEX_V512;
6391 let Predicates = [HasDQI, HasVLX] in {
6392 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2f64x_info, v2i64x_info, OpNode>,
6394 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f64x_info, v4i64x_info, OpNode>,
6399 // Convert Float to Signed/Unsigned Quadword
6400 multiclass avx512_cvtps2qq<bits<8> opc, string OpcodeStr,
6401 SDNode OpNode, SDNode OpNodeRnd> {
6402 let Predicates = [HasDQI] in {
6403 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i64_info, v8f32x_info, OpNode>,
6404 avx512_vcvt_fp_rc<opc, OpcodeStr, v8i64_info, v8f32x_info,
6405 OpNodeRnd>, EVEX_V512;
6407 let Predicates = [HasDQI, HasVLX] in {
6408 // Explicitly specified broadcast string, since we take only 2 elements
6409 // from v4f32x_info source
6410 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2i64x_info, v4f32x_info, OpNode,
6411 "{1to2}", "", f64mem>, EVEX_V128;
6412 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i64x_info, v4f32x_info, OpNode>,
6417 // Convert Float to Signed/Unsigned Quadword with truncation
6418 multiclass avx512_cvttps2qq<bits<8> opc, string OpcodeStr, SDNode OpNode,
6419 SDNode OpNode128, SDNode OpNodeRnd> {
6420 let Predicates = [HasDQI] in {
6421 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i64_info, v8f32x_info, OpNode>,
6422 avx512_vcvt_fp_sae<opc, OpcodeStr, v8i64_info, v8f32x_info,
6423 OpNodeRnd>, EVEX_V512;
6425 let Predicates = [HasDQI, HasVLX] in {
6426 // Explicitly specified broadcast string, since we take only 2 elements
6427 // from v4f32x_info source
6428 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2i64x_info, v4f32x_info, OpNode128,
6429 "{1to2}", "", f64mem>, EVEX_V128;
6430 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i64x_info, v4f32x_info, OpNode>,
6435 // Convert Signed/Unsigned Quadword to Float
6436 multiclass avx512_cvtqq2ps<bits<8> opc, string OpcodeStr, SDNode OpNode,
6437 SDNode OpNode128, SDNode OpNodeRnd> {
6438 let Predicates = [HasDQI] in {
6439 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f32x_info, v8i64_info, OpNode>,
6440 avx512_vcvt_fp_rc<opc, OpcodeStr, v8f32x_info, v8i64_info,
6441 OpNodeRnd>, EVEX_V512;
6443 let Predicates = [HasDQI, HasVLX] in {
6444 // we need "x"/"y" suffixes in order to distinguish between 128 and 256
6445 // memory forms of these instructions in Asm Parser. They have the same
6446 // dest type - 'v4f32x_info'. We also specify the broadcast string explicitly
6447 // due to the same reason.
6448 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v2i64x_info, OpNode128,
6449 "{1to2}", "{x}">, EVEX_V128;
6450 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v4i64x_info, OpNode,
6451 "{1to4}", "{y}">, EVEX_V256;
6453 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6454 (!cast<Instruction>(NAME # "Z128rr") VR128X:$dst, VR128X:$src), 0>;
6455 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6456 (!cast<Instruction>(NAME # "Z128rm") VR128X:$dst, i128mem:$src), 0>;
6457 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6458 (!cast<Instruction>(NAME # "Z256rr") VR128X:$dst, VR256X:$src), 0>;
6459 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6460 (!cast<Instruction>(NAME # "Z256rm") VR128X:$dst, i256mem:$src), 0>;
// Instantiate all vector int<->fp conversion instructions. Prefix/VEX_W
// bits and the CD8 (compressed disp8) tuple distinguish the encodings;
// "U" variants are the unsigned conversions, "T" variants truncate.
6464 defm VCVTDQ2PD : avx512_cvtdq2pd<0xE6, "vcvtdq2pd", sint_to_fp, X86VSintToFP>,
6465 XS, EVEX_CD8<32, CD8VH>;
6467 defm VCVTDQ2PS : avx512_cvtdq2ps<0x5B, "vcvtdq2ps", sint_to_fp,
6469 PS, EVEX_CD8<32, CD8VF>;
6471 defm VCVTTPS2DQ : avx512_cvttps2dq<0x5B, "vcvttps2dq", fp_to_sint,
6473 XS, EVEX_CD8<32, CD8VF>;
6475 defm VCVTTPD2DQ : avx512_cvttpd2dq<0xE6, "vcvttpd2dq", fp_to_sint, X86cvttp2si,
6477 PD, VEX_W, EVEX_CD8<64, CD8VF>;
6479 defm VCVTTPS2UDQ : avx512_cvttps2dq<0x78, "vcvttps2udq", fp_to_uint,
6480 X86cvttp2uiRnd>, PS,
6481 EVEX_CD8<32, CD8VF>;
6483 defm VCVTTPD2UDQ : avx512_cvttpd2dq<0x78, "vcvttpd2udq", fp_to_uint,
6484 X86cvttp2ui, X86cvttp2uiRnd>, PS, VEX_W,
6485 EVEX_CD8<64, CD8VF>;
6487 defm VCVTUDQ2PD : avx512_cvtdq2pd<0x7A, "vcvtudq2pd", uint_to_fp, X86VUintToFP>,
6488 XS, EVEX_CD8<32, CD8VH>;
6490 defm VCVTUDQ2PS : avx512_cvtdq2ps<0x7A, "vcvtudq2ps", uint_to_fp,
6491 X86VUintToFpRnd>, XD,
6492 EVEX_CD8<32, CD8VF>;
6494 defm VCVTPS2DQ : avx512_cvtps2dq<0x5B, "vcvtps2dq", X86cvtp2Int,
6495 X86cvtp2IntRnd>, PD, EVEX_CD8<32, CD8VF>;
6497 defm VCVTPD2DQ : avx512_cvtpd2dq<0xE6, "vcvtpd2dq", X86cvtp2Int,
6498 X86cvtp2IntRnd>, XD, VEX_W,
6499 EVEX_CD8<64, CD8VF>;
6501 defm VCVTPS2UDQ : avx512_cvtps2dq<0x79, "vcvtps2udq", X86cvtp2UInt,
6503 PS, EVEX_CD8<32, CD8VF>;
6504 defm VCVTPD2UDQ : avx512_cvtpd2dq<0x79, "vcvtpd2udq", X86cvtp2UInt,
6505 X86cvtp2UIntRnd>, VEX_W,
6506 PS, EVEX_CD8<64, CD8VF>;
6508 defm VCVTPD2QQ : avx512_cvtpd2qq<0x7B, "vcvtpd2qq", X86cvtp2Int,
6509 X86cvtp2IntRnd>, VEX_W,
6510 PD, EVEX_CD8<64, CD8VF>;
6512 defm VCVTPS2QQ : avx512_cvtps2qq<0x7B, "vcvtps2qq", X86cvtp2Int,
6513 X86cvtp2IntRnd>, PD, EVEX_CD8<32, CD8VH>;
6515 defm VCVTPD2UQQ : avx512_cvtpd2qq<0x79, "vcvtpd2uqq", X86cvtp2UInt,
6516 X86cvtp2UIntRnd>, VEX_W,
6517 PD, EVEX_CD8<64, CD8VF>;
6519 defm VCVTPS2UQQ : avx512_cvtps2qq<0x79, "vcvtps2uqq", X86cvtp2UInt,
6520 X86cvtp2UIntRnd>, PD, EVEX_CD8<32, CD8VH>;
6522 defm VCVTTPD2QQ : avx512_cvttpd2qq<0x7A, "vcvttpd2qq", fp_to_sint,
6523 X86cvttp2siRnd>, VEX_W,
6524 PD, EVEX_CD8<64, CD8VF>;
6526 defm VCVTTPS2QQ : avx512_cvttps2qq<0x7A, "vcvttps2qq", fp_to_sint, X86cvttp2si,
6527 X86cvttp2siRnd>, PD, EVEX_CD8<32, CD8VH>;
6529 defm VCVTTPD2UQQ : avx512_cvttpd2qq<0x78, "vcvttpd2uqq", fp_to_uint,
6530 X86cvttp2uiRnd>, VEX_W,
6531 PD, EVEX_CD8<64, CD8VF>;
6533 defm VCVTTPS2UQQ : avx512_cvttps2qq<0x78, "vcvttps2uqq", fp_to_uint, X86cvttp2ui,
6534 X86cvttp2uiRnd>, PD, EVEX_CD8<32, CD8VH>;
6536 defm VCVTQQ2PD : avx512_cvtqq2pd<0xE6, "vcvtqq2pd", sint_to_fp,
6537 X86VSintToFpRnd>, VEX_W, XS, EVEX_CD8<64, CD8VF>;
6539 defm VCVTUQQ2PD : avx512_cvtqq2pd<0x7A, "vcvtuqq2pd", uint_to_fp,
6540 X86VUintToFpRnd>, VEX_W, XS, EVEX_CD8<64, CD8VF>;
6542 defm VCVTQQ2PS : avx512_cvtqq2ps<0x5B, "vcvtqq2ps", sint_to_fp, X86VSintToFP,
6543 X86VSintToFpRnd>, VEX_W, PS, EVEX_CD8<64, CD8VF>;
6545 defm VCVTUQQ2PS : avx512_cvtqq2ps<0x7A, "vcvtuqq2ps", uint_to_fp, X86VUintToFP,
6546 X86VUintToFpRnd>, VEX_W, XD, EVEX_CD8<64, CD8VF>;
// Without VLX, narrow unsigned conversions are not available; widen the
// operand into a 512-bit register (INSERT_SUBREG over IMPLICIT_DEF),
// run the 512-bit instruction, then extract the original-width result.
6548 let Predicates = [HasAVX512, NoVLX] in {
6549 def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
6550 (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
6551 (v16f32 (INSERT_SUBREG (IMPLICIT_DEF),
6552 VR256X:$src1, sub_ymm)))), sub_ymm)>;
6554 def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
6555 (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
6556 (v16f32 (INSERT_SUBREG (IMPLICIT_DEF),
6557 VR128X:$src1, sub_xmm)))), sub_xmm)>;
6559 def : Pat<(v4i32 (fp_to_uint (v4f64 VR256X:$src1))),
6560 (EXTRACT_SUBREG (v8i32 (VCVTTPD2UDQZrr
6561 (v8f64 (INSERT_SUBREG (IMPLICIT_DEF),
6562 VR256X:$src1, sub_ymm)))), sub_xmm)>;
6564 def : Pat<(v4i32 (X86cvttp2ui (v2f64 VR128X:$src))),
6565 (EXTRACT_SUBREG (v8i32 (VCVTTPD2UDQZrr
6566 (v8f64 (INSERT_SUBREG (IMPLICIT_DEF),
6567 VR128X:$src, sub_xmm)))), sub_xmm)>;
6569 def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
6570 (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
6571 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF),
6572 VR256X:$src1, sub_ymm)))), sub_ymm)>;
6574 def : Pat<(v4f32 (uint_to_fp (v4i32 VR128X:$src1))),
6575 (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
6576 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF),
6577 VR128X:$src1, sub_xmm)))), sub_xmm)>;
6579 def : Pat<(v4f64 (uint_to_fp (v4i32 VR128X:$src1))),
6580 (EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr
6581 (v8i32 (INSERT_SUBREG (IMPLICIT_DEF),
6582 VR128X:$src1, sub_xmm)))), sub_ymm)>;
6584 def : Pat<(v2f64 (X86VUintToFP (v4i32 VR128X:$src1))),
6585 (EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr
6586 (v8i32 (INSERT_SUBREG (IMPLICIT_DEF),
6587 VR128X:$src1, sub_xmm)))), sub_xmm)>;
// With VLX: fold the zero-upper-elements idiom (vzmovl of the bitcast
// result) directly into the 128-bit pd->dq conversions, which already
// zero the upper half of the destination.
6590 let Predicates = [HasAVX512, HasVLX] in {
6591 let AddedComplexity = 15 in {
6592 def : Pat<(X86vzmovl (v2i64 (bitconvert
6593 (v4i32 (X86cvtp2Int (v2f64 VR128X:$src)))))),
6594 (VCVTPD2DQZ128rr VR128X:$src)>;
6595 def : Pat<(v4i32 (bitconvert (X86vzmovl (v2i64 (bitconvert
6596 (v4i32 (X86cvtp2UInt (v2f64 VR128X:$src)))))))),
6597 (VCVTPD2UDQZ128rr VR128X:$src)>;
6598 def : Pat<(X86vzmovl (v2i64 (bitconvert
6599 (v4i32 (X86cvttp2si (v2f64 VR128X:$src)))))),
6600 (VCVTTPD2DQZ128rr VR128X:$src)>;
6601 def : Pat<(v4i32 (bitconvert (X86vzmovl (v2i64 (bitconvert
6602 (v4i32 (X86cvttp2ui (v2f64 VR128X:$src)))))))),
6603 (VCVTTPD2UDQZ128rr VR128X:$src)>;
// Fold full-width loads into the 512-bit pd2ps / ps2pd conversions.
6607 let Predicates = [HasAVX512] in {
6608 def : Pat<(v8f32 (fpround (loadv8f64 addr:$src))),
6609 (VCVTPD2PSZrm addr:$src)>;
6610 def : Pat<(v8f64 (extloadv8f32 addr:$src)),
6611 (VCVTPS2PDZrm addr:$src)>;
// DQI+VLX: fold the zero-upper idiom into the 128-bit qq->ps conversions.
6614 let Predicates = [HasDQI, HasVLX] in {
6615 let AddedComplexity = 15 in {
6616 def : Pat<(X86vzmovl (v2f64 (bitconvert
6617 (v4f32 (X86VSintToFP (v2i64 VR128X:$src)))))),
6618 (VCVTQQ2PSZ128rr VR128X:$src)>;
6619 def : Pat<(X86vzmovl (v2f64 (bitconvert
6620 (v4f32 (X86VUintToFP (v2i64 VR128X:$src)))))),
6621 (VCVTUQQ2PSZ128rr VR128X:$src)>;
// DQI without VLX: emulate the narrow quadword conversions by widening
// to 512 bits, converting, and extracting the low subregister.
6625 let Predicates = [HasDQI, NoVLX] in {
6626 def : Pat<(v2i64 (fp_to_sint (v2f64 VR128X:$src1))),
6627 (EXTRACT_SUBREG (v8i64 (VCVTTPD2QQZrr
6628 (v8f64 (INSERT_SUBREG (IMPLICIT_DEF),
6629 VR128X:$src1, sub_xmm)))), sub_xmm)>;
6631 def : Pat<(v4i64 (fp_to_sint (v4f32 VR128X:$src1))),
6632 (EXTRACT_SUBREG (v8i64 (VCVTTPS2QQZrr
6633 (v8f32 (INSERT_SUBREG (IMPLICIT_DEF),
6634 VR128X:$src1, sub_xmm)))), sub_ymm)>;
6636 def : Pat<(v4i64 (fp_to_sint (v4f64 VR256X:$src1))),
6637 (EXTRACT_SUBREG (v8i64 (VCVTTPD2QQZrr
6638 (v8f64 (INSERT_SUBREG (IMPLICIT_DEF),
6639 VR256X:$src1, sub_ymm)))), sub_ymm)>;
6641 def : Pat<(v2i64 (fp_to_uint (v2f64 VR128X:$src1))),
6642 (EXTRACT_SUBREG (v8i64 (VCVTTPD2UQQZrr
6643 (v8f64 (INSERT_SUBREG (IMPLICIT_DEF),
6644 VR128X:$src1, sub_xmm)))), sub_xmm)>;
6646 def : Pat<(v4i64 (fp_to_uint (v4f32 VR128X:$src1))),
6647 (EXTRACT_SUBREG (v8i64 (VCVTTPS2UQQZrr
6648 (v8f32 (INSERT_SUBREG (IMPLICIT_DEF),
6649 VR128X:$src1, sub_xmm)))), sub_ymm)>;
6651 def : Pat<(v4i64 (fp_to_uint (v4f64 VR256X:$src1))),
6652 (EXTRACT_SUBREG (v8i64 (VCVTTPD2UQQZrr
6653 (v8f64 (INSERT_SUBREG (IMPLICIT_DEF),
6654 VR256X:$src1, sub_ymm)))), sub_ymm)>;
6656 def : Pat<(v4f32 (sint_to_fp (v4i64 VR256X:$src1))),
6657 (EXTRACT_SUBREG (v8f32 (VCVTQQ2PSZrr
6658 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
6659 VR256X:$src1, sub_ymm)))), sub_xmm)>;
6661 def : Pat<(v2f64 (sint_to_fp (v2i64 VR128X:$src1))),
6662 (EXTRACT_SUBREG (v8f64 (VCVTQQ2PDZrr
6663 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
6664 VR128X:$src1, sub_xmm)))), sub_xmm)>;
6666 def : Pat<(v4f64 (sint_to_fp (v4i64 VR256X:$src1))),
6667 (EXTRACT_SUBREG (v8f64 (VCVTQQ2PDZrr
6668 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
6669 VR256X:$src1, sub_ymm)))), sub_ymm)>;
6671 def : Pat<(v4f32 (uint_to_fp (v4i64 VR256X:$src1))),
6672 (EXTRACT_SUBREG (v8f32 (VCVTUQQ2PSZrr
6673 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
6674 VR256X:$src1, sub_ymm)))), sub_xmm)>;
6676 def : Pat<(v2f64 (uint_to_fp (v2i64 VR128X:$src1))),
6677 (EXTRACT_SUBREG (v8f64 (VCVTUQQ2PDZrr
6678 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
6679 VR128X:$src1, sub_xmm)))), sub_xmm)>;
6681 def : Pat<(v4f64 (uint_to_fp (v4i64 VR256X:$src1))),
6682 (EXTRACT_SUBREG (v8f64 (VCVTUQQ2PDZrr
6683 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
6684 VR256X:$src1, sub_ymm)))), sub_ymm)>;
6687 //===----------------------------------------------------------------------===//
6688 // Half precision conversion instructions
6689 //===----------------------------------------------------------------------===//
// Half -> single precision conversion (vcvtph2ps): register and memory
// forms, rounding per MXCSR (FROUND_CURRENT).
6690 multiclass avx512_cvtph2ps<X86VectorVTInfo _dest, X86VectorVTInfo _src,
6691 X86MemOperand x86memop, PatFrag ld_frag> {
6692 defm rr : AVX512_maskable<0x13, MRMSrcReg, _dest ,(outs _dest.RC:$dst), (ins _src.RC:$src),
6693 "vcvtph2ps", "$src", "$src",
6694 (X86cvtph2ps (_src.VT _src.RC:$src),
6695 (i32 FROUND_CURRENT))>, T8PD;
6696 defm rm : AVX512_maskable<0x13, MRMSrcMem, _dest, (outs _dest.RC:$dst), (ins x86memop:$src),
6697 "vcvtph2ps", "$src", "$src",
6698 (X86cvtph2ps (_src.VT (bitconvert (ld_frag addr:$src))),
6699 (i32 FROUND_CURRENT))>, T8PD;
// SAE variant of vcvtph2ps (EVEX_B, FROUND_NO_EXC), register form only.
6702 multiclass avx512_cvtph2ps_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src> {
6703 defm rb : AVX512_maskable<0x13, MRMSrcReg, _dest ,(outs _dest.RC:$dst), (ins _src.RC:$src),
6704 "vcvtph2ps", "{sae}, $src", "$src, {sae}",
6705 (X86cvtph2ps (_src.VT _src.RC:$src),
6706 (i32 FROUND_NO_EXC))>, T8PD, EVEX_B;
// Instantiate vcvtph2ps at 512 bits (with SAE) and, under VLX, at
// 256/128 bits.
6710 let Predicates = [HasAVX512] in {
6711 defm VCVTPH2PSZ : avx512_cvtph2ps<v16f32_info, v16i16x_info, f256mem, loadv4i64>,
6712 avx512_cvtph2ps_sae<v16f32_info, v16i16x_info>,
6713 EVEX, EVEX_V512, EVEX_CD8<32, CD8VH>;
6714 let Predicates = [HasVLX] in {
6715 defm VCVTPH2PSZ256 : avx512_cvtph2ps<v8f32x_info, v8i16x_info, f128mem,
6716 loadv2i64>,EVEX, EVEX_V256, EVEX_CD8<32, CD8VH>;
6717 defm VCVTPH2PSZ128 : avx512_cvtph2ps<v4f32x_info, v8i16x_info, f64mem,
6718 loadv2i64>, EVEX, EVEX_V128, EVEX_CD8<32, CD8VH>;
// Single -> half precision (vcvtps2ph): register, store, and
// masked-store forms. $src2 is the immediate rounding-control byte.
6722 multiclass avx512_cvtps2ph<X86VectorVTInfo _dest, X86VectorVTInfo _src,
6723 X86MemOperand x86memop> {
6724 defm rr : AVX512_maskable<0x1D, MRMDestReg, _dest ,(outs _dest.RC:$dst),
6725 (ins _src.RC:$src1, i32u8imm:$src2),
6726 "vcvtps2ph", "$src2, $src1", "$src1, $src2",
6727 (X86cvtps2ph (_src.VT _src.RC:$src1),
6729 NoItinerary, 0, 0, X86select>, AVX512AIi8Base;
6730 def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
6731 (ins x86memop:$dst, _src.RC:$src1, i32u8imm:$src2),
6732 "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
6733 [(store (_dest.VT (X86cvtps2ph (_src.VT _src.RC:$src1),
// Masked store form has no ISel pattern; marked side-effect free /
// mayStore for the scheduler.
6736 let hasSideEffects = 0, mayStore = 1 in
6737 def mrk : AVX512AIi8<0x1D, MRMDestMem, (outs),
6738 (ins x86memop:$dst, _dest.KRCWM:$mask, _src.RC:$src1, i32u8imm:$src2),
6739 "vcvtps2ph\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
// Assembler-only SAE form of vcvtps2ph (no patterns, hasSideEffects = 0).
6742 multiclass avx512_cvtps2ph_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src> {
6743 let hasSideEffects = 0 in
6744 defm rb : AVX512_maskable_in_asm<0x1D, MRMDestReg, _dest,
6745 (outs _dest.RC:$dst),
6746 (ins _src.RC:$src1, i32u8imm:$src2),
6747 "vcvtps2ph", "$src2, {sae}, $src1", "$src1, {sae}, $src2",
6748 []>, EVEX_B, AVX512AIi8Base;
// Instantiate vcvtps2ph at 512 bits (with SAE) and, under VLX, at
// 256/128 bits.
6750 let Predicates = [HasAVX512] in {
6751 defm VCVTPS2PHZ : avx512_cvtps2ph<v16i16x_info, v16f32_info, f256mem>,
6752 avx512_cvtps2ph_sae<v16i16x_info, v16f32_info>,
6753 EVEX, EVEX_V512, EVEX_CD8<32, CD8VH>;
6754 let Predicates = [HasVLX] in {
6755 defm VCVTPS2PHZ256 : avx512_cvtps2ph<v8i16x_info, v8f32x_info, f128mem>,
6756 EVEX, EVEX_V256, EVEX_CD8<32, CD8VH>;
6757 defm VCVTPS2PHZ128 : avx512_cvtps2ph<v8i16x_info, v4f32x_info, f128mem>,
6758 EVEX, EVEX_V128, EVEX_CD8<32, CD8VH>;
6762 // Patterns for matching conversions from float to half-float and vice versa.
6763 let Predicates = [HasVLX] in {
6764 // Use MXCSR.RC for rounding instead of explicitly specifying the default
6765 // rounding mode (Nearest-Even, encoded as 0). Both are equivalent in the
6766 // configurations we support (the default). However, falling back to MXCSR is
6767 // more consistent with other instructions, which are always controlled by it.
6768 // It's encoded as 0b100.
6769 def : Pat<(fp_to_f16 FR32X:$src),
6770 (i16 (EXTRACT_SUBREG (VMOVPDI2DIZrr (VCVTPS2PHZ128rr
6771 (COPY_TO_REGCLASS FR32X:$src, VR128X), 4)), sub_16bit))>;
6773 def : Pat<(f16_to_fp GR16:$src),
6774 (f32 (COPY_TO_REGCLASS (VCVTPH2PSZ128rr
6775 (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128X)), FR32X)) >;
// Round-trip f32 -> f16 -> f32 collapses to a convert-down/convert-up pair.
6777 def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32X:$src))),
6778 (f32 (COPY_TO_REGCLASS (VCVTPH2PSZ128rr
6779 (VCVTPS2PHZ128rr (COPY_TO_REGCLASS FR32X:$src, VR128X), 4)), FR32X)) >;
6782 // Patterns for matching float to half-float conversion when AVX512 is supported
6783 // but F16C isn't. In that case we have to use 512-bit vectors.
// The scalar value is widened into a zmm register, converted with the
// 512-bit instruction, and the low element extracted back out.
6784 let Predicates = [HasAVX512, NoVLX, NoF16C] in {
6785 def : Pat<(fp_to_f16 FR32X:$src),
6786 (i16 (EXTRACT_SUBREG
6788 (v8i16 (EXTRACT_SUBREG
6790 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
6791 (v4f32 (COPY_TO_REGCLASS FR32X:$src, VR128X)),
6792 sub_xmm), 4), sub_xmm))), sub_16bit))>;
6794 def : Pat<(f16_to_fp GR16:$src),
6795 (f32 (COPY_TO_REGCLASS
6796 (v4f32 (EXTRACT_SUBREG
6798 (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)),
6799 (v8i16 (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128X)),
6800 sub_xmm)), sub_xmm)), FR32X))>;
6802 def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32X:$src))),
6803 (f32 (COPY_TO_REGCLASS
6804 (v4f32 (EXTRACT_SUBREG
6806 (VCVTPS2PHZrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
6807 (v4f32 (COPY_TO_REGCLASS FR32X:$src, VR128X)),
6808 sub_xmm), 4)), sub_xmm)), FR32X))>;
6811 // Unordered/Ordered scalar fp compare with SAE and set EFLAGS
6812 multiclass avx512_ord_cmp_sae<bits<8> opc, X86VectorVTInfo _,
6814 def rb: AVX512<opc, MRMSrcReg, (outs), (ins _.RC:$src1, _.RC:$src2),
6815 !strconcat(OpcodeStr, "\t{{sae}, $src2, $src1|$src1, $src2, {sae}}"),
6816 [], IIC_SSE_COMIS_RR>, EVEX, EVEX_B, VEX_LIG, EVEX_V128,
// SAE forms of the EFLAGS-setting scalar compares ({v,}ucomiss/comisd).
6820 let Defs = [EFLAGS], Predicates = [HasAVX512] in {
6821 defm VUCOMISSZ : avx512_ord_cmp_sae<0x2E, v4f32x_info, "vucomiss">,
6822 AVX512PSIi8Base, EVEX_CD8<32, CD8VT1>;
6823 defm VUCOMISDZ : avx512_ord_cmp_sae<0x2E, v2f64x_info, "vucomisd">,
6824 AVX512PDIi8Base, VEX_W, EVEX_CD8<64, CD8VT1>;
6825 defm VCOMISSZ : avx512_ord_cmp_sae<0x2F, v4f32x_info, "vcomiss">,
6826 AVX512PSIi8Base, EVEX_CD8<32, CD8VT1>;
6827 defm VCOMISDZ : avx512_ord_cmp_sae<0x2F, v2f64x_info, "vcomisd">,
6828 AVX512PDIi8Base, VEX_W, EVEX_CD8<64, CD8VT1>;
// EVEX-encoded versions of the SSE scalar ordered/unordered compares.
// VCOMIS* reuse the multiclass with empty patterns (undef node); the
// isCodeGenOnly Int_* variants operate on vector register operands.
6831 let Defs = [EFLAGS], Predicates = [HasAVX512] in {
6832 defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32,
6833 "ucomiss">, PS, EVEX, VEX_LIG,
6834 EVEX_CD8<32, CD8VT1>;
6835 defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64,
6836 "ucomisd">, PD, EVEX,
6837 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
6838 let Pattern = []<dag> in {
6839 defm VCOMISSZ : sse12_ord_cmp<0x2F, FR32X, undef, f32, f32mem, loadf32,
6840 "comiss">, PS, EVEX, VEX_LIG,
6841 EVEX_CD8<32, CD8VT1>;
6842 defm VCOMISDZ : sse12_ord_cmp<0x2F, FR64X, undef, f64, f64mem, loadf64,
6843 "comisd">, PD, EVEX,
6844 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
6846 let isCodeGenOnly = 1 in {
6847 defm Int_VUCOMISSZ : sse12_ord_cmp_int<0x2E, VR128X, X86ucomi, v4f32, ssmem,
6848 sse_load_f32, "ucomiss">, PS, EVEX, VEX_LIG,
6849 EVEX_CD8<32, CD8VT1>;
6850 defm Int_VUCOMISDZ : sse12_ord_cmp_int<0x2E, VR128X, X86ucomi, v2f64, sdmem,
6851 sse_load_f64, "ucomisd">, PD, EVEX,
6852 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
6854 defm Int_VCOMISSZ : sse12_ord_cmp_int<0x2F, VR128X, X86comi, v4f32, ssmem,
6855 sse_load_f32, "comiss">, PS, EVEX, VEX_LIG,
6856 EVEX_CD8<32, CD8VT1>;
6857 defm Int_VCOMISDZ : sse12_ord_cmp_int<0x2F, VR128X, X86comi, v2f64, sdmem,
6858 sse_load_f64, "comisd">, PD, EVEX,
6859 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
6863 /// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
6864 multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
6865 X86VectorVTInfo _> {
6866 let AddedComplexity = 20 , Predicates = [HasAVX512] in {
6867 defm rr : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
6868 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
6869 "$src2, $src1", "$src1, $src2",
6870 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))>, EVEX_4V;
6871 defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
6872 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
6873 "$src2, $src1", "$src1, $src2",
6874 (OpNode (_.VT _.RC:$src1),
6875 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))))>, EVEX_4V;
6879 defm VRCP14SS : avx512_fp14_s<0x4D, "vrcp14ss", X86frcp14s, f32x_info>,
6880 EVEX_CD8<32, CD8VT1>, T8PD;
6881 defm VRCP14SD : avx512_fp14_s<0x4D, "vrcp14sd", X86frcp14s, f64x_info>,
6882 VEX_W, EVEX_CD8<64, CD8VT1>, T8PD;
6883 defm VRSQRT14SS : avx512_fp14_s<0x4F, "vrsqrt14ss", X86frsqrt14s, f32x_info>,
6884 EVEX_CD8<32, CD8VT1>, T8PD;
6885 defm VRSQRT14SD : avx512_fp14_s<0x4F, "vrsqrt14sd", X86frsqrt14s, f64x_info>,
6886 VEX_W, EVEX_CD8<64, CD8VT1>, T8PD;
6888 /// avx512_fp14_p rcp14ps, rcp14pd, rsqrt14ps, rsqrt14pd
6889 multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
6890 X86VectorVTInfo _> {
6891 defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
6892 (ins _.RC:$src), OpcodeStr, "$src", "$src",
6893 (_.FloatVT (OpNode _.RC:$src))>, EVEX, T8PD;
6894 defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
6895 (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
6897 (bitconvert (_.LdFrag addr:$src))))>, EVEX, T8PD;
6898 defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
6899 (ins _.ScalarMemOp:$src), OpcodeStr,
6900 "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
6902 (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
6906 multiclass avx512_fp14_p_vl_all<bits<8> opc, string OpcodeStr, SDNode OpNode> {
6907 defm PSZ : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"), OpNode, v16f32_info>,
6908 EVEX_V512, EVEX_CD8<32, CD8VF>;
6909 defm PDZ : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"), OpNode, v8f64_info>,
6910 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
6912 // Define only if AVX512VL feature is present.
6913 let Predicates = [HasVLX] in {
6914 defm PSZ128 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"),
6915 OpNode, v4f32x_info>,
6916 EVEX_V128, EVEX_CD8<32, CD8VF>;
6917 defm PSZ256 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"),
6918 OpNode, v8f32x_info>,
6919 EVEX_V256, EVEX_CD8<32, CD8VF>;
6920 defm PDZ128 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"),
6921 OpNode, v2f64x_info>,
6922 EVEX_V128, VEX_W, EVEX_CD8<64, CD8VF>;
6923 defm PDZ256 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"),
6924 OpNode, v4f64x_info>,
6925 EVEX_V256, VEX_W, EVEX_CD8<64, CD8VF>;
6929 defm VRSQRT14 : avx512_fp14_p_vl_all<0x4E, "vrsqrt14", X86frsqrt>;
6930 defm VRCP14 : avx512_fp14_p_vl_all<0x4C, "vrcp14", X86frcp>;
6932 /// avx512_fp28_s rcp28ss, rcp28sd, rsqrt28ss, rsqrt28sd
6933 multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
6936 defm r : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
6937 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
6938 "$src2, $src1", "$src1, $src2",
6939 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
6940 (i32 FROUND_CURRENT))>;
6942 defm rb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
6943 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
6944 "{sae}, $src2, $src1", "$src1, $src2, {sae}",
6945 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
6946 (i32 FROUND_NO_EXC))>, EVEX_B;
6948 defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
6949 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
6950 "$src2, $src1", "$src1, $src2",
6951 (OpNode (_.VT _.RC:$src1),
6952 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
6953 (i32 FROUND_CURRENT))>;
6956 multiclass avx512_eri_s<bits<8> opc, string OpcodeStr, SDNode OpNode> {
6957 defm SS : avx512_fp28_s<opc, OpcodeStr#"ss", f32x_info, OpNode>,
6958 EVEX_CD8<32, CD8VT1>;
6959 defm SD : avx512_fp28_s<opc, OpcodeStr#"sd", f64x_info, OpNode>,
6960 EVEX_CD8<64, CD8VT1>, VEX_W;
6963 let Predicates = [HasERI] in {
6964 defm VRCP28 : avx512_eri_s<0xCB, "vrcp28", X86rcp28s>, T8PD, EVEX_4V;
6965 defm VRSQRT28 : avx512_eri_s<0xCD, "vrsqrt28", X86rsqrt28s>, T8PD, EVEX_4V;
6968 defm VGETEXP : avx512_eri_s<0x43, "vgetexp", X86fgetexpRnds>, T8PD, EVEX_4V;
6969 /// avx512_fp28_p rcp28ps, rcp28pd, rsqrt28ps, rsqrt28pd
6971 multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6974 defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
6975 (ins _.RC:$src), OpcodeStr, "$src", "$src",
6976 (OpNode (_.VT _.RC:$src), (i32 FROUND_CURRENT))>;
6978 defm m : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
6979 (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
6981 (bitconvert (_.LdFrag addr:$src))),
6982 (i32 FROUND_CURRENT))>;
6984 defm mb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
6985 (ins _.ScalarMemOp:$src), OpcodeStr,
6986 "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
6988 (X86VBroadcast (_.ScalarLdFrag addr:$src))),
6989 (i32 FROUND_CURRENT))>, EVEX_B;
6991 multiclass avx512_fp28_p_round<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6993 defm rb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
6994 (ins _.RC:$src), OpcodeStr,
6995 "{sae}, $src", "$src, {sae}",
6996 (OpNode (_.VT _.RC:$src), (i32 FROUND_NO_EXC))>, EVEX_B;
6999 multiclass avx512_eri<bits<8> opc, string OpcodeStr, SDNode OpNode> {
7000 defm PS : avx512_fp28_p<opc, OpcodeStr#"ps", v16f32_info, OpNode>,
7001 avx512_fp28_p_round<opc, OpcodeStr#"ps", v16f32_info, OpNode>,
7002 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
7003 defm PD : avx512_fp28_p<opc, OpcodeStr#"pd", v8f64_info, OpNode>,
7004 avx512_fp28_p_round<opc, OpcodeStr#"pd", v8f64_info, OpNode>,
7005 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
7008 multiclass avx512_fp_unaryop_packed<bits<8> opc, string OpcodeStr,
7010 // Define only if AVX512VL feature is present.
7011 let Predicates = [HasVLX] in {
7012 defm PSZ128 : avx512_fp28_p<opc, OpcodeStr#"ps", v4f32x_info, OpNode>,
7013 EVEX_V128, T8PD, EVEX_CD8<32, CD8VF>;
7014 defm PSZ256 : avx512_fp28_p<opc, OpcodeStr#"ps", v8f32x_info, OpNode>,
7015 EVEX_V256, T8PD, EVEX_CD8<32, CD8VF>;
7016 defm PDZ128 : avx512_fp28_p<opc, OpcodeStr#"pd", v2f64x_info, OpNode>,
7017 EVEX_V128, VEX_W, T8PD, EVEX_CD8<64, CD8VF>;
7018 defm PDZ256 : avx512_fp28_p<opc, OpcodeStr#"pd", v4f64x_info, OpNode>,
7019 EVEX_V256, VEX_W, T8PD, EVEX_CD8<64, CD8VF>;
7022 let Predicates = [HasERI] in {
7024 defm VRSQRT28 : avx512_eri<0xCC, "vrsqrt28", X86rsqrt28>, EVEX;
7025 defm VRCP28 : avx512_eri<0xCA, "vrcp28", X86rcp28>, EVEX;
7026 defm VEXP2 : avx512_eri<0xC8, "vexp2", X86exp2>, EVEX;
7028 defm VGETEXP : avx512_eri<0x42, "vgetexp", X86fgetexpRnd>,
7029 avx512_fp_unaryop_packed<0x42, "vgetexp", X86fgetexpRnd> , EVEX;
7031 multiclass avx512_sqrt_packed_round<bits<8> opc, string OpcodeStr,
7032 SDNode OpNodeRnd, X86VectorVTInfo _>{
7033 defm rb: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
7034 (ins _.RC:$src, AVX512RC:$rc), OpcodeStr, "$rc, $src", "$src, $rc",
7035 (_.VT (OpNodeRnd _.RC:$src, (i32 imm:$rc)))>,
7036 EVEX, EVEX_B, EVEX_RC;
7039 multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr,
7040 SDNode OpNode, X86VectorVTInfo _>{
7041 defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
7042 (ins _.RC:$src), OpcodeStr, "$src", "$src",
7043 (_.FloatVT (OpNode _.RC:$src))>, EVEX;
7044 defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
7045 (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
7047 (bitconvert (_.LdFrag addr:$src))))>, EVEX;
7049 defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
7050 (ins _.ScalarMemOp:$src), OpcodeStr,
7051 "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
7053 (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
7057 multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr,
7059 defm PSZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
7061 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
7062 defm PDZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
7064 EVEX_V512, VEX_W, PD, EVEX_CD8<64, CD8VF>;
7065 // Define only if AVX512VL feature is present.
7066 let Predicates = [HasVLX] in {
7067 defm PSZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
7068 OpNode, v4f32x_info>,
7069 EVEX_V128, PS, EVEX_CD8<32, CD8VF>;
7070 defm PSZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
7071 OpNode, v8f32x_info>,
7072 EVEX_V256, PS, EVEX_CD8<32, CD8VF>;
7073 defm PDZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
7074 OpNode, v2f64x_info>,
7075 EVEX_V128, VEX_W, PD, EVEX_CD8<64, CD8VF>;
7076 defm PDZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
7077 OpNode, v4f64x_info>,
7078 EVEX_V256, VEX_W, PD, EVEX_CD8<64, CD8VF>;
7082 multiclass avx512_sqrt_packed_all_round<bits<8> opc, string OpcodeStr,
7084 defm PSZ : avx512_sqrt_packed_round<opc, !strconcat(OpcodeStr, "ps"), OpNodeRnd,
7085 v16f32_info>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
7086 defm PDZ : avx512_sqrt_packed_round<opc, !strconcat(OpcodeStr, "pd"), OpNodeRnd,
7087 v8f64_info>, EVEX_V512, VEX_W, PD, EVEX_CD8<64, CD8VF>;
7090 multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
7091 string SUFF, SDNode OpNode, SDNode OpNodeRnd> {
7093 defm r_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
7094 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
7095 "$src2, $src1", "$src1, $src2",
7096 (OpNodeRnd (_.VT _.RC:$src1),
7098 (i32 FROUND_CURRENT))>;
7099 defm m_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
7100 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
7101 "$src2, $src1", "$src1, $src2",
7102 (OpNodeRnd (_.VT _.RC:$src1),
7103 (_.VT (scalar_to_vector
7104 (_.ScalarLdFrag addr:$src2))),
7105 (i32 FROUND_CURRENT))>;
7107 defm rb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
7108 (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr,
7109 "$rc, $src2, $src1", "$src1, $src2, $rc",
7110 (OpNodeRnd (_.VT _.RC:$src1),
7115 let isCodeGenOnly = 1, hasSideEffects = 0 in {
7116 def r : I<opc, MRMSrcReg, (outs _.FRC:$dst),
7117 (ins _.FRC:$src1, _.FRC:$src2),
7118 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>;
7121 def m : I<opc, MRMSrcMem, (outs _.FRC:$dst),
7122 (ins _.FRC:$src1, _.ScalarMemOp:$src2),
7123 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>;
7126 def : Pat<(_.EltVT (OpNode _.FRC:$src)),
7127 (!cast<Instruction>(NAME#SUFF#Zr)
7128 (_.EltVT (IMPLICIT_DEF)), _.FRC:$src)>;
7130 def : Pat<(_.EltVT (OpNode (load addr:$src))),
7131 (!cast<Instruction>(NAME#SUFF#Zm)
7132 (_.EltVT (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX512, OptForSize]>;
7135 multiclass avx512_sqrt_scalar_all<bits<8> opc, string OpcodeStr> {
7136 defm SSZ : avx512_sqrt_scalar<opc, OpcodeStr#"ss", f32x_info, "SS", fsqrt,
7137 X86fsqrtRnds>, EVEX_CD8<32, CD8VT1>, EVEX_4V, XS;
7138 defm SDZ : avx512_sqrt_scalar<opc, OpcodeStr#"sd", f64x_info, "SD", fsqrt,
7139 X86fsqrtRnds>, EVEX_CD8<64, CD8VT1>, EVEX_4V, XD, VEX_W;
7142 defm VSQRT : avx512_sqrt_packed_all<0x51, "vsqrt", fsqrt>,
7143 avx512_sqrt_packed_all_round<0x51, "vsqrt", X86fsqrtRnd>;
7145 defm VSQRT : avx512_sqrt_scalar_all<0x51, "vsqrt">, VEX_LIG;
7147 let Predicates = [HasAVX512] in {
7148 def : Pat<(f32 (X86frsqrt FR32X:$src)),
7149 (COPY_TO_REGCLASS (VRSQRT14SSrr (v4f32 (IMPLICIT_DEF)), (COPY_TO_REGCLASS FR32X:$src, VR128X)), VR128X)>;
7150 def : Pat<(f32 (X86frsqrt (load addr:$src))),
7151 (COPY_TO_REGCLASS (VRSQRT14SSrm (v4f32 (IMPLICIT_DEF)), addr:$src), VR128X)>,
7152 Requires<[OptForSize]>;
7153 def : Pat<(f32 (X86frcp FR32X:$src)),
7154 (COPY_TO_REGCLASS (VRCP14SSrr (v4f32 (IMPLICIT_DEF)), (COPY_TO_REGCLASS FR32X:$src, VR128X)), VR128X )>;
7155 def : Pat<(f32 (X86frcp (load addr:$src))),
7156 (COPY_TO_REGCLASS (VRCP14SSrm (v4f32 (IMPLICIT_DEF)), addr:$src), VR128X)>,
7157 Requires<[OptForSize]>;
7161 avx512_rndscale_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
7163 let ExeDomain = _.ExeDomain in {
7164 defm r : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
7165 (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
7166 "$src3, $src2, $src1", "$src1, $src2, $src3",
7167 (_.VT (X86RndScales (_.VT _.RC:$src1), (_.VT _.RC:$src2),
7168 (i32 imm:$src3), (i32 FROUND_CURRENT)))>;
7170 defm rb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
7171 (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
7172 "$src3, {sae}, $src2, $src1", "$src1, $src2, {sae}, $src3",
7173 (_.VT (X86RndScales (_.VT _.RC:$src1), (_.VT _.RC:$src2),
7174 (i32 imm:$src3), (i32 FROUND_NO_EXC)))>, EVEX_B;
7176 defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
7177 (ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
7179 "$src3, $src2, $src1", "$src1, $src2, $src3",
7180 (_.VT (X86RndScales (_.VT _.RC:$src1),
7181 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
7182 (i32 imm:$src3), (i32 FROUND_CURRENT)))>;
7184 let Predicates = [HasAVX512] in {
7185 def : Pat<(ffloor _.FRC:$src), (COPY_TO_REGCLASS
7186 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
7187 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x1))), _.FRC)>;
7188 def : Pat<(fceil _.FRC:$src), (COPY_TO_REGCLASS
7189 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
7190 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x2))), _.FRC)>;
7191 def : Pat<(ftrunc _.FRC:$src), (COPY_TO_REGCLASS
7192 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
7193 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x3))), _.FRC)>;
7194 def : Pat<(frint _.FRC:$src), (COPY_TO_REGCLASS
7195 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
7196 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x4))), _.FRC)>;
7197 def : Pat<(fnearbyint _.FRC:$src), (COPY_TO_REGCLASS
7198 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
7199 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0xc))), _.FRC)>;
7201 def : Pat<(ffloor (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
7202 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
7203 addr:$src, (i32 0x1))), _.FRC)>;
7204 def : Pat<(fceil (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
7205 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
7206 addr:$src, (i32 0x2))), _.FRC)>;
7207 def : Pat<(ftrunc (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
7208 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
7209 addr:$src, (i32 0x3))), _.FRC)>;
7210 def : Pat<(frint (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
7211 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
7212 addr:$src, (i32 0x4))), _.FRC)>;
7213 def : Pat<(fnearbyint (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
7214 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
7215 addr:$src, (i32 0xc))), _.FRC)>;
7219 defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", f32x_info>,
7220 AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VT1>;
7222 defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", f64x_info>, VEX_W,
7223 AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VT1>;
7225 //-------------------------------------------------
7226 // Integer truncate and extend operations
7227 //-------------------------------------------------
7229 multiclass avx512_trunc_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
7230 X86VectorVTInfo SrcInfo, X86VectorVTInfo DestInfo,
7231 X86MemOperand x86memop> {
7232 let ExeDomain = DestInfo.ExeDomain in
7233 defm rr : AVX512_maskable<opc, MRMDestReg, DestInfo, (outs DestInfo.RC:$dst),
7234 (ins SrcInfo.RC:$src1), OpcodeStr ,"$src1", "$src1",
7235 (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1)))>,
7238 // for intrinsic pattern match
7239 def : Pat<(DestInfo.VT (X86select DestInfo.KRCWM:$mask,
7240 (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1))),
7242 (!cast<Instruction>(NAME#SrcInfo.ZSuffix##rrkz) DestInfo.KRCWM:$mask ,
7245 def : Pat<(DestInfo.VT (X86select DestInfo.KRCWM:$mask,
7246 (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1))),
7247 DestInfo.ImmAllZerosV)),
7248 (!cast<Instruction>(NAME#SrcInfo.ZSuffix##rrkz) DestInfo.KRCWM:$mask ,
7251 def : Pat<(DestInfo.VT (X86select DestInfo.KRCWM:$mask,
7252 (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1))),
7253 DestInfo.RC:$src0)),
7254 (!cast<Instruction>(NAME#SrcInfo.ZSuffix##rrk) DestInfo.RC:$src0,
7255 DestInfo.KRCWM:$mask ,
7258 let mayStore = 1, mayLoad = 1, hasSideEffects = 0,
7259 ExeDomain = DestInfo.ExeDomain in {
7260 def mr : AVX512XS8I<opc, MRMDestMem, (outs),
7261 (ins x86memop:$dst, SrcInfo.RC:$src),
7262 OpcodeStr # "\t{$src, $dst|$dst, $src}",
7265 def mrk : AVX512XS8I<opc, MRMDestMem, (outs),
7266 (ins x86memop:$dst, SrcInfo.KRCWM:$mask, SrcInfo.RC:$src),
7267 OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}",
7269 }//mayStore = 1, mayLoad = 1, hasSideEffects = 0
7272 multiclass avx512_trunc_mr_lowering<X86VectorVTInfo SrcInfo,
7273 X86VectorVTInfo DestInfo,
7274 PatFrag truncFrag, PatFrag mtruncFrag > {
7276 def : Pat<(truncFrag (SrcInfo.VT SrcInfo.RC:$src), addr:$dst),
7277 (!cast<Instruction>(NAME#SrcInfo.ZSuffix##mr)
7278 addr:$dst, SrcInfo.RC:$src)>;
7280 def : Pat<(mtruncFrag addr:$dst, SrcInfo.KRCWM:$mask,
7281 (SrcInfo.VT SrcInfo.RC:$src)),
7282 (!cast<Instruction>(NAME#SrcInfo.ZSuffix##mrk)
7283 addr:$dst, SrcInfo.KRCWM:$mask, SrcInfo.RC:$src)>;
7286 multiclass avx512_trunc<bits<8> opc, string OpcodeStr, SDNode OpNode,
7287 AVX512VLVectorVTInfo VTSrcInfo, X86VectorVTInfo DestInfoZ128,
7288 X86VectorVTInfo DestInfoZ256, X86VectorVTInfo DestInfoZ,
7289 X86MemOperand x86memopZ128, X86MemOperand x86memopZ256,
7290 X86MemOperand x86memopZ, PatFrag truncFrag, PatFrag mtruncFrag,
7291 Predicate prd = HasAVX512>{
7293 let Predicates = [HasVLX, prd] in {
7294 defm Z128: avx512_trunc_common<opc, OpcodeStr, OpNode, VTSrcInfo.info128,
7295 DestInfoZ128, x86memopZ128>,
7296 avx512_trunc_mr_lowering<VTSrcInfo.info128, DestInfoZ128,
7297 truncFrag, mtruncFrag>, EVEX_V128;
7299 defm Z256: avx512_trunc_common<opc, OpcodeStr, OpNode, VTSrcInfo.info256,
7300 DestInfoZ256, x86memopZ256>,
7301 avx512_trunc_mr_lowering<VTSrcInfo.info256, DestInfoZ256,
7302 truncFrag, mtruncFrag>, EVEX_V256;
7304 let Predicates = [prd] in
7305 defm Z: avx512_trunc_common<opc, OpcodeStr, OpNode, VTSrcInfo.info512,
7306 DestInfoZ, x86memopZ>,
7307 avx512_trunc_mr_lowering<VTSrcInfo.info512, DestInfoZ,
7308 truncFrag, mtruncFrag>, EVEX_V512;
7311 multiclass avx512_trunc_qb<bits<8> opc, string OpcodeStr, SDNode OpNode,
7312 PatFrag StoreNode, PatFrag MaskedStoreNode> {
7313 defm NAME: avx512_trunc<opc, OpcodeStr, OpNode, avx512vl_i64_info,
7314 v16i8x_info, v16i8x_info, v16i8x_info, i16mem, i32mem, i64mem,
7315 StoreNode, MaskedStoreNode>, EVEX_CD8<8, CD8VO>;
7318 multiclass avx512_trunc_qw<bits<8> opc, string OpcodeStr, SDNode OpNode,
7319 PatFrag StoreNode, PatFrag MaskedStoreNode> {
7320 defm NAME: avx512_trunc<opc, OpcodeStr, OpNode, avx512vl_i64_info,
7321 v8i16x_info, v8i16x_info, v8i16x_info, i32mem, i64mem, i128mem,
7322 StoreNode, MaskedStoreNode>, EVEX_CD8<16, CD8VQ>;
7325 multiclass avx512_trunc_qd<bits<8> opc, string OpcodeStr, SDNode OpNode,
7326 PatFrag StoreNode, PatFrag MaskedStoreNode> {
7327 defm NAME: avx512_trunc<opc, OpcodeStr, OpNode, avx512vl_i64_info,
7328 v4i32x_info, v4i32x_info, v8i32x_info, i64mem, i128mem, i256mem,
7329 StoreNode, MaskedStoreNode>, EVEX_CD8<32, CD8VH>;
7332 multiclass avx512_trunc_db<bits<8> opc, string OpcodeStr, SDNode OpNode,
7333 PatFrag StoreNode, PatFrag MaskedStoreNode> {
7334 defm NAME: avx512_trunc<opc, OpcodeStr, OpNode, avx512vl_i32_info,
7335 v16i8x_info, v16i8x_info, v16i8x_info, i32mem, i64mem, i128mem,
7336 StoreNode, MaskedStoreNode>, EVEX_CD8<8, CD8VQ>;
7339 multiclass avx512_trunc_dw<bits<8> opc, string OpcodeStr, SDNode OpNode,
7340 PatFrag StoreNode, PatFrag MaskedStoreNode> {
7341 defm NAME: avx512_trunc<opc, OpcodeStr, OpNode, avx512vl_i32_info,
7342 v8i16x_info, v8i16x_info, v16i16x_info, i64mem, i128mem, i256mem,
7343 StoreNode, MaskedStoreNode>, EVEX_CD8<16, CD8VH>;
7346 multiclass avx512_trunc_wb<bits<8> opc, string OpcodeStr, SDNode OpNode,
7347 PatFrag StoreNode, PatFrag MaskedStoreNode> {
7348 defm NAME: avx512_trunc<opc, OpcodeStr, OpNode, avx512vl_i16_info,
7349 v16i8x_info, v16i8x_info, v32i8x_info, i64mem, i128mem, i256mem,
7350 StoreNode, MaskedStoreNode, HasBWI>, EVEX_CD8<16, CD8VH>;
7353 defm VPMOVQB : avx512_trunc_qb<0x32, "vpmovqb", X86vtrunc,
7354 truncstorevi8, masked_truncstorevi8>;
7355 defm VPMOVSQB : avx512_trunc_qb<0x22, "vpmovsqb", X86vtruncs,
7356 truncstore_s_vi8, masked_truncstore_s_vi8>;
7357 defm VPMOVUSQB : avx512_trunc_qb<0x12, "vpmovusqb", X86vtruncus,
7358 truncstore_us_vi8, masked_truncstore_us_vi8>;
7360 defm VPMOVQW : avx512_trunc_qw<0x34, "vpmovqw", X86vtrunc,
7361 truncstorevi16, masked_truncstorevi16>;
7362 defm VPMOVSQW : avx512_trunc_qw<0x24, "vpmovsqw", X86vtruncs,
7363 truncstore_s_vi16, masked_truncstore_s_vi16>;
7364 defm VPMOVUSQW : avx512_trunc_qw<0x14, "vpmovusqw", X86vtruncus,
7365 truncstore_us_vi16, masked_truncstore_us_vi16>;
7367 defm VPMOVQD : avx512_trunc_qd<0x35, "vpmovqd", X86vtrunc,
7368 truncstorevi32, masked_truncstorevi32>;
7369 defm VPMOVSQD : avx512_trunc_qd<0x25, "vpmovsqd", X86vtruncs,
7370 truncstore_s_vi32, masked_truncstore_s_vi32>;
7371 defm VPMOVUSQD : avx512_trunc_qd<0x15, "vpmovusqd", X86vtruncus,
7372 truncstore_us_vi32, masked_truncstore_us_vi32>;
7374 defm VPMOVDB : avx512_trunc_db<0x31, "vpmovdb", X86vtrunc,
7375 truncstorevi8, masked_truncstorevi8>;
7376 defm VPMOVSDB : avx512_trunc_db<0x21, "vpmovsdb", X86vtruncs,
7377 truncstore_s_vi8, masked_truncstore_s_vi8>;
7378 defm VPMOVUSDB : avx512_trunc_db<0x11, "vpmovusdb", X86vtruncus,
7379 truncstore_us_vi8, masked_truncstore_us_vi8>;
7381 defm VPMOVDW : avx512_trunc_dw<0x33, "vpmovdw", X86vtrunc,
7382 truncstorevi16, masked_truncstorevi16>;
7383 defm VPMOVSDW : avx512_trunc_dw<0x23, "vpmovsdw", X86vtruncs,
7384 truncstore_s_vi16, masked_truncstore_s_vi16>;
7385 defm VPMOVUSDW : avx512_trunc_dw<0x13, "vpmovusdw", X86vtruncus,
7386 truncstore_us_vi16, masked_truncstore_us_vi16>;
7388 defm VPMOVWB : avx512_trunc_wb<0x30, "vpmovwb", X86vtrunc,
7389 truncstorevi8, masked_truncstorevi8>;
7390 defm VPMOVSWB : avx512_trunc_wb<0x20, "vpmovswb", X86vtruncs,
7391 truncstore_s_vi8, masked_truncstore_s_vi8>;
7392 defm VPMOVUSWB : avx512_trunc_wb<0x10, "vpmovuswb", X86vtruncus,
7393 truncstore_us_vi8, masked_truncstore_us_vi8>;
7395 let Predicates = [HasAVX512, NoVLX] in {
7396 def: Pat<(v8i16 (X86vtrunc (v8i32 VR256X:$src))),
7397 (v8i16 (EXTRACT_SUBREG
7398 (v16i16 (VPMOVDWZrr (v16i32 (INSERT_SUBREG (IMPLICIT_DEF),
7399 VR256X:$src, sub_ymm)))), sub_xmm))>;
7400 def: Pat<(v4i32 (X86vtrunc (v4i64 VR256X:$src))),
7401 (v4i32 (EXTRACT_SUBREG
7402 (v8i32 (VPMOVQDZrr (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
7403 VR256X:$src, sub_ymm)))), sub_xmm))>;
7406 let Predicates = [HasBWI, NoVLX] in {
7407 def: Pat<(v16i8 (X86vtrunc (v16i16 VR256X:$src))),
7408 (v16i8 (EXTRACT_SUBREG (VPMOVWBZrr (v32i16 (INSERT_SUBREG (IMPLICIT_DEF),
7409 VR256X:$src, sub_ymm))), sub_xmm))>;
7412 multiclass avx512_extend_common<bits<8> opc, string OpcodeStr,
7413 X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo,
7414 X86MemOperand x86memop, PatFrag LdFrag, SDPatternOperator OpNode>{
7415 let ExeDomain = DestInfo.ExeDomain in {
7416 defm rr : AVX512_maskable<opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
7417 (ins SrcInfo.RC:$src), OpcodeStr ,"$src", "$src",
7418 (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src)))>,
7421 defm rm : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
7422 (ins x86memop:$src), OpcodeStr ,"$src", "$src",
7423 (DestInfo.VT (LdFrag addr:$src))>,
7428 multiclass avx512_extend_BW<bits<8> opc, string OpcodeStr,
7429 SDPatternOperator OpNode,
7430 string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi8")> {
7431 let Predicates = [HasVLX, HasBWI] in {
7432 defm Z128: avx512_extend_common<opc, OpcodeStr, v8i16x_info,
7433 v16i8x_info, i64mem, LdFrag, OpNode>,
7434 EVEX_CD8<8, CD8VH>, T8PD, EVEX_V128;
7436 defm Z256: avx512_extend_common<opc, OpcodeStr, v16i16x_info,
7437 v16i8x_info, i128mem, LdFrag, OpNode>,
7438 EVEX_CD8<8, CD8VH>, T8PD, EVEX_V256;
7440 let Predicates = [HasBWI] in {
7441 defm Z : avx512_extend_common<opc, OpcodeStr, v32i16_info,
7442 v32i8x_info, i256mem, LdFrag, OpNode>,
7443 EVEX_CD8<8, CD8VH>, T8PD, EVEX_V512;
7447 multiclass avx512_extend_BD<bits<8> opc, string OpcodeStr,
7448 SDPatternOperator OpNode,
7449 string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi8")> {
7450 let Predicates = [HasVLX, HasAVX512] in {
7451 defm Z128: avx512_extend_common<opc, OpcodeStr, v4i32x_info,
7452 v16i8x_info, i32mem, LdFrag, OpNode>,
7453 EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V128;
7455 defm Z256: avx512_extend_common<opc, OpcodeStr, v8i32x_info,
7456 v16i8x_info, i64mem, LdFrag, OpNode>,
7457 EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V256;
7459 let Predicates = [HasAVX512] in {
7460 defm Z : avx512_extend_common<opc, OpcodeStr, v16i32_info,
7461 v16i8x_info, i128mem, LdFrag, OpNode>,
7462 EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V512;
7466 multiclass avx512_extend_BQ<bits<8> opc, string OpcodeStr,
7467 SDPatternOperator OpNode,
7468 string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi8")> {
7469 let Predicates = [HasVLX, HasAVX512] in {
7470 defm Z128: avx512_extend_common<opc, OpcodeStr, v2i64x_info,
7471 v16i8x_info, i16mem, LdFrag, OpNode>,
7472 EVEX_CD8<8, CD8VO>, T8PD, EVEX_V128;
7474 defm Z256: avx512_extend_common<opc, OpcodeStr, v4i64x_info,
7475 v16i8x_info, i32mem, LdFrag, OpNode>,
7476 EVEX_CD8<8, CD8VO>, T8PD, EVEX_V256;
7478 let Predicates = [HasAVX512] in {
7479 defm Z : avx512_extend_common<opc, OpcodeStr, v8i64_info,
7480 v16i8x_info, i64mem, LdFrag, OpNode>,
7481 EVEX_CD8<8, CD8VO>, T8PD, EVEX_V512;
7485 multiclass avx512_extend_WD<bits<8> opc, string OpcodeStr,
7486 SDPatternOperator OpNode,
7487 string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi16")> {
7488 let Predicates = [HasVLX, HasAVX512] in {
7489 defm Z128: avx512_extend_common<opc, OpcodeStr, v4i32x_info,
7490 v8i16x_info, i64mem, LdFrag, OpNode>,
7491 EVEX_CD8<16, CD8VH>, T8PD, EVEX_V128;
7493 defm Z256: avx512_extend_common<opc, OpcodeStr, v8i32x_info,
7494 v8i16x_info, i128mem, LdFrag, OpNode>,
7495 EVEX_CD8<16, CD8VH>, T8PD, EVEX_V256;
7497 let Predicates = [HasAVX512] in {
7498 defm Z : avx512_extend_common<opc, OpcodeStr, v16i32_info,
7499 v16i16x_info, i256mem, LdFrag, OpNode>,
7500 EVEX_CD8<16, CD8VH>, T8PD, EVEX_V512;
7504 multiclass avx512_extend_WQ<bits<8> opc, string OpcodeStr,
7505 SDPatternOperator OpNode,
7506 string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi16")> {
7507 let Predicates = [HasVLX, HasAVX512] in {
7508 defm Z128: avx512_extend_common<opc, OpcodeStr, v2i64x_info,
7509 v8i16x_info, i32mem, LdFrag, OpNode>,
7510 EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V128;
7512 defm Z256: avx512_extend_common<opc, OpcodeStr, v4i64x_info,
7513 v8i16x_info, i64mem, LdFrag, OpNode>,
7514 EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V256;
7516 let Predicates = [HasAVX512] in {
7517 defm Z : avx512_extend_common<opc, OpcodeStr, v8i64_info,
7518 v8i16x_info, i128mem, LdFrag, OpNode>,
7519 EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V512;
7523 multiclass avx512_extend_DQ<bits<8> opc, string OpcodeStr,
7524 SDPatternOperator OpNode,
7525 string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi32")> {
7527 let Predicates = [HasVLX, HasAVX512] in {
7528 defm Z128: avx512_extend_common<opc, OpcodeStr, v2i64x_info,
7529 v4i32x_info, i64mem, LdFrag, OpNode>,
7530 EVEX_CD8<32, CD8VH>, T8PD, EVEX_V128;
7532 defm Z256: avx512_extend_common<opc, OpcodeStr, v4i64x_info,
7533 v4i32x_info, i128mem, LdFrag, OpNode>,
7534 EVEX_CD8<32, CD8VH>, T8PD, EVEX_V256;
7536 let Predicates = [HasAVX512] in {
7537 defm Z : avx512_extend_common<opc, OpcodeStr, v8i64_info,
7538 v8i32x_info, i256mem, LdFrag, OpNode>,
7539 EVEX_CD8<32, CD8VH>, T8PD, EVEX_V512;
7543 defm VPMOVZXBW : avx512_extend_BW<0x30, "vpmovzxbw", X86vzext, "z">;
7544 defm VPMOVZXBD : avx512_extend_BD<0x31, "vpmovzxbd", X86vzext, "z">;
7545 defm VPMOVZXBQ : avx512_extend_BQ<0x32, "vpmovzxbq", X86vzext, "z">;
7546 defm VPMOVZXWD : avx512_extend_WD<0x33, "vpmovzxwd", X86vzext, "z">;
7547 defm VPMOVZXWQ : avx512_extend_WQ<0x34, "vpmovzxwq", X86vzext, "z">;
7548 defm VPMOVZXDQ : avx512_extend_DQ<0x35, "vpmovzxdq", X86vzext, "z">;
7550 defm VPMOVSXBW: avx512_extend_BW<0x20, "vpmovsxbw", X86vsext, "s">;
7551 defm VPMOVSXBD: avx512_extend_BD<0x21, "vpmovsxbd", X86vsext, "s">;
7552 defm VPMOVSXBQ: avx512_extend_BQ<0x22, "vpmovsxbq", X86vsext, "s">;
7553 defm VPMOVSXWD: avx512_extend_WD<0x23, "vpmovsxwd", X86vsext, "s">;
7554 defm VPMOVSXWQ: avx512_extend_WQ<0x24, "vpmovsxwq", X86vsext, "s">;
7555 defm VPMOVSXDQ: avx512_extend_DQ<0x25, "vpmovsxdq", X86vsext, "s">;
7557 // EXTLOAD patterns, implemented using vpmovz
7558 multiclass avx512_ext_lowering<string InstrStr, X86VectorVTInfo To,
7559 X86VectorVTInfo From, PatFrag LdFrag> {
7560 def : Pat<(To.VT (LdFrag addr:$src)),
7561 (!cast<Instruction>("VPMOVZX"#InstrStr#"rm") addr:$src)>;
7562 def : Pat<(To.VT (vselect To.KRCWM:$mask, (LdFrag addr:$src), To.RC:$src0)),
7563 (!cast<Instruction>("VPMOVZX"#InstrStr#"rmk") To.RC:$src0,
7564 To.KRC:$mask, addr:$src)>;
7565 def : Pat<(To.VT (vselect To.KRCWM:$mask, (LdFrag addr:$src),
7567 (!cast<Instruction>("VPMOVZX"#InstrStr#"rmkz") To.KRC:$mask,
7571 let Predicates = [HasVLX, HasBWI] in {
7572 defm : avx512_ext_lowering<"BWZ128", v8i16x_info, v16i8x_info, extloadvi8>;
7573 defm : avx512_ext_lowering<"BWZ256", v16i16x_info, v16i8x_info, extloadvi8>;
7575 let Predicates = [HasBWI] in {
7576 defm : avx512_ext_lowering<"BWZ", v32i16_info, v32i8x_info, extloadvi8>;
7578 let Predicates = [HasVLX, HasAVX512] in {
7579 defm : avx512_ext_lowering<"BDZ128", v4i32x_info, v16i8x_info, extloadvi8>;
7580 defm : avx512_ext_lowering<"BDZ256", v8i32x_info, v16i8x_info, extloadvi8>;
7581 defm : avx512_ext_lowering<"BQZ128", v2i64x_info, v16i8x_info, extloadvi8>;
7582 defm : avx512_ext_lowering<"BQZ256", v4i64x_info, v16i8x_info, extloadvi8>;
7583 defm : avx512_ext_lowering<"WDZ128", v4i32x_info, v8i16x_info, extloadvi16>;
7584 defm : avx512_ext_lowering<"WDZ256", v8i32x_info, v8i16x_info, extloadvi16>;
7585 defm : avx512_ext_lowering<"WQZ128", v2i64x_info, v8i16x_info, extloadvi16>;
7586 defm : avx512_ext_lowering<"WQZ256", v4i64x_info, v8i16x_info, extloadvi16>;
7587 defm : avx512_ext_lowering<"DQZ128", v2i64x_info, v4i32x_info, extloadvi32>;
7588 defm : avx512_ext_lowering<"DQZ256", v4i64x_info, v4i32x_info, extloadvi32>;
7590 let Predicates = [HasAVX512] in {
7591 defm : avx512_ext_lowering<"BDZ", v16i32_info, v16i8x_info, extloadvi8>;
7592 defm : avx512_ext_lowering<"BQZ", v8i64_info, v16i8x_info, extloadvi8>;
7593 defm : avx512_ext_lowering<"WDZ", v16i32_info, v16i16x_info, extloadvi16>;
7594 defm : avx512_ext_lowering<"WQZ", v8i64_info, v8i16x_info, extloadvi16>;
7595 defm : avx512_ext_lowering<"DQZ", v8i64_info, v8i32x_info, extloadvi32>;
// AVX512_pmovx_patterns - selection patterns that fold loads into the
// EVEX-encoded sign/zero-extend instructions (VPMOVSX*/VPMOVZX*).
// OpcPrefix names the instruction family; ExtOp is the extend SDNode
// (X86vsext or X86vzext); ExtLoad16 is the 16-bit load fragment used for
// the byte->qword 128-bit case.  For each dst/src width combination the
// same instruction is matched against several equivalent load shapes
// (scalar_to_vector of an integer/fp load, vzmovl/vzload, and a plain
// bitcast vector load).
// NOTE(review): original line numbers jump throughout (e.g. 7611->7613) —
// closing braces of the inner `let Predicates` scopes were dropped by
// extraction; verify against the full file before editing.
7598 multiclass AVX512_pmovx_patterns<string OpcPrefix, string ExtTy,
7599 SDNode ExtOp, PatFrag ExtLoad16> {
// 128-bit byte->word patterns (VLX + BWI).
7601 let Predicates = [HasVLX, HasBWI] in {
7602 def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
7603 (!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
7604 def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
7605 (!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
7606 def : Pat<(v8i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
7607 (!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
7608 def : Pat<(v8i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
7609 (!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
7610 def : Pat<(v8i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
7611 (!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
// Remaining 128-bit extends need only VLX.
7613 let Predicates = [HasVLX] in {
7614 def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
7615 (!cast<I>(OpcPrefix#BDZ128rm) addr:$src)>;
7616 def : Pat<(v4i32 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
7617 (!cast<I>(OpcPrefix#BDZ128rm) addr:$src)>;
7618 def : Pat<(v4i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
7619 (!cast<I>(OpcPrefix#BDZ128rm) addr:$src)>;
7620 def : Pat<(v4i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
7621 (!cast<I>(OpcPrefix#BDZ128rm) addr:$src)>;
// byte->qword only reads 16 bits, hence the ExtLoad16 fragment.
7623 def : Pat<(v2i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (ExtLoad16 addr:$src)))))),
7624 (!cast<I>(OpcPrefix#BQZ128rm) addr:$src)>;
7625 def : Pat<(v2i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
7626 (!cast<I>(OpcPrefix#BQZ128rm) addr:$src)>;
7627 def : Pat<(v2i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
7628 (!cast<I>(OpcPrefix#BQZ128rm) addr:$src)>;
7629 def : Pat<(v2i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
7630 (!cast<I>(OpcPrefix#BQZ128rm) addr:$src)>;
7632 def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
7633 (!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;
7634 def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
7635 (!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;
7636 def : Pat<(v4i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
7637 (!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;
7638 def : Pat<(v4i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
7639 (!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;
7640 def : Pat<(v4i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
7641 (!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;
7643 def : Pat<(v2i64 (ExtOp (bc_v8i16 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
7644 (!cast<I>(OpcPrefix#WQZ128rm) addr:$src)>;
7645 def : Pat<(v2i64 (ExtOp (v8i16 (vzmovl_v4i32 addr:$src)))),
7646 (!cast<I>(OpcPrefix#WQZ128rm) addr:$src)>;
7647 def : Pat<(v2i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
7648 (!cast<I>(OpcPrefix#WQZ128rm) addr:$src)>;
7649 def : Pat<(v2i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
7650 (!cast<I>(OpcPrefix#WQZ128rm) addr:$src)>;
7652 def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
7653 (!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
7654 def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
7655 (!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
7656 def : Pat<(v2i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
7657 (!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
7658 def : Pat<(v2i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
7659 (!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
7660 def : Pat<(v2i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
7661 (!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
// 256-bit byte->word patterns (VLX + BWI).
7664 let Predicates = [HasVLX, HasBWI] in {
7665 def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
7666 (!cast<I>(OpcPrefix#BWZ256rm) addr:$src)>;
7667 def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
7668 (!cast<I>(OpcPrefix#BWZ256rm) addr:$src)>;
7669 def : Pat<(v16i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
7670 (!cast<I>(OpcPrefix#BWZ256rm) addr:$src)>;
// Remaining 256-bit extends need only VLX.
7672 let Predicates = [HasVLX] in {
7673 def : Pat<(v8i32 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
7674 (!cast<I>(OpcPrefix#BDZ256rm) addr:$src)>;
7675 def : Pat<(v8i32 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
7676 (!cast<I>(OpcPrefix#BDZ256rm) addr:$src)>;
7677 def : Pat<(v8i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
7678 (!cast<I>(OpcPrefix#BDZ256rm) addr:$src)>;
7679 def : Pat<(v8i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
7680 (!cast<I>(OpcPrefix#BDZ256rm) addr:$src)>;
7682 def : Pat<(v4i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
7683 (!cast<I>(OpcPrefix#BQZ256rm) addr:$src)>;
7684 def : Pat<(v4i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
7685 (!cast<I>(OpcPrefix#BQZ256rm) addr:$src)>;
7686 def : Pat<(v4i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
7687 (!cast<I>(OpcPrefix#BQZ256rm) addr:$src)>;
7688 def : Pat<(v4i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
7689 (!cast<I>(OpcPrefix#BQZ256rm) addr:$src)>;
7691 def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
7692 (!cast<I>(OpcPrefix#WDZ256rm) addr:$src)>;
7693 def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
7694 (!cast<I>(OpcPrefix#WDZ256rm) addr:$src)>;
7695 def : Pat<(v8i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
7696 (!cast<I>(OpcPrefix#WDZ256rm) addr:$src)>;
7698 def : Pat<(v4i64 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
7699 (!cast<I>(OpcPrefix#WQZ256rm) addr:$src)>;
7700 def : Pat<(v4i64 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
7701 (!cast<I>(OpcPrefix#WQZ256rm) addr:$src)>;
7702 def : Pat<(v4i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
7703 (!cast<I>(OpcPrefix#WQZ256rm) addr:$src)>;
7704 def : Pat<(v4i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
7705 (!cast<I>(OpcPrefix#WQZ256rm) addr:$src)>;
7707 def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
7708 (!cast<I>(OpcPrefix#DQZ256rm) addr:$src)>;
7709 def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
7710 (!cast<I>(OpcPrefix#DQZ256rm) addr:$src)>;
7711 def : Pat<(v4i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
7712 (!cast<I>(OpcPrefix#DQZ256rm) addr:$src)>;
// 512-bit byte->word needs BWI.
7715 let Predicates = [HasBWI] in {
7716 def : Pat<(v32i16 (ExtOp (bc_v32i8 (loadv4i64 addr:$src)))),
7717 (!cast<I>(OpcPrefix#BWZrm) addr:$src)>;
// Remaining 512-bit extends available on any AVX-512 target.
7719 let Predicates = [HasAVX512] in {
7720 def : Pat<(v16i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
7721 (!cast<I>(OpcPrefix#BDZrm) addr:$src)>;
7723 def : Pat<(v8i64 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
7724 (!cast<I>(OpcPrefix#BQZrm) addr:$src)>;
7725 def : Pat<(v8i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
7726 (!cast<I>(OpcPrefix#BQZrm) addr:$src)>;
7728 def : Pat<(v16i32 (ExtOp (bc_v16i16 (loadv4i64 addr:$src)))),
7729 (!cast<I>(OpcPrefix#WDZrm) addr:$src)>;
7731 def : Pat<(v8i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
7732 (!cast<I>(OpcPrefix#WQZrm) addr:$src)>;
7734 def : Pat<(v8i64 (ExtOp (bc_v8i32 (loadv4i64 addr:$src)))),
7735 (!cast<I>(OpcPrefix#DQZrm) addr:$src)>;
// Instantiate for sign-extend (VPMOVSX) and zero-extend (VPMOVZX).
7739 defm : AVX512_pmovx_patterns<"VPMOVSX", "s", X86vsext, extloadi32i16>;
7740 defm : AVX512_pmovx_patterns<"VPMOVZX", "z", X86vzext, loadi16_anyext>;
7742 //===----------------------------------------------------------------------===//
7743 // GATHER - SCATTER Operations
// avx512_gather - common format for one masked gather instruction.
// Outputs are the destination vector AND the writemask writeback
// ($mask_wb); $src1 is tied to $dst and the result is earlyclobber per the
// Constraints string.  EVEX_K selects merging-masking; the compressed
// displacement is scaled by the element size (CD8VT1).
7745 multiclass avx512_gather<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
7746 X86MemOperand memop, PatFrag GatherNode> {
7747 let Constraints = "@earlyclobber $dst, $src1 = $dst, $mask = $mask_wb",
7748 ExeDomain = _.ExeDomain in
7749 def rm : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst, _.KRCWM:$mask_wb),
7750 (ins _.RC:$src1, _.KRCWM:$mask, memop:$src2),
7751 !strconcat(OpcodeStr#_.Suffix,
7752 "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
7753 [(set _.RC:$dst, _.KRCWM:$mask_wb,
7754 (GatherNode (_.VT _.RC:$src1), _.KRCWM:$mask,
7755 vectoraddr:$src2))]>, EVEX, EVEX_K,
7756 EVEX_CD8<_.EltSize, CD8VT1>;
// avx512_gather_q_pd - gathers of 64-bit elements (qword ints / pd):
// element count matches index count at every width.  dopc/qopc are the
// opcodes for dword- and qword-indexed forms.
7759 multiclass avx512_gather_q_pd<bits<8> dopc, bits<8> qopc,
7760 AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
7761 defm NAME##D##SUFF##Z: avx512_gather<dopc, OpcodeStr##"d", _.info512,
7762 vy512mem, mgatherv8i32>, EVEX_V512, VEX_W;
7763 defm NAME##Q##SUFF##Z: avx512_gather<qopc, OpcodeStr##"q", _.info512,
7764 vz512mem, mgatherv8i64>, EVEX_V512, VEX_W;
7765 let Predicates = [HasVLX] in {
7766 defm NAME##D##SUFF##Z256: avx512_gather<dopc, OpcodeStr##"d", _.info256,
7767 vx256xmem, mgatherv4i32>, EVEX_V256, VEX_W;
7768 defm NAME##Q##SUFF##Z256: avx512_gather<qopc, OpcodeStr##"q", _.info256,
7769 vy256xmem, mgatherv4i64>, EVEX_V256, VEX_W;
7770 defm NAME##D##SUFF##Z128: avx512_gather<dopc, OpcodeStr##"d", _.info128,
7771 vx128xmem, mgatherv4i32>, EVEX_V128, VEX_W;
7772 defm NAME##Q##SUFF##Z128: avx512_gather<qopc, OpcodeStr##"q", _.info128,
7773 vx128xmem, mgatherv2i64>, EVEX_V128, VEX_W;
// avx512_gather_d_ps - gathers of 32-bit elements (dword ints / ps).
// The qword-indexed forms pass the next-narrower VT info (e.g. _.info256
// with mgatherv8i64) because 64-bit indices address half as many 32-bit
// elements as the index vector is wide.
7777 multiclass avx512_gather_d_ps<bits<8> dopc, bits<8> qopc,
7778 AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
7779 defm NAME##D##SUFF##Z: avx512_gather<dopc, OpcodeStr##"d", _.info512, vz512mem,
7780 mgatherv16i32>, EVEX_V512;
7781 defm NAME##Q##SUFF##Z: avx512_gather<qopc, OpcodeStr##"q", _.info256, vz512mem,
7782 mgatherv8i64>, EVEX_V512;
7783 let Predicates = [HasVLX] in {
7784 defm NAME##D##SUFF##Z256: avx512_gather<dopc, OpcodeStr##"d", _.info256,
7785 vy256xmem, mgatherv8i32>, EVEX_V256;
7786 defm NAME##Q##SUFF##Z256: avx512_gather<qopc, OpcodeStr##"q", _.info128,
7787 vy128xmem, mgatherv4i64>, EVEX_V256;
7788 defm NAME##D##SUFF##Z128: avx512_gather<dopc, OpcodeStr##"d", _.info128,
7789 vx128xmem, mgatherv4i32>, EVEX_V128;
7790 defm NAME##Q##SUFF##Z128: avx512_gather<qopc, OpcodeStr##"q", _.info128,
7791 vx64xmem, mgatherv2i64>, EVEX_V128;
// Floating-point and integer gather families.
7796 defm VGATHER : avx512_gather_q_pd<0x92, 0x93, avx512vl_f64_info, "vgather", "PD">,
7797 avx512_gather_d_ps<0x92, 0x93, avx512vl_f32_info, "vgather", "PS">;
7799 defm VPGATHER : avx512_gather_q_pd<0x90, 0x91, avx512vl_i64_info, "vpgather", "Q">,
7800 avx512_gather_d_ps<0x90, 0x91, avx512vl_i32_info, "vpgather", "D">;
// avx512_scatter - common format for one masked scatter instruction.
// Only output is the writemask writeback; memory is the destination
// (MRMDestMem, mayStore).  Mirrors avx512_gather above.
7802 multiclass avx512_scatter<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
7803 X86MemOperand memop, PatFrag ScatterNode> {
7805 let mayStore = 1, Constraints = "$mask = $mask_wb", ExeDomain = _.ExeDomain in
7807 def mr : AVX5128I<opc, MRMDestMem, (outs _.KRCWM:$mask_wb),
7808 (ins memop:$dst, _.KRCWM:$mask, _.RC:$src),
7809 !strconcat(OpcodeStr#_.Suffix,
7810 "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
7811 [(set _.KRCWM:$mask_wb, (ScatterNode (_.VT _.RC:$src),
7812 _.KRCWM:$mask, vectoraddr:$dst))]>,
7813 EVEX, EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
// avx512_scatter_q_pd - scatters of 64-bit elements; structure parallels
// avx512_gather_q_pd.
7816 multiclass avx512_scatter_q_pd<bits<8> dopc, bits<8> qopc,
7817 AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
7818 defm NAME##D##SUFF##Z: avx512_scatter<dopc, OpcodeStr##"d", _.info512,
7819 vy512mem, mscatterv8i32>, EVEX_V512, VEX_W;
7820 defm NAME##Q##SUFF##Z: avx512_scatter<qopc, OpcodeStr##"q", _.info512,
7821 vz512mem, mscatterv8i64>, EVEX_V512, VEX_W;
7822 let Predicates = [HasVLX] in {
7823 defm NAME##D##SUFF##Z256: avx512_scatter<dopc, OpcodeStr##"d", _.info256,
7824 vx256xmem, mscatterv4i32>, EVEX_V256, VEX_W;
7825 defm NAME##Q##SUFF##Z256: avx512_scatter<qopc, OpcodeStr##"q", _.info256,
7826 vy256xmem, mscatterv4i64>, EVEX_V256, VEX_W;
7827 defm NAME##D##SUFF##Z128: avx512_scatter<dopc, OpcodeStr##"d", _.info128,
7828 vx128xmem, mscatterv4i32>, EVEX_V128, VEX_W;
7829 defm NAME##Q##SUFF##Z128: avx512_scatter<qopc, OpcodeStr##"q", _.info128,
7830 vx128xmem, mscatterv2i64>, EVEX_V128, VEX_W;
// avx512_scatter_d_ps - scatters of 32-bit elements; qword-indexed forms
// use the next-narrower VT info, as in avx512_gather_d_ps.
7834 multiclass avx512_scatter_d_ps<bits<8> dopc, bits<8> qopc,
7835 AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
7836 defm NAME##D##SUFF##Z: avx512_scatter<dopc, OpcodeStr##"d", _.info512, vz512mem,
7837 mscatterv16i32>, EVEX_V512;
7838 defm NAME##Q##SUFF##Z: avx512_scatter<qopc, OpcodeStr##"q", _.info256, vz512mem,
7839 mscatterv8i64>, EVEX_V512;
7840 let Predicates = [HasVLX] in {
7841 defm NAME##D##SUFF##Z256: avx512_scatter<dopc, OpcodeStr##"d", _.info256,
7842 vy256xmem, mscatterv8i32>, EVEX_V256;
7843 defm NAME##Q##SUFF##Z256: avx512_scatter<qopc, OpcodeStr##"q", _.info128,
7844 vy128xmem, mscatterv4i64>, EVEX_V256;
7845 defm NAME##D##SUFF##Z128: avx512_scatter<dopc, OpcodeStr##"d", _.info128,
7846 vx128xmem, mscatterv4i32>, EVEX_V128;
7847 defm NAME##Q##SUFF##Z128: avx512_scatter<qopc, OpcodeStr##"q", _.info128,
7848 vx64xmem, mscatterv2i64>, EVEX_V128;
// Floating-point and integer scatter families.
7852 defm VSCATTER : avx512_scatter_q_pd<0xA2, 0xA3, avx512vl_f64_info, "vscatter", "PD">,
7853 avx512_scatter_d_ps<0xA2, 0xA3, avx512vl_f32_info, "vscatter", "PS">;
7855 defm VPSCATTER : avx512_scatter_q_pd<0xA0, 0xA1, avx512vl_i64_info, "vpscatter", "Q">,
7856 avx512_scatter_d_ps<0xA0, 0xA1, avx512vl_i32_info, "vpscatter", "D">;
// avx512_gather_scatter_prefetch - VGATHERPF/VSCATTERPF hint
// instructions (PFI feature).  No outputs; side-effecting; opcode
// extension in the Format (MRM1m..MRM6m selects hint level/direction).
// NOTE(review): the def below is visibly truncated — the pattern list and
// trailing encoding modifiers (original lines after 7863) were dropped by
// extraction; verify against the full file before editing.
7859 multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
7860 RegisterClass KRC, X86MemOperand memop> {
7861 let Predicates = [HasPFI], hasSideEffects = 1 in
7862 def m : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
7863 !strconcat(OpcodeStr, "\t{$src {${mask}}|{${mask}}, $src}"),
// Gather prefetches, hint level 0 (MRM1m) and 1 (MRM2m).
7867 defm VGATHERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dps",
7868 VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
7870 defm VGATHERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qps",
7871 VK8WM, vz512mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
7873 defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
7874 VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
7876 defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
7877 VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
7879 defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
7880 VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
7882 defm VGATHERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qps",
7883 VK8WM, vz512mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
7885 defm VGATHERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dpd",
7886 VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
7888 defm VGATHERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qpd",
7889 VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
// Scatter prefetches, hint level 0 (MRM5m) and 1 (MRM6m).
7891 defm VSCATTERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dps",
7892 VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
7894 defm VSCATTERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qps",
7895 VK8WM, vz512mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
7897 defm VSCATTERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dpd",
7898 VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
7900 defm VSCATTERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qpd",
7901 VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
7903 defm VSCATTERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dps",
7904 VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
7906 defm VSCATTERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qps",
7907 VK8WM, vz512mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
7909 defm VSCATTERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dpd",
7910 VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
7912 defm VSCATTERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qpd",
7913 VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
7915 // Helper fragments to match sext vXi1 to vXiY.
// Each PatLeaf matches an arithmetic-shift-right by (element bits - 1),
// i.e. a vector whose lanes are all-ones or all-zeros (a sign-extended
// i1 mask).
// NOTE(review): the v64i1sextv64i8 def below is garbled — original lines
// 7917-7918 and 7920 were dropped by extraction (the comparison node
// wrapping the bc_v64i8 operand is missing); verify against the full file.
7916 def v64i1sextv64i8 : PatLeaf<(v64i8
7919 (bc_v64i8 (v16i32 immAllZerosV)),
7921 def v32i1sextv32i16 : PatLeaf<(v32i16 (X86vsrai VR512:$src, (i8 15)))>;
7922 def v16i1sextv16i32 : PatLeaf<(v16i32 (X86vsrai VR512:$src, (i8 31)))>;
7923 def v8i1sextv8i64  : PatLeaf<(v8i64 (X86vsrai VR512:$src, (i8 63)))>;
// cvt_by_vec_width - one VPMOVM2* register form: sign-extend a mask
// register into a full vector (each i1 lane becomes all-ones/all-zeros).
7925 multiclass cvt_by_vec_width<bits<8> opc, X86VectorVTInfo Vec, string OpcodeStr > {
7926 def rr : AVX512XS8I<opc, MRMSrcReg, (outs Vec.RC:$dst), (ins Vec.KRC:$src),
7927 !strconcat(OpcodeStr##Vec.Suffix, "\t{$src, $dst|$dst, $src}"),
7928 [(set Vec.RC:$dst, (Vec.VT (X86vsext Vec.KRC:$src)))]>, EVEX;
// cvt_mask_by_elt_width - instantiate 512-bit under prd, and the
// 128/256-bit forms additionally under VLX.
7931 multiclass cvt_mask_by_elt_width<bits<8> opc, AVX512VLVectorVTInfo VTInfo,
7932 string OpcodeStr, Predicate prd> {
7933 let Predicates = [prd] in
7934 defm Z : cvt_by_vec_width<opc, VTInfo.info512, OpcodeStr>, EVEX_V512;
7936 let Predicates = [prd, HasVLX] in {
7937 defm Z256 : cvt_by_vec_width<opc, VTInfo.info256, OpcodeStr>, EVEX_V256;
7938 defm Z128 : cvt_by_vec_width<opc, VTInfo.info128, OpcodeStr>, EVEX_V128;
// avx512_convert_mask_to_vector - all four element widths.
// NOTE(review): the required-feature predicate argument on each defm line
// below (original lines 7944/7946/7948/7950) was dropped by extraction;
// verify against the full file.
7942 multiclass avx512_convert_mask_to_vector<string OpcodeStr> {
7943 defm NAME##B : cvt_mask_by_elt_width<0x28, avx512vl_i8_info, OpcodeStr,
7945 defm NAME##W : cvt_mask_by_elt_width<0x28, avx512vl_i16_info, OpcodeStr,
7947 defm NAME##D : cvt_mask_by_elt_width<0x38, avx512vl_i32_info, OpcodeStr,
7949 defm NAME##Q : cvt_mask_by_elt_width<0x38, avx512vl_i64_info, OpcodeStr,
7953 defm VPMOVM2 : avx512_convert_mask_to_vector<"vpmovm2">;
// convert_vector_to_mask_common - one VPMOV*2M register form: extract the
// sign bit of each lane into a mask register (X86cvt2mask).
7955 multiclass convert_vector_to_mask_common<bits<8> opc, X86VectorVTInfo _, string OpcodeStr > {
7956 def rr : AVX512XS8I<opc, MRMSrcReg, (outs _.KRC:$dst), (ins _.RC:$src),
7957 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
7958 [(set _.KRC:$dst, (X86cvt2mask (_.VT _.RC:$src)))]>, EVEX;
7961 // Use the 512-bit version to implement the 128/256-bit cases when VLX is
7961 // unavailable: widen the source into a 512-bit register, run the Z form,
7961 // then copy the mask down to the narrow mask class.
7962 multiclass convert_vector_to_mask_lowering<X86VectorVTInfo ExtendInfo,
7963 X86VectorVTInfo _> {
7965 def : Pat<(_.KVT (X86cvt2mask (_.VT _.RC:$src))),
7966 (_.KVT (COPY_TO_REGCLASS
7967 (!cast<Instruction>(NAME#"Zrr")
7968 (INSERT_SUBREG (ExtendInfo.VT (IMPLICIT_DEF)),
7969 _.RC:$src, _.SubRegIdx)),
// avx512_convert_vector_to_mask - 512-bit under prd; native 128/256-bit
// under VLX; widening fallback patterns under NoVLX.
7973 multiclass avx512_convert_vector_to_mask<bits<8> opc, string OpcodeStr,
7974 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
7975 let Predicates = [prd] in
7976 defm Z : convert_vector_to_mask_common <opc, VTInfo.info512, OpcodeStr>,
7979 let Predicates = [prd, HasVLX] in {
7980 defm Z256 : convert_vector_to_mask_common<opc, VTInfo.info256, OpcodeStr>,
7982 defm Z128 : convert_vector_to_mask_common<opc, VTInfo.info128, OpcodeStr>,
7985 let Predicates = [prd, NoVLX] in {
7986 defm Z256_Alt : convert_vector_to_mask_lowering<VTInfo.info512, VTInfo.info256>;
7987 defm Z128_Alt : convert_vector_to_mask_lowering<VTInfo.info512, VTInfo.info128>;
// Byte/word forms need BWI; dword/qword forms need DQI.
7991 defm VPMOVB2M : avx512_convert_vector_to_mask<0x29, "vpmovb2m",
7992 avx512vl_i8_info, HasBWI>;
7993 defm VPMOVW2M : avx512_convert_vector_to_mask<0x29, "vpmovw2m",
7994 avx512vl_i16_info, HasBWI>, VEX_W;
7995 defm VPMOVD2M : avx512_convert_vector_to_mask<0x39, "vpmovd2m",
7996 avx512vl_i32_info, HasDQI>;
7997 defm VPMOVQ2M : avx512_convert_vector_to_mask<0x39, "vpmovq2m",
7998 avx512vl_i64_info, HasDQI>, VEX_W;
8000 //===----------------------------------------------------------------------===//
8001 // AVX-512 - COMPRESS and EXPAND
// compress_by_vec_width_common - register form (maskable X86compress) plus
// unmasked and masked store forms (store forms have no patterns; masked
// compressing stores are matched by compress_by_vec_width_lowering below).
8004 multiclass compress_by_vec_width_common<bits<8> opc, X86VectorVTInfo _,
8006 defm rr : AVX512_maskable<opc, MRMDestReg, _, (outs _.RC:$dst),
8007 (ins _.RC:$src1), OpcodeStr, "$src1", "$src1",
8008 (_.VT (X86compress _.RC:$src1))>, AVX5128IBase;
8010 let mayStore = 1, hasSideEffects = 0 in
8011 def mr : AVX5128I<opc, MRMDestMem, (outs),
8012 (ins _.MemOp:$dst, _.RC:$src),
8013 OpcodeStr # "\t{$src, $dst|$dst, $src}",
8014 []>, EVEX_CD8<_.EltSize, CD8VT1>;
8016 def mrk : AVX5128I<opc, MRMDestMem, (outs),
8017 (ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
8018 OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}",
8020 EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
// Match the masked compressing-store node onto the mrk form above.
8023 multiclass compress_by_vec_width_lowering<X86VectorVTInfo _ > {
8025 def : Pat<(X86mCompressingStore addr:$dst, _.KRCWM:$mask,
8027 (!cast<Instruction>(NAME#_.ZSuffix##mrk)
8028 addr:$dst, _.KRCWM:$mask, _.RC:$src)>;
// compress_by_elt_width - 512-bit always; 128/256-bit under VLX.
8031 multiclass compress_by_elt_width<bits<8> opc, string OpcodeStr,
8032 AVX512VLVectorVTInfo VTInfo> {
8033 defm Z : compress_by_vec_width_common<opc, VTInfo.info512, OpcodeStr>,
8034 compress_by_vec_width_lowering<VTInfo.info512>, EVEX_V512;
8036 let Predicates = [HasVLX] in {
8037 defm Z256 : compress_by_vec_width_common<opc, VTInfo.info256, OpcodeStr>,
8038 compress_by_vec_width_lowering<VTInfo.info256>, EVEX_V256;
8039 defm Z128 : compress_by_vec_width_common<opc, VTInfo.info128, OpcodeStr>,
8040 compress_by_vec_width_lowering<VTInfo.info128>, EVEX_V128;
// NOTE(review): the continuation lines of the four defms below (original
// lines 8045/8047/8049/8051, trailing encoding modifiers) were dropped by
// extraction; verify against the full file.
8044 defm VPCOMPRESSD : compress_by_elt_width <0x8B, "vpcompressd", avx512vl_i32_info>,
8046 defm VPCOMPRESSQ : compress_by_elt_width <0x8B, "vpcompressq", avx512vl_i64_info>,
8048 defm VCOMPRESSPS : compress_by_elt_width <0x8A, "vcompressps", avx512vl_f32_info>,
8050 defm VCOMPRESSPD : compress_by_elt_width <0x8A, "vcompresspd", avx512vl_f64_info>,
// expand_by_vec_width - register and load forms of X86expand (maskable).
8054 multiclass expand_by_vec_width<bits<8> opc, X86VectorVTInfo _,
8056 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
8057 (ins _.RC:$src1), OpcodeStr, "$src1", "$src1",
8058 (_.VT (X86expand _.RC:$src1))>, AVX5128IBase;
8060 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
8061 (ins _.MemOp:$src1), OpcodeStr, "$src1", "$src1",
8062 (_.VT (X86expand (_.VT (bitconvert
8063 (_.LdFrag addr:$src1)))))>,
8064 AVX5128IBase, EVEX_CD8<_.EltSize, CD8VT1>;
// Match masked expanding loads onto the zeroing (undef passthru) and
// merging (register passthru) rmk/rmkz forms.
8067 multiclass expand_by_vec_width_lowering<X86VectorVTInfo _ > {
8069 def : Pat<(_.VT (X86mExpandingLoad addr:$src, _.KRCWM:$mask, undef)),
8070 (!cast<Instruction>(NAME#_.ZSuffix##rmkz)
8071 _.KRCWM:$mask, addr:$src)>;
8073 def : Pat<(_.VT (X86mExpandingLoad addr:$src, _.KRCWM:$mask,
8074 (_.VT _.RC:$src0))),
8075 (!cast<Instruction>(NAME#_.ZSuffix##rmk)
8076 _.RC:$src0, _.KRCWM:$mask, addr:$src)>;
// expand_by_elt_width - 512-bit always; 128/256-bit under VLX.
8079 multiclass expand_by_elt_width<bits<8> opc, string OpcodeStr,
8080 AVX512VLVectorVTInfo VTInfo> {
8081 defm Z : expand_by_vec_width<opc, VTInfo.info512, OpcodeStr>,
8082 expand_by_vec_width_lowering<VTInfo.info512>, EVEX_V512;
8084 let Predicates = [HasVLX] in {
8085 defm Z256 : expand_by_vec_width<opc, VTInfo.info256, OpcodeStr>,
8086 expand_by_vec_width_lowering<VTInfo.info256>, EVEX_V256;
8087 defm Z128 : expand_by_vec_width<opc, VTInfo.info128, OpcodeStr>,
8088 expand_by_vec_width_lowering<VTInfo.info128>, EVEX_V128;
// NOTE(review): continuation lines of the four defms below (original lines
// 8093/8095/8097/8099) were dropped by extraction; verify against the full
// file.
8092 defm VPEXPANDD : expand_by_elt_width <0x89, "vpexpandd", avx512vl_i32_info>,
8094 defm VPEXPANDQ : expand_by_elt_width <0x89, "vpexpandq", avx512vl_i64_info>,
8096 defm VEXPANDPS : expand_by_elt_width <0x88, "vexpandps", avx512vl_f32_info>,
8098 defm VEXPANDPD : expand_by_elt_width <0x88, "vexpandpd", avx512vl_f64_info>,
8101 //handle instruction reg_vec1 = op(reg_vec,imm)
8103 // op(broadcast(eltVt),imm)
8104 //all instruction created with FROUND_CURRENT
// Unary packed FP op with an 8-bit immediate: register, memory, and
// element-broadcast (EVEX_B) forms; all use the current rounding mode.
8105 multiclass avx512_unary_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
8107 let ExeDomain = _.ExeDomain in {
8108 defm rri : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
8109 (ins _.RC:$src1, i32u8imm:$src2),
8110 OpcodeStr##_.Suffix, "$src2, $src1", "$src1, $src2",
8111 (OpNode (_.VT _.RC:$src1),
8113 (i32 FROUND_CURRENT))>;
8114 defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
8115 (ins _.MemOp:$src1, i32u8imm:$src2),
8116 OpcodeStr##_.Suffix, "$src2, $src1", "$src1, $src2",
8117 (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
8119 (i32 FROUND_CURRENT))>;
8120 defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
8121 (ins _.ScalarMemOp:$src1, i32u8imm:$src2),
8122 OpcodeStr##_.Suffix, "$src2, ${src1}"##_.BroadcastStr,
8123 "${src1}"##_.BroadcastStr##", $src2",
8124 (OpNode (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src1))),
8126 (i32 FROUND_CURRENT))>, EVEX_B;
8130 //handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm),{sae}
// Same unary op, register-only, with suppress-all-exceptions ({sae},
// FROUND_NO_EXC); EVEX_B encodes the SAE semantics in the register form.
8131 multiclass avx512_unary_fp_sae_packed_imm<bits<8> opc, string OpcodeStr,
8132 SDNode OpNode, X86VectorVTInfo _>{
8133 let ExeDomain = _.ExeDomain in
8134 defm rrib : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
8135 (ins _.RC:$src1, i32u8imm:$src2),
8136 OpcodeStr##_.Suffix, "$src2, {sae}, $src1",
8137 "$src1, {sae}, $src2",
8138 (OpNode (_.VT _.RC:$src1),
8140 (i32 FROUND_NO_EXC))>, EVEX_B;
// Wrapper: 512-bit (with SAE variant) under prd; 128/256-bit under VLX.
8143 multiclass avx512_common_unary_fp_sae_packed_imm<string OpcodeStr,
8144 AVX512VLVectorVTInfo _, bits<8> opc, SDNode OpNode, Predicate prd>{
8145 let Predicates = [prd] in {
8146 defm Z : avx512_unary_fp_packed_imm<opc, OpcodeStr, OpNode, _.info512>,
8147 avx512_unary_fp_sae_packed_imm<opc, OpcodeStr, OpNode, _.info512>,
8150 let Predicates = [prd, HasVLX] in {
8151 defm Z128 : avx512_unary_fp_packed_imm<opc, OpcodeStr, OpNode, _.info128>,
8153 defm Z256 : avx512_unary_fp_packed_imm<opc, OpcodeStr, OpNode, _.info256>,
8158 //handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm)
8159 // op(reg_vec2,mem_vec,imm)
8160 // op(reg_vec2,broadcast(eltVt),imm)
8161 //all instruction created with FROUND_CURRENT
// Binary packed FP op with an 8-bit immediate: reg/reg, reg/mem, and
// reg/broadcast forms, all with the current rounding mode.
8162 multiclass avx512_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
8164 let ExeDomain = _.ExeDomain in {
8165 defm rri : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
8166 (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3),
8167 OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
8168 (OpNode (_.VT _.RC:$src1),
8171 (i32 FROUND_CURRENT))>;
8172 defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
8173 (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3),
8174 OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
8175 (OpNode (_.VT _.RC:$src1),
8176 (_.VT (bitconvert (_.LdFrag addr:$src2))),
8178 (i32 FROUND_CURRENT))>;
8179 defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
8180 (ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
8181 OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
8182 "$src1, ${src2}"##_.BroadcastStr##", $src3",
8183 (OpNode (_.VT _.RC:$src1),
8184 (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
8186 (i32 FROUND_CURRENT))>, EVEX_B;
8190 //handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm)
8191 // op(reg_vec2,mem_vec,imm)
// Like avx512_fp_packed_imm but with distinct source/destination VT infos
// and a plain u8imm (no rounding-mode operand in the pattern).
8192 multiclass avx512_3Op_rm_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
8193 X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo>{
8194 let ExeDomain = DestInfo.ExeDomain in {
8195 defm rri : AVX512_maskable<opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
8196 (ins SrcInfo.RC:$src1, SrcInfo.RC:$src2, u8imm:$src3),
8197 OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
8198 (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1),
8199 (SrcInfo.VT SrcInfo.RC:$src2),
8201 defm rmi : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
8202 (ins SrcInfo.RC:$src1, SrcInfo.MemOp:$src2, u8imm:$src3),
8203 OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
8204 (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1),
8205 (SrcInfo.VT (bitconvert
8206 (SrcInfo.LdFrag addr:$src2))),
8211 //handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm)
8212 // op(reg_vec2,mem_vec,imm)
8213 // op(reg_vec2,broadcast(eltVt),imm)
// avx512_3Op_imm8 - avx512_3Op_rm_imm8 (same src/dst VT) plus the
// element-broadcast form.
8214 multiclass avx512_3Op_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
8216 avx512_3Op_rm_imm8<opc, OpcodeStr, OpNode, _, _>{
8218 let ExeDomain = _.ExeDomain in
8219 defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
8220 (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
8221 OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
8222 "$src1, ${src2}"##_.BroadcastStr##", $src3",
8223 (OpNode (_.VT _.RC:$src1),
8224 (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
8225 (i8 imm:$src3))>, EVEX_B;
8228 //handle scalar instruction reg_vec1 = op(reg_vec2,reg_vec3,imm)
8229 // op(reg_vec2,mem_scalar,imm)
8230 //all instruction created with FROUND_CURRENT
// Scalar FP op with immediate: reg/reg and reg/scalar-mem forms, current
// rounding mode.
8231 multiclass avx512_fp_scalar_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
8232 X86VectorVTInfo _> {
8233 let ExeDomain = _.ExeDomain in {
8234 defm rri : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
8235 (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3),
8236 OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
8237 (OpNode (_.VT _.RC:$src1),
8240 (i32 FROUND_CURRENT))>;
8241 defm rmi : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
8242 (ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
8243 OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
8244 (OpNode (_.VT _.RC:$src1),
8245 (_.VT (scalar_to_vector
8246 (_.ScalarLdFrag addr:$src2))),
8248 (i32 FROUND_CURRENT))>;
8252 //handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm),{sae}
// Packed binary op with immediate, suppress-all-exceptions variant
// (FROUND_NO_EXC, EVEX_B encodes {sae}).
8253 multiclass avx512_fp_sae_packed_imm<bits<8> opc, string OpcodeStr,
8254 SDNode OpNode, X86VectorVTInfo _>{
8255 let ExeDomain = _.ExeDomain in
8256 defm rrib : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
8257 (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3),
8258 OpcodeStr, "$src3, {sae}, $src2, $src1",
8259 "$src1, $src2, {sae}, $src3",
8260 (OpNode (_.VT _.RC:$src1),
8263 (i32 FROUND_NO_EXC))>, EVEX_B;
8265 //handle scalar instruction reg_vec1 = op(reg_vec2,reg_vec3,imm),{sae}
// Scalar counterpart of the SAE form above.
8266 multiclass avx512_fp_sae_scalar_imm<bits<8> opc, string OpcodeStr,
8267 SDNode OpNode, X86VectorVTInfo _> {
8268 defm NAME#rrib : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
8269 (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3),
8270 OpcodeStr, "$src3, {sae}, $src2, $src1",
8271 "$src1, $src2, {sae}, $src3",
8272 (OpNode (_.VT _.RC:$src1),
8275 (i32 FROUND_NO_EXC))>, EVEX_B;
// Wrapper: 512-bit packed-imm op (with SAE variant) under prd; 128/256-bit
// packed forms under VLX.
8278 multiclass avx512_common_fp_sae_packed_imm<string OpcodeStr,
8279 AVX512VLVectorVTInfo _, bits<8> opc, SDNode OpNode, Predicate prd>{
8280 let Predicates = [prd] in {
8281 defm Z : avx512_fp_packed_imm<opc, OpcodeStr, OpNode, _.info512>,
8282 avx512_fp_sae_packed_imm<opc, OpcodeStr, OpNode, _.info512>,
8286 let Predicates = [prd, HasVLX] in {
8287 defm Z128 : avx512_fp_packed_imm<opc, OpcodeStr, OpNode, _.info128>,
8289 defm Z256 : avx512_fp_packed_imm<opc, OpcodeStr, OpNode, _.info256>,
// Wrapper for 3-operand imm8 ops with distinct src/dst VT infos; gated on
// BWI (all widths) and additionally VLX for 128/256-bit.
8294 multiclass avx512_common_3Op_rm_imm8<bits<8> opc, SDNode OpNode, string OpStr,
8295 AVX512VLVectorVTInfo DestInfo, AVX512VLVectorVTInfo SrcInfo>{
8296 let Predicates = [HasBWI] in {
8297 defm Z : avx512_3Op_rm_imm8<opc, OpStr, OpNode, DestInfo.info512,
8298 SrcInfo.info512>, EVEX_V512, AVX512AIi8Base, EVEX_4V;
8300 let Predicates = [HasBWI, HasVLX] in {
8301 defm Z128 : avx512_3Op_rm_imm8<opc, OpStr, OpNode, DestInfo.info128,
8302 SrcInfo.info128>, EVEX_V128, AVX512AIi8Base, EVEX_4V;
8303 defm Z256 : avx512_3Op_rm_imm8<opc, OpStr, OpNode, DestInfo.info256,
8304 SrcInfo.info256>, EVEX_V256, AVX512AIi8Base, EVEX_4V;
// Wrapper for same-VT 3-operand imm8 ops; AVX512 baseline, VLX for
// 128/256-bit.
8308 multiclass avx512_common_3Op_imm8<string OpcodeStr, AVX512VLVectorVTInfo _,
8309 bits<8> opc, SDNode OpNode>{
8310 let Predicates = [HasAVX512] in {
8311 defm Z : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
8313 let Predicates = [HasAVX512, HasVLX] in {
8314 defm Z128 : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info128>, EVEX_V128;
8315 defm Z256 : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
// Wrapper for scalar imm ops plus their SAE variants (scalar ops are
// named Z128).
8319 multiclass avx512_common_fp_sae_scalar_imm<string OpcodeStr,
8320 X86VectorVTInfo _, bits<8> opc, SDNode OpNode, Predicate prd>{
8321 let Predicates = [prd] in {
8322 defm Z128 : avx512_fp_scalar_imm<opc, OpcodeStr, OpNode, _>,
8323 avx512_fp_sae_scalar_imm<opc, OpcodeStr, OpNode, _>;
// PS/PD pair for the unary packed-imm family (PD forms add VEX_W and
// 64-bit CD8 scaling).
8327 multiclass avx512_common_unary_fp_sae_packed_imm_all<string OpcodeStr,
8328 bits<8> opcPs, bits<8> opcPd, SDNode OpNode, Predicate prd>{
8329 defm PS : avx512_common_unary_fp_sae_packed_imm<OpcodeStr, avx512vl_f32_info,
8330 opcPs, OpNode, prd>, EVEX_CD8<32, CD8VF>;
8331 defm PD : avx512_common_unary_fp_sae_packed_imm<OpcodeStr, avx512vl_f64_info,
8332 opcPd, OpNode, prd>, EVEX_CD8<64, CD8VF>, VEX_W;
// Packed unary imm instructions: VREDUCE (DQI), VRNDSCALE and VGETMANT
// (baseline AVX-512).
8336 defm VREDUCE : avx512_common_unary_fp_sae_packed_imm_all<"vreduce", 0x56, 0x56,
8337 X86VReduce, HasDQI>, AVX512AIi8Base, EVEX;
8338 defm VRNDSCALE : avx512_common_unary_fp_sae_packed_imm_all<"vrndscale", 0x08, 0x09,
8339 X86VRndScale, HasAVX512>, AVX512AIi8Base, EVEX;
8340 defm VGETMANT : avx512_common_unary_fp_sae_packed_imm_all<"vgetmant", 0x26, 0x26,
8341 X86VGetMant, HasAVX512>, AVX512AIi8Base, EVEX;
// Packed binary imm: VRANGE (DQI).
8344 defm VRANGEPD : avx512_common_fp_sae_packed_imm<"vrangepd", avx512vl_f64_info,
8345 0x50, X86VRange, HasDQI>,
8346 AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VF>, VEX_W;
8347 defm VRANGEPS : avx512_common_fp_sae_packed_imm<"vrangeps", avx512vl_f32_info,
8348 0x50, X86VRange, HasDQI>,
8349 AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VF>;
// Scalar imm counterparts (VEX_LIG: vector length ignored).
8351 defm VRANGESD: avx512_common_fp_sae_scalar_imm<"vrangesd", f64x_info,
8352 0x51, X86VRange, HasDQI>,
8353 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
8354 defm VRANGESS: avx512_common_fp_sae_scalar_imm<"vrangess", f32x_info,
8355 0x51, X86VRange, HasDQI>,
8356 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<32, CD8VT1>;
8358 defm VREDUCESD: avx512_common_fp_sae_scalar_imm<"vreducesd", f64x_info,
8359 0x57, X86Reduces, HasDQI>,
8360 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
8361 defm VREDUCESS: avx512_common_fp_sae_scalar_imm<"vreducess", f32x_info,
8362 0x57, X86Reduces, HasDQI>,
8363 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<32, CD8VT1>;
8365 defm VGETMANTSD: avx512_common_fp_sae_scalar_imm<"vgetmantsd", f64x_info,
8366 0x27, X86GetMants, HasAVX512>,
8367 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
8368 defm VGETMANTSS: avx512_common_fp_sae_scalar_imm<"vgetmantss", f32x_info,
8369 0x27, X86GetMants, HasAVX512>,
8370 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<32, CD8VT1>;
// 128-bit-lane shuffle (VSHUFF/VSHUFI 32x4/64x2). Only 256- and 512-bit
// forms exist: a 128-bit vector has a single lane, so no Z128 variant.
8372 multiclass avx512_shuff_packed_128<string OpcodeStr, AVX512VLVectorVTInfo _,
8373 bits<8> opc, SDNode OpNode = X86Shuf128>{
8374 let Predicates = [HasAVX512] in {
8375 defm Z : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
8378 let Predicates = [HasAVX512, HasVLX] in {
8379 defm Z256 : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
// Map generic FP rounding ISD nodes onto VRNDSCALE with the matching
// rounding-control immediate: 0x1 = round down (floor), 0x2 = round up
// (ceil), 0x3 = truncate, 0x4 = current rounding mode (rint),
// 0xC = current mode with precision exceptions suppressed (nearbyint).
8382 let Predicates = [HasAVX512] in {
8383 def : Pat<(v16f32 (ffloor VR512:$src)),
8384 (VRNDSCALEPSZrri VR512:$src, (i32 0x1))>;
8385 def : Pat<(v16f32 (fnearbyint VR512:$src)),
8386 (VRNDSCALEPSZrri VR512:$src, (i32 0xC))>;
8387 def : Pat<(v16f32 (fceil VR512:$src)),
8388 (VRNDSCALEPSZrri VR512:$src, (i32 0x2))>;
8389 def : Pat<(v16f32 (frint VR512:$src)),
8390 (VRNDSCALEPSZrri VR512:$src, (i32 0x4))>;
8391 def : Pat<(v16f32 (ftrunc VR512:$src)),
8392 (VRNDSCALEPSZrri VR512:$src, (i32 0x3))>;
// Same mapping for the packed-double forms.
8394 def : Pat<(v8f64 (ffloor VR512:$src)),
8395 (VRNDSCALEPDZrri VR512:$src, (i32 0x1))>;
8396 def : Pat<(v8f64 (fnearbyint VR512:$src)),
8397 (VRNDSCALEPDZrri VR512:$src, (i32 0xC))>;
8398 def : Pat<(v8f64 (fceil VR512:$src)),
8399 (VRNDSCALEPDZrri VR512:$src, (i32 0x2))>;
8400 def : Pat<(v8f64 (frint VR512:$src)),
8401 (VRNDSCALEPDZrri VR512:$src, (i32 0x4))>;
8402 def : Pat<(v8f64 (ftrunc VR512:$src)),
8403 (VRNDSCALEPDZrri VR512:$src, (i32 0x3))>;
// 128-bit-lane shuffles: FP forms use opcode 0x23, integer forms 0x43;
// 64-bit-element variants add VEX.W and 64-bit disp8 compression.
8406 defm VSHUFF32X4 : avx512_shuff_packed_128<"vshuff32x4",avx512vl_f32_info, 0x23>,
8407 AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VF>;
8408 defm VSHUFF64X2 : avx512_shuff_packed_128<"vshuff64x2",avx512vl_f64_info, 0x23>,
8409 AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VF>, VEX_W;
8410 defm VSHUFI32X4 : avx512_shuff_packed_128<"vshufi32x4",avx512vl_i32_info, 0x43>,
8411 AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VF>;
8412 defm VSHUFI64X2 : avx512_shuff_packed_128<"vshufi64x2",avx512vl_i64_info, 0x43>,
8413 AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VF>, VEX_W;
// VALIGND/VALIGNQ: element-granular concatenate-and-shift-right by imm8,
// built on the common 3-op + imm8 helper (opcode 0x03).
8415 multiclass avx512_valign<string OpcodeStr, AVX512VLVectorVTInfo VTInfo_I> {
8416 defm NAME: avx512_common_3Op_imm8<OpcodeStr, VTInfo_I, 0x03, X86VAlign>,
8417 AVX512AIi8Base, EVEX_4V;
8420 defm VALIGND: avx512_valign<"valignd", avx512vl_i32_info>,
8421 EVEX_CD8<32, CD8VF>;
8422 defm VALIGNQ: avx512_valign<"valignq", avx512vl_i64_info>,
8423 EVEX_CD8<64, CD8VF>, VEX_W;
// Select X86PAlignr nodes of non-i8 element types onto the (byte-typed)
// VPALIGNR instruction of the matching width. The pattern name embeds the
// VT name so each instantiation is unique; the instruction is looked up
// via the Z-width suffix of the VPALIGNR defm.
8425 multiclass avx512_vpalignr_lowering<X86VectorVTInfo _ , list<Predicate> p>{
8426 let Predicates = p in
8427 def NAME#_.VTName#rri:
8428 Pat<(_.VT (X86PAlignr _.RC:$src1, _.RC:$src2, (i8 imm:$imm))),
8429 (!cast<Instruction>(NAME#_.ZSuffix#rri)
8430 _.RC:$src1, _.RC:$src2, imm:$imm)>;
// Apply the lowering at all three widths; VPALIGNR is a BWI instruction,
// and the sub-512-bit forms additionally need VLX.
8433 multiclass avx512_vpalignr_lowering_common<AVX512VLVectorVTInfo _>:
8434 avx512_vpalignr_lowering<_.info512, [HasBWI]>,
8435 avx512_vpalignr_lowering<_.info128, [HasBWI, HasVLX]>,
8436 avx512_vpalignr_lowering<_.info256, [HasBWI, HasVLX]>;
// VPALIGNR itself is defined over i8 vectors; the extra lowering_common
// instantiations cover PAlignr nodes produced with wider element types.
8438 defm VPALIGNR: avx512_common_3Op_rm_imm8<0x0F, X86PAlignr, "vpalignr" ,
8439 avx512vl_i8_info, avx512vl_i8_info>,
8440 avx512_vpalignr_lowering_common<avx512vl_i16_info>,
8441 avx512_vpalignr_lowering_common<avx512vl_i32_info>,
8442 avx512_vpalignr_lowering_common<avx512vl_f32_info>,
8443 avx512_vpalignr_lowering_common<avx512vl_i64_info>,
8444 avx512_vpalignr_lowering_common<avx512vl_f64_info>,
// VDBPSADBW: double-block packed sum of absolute differences (i8 sources,
// i16 destination elements).
8447 defm VDBPSADBW: avx512_common_3Op_rm_imm8<0x42, X86dbpsadbw, "vdbpsadbw" ,
8448 avx512vl_i16_info, avx512vl_i8_info>, EVEX_CD8<8, CD8VF>;
// Maskable unary op with register (rr) and full-vector memory (rm) forms.
8450 multiclass avx512_unary_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
8451 X86VectorVTInfo _> {
8452 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
8453 (ins _.RC:$src1), OpcodeStr,
8455 (_.VT (OpNode _.RC:$src1))>, EVEX, AVX5128IBase;
8457 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
8458 (ins _.MemOp:$src1), OpcodeStr,
8460 (_.VT (OpNode (bitconvert (_.LdFrag addr:$src1))))>,
8461 EVEX, AVX5128IBase, EVEX_CD8<_.EltSize, CD8VF>;
// Extends avx512_unary_rm with a broadcast-from-scalar memory form (rmb),
// marked EVEX_B.
8464 multiclass avx512_unary_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
8465 X86VectorVTInfo _> :
8466 avx512_unary_rm<opc, OpcodeStr, OpNode, _> {
8467 defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
8468 (ins _.ScalarMemOp:$src1), OpcodeStr,
8469 "${src1}"##_.BroadcastStr,
8470 "${src1}"##_.BroadcastStr,
8471 (_.VT (OpNode (X86VBroadcast
8472 (_.ScalarLdFrag addr:$src1))))>,
8473 EVEX, AVX5128IBase, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
// Instantiate the unary rr/rm forms at 512-bit (predicate `prd`) and, with
// VLX, at 256/128 bits.
8476 multiclass avx512_unary_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
8477 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
8478 let Predicates = [prd] in
8479 defm Z : avx512_unary_rm<opc, OpcodeStr, OpNode, VTInfo.info512>, EVEX_V512;
8481 let Predicates = [prd, HasVLX] in {
8482 defm Z256 : avx512_unary_rm<opc, OpcodeStr, OpNode, VTInfo.info256>,
8484 defm Z128 : avx512_unary_rm<opc, OpcodeStr, OpNode, VTInfo.info128>,
// Same width fan-out for the broadcast-capable (rmb) variant.
8489 multiclass avx512_unary_rmb_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
8490 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
8491 let Predicates = [prd] in
8492 defm Z : avx512_unary_rmb<opc, OpcodeStr, OpNode, VTInfo.info512>,
8495 let Predicates = [prd, HasVLX] in {
8496 defm Z256 : avx512_unary_rmb<opc, OpcodeStr, OpNode, VTInfo.info256>,
8498 defm Z128 : avx512_unary_rmb<opc, OpcodeStr, OpNode, VTInfo.info128>,
// Element-type fan-out helpers: D/Q elements support embedded broadcast
// (rmb forms); B/W elements do not (byte/word broadcast does not exist).
8503 multiclass avx512_unary_rm_vl_dq<bits<8> opc_d, bits<8> opc_q, string OpcodeStr,
8504 SDNode OpNode, Predicate prd> {
8505 defm Q : avx512_unary_rmb_vl<opc_q, OpcodeStr#"q", OpNode, avx512vl_i64_info,
8507 defm D : avx512_unary_rmb_vl<opc_d, OpcodeStr#"d", OpNode, avx512vl_i32_info,
8511 multiclass avx512_unary_rm_vl_bw<bits<8> opc_b, bits<8> opc_w, string OpcodeStr,
8512 SDNode OpNode, Predicate prd> {
8513 defm W : avx512_unary_rm_vl<opc_w, OpcodeStr#"w", OpNode, avx512vl_i16_info, prd>;
8514 defm B : avx512_unary_rm_vl<opc_b, OpcodeStr#"b", OpNode, avx512vl_i8_info, prd>;
// All four element types (b/w/d/q) with per-type opcodes.
8517 multiclass avx512_unary_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
8518 bits<8> opc_d, bits<8> opc_q,
8519 string OpcodeStr, SDNode OpNode> {
8520 defm NAME : avx512_unary_rm_vl_dq<opc_d, opc_q, OpcodeStr, OpNode,
8522 avx512_unary_rm_vl_bw<opc_b, opc_w, OpcodeStr, OpNode,
// VPABS: packed absolute value for all element types.
8526 defm VPABS : avx512_unary_rm_vl_all<0x1C, 0x1D, 0x1E, 0x1F, "vpabs", X86Abs>;
// PatLeafs matching "sign mask" idioms: for byte elements, a pcmpgt of
// zero against the source; for word/dword elements, an arithmetic shift
// right by (element bits - 1). Used below to recognize abs(x) written as
// (x ^ mask) - mask / blend-style sequences.
8528 def avx512_v16i1sextv16i8 : PatLeaf<(v16i8 (X86pcmpgt (bc_v16i8 (v4i32 immAllZerosV)),
8530 def avx512_v8i1sextv8i16 : PatLeaf<(v8i16 (X86vsrai VR128X:$src, (i8 15)))>;
8531 def avx512_v4i1sextv4i32 : PatLeaf<(v4i32 (X86vsrai VR128X:$src, (i8 31)))>;
8532 def avx512_v32i1sextv32i8 : PatLeaf<(v32i8 (X86pcmpgt (bc_v32i8 (v8i32 immAllZerosV)),
8534 def avx512_v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256X:$src, (i8 15)))>;
8535 def avx512_v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256X:$src, (i8 31)))>;
// Select the open-coded abs idiom (sign-mask + add, combined via the
// bitcast wrappers) onto the corresponding VPABS instruction. Predicates
// follow instruction availability: B/W forms need BWI, D forms need
// AVX512F, and 128/256-bit forms additionally need VLX.
8537 let Predicates = [HasBWI, HasVLX] in {
8539 (bc_v2i64 (avx512_v16i1sextv16i8)),
8540 (bc_v2i64 (add (v16i8 VR128X:$src), (avx512_v16i1sextv16i8)))),
8541 (VPABSBZ128rr VR128X:$src)>;
8543 (bc_v2i64 (avx512_v8i1sextv8i16)),
8544 (bc_v2i64 (add (v8i16 VR128X:$src), (avx512_v8i1sextv8i16)))),
8545 (VPABSWZ128rr VR128X:$src)>;
8547 (bc_v4i64 (avx512_v32i1sextv32i8)),
8548 (bc_v4i64 (add (v32i8 VR256X:$src), (avx512_v32i1sextv32i8)))),
8549 (VPABSBZ256rr VR256X:$src)>;
8551 (bc_v4i64 (avx512_v16i1sextv16i16)),
8552 (bc_v4i64 (add (v16i16 VR256X:$src), (avx512_v16i1sextv16i16)))),
8553 (VPABSWZ256rr VR256X:$src)>;
8555 let Predicates = [HasAVX512, HasVLX] in {
8557 (bc_v2i64 (avx512_v4i1sextv4i32)),
8558 (bc_v2i64 (add (v4i32 VR128X:$src), (avx512_v4i1sextv4i32)))),
8559 (VPABSDZ128rr VR128X:$src)>;
8561 (bc_v4i64 (avx512_v8i1sextv8i32)),
8562 (bc_v4i64 (add (v8i32 VR256X:$src), (avx512_v8i1sextv8i32)))),
8563 (VPABSDZ256rr VR256X:$src)>;
// 512-bit forms; the v16i1sextv16i32 / v8i1sextv8i64 / v64i1sextv64i8 /
// v32i1sextv32i16 leaves are defined elsewhere in the file.
8566 let Predicates = [HasAVX512] in {
8568 (bc_v8i64 (v16i1sextv16i32)),
8569 (bc_v8i64 (add (v16i32 VR512:$src), (v16i1sextv16i32)))),
8570 (VPABSDZrr VR512:$src)>;
8572 (bc_v8i64 (v8i1sextv8i64)),
8573 (bc_v8i64 (add (v8i64 VR512:$src), (v8i1sextv8i64)))),
8574 (VPABSQZrr VR512:$src)>;
8576 let Predicates = [HasBWI] in {
8578 (bc_v8i64 (v64i1sextv64i8)),
8579 (bc_v8i64 (add (v64i8 VR512:$src), (v64i1sextv64i8)))),
8580 (VPABSBZrr VR512:$src)>;
8582 (bc_v8i64 (v32i1sextv32i16)),
8583 (bc_v8i64 (add (v32i16 VR512:$src), (v32i1sextv32i16)))),
8584 (VPABSWZrr VR512:$src)>;
// Count-leading-zeros (VPLZCNT) and conflict detection (VPCONFLICT),
// both from the CDI extension; d/q element types only.
8587 multiclass avx512_ctlz<bits<8> opc, string OpcodeStr, Predicate prd>{
8589 defm NAME : avx512_unary_rm_vl_dq<opc, opc, OpcodeStr, ctlz, prd>;
8592 defm VPLZCNT : avx512_ctlz<0x44, "vplzcnt", HasCDI>;
8593 defm VPCONFLICT : avx512_unary_rm_vl_dq<0xC4, 0xC4, "vpconflict", X86Conflict, HasCDI>;
8595 //===---------------------------------------------------------------------===//
8596 // Replicate Single FP - MOVSHDUP and MOVSLDUP
8597 //===---------------------------------------------------------------------===//
// f32-only unary replicate ops (duplicate odd/even lanes).
8598 multiclass avx512_replicate<bits<8> opc, string OpcodeStr, SDNode OpNode>{
8599 defm NAME: avx512_unary_rm_vl<opc, OpcodeStr, OpNode, avx512vl_f32_info,
8603 defm VMOVSHDUP : avx512_replicate<0x16, "vmovshdup", X86Movshdup>;
8604 defm VMOVSLDUP : avx512_replicate<0x12, "vmovsldup", X86Movsldup>;
8606 //===----------------------------------------------------------------------===//
8607 // AVX-512 - MOVDDUP
8608 //===----------------------------------------------------------------------===//
// The 128-bit MOVDDUP is special: its memory form loads a single f64
// (half the vector) and duplicates it, hence ScalarMemOp and CD8VH
// (half-vector disp8 compression) instead of the generic unary rm form.
8610 multiclass avx512_movddup_128<bits<8> opc, string OpcodeStr, SDNode OpNode,
8611 X86VectorVTInfo _> {
8612 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
8613 (ins _.RC:$src), OpcodeStr, "$src", "$src",
8614 (_.VT (OpNode (_.VT _.RC:$src)))>, EVEX;
8615 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
8616 (ins _.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
8617 (_.VT (OpNode (_.VT (scalar_to_vector
8618 (_.ScalarLdFrag addr:$src)))))>,
8619 EVEX, EVEX_CD8<_.EltSize, CD8VH>;
// 512/256-bit forms reuse the generic unary helper; only the 128-bit form
// uses the scalar-load variant above.
8622 multiclass avx512_movddup_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
8623 AVX512VLVectorVTInfo VTInfo> {
8625 defm Z : avx512_unary_rm<opc, OpcodeStr, OpNode, VTInfo.info512>, EVEX_V512;
8627 let Predicates = [HasAVX512, HasVLX] in {
8628 defm Z256 : avx512_unary_rm<opc, OpcodeStr, OpNode, VTInfo.info256>,
8630 defm Z128 : avx512_movddup_128<opc, OpcodeStr, OpNode, VTInfo.info128>,
8635 multiclass avx512_movddup<bits<8> opc, string OpcodeStr, SDNode OpNode>{
8636 defm NAME: avx512_movddup_common<opc, OpcodeStr, OpNode,
8637 avx512vl_f64_info>, XD, VEX_W;
8640 defm VMOVDDUP : avx512_movddup<0x12, "vmovddup", X86Movddup>;
// Extra VLX patterns: full-vector load duplicate, broadcast-from-memory,
// and broadcast-from-register all map to VMOVDDUP at 128 bits.
8642 let Predicates = [HasVLX] in {
8643 def : Pat<(X86Movddup (loadv2f64 addr:$src)),
8644 (VMOVDDUPZ128rm addr:$src)>;
8645 def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
8646 (VMOVDDUPZ128rm addr:$src)>;
8647 def : Pat<(v2f64 (X86VBroadcast f64:$src)),
8648 (VMOVDDUPZ128rr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
8651 //===----------------------------------------------------------------------===//
8652 // AVX-512 - Unpack Instructions
8653 //===----------------------------------------------------------------------===//
// FP unpack high/low, then integer unpacks: byte/word forms need BWI,
// dword/qword forms only AVX512F.
8654 defm VUNPCKH : avx512_fp_binop_p<0x15, "vunpckh", X86Unpckh, HasAVX512,
8656 defm VUNPCKL : avx512_fp_binop_p<0x14, "vunpckl", X86Unpckl, HasAVX512,
8659 defm VPUNPCKLBW : avx512_binop_rm_vl_b<0x60, "vpunpcklbw", X86Unpckl,
8660 SSE_INTALU_ITINS_P, HasBWI>;
8661 defm VPUNPCKHBW : avx512_binop_rm_vl_b<0x68, "vpunpckhbw", X86Unpckh,
8662 SSE_INTALU_ITINS_P, HasBWI>;
8663 defm VPUNPCKLWD : avx512_binop_rm_vl_w<0x61, "vpunpcklwd", X86Unpckl,
8664 SSE_INTALU_ITINS_P, HasBWI>;
8665 defm VPUNPCKHWD : avx512_binop_rm_vl_w<0x69, "vpunpckhwd", X86Unpckh,
8666 SSE_INTALU_ITINS_P, HasBWI>;
8668 defm VPUNPCKLDQ : avx512_binop_rm_vl_d<0x62, "vpunpckldq", X86Unpckl,
8669 SSE_INTALU_ITINS_P, HasAVX512>;
8670 defm VPUNPCKHDQ : avx512_binop_rm_vl_d<0x6A, "vpunpckhdq", X86Unpckh,
8671 SSE_INTALU_ITINS_P, HasAVX512>;
8672 defm VPUNPCKLQDQ : avx512_binop_rm_vl_q<0x6C, "vpunpcklqdq", X86Unpckl,
8673 SSE_INTALU_ITINS_P, HasAVX512>;
8674 defm VPUNPCKHQDQ : avx512_binop_rm_vl_q<0x6D, "vpunpckhqdq", X86Unpckh,
8675 SSE_INTALU_ITINS_P, HasAVX512>;
8677 //===----------------------------------------------------------------------===//
8678 // AVX-512 - Extract & Insert Integer Instructions
8679 //===----------------------------------------------------------------------===//
// Store-to-memory form of byte/word element extract (PEXTRB/PEXTRW mr):
// the extracted element is truncated from the 32-bit result and stored.
8681 multiclass avx512_extract_elt_bw_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
8682 X86VectorVTInfo _> {
8683 def mr : AVX512Ii8<opc, MRMDestMem, (outs),
8684 (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
8685 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8686 [(store (_.EltVT (trunc (assertzext (OpNode (_.VT _.RC:$src1),
8689 EVEX, EVEX_CD8<_.EltSize, CD8VT1>;
// VPEXTRB: register form (zero-extends into GR32/GR64) plus the memory
// form from the helper above. BWI-gated.
8692 multiclass avx512_extract_elt_b<string OpcodeStr, X86VectorVTInfo _> {
8693 let Predicates = [HasBWI] in {
8694 def rr : AVX512Ii8<0x14, MRMDestReg, (outs GR32orGR64:$dst),
8695 (ins _.RC:$src1, u8imm:$src2),
8696 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8697 [(set GR32orGR64:$dst,
8698 (X86pextrb (_.VT _.RC:$src1), imm:$src2))]>,
8701 defm NAME : avx512_extract_elt_bw_m<0x14, OpcodeStr, X86pextrb, _>, TAPD;
// VPEXTRW: has two register encodings (0xC5 MRMSrcReg and the 0x15
// MRMDestReg ".s" alias, kept side-effect-free for the disassembler)
// plus the memory form.
8705 multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
8706 let Predicates = [HasBWI] in {
8707 def rr : AVX512Ii8<0xC5, MRMSrcReg, (outs GR32orGR64:$dst),
8708 (ins _.RC:$src1, u8imm:$src2),
8709 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8710 [(set GR32orGR64:$dst,
8711 (X86pextrw (_.VT _.RC:$src1), imm:$src2))]>,
8714 let hasSideEffects = 0 in
8715 def rr_REV : AVX512Ii8<0x15, MRMDestReg, (outs GR32orGR64:$dst),
8716 (ins _.RC:$src1, u8imm:$src2),
8717 OpcodeStr#".s\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
8720 defm NAME : avx512_extract_elt_bw_m<0x15, OpcodeStr, X86pextrw, _>, TAPD;
// VPEXTRD/VPEXTRQ: dword/qword extract to GPR or memory; DQI-gated and
// selected directly from generic extractelt.
8724 multiclass avx512_extract_elt_dq<string OpcodeStr, X86VectorVTInfo _,
8725 RegisterClass GRC> {
8726 let Predicates = [HasDQI] in {
8727 def rr : AVX512Ii8<0x16, MRMDestReg, (outs GRC:$dst),
8728 (ins _.RC:$src1, u8imm:$src2),
8729 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8731 (extractelt (_.VT _.RC:$src1), imm:$src2))]>,
8734 def mr : AVX512Ii8<0x16, MRMDestMem, (outs),
8735 (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
8736 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8737 [(store (extractelt (_.VT _.RC:$src1),
8738 imm:$src2),addr:$dst)]>,
8739 EVEX, EVEX_CD8<_.EltSize, CD8VT1>, TAPD;
8743 defm VPEXTRBZ : avx512_extract_elt_b<"vpextrb", v16i8x_info>;
8744 defm VPEXTRWZ : avx512_extract_elt_w<"vpextrw", v8i16x_info>;
8745 defm VPEXTRDZ : avx512_extract_elt_dq<"vpextrd", v4i32x_info, GR32>;
8746 defm VPEXTRQZ : avx512_extract_elt_dq<"vpextrq", v2i64x_info, GR64>, VEX_W;
// Memory form of element insert: loads the scalar via LdFrag and inserts
// it at the imm8 index.
8748 multiclass avx512_insert_elt_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
8749 X86VectorVTInfo _, PatFrag LdFrag> {
8750 def rm : AVX512Ii8<opc, MRMSrcMem, (outs _.RC:$dst),
8751 (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
8752 OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8754 (_.VT (OpNode _.RC:$src1, (LdFrag addr:$src2), imm:$src3)))]>,
8755 EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
// VPINSRB/VPINSRW: byte/word insert from GR32/GR64 (BWI) plus the memory
// form above; selected from the X86pinsrb/w nodes.
8758 multiclass avx512_insert_elt_bw<bits<8> opc, string OpcodeStr, SDNode OpNode,
8759 X86VectorVTInfo _, PatFrag LdFrag> {
8760 let Predicates = [HasBWI] in {
8761 def rr : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
8762 (ins _.RC:$src1, GR32orGR64:$src2, u8imm:$src3),
8763 OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8765 (OpNode _.RC:$src1, GR32orGR64:$src2, imm:$src3))]>, EVEX_4V;
8767 defm NAME : avx512_insert_elt_m<opc, OpcodeStr, OpNode, _, LdFrag>;
// VPINSRD/VPINSRQ: dword/qword insert from GPR (DQI), selected directly
// from generic insertelt.
8771 multiclass avx512_insert_elt_dq<bits<8> opc, string OpcodeStr,
8772 X86VectorVTInfo _, RegisterClass GRC> {
8773 let Predicates = [HasDQI] in {
8774 def rr : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
8775 (ins _.RC:$src1, GRC:$src2, u8imm:$src3),
8776 OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8778 (_.VT (insertelt _.RC:$src1, GRC:$src2, imm:$src3)))]>,
8781 defm NAME : avx512_insert_elt_m<opc, OpcodeStr, insertelt, _,
8782 _.ScalarLdFrag>, TAPD;
8786 defm VPINSRBZ : avx512_insert_elt_bw<0x20, "vpinsrb", X86pinsrb, v16i8x_info,
8788 defm VPINSRWZ : avx512_insert_elt_bw<0xC4, "vpinsrw", X86pinsrw, v8i16x_info,
8790 defm VPINSRDZ : avx512_insert_elt_dq<0x22, "vpinsrd", v4i32x_info, GR32>;
8791 defm VPINSRQZ : avx512_insert_elt_dq<0x22, "vpinsrq", v2i64x_info, GR64>, VEX_W;
8792 //===----------------------------------------------------------------------===//
8793 // VSHUFPS - VSHUFPD Operations
8794 //===----------------------------------------------------------------------===//
// VSHUFPS/PD built on the common 3-op + imm8 helper (opcode 0xC6). The
// integer VTInfo parameter is accepted but the FP info drives the defm.
8795 multiclass avx512_shufp<string OpcodeStr, AVX512VLVectorVTInfo VTInfo_I,
8796 AVX512VLVectorVTInfo VTInfo_FP>{
8797 defm NAME: avx512_common_3Op_imm8<OpcodeStr, VTInfo_FP, 0xC6, X86Shufp>,
8798 EVEX_CD8<VTInfo_FP.info512.EltSize, CD8VF>,
8799 AVX512AIi8Base, EVEX_4V;
8802 defm VSHUFPS: avx512_shufp<"vshufps", avx512vl_i32_info, avx512vl_f32_info>, PS;
8803 defm VSHUFPD: avx512_shufp<"vshufpd", avx512vl_i64_info, avx512vl_f64_info>, PD, VEX_W;
8804 //===----------------------------------------------------------------------===//
8805 // AVX-512 - Byte shift Left/Right
8806 //===----------------------------------------------------------------------===//
// Whole-register byte shifts by imm8 (VPSLLDQ/VPSRLDQ). The Format
// parameters carry the /7 vs /3 opcode-extension encodings for the
// register and memory forms.
8808 multiclass avx512_shift_packed<bits<8> opc, SDNode OpNode, Format MRMr,
8809 Format MRMm, string OpcodeStr, X86VectorVTInfo _>{
8810 def rr : AVX512<opc, MRMr,
8811 (outs _.RC:$dst), (ins _.RC:$src1, u8imm:$src2),
8812 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8813 [(set _.RC:$dst,(_.VT (OpNode _.RC:$src1, (i8 imm:$src2))))]>;
8814 def rm : AVX512<opc, MRMm,
8815 (outs _.RC:$dst), (ins _.MemOp:$src1, u8imm:$src2),
8816 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8817 [(set _.RC:$dst,(_.VT (OpNode
8818 (_.VT (bitconvert (_.LdFrag addr:$src1))),
8819 (i8 imm:$src2))))]>;
// Width fan-out over byte-vector infos; gated by `prd` (BWI below).
8822 multiclass avx512_shift_packed_all<bits<8> opc, SDNode OpNode, Format MRMr,
8823 Format MRMm, string OpcodeStr, Predicate prd>{
8824 let Predicates = [prd] in
8825 defm Z512 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
8826 OpcodeStr, v64i8_info>, EVEX_V512;
8827 let Predicates = [prd, HasVLX] in {
8828 defm Z256 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
8829 OpcodeStr, v32i8x_info>, EVEX_V256;
8830 defm Z128 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
8831 OpcodeStr, v16i8x_info>, EVEX_V128;
8834 defm VPSLLDQ : avx512_shift_packed_all<0x73, X86vshldq, MRM7r, MRM7m, "vpslldq",
8835 HasBWI>, AVX512PDIi8Base, EVEX_4V;
8836 defm VPSRLDQ : avx512_shift_packed_all<0x73, X86vshrdq, MRM3r, MRM3m, "vpsrldq",
8837 HasBWI>, AVX512PDIi8Base, EVEX_4V;
// VPSADBW: sum of absolute differences. Note the destination and source
// use different VT infos (i64 destination elements from i8 sources).
8840 multiclass avx512_psadbw_packed<bits<8> opc, SDNode OpNode,
8841 string OpcodeStr, X86VectorVTInfo _dst,
8842 X86VectorVTInfo _src>{
8843 def rr : AVX512BI<opc, MRMSrcReg,
8844 (outs _dst.RC:$dst), (ins _src.RC:$src1, _src.RC:$src2),
8845 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8846 [(set _dst.RC:$dst,(_dst.VT
8847 (OpNode (_src.VT _src.RC:$src1),
8848 (_src.VT _src.RC:$src2))))]>;
8849 def rm : AVX512BI<opc, MRMSrcMem,
8850 (outs _dst.RC:$dst), (ins _src.RC:$src1, _src.MemOp:$src2),
8851 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8852 [(set _dst.RC:$dst,(_dst.VT
8853 (OpNode (_src.VT _src.RC:$src1),
8854 (_src.VT (bitconvert
8855 (_src.LdFrag addr:$src2))))))]>;
// Width fan-out: i64-element destinations paired with i8-element sources.
8858 multiclass avx512_psadbw_packed_all<bits<8> opc, SDNode OpNode,
8859 string OpcodeStr, Predicate prd> {
8860 let Predicates = [prd] in
8861 defm Z512 : avx512_psadbw_packed<opc, OpNode, OpcodeStr, v8i64_info,
8862 v64i8_info>, EVEX_V512;
8863 let Predicates = [prd, HasVLX] in {
8864 defm Z256 : avx512_psadbw_packed<opc, OpNode, OpcodeStr, v4i64x_info,
8865 v32i8x_info>, EVEX_V256;
8866 defm Z128 : avx512_psadbw_packed<opc, OpNode, OpcodeStr, v2i64x_info,
8867 v16i8x_info>, EVEX_V128;
8871 defm VPSADBW : avx512_psadbw_packed_all<0xf6, X86psadbw, "vpsadbw",
// VPTERNLOG: three-source bitwise ternary logic selected by the imm8
// truth table. $src1 is tied to $dst; rri/rmi/rmbi cover register,
// full-vector memory, and embedded-broadcast operands.
8874 multiclass avx512_ternlog<bits<8> opc, string OpcodeStr, SDNode OpNode,
8876 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
8877 defm rri : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
8878 (ins _.RC:$src2, _.RC:$src3, u8imm:$src4),
8879 OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src4",
8880 (OpNode (_.VT _.RC:$src1),
8883 (i8 imm:$src4)), 1, 1>, AVX512AIi8Base, EVEX_4V;
8884 defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
8885 (ins _.RC:$src2, _.MemOp:$src3, u8imm:$src4),
8886 OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src4",
8887 (OpNode (_.VT _.RC:$src1),
8889 (_.VT (bitconvert (_.LdFrag addr:$src3))),
8890 (i8 imm:$src4)), 1, 0>,
8891 AVX512AIi8Base, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
8892 defm rmbi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
8893 (ins _.RC:$src2, _.ScalarMemOp:$src3, u8imm:$src4),
8894 OpcodeStr, "$src4, ${src3}"##_.BroadcastStr##", $src2",
8895 "$src2, ${src3}"##_.BroadcastStr##", $src4",
8896 (OpNode (_.VT _.RC:$src1),
8898 (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
8899 (i8 imm:$src4)), 1, 0>, EVEX_B,
8900 AVX512AIi8Base, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
8901 }// Constraints = "$src1 = $dst"
// Width fan-out: 512-bit with AVX512F, 128/256-bit with VLX.
8904 multiclass avx512_common_ternlog<string OpcodeStr, AVX512VLVectorVTInfo _>{
8905 let Predicates = [HasAVX512] in
8906 defm Z : avx512_ternlog<0x25, OpcodeStr, X86vpternlog, _.info512>, EVEX_V512;
8907 let Predicates = [HasAVX512, HasVLX] in {
8908 defm Z128 : avx512_ternlog<0x25, OpcodeStr, X86vpternlog, _.info128>, EVEX_V128;
8909 defm Z256 : avx512_ternlog<0x25, OpcodeStr, X86vpternlog, _.info256>, EVEX_V256;
8913 defm VPTERNLOGD : avx512_common_ternlog<"vpternlogd", avx512vl_i32_info>;
8914 defm VPTERNLOGQ : avx512_common_ternlog<"vpternlogq", avx512vl_i64_info>, VEX_W;
8916 //===----------------------------------------------------------------------===//
8917 // AVX-512 - FixupImm
8918 //===----------------------------------------------------------------------===//
// VFIXUPIMM: fix up special FP values ($src1 tied to $dst) using an
// integer table operand ($src3) and imm8 control. Packed forms cover
// register / memory / broadcast operands; all use the current rounding
// mode (FROUND_CURRENT).
8920 multiclass avx512_fixupimm_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
8922 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
8923 defm rri : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
8924 (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
8925 OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
8926 (OpNode (_.VT _.RC:$src1),
8928 (_.IntVT _.RC:$src3),
8930 (i32 FROUND_CURRENT))>;
8931 defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
8932 (ins _.RC:$src2, _.MemOp:$src3, i32u8imm:$src4),
8933 OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
8934 (OpNode (_.VT _.RC:$src1),
8936 (_.IntVT (bitconvert (_.LdFrag addr:$src3))),
8938 (i32 FROUND_CURRENT))>;
8939 defm rmbi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
8940 (ins _.RC:$src2, _.ScalarMemOp:$src3, i32u8imm:$src4),
8941 OpcodeStr##_.Suffix, "$src4, ${src3}"##_.BroadcastStr##", $src2",
8942 "$src2, ${src3}"##_.BroadcastStr##", $src4",
8943 (OpNode (_.VT _.RC:$src1),
8945 (_.IntVT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
8947 (i32 FROUND_CURRENT))>, EVEX_B;
8948 } // Constraints = "$src1 = $dst"
// {sae} (suppress-all-exceptions) register form, 512-bit only.
8951 multiclass avx512_fixupimm_packed_sae<bits<8> opc, string OpcodeStr,
8952 SDNode OpNode, X86VectorVTInfo _>{
8953 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
8954 defm rrib : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
8955 (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
8956 OpcodeStr##_.Suffix, "$src4, {sae}, $src3, $src2",
8957 "$src2, $src3, {sae}, $src4",
8958 (OpNode (_.VT _.RC:$src1),
8960 (_.IntVT _.RC:$src3),
8962 (i32 FROUND_NO_EXC))>, EVEX_B;
// Scalar forms: $src3 uses a separate (integer) VT info; rri/rrib/rmi
// cover register, {sae}, and scalar-load operands.
8966 multiclass avx512_fixupimm_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
8967 X86VectorVTInfo _, X86VectorVTInfo _src3VT> {
8968 let Constraints = "$src1 = $dst" , Predicates = [HasAVX512],
8969 ExeDomain = _.ExeDomain in {
8970 defm rri : AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
8971 (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
8972 OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
8973 (OpNode (_.VT _.RC:$src1),
8975 (_src3VT.VT _src3VT.RC:$src3),
8977 (i32 FROUND_CURRENT))>;
8979 defm rrib : AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
8980 (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
8981 OpcodeStr##_.Suffix, "$src4, {sae}, $src3, $src2",
8982 "$src2, $src3, {sae}, $src4",
8983 (OpNode (_.VT _.RC:$src1),
8985 (_src3VT.VT _src3VT.RC:$src3),
8987 (i32 FROUND_NO_EXC))>, EVEX_B;
8988 defm rmi : AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
8989 (ins _.RC:$src2, _.ScalarMemOp:$src3, i32u8imm:$src4),
8990 OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
8991 (OpNode (_.VT _.RC:$src1),
8993 (_src3VT.VT (scalar_to_vector
8994 (_src3VT.ScalarLdFrag addr:$src3))),
8996 (i32 FROUND_CURRENT))>;
// Width fan-out: 512-bit gets the normal + {sae} forms; 128/256-bit
// (VLX) only the normal forms ({sae} is 512-bit/scalar only).
9000 multiclass avx512_fixupimm_packed_all<AVX512VLVectorVTInfo _Vec>{
9001 let Predicates = [HasAVX512] in
9002 defm Z : avx512_fixupimm_packed<0x54, "vfixupimm", X86VFixupimm, _Vec.info512>,
9003 avx512_fixupimm_packed_sae<0x54, "vfixupimm", X86VFixupimm, _Vec.info512>,
9004 AVX512AIi8Base, EVEX_4V, EVEX_V512;
9005 let Predicates = [HasAVX512, HasVLX] in {
9006 defm Z128 : avx512_fixupimm_packed<0x54, "vfixupimm", X86VFixupimm, _Vec.info128>,
9007 AVX512AIi8Base, EVEX_4V, EVEX_V128;
9008 defm Z256 : avx512_fixupimm_packed<0x54, "vfixupimm", X86VFixupimm, _Vec.info256>,
9009 AVX512AIi8Base, EVEX_4V, EVEX_V256;
9013 defm VFIXUPIMMSS : avx512_fixupimm_scalar<0x55, "vfixupimm", X86VFixupimmScalar,
9014 f32x_info, v4i32x_info>,
9015 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<32, CD8VT1>;
9016 defm VFIXUPIMMSD : avx512_fixupimm_scalar<0x55, "vfixupimm", X86VFixupimmScalar,
9017 f64x_info, v2i64x_info>,
9018 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
9019 defm VFIXUPIMMPS : avx512_fixupimm_packed_all<avx512vl_f32_info>,
9020 EVEX_CD8<32, CD8VF>;
9021 defm VFIXUPIMMPD : avx512_fixupimm_packed_all<avx512vl_f64_info>,
9022 EVEX_CD8<64, CD8VF>, VEX_W;
9026 // Patterns used to select SSE scalar fp arithmetic instructions from
9029 // (1) a scalar fp operation followed by a blend
9031 // The effect is that the backend no longer emits unnecessary vector
9032 // insert instructions immediately after SSE scalar fp instructions
9033 // like addss or mulss.
9035 // For example, given the following code:
9036 // __m128 foo(__m128 A, __m128 B) {
9041 // Previously we generated:
9042 // addss %xmm0, %xmm1
9043 // movss %xmm1, %xmm0
9046 // addss %xmm1, %xmm0
9048 // (2) a vector packed single/double fp operation followed by a vector insert
9050 // The effect is that the backend converts the packed fp instruction
9051 // followed by a vector insert into a single SSE scalar fp instruction.
9053 // For example, given the following code:
9054 // __m128 foo(__m128 A, __m128 B) {
9055 // __m128 C = A + B;
9056 // return (__m128) {c[0], a[1], a[2], a[3]};
9059 // Previously we generated:
9060 // addps %xmm0, %xmm1
9061 // movss %xmm1, %xmm0
9064 // addss %xmm1, %xmm0
9066 // TODO: Some canonicalization in lowering would simplify the number of
9067 // patterns we have to try to match.
// For each scalar f32 arithmetic op (add/sub/mul/div), fold the
// "op then re-insert element 0" idioms (via movss or blend, in both
// extracted-scalar and whole-vector forms) into the single V<op>SS
// intrinsic-form instruction; also the masked (X86selects) variant.
9068 multiclass AVX512_scalar_math_f32_patterns<SDNode Op, string OpcPrefix> {
9069 let Predicates = [HasAVX512] in {
9070 // extracted scalar math op with insert via movss
9071 def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst), (v4f32 (scalar_to_vector
9072 (Op (f32 (extractelt (v4f32 VR128X:$dst), (iPTR 0))),
9074 (!cast<I>("V"#OpcPrefix#SSZrr_Int) v4f32:$dst,
9075 (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
9077 // extracted scalar math op with insert via blend
9078 def : Pat<(v4f32 (X86Blendi (v4f32 VR128X:$dst), (v4f32 (scalar_to_vector
9079 (Op (f32 (extractelt (v4f32 VR128X:$dst), (iPTR 0))),
9080 FR32X:$src))), (i8 1))),
9081 (!cast<I>("V"#OpcPrefix#SSZrr_Int) v4f32:$dst,
9082 (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
9084 // vector math op with insert via movss
9085 def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst),
9086 (Op (v4f32 VR128X:$dst), (v4f32 VR128X:$src)))),
9087 (!cast<I>("V"#OpcPrefix#SSZrr_Int) v4f32:$dst, v4f32:$src)>;
9089 // vector math op with insert via blend
9090 def : Pat<(v4f32 (X86Blendi (v4f32 VR128X:$dst),
9091 (Op (v4f32 VR128X:$dst), (v4f32 VR128X:$src)), (i8 1))),
9092 (!cast<I>("V"#OpcPrefix#SSZrr_Int) v4f32:$dst, v4f32:$src)>;
9094 // extracted masked scalar math op with insert via movss
9095 def : Pat<(X86Movss (v4f32 VR128X:$src1),
9097 (X86selects VK1WM:$mask,
9098 (Op (f32 (extractelt (v4f32 VR128X:$src1), (iPTR 0))),
9101 (!cast<I>("V"#OpcPrefix#SSZrr_Intk) (COPY_TO_REGCLASS FR32X:$src0, VR128X),
9102 VK1WM:$mask, v4f32:$src1,
9103 (COPY_TO_REGCLASS FR32X:$src2, VR128X))>;
9107 defm : AVX512_scalar_math_f32_patterns<fadd, "ADD">;
9108 defm : AVX512_scalar_math_f32_patterns<fsub, "SUB">;
9109 defm : AVX512_scalar_math_f32_patterns<fmul, "MUL">;
9110 defm : AVX512_scalar_math_f32_patterns<fdiv, "DIV">;
// f64 counterpart of the f32 patterns above: fold "op then re-insert
// element 0" idioms (via movsd or blend) into the single V<op>SD
// intrinsic-form instruction; also the masked (X86selects) variant.
9112 multiclass AVX512_scalar_math_f64_patterns<SDNode Op, string OpcPrefix> {
9113 let Predicates = [HasAVX512] in {
9114 // extracted scalar math op with insert via movsd
9115 def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst), (v2f64 (scalar_to_vector
9116 (Op (f64 (extractelt (v2f64 VR128X:$dst), (iPTR 0))),
9118 (!cast<I>("V"#OpcPrefix#SDZrr_Int) v2f64:$dst,
9119 (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
9121 // extracted scalar math op with insert via blend
9122 def : Pat<(v2f64 (X86Blendi (v2f64 VR128X:$dst), (v2f64 (scalar_to_vector
9123 (Op (f64 (extractelt (v2f64 VR128X:$dst), (iPTR 0))),
9124 FR64X:$src))), (i8 1))),
9125 (!cast<I>("V"#OpcPrefix#SDZrr_Int) v2f64:$dst,
9126 (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
9128 // vector math op with insert via movsd
9129 def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst),
9130 (Op (v2f64 VR128X:$dst), (v2f64 VR128X:$src)))),
9131 (!cast<I>("V"#OpcPrefix#SDZrr_Int) v2f64:$dst, v2f64:$src)>;
9133 // vector math op with insert via blend
9134 def : Pat<(v2f64 (X86Blendi (v2f64 VR128X:$dst),
9135 (Op (v2f64 VR128X:$dst), (v2f64 VR128X:$src)), (i8 1))),
9136 (!cast<I>("V"#OpcPrefix#SDZrr_Int) v2f64:$dst, v2f64:$src)>;
9138 // extracted masked scalar math op with insert via movsd
9139 def : Pat<(X86Movsd (v2f64 VR128X:$src1),
9141 (X86selects VK1WM:$mask,
9142 (Op (f64 (extractelt (v2f64 VR128X:$src1), (iPTR 0))),
9145 (!cast<I>("V"#OpcPrefix#SDZrr_Intk) (COPY_TO_REGCLASS FR64X:$src0, VR128X),
9146 VK1WM:$mask, v2f64:$src1,
9147 (COPY_TO_REGCLASS FR64X:$src2, VR128X))>;
9151 defm : AVX512_scalar_math_f64_patterns<fadd, "ADD">;
9152 defm : AVX512_scalar_math_f64_patterns<fsub, "SUB">;
9153 defm : AVX512_scalar_math_f64_patterns<fmul, "MUL">;
9154 defm : AVX512_scalar_math_f64_patterns<fdiv, "DIV">;