1 //===-- X86InstrAVX512.td - AVX512 Instruction Set ---------*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 AVX512 instruction set, defining the
11 // instructions, and properties of the instructions which are needed for code
12 // generation, machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 // Group template arguments that can be derived from the vector type (EltNum x
17 // EltVT). These are things like the register class for the writemask, etc.
18 // The idea is to pass one of these as the template argument rather than the
19 // individual arguments.
20 // The template is also used for scalar types, in this case numelts is 1.
21 class X86VectorVTInfo<int numelts, ValueType eltvt, RegisterClass rc,
23 RegisterClass RC = rc;
24 ValueType EltVT = eltvt;
25 int NumElts = numelts;
27 // Corresponding mask register class.
28 RegisterClass KRC = !cast<RegisterClass>("VK" # NumElts);
30 // Corresponding write-mask register class.
31 RegisterClass KRCWM = !cast<RegisterClass>("VK" # NumElts # "WM");
// The mask ValueType: one i1 per vector element, e.g. v16i1 for v16i32.
34 ValueType KVT = !cast<ValueType>("v" # NumElts # "i1");
36 // Suffix used in the instruction mnemonic.
37 string Suffix = suffix;
39 // VTName is a string name for vector VT. For vector types it will be
40 // v # NumElts # EltVT, so for vector of 8 elements of i32 it will be v8i32
41 // It is a little bit complex for scalar types, where NumElts = 1.
42 // In this case we build v4f32 or v2f64
43 string VTName = "v" # !if (!eq (NumElts, 1),
44 !if (!eq (EltVT.Size, 32), 4,
45 !if (!eq (EltVT.Size, 64), 2, NumElts)), NumElts) # EltVT;
// The vector ValueType reconstructed from its string name.
48 ValueType VT = !cast<ValueType>(VTName);
50 string EltTypeName = !cast<string>(EltVT);
51 // Size of the element type in bits, e.g. 32 for v16i32.
52 string EltSizeName = !subst("i", "", !subst("f", "", EltTypeName));
53 int EltSize = EltVT.Size;
55 // "i" for integer types and "f" for floating-point types
56 string TypeVariantName = !subst(EltSizeName, "", EltTypeName);
58 // Size of RC in bits, e.g. 512 for VR512.
61 // The corresponding memory operand, e.g. i512mem for VR512.
62 X86MemOperand MemOp = !cast<X86MemOperand>(TypeVariantName # Size # "mem");
63 X86MemOperand ScalarMemOp = !cast<X86MemOperand>(EltVT # "mem");
64 // FP scalar memory operand for intrinsics - ssmem/sdmem.
65 Operand IntScalarMemOp = !if (!eq (EltTypeName, "f32"), !cast<Operand>("ssmem"),
66 !if (!eq (EltTypeName, "f64"), !cast<Operand>("sdmem"), ?));
69 // Note: For 128/256-bit integer VT we choose loadv2i64/loadv4i64
70 // due to load promotion during legalization
71 PatFrag LdFrag = !cast<PatFrag>("load" #
72 !if (!eq (TypeVariantName, "i"),
73 !if (!eq (Size, 128), "v2i64",
74 !if (!eq (Size, 256), "v4i64",
75 !if (!eq (Size, 512), "v8i64",
// Aligned-load counterpart of LdFrag; same v*i64 promotion for integer types.
78 PatFrag AlignedLdFrag = !cast<PatFrag>("alignedload" #
79 !if (!eq (TypeVariantName, "i"),
80 !if (!eq (Size, 128), "v2i64",
81 !if (!eq (Size, 256), "v4i64",
82 !if (!eq (Size, 512), "v8i64",
// Load fragment for a single scalar element of this vector type.
85 PatFrag ScalarLdFrag = !cast<PatFrag>("load" # EltVT);
// ComplexPattern used by scalar FP intrinsic patterns to fold partial loads.
87 ComplexPattern ScalarIntMemCPat = !if (!eq (EltTypeName, "f32"),
88 !cast<ComplexPattern>("sse_load_f32"),
89 !if (!eq (EltTypeName, "f64"),
90 !cast<ComplexPattern>("sse_load_f64"),
93 // The corresponding float type, e.g. v16f32 for v16i32
94 // Note: For EltSize < 32, FloatVT is illegal and TableGen
95 // fails to compile, so we choose FloatVT = VT
96 ValueType FloatVT = !cast<ValueType>(
97 !if (!eq (!srl(EltSize,5),0),
99 !if (!eq(TypeVariantName, "i"),
100 "v" # NumElts # "f" # EltSize,
// The corresponding integer type, e.g. v16i32 for v16f32 (VT when EltSize < 32).
103 ValueType IntVT = !cast<ValueType>(
104 !if (!eq (!srl(EltSize,5),0),
106 !if (!eq(TypeVariantName, "f"),
107 "v" # NumElts # "i" # EltSize,
109 // The string to specify embedded broadcast in assembly.
110 string BroadcastStr = "{1to" # NumElts # "}";
112 // 8-bit compressed displacement tuple/subvector format. This is only
113 // defined for NumElts <= 8.
114 CD8VForm CD8TupleForm = !if (!eq (!srl(NumElts, 4), 0),
115 !cast<CD8VForm>("CD8VT" # NumElts), ?);
// Sub-register index used when this VT lives in the low part of a wider register.
117 SubRegIndex SubRegIdx = !if (!eq (Size, 128), sub_xmm,
118 !if (!eq (Size, 256), sub_ymm, ?));
// Execution domain used to avoid SSE domain-crossing penalties.
120 Domain ExeDomain = !if (!eq (EltTypeName, "f32"), SSEPackedSingle,
121 !if (!eq (EltTypeName, "f64"), SSEPackedDouble,
// Scalar FP register class matching the element type.
124 RegisterClass FRC = !if (!eq (EltTypeName, "f32"), FR32X, FR64X);
126 // A vector type of the same width with element type i64. This is used to
127 // create patterns for logic ops.
128 ValueType i64VT = !cast<ValueType>("v" # !srl(Size, 6) # "i64");
130 // A vector type of the same width with element type i32. This is used to
131 // create the canonical constant zero node ImmAllZerosV.
132 ValueType i32VT = !cast<ValueType>("v" # !srl(Size, 5) # "i32");
133 dag ImmAllZerosV = (VT (bitconvert (i32VT immAllZerosV)));
// Instruction-name suffix encoding the vector width: Z128/Z256 for VL, Z for 512-bit.
135 string ZSuffix = !if (!eq (Size, 128), "Z128",
136 !if (!eq (Size, 256), "Z256", "Z"));
// Concrete X86VectorVTInfo instances for every legal AVX-512 vector type.
// 512-bit types live in VR512.
139 def v64i8_info : X86VectorVTInfo<64, i8, VR512, "b">;
140 def v32i16_info : X86VectorVTInfo<32, i16, VR512, "w">;
141 def v16i32_info : X86VectorVTInfo<16, i32, VR512, "d">;
142 def v8i64_info : X86VectorVTInfo<8, i64, VR512, "q">;
143 def v16f32_info : X86VectorVTInfo<16, f32, VR512, "ps">;
144 def v8f64_info : X86VectorVTInfo<8, f64, VR512, "pd">;
146 // "x" in v32i8x_info means RC = VR256X
147 def v32i8x_info : X86VectorVTInfo<32, i8, VR256X, "b">;
148 def v16i16x_info : X86VectorVTInfo<16, i16, VR256X, "w">;
149 def v8i32x_info : X86VectorVTInfo<8, i32, VR256X, "d">;
150 def v4i64x_info : X86VectorVTInfo<4, i64, VR256X, "q">;
151 def v8f32x_info : X86VectorVTInfo<8, f32, VR256X, "ps">;
152 def v4f64x_info : X86VectorVTInfo<4, f64, VR256X, "pd">;
// 128-bit types live in VR128X (the EVEX-encodable XMM class).
154 def v16i8x_info : X86VectorVTInfo<16, i8, VR128X, "b">;
155 def v8i16x_info : X86VectorVTInfo<8, i16, VR128X, "w">;
156 def v4i32x_info : X86VectorVTInfo<4, i32, VR128X, "d">;
157 def v2i64x_info : X86VectorVTInfo<2, i64, VR128X, "q">;
158 def v4f32x_info : X86VectorVTInfo<4, f32, VR128X, "ps">;
159 def v2f64x_info : X86VectorVTInfo<2, f64, VR128X, "pd">;
161 // We map scalar types to the smallest (128-bit) vector type
162 // with the appropriate element type. This allows to use the same masking logic.
163 def i32x_info : X86VectorVTInfo<1, i32, GR32, "si">;
164 def i64x_info : X86VectorVTInfo<1, i64, GR64, "sq">;
165 def f32x_info : X86VectorVTInfo<1, f32, VR128X, "ss">;
166 def f64x_info : X86VectorVTInfo<1, f64, VR128X, "sd">;
// Bundles the 512/256/128-bit X86VectorVTInfo instances of one element type,
// so a single template argument can drive EVEX_V512/V256/V128 instantiation.
168 class AVX512VLVectorVTInfo<X86VectorVTInfo i512, X86VectorVTInfo i256,
169 X86VectorVTInfo i128> {
170 X86VectorVTInfo info512 = i512;
171 X86VectorVTInfo info256 = i256;
172 X86VectorVTInfo info128 = i128;
// Per-element-type bundles of the 512/256/128-bit VT infos, used by the
// AVX512VL multiclasses to stamp out all three vector widths at once.
// (The extraction had truncated the third template argument of each def;
// restored to the matching 128-bit v*x_info instances defined above.)
def avx512vl_i8_info  : AVX512VLVectorVTInfo<v64i8_info, v32i8x_info,
                                             v16i8x_info>;
def avx512vl_i16_info : AVX512VLVectorVTInfo<v32i16_info, v16i16x_info,
                                             v8i16x_info>;
def avx512vl_i32_info : AVX512VLVectorVTInfo<v16i32_info, v8i32x_info,
                                             v4i32x_info>;
def avx512vl_i64_info : AVX512VLVectorVTInfo<v8i64_info, v4i64x_info,
                                             v2i64x_info>;
def avx512vl_f32_info : AVX512VLVectorVTInfo<v16f32_info, v8f32x_info,
                                             v4f32x_info>;
def avx512vl_f64_info : AVX512VLVectorVTInfo<v8f64_info, v4f64x_info,
                                             v2f64x_info>;
188 // This multiclass generates the masking variants from the non-masking
189 // variant. It only provides the assembly pieces for the masking variants.
190 // It assumes custom ISel patterns for masking which can be provided as
191 // template arguments.
// Base multiclass: emits the unmasked (NAME), merge-masked (NAME#k) and
// zero-masked (NAME#kz) instruction variants with the proper {%k}/{z}
// assembly decoration. Callers supply the ISel patterns for each variant.
192 multiclass AVX512_maskable_custom<bits<8> O, Format F,
194 dag Ins, dag MaskingIns, dag ZeroMaskingIns,
196 string AttSrcAsm, string IntelSrcAsm,
198 list<dag> MaskingPattern,
199 list<dag> ZeroMaskingPattern,
200 string MaskingConstraint = "",
201 InstrItinClass itin = NoItinerary,
202 bit IsCommutable = 0,
203 bit IsKCommutable = 0> {
204 let isCommutable = IsCommutable in
205 def NAME: AVX512<O, F, Outs, Ins,
206 OpcodeStr#"\t{"#AttSrcAsm#", $dst|"#
207 "$dst, "#IntelSrcAsm#"}",
210 // Prefer over VMOV*rrk Pat<>
211 let isCommutable = IsKCommutable in
212 def NAME#k: AVX512<O, F, Outs, MaskingIns,
213 OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}}|"#
214 "$dst {${mask}}, "#IntelSrcAsm#"}",
215 MaskingPattern, itin>,
217 // In case of the 3src subclass this is overridden with a let.
218 string Constraints = MaskingConstraint;
221 // Zero mask does not add any restrictions to commute operands transformation.
222 // So, it is Ok to use IsCommutable instead of IsKCommutable.
223 let isCommutable = IsCommutable in // Prefer over VMOV*rrkz Pat<>
224 def NAME#kz: AVX512<O, F, Outs, ZeroMaskingIns,
225 OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}} {z}|"#
226 "$dst {${mask}} {z}, "#IntelSrcAsm#"}",
233 // Common base class of AVX512_maskable and AVX512_maskable_3src.
// Builds the set-patterns from RHS/MaskingRHS; zero-masking selects RHS
// against the canonical all-zeros vector via the Select node (vselect by default).
234 multiclass AVX512_maskable_common<bits<8> O, Format F, X86VectorVTInfo _,
236 dag Ins, dag MaskingIns, dag ZeroMaskingIns,
238 string AttSrcAsm, string IntelSrcAsm,
239 dag RHS, dag MaskingRHS,
240 SDNode Select = vselect,
241 string MaskingConstraint = "",
242 InstrItinClass itin = NoItinerary,
243 bit IsCommutable = 0,
244 bit IsKCommutable = 0> :
245 AVX512_maskable_custom<O, F, Outs, Ins, MaskingIns, ZeroMaskingIns, OpcodeStr,
246 AttSrcAsm, IntelSrcAsm,
247 [(set _.RC:$dst, RHS)],
248 [(set _.RC:$dst, MaskingRHS)],
250 (Select _.KRCWM:$mask, RHS, _.ImmAllZerosV))],
251 MaskingConstraint, NoItinerary, IsCommutable,
254 // Similar to AVX512_maskable_common, but with scalar types.
// No RHS patterns are supplied here; only the assembly variants are generated.
255 multiclass AVX512_maskable_fp_common<bits<8> O, Format F, X86VectorVTInfo _,
257 dag Ins, dag MaskingIns, dag ZeroMaskingIns,
259 string AttSrcAsm, string IntelSrcAsm,
260 SDNode Select = vselect,
261 string MaskingConstraint = "",
262 InstrItinClass itin = NoItinerary,
263 bit IsCommutable = 0,
264 bit IsKCommutable = 0> :
265 AVX512_maskable_custom<O, F, Outs, Ins, MaskingIns, ZeroMaskingIns, OpcodeStr,
266 AttSrcAsm, IntelSrcAsm,
268 MaskingConstraint, NoItinerary, IsCommutable,
271 // This multiclass generates the unconditional/non-masking, the masking and
272 // the zero-masking variant of the vector instruction. In the masking case, the
// preserved vector elements come from a new dummy input operand tied to $dst.
274 multiclass AVX512_maskable<bits<8> O, Format F, X86VectorVTInfo _,
275 dag Outs, dag Ins, string OpcodeStr,
276 string AttSrcAsm, string IntelSrcAsm,
278 InstrItinClass itin = NoItinerary,
279 bit IsCommutable = 0, bit IsKCommutable = 0,
280 SDNode Select = vselect> :
281 AVX512_maskable_common<O, F, _, Outs, Ins,
// Merge-masking adds a $src0 pass-through operand tied to $dst ("$src0 = $dst").
282 !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
283 !con((ins _.KRCWM:$mask), Ins),
284 OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
285 (Select _.KRCWM:$mask, RHS, _.RC:$src0), Select,
286 "$src0 = $dst", itin, IsCommutable, IsKCommutable>;
288 // This multiclass generates the unconditional/non-masking, the masking and
289 // the zero-masking variant of the scalar instruction.
// Same shape as AVX512_maskable but masking uses X86selects (scalar select).
290 multiclass AVX512_maskable_scalar<bits<8> O, Format F, X86VectorVTInfo _,
291 dag Outs, dag Ins, string OpcodeStr,
292 string AttSrcAsm, string IntelSrcAsm,
294 InstrItinClass itin = NoItinerary,
295 bit IsCommutable = 0> :
296 AVX512_maskable_common<O, F, _, Outs, Ins,
297 !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
298 !con((ins _.KRCWM:$mask), Ins),
299 OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
300 (X86selects _.KRCWM:$mask, RHS, _.RC:$src0),
301 X86selects, "$src0 = $dst", itin, IsCommutable>;
303 // Similar to AVX512_maskable but in this case one of the source operands
304 // ($src1) is already tied to $dst so we just use that for the preserved
305 // vector elements. NOTE that the NonTiedIns (the ins dag) should exclude
// the tied $src1 operand; it is prepended here for all three variants.
307 multiclass AVX512_maskable_3src<bits<8> O, Format F, X86VectorVTInfo _,
308 dag Outs, dag NonTiedIns, string OpcodeStr,
309 string AttSrcAsm, string IntelSrcAsm,
310 dag RHS, bit IsCommutable = 0,
311 bit IsKCommutable = 0> :
312 AVX512_maskable_common<O, F, _, Outs,
313 !con((ins _.RC:$src1), NonTiedIns),
314 !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
315 !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
316 OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
317 (vselect _.KRCWM:$mask, RHS, _.RC:$src1),
318 vselect, "", NoItinerary, IsCommutable, IsKCommutable>;
// Scalar counterpart of AVX512_maskable_3src: $src1 is tied to $dst and
// provides the preserved element; masking uses X86selects.
320 multiclass AVX512_maskable_3src_scalar<bits<8> O, Format F, X86VectorVTInfo _,
321 dag Outs, dag NonTiedIns, string OpcodeStr,
322 string AttSrcAsm, string IntelSrcAsm,
323 dag RHS, bit IsCommutable = 0,
324 bit IsKCommutable = 0> :
325 AVX512_maskable_common<O, F, _, Outs,
326 !con((ins _.RC:$src1), NonTiedIns),
327 !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
328 !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
329 OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
330 (X86selects _.KRCWM:$mask, RHS, _.RC:$src1),
331 X86selects, "", NoItinerary, IsCommutable,
// Generates the masked assembly variants only; the sole ISel pattern goes on
// the unmasked form. Used where PatFrags (e.g. vextract_extract) prevent
// expressing the masked patterns directly.
334 multiclass AVX512_maskable_in_asm<bits<8> O, Format F, X86VectorVTInfo _,
337 string AttSrcAsm, string IntelSrcAsm,
339 AVX512_maskable_custom<O, F, Outs, Ins,
340 !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
341 !con((ins _.KRCWM:$mask), Ins),
342 OpcodeStr, AttSrcAsm, IntelSrcAsm, Pattern, [], [],
346 // Instruction with mask that puts result in mask register,
347 // like "compare" and "vptest"
// Only unmasked and merge-masked (EVEX_K) forms exist; there is no
// zero-masking for instructions whose destination is a mask register.
348 multiclass AVX512_maskable_custom_cmp<bits<8> O, Format F,
350 dag Ins, dag MaskingIns,
352 string AttSrcAsm, string IntelSrcAsm,
354 list<dag> MaskingPattern,
355 bit IsCommutable = 0> {
356 let isCommutable = IsCommutable in
357 def NAME: AVX512<O, F, Outs, Ins,
358 OpcodeStr#"\t{"#AttSrcAsm#", $dst|"#
359 "$dst, "#IntelSrcAsm#"}",
360 Pattern, NoItinerary>;
362 def NAME#k: AVX512<O, F, Outs, MaskingIns,
363 OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}}|"#
364 "$dst {${mask}}, "#IntelSrcAsm#"}",
365 MaskingPattern, NoItinerary>, EVEX_K;
// Builds the set-patterns (result lands in the mask register class KRC)
// from RHS/MaskingRHS on top of AVX512_maskable_custom_cmp.
368 multiclass AVX512_maskable_common_cmp<bits<8> O, Format F, X86VectorVTInfo _,
370 dag Ins, dag MaskingIns,
372 string AttSrcAsm, string IntelSrcAsm,
373 dag RHS, dag MaskingRHS,
374 bit IsCommutable = 0> :
375 AVX512_maskable_custom_cmp<O, F, Outs, Ins, MaskingIns, OpcodeStr,
376 AttSrcAsm, IntelSrcAsm,
377 [(set _.KRC:$dst, RHS)],
378 [(set _.KRC:$dst, MaskingRHS)], IsCommutable>;
// Standard compare wrapper: the masked result is the unmasked result ANDed
// with the incoming mask.
380 multiclass AVX512_maskable_cmp<bits<8> O, Format F, X86VectorVTInfo _,
381 dag Outs, dag Ins, string OpcodeStr,
382 string AttSrcAsm, string IntelSrcAsm,
383 dag RHS, bit IsCommutable = 0> :
384 AVX512_maskable_common_cmp<O, F, _, Outs, Ins,
385 !con((ins _.KRCWM:$mask), Ins),
386 OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
387 (and _.KRCWM:$mask, RHS), IsCommutable>;
// Assembly-only compare variant (no ISel patterns), e.g. for alternate
// mnemonic forms that are matched by hand elsewhere.
389 multiclass AVX512_maskable_cmp_alt<bits<8> O, Format F, X86VectorVTInfo _,
390 dag Outs, dag Ins, string OpcodeStr,
391 string AttSrcAsm, string IntelSrcAsm> :
392 AVX512_maskable_custom_cmp<O, F, Outs,
393 Ins, !con((ins _.KRCWM:$mask),Ins), OpcodeStr,
394 AttSrcAsm, IntelSrcAsm, [],[]>;
396 // This multiclass generates the unconditional/non-masking, the masking and
397 // the zero-masking variant of the vector instruction. In the masking case, the
// preserved vector elements come from a new dummy input operand tied to $dst.
// Separate RHS/MaskedRHS let logic ops use a bitcast-free dag under the select.
399 multiclass AVX512_maskable_logic<bits<8> O, Format F, X86VectorVTInfo _,
400 dag Outs, dag Ins, string OpcodeStr,
401 string AttSrcAsm, string IntelSrcAsm,
402 dag RHS, dag MaskedRHS,
403 InstrItinClass itin = NoItinerary,
404 bit IsCommutable = 0, SDNode Select = vselect> :
405 AVX512_maskable_custom<O, F, Outs, Ins,
406 !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
407 !con((ins _.KRCWM:$mask), Ins),
408 OpcodeStr, AttSrcAsm, IntelSrcAsm,
409 [(set _.RC:$dst, RHS)],
411 (Select _.KRCWM:$mask, MaskedRHS, _.RC:$src0))],
413 (Select _.KRCWM:$mask, MaskedRHS,
415 "$src0 = $dst", itin, IsCommutable>;
// Bitcasts between 512-bit vector types. Return the original type since
// no instruction is needed for the conversion. One pattern per ordered pair
// of distinct 512-bit types (5 per destination type).
// Fix: the (v32i16 (bitconvert (v16f32 ...))) pattern was listed twice;
// the redundant duplicate is removed.
def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v8f64 (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v8f64 (bitconvert (v64i8 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v8i64 (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v8i64 (bitconvert (v64i8 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
def : Pat<(v64i8 (bitconvert (v8i64 VR512:$src))), (v64i8 VR512:$src)>;
def : Pat<(v64i8 (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
def : Pat<(v64i8 (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
def : Pat<(v64i8 (bitconvert (v8f64 VR512:$src))), (v64i8 VR512:$src)>;
def : Pat<(v64i8 (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;
451 // Alias instruction that maps zero vector to pxor / xorp* for AVX-512.
452 // This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
453 // swizzled by ExecutionDepsFix to pxor.
454 // We set canFoldAsLoad because this can be converted to a constant-pool
455 // load of an all-zeros value if folding it would be beneficial.
456 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
457 isPseudo = 1, Predicates = [HasAVX512], SchedRW = [WriteZero] in {
458 def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
459 [(set VR512:$dst, (v16i32 immAllZerosV))]>;
// All-ones counterpart (vpternlog/pcmpeq expansion territory).
460 def AVX512_512_SETALLONES : I<0, Pseudo, (outs VR512:$dst), (ins), "",
461 [(set VR512:$dst, (v16i32 immAllOnesV))]>;
464 // Alias instructions that allow VPTERNLOG to be used with a mask to create
465 // a mix of all ones and all zeros elements. This is done this way to force
466 // the same register to be used as input for all three sources.
467 let isPseudo = 1, Predicates = [HasAVX512] in {
// vselect of all-ones/all-zeros == sign-extension of the k-mask into elements.
468 def AVX512_512_SEXT_MASK_32 : I<0, Pseudo, (outs VR512:$dst),
469 (ins VK16WM:$mask), "",
470 [(set VR512:$dst, (vselect (v16i1 VK16WM:$mask),
471 (v16i32 immAllOnesV),
472 (v16i32 immAllZerosV)))]>;
// 64-bit element version; constants are bitcast from the v16i32 canonical form.
473 def AVX512_512_SEXT_MASK_64 : I<0, Pseudo, (outs VR512:$dst),
474 (ins VK8WM:$mask), "",
475 [(set VR512:$dst, (vselect (v8i1 VK8WM:$mask),
476 (bc_v8i64 (v16i32 immAllOnesV)),
477 (bc_v8i64 (v16i32 immAllZerosV))))]>;
// 128/256-bit zero pseudos, the VLX-register (VR128X/VR256X) counterparts
// of AVX512_512_SET0 above; expanded post-RA to a xor idiom.
480 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
481 isPseudo = 1, Predicates = [HasAVX512], SchedRW = [WriteZero] in {
482 def AVX512_128_SET0 : I<0, Pseudo, (outs VR128X:$dst), (ins), "",
483 [(set VR128X:$dst, (v4i32 immAllZerosV))]>;
484 def AVX512_256_SET0 : I<0, Pseudo, (outs VR256X:$dst), (ins), "",
485 [(set VR256X:$dst, (v8i32 immAllZerosV))]>;
488 // Alias instructions that map fld0 to xorps for sse or vxorps for avx.
489 // This is expanded by ExpandPostRAPseudos.
490 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
491 isPseudo = 1, SchedRW = [WriteZero], Predicates = [HasAVX512] in {
// Scalar FP +0.0 in the EVEX scalar register classes FR32X/FR64X.
492 def AVX512_FsFLD0SS : I<0, Pseudo, (outs FR32X:$dst), (ins), "",
493 [(set FR32X:$dst, fp32imm0)]>;
494 def AVX512_FsFLD0SD : I<0, Pseudo, (outs FR64X:$dst), (ins), "",
495 [(set FR64X:$dst, fpimm0)]>;
498 //===----------------------------------------------------------------------===//
499 // AVX-512 - VECTOR INSERT
// Generates VINSERT{F,I}<elt>x<n> register and memory forms for one
// (From, To) vector-width pair, with full merge/zero masking support.
501 multiclass vinsert_for_size<int Opcode, X86VectorVTInfo From, X86VectorVTInfo To,
502 PatFrag vinsert_insert> {
503 let ExeDomain = To.ExeDomain in {
504 defm rr : AVX512_maskable<Opcode, MRMSrcReg, To, (outs To.RC:$dst),
505 (ins To.RC:$src1, From.RC:$src2, u8imm:$src3),
506 "vinsert" # From.EltTypeName # "x" # From.NumElts,
507 "$src3, $src2, $src1", "$src1, $src2, $src3",
508 (vinsert_insert:$src3 (To.VT To.RC:$src1),
509 (From.VT From.RC:$src2),
510 (iPTR imm))>, AVX512AIi8Base, EVEX_4V;
// Memory form: the inserted subvector is loaded with From's load fragment.
512 defm rm : AVX512_maskable<Opcode, MRMSrcMem, To, (outs To.RC:$dst),
513 (ins To.RC:$src1, From.MemOp:$src2, u8imm:$src3),
514 "vinsert" # From.EltTypeName # "x" # From.NumElts,
515 "$src3, $src2, $src1", "$src1, $src2, $src3",
516 (vinsert_insert:$src3 (To.VT To.RC:$src1),
517 (From.VT (bitconvert (From.LdFrag addr:$src2))),
518 (iPTR imm))>, AVX512AIi8Base, EVEX_4V,
519 EVEX_CD8<From.EltSize, From.CD8TupleForm>;
// Codegen-only patterns that lower an insert_subvector of alternative types
// onto an existing VINSERT instruction (named by InstrStr), reg and mem forms.
523 multiclass vinsert_for_size_lowering<string InstrStr, X86VectorVTInfo From,
524 X86VectorVTInfo To, PatFrag vinsert_insert,
525 SDNodeXForm INSERT_get_vinsert_imm , list<Predicate> p> {
526 let Predicates = p in {
527 def : Pat<(vinsert_insert:$ins
528 (To.VT To.RC:$src1), (From.VT From.RC:$src2), (iPTR imm)),
529 (To.VT (!cast<Instruction>(InstrStr#"rr")
530 To.RC:$src1, From.RC:$src2,
// The XForm converts the element-scaled insert index into the instruction imm.
531 (INSERT_get_vinsert_imm To.RC:$ins)))>;
533 def : Pat<(vinsert_insert:$ins
535 (From.VT (bitconvert (From.LdFrag addr:$src2))),
537 (To.VT (!cast<Instruction>(InstrStr#"rm")
538 To.RC:$src1, addr:$src2,
539 (INSERT_get_vinsert_imm To.RC:$ins)))>;
// Instantiates all width/granularity combinations for one element-type pair:
// 32x4 and 64x4 always; 64x2 and 32x8 only with AVX512DQ; Z256 forms need VLX.
543 multiclass vinsert_for_type<ValueType EltVT32, int Opcode128,
544 ValueType EltVT64, int Opcode256> {
546 let Predicates = [HasVLX] in
547 defm NAME # "32x4Z256" : vinsert_for_size<Opcode128,
548 X86VectorVTInfo< 4, EltVT32, VR128X>,
549 X86VectorVTInfo< 8, EltVT32, VR256X>,
550 vinsert128_insert>, EVEX_V256;
552 defm NAME # "32x4Z" : vinsert_for_size<Opcode128,
553 X86VectorVTInfo< 4, EltVT32, VR128X>,
554 X86VectorVTInfo<16, EltVT32, VR512>,
555 vinsert128_insert>, EVEX_V512;
557 defm NAME # "64x4Z" : vinsert_for_size<Opcode256,
558 X86VectorVTInfo< 4, EltVT64, VR256X>,
559 X86VectorVTInfo< 8, EltVT64, VR512>,
560 vinsert256_insert>, VEX_W, EVEX_V512;
562 let Predicates = [HasVLX, HasDQI] in
563 defm NAME # "64x2Z256" : vinsert_for_size<Opcode128,
564 X86VectorVTInfo< 2, EltVT64, VR128X>,
565 X86VectorVTInfo< 4, EltVT64, VR256X>,
566 vinsert128_insert>, VEX_W, EVEX_V256;
568 let Predicates = [HasDQI] in {
569 defm NAME # "64x2Z" : vinsert_for_size<Opcode128,
570 X86VectorVTInfo< 2, EltVT64, VR128X>,
571 X86VectorVTInfo< 8, EltVT64, VR512>,
572 vinsert128_insert>, VEX_W, EVEX_V512;
574 defm NAME # "32x8Z" : vinsert_for_size<Opcode256,
575 X86VectorVTInfo< 8, EltVT32, VR256X>,
576 X86VectorVTInfo<16, EltVT32, VR512>,
577 vinsert256_insert>, EVEX_V512;
// Instantiate the FP (0x18/0x1a) and integer (0x38/0x3a) insert opcodes.
581 defm VINSERTF : vinsert_for_type<f32, 0x18, f64, 0x1a>;
582 defm VINSERTI : vinsert_for_type<i32, 0x38, i64, 0x3a>;
// Codegen patterns with the alternative types.
585 // Only add this if 64x2 and its friends are not supported natively via AVX512DQ.
586 defm : vinsert_for_size_lowering<"VINSERTF32x4Z256", v2f64x_info, v4f64x_info,
587 vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX, NoDQI]>;
588 defm : vinsert_for_size_lowering<"VINSERTI32x4Z256", v2i64x_info, v4i64x_info,
589 vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX, NoDQI]>;
591 defm : vinsert_for_size_lowering<"VINSERTF32x4Z", v2f64x_info, v8f64_info,
592 vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512, NoDQI]>;
593 defm : vinsert_for_size_lowering<"VINSERTI32x4Z", v2i64x_info, v8i64_info,
594 vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512, NoDQI]>;
596 defm : vinsert_for_size_lowering<"VINSERTF64x4Z", v8f32x_info, v16f32_info,
597 vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512, NoDQI]>;
598 defm : vinsert_for_size_lowering<"VINSERTI64x4Z", v8i32x_info, v16i32_info,
599 vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512, NoDQI]>;
601 // Codegen pattern with the alternative types insert VEC128 into VEC256
602 defm : vinsert_for_size_lowering<"VINSERTI32x4Z256", v8i16x_info, v16i16x_info,
603 vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX]>;
604 defm : vinsert_for_size_lowering<"VINSERTI32x4Z256", v16i8x_info, v32i8x_info,
605 vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX]>;
606 // Codegen pattern with the alternative types insert VEC128 into VEC512
607 defm : vinsert_for_size_lowering<"VINSERTI32x4Z", v8i16x_info, v32i16_info,
608 vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512]>;
609 defm : vinsert_for_size_lowering<"VINSERTI32x4Z", v16i8x_info, v64i8_info,
610 vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512]>;
611 // Codegen pattern with the alternative types insert VEC256 into VEC512
612 defm : vinsert_for_size_lowering<"VINSERTI64x4Z", v16i16x_info, v32i16_info,
613 vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512]>;
614 defm : vinsert_for_size_lowering<"VINSERTI64x4Z", v32i8x_info, v64i8_info,
615 vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512]>;
617 // vinsertps - insert f32 to XMM
// EVEX-encoded VINSERTPS; no masking variants exist for this instruction.
618 let ExeDomain = SSEPackedSingle in {
619 def VINSERTPSZrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
620 (ins VR128X:$src1, VR128X:$src2, u8imm:$src3),
621 "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
622 [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
// Memory form folds a scalar f32 load into the inserted element.
624 def VINSERTPSZrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
625 (ins VR128X:$src1, f32mem:$src2, u8imm:$src3),
626 "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
627 [(set VR128X:$dst, (X86insertps VR128X:$src1,
628 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
629 imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
632 //===----------------------------------------------------------------------===//
633 // AVX-512 VECTOR EXTRACT
// Generates VEXTRACT{F,I}<elt>x<n> register, store and masked-store forms
// for one (From, To) width pair, plus masked-select patterns over the rr form.
636 multiclass vextract_for_size<int Opcode,
637 X86VectorVTInfo From, X86VectorVTInfo To,
638 PatFrag vextract_extract,
639 SDNodeXForm EXTRACT_get_vextract_imm> {
641 let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
642 // use AVX512_maskable_in_asm (AVX512_maskable can't be used due to
// vextract_extract), we are interested only in patterns without mask,
// intrinsics pattern match generated below.
645 defm rr : AVX512_maskable_in_asm<Opcode, MRMDestReg, To, (outs To.RC:$dst),
646 (ins From.RC:$src1, u8imm:$idx),
647 "vextract" # To.EltTypeName # "x" # To.NumElts,
648 "$idx, $src1", "$src1, $idx",
649 [(set To.RC:$dst, (vextract_extract:$idx (From.VT From.RC:$src1),
651 AVX512AIi8Base, EVEX;
// Store form: extract directly to memory.
652 def mr : AVX512AIi8<Opcode, MRMDestMem, (outs),
653 (ins To.MemOp:$dst, From.RC:$src1, u8imm:$idx),
654 "vextract" # To.EltTypeName # "x" # To.NumElts #
655 "\t{$idx, $src1, $dst|$dst, $src1, $idx}",
656 [(store (To.VT (vextract_extract:$idx
657 (From.VT From.RC:$src1), (iPTR imm))),
// Masked store form is assembly-only (no ISel pattern).
660 let mayStore = 1, hasSideEffects = 0 in
661 def mrk : AVX512AIi8<Opcode, MRMDestMem, (outs),
662 (ins To.MemOp:$dst, To.KRCWM:$mask,
663 From.RC:$src1, u8imm:$idx),
664 "vextract" # To.EltTypeName # "x" # To.NumElts #
665 "\t{$idx, $src1, $dst {${mask}}|"
666 "$dst {${mask}}, $src1, $idx}",
// Merge-masked extract: select of extract and pass-through maps to rrk.
670 def : Pat<(To.VT (vselect To.KRCWM:$mask,
671 (vextract_extract:$ext (From.VT From.RC:$src1),
674 (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts #
675 From.ZSuffix # "rrk")
676 To.RC:$src0, To.KRCWM:$mask, From.RC:$src1,
677 (EXTRACT_get_vextract_imm To.RC:$ext))>;
// Zero-masked extract maps to rrkz.
679 def : Pat<(To.VT (vselect To.KRCWM:$mask,
680 (vextract_extract:$ext (From.VT From.RC:$src1),
683 (!cast<Instruction>(NAME # To.EltSize # "x" # To.NumElts #
684 From.ZSuffix # "rrkz")
685 To.KRCWM:$mask, From.RC:$src1,
686 (EXTRACT_get_vextract_imm To.RC:$ext))>;
689 // Codegen pattern for the alternative types
// Lowers extract_subvector of alternative types onto an existing VEXTRACT
// instruction (named by InstrStr), reg and store forms.
690 multiclass vextract_for_size_lowering<string InstrStr, X86VectorVTInfo From,
691 X86VectorVTInfo To, PatFrag vextract_extract,
692 SDNodeXForm EXTRACT_get_vextract_imm, list<Predicate> p> {
693 let Predicates = p in {
694 def : Pat<(vextract_extract:$ext (From.VT From.RC:$src1), (iPTR imm)),
695 (To.VT (!cast<Instruction>(InstrStr#"rr")
697 (EXTRACT_get_vextract_imm To.RC:$ext)))>;
698 def : Pat<(store (To.VT (vextract_extract:$ext (From.VT From.RC:$src1),
699 (iPTR imm))), addr:$dst),
700 (!cast<Instruction>(InstrStr#"mr") addr:$dst, From.RC:$src1,
701 (EXTRACT_get_vextract_imm To.RC:$ext))>;
// Instantiates all width/granularity combinations for one element-type pair:
// 32x4 and 64x4 always; 64x2 and 32x8 only with AVX512DQ; Z256 forms need VLX.
705 multiclass vextract_for_type<ValueType EltVT32, int Opcode128,
706 ValueType EltVT64, int Opcode256> {
707 defm NAME # "32x4Z" : vextract_for_size<Opcode128,
708 X86VectorVTInfo<16, EltVT32, VR512>,
709 X86VectorVTInfo< 4, EltVT32, VR128X>,
711 EXTRACT_get_vextract128_imm>,
712 EVEX_V512, EVEX_CD8<32, CD8VT4>;
713 defm NAME # "64x4Z" : vextract_for_size<Opcode256,
714 X86VectorVTInfo< 8, EltVT64, VR512>,
715 X86VectorVTInfo< 4, EltVT64, VR256X>,
717 EXTRACT_get_vextract256_imm>,
718 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VT4>;
719 let Predicates = [HasVLX] in
720 defm NAME # "32x4Z256" : vextract_for_size<Opcode128,
721 X86VectorVTInfo< 8, EltVT32, VR256X>,
722 X86VectorVTInfo< 4, EltVT32, VR128X>,
724 EXTRACT_get_vextract128_imm>,
725 EVEX_V256, EVEX_CD8<32, CD8VT4>;
726 let Predicates = [HasVLX, HasDQI] in
727 defm NAME # "64x2Z256" : vextract_for_size<Opcode128,
728 X86VectorVTInfo< 4, EltVT64, VR256X>,
729 X86VectorVTInfo< 2, EltVT64, VR128X>,
731 EXTRACT_get_vextract128_imm>,
732 VEX_W, EVEX_V256, EVEX_CD8<64, CD8VT2>;
733 let Predicates = [HasDQI] in {
734 defm NAME # "64x2Z" : vextract_for_size<Opcode128,
735 X86VectorVTInfo< 8, EltVT64, VR512>,
736 X86VectorVTInfo< 2, EltVT64, VR128X>,
738 EXTRACT_get_vextract128_imm>,
739 VEX_W, EVEX_V512, EVEX_CD8<64, CD8VT2>;
740 defm NAME # "32x8Z" : vextract_for_size<Opcode256,
741 X86VectorVTInfo<16, EltVT32, VR512>,
742 X86VectorVTInfo< 8, EltVT32, VR256X>,
744 EXTRACT_get_vextract256_imm>,
745 EVEX_V512, EVEX_CD8<32, CD8VT8>;
// Instantiate the FP (0x19/0x1b) and integer (0x39/0x3b) extract opcodes.
749 defm VEXTRACTF : vextract_for_type<f32, 0x19, f64, 0x1b>;
750 defm VEXTRACTI : vextract_for_type<i32, 0x39, i64, 0x3b>;
752 // extract_subvector codegen patterns with the alternative types.
753 // Only add this if 64x2 and its friends are not supported natively via AVX512DQ.
754 defm : vextract_for_size_lowering<"VEXTRACTF32x4Z", v8f64_info, v2f64x_info,
755 vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512, NoDQI]>;
756 defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v8i64_info, v2i64x_info,
757 vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512, NoDQI]>;
759 defm : vextract_for_size_lowering<"VEXTRACTF64x4Z", v16f32_info, v8f32x_info,
760 vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512, NoDQI]>;
761 defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v16i32_info, v8i32x_info,
762 vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512, NoDQI]>;
764 defm : vextract_for_size_lowering<"VEXTRACTF32x4Z256", v4f64x_info, v2f64x_info,
765 vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX, NoDQI]>;
766 defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v4i64x_info, v2i64x_info,
767 vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX, NoDQI]>;
769 // Codegen pattern with the alternative types extract VEC128 from VEC256
770 defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v16i16x_info, v8i16x_info,
771 vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX]>;
772 defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v32i8x_info, v16i8x_info,
773 vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX]>;
775 // Codegen pattern with the alternative types extract VEC128 from VEC512
776 defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v32i16_info, v8i16x_info,
777 vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512]>;
778 defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v64i8_info, v16i8x_info,
779 vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512]>;
780 // Codegen pattern with the alternative types extract VEC256 from VEC512
781 defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v32i16_info, v16i16x_info,
782 vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;
783 defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v64i8_info, v32i8x_info,
784 vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;
786 // A 128-bit subvector extract from the first 256-bit vector position
787 // is a subregister copy that needs no instruction.
// All six element types of VR512 are covered; the extract becomes a plain
// sub_xmm subregister read.
788 def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
789 (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
790 def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
791 (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
792 def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
793 (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
794 def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
795 (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
796 def : Pat<(v8i16 (extract_subvector (v32i16 VR512:$src), (iPTR 0))),
797 (v8i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_xmm))>;
798 def : Pat<(v16i8 (extract_subvector (v64i8 VR512:$src), (iPTR 0))),
799 (v16i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_xmm))>;
801 // A 256-bit subvector extract from the first 256-bit vector position
802 // is a subregister copy that needs no instruction.
// Same as above for the sub_ymm half of a ZMM register.
803 def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
804 (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>;
805 def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
806 (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>;
807 def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
808 (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>;
809 def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
810 (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>;
811 def : Pat<(v16i16 (extract_subvector (v32i16 VR512:$src), (iPTR 0))),
812 (v16i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_ymm))>;
813 def : Pat<(v32i8 (extract_subvector (v64i8 VR512:$src), (iPTR 0))),
814 (v32i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_ymm))>;
// Inserting into element 0 of an otherwise-undef wide vector is also just a
// subregister copy; the AddedComplexity makes these win over vinsertf128rm.
816 let AddedComplexity = 25 in { // to give priority over vinsertf128rm
817 // A 128-bit subvector insert to the first 512-bit vector position
818 // is a subregister copy that needs no instruction.
819 def : Pat<(v8i64 (insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0))),
820 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
821 def : Pat<(v8f64 (insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0))),
822 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
823 def : Pat<(v16i32 (insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0))),
824 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
825 def : Pat<(v16f32 (insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0))),
826 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
827 def : Pat<(v32i16 (insert_subvector undef, (v8i16 VR128X:$src), (iPTR 0))),
828 (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
829 def : Pat<(v64i8 (insert_subvector undef, (v16i8 VR128X:$src), (iPTR 0))),
830 (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
832 // A 256-bit subvector insert to the first 512-bit vector position
833 // is a subregister copy that needs no instruction.
834 def : Pat<(v8i64 (insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0))),
835 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
836 def : Pat<(v8f64 (insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0))),
837 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
838 def : Pat<(v16i32 (insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0))),
839 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
840 def : Pat<(v16f32 (insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0))),
841 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
842 def : Pat<(v32i16 (insert_subvector undef, (v16i16 VR256X:$src), (iPTR 0))),
843 (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
844 def : Pat<(v64i8 (insert_subvector undef, (v32i8 VR256X:$src), (iPTR 0))),
845 (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
848 // vextractps - extract 32 bits from XMM
// Register form: selects one 32-bit lane of $src1 (via imm) into a GR32.
849 def VEXTRACTPSZrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
850 (ins VR128X:$src1, u8imm:$src2),
851 "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
852 [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
// Memory form: stores the selected 32-bit lane directly to f32mem.
855 def VEXTRACTPSZmr : AVX512AIi8<0x17, MRMDestMem, (outs),
856 (ins f32mem:$dst, VR128X:$src1, u8imm:$src2),
857 "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
858 [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
859 addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;
861 //===---------------------------------------------------------------------===//
864 // broadcast with a scalar argument.
// Selects the broadcast instruction (plain / merge-masked / zero-masked)
// when the source is a scalar FP register; the scalar is first copied into
// the vector register class expected by the instruction.
865 multiclass avx512_broadcast_scalar<bits<8> opc, string OpcodeStr,
866 X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
// Unmasked broadcast from an FRC scalar.
867 def : Pat<(DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)),
868 (!cast<Instruction>(NAME#DestInfo.ZSuffix#r)
869 (COPY_TO_REGCLASS SrcInfo.FRC:$src, SrcInfo.RC))>;
// Merge-masked: unselected lanes come from $src0.
870 def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask,
871 (X86VBroadcast SrcInfo.FRC:$src),
873 (!cast<Instruction>(NAME#DestInfo.ZSuffix#rk)
874 DestInfo.RC:$src0, DestInfo.KRCWM:$mask,
875 (COPY_TO_REGCLASS SrcInfo.FRC:$src, SrcInfo.RC))>;
// Zero-masked: unselected lanes are zeroed.
876 def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask,
877 (X86VBroadcast SrcInfo.FRC:$src),
878 DestInfo.ImmAllZerosV)),
879 (!cast<Instruction>(NAME#DestInfo.ZSuffix#rkz)
880 DestInfo.KRCWM:$mask, (COPY_TO_REGCLASS SrcInfo.FRC:$src, SrcInfo.RC))>;
// Register and memory forms of a broadcast instruction, plus patterns that
// fold a scalar load (scalar_to_vector of ScalarLdFrag) into the memory form
// for the unmasked, merge-masked and zero-masked variants.
883 multiclass avx512_broadcast_rm<bits<8> opc, string OpcodeStr,
884 X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
885 let ExeDomain = DestInfo.ExeDomain in {
// rr form: broadcast lane 0 of a vector register.
886 defm r : AVX512_maskable<opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
887 (ins SrcInfo.RC:$src), OpcodeStr, "$src", "$src",
888 (DestInfo.VT (X86VBroadcast (SrcInfo.VT SrcInfo.RC:$src)))>,
// rm form: broadcast a scalar loaded from memory.
890 defm m : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
891 (ins SrcInfo.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
892 (DestInfo.VT (X86VBroadcast
893 (SrcInfo.ScalarLdFrag addr:$src)))>,
894 T8PD, EVEX, EVEX_CD8<SrcInfo.EltSize, CD8VT1>;
// Fold scalar_to_vector-of-load into the memory form (unmasked).
897 def : Pat<(DestInfo.VT (X86VBroadcast
898 (SrcInfo.VT (scalar_to_vector
899 (SrcInfo.ScalarLdFrag addr:$src))))),
900 (!cast<Instruction>(NAME#DestInfo.ZSuffix#m) addr:$src)>;
// Same fold, merge-masked.
901 def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask,
903 (SrcInfo.VT (scalar_to_vector
904 (SrcInfo.ScalarLdFrag addr:$src)))),
906 (!cast<Instruction>(NAME#DestInfo.ZSuffix#mk)
907 DestInfo.RC:$src0, DestInfo.KRCWM:$mask, addr:$src)>;
// Same fold, zero-masked.
908 def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask,
910 (SrcInfo.VT (scalar_to_vector
911 (SrcInfo.ScalarLdFrag addr:$src)))),
912 DestInfo.ImmAllZerosV)),
913 (!cast<Instruction>(NAME#DestInfo.ZSuffix#mkz)
914 DestInfo.KRCWM:$mask, addr:$src)>;
// FP broadcast instantiation for f64 (vbroadcastsd): Z form under HasAVX512,
// Z256 form under HasVLX. Note: no Z128 form — there is no 128-bit
// vbroadcastsd.
917 multiclass avx512_fp_broadcast_sd<bits<8> opc, string OpcodeStr,
918 AVX512VLVectorVTInfo _> {
919 let Predicates = [HasAVX512] in
920 defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
921 avx512_broadcast_scalar<opc, OpcodeStr, _.info512, _.info128>,
924 let Predicates = [HasVLX] in {
925 defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _.info256, _.info128>,
926 avx512_broadcast_scalar<opc, OpcodeStr, _.info256, _.info128>,
// FP broadcast instantiation for f32 (vbroadcastss): same as above but with
// an additional Z128 form under HasVLX.
931 multiclass avx512_fp_broadcast_ss<bits<8> opc, string OpcodeStr,
932 AVX512VLVectorVTInfo _> {
933 let Predicates = [HasAVX512] in
934 defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
935 avx512_broadcast_scalar<opc, OpcodeStr, _.info512, _.info128>,
938 let Predicates = [HasVLX] in {
939 defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _.info256, _.info128>,
940 avx512_broadcast_scalar<opc, OpcodeStr, _.info256, _.info128>,
942 defm Z128 : avx512_broadcast_rm<opc, OpcodeStr, _.info128, _.info128>,
943 avx512_broadcast_scalar<opc, OpcodeStr, _.info128, _.info128>,
// Instantiate the FP broadcasts and map the legacy 512-bit broadcast
// intrinsics onto the memory forms.
947 defm VBROADCASTSS : avx512_fp_broadcast_ss<0x18, "vbroadcastss",
949 defm VBROADCASTSD : avx512_fp_broadcast_sd<0x19, "vbroadcastsd",
950 avx512vl_f64_info>, VEX_W;
952 def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
953 (VBROADCASTSSZm addr:$src)>;
954 def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
955 (VBROADCASTSDZm addr:$src)>;
// Integer broadcast whose source is a GPR (vpbroadcastb/w/d/q reg form).
// OpNode is a SDPatternOperator so callers can pass null_frag for
// assembler-only variants.
957 multiclass avx512_int_broadcast_reg<bits<8> opc, X86VectorVTInfo _,
958 SDPatternOperator OpNode,
959 RegisterClass SrcRC> {
960 let ExeDomain = _.ExeDomain in
961 defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
963 "vpbroadcast"##_.Suffix, "$src", "$src",
964 (_.VT (OpNode SrcRC:$src))>, T8PD, EVEX;
// Vector-length expansion: Z under prd, Z256/Z128 additionally under HasVLX.
967 multiclass avx512_int_broadcast_reg_vl<bits<8> opc, AVX512VLVectorVTInfo _,
968 SDPatternOperator OpNode,
969 RegisterClass SrcRC, Predicate prd> {
970 let Predicates = [prd] in
971 defm Z : avx512_int_broadcast_reg<opc, _.info512, OpNode, SrcRC>, EVEX_V512;
972 let Predicates = [prd, HasVLX] in {
973 defm Z256 : avx512_int_broadcast_reg<opc, _.info256, OpNode, SrcRC>, EVEX_V256;
974 defm Z128 : avx512_int_broadcast_reg<opc, _.info128, OpNode, SrcRC>, EVEX_V128;
// GPR-source broadcasts. The GR8/GR16 variants are codegen-only; the _Alt
// GR32-source variants exist purely for the assembler (null_frag pattern).
978 let isCodeGenOnly = 1 in {
979 defm VPBROADCASTBr : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info,
980 X86VBroadcast, GR8, HasBWI>;
981 defm VPBROADCASTWr : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info,
982 X86VBroadcast, GR16, HasBWI>;
984 let isAsmParserOnly = 1 in {
985 defm VPBROADCASTBr_Alt : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info,
986 null_frag, GR32, HasBWI>;
987 defm VPBROADCASTWr_Alt : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info,
988 null_frag, GR32, HasBWI>;
990 defm VPBROADCASTDr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i32_info,
991 X86VBroadcast, GR32, HasAVX512>;
992 defm VPBROADCASTQr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i64_info,
993 X86VBroadcast, GR64, HasAVX512>, VEX_W;
// Lower a mask zero-extension to a masked broadcast of constant 1.
995 def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
996 (VPBROADCASTDrZrkz VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
997 def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
998 (VPBROADCASTQrZrkz VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
1000 // Provide aliases for broadcast from the same register class that
1001 // automatically does the extract.
// When the broadcast source is a wider vector, extract its low xmm first.
1002 multiclass avx512_int_broadcast_rm_lowering<X86VectorVTInfo DestInfo,
1003 X86VectorVTInfo SrcInfo> {
1004 def : Pat<(DestInfo.VT (X86VBroadcast (SrcInfo.VT SrcInfo.RC:$src))),
1005 (!cast<Instruction>(NAME#DestInfo.ZSuffix#"r")
1006 (EXTRACT_SUBREG (SrcInfo.VT SrcInfo.RC:$src), sub_xmm))>;
// Vector-length expansion for the xmm-source integer broadcasts, together
// with the extract-lowering patterns above for wider sources.
1009 multiclass avx512_int_broadcast_rm_vl<bits<8> opc, string OpcodeStr,
1010 AVX512VLVectorVTInfo _, Predicate prd> {
1011 let Predicates = [prd] in {
1012 defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
1013 avx512_int_broadcast_rm_lowering<_.info512, _.info256>,
1015 // Defined separately to avoid redefinition.
1016 defm Z_Alt : avx512_int_broadcast_rm_lowering<_.info512, _.info512>;
1018 let Predicates = [prd, HasVLX] in {
1019 defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _.info256, _.info128>,
1020 avx512_int_broadcast_rm_lowering<_.info256, _.info256>,
1022 defm Z128 : avx512_broadcast_rm<opc, OpcodeStr, _.info128, _.info128>,
// Instantiate the xmm/mem-source integer broadcasts for all element sizes.
1027 defm VPBROADCASTB : avx512_int_broadcast_rm_vl<0x78, "vpbroadcastb",
1028 avx512vl_i8_info, HasBWI>;
1029 defm VPBROADCASTW : avx512_int_broadcast_rm_vl<0x79, "vpbroadcastw",
1030 avx512vl_i16_info, HasBWI>;
1031 defm VPBROADCASTD : avx512_int_broadcast_rm_vl<0x58, "vpbroadcastd",
1032 avx512vl_i32_info, HasAVX512>;
1033 defm VPBROADCASTQ : avx512_int_broadcast_rm_vl<0x59, "vpbroadcastq",
1034 avx512vl_i64_info, HasAVX512>, VEX_W;
// Memory-only subvector broadcast (vbroadcasti32x4 etc.): loads a subvector
// and replicates it across the destination.
1036 multiclass avx512_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
1037 X86VectorVTInfo _Dst, X86VectorVTInfo _Src> {
1038 defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
1039 (ins _Src.MemOp:$src), OpcodeStr, "$src", "$src",
1040 (_Dst.VT (X86SubVBroadcast
1041 (_Src.VT (bitconvert (_Src.LdFrag addr:$src)))))>,
1045 let Predicates = [HasAVX512] in {
1046 // 32-bit targets will fail to load a i64 directly but can use ZEXT_LOAD.
1047 def : Pat<(v8i64 (X86VBroadcast (v8i64 (X86vzload addr:$src)))),
1048 (VPBROADCASTQZm addr:$src)>;
1051 let Predicates = [HasVLX, HasBWI] in {
1052 // 32-bit targets will fail to load a i64 directly but can use ZEXT_LOAD.
1053 def : Pat<(v2i64 (X86VBroadcast (v2i64 (X86vzload addr:$src)))),
1054 (VPBROADCASTQZ128m addr:$src)>;
1055 def : Pat<(v4i64 (X86VBroadcast (v4i64 (X86vzload addr:$src)))),
1056 (VPBROADCASTQZ256m addr:$src)>;
1057 // loadi16 is tricky to fold, because !isTypeDesirableForOp, justifiably.
1058 // This means we'll encounter truncated i32 loads; match that here.
1059 def : Pat<(v8i16 (X86VBroadcast (i16 (trunc (i32 (load addr:$src)))))),
1060 (VPBROADCASTWZ128m addr:$src)>;
1061 def : Pat<(v16i16 (X86VBroadcast (i16 (trunc (i32 (load addr:$src)))))),
1062 (VPBROADCASTWZ256m addr:$src)>;
1063 def : Pat<(v8i16 (X86VBroadcast
1064 (i16 (trunc (i32 (zextloadi16 addr:$src)))))),
1065 (VPBROADCASTWZ128m addr:$src)>;
1066 def : Pat<(v16i16 (X86VBroadcast
1067 (i16 (trunc (i32 (zextloadi16 addr:$src)))))),
1068 (VPBROADCASTWZ256m addr:$src)>;
1071 //===----------------------------------------------------------------------===//
1072 // AVX-512 BROADCAST SUBVECTORS
// 512-bit destination forms available with base AVX-512 (32x4/64x4 shapes).
1075 defm VBROADCASTI32X4 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
1076 v16i32_info, v4i32x_info>,
1077 EVEX_V512, EVEX_CD8<32, CD8VT4>;
1078 defm VBROADCASTF32X4 : avx512_subvec_broadcast_rm<0x1a, "vbroadcastf32x4",
1079 v16f32_info, v4f32x_info>,
1080 EVEX_V512, EVEX_CD8<32, CD8VT4>;
1081 defm VBROADCASTI64X4 : avx512_subvec_broadcast_rm<0x5b, "vbroadcasti64x4",
1082 v8i64_info, v4i64x_info>, VEX_W,
1083 EVEX_V512, EVEX_CD8<64, CD8VT4>;
1084 defm VBROADCASTF64X4 : avx512_subvec_broadcast_rm<0x1b, "vbroadcastf64x4",
1085 v8f64_info, v4f64x_info>, VEX_W,
1086 EVEX_V512, EVEX_CD8<64, CD8VT4>;
1088 let Predicates = [HasAVX512] in {
// i16/i8 subvector broadcasts reuse the 64x4 form via bitcast.
1089 def : Pat<(v32i16 (X86SubVBroadcast (bc_v16i16 (loadv4i64 addr:$src)))),
1090 (VBROADCASTI64X4rm addr:$src)>;
1091 def : Pat<(v64i8 (X86SubVBroadcast (bc_v32i8 (loadv4i64 addr:$src)))),
1092 (VBROADCASTI64X4rm addr:$src)>;
1094 // Provide fallback in case the load node that is used in the patterns above
1095 // is used by additional users, which prevents the pattern selection.
// Register-source fallback: insert the ymm into both halves of a zmm.
1096 def : Pat<(v8f64 (X86SubVBroadcast (v4f64 VR256X:$src))),
1097 (VINSERTF64x4Zrr (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1098 (v4f64 VR256X:$src), 1)>;
1099 def : Pat<(v8i64 (X86SubVBroadcast (v4i64 VR256X:$src))),
1100 (VINSERTI64x4Zrr (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1101 (v4i64 VR256X:$src), 1)>;
1102 def : Pat<(v32i16 (X86SubVBroadcast (v16i16 VR256X:$src))),
1103 (VINSERTI64x4Zrr (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1104 (v16i16 VR256X:$src), 1)>;
1105 def : Pat<(v64i8 (X86SubVBroadcast (v32i8 VR256X:$src))),
1106 (VINSERTI64x4Zrr (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1107 (v32i8 VR256X:$src), 1)>;
1109 def : Pat<(v32i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
1110 (VBROADCASTI32X4rm addr:$src)>;
1111 def : Pat<(v64i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
1112 (VBROADCASTI32X4rm addr:$src)>;
// 256-bit destination forms of the 32x4 subvector broadcasts (VLX), plus
// bitcast load patterns and register-source fallbacks.
1115 let Predicates = [HasVLX] in {
1116 defm VBROADCASTI32X4Z256 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
1117 v8i32x_info, v4i32x_info>,
1118 EVEX_V256, EVEX_CD8<32, CD8VT4>;
1119 defm VBROADCASTF32X4Z256 : avx512_subvec_broadcast_rm<0x1a, "vbroadcastf32x4",
1120 v8f32x_info, v4f32x_info>,
1121 EVEX_V256, EVEX_CD8<32, CD8VT4>;
1123 def : Pat<(v16i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
1124 (VBROADCASTI32X4Z256rm addr:$src)>;
1125 def : Pat<(v32i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
1126 (VBROADCASTI32X4Z256rm addr:$src)>;
1128 // Provide fallback in case the load node that is used in the patterns above
1129 // is used by additional users, which prevents the pattern selection.
// Register-source fallback: insert the xmm into both halves of a ymm.
1130 def : Pat<(v8f32 (X86SubVBroadcast (v4f32 VR128X:$src))),
1131 (VINSERTF32x4Z256rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1132 (v4f32 VR128X:$src), 1)>;
1133 def : Pat<(v8i32 (X86SubVBroadcast (v4i32 VR128X:$src))),
1134 (VINSERTI32x4Z256rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1135 (v4i32 VR128X:$src), 1)>;
1136 def : Pat<(v16i16 (X86SubVBroadcast (v8i16 VR128X:$src))),
1137 (VINSERTI32x4Z256rr (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1138 (v8i16 VR128X:$src), 1)>;
1139 def : Pat<(v32i8 (X86SubVBroadcast (v16i8 VR128X:$src))),
1140 (VINSERTI32x4Z256rr (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1141 (v16i8 VR128X:$src), 1)>;
// DQI adds native 64x2 subvector broadcasts at 256-bit width.
1144 let Predicates = [HasVLX, HasDQI] in {
// NOTE(review): these defm names end in "Z128" but the instructions are
// 256-bit (EVEX_V256, v4i64x_info/v4f64x_info destinations) — the suffix
// looks like a misnomer; confirm before relying on the names elsewhere.
1145 defm VBROADCASTI64X2Z128 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti64x2",
1146 v4i64x_info, v2i64x_info>, VEX_W,
1147 EVEX_V256, EVEX_CD8<64, CD8VT2>;
1148 defm VBROADCASTF64X2Z128 : avx512_subvec_broadcast_rm<0x1a, "vbroadcastf64x2",
1149 v4f64x_info, v2f64x_info>, VEX_W,
1150 EVEX_V256, EVEX_CD8<64, CD8VT2>;
1152 // Provide fallback in case the load node that is used in the patterns above
1153 // is used by additional users, which prevents the pattern selection.
1154 def : Pat<(v4f64 (X86SubVBroadcast (v2f64 VR128X:$src))),
1155 (VINSERTF64x2Z256rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1156 (v2f64 VR128X:$src), 1)>;
1157 def : Pat<(v4i64 (X86SubVBroadcast (v2i64 VR128X:$src))),
1158 (VINSERTI64x2Z256rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1159 (v2i64 VR128X:$src), 1)>;
// Without DQI, emulate 64x2 broadcasts with the bit-identical 32x4 forms.
1162 let Predicates = [HasVLX, NoDQI] in {
1163 def : Pat<(v4f64 (X86SubVBroadcast (loadv2f64 addr:$src))),
1164 (VBROADCASTF32X4Z256rm addr:$src)>;
1165 def : Pat<(v4i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
1166 (VBROADCASTI32X4Z256rm addr:$src)>;
1168 // Provide fallback in case the load node that is used in the patterns above
1169 // is used by additional users, which prevents the pattern selection.
1170 def : Pat<(v4f64 (X86SubVBroadcast (v2f64 VR128X:$src))),
1171 (VINSERTF32x4Z256rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1172 (v2f64 VR128X:$src), 1)>;
1173 def : Pat<(v4i64 (X86SubVBroadcast (v2i64 VR128X:$src))),
1174 (VINSERTI32x4Z256rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
1175 (v2i64 VR128X:$src), 1)>;
// Without DQI, emulate the 64x2 and 32x8 512-bit subvector broadcasts with
// the bit-identical 32x4 / 64x4 instructions.
1178 let Predicates = [HasAVX512, NoDQI] in {
1179 def : Pat<(v8f64 (X86SubVBroadcast (loadv2f64 addr:$src))),
1180 (VBROADCASTF32X4rm addr:$src)>;
1181 def : Pat<(v8i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
1182 (VBROADCASTI32X4rm addr:$src)>;
1184 def : Pat<(v16f32 (X86SubVBroadcast (loadv8f32 addr:$src))),
1185 (VBROADCASTF64X4rm addr:$src)>;
1186 def : Pat<(v16i32 (X86SubVBroadcast (bc_v8i32 (loadv4i64 addr:$src)))),
1187 (VBROADCASTI64X4rm addr:$src)>;
1189 // Provide fallback in case the load node that is used in the patterns above
1190 // is used by additional users, which prevents the pattern selection.
1191 def : Pat<(v16f32 (X86SubVBroadcast (v8f32 VR256X:$src))),
1192 (VINSERTF64x4Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1193 (v8f32 VR256X:$src), 1)>;
1194 def : Pat<(v16i32 (X86SubVBroadcast (v8i32 VR256X:$src))),
1195 (VINSERTI64x4Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1196 (v8i32 VR256X:$src), 1)>;
// DQI-native 512-bit subvector broadcasts: 64x2 and 32x8 shapes.
1199 let Predicates = [HasDQI] in {
1200 defm VBROADCASTI64X2 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti64x2",
1201 v8i64_info, v2i64x_info>, VEX_W,
1202 EVEX_V512, EVEX_CD8<64, CD8VT2>;
1203 defm VBROADCASTI32X8 : avx512_subvec_broadcast_rm<0x5b, "vbroadcasti32x8",
1204 v16i32_info, v8i32x_info>,
1205 EVEX_V512, EVEX_CD8<32, CD8VT8>;
1206 defm VBROADCASTF64X2 : avx512_subvec_broadcast_rm<0x1a, "vbroadcastf64x2",
1207 v8f64_info, v2f64x_info>, VEX_W,
1208 EVEX_V512, EVEX_CD8<64, CD8VT2>;
1209 defm VBROADCASTF32X8 : avx512_subvec_broadcast_rm<0x1b, "vbroadcastf32x8",
1210 v16f32_info, v8f32x_info>,
1211 EVEX_V512, EVEX_CD8<32, CD8VT8>;
1213 // Provide fallback in case the load node that is used in the patterns above
1214 // is used by additional users, which prevents the pattern selection.
1215 def : Pat<(v16f32 (X86SubVBroadcast (v8f32 VR256X:$src))),
1216 (VINSERTF32x8Zrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1217 (v8f32 VR256X:$src), 1)>;
1218 def : Pat<(v16i32 (X86SubVBroadcast (v8i32 VR256X:$src))),
1219 (VINSERTI32x8Zrr (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm),
1220 (v8i32 VR256X:$src), 1)>;
// 32x2 broadcasts (vbroadcast{i,f}32x2, DQI): broadcast a 64-bit pair of
// 32-bit elements. Base version covers Z and Z256 only.
1223 multiclass avx512_common_broadcast_32x2<bits<8> opc, string OpcodeStr,
1224 AVX512VLVectorVTInfo _Dst, AVX512VLVectorVTInfo _Src> {
1225 let Predicates = [HasDQI] in
1226 defm Z : avx512_broadcast_rm<opc, OpcodeStr, _Dst.info512, _Src.info128>,
1228 let Predicates = [HasDQI, HasVLX] in
1229 defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _Dst.info256, _Src.info128>,
// Integer variant additionally gets a Z128 form (the FP 32x2 broadcast has
// no 128-bit encoding, hence the separate multiclass).
1233 multiclass avx512_common_broadcast_i32x2<bits<8> opc, string OpcodeStr,
1234 AVX512VLVectorVTInfo _Dst, AVX512VLVectorVTInfo _Src> :
1235 avx512_common_broadcast_32x2<opc, OpcodeStr, _Dst, _Src> {
1237 let Predicates = [HasDQI, HasVLX] in
1238 defm Z128 : avx512_broadcast_rm<opc, OpcodeStr, _Dst.info128, _Src.info128>,
1242 defm VBROADCASTI32X2 : avx512_common_broadcast_i32x2<0x59, "vbroadcasti32x2",
1243 avx512vl_i32_info, avx512vl_i64_info>;
1244 defm VBROADCASTF32X2 : avx512_common_broadcast_32x2<0x19, "vbroadcastf32x2",
1245 avx512vl_f32_info, avx512vl_f64_info>;
// Broadcasting lane 0 of a wide vector register: extract the low xmm and use
// the register-form vbroadcastss/sd.
1247 let Predicates = [HasVLX] in {
1248 def : Pat<(v8f32 (X86VBroadcast (v8f32 VR256X:$src))),
1249 (VBROADCASTSSZ256r (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm))>;
1250 def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256X:$src))),
1251 (VBROADCASTSDZ256r (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm))>;
// Same idea for 512-bit destinations.
1254 def : Pat<(v16f32 (X86VBroadcast (v16f32 VR512:$src))),
1255 (VBROADCASTSSZr (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
1256 def : Pat<(v16f32 (X86VBroadcast (v8f32 VR256X:$src))),
1257 (VBROADCASTSSZr (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm))>;
1259 def : Pat<(v8f64 (X86VBroadcast (v8f64 VR512:$src))),
1260 (VBROADCASTSDZr (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
1261 def : Pat<(v8f64 (X86VBroadcast (v4f64 VR256X:$src))),
1262 (VBROADCASTSDZr (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm))>;
1264 //===----------------------------------------------------------------------===//
1265 // AVX-512 BROADCAST MASK TO VECTOR REGISTER
// Single rr form: expand a mask register into a vector via X86VBroadcastm.
1267 multiclass avx512_mask_broadcastm<bits<8> opc, string OpcodeStr,
1268 X86VectorVTInfo _, RegisterClass KRC> {
1269 def rr : AVX512XS8I<opc, MRMSrcReg, (outs _.RC:$dst), (ins KRC:$src),
1270 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
1271 [(set _.RC:$dst, (_.VT (X86VBroadcastm KRC:$src)))]>, EVEX;
// Vector-length expansion, gated on CDI (+VLX for the narrow forms).
1274 multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
1275 AVX512VLVectorVTInfo VTInfo, RegisterClass KRC> {
1276 let Predicates = [HasCDI] in
1277 defm Z : avx512_mask_broadcastm<opc, OpcodeStr, VTInfo.info512, KRC>, EVEX_V512;
1278 let Predicates = [HasCDI, HasVLX] in {
1279 defm Z256 : avx512_mask_broadcastm<opc, OpcodeStr, VTInfo.info256, KRC>, EVEX_V256;
1280 defm Z128 : avx512_mask_broadcastm<opc, OpcodeStr, VTInfo.info128, KRC>, EVEX_V128;
1284 defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d",
1285 avx512vl_i32_info, VK16>;
1286 defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q",
1287 avx512vl_i64_info, VK8>, VEX_W;
1289 //===----------------------------------------------------------------------===//
1290 // -- VPERMI2 - 3 source operands form --
// VPERMI2*: the tied $src1 operand is the index vector and is overwritten
// with the permute result.
1291 multiclass avx512_perm_i<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
1292 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
1293 // The index operand in the pattern should really be an integer type. However,
1294 // if we do that and it happens to come from a bitcast, then it becomes
1295 // difficult to find the bitcast needed to convert the index to the
1296 // destination type for the passthru since it will be folded with the bitcast
1297 // of the index operand.
1298 defm rr: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
1299 (ins _.RC:$src2, _.RC:$src3),
1300 OpcodeStr, "$src3, $src2", "$src2, $src3",
1301 (_.VT (X86VPermi2X _.RC:$src1, _.RC:$src2, _.RC:$src3)), 1>, EVEX_4V,
// Memory form: third source comes from a full-vector load.
1304 defm rm: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
1305 (ins _.RC:$src2, _.MemOp:$src3),
1306 OpcodeStr, "$src3, $src2", "$src2, $src3",
1307 (_.VT (X86VPermi2X _.RC:$src1, _.RC:$src2,
1308 (_.VT (bitconvert (_.LdFrag addr:$src3))))), 1>,
1309 EVEX_4V, AVX5128IBase;
// Broadcast-memory form of VPERMI2 (third source is a broadcast scalar).
1312 multiclass avx512_perm_i_mb<bits<8> opc, string OpcodeStr,
1313 X86VectorVTInfo _> {
1314 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in
1315 defm rmb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
1316 (ins _.RC:$src2, _.ScalarMemOp:$src3),
1317 OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
1318 !strconcat("$src2, ${src3}", _.BroadcastStr ),
1319 (_.VT (X86VPermi2X _.RC:$src1,
1320 _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))),
1321 1>, AVX5128IBase, EVEX_4V, EVEX_B;
// Vector-length expansion including the broadcast forms (d/q/ps/pd).
1324 multiclass avx512_perm_i_sizes<bits<8> opc, string OpcodeStr,
1325 AVX512VLVectorVTInfo VTInfo> {
1326 defm NAME: avx512_perm_i<opc, OpcodeStr, VTInfo.info512>,
1327 avx512_perm_i_mb<opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
1328 let Predicates = [HasVLX] in {
1329 defm NAME#128: avx512_perm_i<opc, OpcodeStr, VTInfo.info128>,
1330 avx512_perm_i_mb<opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
1331 defm NAME#256: avx512_perm_i<opc, OpcodeStr, VTInfo.info256>,
1332 avx512_perm_i_mb<opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
// Byte/word element variant: no broadcast forms (no b/w embedded broadcast),
// and gated on an extra feature predicate (BWI or VBMI).
1336 multiclass avx512_perm_i_sizes_bw<bits<8> opc, string OpcodeStr,
1337 AVX512VLVectorVTInfo VTInfo,
1339 let Predicates = [Prd] in
1340 defm NAME: avx512_perm_i<opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
1341 let Predicates = [Prd, HasVLX] in {
1342 defm NAME#128: avx512_perm_i<opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
1343 defm NAME#256: avx512_perm_i<opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
// Instantiate VPERMI2 for all element types. d/q/ps/pd get broadcast forms;
// w requires BWI and b requires VBMI.
1347 defm VPERMI2D : avx512_perm_i_sizes<0x76, "vpermi2d",
1348 avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
1349 defm VPERMI2Q : avx512_perm_i_sizes<0x76, "vpermi2q",
1350 avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
1351 defm VPERMI2W : avx512_perm_i_sizes_bw<0x75, "vpermi2w",
1352 avx512vl_i16_info, HasBWI>,
1353 VEX_W, EVEX_CD8<16, CD8VF>;
1354 defm VPERMI2B : avx512_perm_i_sizes_bw<0x75, "vpermi2b",
1355 avx512vl_i8_info, HasVBMI>,
1357 defm VPERMI2PS : avx512_perm_i_sizes<0x77, "vpermi2ps",
1358 avx512vl_f32_info>, EVEX_CD8<32, CD8VF>;
1359 defm VPERMI2PD : avx512_perm_i_sizes<0x77, "vpermi2pd",
1360 avx512vl_f64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
// VPERMT2*: like VPERMI2 but the tied $src1 operand is a data source
// (the index vector is $src2, with its own IdxVT info).
1363 multiclass avx512_perm_t<bits<8> opc, string OpcodeStr,
1364 X86VectorVTInfo _, X86VectorVTInfo IdxVT> {
1365 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
1366 defm rr: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
1367 (ins IdxVT.RC:$src2, _.RC:$src3),
1368 OpcodeStr, "$src3, $src2", "$src2, $src3",
1369 (_.VT (X86VPermt2 _.RC:$src1, IdxVT.RC:$src2, _.RC:$src3)), 1>,
1370 EVEX_4V, AVX5128IBase;
// Memory form: third source comes from a full-vector load.
1372 defm rm: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
1373 (ins IdxVT.RC:$src2, _.MemOp:$src3),
1374 OpcodeStr, "$src3, $src2", "$src2, $src3",
1375 (_.VT (X86VPermt2 _.RC:$src1, IdxVT.RC:$src2,
1376 (bitconvert (_.LdFrag addr:$src3)))), 1>,
1377 EVEX_4V, AVX5128IBase;
// Broadcast-memory form of VPERMT2.
1380 multiclass avx512_perm_t_mb<bits<8> opc, string OpcodeStr,
1381 X86VectorVTInfo _, X86VectorVTInfo IdxVT> {
1382 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in
1383 defm rmb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
1384 (ins IdxVT.RC:$src2, _.ScalarMemOp:$src3),
1385 OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
1386 !strconcat("$src2, ${src3}", _.BroadcastStr ),
1387 (_.VT (X86VPermt2 _.RC:$src1,
1388 IdxVT.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))),
1389 1>, AVX5128IBase, EVEX_4V, EVEX_B;
// Vector-length expansion of VPERMT2: element info and shuffle-mask (index)
// info are passed separately so FP data can pair with integer indices.
1392 multiclass avx512_perm_t_sizes<bits<8> opc, string OpcodeStr,
1393 AVX512VLVectorVTInfo VTInfo,
1394 AVX512VLVectorVTInfo ShuffleMask> {
1395 defm NAME: avx512_perm_t<opc, OpcodeStr, VTInfo.info512,
1396 ShuffleMask.info512>,
1397 avx512_perm_t_mb<opc, OpcodeStr, VTInfo.info512,
1398 ShuffleMask.info512>, EVEX_V512;
1399 let Predicates = [HasVLX] in {
1400 defm NAME#128: avx512_perm_t<opc, OpcodeStr, VTInfo.info128,
1401 ShuffleMask.info128>,
1402 avx512_perm_t_mb<opc, OpcodeStr, VTInfo.info128,
1403 ShuffleMask.info128>, EVEX_V128;
1404 defm NAME#256: avx512_perm_t<opc, OpcodeStr, VTInfo.info256,
1405 ShuffleMask.info256>,
1406 avx512_perm_t_mb<opc, OpcodeStr, VTInfo.info256,
1407 ShuffleMask.info256>, EVEX_V256;
// Byte/word element variant: no broadcast forms, extra feature predicate.
1411 multiclass avx512_perm_t_sizes_bw<bits<8> opc, string OpcodeStr,
1412 AVX512VLVectorVTInfo VTInfo,
1413 AVX512VLVectorVTInfo Idx,
1415 let Predicates = [Prd] in
1416 defm NAME: avx512_perm_t<opc, OpcodeStr, VTInfo.info512,
1417 Idx.info512>, EVEX_V512;
1418 let Predicates = [Prd, HasVLX] in {
1419 defm NAME#128: avx512_perm_t<opc, OpcodeStr, VTInfo.info128,
1420 Idx.info128>, EVEX_V128;
1421 defm NAME#256: avx512_perm_t<opc, OpcodeStr, VTInfo.info256,
1422 Idx.info256>, EVEX_V256;
// Instantiate VPERMT2 for all element types; FP variants use integer
// index-vector info. w requires BWI, b requires VBMI.
1426 defm VPERMT2D : avx512_perm_t_sizes<0x7E, "vpermt2d",
1427 avx512vl_i32_info, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
1428 defm VPERMT2Q : avx512_perm_t_sizes<0x7E, "vpermt2q",
1429 avx512vl_i64_info, avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
1430 defm VPERMT2W : avx512_perm_t_sizes_bw<0x7D, "vpermt2w",
1431 avx512vl_i16_info, avx512vl_i16_info, HasBWI>,
1432 VEX_W, EVEX_CD8<16, CD8VF>;
1433 defm VPERMT2B : avx512_perm_t_sizes_bw<0x7D, "vpermt2b",
1434 avx512vl_i8_info, avx512vl_i8_info, HasVBMI>,
1436 defm VPERMT2PS : avx512_perm_t_sizes<0x7F, "vpermt2ps",
1437 avx512vl_f32_info, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
1438 defm VPERMT2PD : avx512_perm_t_sizes<0x7F, "vpermt2pd",
1439 avx512vl_f64_info, avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
1441 //===----------------------------------------------------------------------===//
1442 // AVX-512 - BLEND using mask
// Mask-driven blend (vblendm*). All forms have empty patterns ([]) — they
// are selected manually — and hasSideEffects = 0.
1444 multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
1445 let ExeDomain = _.ExeDomain, hasSideEffects = 0 in {
// Register forms: plain, merge-masked (rrk), zero-masked (rrkz).
1446 def rr : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
1447 (ins _.RC:$src1, _.RC:$src2),
1448 !strconcat(OpcodeStr,
1449 "\t{$src2, $src1, ${dst}|${dst}, $src1, $src2}"),
1451 def rrk : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
1452 (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
1453 !strconcat(OpcodeStr,
1454 "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
1455 []>, EVEX_4V, EVEX_K;
1456 def rrkz : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
1457 (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
1458 !strconcat(OpcodeStr,
1459 "\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
1460 []>, EVEX_4V, EVEX_KZ;
// Memory forms: second source from memory, same three masking variants.
1461 let mayLoad = 1 in {
1462 def rm : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1463 (ins _.RC:$src1, _.MemOp:$src2),
1464 !strconcat(OpcodeStr,
1465 "\t{$src2, $src1, ${dst}|${dst}, $src1, $src2}"),
1466 []>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
1467 def rmk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1468 (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
1469 !strconcat(OpcodeStr,
1470 "\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
1471 []>, EVEX_4V, EVEX_K, EVEX_CD8<_.EltSize, CD8VF>;
1472 def rmkz : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1473 (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
1474 !strconcat(OpcodeStr,
1475 "\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
1476 []>, EVEX_4V, EVEX_KZ, EVEX_CD8<_.EltSize, CD8VF>;
1480 multiclass avx512_blendmask_rmb<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
1482 let mayLoad = 1, hasSideEffects = 0 in {
1483 def rmbk : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1484 (ins _.KRCWM:$mask, _.RC:$src1, _.ScalarMemOp:$src2),
1485 !strconcat(OpcodeStr,
1486 "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
1487 "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
1488 []>, EVEX_4V, EVEX_K, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
1490 def rmb : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
1491 (ins _.RC:$src1, _.ScalarMemOp:$src2),
1492 !strconcat(OpcodeStr,
1493 "\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
1494 "$dst, $src1, ${src2}", _.BroadcastStr, "}"),
1495 []>, EVEX_4V, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
1499 multiclass blendmask_dq <bits<8> opc, string OpcodeStr,
1500 AVX512VLVectorVTInfo VTInfo> {
1501 defm Z : avx512_blendmask <opc, OpcodeStr, VTInfo.info512>,
1502 avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
1504 let Predicates = [HasVLX] in {
1505 defm Z256 : avx512_blendmask<opc, OpcodeStr, VTInfo.info256>,
1506 avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
1507 defm Z128 : avx512_blendmask<opc, OpcodeStr, VTInfo.info128>,
1508 avx512_blendmask_rmb <opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
1512 multiclass blendmask_bw <bits<8> opc, string OpcodeStr,
1513 AVX512VLVectorVTInfo VTInfo> {
1514 let Predicates = [HasBWI] in
1515 defm Z : avx512_blendmask <opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
1517 let Predicates = [HasBWI, HasVLX] in {
1518 defm Z256 : avx512_blendmask <opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
1519 defm Z128 : avx512_blendmask <opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
1524 defm VBLENDMPS : blendmask_dq <0x65, "vblendmps", avx512vl_f32_info>;
1525 defm VBLENDMPD : blendmask_dq <0x65, "vblendmpd", avx512vl_f64_info>, VEX_W;
1526 defm VPBLENDMD : blendmask_dq <0x64, "vpblendmd", avx512vl_i32_info>;
1527 defm VPBLENDMQ : blendmask_dq <0x64, "vpblendmq", avx512vl_i64_info>, VEX_W;
1528 defm VPBLENDMB : blendmask_bw <0x66, "vpblendmb", avx512vl_i8_info>;
1529 defm VPBLENDMW : blendmask_bw <0x66, "vpblendmw", avx512vl_i16_info>, VEX_W;
1532 //===----------------------------------------------------------------------===//
1533 // Compare Instructions
1534 //===----------------------------------------------------------------------===//
1536 // avx512_cmp_scalar - AVX512 CMPSS and CMPSD
1538 multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeRnd>{
1540 defm rr_Int : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
1542 (ins _.RC:$src1, _.RC:$src2, AVXCC:$cc),
1543 "vcmp${cc}"#_.Suffix,
1544 "$src2, $src1", "$src1, $src2",
1545 (OpNode (_.VT _.RC:$src1),
1549 defm rm_Int : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
1551 (ins _.RC:$src1, _.IntScalarMemOp:$src2, AVXCC:$cc),
1552 "vcmp${cc}"#_.Suffix,
1553 "$src2, $src1", "$src1, $src2",
1554 (OpNode (_.VT _.RC:$src1), _.ScalarIntMemCPat:$src2,
1555 imm:$cc)>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
1557 defm rrb_Int : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
1559 (ins _.RC:$src1, _.RC:$src2, AVXCC:$cc),
1560 "vcmp${cc}"#_.Suffix,
1561 "{sae}, $src2, $src1", "$src1, $src2, {sae}",
1562 (OpNodeRnd (_.VT _.RC:$src1),
1565 (i32 FROUND_NO_EXC))>, EVEX_4V, EVEX_B;
1566 // Accept explicit immediate argument form instead of comparison code.
1567 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1568 defm rri_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcReg, _,
1570 (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
1572 "$cc, $src2, $src1", "$src1, $src2, $cc">, EVEX_4V;
1574 defm rmi_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcMem, _,
1576 (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$cc),
1578 "$cc, $src2, $src1", "$src1, $src2, $cc">,
1579 EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
1581 defm rrb_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcReg, _,
1583 (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
1585 "$cc, {sae}, $src2, $src1","$src1, $src2, {sae}, $cc">,
1587 }// let isAsmParserOnly = 1, hasSideEffects = 0
1589 let isCodeGenOnly = 1 in {
1590 let isCommutable = 1 in
1591 def rr : AVX512Ii8<0xC2, MRMSrcReg,
1592 (outs _.KRC:$dst), (ins _.FRC:$src1, _.FRC:$src2, AVXCC:$cc),
1593 !strconcat("vcmp${cc}", _.Suffix,
1594 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1595 [(set _.KRC:$dst, (OpNode _.FRC:$src1,
1598 IIC_SSE_ALU_F32S_RR>, EVEX_4V;
1599 def rm : AVX512Ii8<0xC2, MRMSrcMem,
1601 (ins _.FRC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
1602 !strconcat("vcmp${cc}", _.Suffix,
1603 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1604 [(set _.KRC:$dst, (OpNode _.FRC:$src1,
1605 (_.ScalarLdFrag addr:$src2),
1607 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
1611 let Predicates = [HasAVX512] in {
1612 let ExeDomain = SSEPackedSingle in
1613 defm VCMPSSZ : avx512_cmp_scalar<f32x_info, X86cmpms, X86cmpmsRnd>,
1615 let ExeDomain = SSEPackedDouble in
1616 defm VCMPSDZ : avx512_cmp_scalar<f64x_info, X86cmpms, X86cmpmsRnd>,
1617 AVX512XDIi8Base, VEX_W;
1620 multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
1621 X86VectorVTInfo _, bit IsCommutable> {
1622 let isCommutable = IsCommutable in
1623 def rr : AVX512BI<opc, MRMSrcReg,
1624 (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2),
1625 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1626 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2)))],
1627 IIC_SSE_ALU_F32P_RR>, EVEX_4V;
1628 def rm : AVX512BI<opc, MRMSrcMem,
1629 (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2),
1630 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1631 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
1632 (_.VT (bitconvert (_.LdFrag addr:$src2)))))],
1633 IIC_SSE_ALU_F32P_RM>, EVEX_4V;
1634 def rrk : AVX512BI<opc, MRMSrcReg,
1635 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
1636 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
1637 "$dst {${mask}}, $src1, $src2}"),
1638 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1639 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))))],
1640 IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
1641 def rmk : AVX512BI<opc, MRMSrcMem,
1642 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
1643 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
1644 "$dst {${mask}}, $src1, $src2}"),
1645 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1646 (OpNode (_.VT _.RC:$src1),
1648 (_.LdFrag addr:$src2))))))],
1649 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
1652 multiclass avx512_icmp_packed_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
1653 X86VectorVTInfo _, bit IsCommutable> :
1654 avx512_icmp_packed<opc, OpcodeStr, OpNode, _, IsCommutable> {
1655 def rmb : AVX512BI<opc, MRMSrcMem,
1656 (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2),
1657 !strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr, ", $src1, $dst",
1658 "|$dst, $src1, ${src2}", _.BroadcastStr, "}"),
1659 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
1660 (X86VBroadcast (_.ScalarLdFrag addr:$src2))))],
1661 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
1662 def rmbk : AVX512BI<opc, MRMSrcMem,
1663 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
1664 _.ScalarMemOp:$src2),
1665 !strconcat(OpcodeStr,
1666 "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
1667 "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
1668 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1669 (OpNode (_.VT _.RC:$src1),
1671 (_.ScalarLdFrag addr:$src2)))))],
1672 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
1675 multiclass avx512_icmp_packed_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
1676 AVX512VLVectorVTInfo VTInfo, Predicate prd,
1677 bit IsCommutable = 0> {
1678 let Predicates = [prd] in
1679 defm Z : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info512,
1680 IsCommutable>, EVEX_V512;
1682 let Predicates = [prd, HasVLX] in {
1683 defm Z256 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info256,
1684 IsCommutable>, EVEX_V256;
1685 defm Z128 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info128,
1686 IsCommutable>, EVEX_V128;
1690 multiclass avx512_icmp_packed_rmb_vl<bits<8> opc, string OpcodeStr,
1691 SDNode OpNode, AVX512VLVectorVTInfo VTInfo,
1692 Predicate prd, bit IsCommutable = 0> {
1693 let Predicates = [prd] in
1694 defm Z : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info512,
1695 IsCommutable>, EVEX_V512;
1697 let Predicates = [prd, HasVLX] in {
1698 defm Z256 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info256,
1699 IsCommutable>, EVEX_V256;
1700 defm Z128 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info128,
1701 IsCommutable>, EVEX_V128;
1705 defm VPCMPEQB : avx512_icmp_packed_vl<0x74, "vpcmpeqb", X86pcmpeqm,
1706 avx512vl_i8_info, HasBWI, 1>,
1709 defm VPCMPEQW : avx512_icmp_packed_vl<0x75, "vpcmpeqw", X86pcmpeqm,
1710 avx512vl_i16_info, HasBWI, 1>,
1711 EVEX_CD8<16, CD8VF>;
1713 defm VPCMPEQD : avx512_icmp_packed_rmb_vl<0x76, "vpcmpeqd", X86pcmpeqm,
1714 avx512vl_i32_info, HasAVX512, 1>,
1715 EVEX_CD8<32, CD8VF>;
1717 defm VPCMPEQQ : avx512_icmp_packed_rmb_vl<0x29, "vpcmpeqq", X86pcmpeqm,
1718 avx512vl_i64_info, HasAVX512, 1>,
1719 T8PD, VEX_W, EVEX_CD8<64, CD8VF>;
1721 defm VPCMPGTB : avx512_icmp_packed_vl<0x64, "vpcmpgtb", X86pcmpgtm,
1722 avx512vl_i8_info, HasBWI>,
1725 defm VPCMPGTW : avx512_icmp_packed_vl<0x65, "vpcmpgtw", X86pcmpgtm,
1726 avx512vl_i16_info, HasBWI>,
1727 EVEX_CD8<16, CD8VF>;
1729 defm VPCMPGTD : avx512_icmp_packed_rmb_vl<0x66, "vpcmpgtd", X86pcmpgtm,
1730 avx512vl_i32_info, HasAVX512>,
1731 EVEX_CD8<32, CD8VF>;
1733 defm VPCMPGTQ : avx512_icmp_packed_rmb_vl<0x37, "vpcmpgtq", X86pcmpgtm,
1734 avx512vl_i64_info, HasAVX512>,
1735 T8PD, VEX_W, EVEX_CD8<64, CD8VF>;
1737 let Predicates = [HasAVX512, NoVLX] in {
1738 def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
1739 (COPY_TO_REGCLASS (VPCMPGTDZrr
1740 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
1741 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm))), VK8)>;
1743 def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
1744 (COPY_TO_REGCLASS (VPCMPEQDZrr
1745 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
1746 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm))), VK8)>;
1749 multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
1750 X86VectorVTInfo _> {
1751 let isCommutable = 1 in
1752 def rri : AVX512AIi8<opc, MRMSrcReg,
1753 (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, AVX512ICC:$cc),
1754 !strconcat("vpcmp${cc}", Suffix,
1755 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1756 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
1758 IIC_SSE_ALU_F32P_RR>, EVEX_4V;
1759 def rmi : AVX512AIi8<opc, MRMSrcMem,
1760 (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, AVX512ICC:$cc),
1761 !strconcat("vpcmp${cc}", Suffix,
1762 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1763 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
1764 (_.VT (bitconvert (_.LdFrag addr:$src2))),
1766 IIC_SSE_ALU_F32P_RM>, EVEX_4V;
1767 def rrik : AVX512AIi8<opc, MRMSrcReg,
1768 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
1770 !strconcat("vpcmp${cc}", Suffix,
1771 "\t{$src2, $src1, $dst {${mask}}|",
1772 "$dst {${mask}}, $src1, $src2}"),
1773 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1774 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
1776 IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
1777 def rmik : AVX512AIi8<opc, MRMSrcMem,
1778 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
1780 !strconcat("vpcmp${cc}", Suffix,
1781 "\t{$src2, $src1, $dst {${mask}}|",
1782 "$dst {${mask}}, $src1, $src2}"),
1783 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1784 (OpNode (_.VT _.RC:$src1),
1785 (_.VT (bitconvert (_.LdFrag addr:$src2))),
1787 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
1789 // Accept explicit immediate argument form instead of comparison code.
1790 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1791 def rri_alt : AVX512AIi8<opc, MRMSrcReg,
1792 (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
1793 !strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
1794 "$dst, $src1, $src2, $cc}"),
1795 [], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
1797 def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
1798 (outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc),
1799 !strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
1800 "$dst, $src1, $src2, $cc}"),
1801 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
1802 def rrik_alt : AVX512AIi8<opc, MRMSrcReg,
1803 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
1805 !strconcat("vpcmp", Suffix,
1806 "\t{$cc, $src2, $src1, $dst {${mask}}|",
1807 "$dst {${mask}}, $src1, $src2, $cc}"),
1808 [], IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
1810 def rmik_alt : AVX512AIi8<opc, MRMSrcMem,
1811 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
1813 !strconcat("vpcmp", Suffix,
1814 "\t{$cc, $src2, $src1, $dst {${mask}}|",
1815 "$dst {${mask}}, $src1, $src2, $cc}"),
1816 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
1820 multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, SDNode OpNode,
1821 X86VectorVTInfo _> :
1822 avx512_icmp_cc<opc, Suffix, OpNode, _> {
1823 def rmib : AVX512AIi8<opc, MRMSrcMem,
1824 (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
1826 !strconcat("vpcmp${cc}", Suffix,
1827 "\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
1828 "$dst, $src1, ${src2}", _.BroadcastStr, "}"),
1829 [(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
1830 (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
1832 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
1833 def rmibk : AVX512AIi8<opc, MRMSrcMem,
1834 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
1835 _.ScalarMemOp:$src2, AVX512ICC:$cc),
1836 !strconcat("vpcmp${cc}", Suffix,
1837 "\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
1838 "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
1839 [(set _.KRC:$dst, (and _.KRCWM:$mask,
1840 (OpNode (_.VT _.RC:$src1),
1841 (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
1843 IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
1845 // Accept explicit immediate argument form instead of comparison code.
1846 let isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 1 in {
1847 def rmib_alt : AVX512AIi8<opc, MRMSrcMem,
1848 (outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
1850 !strconcat("vpcmp", Suffix,
1851 "\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst|",
1852 "$dst, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
1853 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
1854 def rmibk_alt : AVX512AIi8<opc, MRMSrcMem,
1855 (outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
1856 _.ScalarMemOp:$src2, u8imm:$cc),
1857 !strconcat("vpcmp", Suffix,
1858 "\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
1859 "$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
1860 [], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
1864 multiclass avx512_icmp_cc_vl<bits<8> opc, string Suffix, SDNode OpNode,
1865 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
1866 let Predicates = [prd] in
1867 defm Z : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info512>, EVEX_V512;
1869 let Predicates = [prd, HasVLX] in {
1870 defm Z256 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info256>, EVEX_V256;
1871 defm Z128 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info128>, EVEX_V128;
1875 multiclass avx512_icmp_cc_rmb_vl<bits<8> opc, string Suffix, SDNode OpNode,
1876 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
1877 let Predicates = [prd] in
1878 defm Z : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info512>,
1881 let Predicates = [prd, HasVLX] in {
1882 defm Z256 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info256>,
1884 defm Z128 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info128>,
1889 defm VPCMPB : avx512_icmp_cc_vl<0x3F, "b", X86cmpm, avx512vl_i8_info,
1890 HasBWI>, EVEX_CD8<8, CD8VF>;
1891 defm VPCMPUB : avx512_icmp_cc_vl<0x3E, "ub", X86cmpmu, avx512vl_i8_info,
1892 HasBWI>, EVEX_CD8<8, CD8VF>;
1894 defm VPCMPW : avx512_icmp_cc_vl<0x3F, "w", X86cmpm, avx512vl_i16_info,
1895 HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;
1896 defm VPCMPUW : avx512_icmp_cc_vl<0x3E, "uw", X86cmpmu, avx512vl_i16_info,
1897 HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;
1899 defm VPCMPD : avx512_icmp_cc_rmb_vl<0x1F, "d", X86cmpm, avx512vl_i32_info,
1900 HasAVX512>, EVEX_CD8<32, CD8VF>;
1901 defm VPCMPUD : avx512_icmp_cc_rmb_vl<0x1E, "ud", X86cmpmu, avx512vl_i32_info,
1902 HasAVX512>, EVEX_CD8<32, CD8VF>;
1904 defm VPCMPQ : avx512_icmp_cc_rmb_vl<0x1F, "q", X86cmpm, avx512vl_i64_info,
1905 HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
1906 defm VPCMPUQ : avx512_icmp_cc_rmb_vl<0x1E, "uq", X86cmpmu, avx512vl_i64_info,
1907 HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
1909 multiclass avx512_vcmp_common<X86VectorVTInfo _> {
1911 defm rri : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
1912 (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2,AVXCC:$cc),
1913 "vcmp${cc}"#_.Suffix,
1914 "$src2, $src1", "$src1, $src2",
1915 (X86cmpm (_.VT _.RC:$src1),
1919 defm rmi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
1920 (outs _.KRC:$dst),(ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
1921 "vcmp${cc}"#_.Suffix,
1922 "$src2, $src1", "$src1, $src2",
1923 (X86cmpm (_.VT _.RC:$src1),
1924 (_.VT (bitconvert (_.LdFrag addr:$src2))),
1927 defm rmbi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
1929 (ins _.RC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
1930 "vcmp${cc}"#_.Suffix,
1931 "${src2}"##_.BroadcastStr##", $src1",
1932 "$src1, ${src2}"##_.BroadcastStr,
1933 (X86cmpm (_.VT _.RC:$src1),
1934 (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
1936 // Accept explicit immediate argument form instead of comparison code.
1937 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1938 defm rri_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcReg, _,
1940 (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
1942 "$cc, $src2, $src1", "$src1, $src2, $cc">;
1944 let mayLoad = 1 in {
1945 defm rmi_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcMem, _,
1947 (ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc),
1949 "$cc, $src2, $src1", "$src1, $src2, $cc">;
1951 defm rmbi_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcMem, _,
1953 (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$cc),
1955 "$cc, ${src2}"##_.BroadcastStr##", $src1",
1956 "$src1, ${src2}"##_.BroadcastStr##", $cc">,EVEX_B;
1961 multiclass avx512_vcmp_sae<X86VectorVTInfo _> {
1962 // comparison code form (VCMP[EQ/LT/LE/...]
1963 defm rrib : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
1964 (outs _.KRC:$dst),(ins _.RC:$src1, _.RC:$src2, AVXCC:$cc),
1965 "vcmp${cc}"#_.Suffix,
1966 "{sae}, $src2, $src1", "$src1, $src2, {sae}",
1967 (X86cmpmRnd (_.VT _.RC:$src1),
1970 (i32 FROUND_NO_EXC))>, EVEX_B;
1972 let isAsmParserOnly = 1, hasSideEffects = 0 in {
1973 defm rrib_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcReg, _,
1975 (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
1977 "$cc, {sae}, $src2, $src1",
1978 "$src1, $src2, {sae}, $cc">, EVEX_B;
1982 multiclass avx512_vcmp<AVX512VLVectorVTInfo _> {
1983 let Predicates = [HasAVX512] in {
1984 defm Z : avx512_vcmp_common<_.info512>,
1985 avx512_vcmp_sae<_.info512>, EVEX_V512;
1988 let Predicates = [HasAVX512,HasVLX] in {
1989 defm Z128 : avx512_vcmp_common<_.info128>, EVEX_V128;
1990 defm Z256 : avx512_vcmp_common<_.info256>, EVEX_V256;
1994 defm VCMPPD : avx512_vcmp<avx512vl_f64_info>,
1995 AVX512PDIi8Base, EVEX_4V, EVEX_CD8<64, CD8VF>, VEX_W;
1996 defm VCMPPS : avx512_vcmp<avx512vl_f32_info>,
1997 AVX512PSIi8Base, EVEX_4V, EVEX_CD8<32, CD8VF>;
1999 def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
2000 (COPY_TO_REGCLASS (VCMPPSZrri
2001 (v16f32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
2002 (v16f32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm)),
2004 def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
2005 (COPY_TO_REGCLASS (VPCMPDZrri
2006 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
2007 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm)),
2009 def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
2010 (COPY_TO_REGCLASS (VPCMPUDZrri
2011 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
2012 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src2, sub_ymm)),
2015 // ----------------------------------------------------------------
2017 //handle fpclass instruction mask = op(reg_scalar,imm)
2018 // op(mem_scalar,imm)
2019 multiclass avx512_scalar_fpclass<bits<8> opc, string OpcodeStr, SDNode OpNode,
2020 X86VectorVTInfo _, Predicate prd> {
2021 let Predicates = [prd] in {
2022 def rr : AVX512<opc, MRMSrcReg, (outs _.KRC:$dst),//_.KRC:$dst),
2023 (ins _.RC:$src1, i32u8imm:$src2),
2024 OpcodeStr##_.Suffix#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2025 [(set _.KRC:$dst,(OpNode (_.VT _.RC:$src1),
2026 (i32 imm:$src2)))], NoItinerary>;
2027 def rrk : AVX512<opc, MRMSrcReg, (outs _.KRC:$dst),
2028 (ins _.KRCWM:$mask, _.RC:$src1, i32u8imm:$src2),
2029 OpcodeStr##_.Suffix#
2030 "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
2031 [(set _.KRC:$dst,(or _.KRCWM:$mask,
2032 (OpNode (_.VT _.RC:$src1),
2033 (i32 imm:$src2))))], NoItinerary>, EVEX_K;
2034 def rm : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
2035 (ins _.MemOp:$src1, i32u8imm:$src2),
2036 OpcodeStr##_.Suffix##
2037 "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2039 (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
2040 (i32 imm:$src2)))], NoItinerary>;
2041 def rmk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
2042 (ins _.KRCWM:$mask, _.MemOp:$src1, i32u8imm:$src2),
2043 OpcodeStr##_.Suffix##
2044 "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
2045 [(set _.KRC:$dst,(or _.KRCWM:$mask,
2046 (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
2047 (i32 imm:$src2))))], NoItinerary>, EVEX_K;
2051 //handle fpclass instruction mask = fpclass(reg_vec, reg_vec, imm)
2052 // fpclass(reg_vec, mem_vec, imm)
2053 // fpclass(reg_vec, broadcast(eltVt), imm)
2054 multiclass avx512_vector_fpclass<bits<8> opc, string OpcodeStr, SDNode OpNode,
2055 X86VectorVTInfo _, string mem, string broadcast>{
2056 def rr : AVX512<opc, MRMSrcReg, (outs _.KRC:$dst),
2057 (ins _.RC:$src1, i32u8imm:$src2),
2058 OpcodeStr##_.Suffix#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2059 [(set _.KRC:$dst,(OpNode (_.VT _.RC:$src1),
2060 (i32 imm:$src2)))], NoItinerary>;
2061 def rrk : AVX512<opc, MRMSrcReg, (outs _.KRC:$dst),
2062 (ins _.KRCWM:$mask, _.RC:$src1, i32u8imm:$src2),
2063 OpcodeStr##_.Suffix#
2064 "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
2065 [(set _.KRC:$dst,(or _.KRCWM:$mask,
2066 (OpNode (_.VT _.RC:$src1),
2067 (i32 imm:$src2))))], NoItinerary>, EVEX_K;
2068 def rm : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
2069 (ins _.MemOp:$src1, i32u8imm:$src2),
2070 OpcodeStr##_.Suffix##mem#
2071 "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2072 [(set _.KRC:$dst,(OpNode
2073 (_.VT (bitconvert (_.LdFrag addr:$src1))),
2074 (i32 imm:$src2)))], NoItinerary>;
2075 def rmk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
2076 (ins _.KRCWM:$mask, _.MemOp:$src1, i32u8imm:$src2),
2077 OpcodeStr##_.Suffix##mem#
2078 "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
2079 [(set _.KRC:$dst, (or _.KRCWM:$mask, (OpNode
2080 (_.VT (bitconvert (_.LdFrag addr:$src1))),
2081 (i32 imm:$src2))))], NoItinerary>, EVEX_K;
2082 def rmb : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
2083 (ins _.ScalarMemOp:$src1, i32u8imm:$src2),
2084 OpcodeStr##_.Suffix##broadcast##"\t{$src2, ${src1}"##
2085 _.BroadcastStr##", $dst|$dst, ${src1}"
2086 ##_.BroadcastStr##", $src2}",
2087 [(set _.KRC:$dst,(OpNode
2088 (_.VT (X86VBroadcast
2089 (_.ScalarLdFrag addr:$src1))),
2090 (i32 imm:$src2)))], NoItinerary>,EVEX_B;
2091 def rmbk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
2092 (ins _.KRCWM:$mask, _.ScalarMemOp:$src1, i32u8imm:$src2),
2093 OpcodeStr##_.Suffix##broadcast##"\t{$src2, ${src1}"##
2094 _.BroadcastStr##", $dst {${mask}}|$dst {${mask}}, ${src1}"##
2095 _.BroadcastStr##", $src2}",
2096 [(set _.KRC:$dst,(or _.KRCWM:$mask, (OpNode
2097 (_.VT (X86VBroadcast
2098 (_.ScalarLdFrag addr:$src1))),
2099 (i32 imm:$src2))))], NoItinerary>,
2103 multiclass avx512_vector_fpclass_all<string OpcodeStr,
2104 AVX512VLVectorVTInfo _, bits<8> opc, SDNode OpNode, Predicate prd,
2106 let Predicates = [prd] in {
2107 defm Z : avx512_vector_fpclass<opc, OpcodeStr, OpNode, _.info512, "{z}",
2108 broadcast>, EVEX_V512;
2110 let Predicates = [prd, HasVLX] in {
2111 defm Z128 : avx512_vector_fpclass<opc, OpcodeStr, OpNode, _.info128, "{x}",
2112 broadcast>, EVEX_V128;
2113 defm Z256 : avx512_vector_fpclass<opc, OpcodeStr, OpNode, _.info256, "{y}",
2114 broadcast>, EVEX_V256;
2118 multiclass avx512_fp_fpclass_all<string OpcodeStr, bits<8> opcVec,
2119 bits<8> opcScalar, SDNode VecOpNode, SDNode ScalarOpNode, Predicate prd>{
2120 defm PS : avx512_vector_fpclass_all<OpcodeStr, avx512vl_f32_info, opcVec,
2121 VecOpNode, prd, "{l}">, EVEX_CD8<32, CD8VF>;
2122 defm PD : avx512_vector_fpclass_all<OpcodeStr, avx512vl_f64_info, opcVec,
2123 VecOpNode, prd, "{q}">,EVEX_CD8<64, CD8VF> , VEX_W;
2124 defm SS : avx512_scalar_fpclass<opcScalar, OpcodeStr, ScalarOpNode,
2125 f32x_info, prd>, EVEX_CD8<32, CD8VT1>;
2126 defm SD : avx512_scalar_fpclass<opcScalar, OpcodeStr, ScalarOpNode,
2127 f64x_info, prd>, EVEX_CD8<64, CD8VT1>, VEX_W;
2130 defm VFPCLASS : avx512_fp_fpclass_all<"vfpclass", 0x66, 0x67, X86Vfpclass,
2131 X86Vfpclasss, HasDQI>, AVX512AIi8Base,EVEX;
2133 //-----------------------------------------------------------------
2134 // Mask register copy, including
2135 // - copy between mask registers
2136 // - load/store mask registers
2137 // - copy from GPR to mask register and vice versa
2139 multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
2140 string OpcodeStr, RegisterClass KRC,
2141 ValueType vvt, X86MemOperand x86memop> {
2142 let hasSideEffects = 0 in
2143 def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
2144 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
2145 def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
2146 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2147 [(set KRC:$dst, (vvt (load addr:$src)))]>;
2148 def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
2149 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2150 [(store KRC:$src, addr:$dst)]>;
2153 multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
2155 RegisterClass KRC, RegisterClass GRC> {
2156 let hasSideEffects = 0 in {
2157 def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
2158 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
2159 def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
2160 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
2164 let Predicates = [HasDQI] in
2165 defm KMOVB : avx512_mask_mov<0x90, 0x90, 0x91, "kmovb", VK8, v8i1, i8mem>,
2166 avx512_mask_mov_gpr<0x92, 0x93, "kmovb", VK8, GR32>,
2169 let Predicates = [HasAVX512] in
2170 defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
2171 avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
2174 let Predicates = [HasBWI] in {
2175 defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1,i32mem>,
2177 defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>,
2179 defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64mem>,
2181 defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>,
2185 // GR from/to mask register
2186 def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
2187 (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), GR16:$src, sub_16bit)), VK16)>;
2188 def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
2189 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS VK16:$src, GR32)), sub_16bit)>;
2191 def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
2192 (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), GR8:$src, sub_8bit)), VK8)>;
2193 def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
2194 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS VK8:$src, GR32)), sub_8bit)>;
2196 def : Pat<(i32 (zext (i16 (bitconvert (v16i1 VK16:$src))))),
2197 (KMOVWrk VK16:$src)>;
2198 def : Pat<(i32 (anyext (i16 (bitconvert (v16i1 VK16:$src))))),
2199 (COPY_TO_REGCLASS VK16:$src, GR32)>;
2201 def : Pat<(i32 (zext (i8 (bitconvert (v8i1 VK8:$src))))),
2202 (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS VK8:$src, GR32)), sub_8bit))>, Requires<[NoDQI]>;
2203 def : Pat<(i32 (zext (i8 (bitconvert (v8i1 VK8:$src))))),
2204 (KMOVBrk VK8:$src)>, Requires<[HasDQI]>;
2205 def : Pat<(i32 (anyext (i8 (bitconvert (v8i1 VK8:$src))))),
2206 (COPY_TO_REGCLASS VK8:$src, GR32)>;
2208 def : Pat<(v32i1 (bitconvert (i32 GR32:$src))),
2209 (COPY_TO_REGCLASS GR32:$src, VK32)>;
2210 def : Pat<(i32 (bitconvert (v32i1 VK32:$src))),
2211 (COPY_TO_REGCLASS VK32:$src, GR32)>;
2212 def : Pat<(v64i1 (bitconvert (i64 GR64:$src))),
2213 (COPY_TO_REGCLASS GR64:$src, VK64)>;
2214 def : Pat<(i64 (bitconvert (v64i1 VK64:$src))),
2215 (COPY_TO_REGCLASS VK64:$src, GR64)>;
2218 let Predicates = [HasDQI] in {
2219 def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
2220 (KMOVBmk addr:$dst, VK8:$src)>;
2221 def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
2222 (KMOVBkm addr:$src)>;
2224 def : Pat<(store VK4:$src, addr:$dst),
2225 (KMOVBmk addr:$dst, (COPY_TO_REGCLASS VK4:$src, VK8))>;
2226 def : Pat<(store VK2:$src, addr:$dst),
2227 (KMOVBmk addr:$dst, (COPY_TO_REGCLASS VK2:$src, VK8))>;
2228 def : Pat<(store VK1:$src, addr:$dst),
2229 (KMOVBmk addr:$dst, (COPY_TO_REGCLASS VK1:$src, VK8))>;
2231 def : Pat<(v2i1 (load addr:$src)),
2232 (COPY_TO_REGCLASS (KMOVBkm addr:$src), VK2)>;
2233 def : Pat<(v4i1 (load addr:$src)),
2234 (COPY_TO_REGCLASS (KMOVBkm addr:$src), VK4)>;
2236 let Predicates = [HasAVX512, NoDQI] in {
2237 def : Pat<(store VK1:$src, addr:$dst),
2239 (i8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS VK1:$src, GR32)),
2241 def : Pat<(store VK2:$src, addr:$dst),
2243 (i8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS VK2:$src, GR32)),
2245 def : Pat<(store VK4:$src, addr:$dst),
2247 (i8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS VK4:$src, GR32)),
2249 def : Pat<(store VK8:$src, addr:$dst),
2251 (i8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS VK8:$src, GR32)),
2254 def : Pat<(v8i1 (load addr:$src)),
2255 (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK8)>;
2256 def : Pat<(v2i1 (load addr:$src)),
2257 (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK2)>;
2258 def : Pat<(v4i1 (load addr:$src)),
2259 (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK4)>;
2262 let Predicates = [HasAVX512] in {
2263 def : Pat<(store (i16 (bitconvert (v16i1 VK16:$src))), addr:$dst),
2264 (KMOVWmk addr:$dst, VK16:$src)>;
2265 def : Pat<(v1i1 (load addr:$src)),
2266 (COPY_TO_REGCLASS (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1)), VK1)>;
2267 def : Pat<(v16i1 (bitconvert (i16 (load addr:$src)))),
2268 (KMOVWkm addr:$src)>;
2270 let Predicates = [HasBWI] in {
2271 def : Pat<(store (i32 (bitconvert (v32i1 VK32:$src))), addr:$dst),
2272 (KMOVDmk addr:$dst, VK32:$src)>;
2273 def : Pat<(v32i1 (bitconvert (i32 (load addr:$src)))),
2274 (KMOVDkm addr:$src)>;
2275 def : Pat<(store (i64 (bitconvert (v64i1 VK64:$src))), addr:$dst),
2276 (KMOVQmk addr:$dst, VK64:$src)>;
2277 def : Pat<(v64i1 (bitconvert (i64 (load addr:$src)))),
2278 (KMOVQkm addr:$src)>;
2281 let Predicates = [HasAVX512] in {
// Lower GPR <-> mask-register traffic (scalar_to_vector / X86Vextract of
// element 0) to plain register-class copies, per mask width maskRC/maskVT.
2282 multiclass operation_gpr_mask_copy_lowering<RegisterClass maskRC, ValueType maskVT> {
// GR32 source: a direct cross-class copy suffices.
2283 def : Pat<(maskVT (scalar_to_vector GR32:$src)),
2284 (COPY_TO_REGCLASS GR32:$src, maskRC)>;
2286 def : Pat<(i32 (X86Vextract maskRC:$src, (iPTR 0))),
2287 (COPY_TO_REGCLASS maskRC:$src, GR32)>;
// GR8 source: widen to 32 bits first via INSERT_SUBREG into an undef GR32.
2289 def : Pat<(maskVT (scalar_to_vector GR8:$src)),
2290 (COPY_TO_REGCLASS (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$src, sub_8bit), maskRC)>;
2292 def : Pat<(i8 (X86Vextract maskRC:$src, (iPTR 0))),
2293 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS maskRC:$src, GR32)), sub_8bit)>;
// anyext of an extracted i8 needs no explicit extension instruction.
2295 def : Pat<(i32 (anyext (i8 (X86Vextract maskRC:$src, (iPTR 0))))),
2296 (COPY_TO_REGCLASS maskRC:$src, GR32)>;
// Instantiate the GPR<->mask copy lowerings for every supported mask width.
2299 defm : operation_gpr_mask_copy_lowering<VK1, v1i1>;
2300 defm : operation_gpr_mask_copy_lowering<VK2, v2i1>;
2301 defm : operation_gpr_mask_copy_lowering<VK4, v4i1>;
2302 defm : operation_gpr_mask_copy_lowering<VK8, v8i1>;
2303 defm : operation_gpr_mask_copy_lowering<VK16, v16i1>;
2304 defm : operation_gpr_mask_copy_lowering<VK32, v32i1>;
2305 defm : operation_gpr_mask_copy_lowering<VK64, v64i1>;
// A kshiftl-by-15 followed by kshiftr-by-15 isolates bit 0; lower it to an
// AND with 1 in a GPR followed by KMOVWkr instead of two k-shifts.
// (Intermediate COPY_TO_REGCLASS lines are elided in this excerpt.)
2307 def : Pat<(X86kshiftr (X86kshiftl (v1i1 (scalar_to_vector GR8:$src)), (i8 15)), (i8 15)) ,
2309 (KMOVWkr (AND32ri8 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
2310 GR8:$src, sub_8bit), (i32 1))), VK1)>;
2311 def : Pat<(X86kshiftr (X86kshiftl (v16i1 (scalar_to_vector GR8:$src)), (i8 15)), (i8 15)) ,
2313 (KMOVWkr (AND32ri8 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
2314 GR8:$src, sub_8bit), (i32 1))), VK16)>;
2315 def : Pat<(X86kshiftr (X86kshiftl (v8i1 (scalar_to_vector GR8:$src)), (i8 15)), (i8 15)) ,
2317 (KMOVWkr (AND32ri8 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
2318 GR8:$src, sub_8bit), (i32 1))), VK8)>;
2322 // Mask unary operation
// avx512_mask_unop: one reg-reg mask instruction (e.g. KNOT) for a given
// mask register class, guarded by predicate prd.
2324 multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
2325 RegisterClass KRC, SDPatternOperator OpNode,
2327 let Predicates = [prd] in
2328 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
2329 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2330 [(set KRC:$dst, (OpNode KRC:$src))]>;
// _all: instantiate B/W/D/Q variants. B needs DQI, D/Q need BWI;
// the encodings differ only in prefix (PD/PS) and VEX_W.
2333 multiclass avx512_mask_unop_all<bits<8> opc, string OpcodeStr,
2334 SDPatternOperator OpNode> {
2335 defm B : avx512_mask_unop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
2337 defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
2338 HasAVX512>, VEX, PS;
2339 defm D : avx512_mask_unop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
2340 HasBWI>, VEX, PD, VEX_W;
2341 defm Q : avx512_mask_unop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
2342 HasBWI>, VEX, PS, VEX_W;
2345 defm KNOT : avx512_mask_unop_all<0x44, "knot", vnot>;
2347 // KNL does not support KMOVB, 8-bit mask is promoted to 16-bit
2348 let Predicates = [HasAVX512, NoDQI] in
2349 def : Pat<(vnot VK8:$src),
2350 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
// Sub-byte masks are always emulated on the 16-bit KNOTW; the upper bits
// of the wider mask are don't-care for the narrow result.
2352 def : Pat<(vnot VK4:$src),
2353 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK4:$src, VK16)), VK4)>;
2354 def : Pat<(vnot VK2:$src),
2355 (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK2:$src, VK16)), VK2)>;
2357 // Mask binary operation
2358 // - KAND, KANDN, KOR, KXNOR, KXOR
// avx512_mask_binop: one reg-reg-reg mask instruction; isCommutable is
// forwarded so the selector may swap operands for KAND/KOR/KXOR/KXNOR.
2359 multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
2360 RegisterClass KRC, SDPatternOperator OpNode,
2361 Predicate prd, bit IsCommutable> {
2362 let Predicates = [prd], isCommutable = IsCommutable in
2363 def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
2364 !strconcat(OpcodeStr,
2365 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2366 [(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
// _all: B/W/D/Q variants; prdW is overridable (KADD's W form needs DQI).
2369 multiclass avx512_mask_binop_all<bits<8> opc, string OpcodeStr,
2370 SDPatternOperator OpNode, bit IsCommutable,
2371 Predicate prdW = HasAVX512> {
2372 defm B : avx512_mask_binop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
2373 HasDQI, IsCommutable>, VEX_4V, VEX_L, PD;
2374 defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
2375 prdW, IsCommutable>, VEX_4V, VEX_L, PS;
2376 defm D : avx512_mask_binop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
2377 HasBWI, IsCommutable>, VEX_4V, VEX_L, VEX_W, PD;
2378 defm Q : avx512_mask_binop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
2379 HasBWI, IsCommutable>, VEX_4V, VEX_L, VEX_W, PS;
// Composite PatFrags so KANDN/KXNOR can match (and (not x), y) and
// (not (xor x, y)) directly.
2382 def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
2383 def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
2384 // These nodes use 'vnot' instead of 'not' to support vectors.
2385 def vandn : PatFrag<(ops node:$i0, node:$i1), (and (vnot node:$i0), node:$i1)>;
2386 def vxnor : PatFrag<(ops node:$i0, node:$i1), (vnot (xor node:$i0, node:$i1))>;
// KANDN is the only non-commutative op here (operand order matters).
2388 defm KAND : avx512_mask_binop_all<0x41, "kand", and, 1>;
2389 defm KOR : avx512_mask_binop_all<0x45, "kor", or, 1>;
2390 defm KXNOR : avx512_mask_binop_all<0x46, "kxnor", vxnor, 1>;
2391 defm KXOR : avx512_mask_binop_all<0x47, "kxor", xor, 1>;
2392 defm KANDN : avx512_mask_binop_all<0x42, "kandn", vandn, 0>;
2393 defm KADD : avx512_mask_binop_all<0x4A, "kadd", add, 1, HasDQI>;
// avx512_binop_pat: lower mask ops on types narrower than 16 bits onto the
// 16-bit (W-form) instruction Inst by widening both operands to VK16.
2395 multiclass avx512_binop_pat<SDPatternOperator VOpNode, SDPatternOperator OpNode,
2397 // With AVX512F, 8-bit mask is promoted to 16-bit mask,
2398 // for the DQI set, this type is legal and KxxxB instruction is used
2399 let Predicates = [NoDQI] in
2400 def : Pat<(VOpNode VK8:$src1, VK8:$src2),
2402 (Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
2403 (COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
2405 // All types smaller than 8 bits require conversion anyway
2406 def : Pat<(OpNode VK1:$src1, VK1:$src2),
2407 (COPY_TO_REGCLASS (Inst
2408 (COPY_TO_REGCLASS VK1:$src1, VK16),
2409 (COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
// v2i1/v4i1 ops are likewise emulated on the 16-bit instruction. The result
// must land back in the register class matching the result type: VK2 for a
// v2i1 pattern and VK4 for a v4i1 pattern (the original copied the result
// into VK1 in both cases — a copy-paste of the VK1 pattern above; the VK8
// pattern correctly uses VK8).
2410 def : Pat<(VOpNode VK2:$src1, VK2:$src2),
2411 (COPY_TO_REGCLASS (Inst
2412 (COPY_TO_REGCLASS VK2:$src1, VK16),
2413 (COPY_TO_REGCLASS VK2:$src2, VK16)), VK2)>;
2414 def : Pat<(VOpNode VK4:$src1, VK4:$src2),
2415 (COPY_TO_REGCLASS (Inst
2416 (COPY_TO_REGCLASS VK4:$src1, VK16),
2417 (COPY_TO_REGCLASS VK4:$src2, VK16)), VK4)>;
// Wire each narrow-mask logic op to its 16-bit W-form instruction.
// Vector forms (vandn/vxnor) pair with scalar forms (andn/xnor) because the
// VK1 pattern uses the scalar operator while VK2/VK4/VK8 use the vector one.
2420 defm : avx512_binop_pat<and, and, KANDWrr>;
2421 defm : avx512_binop_pat<vandn, andn, KANDNWrr>;
2422 defm : avx512_binop_pat<or, or, KORWrr>;
2423 defm : avx512_binop_pat<vxnor, xnor, KXNORWrr>;
2424 defm : avx512_binop_pat<xor, xor, KXORWrr>;
// KUNPCK: concatenate two half-width masks (KRCSrc) into one KRC mask.
// Note the operand swap in the pattern: the instruction places $src1 in the
// high half, so concat_vectors(lo, hi) maps to KUNPCK(hi, lo).
2427 multiclass avx512_mask_unpck<string Suffix,RegisterClass KRC, ValueType VT,
2428 RegisterClass KRCSrc, Predicate prd> {
2429 let Predicates = [prd] in {
2430 let hasSideEffects = 0 in
2431 def rr : I<0x4b, MRMSrcReg, (outs KRC:$dst),
2432 (ins KRC:$src1, KRC:$src2),
2433 "kunpck"#Suffix#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2436 def : Pat<(VT (concat_vectors KRCSrc:$src1, KRCSrc:$src2)),
2437 (!cast<Instruction>(NAME##rr)
2438 (COPY_TO_REGCLASS KRCSrc:$src2, KRC),
2439 (COPY_TO_REGCLASS KRCSrc:$src1, KRC))>;
2443 defm KUNPCKBW : avx512_mask_unpck<"bw", VK16, v16i1, VK8, HasAVX512>, PD;
2444 defm KUNPCKWD : avx512_mask_unpck<"wd", VK32, v32i1, VK16, HasBWI>, PS;
2445 defm KUNPCKDQ : avx512_mask_unpck<"dq", VK64, v64i1, VK32, HasBWI>, PS, VEX_W;
// KORTEST/KTEST: compare-style mask ops that only set EFLAGS (no register
// result), hence Defs = [EFLAGS] and an empty outs list.
2448 multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
2449 SDNode OpNode, Predicate prd> {
2450 let Predicates = [prd], Defs = [EFLAGS] in
2451 def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
2452 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2453 [(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
// B form needs DQI, D/Q need BWI; W predicate is overridable (KTEST's W
// form requires DQI rather than plain AVX512).
2456 multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode,
2457 Predicate prdW = HasAVX512> {
2458 defm B : avx512_mask_testop<opc, OpcodeStr#"b", VK8, OpNode, HasDQI>,
2460 defm W : avx512_mask_testop<opc, OpcodeStr#"w", VK16, OpNode, prdW>,
2462 defm Q : avx512_mask_testop<opc, OpcodeStr#"q", VK64, OpNode, HasBWI>,
2464 defm D : avx512_mask_testop<opc, OpcodeStr#"d", VK32, OpNode, HasBWI>,
2468 defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
2469 defm KTEST : avx512_mask_testop_w<0x99, "ktest", X86ktest, HasDQI>;
// KSHIFTL/KSHIFTR: mask shift by an 8-bit immediate.
2472 multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
2474 let Predicates = [HasAVX512] in
2475 def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, u8imm:$imm),
2476 !strconcat(OpcodeStr,
2477 "\t{$imm, $src, $dst|$dst, $src, $imm}"),
2478 [(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
// Two opcodes per direction: opc1 covers the B/W widths, opc2 covers D/Q.
2481 multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
2483 defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
2485 let Predicates = [HasDQI] in
2486 defm B : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "b"), VK8, OpNode>,
2488 let Predicates = [HasBWI] in {
2489 defm Q : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "q"), VK64, OpNode>,
2491 defm D : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "d"), VK32, OpNode>,
2496 defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86kshiftl>;
2497 defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", X86kshiftr>;
2499 // Mask setting all 0s or 1s
// KSET0/KSET1 are pseudos expanded post-RA (to kxor k,k,k / kxnor k,k,k);
// marked rematerializable and as cheap as a move so RA can re-create them.
2500 multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
2501 let Predicates = [HasAVX512] in
2502 let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
2503 def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
2504 [(set KRC:$dst, (VT Val))]>;
2507 multiclass avx512_mask_setop_w<PatFrag Val> {
2508 defm W : avx512_mask_setop<VK16, v16i1, Val>;
2509 defm D : avx512_mask_setop<VK32, v32i1, Val>;
2510 defm Q : avx512_mask_setop<VK64, v64i1, Val>;
2513 defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
2514 defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
2516 // With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
2517 let Predicates = [HasAVX512] in {
// Narrow all-zero/all-one constants are produced via the 16-bit pseudo and
// a register-class copy; upper bits are don't-care for the narrow type.
2518 def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
2519 def : Pat<(v4i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK4)>;
2520 def : Pat<(v2i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK2)>;
2521 def : Pat<(v1i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK1)>;
2522 def : Pat<(v8i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK8)>;
2523 def : Pat<(v4i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK4)>;
2524 def : Pat<(v2i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK2)>;
2525 def : Pat<(v1i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK1)>;
2528 // Patterns for kmask insert_subvector/extract_subvector to/from index=0
// At index 0 both directions are pure register-class copies: extracting the
// low sub-mask, and inserting into an undef wider mask (upper bits undef).
2529 multiclass operation_subvector_mask_lowering<RegisterClass subRC, ValueType subVT,
2530 RegisterClass RC, ValueType VT> {
2531 def : Pat<(subVT (extract_subvector (VT RC:$src), (iPTR 0))),
2532 (subVT (COPY_TO_REGCLASS RC:$src, subRC))>;
2534 def : Pat<(VT (insert_subvector undef, subRC:$src, (iPTR 0))),
2535 (VT (COPY_TO_REGCLASS subRC:$src, RC))>;
// Instantiate for every (narrow, wide) mask pair.
2537 defm : operation_subvector_mask_lowering<VK1, v1i1, VK2, v2i1>;
2538 defm : operation_subvector_mask_lowering<VK1, v1i1, VK4, v4i1>;
2539 defm : operation_subvector_mask_lowering<VK1, v1i1, VK8, v8i1>;
2540 defm : operation_subvector_mask_lowering<VK1, v1i1, VK16, v16i1>;
2541 defm : operation_subvector_mask_lowering<VK1, v1i1, VK32, v32i1>;
2542 defm : operation_subvector_mask_lowering<VK1, v1i1, VK64, v64i1>;
2544 defm : operation_subvector_mask_lowering<VK2, v2i1, VK4, v4i1>;
2545 defm : operation_subvector_mask_lowering<VK2, v2i1, VK8, v8i1>;
2546 defm : operation_subvector_mask_lowering<VK2, v2i1, VK16, v16i1>;
2547 defm : operation_subvector_mask_lowering<VK2, v2i1, VK32, v32i1>;
2548 defm : operation_subvector_mask_lowering<VK2, v2i1, VK64, v64i1>;
2550 defm : operation_subvector_mask_lowering<VK4, v4i1, VK8, v8i1>;
2551 defm : operation_subvector_mask_lowering<VK4, v4i1, VK16, v16i1>;
2552 defm : operation_subvector_mask_lowering<VK4, v4i1, VK32, v32i1>;
2553 defm : operation_subvector_mask_lowering<VK4, v4i1, VK64, v64i1>;
2555 defm : operation_subvector_mask_lowering<VK8, v8i1, VK16, v16i1>;
2556 defm : operation_subvector_mask_lowering<VK8, v8i1, VK32, v32i1>;
2557 defm : operation_subvector_mask_lowering<VK8, v8i1, VK64, v64i1>;
2559 defm : operation_subvector_mask_lowering<VK16, v16i1, VK32, v32i1>;
2560 defm : operation_subvector_mask_lowering<VK16, v16i1, VK64, v64i1>;
2562 defm : operation_subvector_mask_lowering<VK32, v32i1, VK64, v64i1>;
// Extract of the upper half of a mask: shift the high bits down with
// KSHIFTR of the appropriate width, then narrow via a regclass copy.
// Sub-16-bit masks are shifted in a VK16 (narrow k-shifts need DQI).
// (Trailing regclass lines of the first two patterns elided in this excerpt.)
2564 def : Pat<(v2i1 (extract_subvector (v4i1 VK4:$src), (iPTR 2))),
2565 (v2i1 (COPY_TO_REGCLASS
2566 (KSHIFTRWri (COPY_TO_REGCLASS VK4:$src, VK16), (i8 2)),
2568 def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 4))),
2569 (v4i1 (COPY_TO_REGCLASS
2570 (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (i8 4)),
2572 def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
2573 (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
2574 def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
2575 (v16i1 (COPY_TO_REGCLASS (KSHIFTRDri VK32:$src, (i8 16)), VK16))>;
2576 def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
2577 (v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
2580 // Patterns for kmask shift
// Narrow-mask shifts are emulated on the 16-bit KSHIFT{L,R}W by widening to
// VK16 and copying back. (Immediate/regclass tail lines elided here.)
2581 multiclass mask_shift_lowering<RegisterClass RC, ValueType VT> {
2582 def : Pat<(VT (X86kshiftl RC:$src, (i8 imm:$imm))),
2583 (VT (COPY_TO_REGCLASS
2584 (KSHIFTLWri (COPY_TO_REGCLASS RC:$src, VK16),
2587 def : Pat<(VT (X86kshiftr RC:$src, (i8 imm:$imm))),
2588 (VT (COPY_TO_REGCLASS
2589 (KSHIFTRWri (COPY_TO_REGCLASS RC:$src, VK16),
// VK8 only needs emulation without DQI (DQI provides KSHIFT*B).
2594 defm : mask_shift_lowering<VK8, v8i1>, Requires<[HasAVX512, NoDQI]>;
2595 defm : mask_shift_lowering<VK4, v4i1>, Requires<[HasAVX512]>;
2596 defm : mask_shift_lowering<VK2, v2i1>, Requires<[HasAVX512]>;
2597 //===----------------------------------------------------------------------===//
2598 // AVX-512 - Aligned and unaligned load and store
// avx512_load: the full family of EVEX load forms for one vector type _:
// rr/rrk/rrkz (register move with optional merge-/zero-masking) and
// rm/rmk/rmkz (memory load with optional masking), plus masked_load
// selection patterns. SelectOprr lets FP loads use X86select instead of
// vselect. (Several continuation lines are elided in this excerpt.)
2602 multiclass avx512_load<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
2603 PatFrag ld_frag, PatFrag mload,
2604 SDPatternOperator SelectOprr = vselect> {
2605 let hasSideEffects = 0 in {
2606 def rr : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst), (ins _.RC:$src),
2607 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
// Zero-masked register move: dst = mask ? src : 0.
2609 def rrkz : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst),
2610 (ins _.KRCWM:$mask, _.RC:$src),
2611 !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
2612 "${dst} {${mask}} {z}, $src}"),
2613 [(set _.RC:$dst, (_.VT (SelectOprr _.KRCWM:$mask,
2615 _.ImmAllZerosV)))], _.ExeDomain>,
2618 let canFoldAsLoad = 1, isReMaterializable = 1,
2619 SchedRW = [WriteLoad] in
2620 def rm : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst), (ins _.MemOp:$src),
2621 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2622 [(set _.RC:$dst, (_.VT (bitconvert (ld_frag addr:$src))))],
// Merge-masked forms tie $src0 to $dst (elements where mask=0 keep src0).
2625 let Constraints = "$src0 = $dst", isConvertibleToThreeAddress = 1 in {
2626 def rrk : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst),
2627 (ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src1),
2628 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
2629 "${dst} {${mask}}, $src1}"),
2630 [(set _.RC:$dst, (_.VT (SelectOprr _.KRCWM:$mask,
2632 (_.VT _.RC:$src0))))], _.ExeDomain>,
2634 let SchedRW = [WriteLoad] in
2635 def rmk : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst),
2636 (ins _.RC:$src0, _.KRCWM:$mask, _.MemOp:$src1),
2637 !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
2638 "${dst} {${mask}}, $src1}"),
2639 [(set _.RC:$dst, (_.VT
2640 (vselect _.KRCWM:$mask,
2641 (_.VT (bitconvert (ld_frag addr:$src1))),
2642 (_.VT _.RC:$src0))))], _.ExeDomain>, EVEX, EVEX_K;
2644 let SchedRW = [WriteLoad] in
2645 def rmkz : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst),
2646 (ins _.KRCWM:$mask, _.MemOp:$src),
2647 OpcodeStr #"\t{$src, ${dst} {${mask}} {z}|"#
2648 "${dst} {${mask}} {z}, $src}",
2649 [(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask,
2650 (_.VT (bitconvert (ld_frag addr:$src))), _.ImmAllZerosV)))],
2651 _.ExeDomain>, EVEX, EVEX_KZ;
// masked_load with undef or zero passthru -> zero-masked load; with a
// register passthru -> merge-masked load.
2653 def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, undef)),
2654 (!cast<Instruction>(NAME#_.ZSuffix##rmkz) _.KRCWM:$mask, addr:$ptr)>;
2656 def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, _.ImmAllZerosV)),
2657 (!cast<Instruction>(NAME#_.ZSuffix##rmkz) _.KRCWM:$mask, addr:$ptr)>;
2659 def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, (_.VT _.RC:$src0))),
2660 (!cast<Instruction>(NAME#_.ZSuffix##rmk) _.RC:$src0,
2661 _.KRCWM:$mask, addr:$ptr)>;
// VL wrappers: always define the 512-bit form under prd; the 256/128-bit
// forms additionally require HasVLX.
2664 multiclass avx512_alignedload_vl<bits<8> opc, string OpcodeStr,
2665 AVX512VLVectorVTInfo _,
2667 let Predicates = [prd] in
2668 defm Z : avx512_load<opc, OpcodeStr, _.info512, _.info512.AlignedLdFrag,
2669 masked_load_aligned512>, EVEX_V512;
2671 let Predicates = [prd, HasVLX] in {
2672 defm Z256 : avx512_load<opc, OpcodeStr, _.info256, _.info256.AlignedLdFrag,
2673 masked_load_aligned256>, EVEX_V256;
2674 defm Z128 : avx512_load<opc, OpcodeStr, _.info128, _.info128.AlignedLdFrag,
2675 masked_load_aligned128>, EVEX_V128;
// Unaligned variant: same structure, unaligned load frags.
2679 multiclass avx512_load_vl<bits<8> opc, string OpcodeStr,
2680 AVX512VLVectorVTInfo _,
2682 SDPatternOperator SelectOprr = vselect> {
2683 let Predicates = [prd] in
2684 defm Z : avx512_load<opc, OpcodeStr, _.info512, _.info512.LdFrag,
2685 masked_load_unaligned, SelectOprr>, EVEX_V512;
2687 let Predicates = [prd, HasVLX] in {
2688 defm Z256 : avx512_load<opc, OpcodeStr, _.info256, _.info256.LdFrag,
2689 masked_load_unaligned, SelectOprr>, EVEX_V256;
2690 defm Z128 : avx512_load<opc, OpcodeStr, _.info128, _.info128.LdFrag,
2691 masked_load_unaligned, SelectOprr>, EVEX_V128;
// avx512_store: store forms for one vector type _: rr_REV/rrk_REV/rrkz_REV
// (register moves with the ".s" store-form encoding, used by the assembler
// and linked to the load forms via FoldGenData) plus mr/mrk memory stores
// and the masked_store pattern.
2695 multiclass avx512_store<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
2696 PatFrag st_frag, PatFrag mstore, string Name> {
2698 let hasSideEffects = 0 in {
2699 def rr_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst), (ins _.RC:$src),
2700 OpcodeStr # ".s\t{$src, $dst|$dst, $src}",
2701 [], _.ExeDomain>, EVEX, FoldGenData<Name#rr>;
2702 def rrk_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst),
2703 (ins _.KRCWM:$mask, _.RC:$src),
2704 OpcodeStr # ".s\t{$src, ${dst} {${mask}}|"#
2705 "${dst} {${mask}}, $src}",
2706 [], _.ExeDomain>, EVEX, EVEX_K, FoldGenData<Name#rrk>;
2707 def rrkz_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst),
2708 (ins _.KRCWM:$mask, _.RC:$src),
2709 OpcodeStr # ".s\t{$src, ${dst} {${mask}} {z}|" #
2710 "${dst} {${mask}} {z}, $src}",
2711 [], _.ExeDomain>, EVEX, EVEX_KZ, FoldGenData<Name#rrkz>;
2714 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins _.MemOp:$dst, _.RC:$src),
2715 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2716 [(st_frag (_.VT _.RC:$src), addr:$dst)], _.ExeDomain>, EVEX;
// Masked store has no pattern on the instruction; selected via the Pat below.
2717 def mrk : AVX512PI<opc, MRMDestMem, (outs),
2718 (ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
2719 OpcodeStr # "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}",
2720 [], _.ExeDomain>, EVEX, EVEX_K;
2722 def: Pat<(mstore addr:$ptr, _.KRCWM:$mask, (_.VT _.RC:$src)),
2723 (!cast<Instruction>(NAME#_.ZSuffix##mrk) addr:$ptr,
2724 _.KRCWM:$mask, _.RC:$src)>;
// VL wrappers for stores, mirroring the load wrappers above.
2728 multiclass avx512_store_vl< bits<8> opc, string OpcodeStr,
2729 AVX512VLVectorVTInfo _, Predicate prd,
2731 let Predicates = [prd] in
2732 defm Z : avx512_store<opc, OpcodeStr, _.info512, store,
2733 masked_store_unaligned, Name#Z>, EVEX_V512;
2735 let Predicates = [prd, HasVLX] in {
2736 defm Z256 : avx512_store<opc, OpcodeStr, _.info256, store,
2737 masked_store_unaligned, Name#Z256>, EVEX_V256;
2738 defm Z128 : avx512_store<opc, OpcodeStr, _.info128, store,
2739 masked_store_unaligned, Name#Z128>, EVEX_V128;
// Aligned variant: per-width alignedstore frags (64/32/16-byte alignment).
2743 multiclass avx512_alignedstore_vl<bits<8> opc, string OpcodeStr,
2744 AVX512VLVectorVTInfo _, Predicate prd,
2746 let Predicates = [prd] in
2747 defm Z : avx512_store<opc, OpcodeStr, _.info512, alignedstore512,
2748 masked_store_aligned512, Name#Z>, EVEX_V512;
2750 let Predicates = [prd, HasVLX] in {
2751 defm Z256 : avx512_store<opc, OpcodeStr, _.info256, alignedstore256,
2752 masked_store_aligned256, Name#Z256>, EVEX_V256;
2753 defm Z128 : avx512_store<opc, OpcodeStr, _.info128, alignedstore,
2754 masked_store_aligned128, Name#Z128>, EVEX_V128;
// Instantiate the full VMOV family: aligned FP (VMOVAPS/APD), unaligned FP
// (VMOVUPS/UPD), aligned int (VMOVDQA32/64) and unaligned int
// (VMOVDQU8/16/32/64). Byte/word forms need BWI. (Some predicate
// continuation lines are elided in this excerpt.)
2758 defm VMOVAPS : avx512_alignedload_vl<0x28, "vmovaps", avx512vl_f32_info,
2760 avx512_alignedstore_vl<0x29, "vmovaps", avx512vl_f32_info,
2761 HasAVX512, "VMOVAPS">,
2762 PS, EVEX_CD8<32, CD8VF>;
2764 defm VMOVAPD : avx512_alignedload_vl<0x28, "vmovapd", avx512vl_f64_info,
2766 avx512_alignedstore_vl<0x29, "vmovapd", avx512vl_f64_info,
2767 HasAVX512, "VMOVAPD">,
2768 PD, VEX_W, EVEX_CD8<64, CD8VF>;
2770 defm VMOVUPS : avx512_load_vl<0x10, "vmovups", avx512vl_f32_info, HasAVX512,
2772 avx512_store_vl<0x11, "vmovups", avx512vl_f32_info, HasAVX512,
2774 PS, EVEX_CD8<32, CD8VF>;
2776 defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", avx512vl_f64_info, HasAVX512,
2778 avx512_store_vl<0x11, "vmovupd", avx512vl_f64_info, HasAVX512,
2780 PD, VEX_W, EVEX_CD8<64, CD8VF>;
2782 defm VMOVDQA32 : avx512_alignedload_vl<0x6F, "vmovdqa32", avx512vl_i32_info,
2784 avx512_alignedstore_vl<0x7F, "vmovdqa32", avx512vl_i32_info,
2785 HasAVX512, "VMOVDQA32">,
2786 PD, EVEX_CD8<32, CD8VF>;
2788 defm VMOVDQA64 : avx512_alignedload_vl<0x6F, "vmovdqa64", avx512vl_i64_info,
2790 avx512_alignedstore_vl<0x7F, "vmovdqa64", avx512vl_i64_info,
2791 HasAVX512, "VMOVDQA64">,
2792 PD, VEX_W, EVEX_CD8<64, CD8VF>;
2794 defm VMOVDQU8 : avx512_load_vl<0x6F, "vmovdqu8", avx512vl_i8_info, HasBWI>,
2795 avx512_store_vl<0x7F, "vmovdqu8", avx512vl_i8_info,
2796 HasBWI, "VMOVDQU8">,
2797 XD, EVEX_CD8<8, CD8VF>;
2799 defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", avx512vl_i16_info, HasBWI>,
2800 avx512_store_vl<0x7F, "vmovdqu16", avx512vl_i16_info,
2801 HasBWI, "VMOVDQU16">,
2802 XD, VEX_W, EVEX_CD8<16, CD8VF>;
2804 defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", avx512vl_i32_info, HasAVX512,
2806 avx512_store_vl<0x7F, "vmovdqu32", avx512vl_i32_info,
2807 HasAVX512, "VMOVDQU32">,
2808 XS, EVEX_CD8<32, CD8VF>;
2810 defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", avx512vl_i64_info, HasAVX512,
2812 avx512_store_vl<0x7F, "vmovdqu64", avx512vl_i64_info,
2813 HasAVX512, "VMOVDQU64">,
2814 XS, VEX_W, EVEX_CD8<64, CD8VF>;
2816 // Special instructions to help with spilling when we don't have VLX. We need
2817 // to load or store from a ZMM register instead. These are converted in
2818 // expandPostRAPseudos.
// (The asm-string/operand continuation lines of these pseudos are elided in
// this excerpt.)
2819 let isReMaterializable = 1, canFoldAsLoad = 1,
2820 isPseudo = 1, SchedRW = [WriteLoad], mayLoad = 1, hasSideEffects = 0 in {
2821 def VMOVAPSZ128rm_NOVLX : I<0, Pseudo, (outs VR128X:$dst), (ins f128mem:$src),
2823 def VMOVAPSZ256rm_NOVLX : I<0, Pseudo, (outs VR256X:$dst), (ins f256mem:$src),
2825 def VMOVUPSZ128rm_NOVLX : I<0, Pseudo, (outs VR128X:$dst), (ins f128mem:$src),
2827 def VMOVUPSZ256rm_NOVLX : I<0, Pseudo, (outs VR256X:$dst), (ins f256mem:$src),
2831 let isPseudo = 1, mayStore = 1, hasSideEffects = 0 in {
2832 def VMOVAPSZ128mr_NOVLX : I<0, Pseudo, (outs), (ins f128mem:$dst, VR128X:$src),
2834 def VMOVAPSZ256mr_NOVLX : I<0, Pseudo, (outs), (ins f256mem:$dst, VR256X:$src),
2836 def VMOVUPSZ128mr_NOVLX : I<0, Pseudo, (outs), (ins f128mem:$dst, VR128X:$src),
2838 def VMOVUPSZ256mr_NOVLX : I<0, Pseudo, (outs), (ins f256mem:$dst, VR256X:$src),
// vselect that chooses zero for mask=1 lanes: invert the mask with KNOTW
// and use the zero-masked register move. (A continuation line of the first
// pattern is elided in this excerpt.)
2842 def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
2843 (v8i64 VR512:$src))),
2844 (VMOVDQA64Zrrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
2847 def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
2848 (v16i32 VR512:$src))),
2849 (VMOVDQA32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
2851 // These patterns exist to prevent the above patterns from introducing a second
2852 // mask inversion when one already exists.
2853 def : Pat<(v8i64 (vselect (xor VK8:$mask, (v8i1 immAllOnesV)),
2854 (bc_v8i64 (v16i32 immAllZerosV)),
2855 (v8i64 VR512:$src))),
2856 (VMOVDQA64Zrrkz VK8:$mask, VR512:$src)>;
2857 def : Pat<(v16i32 (vselect (xor VK16:$mask, (v16i1 immAllOnesV)),
2858 (v16i32 immAllZerosV),
2859 (v16i32 VR512:$src))),
2860 (VMOVDQA32Zrrkz VK16WM:$mask, VR512:$src)>;
2862 // Patterns for handling v8i1 selects of 256-bit vectors when VLX isn't
2863 // available. Use a 512-bit operation and extract.
// Widen both 256-bit operands into undef ZMM registers, run the 512-bit
// masked move with the mask widened to VK16WM, then extract the low YMM.
// (The VMOVAPSZrrk/VMOVDQA32Zrrk opener and EXTRACT_SUBREG closer lines of
// each pattern are elided in this excerpt.)
2864 let Predicates = [HasAVX512, NoVLX] in {
2865 def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
2866 (v8f32 VR256X:$src0))),
2870 (v16f32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src0, sub_ymm)),
2871 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
2872 (v16f32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)))),
2875 def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
2876 (v8i32 VR256X:$src0))),
2880 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src0, sub_ymm)),
2881 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
2882 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)))),
// Without BWI there are no vmovdqu8/16; byte/word vectors are stored with
// the dword forms (bit pattern is identical, only the element-size tag and
// masking granularity differ, and these stores are unmasked).
2886 let Predicates = [HasVLX, NoBWI] in {
2887 // 128-bit load/store without BWI.
2888 def : Pat<(alignedstore (v8i16 VR128X:$src), addr:$dst),
2889 (VMOVDQA32Z128mr addr:$dst, VR128X:$src)>;
2890 def : Pat<(alignedstore (v16i8 VR128X:$src), addr:$dst),
2891 (VMOVDQA32Z128mr addr:$dst, VR128X:$src)>;
2892 def : Pat<(store (v8i16 VR128X:$src), addr:$dst),
2893 (VMOVDQU32Z128mr addr:$dst, VR128X:$src)>;
2894 def : Pat<(store (v16i8 VR128X:$src), addr:$dst),
2895 (VMOVDQU32Z128mr addr:$dst, VR128X:$src)>;
2897 // 256-bit load/store without BWI.
2898 def : Pat<(alignedstore256 (v16i16 VR256X:$src), addr:$dst),
2899 (VMOVDQA32Z256mr addr:$dst, VR256X:$src)>;
2900 def : Pat<(alignedstore256 (v32i8 VR256X:$src), addr:$dst),
2901 (VMOVDQA32Z256mr addr:$dst, VR256X:$src)>;
2902 def : Pat<(store (v16i16 VR256X:$src), addr:$dst),
2903 (VMOVDQU32Z256mr addr:$dst, VR256X:$src)>;
2904 def : Pat<(store (v32i8 VR256X:$src), addr:$dst),
2905 (VMOVDQU32Z256mr addr:$dst, VR256X:$src)>;
2908 let Predicates = [HasVLX] in {
2909 // Special patterns for storing subvector extracts of lower 128-bits of 256.
2910 // Its cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
// Each pattern takes the low XMM via EXTRACT_SUBREG and issues a plain
// 128-bit store; aligned forms first, then unaligned.
2911 def : Pat<(alignedstore (v2f64 (extract_subvector
2912 (v4f64 VR256X:$src), (iPTR 0))), addr:$dst),
2913 (VMOVAPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
2914 def : Pat<(alignedstore (v4f32 (extract_subvector
2915 (v8f32 VR256X:$src), (iPTR 0))), addr:$dst),
2916 (VMOVAPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
2917 def : Pat<(alignedstore (v2i64 (extract_subvector
2918 (v4i64 VR256X:$src), (iPTR 0))), addr:$dst),
2919 (VMOVDQA64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
2920 def : Pat<(alignedstore (v4i32 (extract_subvector
2921 (v8i32 VR256X:$src), (iPTR 0))), addr:$dst),
2922 (VMOVDQA32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
2923 def : Pat<(alignedstore (v8i16 (extract_subvector
2924 (v16i16 VR256X:$src), (iPTR 0))), addr:$dst),
2925 (VMOVDQA32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
2926 def : Pat<(alignedstore (v16i8 (extract_subvector
2927 (v32i8 VR256X:$src), (iPTR 0))), addr:$dst),
2928 (VMOVDQA32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
2930 def : Pat<(store (v2f64 (extract_subvector
2931 (v4f64 VR256X:$src), (iPTR 0))), addr:$dst),
2932 (VMOVUPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
2933 def : Pat<(store (v4f32 (extract_subvector
2934 (v8f32 VR256X:$src), (iPTR 0))), addr:$dst),
2935 (VMOVUPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
2936 def : Pat<(store (v2i64 (extract_subvector
2937 (v4i64 VR256X:$src), (iPTR 0))), addr:$dst),
2938 (VMOVDQU64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
2939 def : Pat<(store (v4i32 (extract_subvector
2940 (v8i32 VR256X:$src), (iPTR 0))), addr:$dst),
2941 (VMOVDQU32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
2942 def : Pat<(store (v8i16 (extract_subvector
2943 (v16i16 VR256X:$src), (iPTR 0))), addr:$dst),
2944 (VMOVDQU32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
2945 def : Pat<(store (v16i8 (extract_subvector
2946 (v32i8 VR256X:$src), (iPTR 0))), addr:$dst),
2947 (VMOVDQU32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
2949 // Special patterns for storing subvector extracts of lower 128-bits of 512.
2950 // Its cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
// Same scheme as the 256-bit case above, but the source is a ZMM register.
2951 def : Pat<(alignedstore (v2f64 (extract_subvector
2952 (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
2953 (VMOVAPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
2954 def : Pat<(alignedstore (v4f32 (extract_subvector
2955 (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
2956 (VMOVAPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
2957 def : Pat<(alignedstore (v2i64 (extract_subvector
2958 (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
2959 (VMOVDQA64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
2960 def : Pat<(alignedstore (v4i32 (extract_subvector
2961 (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
2962 (VMOVDQA32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
2963 def : Pat<(alignedstore (v8i16 (extract_subvector
2964 (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
2965 (VMOVDQA32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
2966 def : Pat<(alignedstore (v16i8 (extract_subvector
2967 (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
2968 (VMOVDQA32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
2970 def : Pat<(store (v2f64 (extract_subvector
2971 (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
2972 (VMOVUPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
2973 def : Pat<(store (v4f32 (extract_subvector
2974 (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
2975 (VMOVUPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
2976 def : Pat<(store (v2i64 (extract_subvector
2977 (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
2978 (VMOVDQU64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
2979 def : Pat<(store (v4i32 (extract_subvector
2980 (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
2981 (VMOVDQU32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
2982 def : Pat<(store (v8i16 (extract_subvector
2983 (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
2984 (VMOVDQU32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
2985 def : Pat<(store (v16i8 (extract_subvector
2986 (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
2987 (VMOVDQU32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
2989 // Special patterns for storing subvector extracts of lower 256-bits of 512.
2990 // Its cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
// 256-bit stores of the low YMM of a ZMM; alignedstore256 requires the
// 32-byte alignment that the YMM aligned-store instructions demand.
2991 def : Pat<(alignedstore256 (v4f64 (extract_subvector
2992 (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
2993 (VMOVAPDZ256mr addr:$dst, (v4f64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
// A 256-bit aligned store must require 32-byte alignment (alignedstore256),
// matching every sibling pattern in this group. The original used plain
// `alignedstore` (16-byte), which could select the 32-byte-aligned
// VMOVAPSZ256mr for an insufficiently aligned address and fault at runtime.
2994 def : Pat<(alignedstore256 (v8f32 (extract_subvector
2995 (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
2996 (VMOVAPSZ256mr addr:$dst, (v8f32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
// Remaining low-YMM-of-ZMM stores: aligned integer forms, then unaligned
// FP and integer forms.
2997 def : Pat<(alignedstore256 (v4i64 (extract_subvector
2998 (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
2999 (VMOVDQA64Z256mr addr:$dst, (v4i64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3000 def : Pat<(alignedstore256 (v8i32 (extract_subvector
3001 (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
3002 (VMOVDQA32Z256mr addr:$dst, (v8i32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3003 def : Pat<(alignedstore256 (v16i16 (extract_subvector
3004 (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
3005 (VMOVDQA32Z256mr addr:$dst, (v16i16 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3006 def : Pat<(alignedstore256 (v32i8 (extract_subvector
3007 (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
3008 (VMOVDQA32Z256mr addr:$dst, (v32i8 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3010 def : Pat<(store (v4f64 (extract_subvector
3011 (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
3012 (VMOVUPDZ256mr addr:$dst, (v4f64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3013 def : Pat<(store (v8f32 (extract_subvector
3014 (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
3015 (VMOVUPSZ256mr addr:$dst, (v8f32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3016 def : Pat<(store (v4i64 (extract_subvector
3017 (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
3018 (VMOVDQU64Z256mr addr:$dst, (v4i64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3019 def : Pat<(store (v8i32 (extract_subvector
3020 (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
3021 (VMOVDQU32Z256mr addr:$dst, (v8i32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3022 def : Pat<(store (v16i16 (extract_subvector
3023 (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
3024 (VMOVDQU32Z256mr addr:$dst, (v16i16 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3025 def : Pat<(store (v32i8 (extract_subvector
3026 (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
3027 (VMOVDQU32Z256mr addr:$dst, (v32i8 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
3031 // Move Int Doubleword to Packed Double Int
// VMOVD/VMOVQ between GPRs and XMM registers (EVEX-encoded). isCodeGenOnly
// forms move via the scalar FP register classes FR32X/FR64X.
// (Some `[(set ...` opener lines are elided in this excerpt.)
3033 let ExeDomain = SSEPackedInt in {
3034 def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
3035 "vmovd\t{$src, $dst|$dst, $src}",
3037 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
3039 def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
3040 "vmovd\t{$src, $dst|$dst, $src}",
3042 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
3043 IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>;
3044 def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
3045 "vmovq\t{$src, $dst|$dst, $src}",
3047 (v2i64 (scalar_to_vector GR64:$src)))],
3048 IIC_SSE_MOVDQ>, EVEX, VEX_W;
// Load form exists for the disassembler only (no pattern).
3049 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
3050 def VMOV64toPQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
3052 "vmovq\t{$src, $dst|$dst, $src}", []>,
3053 EVEX, VEX_W, EVEX_CD8<64, CD8VT1>;
3054 let isCodeGenOnly = 1 in {
3055 def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64X:$dst), (ins GR64:$src),
3056 "vmovq\t{$src, $dst|$dst, $src}",
3057 [(set FR64X:$dst, (bitconvert GR64:$src))],
3058 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
3059 def VMOV64toSDZrm : AVX512XSI<0x7E, MRMSrcMem, (outs FR64X:$dst), (ins i64mem:$src),
3060 "vmovq\t{$src, $dst|$dst, $src}",
3061 [(set FR64X:$dst, (bitconvert (loadi64 addr:$src)))]>,
3062 EVEX, VEX_W, EVEX_CD8<8, CD8VT8>;
3063 def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64X:$src),
3064 "vmovq\t{$src, $dst|$dst, $src}",
3065 [(set GR64:$dst, (bitconvert FR64X:$src))],
3066 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
3067 def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64X:$src),
3068 "vmovq\t{$src, $dst|$dst, $src}",
3069 [(store (i64 (bitconvert FR64X:$src)), addr:$dst)],
3070 IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
3071 EVEX_CD8<64, CD8VT1>;
3073 } // ExeDomain = SSEPackedInt
3075 // Move Int Doubleword to Single Scalar
// Bitcast GR32/i32-memory into an FR32X scalar; codegen-only since the
// encoding is shared with VMOVDI2PDIZ.
3077 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
3078 def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
3079 "vmovd\t{$src, $dst|$dst, $src}",
3080 [(set FR32X:$dst, (bitconvert GR32:$src))],
3081 IIC_SSE_MOVDQ>, EVEX;
3083 def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
3084 "vmovd\t{$src, $dst|$dst, $src}",
3085 [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
3086 IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>;
3087 } // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
3089 // Move doubleword from xmm register to r/m32
// Extract element 0 of a v4i32 into a GR32 or store it to memory.
3091 let ExeDomain = SSEPackedInt in {
3092 def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
3093 "vmovd\t{$src, $dst|$dst, $src}",
3094 [(set GR32:$dst, (extractelt (v4i32 VR128X:$src),
3095 (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
3097 def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
3098 (ins i32mem:$dst, VR128X:$src),
3099 "vmovd\t{$src, $dst|$dst, $src}",
3100 [(store (i32 (extractelt (v4i32 VR128X:$src),
3101 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
3102 EVEX, EVEX_CD8<32, CD8VT1>;
3103 } // ExeDomain = SSEPackedInt
3105 // Move quadword from xmm1 register to r/m64
3107 let ExeDomain = SSEPackedInt in {
// Extract element 0 of a v2i64 into a GR64; requires 64-bit mode.
3108 def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
3109 "vmovq\t{$src, $dst|$dst, $src}",
3110 [(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
3112 IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_W,
3113 Requires<[HasAVX512, In64BitMode]>;
// Store form kept only for the disassembler (no isel pattern).
3115 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
3116 def VMOVPQIto64Zmr : I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128X:$src),
3117 "vmovq\t{$src, $dst|$dst, $src}",
3118 [], IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_W,
3119 Requires<[HasAVX512, In64BitMode]>;
// 0xD6 encoding: store low qword of xmm to memory.
3121 def VMOVPQI2QIZmr : I<0xD6, MRMDestMem, (outs),
3122 (ins i64mem:$dst, VR128X:$src),
3123 "vmovq\t{$src, $dst|$dst, $src}",
3124 [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
3125 addr:$dst)], IIC_SSE_MOVDQ>,
3126 EVEX, PD, VEX_W, EVEX_CD8<64, CD8VT1>,
3127 Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
// Register-to-register alternate encoding, printed as "vmovq.s".
3129 let hasSideEffects = 0 in
3130 def VMOVPQI2QIZrr : AVX512BI<0xD6, MRMDestReg, (outs VR128X:$dst),
3132 "vmovq.s\t{$src, $dst|$dst, $src}",[]>,
3134 } // ExeDomain = SSEPackedInt
3136 // Move Scalar Single to Double Int
// Bitcast FR32X -> GR32 (register form) or store it as an i32 (memory form).
3138 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
3139 def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
3141 "vmovd\t{$src, $dst|$dst, $src}",
3142 [(set GR32:$dst, (bitconvert FR32X:$src))],
3143 IIC_SSE_MOVD_ToGP>, EVEX;
3144 def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
3145 (ins i32mem:$dst, FR32X:$src),
3146 "vmovd\t{$src, $dst|$dst, $src}",
3147 [(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
3148 IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>;
3149 } // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
3151 // Move Quadword Int to Packed Quadword Int
// Load an i64 from memory into element 0 of a v2i64.
3153 let ExeDomain = SSEPackedInt in {
3154 def VMOVQI2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
3156 "vmovq\t{$src, $dst|$dst, $src}",
3158 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
3159 EVEX, VEX_W, EVEX_CD8<8, CD8VT8>;
3160 } // ExeDomain = SSEPackedInt
3162 //===----------------------------------------------------------------------===//
3163 // AVX-512 MOVSS, MOVSD
3164 //===----------------------------------------------------------------------===//
// Common template for VMOVSS/VMOVSD: merge a scalar FRC value into element 0
// of a vector register, with unmasked (rr), zero-masked (rrkz), and
// merge-masked (rrk) register forms, plus load (rm/rmk/rmkz) and store
// (mr/mrk) forms. Masked selection is modeled with X86selects on a VK1 mask.
3166 multiclass avx512_move_scalar<string asm, SDNode OpNode,
3167 X86VectorVTInfo _> {
3168 def rr : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
3169 (ins _.RC:$src1, _.FRC:$src2),
3170 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3171 [(set _.RC:$dst, (_.VT (OpNode _.RC:$src1,
3172 (scalar_to_vector _.FRC:$src2))))],
3173 _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V;
3174 def rrkz : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
3175 (ins _.KRCWM:$mask, _.RC:$src1, _.FRC:$src2),
3176 !strconcat(asm, "\t{$src2, $src1, $dst {${mask}} {z}|",
3177 "$dst {${mask}} {z}, $src1, $src2}"),
3178 [(set _.RC:$dst, (_.VT (X86selects _.KRCWM:$mask,
3179 (_.VT (OpNode _.RC:$src1,
3180 (scalar_to_vector _.FRC:$src2))),
3182 _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V, EVEX_KZ;
// Merge-masking ties the pass-through operand to the destination.
3183 let Constraints = "$src0 = $dst" in
3184 def rrk : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
3185 (ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src1, _.FRC:$src2),
3186 !strconcat(asm, "\t{$src2, $src1, $dst {${mask}}|",
3187 "$dst {${mask}}, $src1, $src2}"),
3188 [(set _.RC:$dst, (_.VT (X86selects _.KRCWM:$mask,
3189 (_.VT (OpNode _.RC:$src1,
3190 (scalar_to_vector _.FRC:$src2))),
3191 (_.VT _.RC:$src0))))],
3192 _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V, EVEX_K;
// Unmasked load is a plain scalar load into an FRC register.
3193 let canFoldAsLoad = 1, isReMaterializable = 1 in
3194 def rm : AVX512PI<0x10, MRMSrcMem, (outs _.FRC:$dst), (ins _.ScalarMemOp:$src),
3195 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
3196 [(set _.FRC:$dst, (_.ScalarLdFrag addr:$src))],
3197 _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX;
// Masked loads have no isel patterns here; lowering helpers select them.
3198 let mayLoad = 1, hasSideEffects = 0 in {
3199 let Constraints = "$src0 = $dst" in
3200 def rmk : AVX512PI<0x10, MRMSrcMem, (outs _.RC:$dst),
3201 (ins _.RC:$src0, _.KRCWM:$mask, _.ScalarMemOp:$src),
3202 !strconcat(asm, "\t{$src, $dst {${mask}}|",
3203 "$dst {${mask}}, $src}"),
3204 [], _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX, EVEX_K;
3205 def rmkz : AVX512PI<0x10, MRMSrcMem, (outs _.RC:$dst),
3206 (ins _.KRCWM:$mask, _.ScalarMemOp:$src),
3207 !strconcat(asm, "\t{$src, $dst {${mask}} {z}|",
3208 "$dst {${mask}} {z}, $src}"),
3209 [], _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX, EVEX_KZ;
3211 def mr: AVX512PI<0x11, MRMDestMem, (outs), (ins _.ScalarMemOp:$dst, _.FRC:$src),
3212 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
3213 [(store _.FRC:$src, addr:$dst)], _.ExeDomain, IIC_SSE_MOV_S_MR>,
3215 let mayStore = 1, hasSideEffects = 0 in
3216 def mrk: AVX512PI<0x11, MRMDestMem, (outs),
3217 (ins _.ScalarMemOp:$dst, VK1WM:$mask, _.FRC:$src),
3218 !strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
3219 [], _.ExeDomain, IIC_SSE_MOV_S_MR>, EVEX, EVEX_K;
// Instantiate the scalar-move template for f32 (XS prefix) and f64
// (XD prefix + VEX_W).
3222 defm VMOVSSZ : avx512_move_scalar<"vmovss", X86Movss, f32x_info>,
3223 VEX_LIG, XS, EVEX_CD8<32, CD8VT1>;
3225 defm VMOVSDZ : avx512_move_scalar<"vmovsd", X86Movsd, f64x_info>,
3226 VEX_LIG, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
// Match scalar selects whose GR32 mask has been truncated/masked to one bit,
// lowering them to the masked (rrk) or zero-masked (rrkz) move-scalar form.
3229 multiclass avx512_move_scalar_lowering<string InstrStr, SDNode OpNode,
3230 PatLeaf ZeroFP, X86VectorVTInfo _> {
3232 def : Pat<(_.VT (OpNode _.RC:$src0,
3233 (_.VT (scalar_to_vector
3234 (_.EltVT (X86selects (scalar_to_vector (and (i8 (trunc GR32:$mask)), (i8 1))),
3235 (_.EltVT _.FRC:$src1),
3236 (_.EltVT _.FRC:$src2))))))),
3237 (COPY_TO_REGCLASS (!cast<Instruction>(InstrStr#rrk)
3238 (COPY_TO_REGCLASS _.FRC:$src2, _.RC),
3239 (COPY_TO_REGCLASS GR32:$mask, VK1WM),
3240 (_.VT _.RC:$src0), _.FRC:$src1),
// Zero-masked variant: select of a zero FP constant uses rrkz.
3243 def : Pat<(_.VT (OpNode _.RC:$src0,
3244 (_.VT (scalar_to_vector
3245 (_.EltVT (X86selects (scalar_to_vector (and (i8 (trunc GR32:$mask)), (i8 1))),
3246 (_.EltVT _.FRC:$src1),
3247 (_.EltVT ZeroFP))))))),
3248 (COPY_TO_REGCLASS (!cast<Instruction>(InstrStr#rrkz)
3249 (COPY_TO_REGCLASS GR32:$mask, VK1WM),
3250 (_.VT _.RC:$src0), _.FRC:$src1),
// Lower a 512-bit masked_store whose value is a 128-bit vector widened via
// insert_subvector (and whose mask is the given one-bit dag) to the masked
// scalar store (mrk).
3254 multiclass avx512_store_scalar_lowering<string InstrStr, AVX512VLVectorVTInfo _,
3255 dag Mask, RegisterClass MaskRC> {
3257 def : Pat<(masked_store addr:$dst, Mask,
3258 (_.info512.VT (insert_subvector undef,
3259 (_.info256.VT (insert_subvector undef,
3260 (_.info128.VT _.info128.RC:$src),
3263 (!cast<Instruction>(InstrStr#mrk) addr:$dst,
3264 (COPY_TO_REGCLASS MaskRC:$mask, VK1WM),
3265 (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>;
// Same as avx512_store_scalar_lowering, but the GR8/GR16 mask must first be
// widened to i32 with INSERT_SUBREG before copying into a VK1WM mask register.
3269 multiclass avx512_store_scalar_lowering_subreg<string InstrStr,
3270 AVX512VLVectorVTInfo _,
3271 dag Mask, RegisterClass MaskRC,
3272 SubRegIndex subreg> {
3274 def : Pat<(masked_store addr:$dst, Mask,
3275 (_.info512.VT (insert_subvector undef,
3276 (_.info256.VT (insert_subvector undef,
3277 (_.info128.VT _.info128.RC:$src),
3280 (!cast<Instruction>(InstrStr#mrk) addr:$dst,
3281 (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
3282 (COPY_TO_REGCLASS _.info128.RC:$src, _.info128.FRC))>;
// Lower 512-bit masked_loads that are really one-element scalar loads:
// zero passthru selects the zero-masked load (rmkz); an X86vzmovl passthru
// selects the merge-masked load (rmk).
3286 multiclass avx512_load_scalar_lowering<string InstrStr, AVX512VLVectorVTInfo _,
3287 dag Mask, RegisterClass MaskRC> {
3289 def : Pat<(_.info128.VT (extract_subvector
3290 (_.info512.VT (masked_load addr:$srcAddr, Mask,
3291 (_.info512.VT (bitconvert
3292 (v16i32 immAllZerosV))))),
3294 (!cast<Instruction>(InstrStr#rmkz)
3295 (COPY_TO_REGCLASS MaskRC:$mask, VK1WM),
3298 def : Pat<(_.info128.VT (extract_subvector
3299 (_.info512.VT (masked_load addr:$srcAddr, Mask,
3300 (_.info512.VT (insert_subvector undef,
3301 (_.info256.VT (insert_subvector undef,
3302 (_.info128.VT (X86vzmovl _.info128.RC:$src)),
3306 (!cast<Instruction>(InstrStr#rmk) _.info128.RC:$src,
3307 (COPY_TO_REGCLASS MaskRC:$mask, VK1WM),
// Subreg variant of avx512_load_scalar_lowering: widens a GR8/GR16 mask to
// i32 with INSERT_SUBREG before moving it into VK1WM.
3312 multiclass avx512_load_scalar_lowering_subreg<string InstrStr,
3313 AVX512VLVectorVTInfo _,
3314 dag Mask, RegisterClass MaskRC,
3315 SubRegIndex subreg> {
3317 def : Pat<(_.info128.VT (extract_subvector
3318 (_.info512.VT (masked_load addr:$srcAddr, Mask,
3319 (_.info512.VT (bitconvert
3320 (v16i32 immAllZerosV))))),
3322 (!cast<Instruction>(InstrStr#rmkz)
3323 (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
3326 def : Pat<(_.info128.VT (extract_subvector
3327 (_.info512.VT (masked_load addr:$srcAddr, Mask,
3328 (_.info512.VT (insert_subvector undef,
3329 (_.info256.VT (insert_subvector undef,
3330 (_.info128.VT (X86vzmovl _.info128.RC:$src)),
3334 (!cast<Instruction>(InstrStr#rmk) _.info128.RC:$src,
3335 (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), MaskRC:$mask, subreg)), VK1WM),
// Instantiations of the lowering helpers for VMOVSS (f32) and VMOVSD (f64),
// covering GR32 (trunc+and), GR16, and GR8 mask sources.
3340 defm : avx512_move_scalar_lowering<"VMOVSSZ", X86Movss, fp32imm0, v4f32x_info>;
3341 defm : avx512_move_scalar_lowering<"VMOVSDZ", X86Movsd, fp64imm0, v2f64x_info>;
3343 defm : avx512_store_scalar_lowering<"VMOVSSZ", avx512vl_f32_info,
3344 (v16i1 (bitconvert (i16 (trunc (and GR32:$mask, (i32 1)))))), GR32>;
3345 defm : avx512_store_scalar_lowering_subreg<"VMOVSSZ", avx512vl_f32_info,
3346 (v16i1 (bitconvert (i16 (and GR16:$mask, (i16 1))))), GR16, sub_16bit>;
3347 defm : avx512_store_scalar_lowering_subreg<"VMOVSDZ", avx512vl_f64_info,
3348 (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))), GR8, sub_8bit>;
3350 defm : avx512_load_scalar_lowering<"VMOVSSZ", avx512vl_f32_info,
3351 (v16i1 (bitconvert (i16 (trunc (and GR32:$mask, (i32 1)))))), GR32>;
3352 defm : avx512_load_scalar_lowering_subreg<"VMOVSSZ", avx512vl_f32_info,
3353 (v16i1 (bitconvert (i16 (and GR16:$mask, (i16 1))))), GR16, sub_16bit>;
3354 defm : avx512_load_scalar_lowering_subreg<"VMOVSDZ", avx512vl_f64_info,
3355 (v8i1 (bitconvert (i8 (and GR8:$mask, (i8 1))))), GR8, sub_8bit>;
// Scalar select between two FP registers: route through the masked VMOVSS/SD
// (rrk) with an undef pass-through vector, then copy back to the scalar RC.
3357 def : Pat<(f32 (X86selects VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
3358 (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
3359 VK1WM:$mask, (v4f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>;
3361 def : Pat<(f64 (X86selects VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
3362 (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
3363 VK1WM:$mask, (v2f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;
// Intrinsic form of the masked scalar store: widen the GR8 mask to i32
// before moving it into VK1WM.
3365 def : Pat<(int_x86_avx512_mask_store_ss addr:$dst, VR128X:$src, GR8:$mask),
3366 (VMOVSSZmrk addr:$dst, (COPY_TO_REGCLASS (i32 (INSERT_SUBREG (IMPLICIT_DEF), GR8:$mask, sub_8bit)), VK1WM),
3367 (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
// Reversed-operand (MRMDestReg) encodings of VMOVSS/VMOVSD, printed with a
// ".s" suffix. These have no patterns; FoldGenData links each to its
// canonical twin so the two encodings can be interchanged.
3369 let hasSideEffects = 0 in {
3370 def VMOVSSZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
3371 (ins VR128X:$src1, FR32X:$src2),
3372 "vmovss.s\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3373 [], NoItinerary>, XS, EVEX_4V, VEX_LIG,
3374 FoldGenData<"VMOVSSZrr">;
3376 let Constraints = "$src0 = $dst" in
3377 def VMOVSSZrrk_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
3378 (ins f32x_info.RC:$src0, f32x_info.KRCWM:$mask,
3379 VR128X:$src1, FR32X:$src2),
3380 "vmovss.s\t{$src2, $src1, $dst {${mask}}|"#
3381 "$dst {${mask}}, $src1, $src2}",
3382 [], NoItinerary>, EVEX_K, XS, EVEX_4V, VEX_LIG,
3383 FoldGenData<"VMOVSSZrrk">;
3385 def VMOVSSZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
3386 (ins f32x_info.KRCWM:$mask, VR128X:$src1, FR32X:$src2),
3387 "vmovss.s\t{$src2, $src1, $dst {${mask}} {z}|"#
3388 "$dst {${mask}} {z}, $src1, $src2}",
3389 [], NoItinerary>, EVEX_KZ, XS, EVEX_4V, VEX_LIG,
3390 FoldGenData<"VMOVSSZrrkz">;
3392 def VMOVSDZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
3393 (ins VR128X:$src1, FR64X:$src2),
3394 "vmovsd.s\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3395 [], NoItinerary>, XD, EVEX_4V, VEX_LIG, VEX_W,
3396 FoldGenData<"VMOVSDZrr">;
3398 let Constraints = "$src0 = $dst" in
3399 def VMOVSDZrrk_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
3400 (ins f64x_info.RC:$src0, f64x_info.KRCWM:$mask,
3401 VR128X:$src1, FR64X:$src2),
3402 "vmovsd.s\t{$src2, $src1, $dst {${mask}}|"#
3403 "$dst {${mask}}, $src1, $src2}",
3404 [], NoItinerary>, EVEX_K, XD, EVEX_4V, VEX_LIG,
3405 VEX_W, FoldGenData<"VMOVSDZrrk">;
3407 def VMOVSDZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
3408 (ins f64x_info.KRCWM:$mask, VR128X:$src1,
3410 "vmovsd.s\t{$src2, $src1, $dst {${mask}} {z}|"#
3411 "$dst {${mask}} {z}, $src1, $src2}",
3412 [], NoItinerary>, EVEX_KZ, XD, EVEX_4V, VEX_LIG,
3413 VEX_W, FoldGenData<"VMOVSDZrrkz">;
3416 let Predicates = [HasAVX512] in {
3417 let AddedComplexity = 15 in {
3418 // Move scalar to XMM zero-extended, zeroing a VR128X then do a
3419 // MOVS{S,D} to the lower bits.
3420 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
3421 (VMOVSSZrr (v4f32 (AVX512_128_SET0)), FR32X:$src)>;
3422 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
3423 (VMOVSSZrr (v4f32 (AVX512_128_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
3424 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
3425 (VMOVSSZrr (v4i32 (AVX512_128_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
3426 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))),
3427 (VMOVSDZrr (v2f64 (AVX512_128_SET0)), FR64X:$src)>;
3430 // Move low f32 and clear high bits.
// 256/512-bit versions operate on the low xmm and re-insert with
// SUBREG_TO_REG, which implies the upper bits are zero.
3431 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
3432 (SUBREG_TO_REG (i32 0),
3433 (VMOVSSZrr (v4f32 (AVX512_128_SET0)),
3434 (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
3435 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
3436 (SUBREG_TO_REG (i32 0),
3437 (VMOVSSZrr (v4i32 (AVX512_128_SET0)),
3438 (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
3439 def : Pat<(v16f32 (X86vzmovl (v16f32 VR512:$src))),
3440 (SUBREG_TO_REG (i32 0),
3441 (VMOVSSZrr (v4f32 (AVX512_128_SET0)),
3442 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm)), sub_xmm)>;
3443 def : Pat<(v16i32 (X86vzmovl (v16i32 VR512:$src))),
3444 (SUBREG_TO_REG (i32 0),
3445 (VMOVSSZrr (v4i32 (AVX512_128_SET0)),
3446 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm)), sub_xmm)>;
3448 let AddedComplexity = 20 in {
3449 // MOVSSrm zeros the high parts of the register; represent this
3450 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
3451 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
3452 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
3453 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
3454 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
3455 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
3456 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
3457 def : Pat<(v4f32 (X86vzload addr:$src)),
3458 (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
3460 // MOVSDrm zeros the high parts of the register; represent this
3461 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
3462 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
3463 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
3464 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
3465 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
3466 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
3467 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
3468 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
3469 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
3470 def : Pat<(v2f64 (X86vzload addr:$src)),
3471 (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
3473 // Represent the same patterns above but in the form they appear for
// 256-bit types: the scalar load is widened with SUBREG_TO_REG.
3475 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
3476 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
3477 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
3478 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
3479 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
3480 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
3481 def : Pat<(v8f32 (X86vzload addr:$src)),
3482 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
3483 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
3484 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
3485 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
3486 def : Pat<(v4f64 (X86vzload addr:$src)),
3487 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
3489 // Represent the same patterns above but in the form they appear for
// 512-bit types.
3491 def : Pat<(v16i32 (X86vzmovl (insert_subvector undef,
3492 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
3493 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
3494 def : Pat<(v16f32 (X86vzmovl (insert_subvector undef,
3495 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
3496 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
3497 def : Pat<(v16f32 (X86vzload addr:$src)),
3498 (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
3499 def : Pat<(v8f64 (X86vzmovl (insert_subvector undef,
3500 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
3501 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
3502 def : Pat<(v8f64 (X86vzload addr:$src)),
3503 (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
// Register (non-load) scalars widened into a zeroed 256-bit vector.
3505 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
3506 (v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
3507 (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSZrr (v4f32 (AVX512_128_SET0)),
3508 FR32X:$src)), sub_xmm)>;
3509 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
3510 (v2f64 (scalar_to_vector FR64X:$src)), (iPTR 0)))),
3511 (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDZrr (v2f64 (AVX512_128_SET0)),
3512 FR64X:$src)), sub_xmm)>;
3513 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
3514 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
3515 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
3517 // Move low f64 and clear high bits.
3518 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
3519 (SUBREG_TO_REG (i32 0),
3520 (VMOVSDZrr (v2f64 (AVX512_128_SET0)),
3521 (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;
3522 def : Pat<(v8f64 (X86vzmovl (v8f64 VR512:$src))),
3523 (SUBREG_TO_REG (i32 0),
3524 (VMOVSDZrr (v2f64 (AVX512_128_SET0)),
3525 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)), sub_xmm)>;
3527 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
3528 (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (AVX512_128_SET0)),
3529 (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;
3530 def : Pat<(v8i64 (X86vzmovl (v8i64 VR512:$src))),
3531 (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (AVX512_128_SET0)),
3532 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm)), sub_xmm)>;
3534 // Extract and store.
3535 def : Pat<(store (f32 (extractelt (v4f32 VR128X:$src), (iPTR 0))),
3537 (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
3539 // Shuffle with VMOVSS
3540 def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
3541 (VMOVSSZrr (v4i32 VR128X:$src1),
3542 (COPY_TO_REGCLASS (v4i32 VR128X:$src2), FR32X))>;
3543 def : Pat<(v4f32 (X86Movss VR128X:$src1, VR128X:$src2)),
3544 (VMOVSSZrr (v4f32 VR128X:$src1),
3545 (COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>;
// 256-bit Movss: operate on the low xmm halves and re-insert.
3548 def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)),
3549 (SUBREG_TO_REG (i32 0),
3550 (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm),
3551 (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)),
3553 def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)),
3554 (SUBREG_TO_REG (i32 0),
3555 (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm),
3556 (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)),
3559 // Shuffle with VMOVSD
3560 def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)),
3561 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
3562 def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)),
3563 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
3566 def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)),
3567 (SUBREG_TO_REG (i32 0),
3568 (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm),
3569 (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)),
3571 def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)),
3572 (SUBREG_TO_REG (i32 0),
3573 (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm),
3574 (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)),
// Movlpd/Movlps shuffles also reduce to a low-element VMOVSD merge.
3577 def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
3578 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
3579 def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
3580 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
3581 def : Pat<(v4f32 (X86Movlps VR128X:$src1, VR128X:$src2)),
3582 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
3583 def : Pat<(v4i32 (X86Movlps VR128X:$src1, VR128X:$src2)),
3584 (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
// vmovq xmm,xmm: move the low i64 and zero the upper element (X86vzmovl).
3587 let AddedComplexity = 15 in
3588 def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
3590 "vmovq\t{$src, $dst|$dst, $src}",
3591 [(set VR128X:$dst, (v2i64 (X86vzmovl
3592 (v2i64 VR128X:$src))))],
3593 IIC_SSE_MOVQ_RR>, EVEX, VEX_W;
3595 let Predicates = [HasAVX512] in {
3596 let AddedComplexity = 15 in {
// Zero-extending GPR -> vector moves select the plain movd/movq, which
// architecturally zero the upper elements.
3597 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
3598 (VMOVDI2PDIZrr GR32:$src)>;
3600 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
3601 (VMOV64toPQIZrr GR64:$src)>;
3603 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
3604 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
3605 (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
3607 def : Pat<(v8i64 (X86vzmovl (insert_subvector undef,
3608 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
3609 (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
3611 // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
3612 let AddedComplexity = 20 in {
3613 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector (zextloadi64i32 addr:$src))))),
3614 (VMOVDI2PDIZrm addr:$src)>;
3615 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
3616 (VMOVDI2PDIZrm addr:$src)>;
3617 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3618 (VMOVDI2PDIZrm addr:$src)>;
3619 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3620 (VMOVDI2PDIZrm addr:$src)>;
3621 def : Pat<(v4i32 (X86vzload addr:$src)),
3622 (VMOVDI2PDIZrm addr:$src)>;
3623 def : Pat<(v8i32 (X86vzload addr:$src)),
3624 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
3625 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3626 (VMOVQI2PQIZrm addr:$src)>;
3627 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
3628 (VMOVZPQILo2PQIZrr VR128X:$src)>;
3629 def : Pat<(v2i64 (X86vzload addr:$src)),
3630 (VMOVQI2PQIZrm addr:$src)>;
3631 def : Pat<(v4i64 (X86vzload addr:$src)),
3632 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
3635 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
3636 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
3637 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
3638 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
3639 def : Pat<(v16i32 (X86vzmovl (insert_subvector undef,
3640 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
3641 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
3643 // Use regular 128-bit instructions to match 512-bit scalar_to_vec+zext.
3644 def : Pat<(v16i32 (X86vzload addr:$src)),
3645 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
3646 def : Pat<(v8i64 (X86vzload addr:$src)),
3647 (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
3649 //===----------------------------------------------------------------------===//
3650 // AVX-512 - Non-temporals
3651 //===----------------------------------------------------------------------===//
// Non-temporal aligned loads (vmovntdqa) for 512/256/128-bit; the 256/128-bit
// forms require VLX. No patterns here; selection is done via Pat defs below.
3652 let SchedRW = [WriteLoad] in {
3653 def VMOVNTDQAZrm : AVX512PI<0x2A, MRMSrcMem, (outs VR512:$dst),
3654 (ins i512mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}",
3655 [], SSEPackedInt>, EVEX, T8PD, EVEX_V512,
3656 EVEX_CD8<64, CD8VF>;
3658 let Predicates = [HasVLX] in {
3659 def VMOVNTDQAZ256rm : AVX512PI<0x2A, MRMSrcMem, (outs VR256X:$dst),
3661 "vmovntdqa\t{$src, $dst|$dst, $src}",
3662 [], SSEPackedInt>, EVEX, T8PD, EVEX_V256,
3663 EVEX_CD8<64, CD8VF>;
3665 def VMOVNTDQAZ128rm : AVX512PI<0x2A, MRMSrcMem, (outs VR128X:$dst),
3667 "vmovntdqa\t{$src, $dst|$dst, $src}",
3668 [], SSEPackedInt>, EVEX, T8PD, EVEX_V128,
3669 EVEX_CD8<64, CD8VF>;
// Template for a single non-temporal store instruction; high AddedComplexity
// so the NT form wins over ordinary store patterns.
3673 multiclass avx512_movnt<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
3674 PatFrag st_frag = alignednontemporalstore,
3675 InstrItinClass itin = IIC_SSE_MOVNT> {
3676 let SchedRW = [WriteStore], AddedComplexity = 400 in
3677 def mr : AVX512PI<opc, MRMDestMem, (outs), (ins _.MemOp:$dst, _.RC:$src),
3678 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3679 [(st_frag (_.VT _.RC:$src), addr:$dst)],
3680 _.ExeDomain, itin>, EVEX, EVEX_CD8<_.EltSize, CD8VF>;
// Instantiate avx512_movnt at 512 bits (AVX512) plus 256/128 bits (VLX).
3683 multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr,
3684 AVX512VLVectorVTInfo VTInfo> {
3685 let Predicates = [HasAVX512] in
3686 defm Z : avx512_movnt<opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
3688 let Predicates = [HasAVX512, HasVLX] in {
3689 defm Z256 : avx512_movnt<opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
3690 defm Z128 : avx512_movnt<opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
// Non-temporal stores: integer (vmovntdq) and FP (vmovntpd/vmovntps).
3694 defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", avx512vl_i64_info>, PD;
3695 defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", avx512vl_f64_info>, PD, VEX_W;
3696 defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", avx512vl_f32_info>, PS;
3698 let Predicates = [HasAVX512], AddedComplexity = 400 in {
// Other 512-bit integer element types route through VMOVNTDQZmr.
3699 def : Pat<(alignednontemporalstore (v16i32 VR512:$src), addr:$dst),
3700 (VMOVNTDQZmr addr:$dst, VR512:$src)>;
3701 def : Pat<(alignednontemporalstore (v32i16 VR512:$src), addr:$dst),
3702 (VMOVNTDQZmr addr:$dst, VR512:$src)>;
3703 def : Pat<(alignednontemporalstore (v64i8 VR512:$src), addr:$dst),
3704 (VMOVNTDQZmr addr:$dst, VR512:$src)>;
// All 512-bit non-temporal loads (including bitcasted forms of the
// canonical v8i64 load) use VMOVNTDQA.
3706 def : Pat<(v8f64 (alignednontemporalload addr:$src)),
3707 (VMOVNTDQAZrm addr:$src)>;
3708 def : Pat<(v16f32 (alignednontemporalload addr:$src)),
3709 (VMOVNTDQAZrm addr:$src)>;
3710 def : Pat<(v8i64 (alignednontemporalload addr:$src)),
3711 (VMOVNTDQAZrm addr:$src)>;
3712 def : Pat<(v16i32 (bitconvert (v8i64 (alignednontemporalload addr:$src)))),
3713 (VMOVNTDQAZrm addr:$src)>;
3714 def : Pat<(v32i16 (bitconvert (v8i64 (alignednontemporalload addr:$src)))),
3715 (VMOVNTDQAZrm addr:$src)>;
3716 def : Pat<(v64i8 (bitconvert (v8i64 (alignednontemporalload addr:$src)))),
3717 (VMOVNTDQAZrm addr:$src)>;
3720 let Predicates = [HasVLX], AddedComplexity = 400 in {
// 256-bit non-temporal stores for the remaining integer element types.
3721 def : Pat<(alignednontemporalstore (v8i32 VR256X:$src), addr:$dst),
3722 (VMOVNTDQZ256mr addr:$dst, VR256X:$src)>;
3723 def : Pat<(alignednontemporalstore (v16i16 VR256X:$src), addr:$dst),
3724 (VMOVNTDQZ256mr addr:$dst, VR256X:$src)>;
3725 def : Pat<(alignednontemporalstore (v32i8 VR256X:$src), addr:$dst),
3726 (VMOVNTDQZ256mr addr:$dst, VR256X:$src)>;
// 256-bit non-temporal loads in their natural types.
3728 def : Pat<(v4f64 (alignednontemporalload addr:$src)),
3729 (VMOVNTDQAZ256rm addr:$src)>;
3730 def : Pat<(v8f32 (alignednontemporalload addr:$src)),
3731 (VMOVNTDQAZ256rm addr:$src)>;
3732 def : Pat<(v4i64 (alignednontemporalload addr:$src)),
3733 (VMOVNTDQAZ256rm addr:$src)>;
// Bitcasted forms of the 256-bit non-temporal load. The inner load must be
// the full-width v4i64 load: bitconvert cannot change the total vector size,
// so the previous v2i64 (128-bit) operand made these patterns unmatchable.
// This mirrors the 512-bit patterns above, which bitcast from v8i64.
3734 def : Pat<(v8i32 (bitconvert (v4i64 (alignednontemporalload addr:$src)))),
3735 (VMOVNTDQAZ256rm addr:$src)>;
3736 def : Pat<(v16i16 (bitconvert (v4i64 (alignednontemporalload addr:$src)))),
3737 (VMOVNTDQAZ256rm addr:$src)>;
3738 def : Pat<(v32i8 (bitconvert (v4i64 (alignednontemporalload addr:$src)))),
3739 (VMOVNTDQAZ256rm addr:$src)>;
// 128-bit non-temporal stores for the remaining integer element types.
3741 def : Pat<(alignednontemporalstore (v4i32 VR128X:$src), addr:$dst),
3742 (VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
3743 def : Pat<(alignednontemporalstore (v8i16 VR128X:$src), addr:$dst),
3744 (VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
3745 def : Pat<(alignednontemporalstore (v16i8 VR128X:$src), addr:$dst),
3746 (VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
// 128-bit non-temporal loads, plus bitcasts from the canonical v2i64 load.
3748 def : Pat<(v2f64 (alignednontemporalload addr:$src)),
3749 (VMOVNTDQAZ128rm addr:$src)>;
3750 def : Pat<(v4f32 (alignednontemporalload addr:$src)),
3751 (VMOVNTDQAZ128rm addr:$src)>;
3752 def : Pat<(v2i64 (alignednontemporalload addr:$src)),
3753 (VMOVNTDQAZ128rm addr:$src)>;
3754 def : Pat<(v4i32 (bitconvert (v2i64 (alignednontemporalload addr:$src)))),
3755 (VMOVNTDQAZ128rm addr:$src)>;
3756 def : Pat<(v8i16 (bitconvert (v2i64 (alignednontemporalload addr:$src)))),
3757 (VMOVNTDQAZ128rm addr:$src)>;
3758 def : Pat<(v16i8 (bitconvert (v2i64 (alignednontemporalload addr:$src)))),
3759 (VMOVNTDQAZ128rm addr:$src)>;
3762 //===----------------------------------------------------------------------===//
3763 // AVX-512 - Integer arithmetic
// Base template for integer binary ops: masked register-register (rr) and
// register-memory (rm) forms built via AVX512_maskable.
3765 multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
3766 X86VectorVTInfo _, OpndItins itins,
3767 bit IsCommutable = 0> {
3768 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
3769 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
3770 "$src2, $src1", "$src1, $src2",
3771 (_.VT (OpNode _.RC:$src1, _.RC:$src2)),
3772 itins.rr, IsCommutable>,
3773 AVX512BIBase, EVEX_4V;
3775 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
3776 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
3777 "$src2, $src1", "$src1, $src2",
3778 (_.VT (OpNode _.RC:$src1,
3779 (bitconvert (_.LdFrag addr:$src2)))),
3781 AVX512BIBase, EVEX_4V;
// Extends avx512_binop_rm with a broadcast-from-memory (rmb) form using
// EVEX.b ({1toN} syntax via _.BroadcastStr).
3784 multiclass avx512_binop_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
3785 X86VectorVTInfo _, OpndItins itins,
3786 bit IsCommutable = 0> :
3787 avx512_binop_rm<opc, OpcodeStr, OpNode, _, itins, IsCommutable> {
3788 defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
3789 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
3790 "${src2}"##_.BroadcastStr##", $src1",
3791 "$src1, ${src2}"##_.BroadcastStr,
3792 (_.VT (OpNode _.RC:$src1,
3794 (_.ScalarLdFrag addr:$src2)))),
3796 AVX512BIBase, EVEX_4V, EVEX_B;
// avx512_binop_rm_vl: instantiate avx512_binop_rm for all three vector
// lengths. The 512-bit form needs only `prd`; the 128/256-bit forms
// additionally require AVX512VL (HasVLX).
3799 multiclass avx512_binop_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
3800 AVX512VLVectorVTInfo VTInfo, OpndItins itins,
3801 Predicate prd, bit IsCommutable = 0> {
3802 let Predicates = [prd] in
3803 defm Z : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
3804 IsCommutable>, EVEX_V512;
3806 let Predicates = [prd, HasVLX] in {
3807 defm Z256 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
3808 IsCommutable>, EVEX_V256;
3809 defm Z128 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
3810 IsCommutable>, EVEX_V128;
// avx512_binop_rmb_vl: same length expansion, but for ops that also have
// the broadcast (rmb) form — i.e. 32/64-bit element ops.
3814 multiclass avx512_binop_rmb_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
3815 AVX512VLVectorVTInfo VTInfo, OpndItins itins,
3816 Predicate prd, bit IsCommutable = 0> {
3817 let Predicates = [prd] in
3818 defm Z : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
3819 IsCommutable>, EVEX_V512;
3821 let Predicates = [prd, HasVLX] in {
3822 defm Z256 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
3823 IsCommutable>, EVEX_V256;
3824 defm Z128 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
3825 IsCommutable>, EVEX_V128;
// Per-element-width wrappers. Each fixes the VT info and the EVEX
// compressed-displacement (CD8) scale for its element size.
// Quadword (i64): broadcast form allowed; VEX_W set for 64-bit elements.
3829 multiclass avx512_binop_rm_vl_q<bits<8> opc, string OpcodeStr, SDNode OpNode,
3830 OpndItins itins, Predicate prd,
3831 bit IsCommutable = 0> {
3832 defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i64_info,
3833 itins, prd, IsCommutable>,
3834 VEX_W, EVEX_CD8<64, CD8VF>;
// Doubleword (i32): broadcast form allowed.
3837 multiclass avx512_binop_rm_vl_d<bits<8> opc, string OpcodeStr, SDNode OpNode,
3838 OpndItins itins, Predicate prd,
3839 bit IsCommutable = 0> {
3840 defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i32_info,
3841 itins, prd, IsCommutable>, EVEX_CD8<32, CD8VF>;
// Word (i16): no broadcast form (sub-dword elements cannot broadcast).
3844 multiclass avx512_binop_rm_vl_w<bits<8> opc, string OpcodeStr, SDNode OpNode,
3845 OpndItins itins, Predicate prd,
3846 bit IsCommutable = 0> {
3847 defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i16_info,
3848 itins, prd, IsCommutable>, EVEX_CD8<16, CD8VF>;
// Byte (i8): no broadcast form.
3851 multiclass avx512_binop_rm_vl_b<bits<8> opc, string OpcodeStr, SDNode OpNode,
3852 OpndItins itins, Predicate prd,
3853 bit IsCommutable = 0> {
3854 defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i8_info,
3855 itins, prd, IsCommutable>, EVEX_CD8<8, CD8VF>;
// Width-pair combiners: emit D+Q or B+W variants with distinct opcodes
// and mnemonic suffixes from one invocation.
3858 multiclass avx512_binop_rm_vl_dq<bits<8> opc_d, bits<8> opc_q, string OpcodeStr,
3859 SDNode OpNode, OpndItins itins, Predicate prd,
3860 bit IsCommutable = 0> {
3861 defm Q : avx512_binop_rm_vl_q<opc_q, OpcodeStr#"q", OpNode, itins, prd,
3864 defm D : avx512_binop_rm_vl_d<opc_d, OpcodeStr#"d", OpNode, itins, prd,
3868 multiclass avx512_binop_rm_vl_bw<bits<8> opc_b, bits<8> opc_w, string OpcodeStr,
3869 SDNode OpNode, OpndItins itins, Predicate prd,
3870 bit IsCommutable = 0> {
3871 defm W : avx512_binop_rm_vl_w<opc_w, OpcodeStr#"w", OpNode, itins, prd,
3874 defm B : avx512_binop_rm_vl_b<opc_b, OpcodeStr#"b", OpNode, itins, prd,
// All four widths: D/Q gated on HasAVX512, B/W gated on HasBWI (byte and
// word element ops require the AVX512BW feature).
3878 multiclass avx512_binop_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
3879 bits<8> opc_d, bits<8> opc_q,
3880 string OpcodeStr, SDNode OpNode,
3881 OpndItins itins, bit IsCommutable = 0> {
3882 defm NAME : avx512_binop_rm_vl_dq<opc_d, opc_q, OpcodeStr, OpNode,
3883 itins, HasAVX512, IsCommutable>,
3884 avx512_binop_rm_vl_bw<opc_b, opc_w, OpcodeStr, OpNode,
3885 itins, HasBWI, IsCommutable>;
// avx512_binop_rm2: binary op whose source and destination vector types
// differ (e.g. vpmuldq: v_n x i32 sources -> v_{n/2} x i64 result).
// _Brdct describes the broadcast element type for the rmb form.
3888 multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, OpndItins itins,
3889 SDNode OpNode,X86VectorVTInfo _Src,
3890 X86VectorVTInfo _Dst, X86VectorVTInfo _Brdct,
3891 bit IsCommutable = 0> {
3892 defm rr : AVX512_maskable<opc, MRMSrcReg, _Dst, (outs _Dst.RC:$dst),
3893 (ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
3894 "$src2, $src1","$src1, $src2",
3896 (_Src.VT _Src.RC:$src1),
3897 (_Src.VT _Src.RC:$src2))),
3898 itins.rr, IsCommutable>,
3899 AVX512BIBase, EVEX_4V;
3900 defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
3901 (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
3902 "$src2, $src1", "$src1, $src2",
3903 (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
3904 (bitconvert (_Src.LdFrag addr:$src2)))),
3906 AVX512BIBase, EVEX_4V;
// Broadcast form: a single _Brdct-element scalar is splatted, then
// bitconverted to the source vector type before the op.
3908 defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
3909 (ins _Src.RC:$src1, _Brdct.ScalarMemOp:$src2),
3911 "${src2}"##_Brdct.BroadcastStr##", $src1",
3912 "$src1, ${src2}"##_Brdct.BroadcastStr,
3913 (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
3914 (_Brdct.VT (X86VBroadcast
3915 (_Brdct.ScalarLdFrag addr:$src2)))))),
3917 AVX512BIBase, EVEX_4V, EVEX_B;
// Integer arithmetic instantiations. Opcode bytes are per-width
// (b, w, d, q order for the _all variants).
3920 defm VPADD : avx512_binop_rm_vl_all<0xFC, 0xFD, 0xFE, 0xD4, "vpadd", add,
3921 SSE_INTALU_ITINS_P, 1>;
3922 defm VPSUB : avx512_binop_rm_vl_all<0xF8, 0xF9, 0xFA, 0xFB, "vpsub", sub,
3923 SSE_INTALU_ITINS_P, 0>;
// Saturating add/sub (signed and unsigned), byte/word only (BWI).
3924 defm VPADDS : avx512_binop_rm_vl_bw<0xEC, 0xED, "vpadds", X86adds,
3925 SSE_INTALU_ITINS_P, HasBWI, 1>;
3926 defm VPSUBS : avx512_binop_rm_vl_bw<0xE8, 0xE9, "vpsubs", X86subs,
3927 SSE_INTALU_ITINS_P, HasBWI, 0>;
3928 defm VPADDUS : avx512_binop_rm_vl_bw<0xDC, 0xDD, "vpaddus", X86addus,
3929 SSE_INTALU_ITINS_P, HasBWI, 1>;
3930 defm VPSUBUS : avx512_binop_rm_vl_bw<0xD8, 0xD9, "vpsubus", X86subus,
3931 SSE_INTALU_ITINS_P, HasBWI, 0>;
// Low-half multiplies; VPMULLQ requires AVX512DQ.
3932 defm VPMULLD : avx512_binop_rm_vl_d<0x40, "vpmulld", mul,
3933 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
3934 defm VPMULLW : avx512_binop_rm_vl_w<0xD5, "vpmullw", mul,
3935 SSE_INTALU_ITINS_P, HasBWI, 1>;
3936 defm VPMULLQ : avx512_binop_rm_vl_q<0x40, "vpmullq", mul,
3937 SSE_INTALU_ITINS_P, HasDQI, 1>, T8PD;
// High-half multiplies (word only).
3938 defm VPMULHW : avx512_binop_rm_vl_w<0xE5, "vpmulhw", mulhs, SSE_INTALU_ITINS_P,
3940 defm VPMULHUW : avx512_binop_rm_vl_w<0xE4, "vpmulhuw", mulhu, SSE_INTMUL_ITINS_P,
3942 defm VPMULHRSW : avx512_binop_rm_vl_w<0x0B, "vpmulhrsw", X86mulhrs, SSE_INTMUL_ITINS_P,
// Rounded average (byte/word).
3944 defm VPAVG : avx512_binop_rm_vl_bw<0xE0, 0xE3, "vpavg", X86avg,
3945 SSE_INTALU_ITINS_P, HasBWI, 1>;
// avx512_binop_all: length expansion for mixed src/dst type ops built on
// avx512_binop_rm2. The broadcast info is always the i64 info of the
// matching length (third argument to avx512_binop_rm2).
3947 multiclass avx512_binop_all<bits<8> opc, string OpcodeStr, OpndItins itins,
3948 AVX512VLVectorVTInfo _SrcVTInfo, AVX512VLVectorVTInfo _DstVTInfo,
3949 SDNode OpNode, Predicate prd, bit IsCommutable = 0> {
3950 let Predicates = [prd] in
3951 defm NAME#Z : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
3952 _SrcVTInfo.info512, _DstVTInfo.info512,
3953 v8i64_info, IsCommutable>,
3954 EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
3955 let Predicates = [HasVLX, prd] in {
3956 defm NAME#Z256 : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
3957 _SrcVTInfo.info256, _DstVTInfo.info256,
3958 v4i64x_info, IsCommutable>,
3959 EVEX_V256, EVEX_CD8<64, CD8VF>, VEX_W;
3960 defm NAME#Z128 : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
3961 _SrcVTInfo.info128, _DstVTInfo.info128,
3962 v2i64x_info, IsCommutable>,
3963 EVEX_V128, EVEX_CD8<64, CD8VF>, VEX_W;
// Widening multiplies: i32 sources producing i64 results, plus the VBMI
// VPMULTISHIFTQB (i8 -> i8, non-commutable).
3967 defm VPMULDQ : avx512_binop_all<0x28, "vpmuldq", SSE_INTALU_ITINS_P,
3968 avx512vl_i32_info, avx512vl_i64_info,
3969 X86pmuldq, HasAVX512, 1>,T8PD;
3970 defm VPMULUDQ : avx512_binop_all<0xF4, "vpmuludq", SSE_INTMUL_ITINS_P,
3971 avx512vl_i32_info, avx512vl_i64_info,
3972 X86pmuludq, HasAVX512, 1>;
3973 defm VPMULTISHIFTQB : avx512_binop_all<0x83, "vpmultishiftqb", SSE_INTALU_ITINS_P,
3974 avx512vl_i8_info, avx512vl_i8_info,
3975 X86multishift, HasVBMI, 0>, T8PD;
// Pack instructions (narrowing with saturation). _Src and _Dst differ:
// e.g. v16i32 -> v32i16. rmb is the EVEX.b broadcast-memory form.
3977 multiclass avx512_packs_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
3978 X86VectorVTInfo _Src, X86VectorVTInfo _Dst> {
3979 defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
3980 (ins _Src.RC:$src1, _Src.ScalarMemOp:$src2),
3982 "${src2}"##_Src.BroadcastStr##", $src1",
3983 "$src1, ${src2}"##_Src.BroadcastStr,
3984 (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
3985 (_Src.VT (X86VBroadcast
3986 (_Src.ScalarLdFrag addr:$src2))))))>,
3987 EVEX_4V, EVEX_B, EVEX_CD8<_Src.EltSize, CD8VF>;
// Register-register and register-memory pack forms.
3990 multiclass avx512_packs_rm<bits<8> opc, string OpcodeStr,
3991 SDNode OpNode,X86VectorVTInfo _Src,
3992 X86VectorVTInfo _Dst, bit IsCommutable = 0> {
3993 defm rr : AVX512_maskable<opc, MRMSrcReg, _Dst, (outs _Dst.RC:$dst),
3994 (ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
3995 "$src2, $src1","$src1, $src2",
3997 (_Src.VT _Src.RC:$src1),
3998 (_Src.VT _Src.RC:$src2))),
3999 NoItinerary, IsCommutable>,
4000 EVEX_CD8<_Src.EltSize, CD8VF>, EVEX_4V;
4001 defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
4002 (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
4003 "$src2, $src1", "$src1, $src2",
4004 (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
4005 (bitconvert (_Src.LdFrag addr:$src2))))>,
4006 EVEX_4V, EVEX_CD8<_Src.EltSize, CD8VF>;
// Length expansion for dword->word packs. Each length pairs the rr/rm
// forms with the rmb (broadcast) form; all gated on HasBWI, with VL
// forms additionally on HasVLX.
4009 multiclass avx512_packs_all_i32_i16<bits<8> opc, string OpcodeStr,
4011 let Predicates = [HasBWI] in
4012 defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, v16i32_info,
4014 avx512_packs_rmb<opc, OpcodeStr, OpNode, v16i32_info,
4015 v32i16_info>, EVEX_V512;
4016 let Predicates = [HasBWI, HasVLX] in {
4017 defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, v8i32x_info,
4019 avx512_packs_rmb<opc, OpcodeStr, OpNode, v8i32x_info,
4020 v16i16x_info>, EVEX_V256;
4021 defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, v4i32x_info,
4023 avx512_packs_rmb<opc, OpcodeStr, OpNode, v4i32x_info,
4024 v8i16x_info>, EVEX_V128;
// Word->byte packs: no broadcast form (sub-dword elements).
4027 multiclass avx512_packs_all_i16_i8<bits<8> opc, string OpcodeStr,
4029 let Predicates = [HasBWI] in
4030 defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, v32i16_info,
4031 v64i8_info>, EVEX_V512;
4032 let Predicates = [HasBWI, HasVLX] in {
4033 defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, v16i16x_info,
4034 v32i8x_info>, EVEX_V256;
4035 defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, v8i16x_info,
4036 v16i8x_info>, EVEX_V128;
// Multiply-add instructions (vpmaddwd/vpmaddubsw) reuse the packs
// templates since they also narrow: _Src elements pair into _Dst.
4040 multiclass avx512_vpmadd<bits<8> opc, string OpcodeStr,
4041 SDNode OpNode, AVX512VLVectorVTInfo _Src,
4042 AVX512VLVectorVTInfo _Dst, bit IsCommutable = 0> {
4043 let Predicates = [HasBWI] in
4044 defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, _Src.info512,
4045 _Dst.info512, IsCommutable>, EVEX_V512;
4046 let Predicates = [HasBWI, HasVLX] in {
4047 defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, _Src.info256,
4048 _Dst.info256, IsCommutable>, EVEX_V256;
4049 defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, _Src.info128,
4050 _Dst.info128, IsCommutable>, EVEX_V128;
// Pack and multiply-add instantiations.
4054 defm VPACKSSDW : avx512_packs_all_i32_i16<0x6B, "vpackssdw", X86Packss>, AVX512BIBase;
4055 defm VPACKUSDW : avx512_packs_all_i32_i16<0x2b, "vpackusdw", X86Packus>, AVX5128IBase;
4056 defm VPACKSSWB : avx512_packs_all_i16_i8 <0x63, "vpacksswb", X86Packss>, AVX512BIBase;
4057 defm VPACKUSWB : avx512_packs_all_i16_i8 <0x67, "vpackuswb", X86Packus>, AVX512BIBase;
4059 defm VPMADDUBSW : avx512_vpmadd<0x04, "vpmaddubsw", X86vpmaddubsw,
4060 avx512vl_i8_info, avx512vl_i16_info>, AVX512BIBase, T8PD;
4061 defm VPMADDWD : avx512_vpmadd<0xF5, "vpmaddwd", X86vpmaddwd,
4062 avx512vl_i16_info, avx512vl_i32_info, 1>, AVX512BIBase;
// Signed/unsigned min/max. Byte/word forms require BWI; dword/qword
// forms (via _dq) require only AVX512F.
4064 defm VPMAXSB : avx512_binop_rm_vl_b<0x3C, "vpmaxsb", smax,
4065 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
4066 defm VPMAXSW : avx512_binop_rm_vl_w<0xEE, "vpmaxsw", smax,
4067 SSE_INTALU_ITINS_P, HasBWI, 1>;
4068 defm VPMAXS : avx512_binop_rm_vl_dq<0x3D, 0x3D, "vpmaxs", smax,
4069 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
4071 defm VPMAXUB : avx512_binop_rm_vl_b<0xDE, "vpmaxub", umax,
4072 SSE_INTALU_ITINS_P, HasBWI, 1>;
4073 defm VPMAXUW : avx512_binop_rm_vl_w<0x3E, "vpmaxuw", umax,
4074 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
4075 defm VPMAXU : avx512_binop_rm_vl_dq<0x3F, 0x3F, "vpmaxu", umax,
4076 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
4078 defm VPMINSB : avx512_binop_rm_vl_b<0x38, "vpminsb", smin,
4079 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
4080 defm VPMINSW : avx512_binop_rm_vl_w<0xEA, "vpminsw", smin,
4081 SSE_INTALU_ITINS_P, HasBWI, 1>;
4082 defm VPMINS : avx512_binop_rm_vl_dq<0x39, 0x39, "vpmins", smin,
4083 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
4085 defm VPMINUB : avx512_binop_rm_vl_b<0xDA, "vpminub", umin,
4086 SSE_INTALU_ITINS_P, HasBWI, 1>;
4087 defm VPMINUW : avx512_binop_rm_vl_w<0x3A, "vpminuw", umin,
4088 SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
4089 defm VPMINU : avx512_binop_rm_vl_dq<0x3B, 0x3B, "vpminu", umin,
4090 SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
// Widen 128/256-bit i64 multiplies to the 512-bit VPMULLQ when VLX is
// unavailable: insert operands into zmm via INSERT_SUBREG, multiply,
// then (presumably) extract the low subregister — the instruction and
// extract lines are elided in this excerpt (numbering gaps at
// 4095-4096 and 4099-4100); confirm against the full file.
4092 // PMULLQ: Use 512bit version to implement 128/256 bit in case NoVLX.
4093 let Predicates = [HasDQI, NoVLX] in {
4094 def : Pat<(v4i64 (mul (v4i64 VR256X:$src1), (v4i64 VR256X:$src2))),
4097 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src1, sub_ymm),
4098 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src2, sub_ymm)),
4101 def : Pat<(v2i64 (mul (v2i64 VR128X:$src1), (v2i64 VR128X:$src2))),
4104 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src1, sub_xmm),
4105 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src2, sub_xmm)),
4109 //===----------------------------------------------------------------------===//
4110 // AVX-512 Logical Instructions
4111 //===----------------------------------------------------------------------===//
// avx512_logic_rm: bitwise logical ops (and/or/xor/andn). The operation
// is performed on the i64 vector type (logic ops are element-size
// agnostic), with bitconverts to/from the nominal VT; the second pattern
// given to AVX512_maskable_logic is the masked-select form.
4113 multiclass avx512_logic_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4114 X86VectorVTInfo _, bit IsCommutable = 0> {
4115 defm rr : AVX512_maskable_logic<opc, MRMSrcReg, _, (outs _.RC:$dst),
4116 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4117 "$src2, $src1", "$src1, $src2",
4118 (_.i64VT (OpNode (bitconvert (_.VT _.RC:$src1)),
4119 (bitconvert (_.VT _.RC:$src2)))),
4120 (_.VT (bitconvert (_.i64VT (OpNode _.RC:$src1,
4122 IIC_SSE_BIT_P_RR, IsCommutable>,
4123 AVX512BIBase, EVEX_4V;
4125 defm rm : AVX512_maskable_logic<opc, MRMSrcMem, _, (outs _.RC:$dst),
4126 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
4127 "$src2, $src1", "$src1, $src2",
4128 (_.i64VT (OpNode (bitconvert (_.VT _.RC:$src1)),
4129 (bitconvert (_.LdFrag addr:$src2)))),
4130 (_.VT (bitconvert (_.i64VT (OpNode _.RC:$src1,
4131 (bitconvert (_.LdFrag addr:$src2)))))),
4133 AVX512BIBase, EVEX_4V;
// avx512_logic_rmb: adds the EVEX.b broadcast form to avx512_logic_rm.
4136 multiclass avx512_logic_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
4137 X86VectorVTInfo _, bit IsCommutable = 0> :
4138 avx512_logic_rm<opc, OpcodeStr, OpNode, _, IsCommutable> {
4139 defm rmb : AVX512_maskable_logic<opc, MRMSrcMem, _, (outs _.RC:$dst),
4140 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
4141 "${src2}"##_.BroadcastStr##", $src1",
4142 "$src1, ${src2}"##_.BroadcastStr,
4143 (_.i64VT (OpNode _.RC:$src1,
4145 (_.VT (X86VBroadcast
4146 (_.ScalarLdFrag addr:$src2)))))),
4147 (_.VT (bitconvert (_.i64VT (OpNode _.RC:$src1,
4149 (_.VT (X86VBroadcast
4150 (_.ScalarLdFrag addr:$src2)))))))),
4152 AVX512BIBase, EVEX_4V, EVEX_B;
// Length expansion for logical ops: Z on HasAVX512, Z256/Z128 on VLX.
4155 multiclass avx512_logic_rmb_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
4156 AVX512VLVectorVTInfo VTInfo,
4157 bit IsCommutable = 0> {
4158 let Predicates = [HasAVX512] in
4159 defm Z : avx512_logic_rmb<opc, OpcodeStr, OpNode, VTInfo.info512,
4160 IsCommutable>, EVEX_V512;
4162 let Predicates = [HasAVX512, HasVLX] in {
4163 defm Z256 : avx512_logic_rmb<opc, OpcodeStr, OpNode, VTInfo.info256,
4164 IsCommutable>, EVEX_V256;
4165 defm Z128 : avx512_logic_rmb<opc, OpcodeStr, OpNode, VTInfo.info128,
4166 IsCommutable>, EVEX_V128;
// Dword and qword element-width wrappers, and the D+Q combiner.
4170 multiclass avx512_logic_rm_vl_d<bits<8> opc, string OpcodeStr, SDNode OpNode,
4171 bit IsCommutable = 0> {
4172 defm NAME : avx512_logic_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i32_info,
4173 IsCommutable>, EVEX_CD8<32, CD8VF>;
4176 multiclass avx512_logic_rm_vl_q<bits<8> opc, string OpcodeStr, SDNode OpNode,
4177 bit IsCommutable = 0> {
4178 defm NAME : avx512_logic_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i64_info,
4180 VEX_W, EVEX_CD8<64, CD8VF>;
4183 multiclass avx512_logic_rm_vl_dq<bits<8> opc_d, bits<8> opc_q, string OpcodeStr,
4184 SDNode OpNode, bit IsCommutable = 0> {
4185 defm Q : avx512_logic_rm_vl_q<opc_q, OpcodeStr#"q", OpNode, IsCommutable>;
4186 defm D : avx512_logic_rm_vl_d<opc_d, OpcodeStr#"d", OpNode, IsCommutable>;
// Logical op instantiations; D and Q share the same opcode byte.
// VPANDN is non-commutable (X86andnp negates only the first operand).
4189 defm VPAND : avx512_logic_rm_vl_dq<0xDB, 0xDB, "vpand", and, 1>;
4190 defm VPOR : avx512_logic_rm_vl_dq<0xEB, 0xEB, "vpor", or, 1>;
4191 defm VPXOR : avx512_logic_rm_vl_dq<0xEF, 0xEF, "vpxor", xor, 1>;
4192 defm VPANDN : avx512_logic_rm_vl_dq<0xDF, 0xDF, "vpandn", X86andnp>;
4194 //===----------------------------------------------------------------------===//
4195 // AVX-512 FP arithmetic
4196 //===----------------------------------------------------------------------===//
// avx512_fp_scalar: scalar FP binary op. The _Int forms operate on the
// full vector register via VecNode (with current rounding mode); the
// isCodeGenOnly rr/rm forms operate on FR32X/FR64X via the plain OpNode
// and exist only for scalar ISel, not assembly parsing.
4197 multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
4198 SDNode OpNode, SDNode VecNode, OpndItins itins,
4200 let ExeDomain = _.ExeDomain in {
4201 defm rr_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
4202 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4203 "$src2, $src1", "$src1, $src2",
4204 (_.VT (VecNode _.RC:$src1, _.RC:$src2,
4205 (i32 FROUND_CURRENT))),
4208 defm rm_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
4209 (ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr,
4210 "$src2, $src1", "$src1, $src2",
4211 (_.VT (VecNode _.RC:$src1,
4212 _.ScalarIntMemCPat:$src2,
4213 (i32 FROUND_CURRENT))),
4215 let isCodeGenOnly = 1, Predicates = [HasAVX512] in {
4216 def rr : I< opc, MRMSrcReg, (outs _.FRC:$dst),
4217 (ins _.FRC:$src1, _.FRC:$src2),
4218 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4219 [(set _.FRC:$dst, (OpNode _.FRC:$src1, _.FRC:$src2))],
4221 let isCommutable = IsCommutable;
4223 def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
4224 (ins _.FRC:$src1, _.ScalarMemOp:$src2),
4225 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4226 [(set _.FRC:$dst, (OpNode _.FRC:$src1,
4227 (_.ScalarLdFrag addr:$src2)))], itins.rm>;
// Scalar op with an explicit static rounding-mode operand ($rc),
// encoded via EVEX.b + EVEX_RC.
4232 multiclass avx512_fp_scalar_round<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
4233 SDNode VecNode, OpndItins itins, bit IsCommutable = 0> {
4234 let ExeDomain = _.ExeDomain in
4235 defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
4236 (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr,
4237 "$rc, $src2, $src1", "$src1, $src2, $rc",
4238 (VecNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
4239 (i32 imm:$rc)), itins.rr, IsCommutable>,
// Scalar op variant for min/max-style nodes: like avx512_fp_scalar but
// VecNode takes no rounding operand, and an extra rrb form encodes
// {sae} (suppress-all-exceptions) via SaeNode + EVEX.b.
4242 multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
4243 SDNode OpNode, SDNode VecNode, SDNode SaeNode,
4244 OpndItins itins, bit IsCommutable> {
4245 let ExeDomain = _.ExeDomain in {
4246 defm rr_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
4247 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4248 "$src2, $src1", "$src1, $src2",
4249 (_.VT (VecNode _.RC:$src1, _.RC:$src2)),
4252 defm rm_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
4253 (ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr,
4254 "$src2, $src1", "$src1, $src2",
4255 (_.VT (VecNode _.RC:$src1,
4256 _.ScalarIntMemCPat:$src2)),
// isCodeGenOnly scalar-register forms for plain ISel (no masking).
4259 let isCodeGenOnly = 1, Predicates = [HasAVX512] in {
4260 def rr : I< opc, MRMSrcReg, (outs _.FRC:$dst),
4261 (ins _.FRC:$src1, _.FRC:$src2),
4262 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4263 [(set _.FRC:$dst, (OpNode _.FRC:$src1, _.FRC:$src2))],
4265 let isCommutable = IsCommutable;
4267 def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
4268 (ins _.FRC:$src1, _.ScalarMemOp:$src2),
4269 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4270 [(set _.FRC:$dst, (OpNode _.FRC:$src1,
4271 (_.ScalarLdFrag addr:$src2)))], itins.rm>;
// {sae} form: exceptions suppressed, signalled by FROUND_NO_EXC.
4274 defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
4275 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4276 "{sae}, $src2, $src1", "$src1, $src2, {sae}",
4277 (SaeNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
4278 (i32 FROUND_NO_EXC))>, EVEX_B;
4282 multiclass avx512_binop_s_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
4284 SizeItins itins, bit IsCommutable> {
4285 defm SSZ : avx512_fp_scalar<opc, OpcodeStr#"ss", f32x_info, OpNode, VecNode,
4286 itins.s, IsCommutable>,
4287 avx512_fp_scalar_round<opc, OpcodeStr#"ss", f32x_info, VecNode,
4288 itins.s, IsCommutable>,
4289 XS, EVEX_4V, VEX_LIG, EVEX_CD8<32, CD8VT1>;
4290 defm SDZ : avx512_fp_scalar<opc, OpcodeStr#"sd", f64x_info, OpNode, VecNode,
4291 itins.d, IsCommutable>,
4292 avx512_fp_scalar_round<opc, OpcodeStr#"sd", f64x_info, VecNode,
4293 itins.d, IsCommutable>,
4294 XD, VEX_W, EVEX_4V, VEX_LIG, EVEX_CD8<64, CD8VT1>;
4297 multiclass avx512_binop_s_sae<bits<8> opc, string OpcodeStr, SDNode OpNode,
4298 SDNode VecNode, SDNode SaeNode,
4299 SizeItins itins, bit IsCommutable> {
4300 defm SSZ : avx512_fp_scalar_sae<opc, OpcodeStr#"ss", f32x_info, OpNode,
4301 VecNode, SaeNode, itins.s, IsCommutable>,
4302 XS, EVEX_4V, VEX_LIG, EVEX_CD8<32, CD8VT1>;
4303 defm SDZ : avx512_fp_scalar_sae<opc, OpcodeStr#"sd", f64x_info, OpNode,
4304 VecNode, SaeNode, itins.d, IsCommutable>,
4305 XD, VEX_W, EVEX_4V, VEX_LIG, EVEX_CD8<64, CD8VT1>;
4307 defm VADD : avx512_binop_s_round<0x58, "vadd", fadd, X86faddRnds, SSE_ALU_ITINS_S, 1>;
4308 defm VMUL : avx512_binop_s_round<0x59, "vmul", fmul, X86fmulRnds, SSE_MUL_ITINS_S, 1>;
4309 defm VSUB : avx512_binop_s_round<0x5C, "vsub", fsub, X86fsubRnds, SSE_ALU_ITINS_S, 0>;
4310 defm VDIV : avx512_binop_s_round<0x5E, "vdiv", fdiv, X86fdivRnds, SSE_DIV_ITINS_S, 0>;
4311 defm VMIN : avx512_binop_s_sae <0x5D, "vmin", X86fmin, X86fmins, X86fminRnds,
4312 SSE_ALU_ITINS_S, 0>;
4313 defm VMAX : avx512_binop_s_sae <0x5F, "vmax", X86fmax, X86fmaxs, X86fmaxRnds,
4314 SSE_ALU_ITINS_S, 0>;
4316 // MIN/MAX nodes are commutable under "unsafe-fp-math". In this case we use
4317 // X86fminc and X86fmaxc instead of X86fmin and X86fmax
// Commutable scalar min/max: isCodeGenOnly FR-register forms only; the
// assembly-visible instructions come from avx512_binop_s_sae above.
4318 multiclass avx512_comutable_binop_s<bits<8> opc, string OpcodeStr,
4319 X86VectorVTInfo _, SDNode OpNode, OpndItins itins> {
4320 let isCodeGenOnly = 1, Predicates = [HasAVX512], ExeDomain = _.ExeDomain in {
4321 def rr : I< opc, MRMSrcReg, (outs _.FRC:$dst),
4322 (ins _.FRC:$src1, _.FRC:$src2),
4323 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4324 [(set _.FRC:$dst, (OpNode _.FRC:$src1, _.FRC:$src2))],
4326 let isCommutable = 1;
4328 def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
4329 (ins _.FRC:$src1, _.ScalarMemOp:$src2),
4330 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4331 [(set _.FRC:$dst, (OpNode _.FRC:$src1,
4332 (_.ScalarLdFrag addr:$src2)))], itins.rm>;
// Instantiations: same opcodes/mnemonics as VMIN/VMAX scalar above,
// matched only for the commutable X86fminc/X86fmaxc nodes.
4335 defm VMINCSSZ : avx512_comutable_binop_s<0x5D, "vminss", f32x_info, X86fminc,
4336 SSE_ALU_ITINS_S.s>, XS, EVEX_4V, VEX_LIG,
4337 EVEX_CD8<32, CD8VT1>;
4339 defm VMINCSDZ : avx512_comutable_binop_s<0x5D, "vminsd", f64x_info, X86fminc,
4340 SSE_ALU_ITINS_S.d>, XD, VEX_W, EVEX_4V, VEX_LIG,
4341 EVEX_CD8<64, CD8VT1>;
4343 defm VMAXCSSZ : avx512_comutable_binop_s<0x5F, "vmaxss", f32x_info, X86fmaxc,
4344 SSE_ALU_ITINS_S.s>, XS, EVEX_4V, VEX_LIG,
4345 EVEX_CD8<32, CD8VT1>;
4347 defm VMAXCSDZ : avx512_comutable_binop_s<0x5F, "vmaxsd", f64x_info, X86fmaxc,
4348 SSE_ALU_ITINS_S.d>, XD, VEX_W, EVEX_4V, VEX_LIG,
4349 EVEX_CD8<64, CD8VT1>;
// avx512_fp_packed: packed FP binary op with rr, rm and broadcast (rmb)
// forms. OpNode may be null_frag (pattern-less, e.g. assembler-only
// logic ops), hence hasSideEffects = 0.
4351 multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
4352 X86VectorVTInfo _, OpndItins itins,
4354 let ExeDomain = _.ExeDomain, hasSideEffects = 0 in {
4355 defm rr: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4356 (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
4357 "$src2, $src1", "$src1, $src2",
4358 (_.VT (OpNode _.RC:$src1, _.RC:$src2)), itins.rr,
4359 IsCommutable>, EVEX_4V;
4360 let mayLoad = 1 in {
4361 defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4362 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
4363 "$src2, $src1", "$src1, $src2",
4364 (OpNode _.RC:$src1, (_.LdFrag addr:$src2)), itins.rm>,
4366 defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4367 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
4368 "${src2}"##_.BroadcastStr##", $src1",
4369 "$src1, ${src2}"##_.BroadcastStr,
4370 (OpNode _.RC:$src1, (_.VT (X86VBroadcast
4371 (_.ScalarLdFrag addr:$src2)))),
4372 itins.rm>, EVEX_4V, EVEX_B;
// Embedded-rounding packed form ($rc operand, EVEX.b + EVEX_RC).
4377 multiclass avx512_fp_round_packed<bits<8> opc, string OpcodeStr, SDPatternOperator OpNodeRnd,
4378 X86VectorVTInfo _> {
4379 let ExeDomain = _.ExeDomain in
4380 defm rb: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4381 (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr##_.Suffix,
4382 "$rc, $src2, $src1", "$src1, $src2, $rc",
4383 (_.VT (OpNodeRnd _.RC:$src1, _.RC:$src2, (i32 imm:$rc)))>,
4384 EVEX_4V, EVEX_B, EVEX_RC;
// {sae} packed form (exception suppression, no rounding override).
4388 multiclass avx512_fp_sae_packed<bits<8> opc, string OpcodeStr, SDPatternOperator OpNodeRnd,
4389 X86VectorVTInfo _> {
4390 let ExeDomain = _.ExeDomain in
4391 defm rb: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4392 (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
4393 "{sae}, $src2, $src1", "$src1, $src2, {sae}",
4394 (_.VT (OpNodeRnd _.RC:$src1, _.RC:$src2, (i32 FROUND_NO_EXC)))>,
// avx512_fp_binop_p: PS/PD packed FP ops across all vector lengths.
// 512-bit forms need only `prd`; 128/256-bit also need VLX.
4398 multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
4399 Predicate prd, SizeItins itins,
4400 bit IsCommutable = 0> {
4401 let Predicates = [prd] in {
4402 defm PSZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v16f32_info,
4403 itins.s, IsCommutable>, EVEX_V512, PS,
4404 EVEX_CD8<32, CD8VF>;
4405 defm PDZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v8f64_info,
4406 itins.d, IsCommutable>, EVEX_V512, PD, VEX_W,
4407 EVEX_CD8<64, CD8VF>;
4410 // Define only if AVX512VL feature is present.
4411 let Predicates = [prd, HasVLX] in {
4412 defm PSZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, v4f32x_info,
4413 itins.s, IsCommutable>, EVEX_V128, PS,
4414 EVEX_CD8<32, CD8VF>;
4415 defm PSZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, v8f32x_info,
4416 itins.s, IsCommutable>, EVEX_V256, PS,
4417 EVEX_CD8<32, CD8VF>;
4418 defm PDZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, v2f64x_info,
4419 itins.d, IsCommutable>, EVEX_V128, PD, VEX_W,
4420 EVEX_CD8<64, CD8VF>;
4421 defm PDZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, v4f64x_info,
4422 itins.d, IsCommutable>, EVEX_V256, PD, VEX_W,
4423 EVEX_CD8<64, CD8VF>;
// Rounding and SAE variants exist only at 512 bits (embedded rounding
// requires the full-length EVEX encoding).
4427 multiclass avx512_fp_binop_p_round<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd> {
4428 defm PSZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, v16f32_info>,
4429 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
4430 defm PDZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, v8f64_info>,
4431 EVEX_V512, PD, VEX_W,EVEX_CD8<64, CD8VF>;
4434 multiclass avx512_fp_binop_p_sae<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd> {
4435 defm PSZ : avx512_fp_sae_packed<opc, OpcodeStr, OpNodeRnd, v16f32_info>,
4436 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
4437 defm PDZ : avx512_fp_sae_packed<opc, OpcodeStr, OpNodeRnd, v8f64_info>,
4438 EVEX_V512, PD, VEX_W,EVEX_CD8<64, CD8VF>;
// Packed FP arithmetic: plain op + 512-bit rounding ({sae} for min/max).
4441 defm VADD : avx512_fp_binop_p<0x58, "vadd", fadd, HasAVX512,
4442 SSE_ALU_ITINS_P, 1>,
4443 avx512_fp_binop_p_round<0x58, "vadd", X86faddRnd>;
4444 defm VMUL : avx512_fp_binop_p<0x59, "vmul", fmul, HasAVX512,
4445 SSE_MUL_ITINS_P, 1>,
4446 avx512_fp_binop_p_round<0x59, "vmul", X86fmulRnd>;
4447 defm VSUB : avx512_fp_binop_p<0x5C, "vsub", fsub, HasAVX512, SSE_ALU_ITINS_P>,
4448 avx512_fp_binop_p_round<0x5C, "vsub", X86fsubRnd>;
4449 defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv, HasAVX512, SSE_DIV_ITINS_P>,
4450 avx512_fp_binop_p_round<0x5E, "vdiv", X86fdivRnd>;
4451 defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, HasAVX512,
4452 SSE_ALU_ITINS_P, 0>,
4453 avx512_fp_binop_p_sae<0x5D, "vmin", X86fminRnd>;
4454 defm VMAX : avx512_fp_binop_p<0x5F, "vmax", X86fmax, HasAVX512,
4455 SSE_ALU_ITINS_P, 0>,
4456 avx512_fp_binop_p_sae<0x5F, "vmax", X86fmaxRnd>;
// Commutable (unsafe-fp-math) min/max variants: codegen-only, same
// encodings as VMIN/VMAX.
4457 let isCodeGenOnly = 1 in {
4458 defm VMINC : avx512_fp_binop_p<0x5D, "vmin", X86fminc, HasAVX512,
4459 SSE_ALU_ITINS_P, 1>;
4460 defm VMAXC : avx512_fp_binop_p<0x5F, "vmax", X86fmaxc, HasAVX512,
4461 SSE_ALU_ITINS_P, 1>;
// FP logical ops (VANDPS etc.): null_frag, so no ISel patterns here;
// require AVX512DQ.
4463 defm VAND : avx512_fp_binop_p<0x54, "vand", null_frag, HasDQI,
4464 SSE_ALU_ITINS_P, 1>;
4465 defm VANDN : avx512_fp_binop_p<0x55, "vandn", null_frag, HasDQI,
4466 SSE_ALU_ITINS_P, 0>;
4467 defm VOR : avx512_fp_binop_p<0x56, "vor", null_frag, HasDQI,
4468 SSE_ALU_ITINS_P, 1>;
4469 defm VXOR : avx512_fp_binop_p<0x57, "vxor", null_frag, HasDQI,
4470 SSE_ALU_ITINS_P, 1>;
4472 // Patterns catch floating point selects with bitcasted integer logic ops.
// Each pattern maps a (possibly masked) integer logical op on FP-typed
// registers onto the corresponding VPAND/VPOR/... instruction variant
// (rrk, rrkz, rmk, rmkz, rmb, rmbk, rmbkz) looked up by name.
4473 multiclass avx512_fp_logical_lowering<string InstrStr, SDNode OpNode,
4474 X86VectorVTInfo _, Predicate prd> {
4475 let Predicates = [prd] in {
4476 // Masked register-register logical operations.
4477 def : Pat<(_.VT (vselect _.KRCWM:$mask,
4478 (bitconvert (_.i64VT (OpNode _.RC:$src1, _.RC:$src2))),
4480 (!cast<Instruction>(InstrStr#rrk) _.RC:$src0, _.KRCWM:$mask,
4481 _.RC:$src1, _.RC:$src2)>;
4482 def : Pat<(_.VT (vselect _.KRCWM:$mask,
4483 (bitconvert (_.i64VT (OpNode _.RC:$src1, _.RC:$src2))),
4485 (!cast<Instruction>(InstrStr#rrkz) _.KRCWM:$mask, _.RC:$src1,
4487 // Masked register-memory logical operations.
4488 def : Pat<(_.VT (vselect _.KRCWM:$mask,
4489 (bitconvert (_.i64VT (OpNode _.RC:$src1,
4490 (load addr:$src2)))),
4492 (!cast<Instruction>(InstrStr#rmk) _.RC:$src0, _.KRCWM:$mask,
4493 _.RC:$src1, addr:$src2)>;
4494 def : Pat<(_.VT (vselect _.KRCWM:$mask,
4495 (bitconvert (_.i64VT (OpNode _.RC:$src1, (load addr:$src2)))),
4497 (!cast<Instruction>(InstrStr#rmkz) _.KRCWM:$mask, _.RC:$src1,
4499 // Register-broadcast logical operations.
4500 def : Pat<(_.i64VT (OpNode _.RC:$src1,
4501 (bitconvert (_.VT (X86VBroadcast
4502 (_.ScalarLdFrag addr:$src2)))))),
4503 (!cast<Instruction>(InstrStr#rmb) _.RC:$src1, addr:$src2)>;
4504 def : Pat<(_.VT (vselect _.KRCWM:$mask,
4506 (_.i64VT (OpNode _.RC:$src1,
4509 (_.ScalarLdFrag addr:$src2))))))),
4511 (!cast<Instruction>(InstrStr#rmbk) _.RC:$src0, _.KRCWM:$mask,
4512 _.RC:$src1, addr:$src2)>;
4513 def : Pat<(_.VT (vselect _.KRCWM:$mask,
4515 (_.i64VT (OpNode _.RC:$src1,
4518 (_.ScalarLdFrag addr:$src2))))))),
4520 (!cast<Instruction>(InstrStr#rmbkz) _.KRCWM:$mask,
4521 _.RC:$src1, addr:$src2)>;
// Apply the lowering to every FP type/length combination, choosing the
// D (f32) or Q (f64) instruction by element width.
4525 multiclass avx512_fp_logical_lowering_sizes<string InstrStr, SDNode OpNode> {
4526 defm : avx512_fp_logical_lowering<InstrStr#DZ128, OpNode, v4f32x_info, HasVLX>;
4527 defm : avx512_fp_logical_lowering<InstrStr#QZ128, OpNode, v2f64x_info, HasVLX>;
4528 defm : avx512_fp_logical_lowering<InstrStr#DZ256, OpNode, v8f32x_info, HasVLX>;
4529 defm : avx512_fp_logical_lowering<InstrStr#QZ256, OpNode, v4f64x_info, HasVLX>;
4530 defm : avx512_fp_logical_lowering<InstrStr#DZ, OpNode, v16f32_info, HasAVX512>;
4531 defm : avx512_fp_logical_lowering<InstrStr#QZ, OpNode, v8f64_info, HasAVX512>;
4534 defm : avx512_fp_logical_lowering_sizes<"VPAND", and>;
4535 defm : avx512_fp_logical_lowering_sizes<"VPOR", or>;
4536 defm : avx512_fp_logical_lowering_sizes<"VPXOR", xor>;
4537 defm : avx512_fp_logical_lowering_sizes<"VPANDN", X86andnp>;
// Lower scalar FP logical ops by bouncing through the 128-bit packed
// instructions: copy the FR32X/FR64X operands into VR128X, run the
// packed VANDPS/VORPD/... and copy the result back. Requires DQ (for
// the VL packed logic instructions) and VLX (for the 128-bit forms).
4539 let Predicates = [HasVLX,HasDQI] in {
4540 // Use packed logical operations for scalar ops.
4541 def : Pat<(f64 (X86fand FR64X:$src1, FR64X:$src2)),
4542 (COPY_TO_REGCLASS (VANDPDZ128rr
4543 (COPY_TO_REGCLASS FR64X:$src1, VR128X),
4544 (COPY_TO_REGCLASS FR64X:$src2, VR128X)), FR64X)>;
4545 def : Pat<(f64 (X86for FR64X:$src1, FR64X:$src2)),
4546 (COPY_TO_REGCLASS (VORPDZ128rr
4547 (COPY_TO_REGCLASS FR64X:$src1, VR128X),
4548 (COPY_TO_REGCLASS FR64X:$src2, VR128X)), FR64X)>;
4549 def : Pat<(f64 (X86fxor FR64X:$src1, FR64X:$src2)),
4550 (COPY_TO_REGCLASS (VXORPDZ128rr
4551 (COPY_TO_REGCLASS FR64X:$src1, VR128X),
4552 (COPY_TO_REGCLASS FR64X:$src2, VR128X)), FR64X)>;
4553 def : Pat<(f64 (X86fandn FR64X:$src1, FR64X:$src2)),
4554 (COPY_TO_REGCLASS (VANDNPDZ128rr
4555 (COPY_TO_REGCLASS FR64X:$src1, VR128X),
4556 (COPY_TO_REGCLASS FR64X:$src2, VR128X)), FR64X)>;
// Same lowering for single precision.
4558 def : Pat<(f32 (X86fand FR32X:$src1, FR32X:$src2)),
4559 (COPY_TO_REGCLASS (VANDPSZ128rr
4560 (COPY_TO_REGCLASS FR32X:$src1, VR128X),
4561 (COPY_TO_REGCLASS FR32X:$src2, VR128X)), FR32X)>;
4562 def : Pat<(f32 (X86for FR32X:$src1, FR32X:$src2)),
4563 (COPY_TO_REGCLASS (VORPSZ128rr
4564 (COPY_TO_REGCLASS FR32X:$src1, VR128X),
4565 (COPY_TO_REGCLASS FR32X:$src2, VR128X)), FR32X)>;
4566 def : Pat<(f32 (X86fxor FR32X:$src1, FR32X:$src2)),
4567 (COPY_TO_REGCLASS (VXORPSZ128rr
4568 (COPY_TO_REGCLASS FR32X:$src1, VR128X),
4569 (COPY_TO_REGCLASS FR32X:$src2, VR128X)), FR32X)>;
4570 def : Pat<(f32 (X86fandn FR32X:$src1, FR32X:$src2)),
4571 (COPY_TO_REGCLASS (VANDNPSZ128rr
4572 (COPY_TO_REGCLASS FR32X:$src1, VR128X),
4573 (COPY_TO_REGCLASS FR32X:$src2, VR128X)), FR32X)>;
// Packed VSCALEF: defines the rr (reg/reg), rm (reg/mem) and rmb
// (broadcast-from-scalar-memory) maskable forms for one vector width.
// All forms pass FROUND_CURRENT, i.e. use the current rounding mode.
// NOTE(review): the closing braces of this multiclass fall outside this excerpt.
4576 multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
4577 X86VectorVTInfo _> {
4578 let ExeDomain = _.ExeDomain in {
4579 defm rr: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4580 (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
4581 "$src2, $src1", "$src1, $src2",
4582 (_.VT (OpNode _.RC:$src1, _.RC:$src2, (i32 FROUND_CURRENT)))>, EVEX_4V;
4583 defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4584 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
4585 "$src2, $src1", "$src1, $src2",
4586 (OpNode _.RC:$src1, (_.LdFrag addr:$src2), (i32 FROUND_CURRENT))>, EVEX_4V;
4587 defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4588 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
4589 "${src2}"##_.BroadcastStr##", $src1",
4590 "$src1, ${src2}"##_.BroadcastStr,
4591 (OpNode _.RC:$src1, (_.VT (X86VBroadcast
4592 (_.ScalarLdFrag addr:$src2))), (i32 FROUND_CURRENT))>,
// Scalar VSCALEF: rr and rm maskable forms for one scalar type; the memory
// form folds the scalar load via scalar_to_vector.  Uses FROUND_CURRENT.
4597 multiclass avx512_fp_scalef_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
4598 X86VectorVTInfo _> {
4599 let ExeDomain = _.ExeDomain in {
4600 defm rr: AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
4601 (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
4602 "$src2, $src1", "$src1, $src2",
4603 (_.VT (OpNode _.RC:$src1, _.RC:$src2, (i32 FROUND_CURRENT)))>;
4604 defm rm: AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
4605 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
4606 "$src2, $src1", "$src1, $src2",
4608 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
4609 (i32 FROUND_CURRENT))>;
// Instantiates packed (PS/PD, 512-bit always; 128/256-bit under VLX) and
// scalar (SS/SD) VSCALEF forms.  Note the packed and scalar variants use
// different opcodes (opc vs. opcScaler) and different SDNodes.
4613 multiclass avx512_fp_scalef_all<bits<8> opc, bits<8> opcScaler, string OpcodeStr, SDNode OpNode, SDNode OpNodeScal> {
4614 defm PSZ : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v16f32_info>,
4615 avx512_fp_round_packed<opc, OpcodeStr, OpNode, v16f32_info>,
4616 EVEX_V512, EVEX_CD8<32, CD8VF>;
4617 defm PDZ : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v8f64_info>,
4618 avx512_fp_round_packed<opc, OpcodeStr, OpNode, v8f64_info>,
4619 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
4620 defm SSZ128 : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, OpNodeScal, f32x_info>,
4621 avx512_fp_scalar_round<opcScaler, OpcodeStr##"ss", f32x_info, OpNodeScal, SSE_ALU_ITINS_S.s>,
4622 EVEX_4V,EVEX_CD8<32, CD8VT1>;
4623 defm SDZ128 : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, OpNodeScal, f64x_info>,
4624 avx512_fp_scalar_round<opcScaler, OpcodeStr##"sd", f64x_info, OpNodeScal, SSE_ALU_ITINS_S.d>,
4625 EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
4627 // Define only if AVX512VL feature is present.
4628 let Predicates = [HasVLX] in {
4629 defm PSZ128 : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v4f32x_info>,
4630 EVEX_V128, EVEX_CD8<32, CD8VF>;
4631 defm PSZ256 : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v8f32x_info>,
4632 EVEX_V256, EVEX_CD8<32, CD8VF>;
4633 defm PDZ128 : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v2f64x_info>,
4634 EVEX_V128, VEX_W, EVEX_CD8<64, CD8VF>;
4635 defm PDZ256 : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v4f64x_info>,
4636 EVEX_V256, VEX_W, EVEX_CD8<64, CD8VF>;
// VSCALEF: packed opcode 0x2C, scalar opcode 0x2D, in the T8PD encoding space.
4639 defm VSCALEF : avx512_fp_scalef_all<0x2C, 0x2D, "vscalef", X86scalef, X86scalefs>, T8PD;
4641 //===----------------------------------------------------------------------===//
4642 // AVX-512 VPTESTM instructions
4643 //===----------------------------------------------------------------------===//
// VPTESTM-style compare producing a mask register: rr and rm forms for one
// vector width.  The register form is commutable.
4645 multiclass avx512_vptest<bits<8> opc, string OpcodeStr, SDNode OpNode,
4646 X86VectorVTInfo _> {
4647 let isCommutable = 1 in
4648 defm rr : AVX512_maskable_cmp<opc, MRMSrcReg, _, (outs _.KRC:$dst),
4649 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4650 "$src2, $src1", "$src1, $src2",
4651 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))>,
4653 defm rm : AVX512_maskable_cmp<opc, MRMSrcMem, _, (outs _.KRC:$dst),
4654 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
4655 "$src2, $src1", "$src1, $src2",
4656 (OpNode (_.VT _.RC:$src1),
4657 (_.VT (bitconvert (_.LdFrag addr:$src2))))>,
4659 EVEX_CD8<_.EltSize, CD8VF>;
// Broadcast-memory (rmb) companion form for the VPTESTM-style compares:
// second operand is a scalar memory element broadcast to the full vector.
4662 multiclass avx512_vptest_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
4663 X86VectorVTInfo _> {
4664 defm rmb : AVX512_maskable_cmp<opc, MRMSrcMem, _, (outs _.KRC:$dst),
4665 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
4666 "${src2}"##_.BroadcastStr##", $src1",
4667 "$src1, ${src2}"##_.BroadcastStr,
4668 (OpNode (_.VT _.RC:$src1), (_.VT (X86VBroadcast
4669 (_.ScalarLdFrag addr:$src2))))>,
4670 EVEX_B, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
4673 // Use the 512-bit instruction to implement the 128/256-bit forms when the
// VLX feature is unavailable (NoVLX): widen both operands into a ZMM via
// INSERT_SUBREG, run the Z-suffixed instruction, and copy out the mask.
4674 multiclass avx512_vptest_lowering<SDNode OpNode, X86VectorVTInfo ExtendInfo,
4675 X86VectorVTInfo _, string Suffix> {
4676 def : Pat<(_.KVT (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))),
4677 (_.KVT (COPY_TO_REGCLASS
4678 (!cast<Instruction>(NAME # Suffix # "Zrr")
4679 (INSERT_SUBREG (ExtendInfo.VT (IMPLICIT_DEF)),
4680 _.RC:$src1, _.SubRegIdx),
4681 (INSERT_SUBREG (ExtendInfo.VT (IMPLICIT_DEF)),
4682 _.RC:$src2, _.SubRegIdx)),
// D/Q element sizes: 512-bit always (HasAVX512), 128/256-bit under VLX,
// with _Alt widened lowerings for the NoVLX configuration.
4686 multiclass avx512_vptest_dq_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
4687 AVX512VLVectorVTInfo _, string Suffix> {
4688 let Predicates = [HasAVX512] in
4689 defm Z : avx512_vptest<opc, OpcodeStr, OpNode, _.info512>,
4690 avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
4692 let Predicates = [HasAVX512, HasVLX] in {
4693 defm Z256 : avx512_vptest<opc, OpcodeStr, OpNode, _.info256>,
4694 avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
4695 defm Z128 : avx512_vptest<opc, OpcodeStr, OpNode, _.info128>,
4696 avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info128>, EVEX_V128;
4698 let Predicates = [HasAVX512, NoVLX] in {
4699 defm Z256_Alt : avx512_vptest_lowering< OpNode, _.info512, _.info256, Suffix>;
4700 defm Z128_Alt : avx512_vptest_lowering< OpNode, _.info512, _.info128, Suffix>;
// Convenience wrapper producing both the "d" (i32) and "q" (i64, VEX_W) forms.
4704 multiclass avx512_vptest_dq<bits<8> opc, string OpcodeStr, SDNode OpNode> {
4705 defm D : avx512_vptest_dq_sizes<opc, OpcodeStr#"d", OpNode,
4706 avx512vl_i32_info, "D">;
4707 defm Q : avx512_vptest_dq_sizes<opc, OpcodeStr#"q", OpNode,
4708 avx512vl_i64_info, "Q">, VEX_W;
// W/B element sizes (BWI feature): 512-bit under HasBWI, 128/256-bit under
// VLX+BWI, with _Alt widened lowerings under NoVLX.
4711 multiclass avx512_vptest_wb<bits<8> opc, string OpcodeStr,
4713 let Predicates = [HasBWI] in {
4714 defm WZ: avx512_vptest<opc, OpcodeStr#"w", OpNode, v32i16_info>,
4716 defm BZ: avx512_vptest<opc, OpcodeStr#"b", OpNode, v64i8_info>,
4719 let Predicates = [HasVLX, HasBWI] in {
4721 defm WZ256: avx512_vptest<opc, OpcodeStr#"w", OpNode, v16i16x_info>,
4723 defm WZ128: avx512_vptest<opc, OpcodeStr#"w", OpNode, v8i16x_info>,
4725 defm BZ256: avx512_vptest<opc, OpcodeStr#"b", OpNode, v32i8x_info>,
4727 defm BZ128: avx512_vptest<opc, OpcodeStr#"b", OpNode, v16i8x_info>,
4731 let Predicates = [HasAVX512, NoVLX] in {
4732 defm BZ256_Alt : avx512_vptest_lowering< OpNode, v64i8_info, v32i8x_info, "B">;
4733 defm BZ128_Alt : avx512_vptest_lowering< OpNode, v64i8_info, v16i8x_info, "B">;
4734 defm WZ256_Alt : avx512_vptest_lowering< OpNode, v32i16_info, v16i16x_info, "W">;
4735 defm WZ128_Alt : avx512_vptest_lowering< OpNode, v32i16_info, v8i16x_info, "W">;
// All element widths for one compare node; VPTESTM and VPTESTNM share the
// same opcodes and are distinguished by encoding prefix (T8PD vs. T8XS).
4740 multiclass avx512_vptest_all_forms<bits<8> opc_wb, bits<8> opc_dq, string OpcodeStr,
4742 avx512_vptest_wb <opc_wb, OpcodeStr, OpNode>,
4743 avx512_vptest_dq<opc_dq, OpcodeStr, OpNode>;
4745 defm VPTESTM : avx512_vptest_all_forms<0x26, 0x27, "vptestm", X86testm>, T8PD;
4746 defm VPTESTNM : avx512_vptest_all_forms<0x26, 0x27, "vptestnm", X86testnm>, T8XS;
4749 //===----------------------------------------------------------------------===//
4750 // AVX-512 Shift instructions
4751 //===----------------------------------------------------------------------===//
// Shift-by-immediate: ri (register, imm8) and mi (memory, imm8) maskable forms.
4752 multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
4753 string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
4754 let ExeDomain = _.ExeDomain in {
4755 defm ri : AVX512_maskable<opc, ImmFormR, _, (outs _.RC:$dst),
4756 (ins _.RC:$src1, u8imm:$src2), OpcodeStr,
4757 "$src2, $src1", "$src1, $src2",
4758 (_.VT (OpNode _.RC:$src1, (i8 imm:$src2))),
4759 SSE_INTSHIFT_ITINS_P.rr>;
4760 defm mi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
4761 (ins _.MemOp:$src1, u8imm:$src2), OpcodeStr,
4762 "$src2, $src1", "$src1, $src2",
4763 (_.VT (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
4765 SSE_INTSHIFT_ITINS_P.rm>;
// Shift-by-immediate, broadcast-memory (mbi) form: the vector operand is a
// scalar memory element broadcast to full width (EVEX_B).
4769 multiclass avx512_shift_rmbi<bits<8> opc, Format ImmFormM,
4770 string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
4771 let ExeDomain = _.ExeDomain in
4772 defm mbi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
4773 (ins _.ScalarMemOp:$src1, u8imm:$src2), OpcodeStr,
4774 "$src2, ${src1}"##_.BroadcastStr, "${src1}"##_.BroadcastStr##", $src2",
4775 (_.VT (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src1)), (i8 imm:$src2))),
4776 SSE_INTSHIFT_ITINS_P.rm>, EVEX_B;
// Shift-by-xmm-count: the shift amount (src2) is always a 128-bit register
// or i128 memory operand, regardless of the destination vector width.
4779 multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4780 ValueType SrcVT, PatFrag bc_frag, X86VectorVTInfo _> {
4781 // src2 is always 128-bit
4782 let ExeDomain = _.ExeDomain in {
4783 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4784 (ins _.RC:$src1, VR128X:$src2), OpcodeStr,
4785 "$src2, $src1", "$src1, $src2",
4786 (_.VT (OpNode _.RC:$src1, (SrcVT VR128X:$src2))),
4787 SSE_INTSHIFT_ITINS_P.rr>, AVX512BIBase, EVEX_4V;
4788 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4789 (ins _.RC:$src1, i128mem:$src2), OpcodeStr,
4790 "$src2, $src1", "$src1, $src2",
4791 (_.VT (OpNode _.RC:$src1, (bc_frag (loadv2i64 addr:$src2)))),
4792 SSE_INTSHIFT_ITINS_P.rm>, AVX512BIBase,
// Shift-by-xmm-count across 128/256/512-bit widths.  Note the per-width
// CD8 tuple: CD8VQ for 512, CD8VH for 256, CD8VF for 128 (the shift-count
// memory operand is always 128 bits, so the scaling fraction changes).
4797 multiclass avx512_shift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
4798 ValueType SrcVT, PatFrag bc_frag,
4799 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
4800 let Predicates = [prd] in
4801 defm Z : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag,
4802 VTInfo.info512>, EVEX_V512,
4803 EVEX_CD8<VTInfo.info512.EltSize, CD8VQ> ;
4804 let Predicates = [prd, HasVLX] in {
4805 defm Z256 : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag,
4806 VTInfo.info256>, EVEX_V256,
4807 EVEX_CD8<VTInfo.info256.EltSize, CD8VH>;
4808 defm Z128 : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag,
4809 VTInfo.info128>, EVEX_V128,
4810 EVEX_CD8<VTInfo.info128.EltSize, CD8VF>;
// D/Q/W variants of the shift-by-xmm-count instructions; W requires BWI.
4814 multiclass avx512_shift_types<bits<8> opcd, bits<8> opcq, bits<8> opcw,
4815 string OpcodeStr, SDNode OpNode> {
4816 defm D : avx512_shift_sizes<opcd, OpcodeStr#"d", OpNode, v4i32, bc_v4i32,
4817 avx512vl_i32_info, HasAVX512>;
4818 defm Q : avx512_shift_sizes<opcq, OpcodeStr#"q", OpNode, v2i64, bc_v2i64,
4819 avx512vl_i64_info, HasAVX512>, VEX_W;
4820 defm W : avx512_shift_sizes<opcw, OpcodeStr#"w", OpNode, v8i16, bc_v8i16,
4821 avx512vl_i16_info, HasBWI>;
// Shift-by-immediate across 128/256/512-bit widths, each pairing the plain
// rmi forms with the broadcast rmbi form.
4824 multiclass avx512_shift_rmi_sizes<bits<8> opc, Format ImmFormR, Format ImmFormM,
4825 string OpcodeStr, SDNode OpNode,
4826 AVX512VLVectorVTInfo VTInfo> {
4827 let Predicates = [HasAVX512] in
4828 defm Z: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
4830 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
4831 VTInfo.info512>, EVEX_V512;
4832 let Predicates = [HasAVX512, HasVLX] in {
4833 defm Z256: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
4835 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
4836 VTInfo.info256>, EVEX_V256;
4837 defm Z128: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
4839 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
4840 VTInfo.info128>, EVEX_V128;
// Word-element shift-by-immediate (BWI feature); no broadcast form exists
// for 16-bit elements, so only the plain rmi forms are instantiated.
4844 multiclass avx512_shift_rmi_w<bits<8> opcw,
4845 Format ImmFormR, Format ImmFormM,
4846 string OpcodeStr, SDNode OpNode> {
4847 let Predicates = [HasBWI] in
4848 defm WZ: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
4849 v32i16_info>, EVEX_V512;
4850 let Predicates = [HasVLX, HasBWI] in {
4851 defm WZ256: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
4852 v16i16x_info>, EVEX_V256;
4853 defm WZ128: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
4854 v8i16x_info>, EVEX_V128;
// D/Q variants of the shift-by-immediate instructions.
4858 multiclass avx512_shift_rmi_dq<bits<8> opcd, bits<8> opcq,
4859 Format ImmFormR, Format ImmFormM,
4860 string OpcodeStr, SDNode OpNode> {
4861 defm D: avx512_shift_rmi_sizes<opcd, ImmFormR, ImmFormM, OpcodeStr#"d", OpNode,
4862 avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
4863 defm Q: avx512_shift_rmi_sizes<opcq, ImmFormR, ImmFormM, OpcodeStr#"q", OpNode,
4864 avx512vl_i64_info>, EVEX_CD8<64, CD8VF>, VEX_W;
// Immediate shifts/rotates (opcode selected by the MRM reg field: /2 srl,
// /6 sll, /4 sra, /0 ror, /1 rol) plus the shift-by-xmm-count variants.
4867 defm VPSRL : avx512_shift_rmi_dq<0x72, 0x73, MRM2r, MRM2m, "vpsrl", X86vsrli>,
4868 avx512_shift_rmi_w<0x71, MRM2r, MRM2m, "vpsrlw", X86vsrli>, AVX512BIi8Base, EVEX_4V;
4870 defm VPSLL : avx512_shift_rmi_dq<0x72, 0x73, MRM6r, MRM6m, "vpsll", X86vshli>,
4871 avx512_shift_rmi_w<0x71, MRM6r, MRM6m, "vpsllw", X86vshli>, AVX512BIi8Base, EVEX_4V;
4873 defm VPSRA : avx512_shift_rmi_dq<0x72, 0x72, MRM4r, MRM4m, "vpsra", X86vsrai>,
4874 avx512_shift_rmi_w<0x71, MRM4r, MRM4m, "vpsraw", X86vsrai>, AVX512BIi8Base, EVEX_4V;
4876 defm VPROR : avx512_shift_rmi_dq<0x72, 0x72, MRM0r, MRM0m, "vpror", X86vrotri>, AVX512BIi8Base, EVEX_4V;
4877 defm VPROL : avx512_shift_rmi_dq<0x72, 0x72, MRM1r, MRM1m, "vprol", X86vrotli>, AVX512BIi8Base, EVEX_4V;
4879 defm VPSLL : avx512_shift_types<0xF2, 0xF3, 0xF1, "vpsll", X86vshl>;
4880 defm VPSRA : avx512_shift_types<0xE2, 0xE2, 0xE1, "vpsra", X86vsra>;
4881 defm VPSRL : avx512_shift_types<0xD2, 0xD3, 0xD1, "vpsrl", X86vsrl>;
4883 // Use 512bit VPSRA/VPSRAI version to implement v2i64/v4i64 in case NoVLX.
// Each pattern widens the sources into a ZMM, performs the 512-bit shift,
// and extracts the low subregister.
// NOTE(review): the result-instruction name lines (presumably VPSRAQZrr /
// VPSRAQZri) are missing from this excerpt — confirm against upstream.
4884 let Predicates = [HasAVX512, NoVLX] in {
4885 def : Pat<(v4i64 (X86vsra (v4i64 VR256X:$src1), (v2i64 VR128X:$src2))),
4886 (EXTRACT_SUBREG (v8i64
4888 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
4889 VR128X:$src2)), sub_ymm)>;
4891 def : Pat<(v2i64 (X86vsra (v2i64 VR128X:$src1), (v2i64 VR128X:$src2))),
4892 (EXTRACT_SUBREG (v8i64
4894 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF), VR128X:$src1, sub_xmm)),
4895 VR128X:$src2)), sub_xmm)>;
4897 def : Pat<(v4i64 (X86vsrai (v4i64 VR256X:$src1), (i8 imm:$src2))),
4898 (EXTRACT_SUBREG (v8i64
4900 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF), VR256X:$src1, sub_ymm)),
4901 imm:$src2)), sub_ymm)>;
4903 def : Pat<(v2i64 (X86vsrai (v2i64 VR128X:$src1), (i8 imm:$src2))),
4904 (EXTRACT_SUBREG (v8i64
4906 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF), VR128X:$src1, sub_xmm)),
4907 imm:$src2)), sub_xmm)>;
4910 //===-------------------------------------------------------------------===//
4911 // Variable Bit Shifts
4912 //===-------------------------------------------------------------------===//
// Per-element variable shift: rr and rm maskable forms for one vector width.
// Also reused for VPERM/VPSHUFB-style two-operand ops (see uses below).
4913 multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
4914 X86VectorVTInfo _> {
4915 let ExeDomain = _.ExeDomain in {
4916 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
4917 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
4918 "$src2, $src1", "$src1, $src2",
4919 (_.VT (OpNode _.RC:$src1, (_.VT _.RC:$src2))),
4920 SSE_INTSHIFT_ITINS_P.rr>, AVX5128IBase, EVEX_4V;
4921 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4922 (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
4923 "$src2, $src1", "$src1, $src2",
4924 (_.VT (OpNode _.RC:$src1,
4925 (_.VT (bitconvert (_.LdFrag addr:$src2))))),
4926 SSE_INTSHIFT_ITINS_P.rm>, AVX5128IBase, EVEX_4V,
4927 EVEX_CD8<_.EltSize, CD8VF>;
// Broadcast-memory (rmb) companion form for the variable shifts.
4931 multiclass avx512_var_shift_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
4932 X86VectorVTInfo _> {
4933 let ExeDomain = _.ExeDomain in
4934 defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
4935 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
4936 "${src2}"##_.BroadcastStr##", $src1",
4937 "$src1, ${src2}"##_.BroadcastStr,
4938 (_.VT (OpNode _.RC:$src1, (_.VT (X86VBroadcast
4939 (_.ScalarLdFrag addr:$src2))))),
4940 SSE_INTSHIFT_ITINS_P.rm>, AVX5128IBase, EVEX_B,
4941 EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
// Variable shift across 128/256/512-bit widths, each with broadcast form.
4944 multiclass avx512_var_shift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
4945 AVX512VLVectorVTInfo _> {
4946 let Predicates = [HasAVX512] in
4947 defm Z : avx512_var_shift<opc, OpcodeStr, OpNode, _.info512>,
4948 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
4950 let Predicates = [HasAVX512, HasVLX] in {
4951 defm Z256 : avx512_var_shift<opc, OpcodeStr, OpNode, _.info256>,
4952 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
4953 defm Z128 : avx512_var_shift<opc, OpcodeStr, OpNode, _.info128>,
4954 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info128>, EVEX_V128;
// D/Q variants of the variable shifts.
4958 multiclass avx512_var_shift_types<bits<8> opc, string OpcodeStr,
4960 defm D : avx512_var_shift_sizes<opc, OpcodeStr#"d", OpNode,
4962 defm Q : avx512_var_shift_sizes<opc, OpcodeStr#"q", OpNode,
4963 avx512vl_i64_info>, VEX_W;
4966 // Use the 512-bit version to implement the 128/256-bit forms when the VLX
// feature is unavailable (NoVLX): widen into a ZMM, run the Z-suffixed
// instruction, and extract the low subregister.
4967 multiclass avx512_var_shift_lowering<AVX512VLVectorVTInfo _, string OpcodeStr,
4968 SDNode OpNode, list<Predicate> p> {
4969 let Predicates = p in {
4970 def : Pat<(_.info256.VT (OpNode (_.info256.VT _.info256.RC:$src1),
4971 (_.info256.VT _.info256.RC:$src2))),
4973 (!cast<Instruction>(OpcodeStr#"Zrr")
4974 (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR256X:$src1, sub_ymm),
4975 (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR256X:$src2, sub_ymm)),
4978 def : Pat<(_.info128.VT (OpNode (_.info128.VT _.info128.RC:$src1),
4979 (_.info128.VT _.info128.RC:$src2))),
4981 (!cast<Instruction>(OpcodeStr#"Zrr")
4982 (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR128X:$src1, sub_xmm),
4983 (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR128X:$src2, sub_xmm)),
// Word-element variable shift (BWI feature); 16-bit elements have no
// broadcast form, so only the plain rr/rm forms are instantiated.
4987 multiclass avx512_var_shift_w<bits<8> opc, string OpcodeStr,
4989 let Predicates = [HasBWI] in
4990 defm WZ: avx512_var_shift<opc, OpcodeStr, OpNode, v32i16_info>,
4992 let Predicates = [HasVLX, HasBWI] in {
4994 defm WZ256: avx512_var_shift<opc, OpcodeStr, OpNode, v16i16x_info>,
4996 defm WZ128: avx512_var_shift<opc, OpcodeStr, OpNode, v8i16x_info>,
// Variable shift/rotate instantiations, plus widened NoVLX lowerings for the
// combinations that lack native sub-512-bit instructions.
5001 defm VPSLLV : avx512_var_shift_types<0x47, "vpsllv", shl>,
5002 avx512_var_shift_w<0x12, "vpsllvw", shl>;
5004 defm VPSRAV : avx512_var_shift_types<0x46, "vpsrav", sra>,
5005 avx512_var_shift_w<0x11, "vpsravw", sra>;
5007 defm VPSRLV : avx512_var_shift_types<0x45, "vpsrlv", srl>,
5008 avx512_var_shift_w<0x10, "vpsrlvw", srl>;
5010 defm VPRORV : avx512_var_shift_types<0x14, "vprorv", rotr>;
5011 defm VPROLV : avx512_var_shift_types<0x15, "vprolv", rotl>;
5013 defm : avx512_var_shift_lowering<avx512vl_i64_info, "VPSRAVQ", sra, [HasAVX512, NoVLX]>;
5014 defm : avx512_var_shift_lowering<avx512vl_i16_info, "VPSLLVW", shl, [HasBWI, NoVLX]>;
5015 defm : avx512_var_shift_lowering<avx512vl_i16_info, "VPSRAVW", sra, [HasBWI, NoVLX]>;
5016 defm : avx512_var_shift_lowering<avx512vl_i16_info, "VPSRLVW", srl, [HasBWI, NoVLX]>;
5018 // Special handling for the VPSRAV intrinsics: select the plain, merge-masked
// (vselect with $src0) and zero-masked (vselect with ImmAllZerosV) forms for
// both register and folded-load operands.
5019 multiclass avx512_var_shift_int_lowering<string InstrStr, X86VectorVTInfo _,
5020 list<Predicate> p> {
5021 let Predicates = p in {
5022 def : Pat<(_.VT (X86vsrav _.RC:$src1, _.RC:$src2)),
5023 (!cast<Instruction>(InstrStr#_.ZSuffix#rr) _.RC:$src1,
5025 def : Pat<(_.VT (X86vsrav _.RC:$src1, (bitconvert (_.LdFrag addr:$src2)))),
5026 (!cast<Instruction>(InstrStr#_.ZSuffix##rm)
5027 _.RC:$src1, addr:$src2)>;
5028 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5029 (X86vsrav _.RC:$src1, _.RC:$src2), _.RC:$src0)),
5030 (!cast<Instruction>(InstrStr#_.ZSuffix#rrk) _.RC:$src0,
5031 _.KRC:$mask, _.RC:$src1, _.RC:$src2)>;
5032 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5033 (X86vsrav _.RC:$src1, (bitconvert (_.LdFrag addr:$src2))),
5035 (!cast<Instruction>(InstrStr#_.ZSuffix##rmk) _.RC:$src0,
5036 _.KRC:$mask, _.RC:$src1, addr:$src2)>;
5037 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5038 (X86vsrav _.RC:$src1, _.RC:$src2), _.ImmAllZerosV)),
5039 (!cast<Instruction>(InstrStr#_.ZSuffix#rrkz) _.KRC:$mask,
5040 _.RC:$src1, _.RC:$src2)>;
5041 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5042 (X86vsrav _.RC:$src1, (bitconvert (_.LdFrag addr:$src2))),
5044 (!cast<Instruction>(InstrStr#_.ZSuffix##rmkz) _.KRC:$mask,
5045 _.RC:$src1, addr:$src2)>;
// Extends the intrinsic lowering with the broadcast-memory (rmb/rmbk/rmbkz)
// forms; only valid for element types that have a broadcast encoding (D/Q).
5049 multiclass avx512_var_shift_int_lowering_mb<string InstrStr, X86VectorVTInfo _,
5050 list<Predicate> p> :
5051 avx512_var_shift_int_lowering<InstrStr, _, p> {
5052 let Predicates = p in {
5053 def : Pat<(_.VT (X86vsrav _.RC:$src1,
5054 (X86VBroadcast (_.ScalarLdFrag addr:$src2)))),
5055 (!cast<Instruction>(InstrStr#_.ZSuffix##rmb)
5056 _.RC:$src1, addr:$src2)>;
5057 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5058 (X86vsrav _.RC:$src1,
5059 (X86VBroadcast (_.ScalarLdFrag addr:$src2))),
5061 (!cast<Instruction>(InstrStr#_.ZSuffix##rmbk) _.RC:$src0,
5062 _.KRC:$mask, _.RC:$src1, addr:$src2)>;
5063 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5064 (X86vsrav _.RC:$src1,
5065 (X86VBroadcast (_.ScalarLdFrag addr:$src2))),
5067 (!cast<Instruction>(InstrStr#_.ZSuffix##rmbkz) _.KRC:$mask,
5068 _.RC:$src1, addr:$src2)>;
// Instantiate the X86vsrav lowerings: W (no broadcast form) and D/Q (with
// broadcast) at each width supported by the corresponding feature set.
5072 defm : avx512_var_shift_int_lowering<"VPSRAVW", v8i16x_info, [HasVLX, HasBWI]>;
5073 defm : avx512_var_shift_int_lowering<"VPSRAVW", v16i16x_info, [HasVLX, HasBWI]>;
5074 defm : avx512_var_shift_int_lowering<"VPSRAVW", v32i16_info, [HasBWI]>;
5075 defm : avx512_var_shift_int_lowering_mb<"VPSRAVD", v4i32x_info, [HasVLX]>;
5076 defm : avx512_var_shift_int_lowering_mb<"VPSRAVD", v8i32x_info, [HasVLX]>;
5077 defm : avx512_var_shift_int_lowering_mb<"VPSRAVD", v16i32_info, [HasAVX512]>;
5078 defm : avx512_var_shift_int_lowering_mb<"VPSRAVQ", v2i64x_info, [HasVLX]>;
5079 defm : avx512_var_shift_int_lowering_mb<"VPSRAVQ", v4i64x_info, [HasVLX]>;
5080 defm : avx512_var_shift_int_lowering_mb<"VPSRAVQ", v8i64_info, [HasAVX512]>;
5082 //===-------------------------------------------------------------------===//
5083 // 1-src variable permutation VPERMW/D/Q
5084 //===-------------------------------------------------------------------===//
// VPERMD/Q-style single-source variable permute; reuses the var_shift
// multiclasses for the rr/rm/rmb forms.  No 128-bit version is defined
// (cross-lane permutes need at least 256 bits).
5085 multiclass avx512_vperm_dq_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
5086 AVX512VLVectorVTInfo _> {
5087 let Predicates = [HasAVX512] in
5088 defm Z : avx512_var_shift<opc, OpcodeStr, OpNode, _.info512>,
5089 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
5091 let Predicates = [HasAVX512, HasVLX] in
5092 defm Z256 : avx512_var_shift<opc, OpcodeStr, OpNode, _.info256>,
5093 avx512_var_shift_mb<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
// Immediate-controlled VPERMQ/VPERMPD; reuses the shift_rmi/rmbi
// multiclasses.  Only 256/512-bit forms (no 128-bit cross-lane permute).
5096 multiclass avx512_vpermi_dq_sizes<bits<8> opc, Format ImmFormR, Format ImmFormM,
5097 string OpcodeStr, SDNode OpNode,
5098 AVX512VLVectorVTInfo VTInfo> {
5099 let Predicates = [HasAVX512] in
5100 defm Z: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
5102 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
5103 VTInfo.info512>, EVEX_V512;
5104 let Predicates = [HasAVX512, HasVLX] in
5105 defm Z256: avx512_shift_rmi<opc, ImmFormR, ImmFormM, OpcodeStr, OpNode,
5107 avx512_shift_rmbi<opc, ImmFormM, OpcodeStr, OpNode,
5108 VTInfo.info256>, EVEX_V256;
// Byte/word variable permute (VPERMW under BWI, VPERMB under VBMI).
5111 multiclass avx512_vperm_bw<bits<8> opc, string OpcodeStr,
5112 Predicate prd, SDNode OpNode,
5113 AVX512VLVectorVTInfo _> {
5114 let Predicates = [prd] in
5115 defm Z: avx512_var_shift<opc, OpcodeStr, OpNode, _.info512>,
5117 let Predicates = [HasVLX, prd] in {
5118 defm Z256: avx512_var_shift<opc, OpcodeStr, OpNode, _.info256>,
5120 defm Z128: avx512_var_shift<opc, OpcodeStr, OpNode, _.info128>,
// VPERM instantiations.  VPERMW/VPERMB share opcode 0x8D (distinguished by
// feature/prefix), as do the D/Q and PS/PD pairs via VEX_W.
5125 defm VPERMW : avx512_vperm_bw<0x8D, "vpermw", HasBWI, X86VPermv,
5126 avx512vl_i16_info>, VEX_W;
5127 defm VPERMB : avx512_vperm_bw<0x8D, "vpermb", HasVBMI, X86VPermv,
5130 defm VPERMD : avx512_vperm_dq_sizes<0x36, "vpermd", X86VPermv,
5132 defm VPERMQ : avx512_vperm_dq_sizes<0x36, "vpermq", X86VPermv,
5133 avx512vl_i64_info>, VEX_W;
5134 defm VPERMPS : avx512_vperm_dq_sizes<0x16, "vpermps", X86VPermv,
5136 defm VPERMPD : avx512_vperm_dq_sizes<0x16, "vpermpd", X86VPermv,
5137 avx512vl_f64_info>, VEX_W;
5139 defm VPERMQ : avx512_vpermi_dq_sizes<0x00, MRMSrcReg, MRMSrcMem, "vpermq",
5140 X86VPermi, avx512vl_i64_info>,
5141 EVEX, AVX512AIi8Base, EVEX_CD8<64, CD8VF>, VEX_W;
5142 defm VPERMPD : avx512_vpermi_dq_sizes<0x01, MRMSrcReg, MRMSrcMem, "vpermpd",
5143 X86VPermi, avx512vl_f64_info>,
5144 EVEX, AVX512AIi8Base, EVEX_CD8<64, CD8VF>, VEX_W;
5145 //===----------------------------------------------------------------------===//
5146 // AVX-512 - VPERMIL
5147 //===----------------------------------------------------------------------===//
// Vector-controlled VPERMILPS/PD: the control operand uses its own VTInfo
// (integer elements) distinct from the data operand's FP VTInfo.
5149 multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode,
5150 X86VectorVTInfo _, X86VectorVTInfo Ctrl> {
5151 defm rr: AVX512_maskable<OpcVar, MRMSrcReg, _, (outs _.RC:$dst),
5152 (ins _.RC:$src1, Ctrl.RC:$src2), OpcodeStr,
5153 "$src2, $src1", "$src1, $src2",
5154 (_.VT (OpNode _.RC:$src1,
5155 (Ctrl.VT Ctrl.RC:$src2)))>,
5157 defm rm: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
5158 (ins _.RC:$src1, Ctrl.MemOp:$src2), OpcodeStr,
5159 "$src2, $src1", "$src1, $src2",
5162 (Ctrl.VT (bitconvert(Ctrl.LdFrag addr:$src2)))))>,
5163 T8PD, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
5164 defm rmb: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
5165 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
5166 "${src2}"##_.BroadcastStr##", $src1",
5167 "$src1, ${src2}"##_.BroadcastStr,
5170 (Ctrl.VT (X86VBroadcast
5171 (Ctrl.ScalarLdFrag addr:$src2)))))>,
5172 T8PD, EVEX_4V, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
// Vector-controlled VPERMIL across 128/256/512-bit widths.
5175 multiclass avx512_permil_vec_common<string OpcodeStr, bits<8> OpcVar,
5176 AVX512VLVectorVTInfo _, AVX512VLVectorVTInfo Ctrl>{
5177 let Predicates = [HasAVX512] in {
5178 defm Z : avx512_permil_vec<OpcVar, OpcodeStr, X86VPermilpv, _.info512,
5179 Ctrl.info512>, EVEX_V512;
5181 let Predicates = [HasAVX512, HasVLX] in {
5182 defm Z128 : avx512_permil_vec<OpcVar, OpcodeStr, X86VPermilpv, _.info128,
5183 Ctrl.info128>, EVEX_V128;
5184 defm Z256 : avx512_permil_vec<OpcVar, OpcodeStr, X86VPermilpv, _.info256,
5185 Ctrl.info256>, EVEX_V256;
// Both VPERMIL flavors under one name: vector-controlled (OpcVar) and
// immediate-controlled (OpcImm, via the shift_rmi machinery).
5189 multiclass avx512_permil<string OpcodeStr, bits<8> OpcImm, bits<8> OpcVar,
5190 AVX512VLVectorVTInfo _, AVX512VLVectorVTInfo Ctrl>{
5192 defm NAME: avx512_permil_vec_common<OpcodeStr, OpcVar, _, Ctrl>;
5193 defm NAME: avx512_shift_rmi_sizes<OpcImm, MRMSrcReg, MRMSrcMem, OpcodeStr,
5195 EVEX, AVX512AIi8Base, EVEX_CD8<_.info128.EltSize, CD8VF>;
// VPERMILPS (imm 0x04 / var 0x0C) and VPERMILPD (imm 0x05 / var 0x0D).
5198 let ExeDomain = SSEPackedSingle in
5199 defm VPERMILPS : avx512_permil<"vpermilps", 0x04, 0x0C, avx512vl_f32_info,
5201 let ExeDomain = SSEPackedDouble in
5202 defm VPERMILPD : avx512_permil<"vpermilpd", 0x05, 0x0D, avx512vl_f64_info,
5203 avx512vl_i64_info>, VEX_W;
5204 //===----------------------------------------------------------------------===//
5205 // AVX-512 - VPSHUFD, VPSHUFLW, VPSHUFHW
5206 //===----------------------------------------------------------------------===//
// VPSHUFD/VPSHUFHW/VPSHUFLW all share opcode 0x70, distinguished by prefix
// (66 / F3 / F2 encoded via the AVX512*Ii8Base classes).
5208 defm VPSHUFD : avx512_shift_rmi_sizes<0x70, MRMSrcReg, MRMSrcMem, "vpshufd",
5209 X86PShufd, avx512vl_i32_info>,
5210 EVEX, AVX512BIi8Base, EVEX_CD8<32, CD8VF>;
5211 defm VPSHUFH : avx512_shift_rmi_w<0x70, MRMSrcReg, MRMSrcMem, "vpshufhw",
5212 X86PShufhw>, EVEX, AVX512XSIi8Base;
5213 defm VPSHUFL : avx512_shift_rmi_w<0x70, MRMSrcReg, MRMSrcMem, "vpshuflw",
5214 X86PShuflw>, EVEX, AVX512XDIi8Base;
// VPSHUFB (byte shuffle) across widths; reuses avx512_var_shift for rr/rm.
5216 multiclass avx512_pshufb_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode> {
5217 let Predicates = [HasBWI] in
5218 defm Z: avx512_var_shift<opc, OpcodeStr, OpNode, v64i8_info>, EVEX_V512;
5220 let Predicates = [HasVLX, HasBWI] in {
5221 defm Z256: avx512_var_shift<opc, OpcodeStr, OpNode, v32i8x_info>, EVEX_V256;
5222 defm Z128: avx512_var_shift<opc, OpcodeStr, OpNode, v16i8x_info>, EVEX_V128;
5226 defm VPSHUFB: avx512_pshufb_sizes<0x00, "vpshufb", X86pshufb>;
5228 //===----------------------------------------------------------------------===//
5229 // Move Low to High and High to Low packed FP Instructions
5230 //===----------------------------------------------------------------------===//
// EVEX-encoded VMOVLHPS/VMOVHLPS register-register forms.
5231 def VMOVLHPSZrr : AVX512PSI<0x16, MRMSrcReg, (outs VR128X:$dst),
5232 (ins VR128X:$src1, VR128X:$src2),
5233 "vmovlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5234 [(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))],
5235 IIC_SSE_MOV_LH>, EVEX_4V;
5236 def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
5237 (ins VR128X:$src1, VR128X:$src2),
5238 "vmovhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5239 [(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))],
5240 IIC_SSE_MOV_LH>, EVEX_4V;
// Select the same instructions for the integer-typed Movlhps/Movhlps nodes.
5242 let Predicates = [HasAVX512] in {
5244 def : Pat<(v4i32 (X86Movlhps VR128X:$src1, VR128X:$src2)),
5245 (VMOVLHPSZrr VR128X:$src1, VR128X:$src2)>;
5246 def : Pat<(v2i64 (X86Movlhps VR128X:$src1, VR128X:$src2)),
5247 (VMOVLHPSZrr (v2i64 VR128X:$src1), VR128X:$src2)>;
5250 def : Pat<(v4i32 (X86Movhlps VR128X:$src1, VR128X:$src2)),
5251 (VMOVHLPSZrr VR128X:$src1, VR128X:$src2)>;
5254 //===----------------------------------------------------------------------===//
5255 // VMOVHPS/PD VMOVLPS Instructions
5256 // All patterns were taken from the SSE implementation.
5257 //===----------------------------------------------------------------------===//
// Load form of VMOVHPS/PD / VMOVLPS/PD: merges a 64-bit memory value into
// one half of the destination register.
5258 multiclass avx512_mov_hilo_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
5259 X86VectorVTInfo _> {
5260 let ExeDomain = _.ExeDomain in
5261 def rm : AVX512<opc, MRMSrcMem, (outs _.RC:$dst),
5262 (ins _.RC:$src1, f64mem:$src2),
5263 !strconcat(OpcodeStr,
5264 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5268 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))))))],
5269 IIC_SSE_MOV_LH>, EVEX_4V;
// VMOVHPS/HPD (0x16) and VMOVLPS/LPD (0x12) 128-bit load forms.
5272 defm VMOVHPSZ128 : avx512_mov_hilo_packed<0x16, "vmovhps", X86Movlhps,
5273 v4f32x_info>, EVEX_CD8<32, CD8VT2>, PS;
5274 defm VMOVHPDZ128 : avx512_mov_hilo_packed<0x16, "vmovhpd", X86Movlhpd,
5275 v2f64x_info>, EVEX_CD8<64, CD8VT1>, PD, VEX_W;
5276 defm VMOVLPSZ128 : avx512_mov_hilo_packed<0x12, "vmovlps", X86Movlps,
5277 v4f32x_info>, EVEX_CD8<32, CD8VT2>, PS;
5278 defm VMOVLPDZ128 : avx512_mov_hilo_packed<0x12, "vmovlpd", X86Movlpd,
5279 v2f64x_info>, EVEX_CD8<64, CD8VT1>, PD, VEX_W;
// Fold additional shuffle+load DAG shapes into the VMOVH/VMOVL load forms.
5281 let Predicates = [HasAVX512] in {
5283 def : Pat<(X86Movlhps VR128X:$src1,
5284 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
5285 (VMOVHPSZ128rm VR128X:$src1, addr:$src2)>;
5286 def : Pat<(X86Movlhps VR128X:$src1,
5287 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
5288 (VMOVHPSZ128rm VR128X:$src1, addr:$src2)>;
5290 def : Pat<(v2f64 (X86Unpckl VR128X:$src1,
5291 (scalar_to_vector (loadf64 addr:$src2)))),
5292 (VMOVHPDZ128rm VR128X:$src1, addr:$src2)>;
5293 def : Pat<(v2f64 (X86Unpckl VR128X:$src1,
5294 (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
5295 (VMOVHPDZ128rm VR128X:$src1, addr:$src2)>;
5297 def : Pat<(v4f32 (X86Movlps VR128X:$src1, (load addr:$src2))),
5298 (VMOVLPSZ128rm VR128X:$src1, addr:$src2)>;
5299 def : Pat<(v4i32 (X86Movlps VR128X:$src1, (load addr:$src2))),
5300 (VMOVLPSZ128rm VR128X:$src1, addr:$src2)>;
5302 def : Pat<(v2f64 (X86Movlpd VR128X:$src1, (load addr:$src2))),
5303 (VMOVLPDZ128rm VR128X:$src1, addr:$src2)>;
5304 def : Pat<(v2i64 (X86Movlpd VR128X:$src1, (load addr:$src2))),
5305 (VMOVLPDZ128rm VR128X:$src1, addr:$src2)>;
5306 def : Pat<(v2f64 (X86Movsd VR128X:$src1,
5307 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5308 (VMOVLPDZ128rm VR128X:$src1, addr:$src2)>;
// Store forms: write the high (0x17) or low (0x13) 64 bits of an XMM to mem.
5311 def VMOVHPSZ128mr : AVX512PSI<0x17, MRMDestMem, (outs),
5312 (ins f64mem:$dst, VR128X:$src),
5313 "vmovhps\t{$src, $dst|$dst, $src}",
5314 [(store (f64 (extractelt
5315 (X86Unpckh (bc_v2f64 (v4f32 VR128X:$src)),
5316 (bc_v2f64 (v4f32 VR128X:$src))),
5317 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>,
5318 EVEX, EVEX_CD8<32, CD8VT2>;
5319 def VMOVHPDZ128mr : AVX512PDI<0x17, MRMDestMem, (outs),
5320 (ins f64mem:$dst, VR128X:$src),
5321 "vmovhpd\t{$src, $dst|$dst, $src}",
5322 [(store (f64 (extractelt
5323 (v2f64 (X86Unpckh VR128X:$src, VR128X:$src)),
5324 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>,
5325 EVEX, EVEX_CD8<64, CD8VT1>, VEX_W;
5326 def VMOVLPSZ128mr : AVX512PSI<0x13, MRMDestMem, (outs),
5327 (ins f64mem:$dst, VR128X:$src),
5328 "vmovlps\t{$src, $dst|$dst, $src}",
5329 [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128X:$src)),
5330 (iPTR 0))), addr:$dst)],
5332 EVEX, EVEX_CD8<32, CD8VT2>;
5333 def VMOVLPDZ128mr : AVX512PDI<0x13, MRMDestMem, (outs),
5334 (ins f64mem:$dst, VR128X:$src),
5335 "vmovlpd\t{$src, $dst|$dst, $src}",
5336 [(store (f64 (extractelt (v2f64 VR128X:$src),
5337 (iPTR 0))), addr:$dst)],
5339 EVEX, EVEX_CD8<64, CD8VT1>, VEX_W;
// Additional store patterns matching shuffle-then-store DAGs onto the
// MOVH/MOVL store instructions defined above.
5341 let Predicates = [HasAVX512] in {
// VPermilpi with immediate 1 swaps the two doubles, so storing lane 0 of
// the permuted value is a store of the source's high half -> VMOVHPD.
5343 def : Pat<(store (f64 (extractelt
5344 (v2f64 (X86VPermilpi VR128X:$src, (i8 1))),
5345 (iPTR 0))), addr:$dst),
5346 (VMOVHPDZ128mr addr:$dst, VR128X:$src)>;
// Movlps/Movlpd merging $src2 into a value loaded from $src1 and stored
// back to $src1 collapses to a plain low-half store of $src2.
5348 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128X:$src2)),
5350 (VMOVLPSZ128mr addr:$src1, VR128X:$src2)>;
5351 def : Pat<(store (v4i32 (X86Movlps
5352 (bc_v4i32 (loadv2i64 addr:$src1)), VR128X:$src2)), addr:$src1),
5353 (VMOVLPSZ128mr addr:$src1, VR128X:$src2)>;
5355 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128X:$src2)),
5357 (VMOVLPDZ128mr addr:$src1, VR128X:$src2)>;
5358 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128X:$src2)),
5360 (VMOVLPDZ128mr addr:$src1, VR128X:$src2)>;
5362 //===----------------------------------------------------------------------===//
5363 // FMA - Fused Multiply Operations
// Packed FMA, 213 operand order: dst = OpNode(src2, src1/dst, src3).
// Emits register (r), full-vector memory (m), and broadcast-memory (mb)
// forms, all maskable and tied to $src1 = $dst.
5366 multiclass avx512_fma3p_213_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5367 X86VectorVTInfo _, string Suff> {
5368 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
5369 defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5370 (ins _.RC:$src2, _.RC:$src3),
5371 OpcodeStr, "$src3, $src2", "$src2, $src3",
5372 (_.VT (OpNode _.RC:$src2, _.RC:$src1, _.RC:$src3)), 1, 1>,
// Memory form: $src3 comes from a full-width vector load.
5375 defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5376 (ins _.RC:$src2, _.MemOp:$src3),
5377 OpcodeStr, "$src3, $src2", "$src2, $src3",
5378 (_.VT (OpNode _.RC:$src2, _.RC:$src1, (_.LdFrag addr:$src3))), 1, 0>,
// Broadcast form: $src3 is a scalar load splat to the full vector (EVEX.b).
5381 defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5382 (ins _.RC:$src2, _.ScalarMemOp:$src3),
5383 OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
5384 !strconcat("$src2, ${src3}", _.BroadcastStr ),
5386 _.RC:$src1,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3)))), 1, 0>,
5387 AVX512FMA3Base, EVEX_B;
5390 // Additional pattern for folding broadcast nodes in other orders.
5391 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5392 (OpNode _.RC:$src1, _.RC:$src2,
5393 (X86VBroadcast (_.ScalarLdFrag addr:$src3))),
5395 (!cast<Instruction>(NAME#Suff#_.ZSuffix#mbk) _.RC:$src1,
5396 _.KRCWM:$mask, _.RC:$src2, addr:$src3)>;
// Register-only 213 FMA form with an explicit static rounding-control
// operand ($rc); encoded with EVEX.b + rounding bits (EVEX_RC).
5399 multiclass avx512_fma3_213_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
5400 X86VectorVTInfo _, string Suff> {
5401 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in
5402 defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5403 (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
5404 OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc",
5405 (_.VT ( OpNode _.RC:$src2, _.RC:$src1, _.RC:$src3, (i32 imm:$rc))), 1, 1>,
5406 AVX512FMA3Base, EVEX_B, EVEX_RC;
// Instantiates the 213 forms per vector width: 512-bit (plus rounding
// variant) under HasAVX512; 256/128-bit additionally require HasVLX.
5409 multiclass avx512_fma3p_213_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
5410 SDNode OpNodeRnd, AVX512VLVectorVTInfo _,
5412 let Predicates = [HasAVX512] in {
5413 defm Z : avx512_fma3p_213_rm<opc, OpcodeStr, OpNode, _.info512, Suff>,
5414 avx512_fma3_213_round<opc, OpcodeStr, OpNodeRnd, _.info512,
5415 Suff>, EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>;
5417 let Predicates = [HasVLX, HasAVX512] in {
5418 defm Z256 : avx512_fma3p_213_rm<opc, OpcodeStr, OpNode, _.info256, Suff>,
5419 EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>;
5420 defm Z128 : avx512_fma3p_213_rm<opc, OpcodeStr, OpNode, _.info128, Suff>,
5421 EVEX_V128, EVEX_CD8<_.info128.EltSize, CD8VF>;
// Instantiates packed-single (PS) and packed-double (PD, VEX.W) variants of
// a 213-order FMA opcode.
5425 multiclass avx512_fma3p_213_f<bits<8> opc, string OpcodeStr, SDNode OpNode,
5426 SDNode OpNodeRnd > {
5427 defm PS : avx512_fma3p_213_common<opc, OpcodeStr#"ps", OpNode, OpNodeRnd,
5428 avx512vl_f32_info, "PS">;
5429 defm PD : avx512_fma3p_213_common<opc, OpcodeStr#"pd", OpNode, OpNodeRnd,
5430 avx512vl_f64_info, "PD">, VEX_W;
// All 213-order packed FMA instruction families and their opcodes.
5433 defm VFMADD213 : avx512_fma3p_213_f<0xA8, "vfmadd213", X86Fmadd, X86FmaddRnd>;
5434 defm VFMSUB213 : avx512_fma3p_213_f<0xAA, "vfmsub213", X86Fmsub, X86FmsubRnd>;
5435 defm VFMADDSUB213 : avx512_fma3p_213_f<0xA6, "vfmaddsub213", X86Fmaddsub, X86FmaddsubRnd>;
5436 defm VFMSUBADD213 : avx512_fma3p_213_f<0xA7, "vfmsubadd213", X86Fmsubadd, X86FmsubaddRnd>;
5437 defm VFNMADD213 : avx512_fma3p_213_f<0xAC, "vfnmadd213", X86Fnmadd, X86FnmaddRnd>;
5438 defm VFNMSUB213 : avx512_fma3p_213_f<0xAE, "vfnmsub213", X86Fnmsub, X86FnmsubRnd>;
// Packed FMA, 231 operand order: dst = OpNode(src2, src3, src1/dst), i.e.
// the accumulator is the tied destination.  Register, memory, and
// broadcast forms, plus extra patterns for commuted broadcast operands.
5441 multiclass avx512_fma3p_231_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5442 X86VectorVTInfo _, string Suff> {
5443 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
5444 defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5445 (ins _.RC:$src2, _.RC:$src3),
5446 OpcodeStr, "$src3, $src2", "$src2, $src3",
5447 (_.VT (OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1)), 1, 1>,
5450 defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5451 (ins _.RC:$src2, _.MemOp:$src3),
5452 OpcodeStr, "$src3, $src2", "$src2, $src3",
5453 (_.VT (OpNode _.RC:$src2, (_.LdFrag addr:$src3), _.RC:$src1)), 1, 0>,
5456 defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5457 (ins _.RC:$src2, _.ScalarMemOp:$src3),
5458 OpcodeStr, "${src3}"##_.BroadcastStr##", $src2",
5459 "$src2, ${src3}"##_.BroadcastStr,
5460 (_.VT (OpNode _.RC:$src2,
5461 (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
5462 _.RC:$src1)), 1, 0>, AVX512FMA3Base, EVEX_B;
5465 // Additional patterns for folding broadcast nodes in other orders.
// Unmasked, broadcast in the first operand position.
5466 def : Pat<(_.VT (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
5467 _.RC:$src2, _.RC:$src1)),
5468 (!cast<Instruction>(NAME#Suff#_.ZSuffix#mb) _.RC:$src1,
5469 _.RC:$src2, addr:$src3)>;
// Merge-masked (mbk) and zero-masked (mbkz) variants of the same fold.
5470 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5471 (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
5472 _.RC:$src2, _.RC:$src1),
5474 (!cast<Instruction>(NAME#Suff#_.ZSuffix#mbk) _.RC:$src1,
5475 _.KRCWM:$mask, _.RC:$src2, addr:$src3)>;
5476 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5477 (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
5478 _.RC:$src2, _.RC:$src1),
5480 (!cast<Instruction>(NAME#Suff#_.ZSuffix#mbkz) _.RC:$src1,
5481 _.KRCWM:$mask, _.RC:$src2, addr:$src3)>;
// Register-only 231 FMA form with explicit rounding control (EVEX.b + RC).
5484 multiclass avx512_fma3_231_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
5485 X86VectorVTInfo _, string Suff> {
5486 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in
5487 defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5488 (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
5489 OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc",
5490 (_.VT ( OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1, (i32 imm:$rc))), 1, 1>,
5491 AVX512FMA3Base, EVEX_B, EVEX_RC;
// Per-width instantiation for the 231 order; mirrors the 213 _common
// multiclass (512-bit + rounding under HasAVX512, 256/128 under HasVLX).
5494 multiclass avx512_fma3p_231_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
5495 SDNode OpNodeRnd, AVX512VLVectorVTInfo _,
5497 let Predicates = [HasAVX512] in {
5498 defm Z : avx512_fma3p_231_rm<opc, OpcodeStr, OpNode, _.info512, Suff>,
5499 avx512_fma3_231_round<opc, OpcodeStr, OpNodeRnd, _.info512,
5500 Suff>, EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>;
5502 let Predicates = [HasVLX, HasAVX512] in {
5503 defm Z256 : avx512_fma3p_231_rm<opc, OpcodeStr, OpNode, _.info256, Suff>,
5504 EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>;
5505 defm Z128 : avx512_fma3p_231_rm<opc, OpcodeStr, OpNode, _.info128, Suff>,
5506 EVEX_V128, EVEX_CD8<_.info128.EltSize, CD8VF>;
// PS/PD (VEX.W) element-type instantiation for a 231-order FMA opcode.
5510 multiclass avx512_fma3p_231_f<bits<8> opc, string OpcodeStr, SDNode OpNode,
5511 SDNode OpNodeRnd > {
5512 defm PS : avx512_fma3p_231_common<opc, OpcodeStr#"ps", OpNode, OpNodeRnd,
5513 avx512vl_f32_info, "PS">;
5514 defm PD : avx512_fma3p_231_common<opc, OpcodeStr#"pd", OpNode, OpNodeRnd,
5515 avx512vl_f64_info, "PD">, VEX_W;
// All 231-order packed FMA instruction families and their opcodes.
5518 defm VFMADD231 : avx512_fma3p_231_f<0xB8, "vfmadd231", X86Fmadd, X86FmaddRnd>;
5519 defm VFMSUB231 : avx512_fma3p_231_f<0xBA, "vfmsub231", X86Fmsub, X86FmsubRnd>;
5520 defm VFMADDSUB231 : avx512_fma3p_231_f<0xB6, "vfmaddsub231", X86Fmaddsub, X86FmaddsubRnd>;
5521 defm VFMSUBADD231 : avx512_fma3p_231_f<0xB7, "vfmsubadd231", X86Fmsubadd, X86FmsubaddRnd>;
5522 defm VFNMADD231 : avx512_fma3p_231_f<0xBC, "vfnmadd231", X86Fnmadd, X86FnmaddRnd>;
5523 defm VFNMSUB231 : avx512_fma3p_231_f<0xBE, "vfnmsub231", X86Fnmsub, X86FnmsubRnd>;
// Packed FMA, 132 operand order: dst = OpNode(src1/dst, src3, src2).
// Register, memory, and broadcast forms, plus an extra merge-masked
// pattern for the broadcast operand appearing first.
5525 multiclass avx512_fma3p_132_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5526 X86VectorVTInfo _, string Suff> {
5527 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
5528 defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5529 (ins _.RC:$src2, _.RC:$src3),
5530 OpcodeStr, "$src3, $src2", "$src2, $src3",
5531 (_.VT (OpNode _.RC:$src1, _.RC:$src3, _.RC:$src2)), 1, 1>,
5534 defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5535 (ins _.RC:$src2, _.MemOp:$src3),
5536 OpcodeStr, "$src3, $src2", "$src2, $src3",
5537 (_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src3), _.RC:$src2)), 1, 0>,
5540 defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5541 (ins _.RC:$src2, _.ScalarMemOp:$src3),
5542 OpcodeStr, "${src3}"##_.BroadcastStr##", $src2",
5543 "$src2, ${src3}"##_.BroadcastStr,
5544 (_.VT (OpNode _.RC:$src1,
5545 (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
5546 _.RC:$src2)), 1, 0>, AVX512FMA3Base, EVEX_B;
5549 // Additional patterns for folding broadcast nodes in other orders.
5550 def : Pat<(_.VT (vselect _.KRCWM:$mask,
5551 (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
5552 _.RC:$src1, _.RC:$src2),
5554 (!cast<Instruction>(NAME#Suff#_.ZSuffix#mbk) _.RC:$src1,
5555 _.KRCWM:$mask, _.RC:$src2, addr:$src3)>;
// Register-only 132 FMA form with explicit rounding control (EVEX.b + RC).
5558 multiclass avx512_fma3_132_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
5559 X86VectorVTInfo _, string Suff> {
5560 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in
5561 defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5562 (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
5563 OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc",
5564 (_.VT ( OpNode _.RC:$src1, _.RC:$src3, _.RC:$src2, (i32 imm:$rc))), 1, 1>,
5565 AVX512FMA3Base, EVEX_B, EVEX_RC;
// Per-width instantiation for the 132 order; mirrors the 213/231 _common
// multiclasses (512-bit + rounding under HasAVX512, 256/128 under HasVLX).
5568 multiclass avx512_fma3p_132_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
5569 SDNode OpNodeRnd, AVX512VLVectorVTInfo _,
5571 let Predicates = [HasAVX512] in {
5572 defm Z : avx512_fma3p_132_rm<opc, OpcodeStr, OpNode, _.info512, Suff>,
5573 avx512_fma3_132_round<opc, OpcodeStr, OpNodeRnd, _.info512,
5574 Suff>, EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>;
5576 let Predicates = [HasVLX, HasAVX512] in {
5577 defm Z256 : avx512_fma3p_132_rm<opc, OpcodeStr, OpNode, _.info256, Suff>,
5578 EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>;
5579 defm Z128 : avx512_fma3p_132_rm<opc, OpcodeStr, OpNode, _.info128, Suff>,
5580 EVEX_V128, EVEX_CD8<_.info128.EltSize, CD8VF>;
// PS/PD (VEX.W) element-type instantiation for a 132-order FMA opcode.
5584 multiclass avx512_fma3p_132_f<bits<8> opc, string OpcodeStr, SDNode OpNode,
5585 SDNode OpNodeRnd > {
5586 defm PS : avx512_fma3p_132_common<opc, OpcodeStr#"ps", OpNode, OpNodeRnd,
5587 avx512vl_f32_info, "PS">;
5588 defm PD : avx512_fma3p_132_common<opc, OpcodeStr#"pd", OpNode, OpNodeRnd,
5589 avx512vl_f64_info, "PD">, VEX_W;
// All 132-order packed FMA instruction families and their opcodes.
5592 defm VFMADD132 : avx512_fma3p_132_f<0x98, "vfmadd132", X86Fmadd, X86FmaddRnd>;
5593 defm VFMSUB132 : avx512_fma3p_132_f<0x9A, "vfmsub132", X86Fmsub, X86FmsubRnd>;
5594 defm VFMADDSUB132 : avx512_fma3p_132_f<0x96, "vfmaddsub132", X86Fmaddsub, X86FmaddsubRnd>;
5595 defm VFMSUBADD132 : avx512_fma3p_132_f<0x97, "vfmsubadd132", X86Fmsubadd, X86FmsubaddRnd>;
5596 defm VFNMADD132 : avx512_fma3p_132_f<0x9C, "vfnmadd132", X86Fnmadd, X86FnmaddRnd>;
5597 defm VFNMSUB132 : avx512_fma3p_132_f<0x9E, "vfnmsub132", X86Fnmsub, X86FnmsubRnd>;
// Scalar FMA skeleton: the caller supplies the intrinsic-form (RHS_VEC_*)
// and scalar-register-form (RHS_r/RHS_m) patterns.  Emits maskable
// intrinsic variants (r_Int/m_Int/rb_Int) plus codegen-only FRC variants.
5600 let Constraints = "$src1 = $dst" in {
5601 multiclass avx512_fma3s_common<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
5602 dag RHS_VEC_r, dag RHS_VEC_m, dag RHS_VEC_rb,
5603 dag RHS_r, dag RHS_m > {
5604 defm r_Int: AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
5605 (ins _.RC:$src2, _.RC:$src3), OpcodeStr,
5606 "$src3, $src2", "$src2, $src3", RHS_VEC_r, 1, 1>, AVX512FMA3Base;
5608 defm m_Int: AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
5609 (ins _.RC:$src2, _.IntScalarMemOp:$src3), OpcodeStr,
5610 "$src3, $src2", "$src2, $src3", RHS_VEC_m, 1, 1>, AVX512FMA3Base;
// rb_Int: register form with explicit static rounding control.
5612 defm rb_Int: AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
5613 (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
5614 OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc", RHS_VEC_rb, 1, 1>,
5615 AVX512FMA3Base, EVEX_B, EVEX_RC;
// Codegen-only forms operating on scalar FP register classes (FRC), used
// for plain (non-intrinsic) scalar FMA selection.
5617 let isCodeGenOnly = 1, isCommutable = 1 in {
5618 def r : AVX512FMA3<opc, MRMSrcReg, (outs _.FRC:$dst),
5619 (ins _.FRC:$src1, _.FRC:$src2, _.FRC:$src3),
5620 !strconcat(OpcodeStr,
5621 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5623 def m : AVX512FMA3<opc, MRMSrcMem, (outs _.FRC:$dst),
5624 (ins _.FRC:$src1, _.FRC:$src2, _.ScalarMemOp:$src3),
5625 !strconcat(OpcodeStr,
5626 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5628 }// isCodeGenOnly = 1
5630 }// Constraints = "$src1 = $dst"
// Builds the 213/231/132 scalar variants of one FMA family, passing the
// appropriately-ordered operand DAGs into avx512_fma3s_common.  Operand
// orders below mirror the packed versions of each encoding.
5632 multiclass avx512_fma3s_all<bits<8> opc213, bits<8> opc231, bits<8> opc132,
5633 string OpcodeStr, SDNode OpNode, SDNode OpNodeRnds1,
5634 SDNode OpNodeRnds3, X86VectorVTInfo _ , string SUFF> {
5635 let ExeDomain = _.ExeDomain in {
5636 defm NAME#213#SUFF#Z: avx512_fma3s_common<opc213, OpcodeStr#"213"#_.Suffix , _ ,
5637 // Operands for intrinsic are in 123 order to preserve passthu
5639 (_.VT (OpNodeRnds1 _.RC:$src1, _.RC:$src2, _.RC:$src3, (i32 FROUND_CURRENT))),
5640 (_.VT (OpNodeRnds1 _.RC:$src1, _.RC:$src2,
5641 _.ScalarIntMemCPat:$src3, (i32 FROUND_CURRENT))),
5642 (_.VT (OpNodeRnds1 _.RC:$src1, _.RC:$src2, _.RC:$src3,
5644 (set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src2, _.FRC:$src1,
5646 (set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src2, _.FRC:$src1,
5647 (_.ScalarLdFrag addr:$src3))))>;
// 231: accumulator is the tied src1; OpNodeRnds3 keeps passthru in src1.
5649 defm NAME#231#SUFF#Z: avx512_fma3s_common<opc231, OpcodeStr#"231"#_.Suffix , _ ,
5650 (_.VT (OpNodeRnds3 _.RC:$src2, _.RC:$src3, _.RC:$src1, (i32 FROUND_CURRENT))),
5651 (_.VT (OpNodeRnds3 _.RC:$src2, _.ScalarIntMemCPat:$src3,
5652 _.RC:$src1, (i32 FROUND_CURRENT))),
5653 (_.VT ( OpNodeRnds3 _.RC:$src2, _.RC:$src3, _.RC:$src1,
5655 (set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src2, _.FRC:$src3,
5657 (set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src2,
5658 (_.ScalarLdFrag addr:$src3), _.FRC:$src1)))>;
// 132: multiplier is the tied src1.
5660 defm NAME#132#SUFF#Z: avx512_fma3s_common<opc132, OpcodeStr#"132"#_.Suffix , _ ,
5661 (_.VT (OpNodeRnds1 _.RC:$src1, _.RC:$src3, _.RC:$src2, (i32 FROUND_CURRENT))),
5662 (_.VT (OpNodeRnds1 _.RC:$src1, _.ScalarIntMemCPat:$src3,
5663 _.RC:$src2, (i32 FROUND_CURRENT))),
5664 (_.VT (OpNodeRnds1 _.RC:$src1, _.RC:$src3, _.RC:$src2,
5666 (set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src1, _.FRC:$src3,
5668 (set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src1,
5669 (_.ScalarLdFrag addr:$src3), _.FRC:$src2)))>;
// Instantiates one scalar FMA family for both f32 (SS) and f64 (SD, VEX.W).
5673 multiclass avx512_fma3s<bits<8> opc213, bits<8> opc231, bits<8> opc132,
5674 string OpcodeStr, SDNode OpNode, SDNode OpNodeRnds1,
5675 SDNode OpNodeRnds3> {
5676 let Predicates = [HasAVX512] in {
5677 defm NAME : avx512_fma3s_all<opc213, opc231, opc132, OpcodeStr, OpNode,
5678 OpNodeRnds1, OpNodeRnds3, f32x_info, "SS">,
5679 EVEX_CD8<32, CD8VT1>, VEX_LIG;
5680 defm NAME : avx512_fma3s_all<opc213, opc231, opc132, OpcodeStr, OpNode,
5681 OpNodeRnds1, OpNodeRnds3, f64x_info, "SD">,
5682 EVEX_CD8<64, CD8VT1>, VEX_LIG, VEX_W;
// Scalar FMA instruction families (213/231/132 opcode triples).
5686 defm VFMADD : avx512_fma3s<0xA9, 0xB9, 0x99, "vfmadd", X86Fmadd, X86FmaddRnds1,
5688 defm VFMSUB : avx512_fma3s<0xAB, 0xBB, 0x9B, "vfmsub", X86Fmsub, X86FmsubRnds1,
5690 defm VFNMADD : avx512_fma3s<0xAD, 0xBD, 0x9D, "vfnmadd", X86Fnmadd,
5691 X86FnmaddRnds1, X86FnmaddRnds3>;
5692 defm VFNMSUB : avx512_fma3s<0xAF, 0xBF, 0x9F, "vfnmsub", X86Fnmsub,
5693 X86FnmsubRnds1, X86FnmsubRnds3>;
5695 //===----------------------------------------------------------------------===//
5696 // AVX-512 Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit IFMA
5697 //===----------------------------------------------------------------------===//
// IFMA VPMADD52: 52-bit multiply-accumulate into the tied destination.
// Register, full-vector memory, and broadcast (EVEX.b) forms.
5698 let Constraints = "$src1 = $dst" in {
5699 multiclass avx512_pmadd52_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5700 X86VectorVTInfo _> {
5701 let ExeDomain = _.ExeDomain in {
5702 defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
5703 (ins _.RC:$src2, _.RC:$src3),
5704 OpcodeStr, "$src3, $src2", "$src2, $src3",
5705 (_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>,
5708 defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5709 (ins _.RC:$src2, _.MemOp:$src3),
5710 OpcodeStr, "$src3, $src2", "$src2, $src3",
5711 (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>,
5714 defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
5715 (ins _.RC:$src2, _.ScalarMemOp:$src3),
5716 OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
5717 !strconcat("$src2, ${src3}", _.BroadcastStr ),
5719 _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
5720 AVX512FMA3Base, EVEX_B;
5723 } // Constraints = "$src1 = $dst"
// Per-width instantiation for VPMADD52: 512-bit under HasIFMA,
// 256/128-bit additionally require HasVLX.
5725 multiclass avx512_pmadd52_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
5726 AVX512VLVectorVTInfo _> {
5727 let Predicates = [HasIFMA] in {
5728 defm Z : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info512>,
5729 EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>;
5731 let Predicates = [HasVLX, HasIFMA] in {
5732 defm Z256 : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info256>,
5733 EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>;
5734 defm Z128 : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info128>,
5735 EVEX_V128, EVEX_CD8<_.info128.EltSize, CD8VF>;
// Low (LUQ) and high (HUQ) halves of the 52-bit multiply-add, on i64 vectors.
5739 defm VPMADD52LUQ : avx512_pmadd52_common<0xb4, "vpmadd52luq", x86vpmadd52l,
5740 avx512vl_i64_info>, VEX_W;
5741 defm VPMADD52HUQ : avx512_pmadd52_common<0xb5, "vpmadd52huq", x86vpmadd52h,
5742 avx512vl_i64_info>, VEX_W;
5744 //===----------------------------------------------------------------------===//
5745 // AVX-512 Scalar convert from sign integer to float/double
5746 //===----------------------------------------------------------------------===//
// Scalar int -> float/double conversion (CVTSI2SS/SD family).  The rr/rm
// FRC forms are pattern-less (selection uses the explicit Pats below);
// the *_Int forms carry the OpNode patterns with FROUND_CURRENT rounding.
5748 multiclass avx512_vcvtsi<bits<8> opc, SDNode OpNode, RegisterClass SrcRC,
5749 X86VectorVTInfo DstVT, X86MemOperand x86memop,
5750 PatFrag ld_frag, string asm> {
5751 let hasSideEffects = 0 in {
5752 def rr : SI<opc, MRMSrcReg, (outs DstVT.FRC:$dst),
5753 (ins DstVT.FRC:$src1, SrcRC:$src),
5754 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
5757 def rm : SI<opc, MRMSrcMem, (outs DstVT.FRC:$dst),
5758 (ins DstVT.FRC:$src1, x86memop:$src),
5759 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
5761 } // hasSideEffects = 0
// Intrinsic forms operate on the full XMM register class (DstVT.RC).
5762 let isCodeGenOnly = 1 in {
5763 def rr_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst),
5764 (ins DstVT.RC:$src1, SrcRC:$src2),
5765 !strconcat(asm,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5766 [(set DstVT.RC:$dst,
5767 (OpNode (DstVT.VT DstVT.RC:$src1),
5769 (i32 FROUND_CURRENT)))]>, EVEX_4V;
5771 def rm_Int : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst),
5772 (ins DstVT.RC:$src1, x86memop:$src2),
5773 !strconcat(asm,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5774 [(set DstVT.RC:$dst,
5775 (OpNode (DstVT.VT DstVT.RC:$src1),
5776 (ld_frag addr:$src2),
5777 (i32 FROUND_CURRENT)))]>, EVEX_4V;
5778 }//isCodeGenOnly = 1
// Register-only intrinsic form with explicit static rounding control.
5781 multiclass avx512_vcvtsi_round<bits<8> opc, SDNode OpNode, RegisterClass SrcRC,
5782 X86VectorVTInfo DstVT, string asm> {
5783 def rrb_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst),
5784 (ins DstVT.RC:$src1, SrcRC:$src2, AVX512RC:$rc),
5786 "\t{$src2, $rc, $src1, $dst|$dst, $src1, $rc, $src2}"),
5787 [(set DstVT.RC:$dst,
5788 (OpNode (DstVT.VT DstVT.RC:$src1),
5790 (i32 imm:$rc)))]>, EVEX_4V, EVEX_B, EVEX_RC;
// Combines the rounding and non-rounding int->fp conversion forms.
5793 multiclass avx512_vcvtsi_common<bits<8> opc, SDNode OpNode, RegisterClass SrcRC,
5794 X86VectorVTInfo DstVT, X86MemOperand x86memop,
5795 PatFrag ld_frag, string asm> {
5796 defm NAME : avx512_vcvtsi_round<opc, OpNode, SrcRC, DstVT, asm>,
5797 avx512_vcvtsi<opc, OpNode, SrcRC, DstVT, x86memop, ld_frag, asm>,
// Instantiations of the signed/unsigned int -> ss/sd conversions, their
// memory-form assembler aliases, and the Pats selecting (s|u)int_to_fp
// through them.  IMPLICIT_DEF supplies the unused merge operand.
5801 let Predicates = [HasAVX512] in {
5802 defm VCVTSI2SSZ : avx512_vcvtsi_common<0x2A, X86SintToFpRnd, GR32,
5803 v4f32x_info, i32mem, loadi32, "cvtsi2ss{l}">,
5804 XS, EVEX_CD8<32, CD8VT1>;
5805 defm VCVTSI642SSZ: avx512_vcvtsi_common<0x2A, X86SintToFpRnd, GR64,
5806 v4f32x_info, i64mem, loadi64, "cvtsi2ss{q}">,
5807 XS, VEX_W, EVEX_CD8<64, CD8VT1>;
5808 defm VCVTSI2SDZ : avx512_vcvtsi_common<0x2A, X86SintToFpRnd, GR32,
5809 v2f64x_info, i32mem, loadi32, "cvtsi2sd{l}">,
5810 XD, EVEX_CD8<32, CD8VT1>;
5811 defm VCVTSI642SDZ: avx512_vcvtsi_common<0x2A, X86SintToFpRnd, GR64,
5812 v2f64x_info, i64mem, loadi64, "cvtsi2sd{q}">,
5813 XD, VEX_W, EVEX_CD8<64, CD8VT1>;
// Suffix-less assembler aliases default to the 32-bit memory forms.
5815 def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
5816 (VCVTSI2SSZrm FR64X:$dst, FR64X:$src1, i32mem:$src), 0>;
5817 def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
5818 (VCVTSI2SDZrm FR64X:$dst, FR64X:$src1, i32mem:$src), 0>;
// sint_to_fp from memory.
5820 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
5821 (VCVTSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
5822 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
5823 (VCVTSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
5824 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
5825 (VCVTSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
5826 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
5827 (VCVTSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
// sint_to_fp from a GPR.
5829 def : Pat<(f32 (sint_to_fp GR32:$src)),
5830 (VCVTSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
5831 def : Pat<(f32 (sint_to_fp GR64:$src)),
5832 (VCVTSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
5833 def : Pat<(f64 (sint_to_fp GR32:$src)),
5834 (VCVTSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
5835 def : Pat<(f64 (sint_to_fp GR64:$src)),
5836 (VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
// Unsigned variants.  Note VCVTUSI2SDZ (32-bit source to double) uses the
// non-rounding multiclass: every u32 is exactly representable in f64, so
// no rounding-control form is needed.
5838 defm VCVTUSI2SSZ : avx512_vcvtsi_common<0x7B, X86UintToFpRnd, GR32,
5839 v4f32x_info, i32mem, loadi32,
5840 "cvtusi2ss{l}">, XS, EVEX_CD8<32, CD8VT1>;
5841 defm VCVTUSI642SSZ : avx512_vcvtsi_common<0x7B, X86UintToFpRnd, GR64,
5842 v4f32x_info, i64mem, loadi64, "cvtusi2ss{q}">,
5843 XS, VEX_W, EVEX_CD8<64, CD8VT1>;
5844 defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, X86UintToFpRnd, GR32, v2f64x_info,
5845 i32mem, loadi32, "cvtusi2sd{l}">,
5846 XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
5847 defm VCVTUSI642SDZ : avx512_vcvtsi_common<0x7B, X86UintToFpRnd, GR64,
5848 v2f64x_info, i64mem, loadi64, "cvtusi2sd{q}">,
5849 XD, VEX_W, EVEX_CD8<64, CD8VT1>;
5851 def : InstAlias<"vcvtusi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
5852 (VCVTUSI2SSZrm FR64X:$dst, FR64X:$src1, i32mem:$src), 0>;
5853 def : InstAlias<"vcvtusi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
5854 (VCVTUSI2SDZrm FR64X:$dst, FR64X:$src1, i32mem:$src), 0>;
// uint_to_fp from memory and from a GPR, mirroring the signed patterns.
5856 def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))),
5857 (VCVTUSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
5858 def : Pat<(f32 (uint_to_fp (loadi64 addr:$src))),
5859 (VCVTUSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
5860 def : Pat<(f64 (uint_to_fp (loadi32 addr:$src))),
5861 (VCVTUSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
5862 def : Pat<(f64 (uint_to_fp (loadi64 addr:$src))),
5863 (VCVTUSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
5865 def : Pat<(f32 (uint_to_fp GR32:$src)),
5866 (VCVTUSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
5867 def : Pat<(f32 (uint_to_fp GR64:$src)),
5868 (VCVTUSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
5869 def : Pat<(f64 (uint_to_fp GR32:$src)),
5870 (VCVTUSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
5871 def : Pat<(f64 (uint_to_fp GR64:$src)),
5872 (VCVTUSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
5875 //===----------------------------------------------------------------------===//
5876 // AVX-512 Scalar convert from float/double to integer
5877 //===----------------------------------------------------------------------===//
// Scalar float/double -> integer conversion with current (rr/rm) or
// explicit (rb, EVEX.b + RC) rounding mode.
5878 multiclass avx512_cvt_s_int_round<bits<8> opc, X86VectorVTInfo SrcVT ,
5879 X86VectorVTInfo DstVT, SDNode OpNode, string asm> {
5880 let Predicates = [HasAVX512] in {
5881 def rr : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src),
5882 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
5883 [(set DstVT.RC:$dst, (OpNode (SrcVT.VT SrcVT.RC:$src),(i32 FROUND_CURRENT)))]>,
5885 def rb : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src, AVX512RC:$rc),
5886 !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
5887 [(set DstVT.RC:$dst, (OpNode (SrcVT.VT SrcVT.RC:$src),(i32 imm:$rc)))]>,
5888 EVEX, VEX_LIG, EVEX_B, EVEX_RC;
5889 def rm : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst), (ins SrcVT.IntScalarMemOp:$src),
5890 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
5891 [(set DstVT.RC:$dst, (OpNode
5892 (SrcVT.VT SrcVT.ScalarIntMemCPat:$src),
5893 (i32 FROUND_CURRENT)))]>,
5895 } // Predicates = [HasAVX512]
5898 // Convert float/double to signed/unsigned int 32/64
// ss->i32/i64 use opcode 0x2D (signed) / 0x79 (unsigned) with XS prefix;
// sd variants use the same opcodes with XD; VEX.W selects 64-bit forms.
5899 defm VCVTSS2SIZ: avx512_cvt_s_int_round<0x2D, f32x_info, i32x_info,
5900 X86cvts2si, "cvtss2si">,
5901 XS, EVEX_CD8<32, CD8VT1>;
5902 defm VCVTSS2SI64Z: avx512_cvt_s_int_round<0x2D, f32x_info, i64x_info,
5903 X86cvts2si, "cvtss2si">,
5904 XS, VEX_W, EVEX_CD8<32, CD8VT1>;
5905 defm VCVTSS2USIZ: avx512_cvt_s_int_round<0x79, f32x_info, i32x_info,
5906 X86cvts2usi, "cvtss2usi">,
5907 XS, EVEX_CD8<32, CD8VT1>;
5908 defm VCVTSS2USI64Z: avx512_cvt_s_int_round<0x79, f32x_info, i64x_info,
5909 X86cvts2usi, "cvtss2usi">, XS, VEX_W,
5910 EVEX_CD8<32, CD8VT1>;
5911 defm VCVTSD2SIZ: avx512_cvt_s_int_round<0x2D, f64x_info, i32x_info,
5912 X86cvts2si, "cvtsd2si">,
5913 XD, EVEX_CD8<64, CD8VT1>;
5914 defm VCVTSD2SI64Z: avx512_cvt_s_int_round<0x2D, f64x_info, i64x_info,
5915 X86cvts2si, "cvtsd2si">,
5916 XD, VEX_W, EVEX_CD8<64, CD8VT1>;
5917 defm VCVTSD2USIZ: avx512_cvt_s_int_round<0x79, f64x_info, i32x_info,
5918 X86cvts2usi, "cvtsd2usi">,
5919 XD, EVEX_CD8<64, CD8VT1>;
5920 defm VCVTSD2USI64Z: avx512_cvt_s_int_round<0x79, f64x_info, i64x_info,
5921 X86cvts2usi, "cvtsd2usi">, XD, VEX_W,
5922 EVEX_CD8<64, CD8VT1>;
5924 // The SSE version of these instructions are disabled for AVX512.
5925 // Therefore, the SSE intrinsics are mapped to the AVX512 instructions.
5926 let Predicates = [HasAVX512] in {
5927 def : Pat<(i32 (int_x86_sse_cvtss2si (v4f32 VR128X:$src))),
5928 (VCVTSS2SIZrr VR128X:$src)>;
5929 def : Pat<(i32 (int_x86_sse_cvtss2si sse_load_f32:$src)),
5930 (VCVTSS2SIZrm sse_load_f32:$src)>;
5931 def : Pat<(i64 (int_x86_sse_cvtss2si64 (v4f32 VR128X:$src))),
5932 (VCVTSS2SI64Zrr VR128X:$src)>;
5933 def : Pat<(i64 (int_x86_sse_cvtss2si64 sse_load_f32:$src)),
5934 (VCVTSS2SI64Zrm sse_load_f32:$src)>;
5935 def : Pat<(i32 (int_x86_sse2_cvtsd2si (v2f64 VR128X:$src))),
5936 (VCVTSD2SIZrr VR128X:$src)>;
5937 def : Pat<(i32 (int_x86_sse2_cvtsd2si sse_load_f64:$src)),
5938 (VCVTSD2SIZrm sse_load_f64:$src)>;
5939 def : Pat<(i64 (int_x86_sse2_cvtsd2si64 (v2f64 VR128X:$src))),
5940 (VCVTSD2SI64Zrr VR128X:$src)>;
5941 def : Pat<(i64 (int_x86_sse2_cvtsd2si64 sse_load_f64:$src)),
5942 (VCVTSD2SI64Zrm sse_load_f64:$src)>;
// Map the SSE/SSE2/AVX-512 cvtsi2ss/sd intrinsics onto the EVEX _Int forms.
5945 let Predicates = [HasAVX512] in {
5946 def : Pat<(int_x86_sse_cvtsi2ss VR128X:$src1, GR32:$src2),
5947 (VCVTSI2SSZrr_Int VR128X:$src1, GR32:$src2)>;
5948 def : Pat<(int_x86_sse_cvtsi2ss VR128X:$src1, (loadi32 addr:$src2)),
5949 (VCVTSI2SSZrm_Int VR128X:$src1, addr:$src2)>;
5950 def : Pat<(int_x86_sse_cvtsi642ss VR128X:$src1, GR64:$src2),
5951 (VCVTSI642SSZrr_Int VR128X:$src1, GR64:$src2)>;
5952 def : Pat<(int_x86_sse_cvtsi642ss VR128X:$src1, (loadi64 addr:$src2)),
5953 (VCVTSI642SSZrm_Int VR128X:$src1, addr:$src2)>;
5954 def : Pat<(int_x86_sse2_cvtsi2sd VR128X:$src1, GR32:$src2),
5955 (VCVTSI2SDZrr_Int VR128X:$src1, GR32:$src2)>;
5956 def : Pat<(int_x86_sse2_cvtsi2sd VR128X:$src1, (loadi32 addr:$src2)),
5957 (VCVTSI2SDZrm_Int VR128X:$src1, addr:$src2)>;
5958 def : Pat<(int_x86_sse2_cvtsi642sd VR128X:$src1, GR64:$src2),
5959 (VCVTSI642SDZrr_Int VR128X:$src1, GR64:$src2)>;
5960 def : Pat<(int_x86_sse2_cvtsi642sd VR128X:$src1, (loadi64 addr:$src2)),
5961 (VCVTSI642SDZrm_Int VR128X:$src1, addr:$src2)>;
5962 def : Pat<(int_x86_avx512_cvtusi2sd VR128X:$src1, GR32:$src2),
5963 (VCVTUSI2SDZrr_Int VR128X:$src1, GR32:$src2)>;
5964 def : Pat<(int_x86_avx512_cvtusi2sd VR128X:$src1, (loadi32 addr:$src2)),
5965 (VCVTUSI2SDZrm_Int VR128X:$src1, addr:$src2)>;
5966 } // Predicates = [HasAVX512]
5968 // Patterns used for matching vcvtsi2s{s,d} intrinsic sequences from clang
5969 // which produce unnecessary vmovs{s,d} instructions
// Movss/Movsd inserting a converted scalar into lane 0 selects directly to
// the merging _Int conversion, eliminating the separate blend/move.
5970 let Predicates = [HasAVX512] in {
5971 def : Pat<(v4f32 (X86Movss
5972 (v4f32 VR128X:$dst),
5973 (v4f32 (scalar_to_vector (f32 (sint_to_fp GR64:$src)))))),
5974 (VCVTSI642SSZrr_Int VR128X:$dst, GR64:$src)>;
5976 def : Pat<(v4f32 (X86Movss
5977 (v4f32 VR128X:$dst),
5978 (v4f32 (scalar_to_vector (f32 (sint_to_fp GR32:$src)))))),
5979 (VCVTSI2SSZrr_Int VR128X:$dst, GR32:$src)>;
5981 def : Pat<(v2f64 (X86Movsd
5982 (v2f64 VR128X:$dst),
5983 (v2f64 (scalar_to_vector (f64 (sint_to_fp GR64:$src)))))),
5984 (VCVTSI642SDZrr_Int VR128X:$dst, GR64:$src)>;
5986 def : Pat<(v2f64 (X86Movsd
5987 (v2f64 VR128X:$dst),
5988 (v2f64 (scalar_to_vector (f64 (sint_to_fp GR32:$src)))))),
5989 (VCVTSI2SDZrr_Int VR128X:$dst, GR32:$src)>;
5990 } // Predicates = [HasAVX512]
5992 // Convert float/double to signed/unsigned int 32/64 with truncation
// Emits FRC-based rr/rb/rm forms with assembler aliases carrying the
// size suffix (aliasStr), plus codegen-only _Int forms on the XMM class
// using OpNodeRnd (sae variant uses FROUND_NO_EXC).
5993 multiclass avx512_cvt_s_all<bits<8> opc, string asm, X86VectorVTInfo _SrcRC,
5994 X86VectorVTInfo _DstRC, SDNode OpNode,
5995 SDNode OpNodeRnd, string aliasStr>{
5996 let Predicates = [HasAVX512] in {
5997 def rr : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.FRC:$src),
5998 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
5999 [(set _DstRC.RC:$dst, (OpNode _SrcRC.FRC:$src))]>, EVEX;
6000 let hasSideEffects = 0 in
6001 def rb : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.FRC:$src),
6002 !strconcat(asm,"\t{{sae}, $src, $dst|$dst, $src, {sae}}"),
6004 def rm : AVX512<opc, MRMSrcMem, (outs _DstRC.RC:$dst), (ins _SrcRC.ScalarMemOp:$src),
6005 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
6006 [(set _DstRC.RC:$dst, (OpNode (_SrcRC.ScalarLdFrag addr:$src)))]>,
// Aliases without the {l}/{q} suffix for each form.
6009 def : InstAlias<asm # aliasStr # "\t{$src, $dst|$dst, $src}",
6010 (!cast<Instruction>(NAME # "rr") _DstRC.RC:$dst, _SrcRC.FRC:$src), 0>;
6011 def : InstAlias<asm # aliasStr # "\t\t{{sae}, $src, $dst|$dst, $src, {sae}}",
6012 (!cast<Instruction>(NAME # "rb") _DstRC.RC:$dst, _SrcRC.FRC:$src), 0>;
6013 def : InstAlias<asm # aliasStr # "\t{$src, $dst|$dst, $src}",
6014 (!cast<Instruction>(NAME # "rm") _DstRC.RC:$dst,
6015 _SrcRC.ScalarMemOp:$src), 0>;
6017 let isCodeGenOnly = 1 in {
6018 def rr_Int : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.RC:$src),
6019 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
6020 [(set _DstRC.RC:$dst, (OpNodeRnd (_SrcRC.VT _SrcRC.RC:$src),
6021 (i32 FROUND_CURRENT)))]>, EVEX, VEX_LIG;
6022 def rb_Int : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.RC:$src),
6023 !strconcat(asm,"\t{{sae}, $src, $dst|$dst, $src, {sae}}"),
6024 [(set _DstRC.RC:$dst, (OpNodeRnd (_SrcRC.VT _SrcRC.RC:$src),
6025 (i32 FROUND_NO_EXC)))]>,
6026 EVEX,VEX_LIG , EVEX_B;
6027 let mayLoad = 1, hasSideEffects = 0 in
6028 def rm_Int : AVX512<opc, MRMSrcMem, (outs _DstRC.RC:$dst),
6029 (ins _SrcRC.IntScalarMemOp:$src),
6030 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
6033 } // isCodeGenOnly = 1
// Truncating conversions: opcode 0x2C for signed, 0x78 for unsigned;
// XS = from float, XD = from double; VEX.W selects 64-bit destinations.
6038 defm VCVTTSS2SIZ: avx512_cvt_s_all<0x2C, "vcvttss2si", f32x_info, i32x_info,
6039 fp_to_sint, X86cvtts2IntRnd, "{l}">,
6040 XS, EVEX_CD8<32, CD8VT1>;
6041 defm VCVTTSS2SI64Z: avx512_cvt_s_all<0x2C, "vcvttss2si", f32x_info, i64x_info,
6042 fp_to_sint, X86cvtts2IntRnd, "{q}">,
6043 VEX_W, XS, EVEX_CD8<32, CD8VT1>;
6044 defm VCVTTSD2SIZ: avx512_cvt_s_all<0x2C, "vcvttsd2si", f64x_info, i32x_info,
6045 fp_to_sint, X86cvtts2IntRnd, "{l}">,
6046 XD, EVEX_CD8<64, CD8VT1>;
6047 defm VCVTTSD2SI64Z: avx512_cvt_s_all<0x2C, "vcvttsd2si", f64x_info, i64x_info,
6048 fp_to_sint, X86cvtts2IntRnd, "{q}">,
6049 VEX_W, XD, EVEX_CD8<64, CD8VT1>;
6051 defm VCVTTSS2USIZ: avx512_cvt_s_all<0x78, "vcvttss2usi", f32x_info, i32x_info,
6052 fp_to_uint, X86cvtts2UIntRnd, "{l}">,
6053 XS, EVEX_CD8<32, CD8VT1>;
6054 defm VCVTTSS2USI64Z: avx512_cvt_s_all<0x78, "vcvttss2usi", f32x_info, i64x_info,
6055 fp_to_uint, X86cvtts2UIntRnd, "{q}">,
6056 XS,VEX_W, EVEX_CD8<32, CD8VT1>;
6057 defm VCVTTSD2USIZ: avx512_cvt_s_all<0x78, "vcvttsd2usi", f64x_info, i32x_info,
6058 fp_to_uint, X86cvtts2UIntRnd, "{l}">,
6059 XD, EVEX_CD8<64, CD8VT1>;
6060 defm VCVTTSD2USI64Z: avx512_cvt_s_all<0x78, "vcvttsd2usi", f64x_info, i64x_info,
6061 fp_to_uint, X86cvtts2UIntRnd, "{q}">,
6062 XD, VEX_W, EVEX_CD8<64, CD8VT1>;
// Select the legacy SSE/SSE2 cvtt intrinsics to the EVEX-encoded AVX-512
// *_Int forms when AVX-512 is available.
6063 let Predicates = [HasAVX512] in {
6064 def : Pat<(i32 (int_x86_sse_cvttss2si (v4f32 VR128X:$src))),
6065 (VCVTTSS2SIZrr_Int VR128X:$src)>;
6066 def : Pat<(i32 (int_x86_sse_cvttss2si sse_load_f32:$src)),
6067 (VCVTTSS2SIZrm_Int ssmem:$src)>;
6068 def : Pat<(i64 (int_x86_sse_cvttss2si64 (v4f32 VR128X:$src))),
6069 (VCVTTSS2SI64Zrr_Int VR128X:$src)>;
6070 def : Pat<(i64 (int_x86_sse_cvttss2si64 sse_load_f32:$src)),
6071 (VCVTTSS2SI64Zrm_Int ssmem:$src)>;
6072 def : Pat<(i32 (int_x86_sse2_cvttsd2si (v2f64 VR128X:$src))),
6073 (VCVTTSD2SIZrr_Int VR128X:$src)>;
6074 def : Pat<(i32 (int_x86_sse2_cvttsd2si sse_load_f64:$src)),
6075 (VCVTTSD2SIZrm_Int sdmem:$src)>;
6076 def : Pat<(i64 (int_x86_sse2_cvttsd2si64 (v2f64 VR128X:$src))),
6077 (VCVTTSD2SI64Zrr_Int VR128X:$src)>;
6078 def : Pat<(i64 (int_x86_sse2_cvttsd2si64 sse_load_f64:$src)),
6079 (VCVTTSD2SI64Zrm_Int sdmem:$src)>;
6081 //===----------------------------------------------------------------------===//
6082 // AVX-512 Convert form float to double and back
6083 //===----------------------------------------------------------------------===//
// Scalar FP<->FP conversion (e.g. cvtsd2ss/cvtss2sd) using MXCSR rounding.
// Emits masked *_Int reg/mem forms plus codegen-only FRC forms for
// compiler-generated (non-intrinsic) conversions. $src1 supplies the
// passthrough upper elements of the destination vector.
6084 multiclass avx512_cvt_fp_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6085 X86VectorVTInfo _Src, SDNode OpNode> {
6086 defm rr_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
6087 (ins _.RC:$src1, _Src.RC:$src2), OpcodeStr,
6088 "$src2, $src1", "$src1, $src2",
6089 (_.VT (OpNode (_.VT _.RC:$src1),
6090 (_Src.VT _Src.RC:$src2),
6091 (i32 FROUND_CURRENT)))>,
6092 EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
6093 defm rm_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
6094 (ins _.RC:$src1, _Src.IntScalarMemOp:$src2), OpcodeStr,
6095 "$src2, $src1", "$src1, $src2",
6096 (_.VT (OpNode (_.VT _.RC:$src1),
6097 (_Src.VT _Src.ScalarIntMemCPat:$src2),
6098 (i32 FROUND_CURRENT)))>,
6099 EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>;
// Codegen-only FRC-register forms; no patterns — selected via the Pats below.
6101 let isCodeGenOnly = 1, hasSideEffects = 0 in {
6102 def rr : I<opc, MRMSrcReg, (outs _.FRC:$dst),
6103 (ins _.FRC:$src1, _Src.FRC:$src2),
6104 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
6105 EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
6107 def rm : I<opc, MRMSrcMem, (outs _.FRC:$dst),
6108 (ins _.FRC:$src1, _Src.ScalarMemOp:$src2),
6109 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
6110 EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>;
6114 // Scalar Conversion with SAE - suppress all exceptions
// Scalar FP<->FP conversion with {sae} (suppress-all-exceptions); register
// form only — SAE is meaningless for memory operands.
6115 multiclass avx512_cvt_fp_sae_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6116 X86VectorVTInfo _Src, SDNode OpNodeRnd> {
6117 defm rrb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
6118 (ins _.RC:$src1, _Src.RC:$src2), OpcodeStr,
6119 "{sae}, $src2, $src1", "$src1, $src2, {sae}",
6120 (_.VT (OpNodeRnd (_.VT _.RC:$src1),
6121 (_Src.VT _Src.RC:$src2),
6122 (i32 FROUND_NO_EXC)))>,
6123 EVEX_4V, VEX_LIG, EVEX_B;
6126 // Scalar Conversion with rounding control (RC)
// Scalar FP<->FP conversion with an explicit static rounding-control
// immediate ($rc); register form only.
6127 multiclass avx512_cvt_fp_rc_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6128 X86VectorVTInfo _Src, SDNode OpNodeRnd> {
6129 defm rrb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
6130 (ins _.RC:$src1, _Src.RC:$src2, AVX512RC:$rc), OpcodeStr,
6131 "$rc, $src2, $src1", "$src1, $src2, $rc",
6132 (_.VT (OpNodeRnd (_.VT _.RC:$src1),
6133 (_Src.VT _Src.RC:$src2), (i32 imm:$rc)))>,
6134 EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
// vcvtsd2ss: double -> float narrowing; supports rounding control (RC),
// hence the avx512_cvt_fp_rc_scalar half.
6137 multiclass avx512_cvt_fp_scalar_sd2ss<bits<8> opc, string OpcodeStr,
6138 SDNode OpNodeRnd, X86VectorVTInfo _src,
6139 X86VectorVTInfo _dst> {
6140 let Predicates = [HasAVX512] in {
6141 defm Z : avx512_cvt_fp_scalar<opc, OpcodeStr, _dst, _src, OpNodeRnd>,
6142 avx512_cvt_fp_rc_scalar<opc, OpcodeStr, _dst, _src,
6143 OpNodeRnd>, VEX_W, EVEX_CD8<64, CD8VT1>, XD;
// vcvtss2sd: float -> double widening; the result is exact, so only the
// {sae} form (no rounding control) is provided.
6147 multiclass avx512_cvt_fp_scalar_ss2sd<bits<8> opc, string OpcodeStr,
6148 SDNode OpNodeRnd, X86VectorVTInfo _src,
6149 X86VectorVTInfo _dst> {
6150 let Predicates = [HasAVX512] in {
6151 defm Z : avx512_cvt_fp_scalar<opc, OpcodeStr, _dst, _src, OpNodeRnd>,
6152 avx512_cvt_fp_sae_scalar<opc, OpcodeStr, _dst, _src, OpNodeRnd>,
6153 EVEX_CD8<32, CD8VT1>, XS;
// Instantiate the scalar ss<->sd conversions and select generic
// fpextend/fpround (including extending loads) to them.
6156 defm VCVTSD2SS : avx512_cvt_fp_scalar_sd2ss<0x5A, "vcvtsd2ss",
6157 X86froundRnd, f64x_info, f32x_info>;
6158 defm VCVTSS2SD : avx512_cvt_fp_scalar_ss2sd<0x5A, "vcvtss2sd",
6159 X86fpextRnd,f32x_info, f64x_info >;
// The src register is also passed as the (ignored) upper-bits operand so no
// false dependency on an undefined register is introduced.
6161 def : Pat<(f64 (fpextend FR32X:$src)),
6162 (VCVTSS2SDZrr (COPY_TO_REGCLASS FR32X:$src, FR64X), FR32X:$src)>,
6163 Requires<[HasAVX512]>;
6164 def : Pat<(f64 (fpextend (loadf32 addr:$src))),
6165 (VCVTSS2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>,
6166 Requires<[HasAVX512]>;
// extload: fold the load when optimizing for size; otherwise load via
// VMOVSS first so the load and the convert can schedule independently.
6168 def : Pat<(f64 (extloadf32 addr:$src)),
6169 (VCVTSS2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>,
6170 Requires<[HasAVX512, OptForSize]>;
6172 def : Pat<(f64 (extloadf32 addr:$src)),
6173 (VCVTSS2SDZrr (f64 (IMPLICIT_DEF)), (VMOVSSZrm addr:$src))>,
6174 Requires<[HasAVX512, OptForSpeed]>;
6176 def : Pat<(f32 (fpround FR64X:$src)),
6177 (VCVTSD2SSZrr (COPY_TO_REGCLASS FR64X:$src, FR32X), FR64X:$src)>,
6178 Requires<[HasAVX512]>;
// Movss/Movsd merging a converted element 0 maps onto the *_Int forms,
// which naturally preserve the destination's upper elements.
6180 def : Pat<(v4f32 (X86Movss
6181 (v4f32 VR128X:$dst),
6182 (v4f32 (scalar_to_vector
6183 (f32 (fpround (f64 (extractelt VR128X:$src, (iPTR 0))))))))),
6184 (VCVTSD2SSZrr_Int VR128X:$dst, VR128X:$src)>,
6185 Requires<[HasAVX512]>;
6187 def : Pat<(v2f64 (X86Movsd
6188 (v2f64 VR128X:$dst),
6189 (v2f64 (scalar_to_vector
6190 (f64 (fpextend (f32 (extractelt VR128X:$src, (iPTR 0))))))))),
6191 (VCVTSS2SDZrr_Int VR128X:$dst, VR128X:$src)>,
6192 Requires<[HasAVX512]>;
6194 //===----------------------------------------------------------------------===//
6195 // AVX-512 Vector convert from signed/unsigned integer to float/double
6196 // and from float/double to signed/unsigned integer
6197 //===----------------------------------------------------------------------===//
// Base multiclass for vector int<->fp and fp<->fp conversions: masked
// reg-reg, reg-mem, and broadcast-mem forms. Broadcast/Alias/MemOp are
// overridable for the cases where the source and destination element
// counts differ (e.g. v2f64 <- low half of v4f32).
6199 multiclass avx512_vcvt_fp<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6200 X86VectorVTInfo _Src, SDNode OpNode,
6201 string Broadcast = _.BroadcastStr,
6202 string Alias = "", X86MemOperand MemOp = _Src.MemOp> {
6204 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
6205 (ins _Src.RC:$src), OpcodeStr, "$src", "$src",
6206 (_.VT (OpNode (_Src.VT _Src.RC:$src)))>, EVEX;
6208 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
6209 (ins MemOp:$src), OpcodeStr#Alias, "$src", "$src",
6210 (_.VT (OpNode (_Src.VT
6211 (bitconvert (_Src.LdFrag addr:$src)))))>, EVEX;
6213 defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
6214 (ins _Src.ScalarMemOp:$src), OpcodeStr,
6215 "${src}"##Broadcast, "${src}"##Broadcast,
6216 (_.VT (OpNode (_Src.VT
6217 (X86VBroadcast (_Src.ScalarLdFrag addr:$src)))
6220 // Conversion with SAE - suppress all exceptions
// Vector conversion with {sae}; register form only (EVEX.b set, no memory).
6221 multiclass avx512_vcvt_fp_sae<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6222 X86VectorVTInfo _Src, SDNode OpNodeRnd> {
6223 defm rrb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
6224 (ins _Src.RC:$src), OpcodeStr,
6225 "{sae}, $src", "$src, {sae}",
6226 (_.VT (OpNodeRnd (_Src.VT _Src.RC:$src),
6227 (i32 FROUND_NO_EXC)))>,
6231 // Conversion with rounding control (RC)
// Vector conversion with an explicit static rounding-control immediate;
// register form only.
6232 multiclass avx512_vcvt_fp_rc<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
6233 X86VectorVTInfo _Src, SDNode OpNodeRnd> {
6234 defm rrb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
6235 (ins _Src.RC:$src, AVX512RC:$rc), OpcodeStr,
6236 "$rc, $src", "$src, $rc",
6237 (_.VT (OpNodeRnd (_Src.VT _Src.RC:$src), (i32 imm:$rc)))>,
6238 EVEX, EVEX_B, EVEX_RC;
6241 // Extend Float to Double
// vcvtps2pd: widen float vectors to double. The 128-bit form reads only the
// low two floats, hence the explicit "{1to2}" broadcast string and f64mem
// memory operand, and uses X86vfpext rather than generic fpextend.
6242 multiclass avx512_cvtps2pd<bits<8> opc, string OpcodeStr> {
6243 let Predicates = [HasAVX512] in {
6244 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f64_info, v8f32x_info, fpextend>,
6245 avx512_vcvt_fp_sae<opc, OpcodeStr, v8f64_info, v8f32x_info,
6246 X86vfpextRnd>, EVEX_V512;
6248 let Predicates = [HasVLX] in {
6249 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2f64x_info, v4f32x_info,
6250 X86vfpext, "{1to2}", "", f64mem>, EVEX_V128;
6251 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f64x_info, v4f32x_info, fpextend>,
6256 // Truncate Double to Float
// vcvtpd2ps: narrow double vectors to float. The 128/256-bit forms need
// explicit "x"/"y" mnemonic suffixes (and the aliases below) since both
// produce an XMM result and would otherwise be ambiguous to the asm parser.
6257 multiclass avx512_cvtpd2ps<bits<8> opc, string OpcodeStr> {
6258 let Predicates = [HasAVX512] in {
6259 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f32x_info, v8f64_info, fpround>,
6260 avx512_vcvt_fp_rc<opc, OpcodeStr, v8f32x_info, v8f64_info,
6261 X86vfproundRnd>, EVEX_V512;
6263 let Predicates = [HasVLX] in {
6264 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v2f64x_info,
6265 X86vfpround, "{1to2}", "{x}">, EVEX_V128;
6266 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v4f64x_info, fpround,
6267 "{1to4}", "{y}">, EVEX_V256;
// Accept the suffixed mnemonics for the register forms as well.
6269 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6270 (!cast<Instruction>(NAME # "Z128rr") VR128X:$dst, VR128X:$src), 0>;
6271 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6272 (!cast<Instruction>(NAME # "Z128rm") VR128X:$dst, f128mem:$src), 0>;
6273 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6274 (!cast<Instruction>(NAME # "Z256rr") VR128X:$dst, VR256X:$src), 0>;
6275 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6276 (!cast<Instruction>(NAME # "Z256rm") VR128X:$dst, f256mem:$src), 0>;
// Instantiate vcvtpd2ps / vcvtps2pd and fold extending f32 loads into the
// conversion.
6280 defm VCVTPD2PS : avx512_cvtpd2ps<0x5A, "vcvtpd2ps">,
6281 VEX_W, PD, EVEX_CD8<64, CD8VF>;
6282 defm VCVTPS2PD : avx512_cvtps2pd<0x5A, "vcvtps2pd">,
6283 PS, EVEX_CD8<32, CD8VH>;
6285 def : Pat<(v8f64 (extloadv8f32 addr:$src)),
6286 (VCVTPS2PDZrm addr:$src)>;
6288 let Predicates = [HasVLX] in {
6289 let AddedComplexity = 15 in
// vzmovl of a rounded v2f64->v4f32 result is what the 128-bit convert
// already produces (upper elements zeroed), so select it directly.
6290 def : Pat<(X86vzmovl (v2f64 (bitconvert
6291 (v4f32 (X86vfpround (v2f64 VR128X:$src)))))),
6292 (VCVTPD2PSZ128rr VR128X:$src)>;
6293 def : Pat<(v2f64 (extloadv2f32 addr:$src)),
6294 (VCVTPS2PDZ128rm addr:$src)>;
6295 def : Pat<(v4f64 (extloadv4f32 addr:$src)),
6296 (VCVTPS2PDZ256rm addr:$src)>;
6299 // Convert Signed/Unsigned Doubleword to Double
// vcvtdq2pd / vcvtudq2pd: i32 vectors -> f64 vectors. The 128-bit form
// reads only the low two i32 elements (OpNode128, "{1to2}", i64mem).
6300 multiclass avx512_cvtdq2pd<bits<8> opc, string OpcodeStr, SDNode OpNode,
6302 // No rounding in this op
6303 let Predicates = [HasAVX512] in
6304 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f64_info, v8i32x_info, OpNode>,
6307 let Predicates = [HasVLX] in {
6308 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2f64x_info, v4i32x_info,
6309 OpNode128, "{1to2}", "", i64mem>, EVEX_V128;
6310 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f64x_info, v4i32x_info, OpNode>,
6315 // Convert Signed/Unsigned Doubleword to Float
// vcvtdq2ps / vcvtudq2ps: i32 vectors -> f32 vectors; 512-bit form also
// gets the rounding-control variant.
6316 multiclass avx512_cvtdq2ps<bits<8> opc, string OpcodeStr, SDNode OpNode,
6318 let Predicates = [HasAVX512] in
6319 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v16f32_info, v16i32_info, OpNode>,
6320 avx512_vcvt_fp_rc<opc, OpcodeStr, v16f32_info, v16i32_info,
6321 OpNodeRnd>, EVEX_V512;
6323 let Predicates = [HasVLX] in {
6324 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v4i32x_info, OpNode>,
6326 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v8f32x_info, v8i32x_info, OpNode>,
6331 // Convert Float to Signed/Unsigned Doubleword with truncation
// vcvttps2dq / vcvttps2udq: truncating f32 -> i32; truncation ignores
// MXCSR.RC, so the 512-bit form pairs with {sae} rather than RC.
6332 multiclass avx512_cvttps2dq<bits<8> opc, string OpcodeStr,
6333 SDNode OpNode, SDNode OpNodeRnd> {
6334 let Predicates = [HasAVX512] in {
6335 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v16i32_info, v16f32_info, OpNode>,
6336 avx512_vcvt_fp_sae<opc, OpcodeStr, v16i32_info, v16f32_info,
6337 OpNodeRnd>, EVEX_V512;
6339 let Predicates = [HasVLX] in {
6340 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v4f32x_info, OpNode>,
6342 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v8i32x_info, v8f32x_info, OpNode>,
6347 // Convert Float to Signed/Unsigned Doubleword
// vcvtps2dq / vcvtps2udq: rounding f32 -> i32; 512-bit form supports
// explicit rounding control.
6348 multiclass avx512_cvtps2dq<bits<8> opc, string OpcodeStr,
6349 SDNode OpNode, SDNode OpNodeRnd> {
6350 let Predicates = [HasAVX512] in {
6351 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v16i32_info, v16f32_info, OpNode>,
6352 avx512_vcvt_fp_rc<opc, OpcodeStr, v16i32_info, v16f32_info,
6353 OpNodeRnd>, EVEX_V512;
6355 let Predicates = [HasVLX] in {
6356 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v4f32x_info, OpNode>,
6358 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v8i32x_info, v8f32x_info, OpNode>,
6363 // Convert Double to Signed/Unsigned Doubleword with truncation
// vcvttpd2dq / vcvttpd2udq: truncating f64 -> i32 (narrowing).
6364 multiclass avx512_cvttpd2dq<bits<8> opc, string OpcodeStr, SDNode OpNode,
6365 SDNode OpNode128, SDNode OpNodeRnd> {
6366 let Predicates = [HasAVX512] in {
6367 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i32x_info, v8f64_info, OpNode>,
6368 avx512_vcvt_fp_sae<opc, OpcodeStr, v8i32x_info, v8f64_info,
6369 OpNodeRnd>, EVEX_V512;
6371 let Predicates = [HasVLX] in {
6372 // we need "x"/"y" suffixes in order to distinguish between 128 and 256
6373 // memory forms of these instructions in Asm Parser. They have the same
6374 // dest type - 'v4i32x_info'. We also specify the broadcast string explicitly
6375 // due to the same reason.
6376 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v2f64x_info,
6377 OpNode128, "{1to2}", "{x}">, EVEX_V128;
6378 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v4f64x_info, OpNode,
6379 "{1to4}", "{y}">, EVEX_V256;
// Accept the suffixed mnemonics for the register forms as well.
6381 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6382 (!cast<Instruction>(NAME # "Z128rr") VR128X:$dst, VR128X:$src), 0>;
6383 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6384 (!cast<Instruction>(NAME # "Z128rm") VR128X:$dst, i128mem:$src), 0>;
6385 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6386 (!cast<Instruction>(NAME # "Z256rr") VR128X:$dst, VR256X:$src), 0>;
6387 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6388 (!cast<Instruction>(NAME # "Z256rm") VR128X:$dst, i256mem:$src), 0>;
6392 // Convert Double to Signed/Unsigned Doubleword
// vcvtpd2dq / vcvtpd2udq: rounding f64 -> i32 (narrowing).
6393 multiclass avx512_cvtpd2dq<bits<8> opc, string OpcodeStr,
6394 SDNode OpNode, SDNode OpNodeRnd> {
6395 let Predicates = [HasAVX512] in {
6396 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i32x_info, v8f64_info, OpNode>,
6397 avx512_vcvt_fp_rc<opc, OpcodeStr, v8i32x_info, v8f64_info,
6398 OpNodeRnd>, EVEX_V512;
6400 let Predicates = [HasVLX] in {
6401 // we need "x"/"y" suffixes in order to distinguish between 128 and 256
6402 // memory forms of these instructions in Asm Parser. They have the same
6403 // dest type - 'v4i32x_info'. We also specify the broadcast string explicitly
6404 // due to the same reason.
6405 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v2f64x_info, OpNode,
6406 "{1to2}", "{x}">, EVEX_V128;
6407 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v4f64x_info, OpNode,
6408 "{1to4}", "{y}">, EVEX_V256;
// Accept the suffixed mnemonics for the register forms as well.
6410 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6411 (!cast<Instruction>(NAME # "Z128rr") VR128X:$dst, VR128X:$src), 0>;
6412 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6413 (!cast<Instruction>(NAME # "Z128rm") VR128X:$dst, f128mem:$src), 0>;
6414 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6415 (!cast<Instruction>(NAME # "Z256rr") VR128X:$dst, VR256X:$src), 0>;
6416 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6417 (!cast<Instruction>(NAME # "Z256rm") VR128X:$dst, f256mem:$src), 0>;
6421 // Convert Double to Signed/Unsigned Quadword
// vcvtpd2qq / vcvtpd2uqq: rounding f64 -> i64; requires AVX512DQ.
6422 multiclass avx512_cvtpd2qq<bits<8> opc, string OpcodeStr,
6423 SDNode OpNode, SDNode OpNodeRnd> {
6424 let Predicates = [HasDQI] in {
6425 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i64_info, v8f64_info, OpNode>,
6426 avx512_vcvt_fp_rc<opc, OpcodeStr, v8i64_info, v8f64_info,
6427 OpNodeRnd>, EVEX_V512;
6429 let Predicates = [HasDQI, HasVLX] in {
6430 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2i64x_info, v2f64x_info, OpNode>,
6432 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i64x_info, v4f64x_info, OpNode>,
6437 // Convert Double to Signed/Unsigned Quadword with truncation
// vcvttpd2qq / vcvttpd2uqq: truncating f64 -> i64; requires AVX512DQ.
6438 multiclass avx512_cvttpd2qq<bits<8> opc, string OpcodeStr,
6439 SDNode OpNode, SDNode OpNodeRnd> {
6440 let Predicates = [HasDQI] in {
6441 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i64_info, v8f64_info, OpNode>,
6442 avx512_vcvt_fp_sae<opc, OpcodeStr, v8i64_info, v8f64_info,
6443 OpNodeRnd>, EVEX_V512;
6445 let Predicates = [HasDQI, HasVLX] in {
6446 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2i64x_info, v2f64x_info, OpNode>,
6448 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i64x_info, v4f64x_info, OpNode>,
6453 // Convert Signed/Unsigned Quadword to Double
// vcvtqq2pd / vcvtuqq2pd: i64 -> f64; requires AVX512DQ.
6454 multiclass avx512_cvtqq2pd<bits<8> opc, string OpcodeStr,
6455 SDNode OpNode, SDNode OpNodeRnd> {
6456 let Predicates = [HasDQI] in {
6457 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f64_info, v8i64_info, OpNode>,
6458 avx512_vcvt_fp_rc<opc, OpcodeStr, v8f64_info, v8i64_info,
6459 OpNodeRnd>, EVEX_V512;
6461 let Predicates = [HasDQI, HasVLX] in {
6462 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2f64x_info, v2i64x_info, OpNode>,
6464 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f64x_info, v4i64x_info, OpNode>,
6469 // Convert Float to Signed/Unsigned Quadword
// vcvtps2qq / vcvtps2uqq: f32 -> i64 (widening); requires AVX512DQ.
6470 multiclass avx512_cvtps2qq<bits<8> opc, string OpcodeStr,
6471 SDNode OpNode, SDNode OpNodeRnd> {
6472 let Predicates = [HasDQI] in {
6473 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i64_info, v8f32x_info, OpNode>,
6474 avx512_vcvt_fp_rc<opc, OpcodeStr, v8i64_info, v8f32x_info,
6475 OpNodeRnd>, EVEX_V512;
6477 let Predicates = [HasDQI, HasVLX] in {
6478 // Explicitly specified broadcast string, since we take only 2 elements
6479 // from v4f32x_info source
6480 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2i64x_info, v4f32x_info, OpNode,
6481 "{1to2}", "", f64mem>, EVEX_V128;
6482 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i64x_info, v4f32x_info, OpNode>,
6487 // Convert Float to Signed/Unsigned Quadword with truncation
// vcvttps2qq / vcvttps2uqq: truncating f32 -> i64 (widening); requires
// AVX512DQ. 128-bit form reads only the low two floats (OpNode128).
6488 multiclass avx512_cvttps2qq<bits<8> opc, string OpcodeStr, SDNode OpNode,
6489 SDNode OpNode128, SDNode OpNodeRnd> {
6490 let Predicates = [HasDQI] in {
6491 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i64_info, v8f32x_info, OpNode>,
6492 avx512_vcvt_fp_sae<opc, OpcodeStr, v8i64_info, v8f32x_info,
6493 OpNodeRnd>, EVEX_V512;
6495 let Predicates = [HasDQI, HasVLX] in {
6496 // Explicitly specified broadcast string, since we take only 2 elements
6497 // from v4f32x_info source
6498 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2i64x_info, v4f32x_info, OpNode128,
6499 "{1to2}", "", f64mem>, EVEX_V128;
6500 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i64x_info, v4f32x_info, OpNode>,
6505 // Convert Signed/Unsigned Quadword to Float
// vcvtqq2ps / vcvtuqq2ps: i64 -> f32 (narrowing); requires AVX512DQ.
6506 multiclass avx512_cvtqq2ps<bits<8> opc, string OpcodeStr, SDNode OpNode,
6507 SDNode OpNode128, SDNode OpNodeRnd> {
6508 let Predicates = [HasDQI] in {
6509 defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f32x_info, v8i64_info, OpNode>,
6510 avx512_vcvt_fp_rc<opc, OpcodeStr, v8f32x_info, v8i64_info,
6511 OpNodeRnd>, EVEX_V512;
6513 let Predicates = [HasDQI, HasVLX] in {
6514 // we need "x"/"y" suffixes in order to distinguish between 128 and 256
6515 // memory forms of these instructions in Asm Parser. They have the same
6516 // dest type - 'v4f32x_info'. We also specify the broadcast string explicitly
6517 // due to the same reason.
6518 defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v2i64x_info, OpNode128,
6519 "{1to2}", "{x}">, EVEX_V128;
6520 defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v4i64x_info, OpNode,
6521 "{1to4}", "{y}">, EVEX_V256;
// Accept the suffixed mnemonics for the register forms as well.
6523 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6524 (!cast<Instruction>(NAME # "Z128rr") VR128X:$dst, VR128X:$src), 0>;
6525 def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}",
6526 (!cast<Instruction>(NAME # "Z128rm") VR128X:$dst, i128mem:$src), 0>;
6527 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6528 (!cast<Instruction>(NAME # "Z256rr") VR128X:$dst, VR256X:$src), 0>;
6529 def : InstAlias<OpcodeStr##"y\t{$src, $dst|$dst, $src}",
6530 (!cast<Instruction>(NAME # "Z256rm") VR128X:$dst, i256mem:$src), 0>;
// Instantiate all vector int<->fp conversions. 0x78/0x79 opcodes are the
// unsigned variants; DQ-only i64 conversions use the multiclasses above
// gated on HasDQI.
6534 defm VCVTDQ2PD : avx512_cvtdq2pd<0xE6, "vcvtdq2pd", sint_to_fp, X86VSintToFP>,
6535 XS, EVEX_CD8<32, CD8VH>;
6537 defm VCVTDQ2PS : avx512_cvtdq2ps<0x5B, "vcvtdq2ps", sint_to_fp,
6539 PS, EVEX_CD8<32, CD8VF>;
6541 defm VCVTTPS2DQ : avx512_cvttps2dq<0x5B, "vcvttps2dq", fp_to_sint,
6543 XS, EVEX_CD8<32, CD8VF>;
6545 defm VCVTTPD2DQ : avx512_cvttpd2dq<0xE6, "vcvttpd2dq", fp_to_sint, X86cvttp2si,
6547 PD, VEX_W, EVEX_CD8<64, CD8VF>;
6549 defm VCVTTPS2UDQ : avx512_cvttps2dq<0x78, "vcvttps2udq", fp_to_uint,
6550 X86cvttp2uiRnd>, PS,
6551 EVEX_CD8<32, CD8VF>;
6553 defm VCVTTPD2UDQ : avx512_cvttpd2dq<0x78, "vcvttpd2udq", fp_to_uint,
6554 X86cvttp2ui, X86cvttp2uiRnd>, PS, VEX_W,
6555 EVEX_CD8<64, CD8VF>;
6557 defm VCVTUDQ2PD : avx512_cvtdq2pd<0x7A, "vcvtudq2pd", uint_to_fp, X86VUintToFP>,
6558 XS, EVEX_CD8<32, CD8VH>;
6560 defm VCVTUDQ2PS : avx512_cvtdq2ps<0x7A, "vcvtudq2ps", uint_to_fp,
6561 X86VUintToFpRnd>, XD,
6562 EVEX_CD8<32, CD8VF>;
6564 defm VCVTPS2DQ : avx512_cvtps2dq<0x5B, "vcvtps2dq", X86cvtp2Int,
6565 X86cvtp2IntRnd>, PD, EVEX_CD8<32, CD8VF>;
6567 defm VCVTPD2DQ : avx512_cvtpd2dq<0xE6, "vcvtpd2dq", X86cvtp2Int,
6568 X86cvtp2IntRnd>, XD, VEX_W,
6569 EVEX_CD8<64, CD8VF>;
6571 defm VCVTPS2UDQ : avx512_cvtps2dq<0x79, "vcvtps2udq", X86cvtp2UInt,
6573 PS, EVEX_CD8<32, CD8VF>;
6574 defm VCVTPD2UDQ : avx512_cvtpd2dq<0x79, "vcvtpd2udq", X86cvtp2UInt,
6575 X86cvtp2UIntRnd>, VEX_W,
6576 PS, EVEX_CD8<64, CD8VF>;
6578 defm VCVTPD2QQ : avx512_cvtpd2qq<0x7B, "vcvtpd2qq", X86cvtp2Int,
6579 X86cvtp2IntRnd>, VEX_W,
6580 PD, EVEX_CD8<64, CD8VF>;
6582 defm VCVTPS2QQ : avx512_cvtps2qq<0x7B, "vcvtps2qq", X86cvtp2Int,
6583 X86cvtp2IntRnd>, PD, EVEX_CD8<32, CD8VH>;
6585 defm VCVTPD2UQQ : avx512_cvtpd2qq<0x79, "vcvtpd2uqq", X86cvtp2UInt,
6586 X86cvtp2UIntRnd>, VEX_W,
6587 PD, EVEX_CD8<64, CD8VF>;
6589 defm VCVTPS2UQQ : avx512_cvtps2qq<0x79, "vcvtps2uqq", X86cvtp2UInt,
6590 X86cvtp2UIntRnd>, PD, EVEX_CD8<32, CD8VH>;
6592 defm VCVTTPD2QQ : avx512_cvttpd2qq<0x7A, "vcvttpd2qq", fp_to_sint,
6593 X86cvttp2siRnd>, VEX_W,
6594 PD, EVEX_CD8<64, CD8VF>;
6596 defm VCVTTPS2QQ : avx512_cvttps2qq<0x7A, "vcvttps2qq", fp_to_sint, X86cvttp2si,
6597 X86cvttp2siRnd>, PD, EVEX_CD8<32, CD8VH>;
6599 defm VCVTTPD2UQQ : avx512_cvttpd2qq<0x78, "vcvttpd2uqq", fp_to_uint,
6600 X86cvttp2uiRnd>, VEX_W,
6601 PD, EVEX_CD8<64, CD8VF>;
6603 defm VCVTTPS2UQQ : avx512_cvttps2qq<0x78, "vcvttps2uqq", fp_to_uint, X86cvttp2ui,
6604 X86cvttp2uiRnd>, PD, EVEX_CD8<32, CD8VH>;
6606 defm VCVTQQ2PD : avx512_cvtqq2pd<0xE6, "vcvtqq2pd", sint_to_fp,
6607 X86VSintToFpRnd>, VEX_W, XS, EVEX_CD8<64, CD8VF>;
6609 defm VCVTUQQ2PD : avx512_cvtqq2pd<0x7A, "vcvtuqq2pd", uint_to_fp,
6610 X86VUintToFpRnd>, VEX_W, XS, EVEX_CD8<64, CD8VF>;
6612 defm VCVTQQ2PS : avx512_cvtqq2ps<0x5B, "vcvtqq2ps", sint_to_fp, X86VSintToFP,
6613 X86VSintToFpRnd>, VEX_W, PS, EVEX_CD8<64, CD8VF>;
6615 defm VCVTUQQ2PS : avx512_cvtqq2ps<0x7A, "vcvtuqq2ps", uint_to_fp, X86VUintToFP,
6616 X86VUintToFpRnd>, VEX_W, XD, EVEX_CD8<64, CD8VF>;
// Without VLX the unsigned 128/256-bit conversions are not available, so
// widen to the 512-bit instruction via INSERT_SUBREG and extract the
// result back out.
6618 let Predicates = [HasAVX512, NoVLX] in {
6619 def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
6620 (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
6621 (v16f32 (INSERT_SUBREG (IMPLICIT_DEF),
6622 VR256X:$src1, sub_ymm)))), sub_ymm)>;
6624 def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
6625 (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
6626 (v16f32 (INSERT_SUBREG (IMPLICIT_DEF),
6627 VR128X:$src1, sub_xmm)))), sub_xmm)>;
6629 def : Pat<(v4i32 (fp_to_uint (v4f64 VR256X:$src1))),
6630 (EXTRACT_SUBREG (v8i32 (VCVTTPD2UDQZrr
6631 (v8f64 (INSERT_SUBREG (IMPLICIT_DEF),
6632 VR256X:$src1, sub_ymm)))), sub_xmm)>;
6634 def : Pat<(v4i32 (X86cvttp2ui (v2f64 VR128X:$src))),
6635 (EXTRACT_SUBREG (v8i32 (VCVTTPD2UDQZrr
6636 (v8f64 (INSERT_SUBREG (IMPLICIT_DEF),
6637 VR128X:$src, sub_xmm)))), sub_xmm)>;
6639 def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
6640 (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
6641 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF),
6642 VR256X:$src1, sub_ymm)))), sub_ymm)>;
6644 def : Pat<(v4f32 (uint_to_fp (v4i32 VR128X:$src1))),
6645 (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
6646 (v16i32 (INSERT_SUBREG (IMPLICIT_DEF),
6647 VR128X:$src1, sub_xmm)))), sub_xmm)>;
6649 def : Pat<(v4f64 (uint_to_fp (v4i32 VR128X:$src1))),
6650 (EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr
6651 (v8i32 (INSERT_SUBREG (IMPLICIT_DEF),
6652 VR128X:$src1, sub_xmm)))), sub_ymm)>;
6654 def : Pat<(v2f64 (X86VUintToFP (v4i32 VR128X:$src1))),
6655 (EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr
6656 (v8i32 (INSERT_SUBREG (IMPLICIT_DEF),
6657 VR128X:$src1, sub_xmm)))), sub_xmm)>;
// The narrowing f64->i32 converts already zero the upper result elements,
// so a following vzmovl is redundant — select the convert alone.
6660 let Predicates = [HasAVX512, HasVLX] in {
6661 let AddedComplexity = 15 in {
6662 def : Pat<(X86vzmovl (v2i64 (bitconvert
6663 (v4i32 (X86cvtp2Int (v2f64 VR128X:$src)))))),
6664 (VCVTPD2DQZ128rr VR128X:$src)>;
6665 def : Pat<(v4i32 (bitconvert (X86vzmovl (v2i64 (bitconvert
6666 (v4i32 (X86cvtp2UInt (v2f64 VR128X:$src)))))))),
6667 (VCVTPD2UDQZ128rr VR128X:$src)>;
6668 def : Pat<(X86vzmovl (v2i64 (bitconvert
6669 (v4i32 (X86cvttp2si (v2f64 VR128X:$src)))))),
6670 (VCVTTPD2DQZ128rr VR128X:$src)>;
6671 def : Pat<(v4i32 (bitconvert (X86vzmovl (v2i64 (bitconvert
6672 (v4i32 (X86cvttp2ui (v2f64 VR128X:$src)))))))),
6673 (VCVTTPD2UDQZ128rr VR128X:$src)>;
// Fold loads directly into the 512-bit fpround/extload conversions.
6677 let Predicates = [HasAVX512] in {
6678 def : Pat<(v8f32 (fpround (loadv8f64 addr:$src))),
6679 (VCVTPD2PSZrm addr:$src)>;
6680 def : Pat<(v8f64 (extloadv8f32 addr:$src)),
6681 (VCVTPS2PDZrm addr:$src)>;
// Same upper-zeroing property for the DQ i64->f32 narrowing converts:
// drop the redundant vzmovl.
6684 let Predicates = [HasDQI, HasVLX] in {
6685 let AddedComplexity = 15 in {
6686 def : Pat<(X86vzmovl (v2f64 (bitconvert
6687 (v4f32 (X86VSintToFP (v2i64 VR128X:$src)))))),
6688 (VCVTQQ2PSZ128rr VR128X:$src)>;
6689 def : Pat<(X86vzmovl (v2f64 (bitconvert
6690 (v4f32 (X86VUintToFP (v2i64 VR128X:$src)))))),
6691 (VCVTUQQ2PSZ128rr VR128X:$src)>;
// With DQ but without VLX, widen the i64<->fp conversions to the 512-bit
// instruction via INSERT_SUBREG/EXTRACT_SUBREG.
6695 let Predicates = [HasDQI, NoVLX] in {
6696 def : Pat<(v2i64 (fp_to_sint (v2f64 VR128X:$src1))),
6697 (EXTRACT_SUBREG (v8i64 (VCVTTPD2QQZrr
6698 (v8f64 (INSERT_SUBREG (IMPLICIT_DEF),
6699 VR128X:$src1, sub_xmm)))), sub_xmm)>;
6701 def : Pat<(v4i64 (fp_to_sint (v4f32 VR128X:$src1))),
6702 (EXTRACT_SUBREG (v8i64 (VCVTTPS2QQZrr
6703 (v8f32 (INSERT_SUBREG (IMPLICIT_DEF),
6704 VR128X:$src1, sub_xmm)))), sub_ymm)>;
6706 def : Pat<(v4i64 (fp_to_sint (v4f64 VR256X:$src1))),
6707 (EXTRACT_SUBREG (v8i64 (VCVTTPD2QQZrr
6708 (v8f64 (INSERT_SUBREG (IMPLICIT_DEF),
6709 VR256X:$src1, sub_ymm)))), sub_ymm)>;
6711 def : Pat<(v2i64 (fp_to_uint (v2f64 VR128X:$src1))),
6712 (EXTRACT_SUBREG (v8i64 (VCVTTPD2UQQZrr
6713 (v8f64 (INSERT_SUBREG (IMPLICIT_DEF),
6714 VR128X:$src1, sub_xmm)))), sub_xmm)>;
6716 def : Pat<(v4i64 (fp_to_uint (v4f32 VR128X:$src1))),
6717 (EXTRACT_SUBREG (v8i64 (VCVTTPS2UQQZrr
6718 (v8f32 (INSERT_SUBREG (IMPLICIT_DEF),
6719 VR128X:$src1, sub_xmm)))), sub_ymm)>;
6721 def : Pat<(v4i64 (fp_to_uint (v4f64 VR256X:$src1))),
6722 (EXTRACT_SUBREG (v8i64 (VCVTTPD2UQQZrr
6723 (v8f64 (INSERT_SUBREG (IMPLICIT_DEF),
6724 VR256X:$src1, sub_ymm)))), sub_ymm)>;
6726 def : Pat<(v4f32 (sint_to_fp (v4i64 VR256X:$src1))),
6727 (EXTRACT_SUBREG (v8f32 (VCVTQQ2PSZrr
6728 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
6729 VR256X:$src1, sub_ymm)))), sub_xmm)>;
6731 def : Pat<(v2f64 (sint_to_fp (v2i64 VR128X:$src1))),
6732 (EXTRACT_SUBREG (v8f64 (VCVTQQ2PDZrr
6733 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
6734 VR128X:$src1, sub_xmm)))), sub_xmm)>;
6736 def : Pat<(v4f64 (sint_to_fp (v4i64 VR256X:$src1))),
6737 (EXTRACT_SUBREG (v8f64 (VCVTQQ2PDZrr
6738 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
6739 VR256X:$src1, sub_ymm)))), sub_ymm)>;
6741 def : Pat<(v4f32 (uint_to_fp (v4i64 VR256X:$src1))),
6742 (EXTRACT_SUBREG (v8f32 (VCVTUQQ2PSZrr
6743 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
6744 VR256X:$src1, sub_ymm)))), sub_xmm)>;
6746 def : Pat<(v2f64 (uint_to_fp (v2i64 VR128X:$src1))),
6747 (EXTRACT_SUBREG (v8f64 (VCVTUQQ2PDZrr
6748 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
6749 VR128X:$src1, sub_xmm)))), sub_xmm)>;
6751 def : Pat<(v4f64 (uint_to_fp (v4i64 VR256X:$src1))),
6752 (EXTRACT_SUBREG (v8f64 (VCVTUQQ2PDZrr
6753 (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
6754 VR256X:$src1, sub_ymm)))), sub_ymm)>;
6757 //===----------------------------------------------------------------------===//
6758 // Half precision conversion instructions
6759 //===----------------------------------------------------------------------===//
// vcvtph2ps: half -> single precision, masked reg and mem forms using
// MXCSR rounding.
6760 multiclass avx512_cvtph2ps<X86VectorVTInfo _dest, X86VectorVTInfo _src,
6761 X86MemOperand x86memop, PatFrag ld_frag> {
6762 defm rr : AVX512_maskable<0x13, MRMSrcReg, _dest ,(outs _dest.RC:$dst), (ins _src.RC:$src),
6763 "vcvtph2ps", "$src", "$src",
6764 (X86cvtph2ps (_src.VT _src.RC:$src),
6765 (i32 FROUND_CURRENT))>, T8PD;
6766 defm rm : AVX512_maskable<0x13, MRMSrcMem, _dest, (outs _dest.RC:$dst), (ins x86memop:$src),
6767 "vcvtph2ps", "$src", "$src",
6768 (X86cvtph2ps (_src.VT (bitconvert (ld_frag addr:$src))),
6769 (i32 FROUND_CURRENT))>, T8PD;
// vcvtph2ps with {sae}; register form only.
6772 multiclass avx512_cvtph2ps_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src> {
6773 defm rb : AVX512_maskable<0x13, MRMSrcReg, _dest ,(outs _dest.RC:$dst), (ins _src.RC:$src),
6774 "vcvtph2ps", "{sae}, $src", "$src, {sae}",
6775 (X86cvtph2ps (_src.VT _src.RC:$src),
6776 (i32 FROUND_NO_EXC))>, T8PD, EVEX_B;
// Instantiate vcvtph2ps at 512/256/128 bits; only the 512-bit form has the
// {sae} variant.
6780 let Predicates = [HasAVX512] in {
6781 defm VCVTPH2PSZ : avx512_cvtph2ps<v16f32_info, v16i16x_info, f256mem, loadv4i64>,
6782 avx512_cvtph2ps_sae<v16f32_info, v16i16x_info>,
6783 EVEX, EVEX_V512, EVEX_CD8<32, CD8VH>;
6784 let Predicates = [HasVLX] in {
6785 defm VCVTPH2PSZ256 : avx512_cvtph2ps<v8f32x_info, v8i16x_info, f128mem,
6786 loadv2i64>,EVEX, EVEX_V256, EVEX_CD8<32, CD8VH>;
6787 defm VCVTPH2PSZ128 : avx512_cvtph2ps<v4f32x_info, v8i16x_info, f64mem,
6788 loadv2i64>, EVEX, EVEX_V128, EVEX_CD8<32, CD8VH>;
// vcvtps2ph: single -> half precision with an immediate rounding-mode
// control ($src2). Provides masked reg form, store form, and a masked
// store form (mrk) with no pattern.
6792 multiclass avx512_cvtps2ph<X86VectorVTInfo _dest, X86VectorVTInfo _src,
6793 X86MemOperand x86memop> {
6794 defm rr : AVX512_maskable<0x1D, MRMDestReg, _dest ,(outs _dest.RC:$dst),
6795 (ins _src.RC:$src1, i32u8imm:$src2),
6796 "vcvtps2ph", "$src2, $src1", "$src1, $src2",
6797 (X86cvtps2ph (_src.VT _src.RC:$src1),
6799 NoItinerary, 0, 0, X86select>, AVX512AIi8Base;
6800 def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
6801 (ins x86memop:$dst, _src.RC:$src1, i32u8imm:$src2),
6802 "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
6803 [(store (_dest.VT (X86cvtps2ph (_src.VT _src.RC:$src1),
6806 let hasSideEffects = 0, mayStore = 1 in
6807 def mrk : AVX512AIi8<0x1D, MRMDestMem, (outs),
6808 (ins x86memop:$dst, _dest.KRCWM:$mask, _src.RC:$src1, i32u8imm:$src2),
6809 "vcvtps2ph\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
// vcvtps2ph with {sae}: assembler-only form (no pattern, no side effects).
6812 multiclass avx512_cvtps2ph_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src> {
6813 let hasSideEffects = 0 in
6814 defm rb : AVX512_maskable_in_asm<0x1D, MRMDestReg, _dest,
6815 (outs _dest.RC:$dst),
6816 (ins _src.RC:$src1, i32u8imm:$src2),
6817 "vcvtps2ph", "$src2, {sae}, $src1", "$src1, {sae}, $src2",
6818 []>, EVEX_B, AVX512AIi8Base;
// Instantiate vcvtps2ph at 512/256/128 bits; only the 512-bit form has the
// {sae} variant.
6820 let Predicates = [HasAVX512] in {
6821 defm VCVTPS2PHZ : avx512_cvtps2ph<v16i16x_info, v16f32_info, f256mem>,
6822 avx512_cvtps2ph_sae<v16i16x_info, v16f32_info>,
6823 EVEX, EVEX_V512, EVEX_CD8<32, CD8VH>;
6824 let Predicates = [HasVLX] in {
6825 defm VCVTPS2PHZ256 : avx512_cvtps2ph<v8i16x_info, v8f32x_info, f128mem>,
6826 EVEX, EVEX_V256, EVEX_CD8<32, CD8VH>;
6827 defm VCVTPS2PHZ128 : avx512_cvtps2ph<v8i16x_info, v4f32x_info, f64mem>,
6828 EVEX, EVEX_V128, EVEX_CD8<32, CD8VH>;
6832 // Patterns for matching conversions from float to half-float and vice versa.
6833 let Predicates = [HasVLX] in {
6834 // Use MXCSR.RC for rounding instead of explicitly specifying the default
6835 // rounding mode (Nearest-Even, encoded as 0). Both are equivalent in the
6836 // configurations we support (the default). However, falling back to MXCSR is
6837 // more consistent with other instructions, which are always controlled by it.
6838 // It's encoded as 0b100.
6839 def : Pat<(fp_to_f16 FR32X:$src),
6840 (i16 (EXTRACT_SUBREG (VMOVPDI2DIZrr (VCVTPS2PHZ128rr
6841 (COPY_TO_REGCLASS FR32X:$src, VR128X), 4)), sub_16bit))>;
6843 def : Pat<(f16_to_fp GR16:$src),
6844 (f32 (COPY_TO_REGCLASS (VCVTPH2PSZ128rr
6845 (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128X)), FR32X)) >;
// Round-trip f32 -> f16 -> f32 collapses to a convert-down/convert-up pair.
6847 def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32X:$src))),
6848 (f32 (COPY_TO_REGCLASS (VCVTPH2PSZ128rr
6849 (VCVTPS2PHZ128rr (COPY_TO_REGCLASS FR32X:$src, VR128X), 4)), FR32X)) >;
6852 // Patterns for matching float to half-float conversion when AVX512 is supported
6853 // but F16C isn't. In that case we have to use 512-bit vectors.
6854 let Predicates = [HasAVX512, NoVLX, NoF16C] in {
// Widen the scalar into a ZMM via INSERT_SUBREG/IMPLICIT_DEF, convert with the
// 512-bit instruction, then extract the low xmm and the low 16 bits.
6855 def : Pat<(fp_to_f16 FR32X:$src),
6856 (i16 (EXTRACT_SUBREG
6858 (v8i16 (EXTRACT_SUBREG
6860 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
6861 (v4f32 (COPY_TO_REGCLASS FR32X:$src, VR128X)),
6862 sub_xmm), 4), sub_xmm))), sub_16bit))>;
// f16 -> f32 via the 512-bit converter: widen the sign-extended half into the
// low xmm of a wider register, convert, extract the low xmm back out.
6864 def : Pat<(f16_to_fp GR16:$src),
6865 (f32 (COPY_TO_REGCLASS
6866 (v4f32 (EXTRACT_SUBREG
6868 (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)),
6869 (v8i16 (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128X)),
6870 sub_xmm)), sub_xmm)), FR32X))>;
// Round-trip f32 -> f16 -> f32 using the 512-bit VCVTPS2PHZrr (imm 4 selects
// MXCSR rounding, as documented for the VLX patterns above).
6872 def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32X:$src))),
6873 (f32 (COPY_TO_REGCLASS
6874 (v4f32 (EXTRACT_SUBREG
6876 (VCVTPS2PHZrr (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
6877 (v4f32 (COPY_TO_REGCLASS FR32X:$src, VR128X)),
6878 sub_xmm), 4)), sub_xmm)), FR32X))>;
6881 // Unordered/Ordered scalar fp compare with Sae and set EFLAGS
// Register-register [U]COMIS variant with the {sae} modifier; no patterns
// (asm-only), writes EFLAGS via the Defs set at the instantiation site.
// (Closing lines of this multiclass are elided in this extraction.)
6882 multiclass avx512_ord_cmp_sae<bits<8> opc, X86VectorVTInfo _,
6884 def rb: AVX512<opc, MRMSrcReg, (outs), (ins _.RC:$src1, _.RC:$src2),
6885 !strconcat(OpcodeStr, "\t{{sae}, $src2, $src1|$src1, $src2, {sae}}"),
6886 [], IIC_SSE_COMIS_RR>, EVEX, EVEX_B, VEX_LIG, EVEX_V128,
// EVEX-encoded [V][U]COMISS/SD instantiations. First block: the {sae}
// register forms (0x2E = ucomis, 0x2F = comis). All define EFLAGS.
6890 let Defs = [EFLAGS], Predicates = [HasAVX512] in {
6891 defm VUCOMISSZ : avx512_ord_cmp_sae<0x2E, v4f32x_info, "vucomiss">,
6892 AVX512PSIi8Base, EVEX_CD8<32, CD8VT1>;
6893 defm VUCOMISDZ : avx512_ord_cmp_sae<0x2E, v2f64x_info, "vucomisd">,
6894 AVX512PDIi8Base, VEX_W, EVEX_CD8<64, CD8VT1>;
6895 defm VCOMISSZ : avx512_ord_cmp_sae<0x2F, v4f32x_info, "vcomiss">,
6896 AVX512PSIi8Base, EVEX_CD8<32, CD8VT1>;
6897 defm VCOMISDZ : avx512_ord_cmp_sae<0x2F, v2f64x_info, "vcomisd">,
6898 AVX512PDIi8Base, VEX_W, EVEX_CD8<64, CD8VT1>;
// Scalar-FP-register forms reusing the SSE sse12_ord_cmp multiclass; the
// COMIS forms get empty pattern lists (selection uses the UCOMIS patterns).
6901 let Defs = [EFLAGS], Predicates = [HasAVX512] in {
6902 defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32,
6903 "ucomiss">, PS, EVEX, VEX_LIG,
6904 EVEX_CD8<32, CD8VT1>;
6905 defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64,
6906 "ucomisd">, PD, EVEX,
6907 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
6908 let Pattern = []<dag> in {
6909 defm VCOMISSZ : sse12_ord_cmp<0x2F, FR32X, undef, f32, f32mem, loadf32,
6910 "comiss">, PS, EVEX, VEX_LIG,
6911 EVEX_CD8<32, CD8VT1>;
6912 defm VCOMISDZ : sse12_ord_cmp<0x2F, FR64X, undef, f64, f64mem, loadf64,
6913 "comisd">, PD, EVEX,
6914 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
// Intrinsic (vector-register) forms, selected via X86ucomi/X86comi nodes;
// isCodeGenOnly because they duplicate the asm strings above.
6916 let isCodeGenOnly = 1 in {
6917 defm Int_VUCOMISSZ : sse12_ord_cmp_int<0x2E, VR128X, X86ucomi, v4f32, ssmem,
6918 sse_load_f32, "ucomiss">, PS, EVEX, VEX_LIG,
6919 EVEX_CD8<32, CD8VT1>;
6920 defm Int_VUCOMISDZ : sse12_ord_cmp_int<0x2E, VR128X, X86ucomi, v2f64, sdmem,
6921 sse_load_f64, "ucomisd">, PD, EVEX,
6922 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
6924 defm Int_VCOMISSZ : sse12_ord_cmp_int<0x2F, VR128X, X86comi, v4f32, ssmem,
6925 sse_load_f32, "comiss">, PS, EVEX, VEX_LIG,
6926 EVEX_CD8<32, CD8VT1>;
6927 defm Int_VCOMISDZ : sse12_ord_cmp_int<0x2F, VR128X, X86comi, v2f64, sdmem,
6928 sse_load_f64, "comisd">, PD, EVEX,
6929 VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
6933 /// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
// Scalar 14-bit-precision reciprocal/rsqrt approximations: rr and rm forms,
// both maskable. $src1 passes through the upper elements; $src2 is the
// operand actually approximated.
6934 multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
6935 X86VectorVTInfo _> {
6936 let Predicates = [HasAVX512], ExeDomain = _.ExeDomain in {
6937 defm rr : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
6938 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
6939 "$src2, $src1", "$src1, $src2",
6940 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))>, EVEX_4V;
// Memory form: the scalar load is widened with scalar_to_vector before
// feeding OpNode.
6941 defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
6942 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
6943 "$src2, $src1", "$src1, $src2",
6944 (OpNode (_.VT _.RC:$src1),
6945 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))))>, EVEX_4V;
// Scalar RCP14/RSQRT14 instantiations (0x4D / 0x4F), f32 and f64 variants.
6949 defm VRCP14SS : avx512_fp14_s<0x4D, "vrcp14ss", X86frcp14s, f32x_info>,
6950 EVEX_CD8<32, CD8VT1>, T8PD;
6951 defm VRCP14SD : avx512_fp14_s<0x4D, "vrcp14sd", X86frcp14s, f64x_info>,
6952 VEX_W, EVEX_CD8<64, CD8VT1>, T8PD;
6953 defm VRSQRT14SS : avx512_fp14_s<0x4F, "vrsqrt14ss", X86frsqrt14s, f32x_info>,
6954 EVEX_CD8<32, CD8VT1>, T8PD;
6955 defm VRSQRT14SD : avx512_fp14_s<0x4F, "vrsqrt14sd", X86frsqrt14s, f64x_info>,
6956 VEX_W, EVEX_CD8<64, CD8VT1>, T8PD;
6958 /// avx512_fp14_p rcp14ps, rcp14pd, rsqrt14ps, rsqrt14pd
// Packed 14-bit-precision approximations: register, full-memory, and
// broadcast-memory forms, all maskable.
6959 multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
6960 X86VectorVTInfo _> {
6961 let ExeDomain = _.ExeDomain in {
6962 defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
6963 (ins _.RC:$src), OpcodeStr, "$src", "$src",
6964 (_.FloatVT (OpNode _.RC:$src))>, EVEX, T8PD;
6965 defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
6966 (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
6968 (bitconvert (_.LdFrag addr:$src))))>, EVEX, T8PD;
// Broadcast form: a single scalar load splatted via X86VBroadcast.
6969 defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
6970 (ins _.ScalarMemOp:$src), OpcodeStr,
6971 "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
6973 (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
// Instantiates avx512_fp14_p for every vector length: 512-bit always,
// 128/256-bit only when AVX512VL is available.
6978 multiclass avx512_fp14_p_vl_all<bits<8> opc, string OpcodeStr, SDNode OpNode> {
6979 defm PSZ : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"), OpNode, v16f32_info>,
6980 EVEX_V512, EVEX_CD8<32, CD8VF>;
6981 defm PDZ : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"), OpNode, v8f64_info>,
6982 EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
6984 // Define only if AVX512VL feature is present.
6985 let Predicates = [HasVLX] in {
6986 defm PSZ128 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"),
6987 OpNode, v4f32x_info>,
6988 EVEX_V128, EVEX_CD8<32, CD8VF>;
6989 defm PSZ256 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"),
6990 OpNode, v8f32x_info>,
6991 EVEX_V256, EVEX_CD8<32, CD8VF>;
6992 defm PDZ128 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"),
6993 OpNode, v2f64x_info>,
6994 EVEX_V128, VEX_W, EVEX_CD8<64, CD8VF>;
6995 defm PDZ256 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"),
6996 OpNode, v4f64x_info>,
6997 EVEX_V256, VEX_W, EVEX_CD8<64, CD8VF>;
// Packed VRSQRT14/VRCP14 across all vector lengths.
7001 defm VRSQRT14 : avx512_fp14_p_vl_all<0x4E, "vrsqrt14", X86frsqrt>;
7002 defm VRCP14 : avx512_fp14_p_vl_all<0x4C, "vrcp14", X86frcp>;
7004 /// avx512_fp28_s rcp28ss, rcp28sd, rsqrt28ss, rsqrt28sd
// Scalar 28-bit-precision (ER) approximations. Each OpNode takes an extra
// i32 rounding operand: FROUND_CURRENT for the plain forms, FROUND_NO_EXC
// for the {sae} (EVEX_B) form.
7005 multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
7007 let ExeDomain = _.ExeDomain in {
7008 defm r : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
7009 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
7010 "$src2, $src1", "$src1, $src2",
7011 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
7012 (i32 FROUND_CURRENT))>;
7014 defm rb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
7015 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
7016 "{sae}, $src2, $src1", "$src1, $src2, {sae}",
7017 (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
7018 (i32 FROUND_NO_EXC))>, EVEX_B;
7020 defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
7021 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
7022 "$src2, $src1", "$src1, $src2",
7023 (OpNode (_.VT _.RC:$src1),
7024 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
7025 (i32 FROUND_CURRENT))>;
// Wraps avx512_fp28_s for both scalar element types (ss/sd suffixes).
7029 multiclass avx512_eri_s<bits<8> opc, string OpcodeStr, SDNode OpNode> {
7030 defm SS : avx512_fp28_s<opc, OpcodeStr#"ss", f32x_info, OpNode>,
7031 EVEX_CD8<32, CD8VT1>;
7032 defm SD : avx512_fp28_s<opc, OpcodeStr#"sd", f64x_info, OpNode>,
7033 EVEX_CD8<64, CD8VT1>, VEX_W;
// Scalar ER instructions (HasERI); VGETEXP scalar forms need only AVX512.
7036 let Predicates = [HasERI] in {
7037 defm VRCP28 : avx512_eri_s<0xCB, "vrcp28", X86rcp28s>, T8PD, EVEX_4V;
7038 defm VRSQRT28 : avx512_eri_s<0xCD, "vrsqrt28", X86rsqrt28s>, T8PD, EVEX_4V;
7041 defm VGETEXP : avx512_eri_s<0x43, "vgetexp", X86fgetexpRnds>, T8PD, EVEX_4V;
7042 /// avx512_fp28_p rcp28ps, rcp28pd, rsqrt28ps, rsqrt28pd
// Packed ER approximations: register, memory, and broadcast forms, each
// passing FROUND_CURRENT to OpNode (the {sae} form lives in
// avx512_fp28_p_round below).
7044 multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
7046 let ExeDomain = _.ExeDomain in {
7047 defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
7048 (ins _.RC:$src), OpcodeStr, "$src", "$src",
7049 (OpNode (_.VT _.RC:$src), (i32 FROUND_CURRENT))>;
7051 defm m : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
7052 (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
7054 (bitconvert (_.LdFrag addr:$src))),
7055 (i32 FROUND_CURRENT))>;
7057 defm mb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
7058 (ins _.ScalarMemOp:$src), OpcodeStr,
7059 "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
7061 (X86VBroadcast (_.ScalarLdFrag addr:$src))),
7062 (i32 FROUND_CURRENT))>, EVEX_B;
// {sae} register form of the packed ER ops (FROUND_NO_EXC, EVEX_B).
7065 multiclass avx512_fp28_p_round<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
7067 let ExeDomain = _.ExeDomain in
7068 defm rb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
7069 (ins _.RC:$src), OpcodeStr,
7070 "{sae}, $src", "$src, {sae}",
7071 (OpNode (_.VT _.RC:$src), (i32 FROUND_NO_EXC))>, EVEX_B;
// 512-bit packed ER instructions (ps/pd), plain + {sae} forms combined.
7074 multiclass avx512_eri<bits<8> opc, string OpcodeStr, SDNode OpNode> {
7075 defm PS : avx512_fp28_p<opc, OpcodeStr#"ps", v16f32_info, OpNode>,
7076 avx512_fp28_p_round<opc, OpcodeStr#"ps", v16f32_info, OpNode>,
7077 T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
7078 defm PD : avx512_fp28_p<opc, OpcodeStr#"pd", v8f64_info, OpNode>,
7079 avx512_fp28_p_round<opc, OpcodeStr#"pd", v8f64_info, OpNode>,
7080 T8PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
// 128/256-bit packed forms (VL variants) of an fp28-style unary op; used
// below to give VGETEXP its narrower encodings.
7083 multiclass avx512_fp_unaryop_packed<bits<8> opc, string OpcodeStr,
7085 // Define only if AVX512VL feature is present.
7086 let Predicates = [HasVLX] in {
7087 defm PSZ128 : avx512_fp28_p<opc, OpcodeStr#"ps", v4f32x_info, OpNode>,
7088 EVEX_V128, T8PD, EVEX_CD8<32, CD8VF>;
7089 defm PSZ256 : avx512_fp28_p<opc, OpcodeStr#"ps", v8f32x_info, OpNode>,
7090 EVEX_V256, T8PD, EVEX_CD8<32, CD8VF>;
7091 defm PDZ128 : avx512_fp28_p<opc, OpcodeStr#"pd", v2f64x_info, OpNode>,
7092 EVEX_V128, VEX_W, T8PD, EVEX_CD8<64, CD8VF>;
7093 defm PDZ256 : avx512_fp28_p<opc, OpcodeStr#"pd", v4f64x_info, OpNode>,
7094 EVEX_V256, VEX_W, T8PD, EVEX_CD8<64, CD8VF>;
// Packed ER instantiations (HasERI) and VGETEXP (AVX512, plus VL forms).
7097 let Predicates = [HasERI] in {
7099 defm VRSQRT28 : avx512_eri<0xCC, "vrsqrt28", X86rsqrt28>, EVEX;
7100 defm VRCP28 : avx512_eri<0xCA, "vrcp28", X86rcp28>, EVEX;
7101 defm VEXP2 : avx512_eri<0xC8, "vexp2", X86exp2>, EVEX;
7103 defm VGETEXP : avx512_eri<0x42, "vgetexp", X86fgetexpRnd>,
7104 avx512_fp_unaryop_packed<0x42, "vgetexp", X86fgetexpRnd> , EVEX;
// Packed sqrt with an explicit static rounding-control operand ($rc),
// encoded via EVEX_B + EVEX_RC.
7106 multiclass avx512_sqrt_packed_round<bits<8> opc, string OpcodeStr,
7107 SDNode OpNodeRnd, X86VectorVTInfo _>{
7108 let ExeDomain = _.ExeDomain in
7109 defm rb: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
7110 (ins _.RC:$src, AVX512RC:$rc), OpcodeStr, "$rc, $src", "$src, $rc",
7111 (_.VT (OpNodeRnd _.RC:$src, (i32 imm:$rc)))>,
7112 EVEX, EVEX_B, EVEX_RC;
// Packed sqrt: register, memory, and broadcast-memory forms, all maskable.
7115 multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr,
7116 SDNode OpNode, X86VectorVTInfo _>{
7117 let ExeDomain = _.ExeDomain in {
7118 defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
7119 (ins _.RC:$src), OpcodeStr, "$src", "$src",
7120 (_.FloatVT (OpNode _.RC:$src))>, EVEX;
7121 defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
7122 (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
7124 (bitconvert (_.LdFrag addr:$src))))>, EVEX;
7126 defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
7127 (ins _.ScalarMemOp:$src), OpcodeStr,
7128 "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
7130 (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
// Packed sqrt at every vector length: 512-bit unconditionally,
// 128/256-bit gated on AVX512VL.
7135 multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr,
7137 defm PSZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
7139 EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
7140 defm PDZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
7142 EVEX_V512, VEX_W, PD, EVEX_CD8<64, CD8VF>;
7143 // Define only if AVX512VL feature is present.
7144 let Predicates = [HasVLX] in {
7145 defm PSZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
7146 OpNode, v4f32x_info>,
7147 EVEX_V128, PS, EVEX_CD8<32, CD8VF>;
7148 defm PSZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
7149 OpNode, v8f32x_info>,
7150 EVEX_V256, PS, EVEX_CD8<32, CD8VF>;
7151 defm PDZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
7152 OpNode, v2f64x_info>,
7153 EVEX_V128, VEX_W, PD, EVEX_CD8<64, CD8VF>;
7154 defm PDZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
7155 OpNode, v4f64x_info>,
7156 EVEX_V256, VEX_W, PD, EVEX_CD8<64, CD8VF>;
// 512-bit sqrt variants with the embedded-rounding ($rc) operand.
7160 multiclass avx512_sqrt_packed_all_round<bits<8> opc, string OpcodeStr,
7162 defm PSZ : avx512_sqrt_packed_round<opc, !strconcat(OpcodeStr, "ps"), OpNodeRnd,
7163 v16f32_info>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
7164 defm PDZ : avx512_sqrt_packed_round<opc, !strconcat(OpcodeStr, "pd"), OpNodeRnd,
7165 v8f64_info>, EVEX_V512, VEX_W, PD, EVEX_CD8<64, CD8VF>;
// Scalar sqrt. The *_Int forms carry intrinsic semantics (OpNodeRnd with a
// rounding operand); the isCodeGenOnly r/m forms use FR register classes and
// empty patterns, selected via the Pats at the bottom.
7168 multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
7169 string SUFF, SDNode OpNode, SDNode OpNodeRnd> {
7170 let ExeDomain = _.ExeDomain in {
7171 defm r_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
7172 (ins _.RC:$src1, _.RC:$src2), OpcodeStr,
7173 "$src2, $src1", "$src1, $src2",
7174 (OpNodeRnd (_.VT _.RC:$src1),
7176 (i32 FROUND_CURRENT))>;
7177 defm m_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
7178 (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
7179 "$src2, $src1", "$src1, $src2",
7180 (OpNodeRnd (_.VT _.RC:$src1),
7181 (_.VT (scalar_to_vector
7182 (_.ScalarLdFrag addr:$src2))),
7183 (i32 FROUND_CURRENT))>;
// Embedded-rounding register form ($rc selects the static rounding mode).
7185 defm rb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
7186 (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr,
7187 "$rc, $src2, $src1", "$src1, $src2, $rc",
7188 (OpNodeRnd (_.VT _.RC:$src1),
// Pattern-less FR-class forms for plain scalar codegen.
7193 let isCodeGenOnly = 1, hasSideEffects = 0 in {
7194 def r : I<opc, MRMSrcReg, (outs _.FRC:$dst),
7195 (ins _.FRC:$src1, _.FRC:$src2),
7196 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>;
7199 def m : I<opc, MRMSrcMem, (outs _.FRC:$dst),
7200 (ins _.FRC:$src1, _.ScalarMemOp:$src2),
7201 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>;
// Select the FR-class forms for plain fsqrt; the load form only under
// OptForSize (folding the load ties $src1 to an IMPLICIT_DEF).
7205 def : Pat<(_.EltVT (OpNode _.FRC:$src)),
7206 (!cast<Instruction>(NAME#SUFF#Zr)
7207 (_.EltVT (IMPLICIT_DEF)), _.FRC:$src)>;
7209 def : Pat<(_.EltVT (OpNode (load addr:$src))),
7210 (!cast<Instruction>(NAME#SUFF#Zm)
7211 (_.EltVT (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX512, OptForSize]>;
// Scalar sqrt for both element types, then the VSQRT instantiations
// (packed + rounding + scalar).
7214 multiclass avx512_sqrt_scalar_all<bits<8> opc, string OpcodeStr> {
7215 defm SSZ : avx512_sqrt_scalar<opc, OpcodeStr#"ss", f32x_info, "SS", fsqrt,
7216 X86fsqrtRnds>, EVEX_CD8<32, CD8VT1>, EVEX_4V, XS;
7217 defm SDZ : avx512_sqrt_scalar<opc, OpcodeStr#"sd", f64x_info, "SD", fsqrt,
7218 X86fsqrtRnds>, EVEX_CD8<64, CD8VT1>, EVEX_4V, XD, VEX_W;
7221 defm VSQRT : avx512_sqrt_packed_all<0x51, "vsqrt", fsqrt>,
7222 avx512_sqrt_packed_all_round<0x51, "vsqrt", X86fsqrtRnd>;
7224 defm VSQRT : avx512_sqrt_scalar_all<0x51, "vsqrt">, VEX_LIG;
// Select VRSQRT14SS/VRCP14SS for scalar X86frsqrt/X86frcp; the load-folding
// forms only under OptForSize.
7226 let Predicates = [HasAVX512] in {
7227 def : Pat<(f32 (X86frsqrt FR32X:$src)),
7228 (COPY_TO_REGCLASS (VRSQRT14SSrr (v4f32 (IMPLICIT_DEF)), (COPY_TO_REGCLASS FR32X:$src, VR128X)), VR128X)>;
7229 def : Pat<(f32 (X86frsqrt (load addr:$src))),
7230 (COPY_TO_REGCLASS (VRSQRT14SSrm (v4f32 (IMPLICIT_DEF)), addr:$src), VR128X)>,
7231 Requires<[OptForSize]>;
7232 def : Pat<(f32 (X86frcp FR32X:$src)),
7233 (COPY_TO_REGCLASS (VRCP14SSrr (v4f32 (IMPLICIT_DEF)), (COPY_TO_REGCLASS FR32X:$src, VR128X)), VR128X )>;
7234 def : Pat<(f32 (X86frcp (load addr:$src))),
7235 (COPY_TO_REGCLASS (VRCP14SSrm (v4f32 (IMPLICIT_DEF)), addr:$src), VR128X)>,
7236 Requires<[OptForSize]>;
// Scalar VRNDSCALE: r/rb/m forms plus patterns lowering the generic fp
// rounding nodes (ffloor/fceil/ftrunc/frint/fnearbyint) to fixed immediates
// (0x1/0x2/0x3/0x4/0xc respectively), for both register and load operands.
7240 avx512_rndscale_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
7242 let ExeDomain = _.ExeDomain in {
7243 defm r : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
7244 (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
7245 "$src3, $src2, $src1", "$src1, $src2, $src3",
7246 (_.VT (X86RndScales (_.VT _.RC:$src1), (_.VT _.RC:$src2),
7247 (i32 imm:$src3), (i32 FROUND_CURRENT)))>;
7249 defm rb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
7250 (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
7251 "$src3, {sae}, $src2, $src1", "$src1, $src2, {sae}, $src3",
7252 (_.VT (X86RndScales (_.VT _.RC:$src1), (_.VT _.RC:$src2),
7253 (i32 imm:$src3), (i32 FROUND_NO_EXC)))>, EVEX_B;
7255 defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
7256 (ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
7258 "$src3, $src2, $src1", "$src1, $src2, $src3",
7259 (_.VT (X86RndScales (_.VT _.RC:$src1),
7260 (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
7261 (i32 imm:$src3), (i32 FROUND_CURRENT)))>;
// Register-operand rounding patterns: immediate encodes the rounding kind.
7263 let Predicates = [HasAVX512] in {
7264 def : Pat<(ffloor _.FRC:$src), (COPY_TO_REGCLASS
7265 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
7266 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x1))), _.FRC)>;
7267 def : Pat<(fceil _.FRC:$src), (COPY_TO_REGCLASS
7268 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
7269 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x2))), _.FRC)>;
7270 def : Pat<(ftrunc _.FRC:$src), (COPY_TO_REGCLASS
7271 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
7272 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x3))), _.FRC)>;
7273 def : Pat<(frint _.FRC:$src), (COPY_TO_REGCLASS
7274 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
7275 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0x4))), _.FRC)>;
7276 def : Pat<(fnearbyint _.FRC:$src), (COPY_TO_REGCLASS
7277 (_.VT (!cast<Instruction>(NAME##r) (_.VT (IMPLICIT_DEF)),
7278 (_.VT (COPY_TO_REGCLASS _.FRC:$src, _.RC)), (i32 0xc))), _.FRC)>;
// Same five rounding kinds with the operand folded from memory.
7280 def : Pat<(ffloor (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
7281 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
7282 addr:$src, (i32 0x1))), _.FRC)>;
7283 def : Pat<(fceil (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
7284 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
7285 addr:$src, (i32 0x2))), _.FRC)>;
7286 def : Pat<(ftrunc (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
7287 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
7288 addr:$src, (i32 0x3))), _.FRC)>;
7289 def : Pat<(frint (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
7290 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
7291 addr:$src, (i32 0x4))), _.FRC)>;
7292 def : Pat<(fnearbyint (_.ScalarLdFrag addr:$src)), (COPY_TO_REGCLASS
7293 (_.VT (!cast<Instruction>(NAME##m) (_.VT (IMPLICIT_DEF)),
7294 addr:$src, (i32 0xc))), _.FRC)>;
// VRNDSCALESS/SD instantiations (opcodes 0x0A / 0x0B).
7298 defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", f32x_info>,
7299 AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VT1>;
7301 defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", f64x_info>, VEX_W,
7302 AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VT1>;
7304 //-------------------------------------------------
7305 // Integer truncate and extend operations
7306 //-------------------------------------------------
// Common machinery for VPMOV* truncates: the maskable rr form, X86select
// patterns so the masked intrinsics match, and asm-only mr/mrk store forms
// (the truncating-store patterns live in avx512_trunc_mr_lowering).
7308 multiclass avx512_trunc_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
7309 X86VectorVTInfo SrcInfo, X86VectorVTInfo DestInfo,
7310 X86MemOperand x86memop> {
7311 let ExeDomain = DestInfo.ExeDomain in
7312 defm rr : AVX512_maskable<opc, MRMDestReg, DestInfo, (outs DestInfo.RC:$dst),
7313 (ins SrcInfo.RC:$src1), OpcodeStr ,"$src1", "$src1",
7314 (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1)))>,
7317 // for intrinsic pattern match
7318 def : Pat<(DestInfo.VT (X86select DestInfo.KRCWM:$mask,
7319 (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1))),
7321 (!cast<Instruction>(NAME#SrcInfo.ZSuffix##rrkz) DestInfo.KRCWM:$mask ,
// Zero-masked select (pass-through is the all-zeros vector).
7324 def : Pat<(DestInfo.VT (X86select DestInfo.KRCWM:$mask,
7325 (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1))),
7326 DestInfo.ImmAllZerosV)),
7327 (!cast<Instruction>(NAME#SrcInfo.ZSuffix##rrkz) DestInfo.KRCWM:$mask ,
// Merge-masked select (pass-through is $src0).
7330 def : Pat<(DestInfo.VT (X86select DestInfo.KRCWM:$mask,
7331 (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1))),
7332 DestInfo.RC:$src0)),
7333 (!cast<Instruction>(NAME#SrcInfo.ZSuffix##rrk) DestInfo.RC:$src0,
7334 DestInfo.KRCWM:$mask ,
// Store forms are declared pattern-less; mayLoad covers address folding.
7337 let mayStore = 1, mayLoad = 1, hasSideEffects = 0,
7338 ExeDomain = DestInfo.ExeDomain in {
7339 def mr : AVX512XS8I<opc, MRMDestMem, (outs),
7340 (ins x86memop:$dst, SrcInfo.RC:$src),
7341 OpcodeStr # "\t{$src, $dst|$dst, $src}",
7344 def mrk : AVX512XS8I<opc, MRMDestMem, (outs),
7345 (ins x86memop:$dst, SrcInfo.KRCWM:$mask, SrcInfo.RC:$src),
7346 OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}",
7348 }//mayStore = 1, mayLoad = 1, hasSideEffects = 0
// Patterns mapping (masked) truncating stores onto the mr/mrk instructions
// declared in avx512_trunc_common.
7351 multiclass avx512_trunc_mr_lowering<X86VectorVTInfo SrcInfo,
7352 X86VectorVTInfo DestInfo,
7353 PatFrag truncFrag, PatFrag mtruncFrag > {
7355 def : Pat<(truncFrag (SrcInfo.VT SrcInfo.RC:$src), addr:$dst),
7356 (!cast<Instruction>(NAME#SrcInfo.ZSuffix##mr)
7357 addr:$dst, SrcInfo.RC:$src)>;
7359 def : Pat<(mtruncFrag addr:$dst, SrcInfo.KRCWM:$mask,
7360 (SrcInfo.VT SrcInfo.RC:$src)),
7361 (!cast<Instruction>(NAME#SrcInfo.ZSuffix##mrk)
7362 addr:$dst, SrcInfo.KRCWM:$mask, SrcInfo.RC:$src)>;
// Instantiates the truncate at all three vector lengths: Z128/Z256 gated on
// [HasVLX, prd], Z on prd alone (prd defaults to HasAVX512; VPMOVWB passes
// HasBWI).
7365 multiclass avx512_trunc<bits<8> opc, string OpcodeStr, SDNode OpNode,
7366 AVX512VLVectorVTInfo VTSrcInfo, X86VectorVTInfo DestInfoZ128,
7367 X86VectorVTInfo DestInfoZ256, X86VectorVTInfo DestInfoZ,
7368 X86MemOperand x86memopZ128, X86MemOperand x86memopZ256,
7369 X86MemOperand x86memopZ, PatFrag truncFrag, PatFrag mtruncFrag,
7370 Predicate prd = HasAVX512>{
7372 let Predicates = [HasVLX, prd] in {
7373 defm Z128: avx512_trunc_common<opc, OpcodeStr, OpNode, VTSrcInfo.info128,
7374 DestInfoZ128, x86memopZ128>,
7375 avx512_trunc_mr_lowering<VTSrcInfo.info128, DestInfoZ128,
7376 truncFrag, mtruncFrag>, EVEX_V128;
7378 defm Z256: avx512_trunc_common<opc, OpcodeStr, OpNode, VTSrcInfo.info256,
7379 DestInfoZ256, x86memopZ256>,
7380 avx512_trunc_mr_lowering<VTSrcInfo.info256, DestInfoZ256,
7381 truncFrag, mtruncFrag>, EVEX_V256;
7383 let Predicates = [prd] in
7384 defm Z: avx512_trunc_common<opc, OpcodeStr, OpNode, VTSrcInfo.info512,
7385 DestInfoZ, x86memopZ>,
7386 avx512_trunc_mr_lowering<VTSrcInfo.info512, DestInfoZ,
7387 truncFrag, mtruncFrag>, EVEX_V512;
// qword -> byte truncate (octo-element CD8 tuple).
7390 multiclass avx512_trunc_qb<bits<8> opc, string OpcodeStr, SDNode OpNode,
7391 PatFrag StoreNode, PatFrag MaskedStoreNode> {
7392 defm NAME: avx512_trunc<opc, OpcodeStr, OpNode, avx512vl_i64_info,
7393 v16i8x_info, v16i8x_info, v16i8x_info, i16mem, i32mem, i64mem,
7394 StoreNode, MaskedStoreNode>, EVEX_CD8<8, CD8VO>;
// qword -> word truncate.
7397 multiclass avx512_trunc_qw<bits<8> opc, string OpcodeStr, SDNode OpNode,
7398 PatFrag StoreNode, PatFrag MaskedStoreNode> {
7399 defm NAME: avx512_trunc<opc, OpcodeStr, OpNode, avx512vl_i64_info,
7400 v8i16x_info, v8i16x_info, v8i16x_info, i32mem, i64mem, i128mem,
7401 StoreNode, MaskedStoreNode>, EVEX_CD8<16, CD8VQ>;
// qword -> dword truncate.
7404 multiclass avx512_trunc_qd<bits<8> opc, string OpcodeStr, SDNode OpNode,
7405 PatFrag StoreNode, PatFrag MaskedStoreNode> {
7406 defm NAME: avx512_trunc<opc, OpcodeStr, OpNode, avx512vl_i64_info,
7407 v4i32x_info, v4i32x_info, v8i32x_info, i64mem, i128mem, i256mem,
7408 StoreNode, MaskedStoreNode>, EVEX_CD8<32, CD8VH>;
// dword -> byte truncate.
7411 multiclass avx512_trunc_db<bits<8> opc, string OpcodeStr, SDNode OpNode,
7412 PatFrag StoreNode, PatFrag MaskedStoreNode> {
7413 defm NAME: avx512_trunc<opc, OpcodeStr, OpNode, avx512vl_i32_info,
7414 v16i8x_info, v16i8x_info, v16i8x_info, i32mem, i64mem, i128mem,
7415 StoreNode, MaskedStoreNode>, EVEX_CD8<8, CD8VQ>;
// dword -> word truncate.
7418 multiclass avx512_trunc_dw<bits<8> opc, string OpcodeStr, SDNode OpNode,
7419 PatFrag StoreNode, PatFrag MaskedStoreNode> {
7420 defm NAME: avx512_trunc<opc, OpcodeStr, OpNode, avx512vl_i32_info,
7421 v8i16x_info, v8i16x_info, v16i16x_info, i64mem, i128mem, i256mem,
7422 StoreNode, MaskedStoreNode>, EVEX_CD8<16, CD8VH>;
// word -> byte truncate; requires BWI rather than base AVX512.
7425 multiclass avx512_trunc_wb<bits<8> opc, string OpcodeStr, SDNode OpNode,
7426 PatFrag StoreNode, PatFrag MaskedStoreNode> {
7427 defm NAME: avx512_trunc<opc, OpcodeStr, OpNode, avx512vl_i16_info,
7428 v16i8x_info, v16i8x_info, v32i8x_info, i64mem, i128mem, i256mem,
7429 StoreNode, MaskedStoreNode, HasBWI>, EVEX_CD8<16, CD8VH>;
// VPMOV* instantiations. Three flavors per width pair: plain truncate
// (X86vtrunc), signed saturate (X86vtruncs), unsigned saturate (X86vtruncus),
// each paired with its (masked) truncating-store PatFrags.
7432 defm VPMOVQB : avx512_trunc_qb<0x32, "vpmovqb", X86vtrunc,
7433 truncstorevi8, masked_truncstorevi8>;
7434 defm VPMOVSQB : avx512_trunc_qb<0x22, "vpmovsqb", X86vtruncs,
7435 truncstore_s_vi8, masked_truncstore_s_vi8>;
7436 defm VPMOVUSQB : avx512_trunc_qb<0x12, "vpmovusqb", X86vtruncus,
7437 truncstore_us_vi8, masked_truncstore_us_vi8>;
7439 defm VPMOVQW : avx512_trunc_qw<0x34, "vpmovqw", X86vtrunc,
7440 truncstorevi16, masked_truncstorevi16>;
7441 defm VPMOVSQW : avx512_trunc_qw<0x24, "vpmovsqw", X86vtruncs,
7442 truncstore_s_vi16, masked_truncstore_s_vi16>;
7443 defm VPMOVUSQW : avx512_trunc_qw<0x14, "vpmovusqw", X86vtruncus,
7444 truncstore_us_vi16, masked_truncstore_us_vi16>;
7446 defm VPMOVQD : avx512_trunc_qd<0x35, "vpmovqd", X86vtrunc,
7447 truncstorevi32, masked_truncstorevi32>;
7448 defm VPMOVSQD : avx512_trunc_qd<0x25, "vpmovsqd", X86vtruncs,
7449 truncstore_s_vi32, masked_truncstore_s_vi32>;
7450 defm VPMOVUSQD : avx512_trunc_qd<0x15, "vpmovusqd", X86vtruncus,
7451 truncstore_us_vi32, masked_truncstore_us_vi32>;
7453 defm VPMOVDB : avx512_trunc_db<0x31, "vpmovdb", X86vtrunc,
7454 truncstorevi8, masked_truncstorevi8>;
7455 defm VPMOVSDB : avx512_trunc_db<0x21, "vpmovsdb", X86vtruncs,
7456 truncstore_s_vi8, masked_truncstore_s_vi8>;
7457 defm VPMOVUSDB : avx512_trunc_db<0x11, "vpmovusdb", X86vtruncus,
7458 truncstore_us_vi8, masked_truncstore_us_vi8>;
7460 defm VPMOVDW : avx512_trunc_dw<0x33, "vpmovdw", X86vtrunc,
7461 truncstorevi16, masked_truncstorevi16>;
7462 defm VPMOVSDW : avx512_trunc_dw<0x23, "vpmovsdw", X86vtruncs,
7463 truncstore_s_vi16, masked_truncstore_s_vi16>;
7464 defm VPMOVUSDW : avx512_trunc_dw<0x13, "vpmovusdw", X86vtruncus,
7465 truncstore_us_vi16, masked_truncstore_us_vi16>;
7467 defm VPMOVWB : avx512_trunc_wb<0x30, "vpmovwb", X86vtrunc,
7468 truncstorevi8, masked_truncstorevi8>;
7469 defm VPMOVSWB : avx512_trunc_wb<0x20, "vpmovswb", X86vtruncs,
7470 truncstore_s_vi8, masked_truncstore_s_vi8>;
7471 defm VPMOVUSWB : avx512_trunc_wb<0x10, "vpmovuswb", X86vtruncus,
7472 truncstore_us_vi8, masked_truncstore_us_vi8>;
// Without VLX, 256-bit truncates are emulated by widening the source into a
// ZMM (INSERT_SUBREG of IMPLICIT_DEF), using the 512-bit VPMOV, and
// extracting the low xmm of the result.
7474 let Predicates = [HasAVX512, NoVLX] in {
7475 def: Pat<(v8i16 (X86vtrunc (v8i32 VR256X:$src))),
7476 (v8i16 (EXTRACT_SUBREG
7477 (v16i16 (VPMOVDWZrr (v16i32 (INSERT_SUBREG (IMPLICIT_DEF),
7478 VR256X:$src, sub_ymm)))), sub_xmm))>;
7479 def: Pat<(v4i32 (X86vtrunc (v4i64 VR256X:$src))),
7480 (v4i32 (EXTRACT_SUBREG
7481 (v8i32 (VPMOVQDZrr (v8i64 (INSERT_SUBREG (IMPLICIT_DEF),
7482 VR256X:$src, sub_ymm)))), sub_xmm))>;
// Same widening trick for the BWI-only word->byte truncate.
7485 let Predicates = [HasBWI, NoVLX] in {
7486 def: Pat<(v16i8 (X86vtrunc (v16i16 VR256X:$src))),
7487 (v16i8 (EXTRACT_SUBREG (VPMOVWBZrr (v32i16 (INSERT_SUBREG (IMPLICIT_DEF),
7488 VR256X:$src, sub_ymm))), sub_xmm))>;
// Common VPMOVSX/ZX machinery: maskable rr form using OpNode, and rm form
// whose pattern goes straight through the extending-load PatFrag.
7491 multiclass avx512_extend_common<bits<8> opc, string OpcodeStr,
7492 X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo,
7493 X86MemOperand x86memop, PatFrag LdFrag, SDPatternOperator OpNode>{
7494 let ExeDomain = DestInfo.ExeDomain in {
7495 defm rr : AVX512_maskable<opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
7496 (ins SrcInfo.RC:$src), OpcodeStr ,"$src", "$src",
7497 (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src)))>,
7500 defm rm : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
7501 (ins x86memop:$src), OpcodeStr ,"$src", "$src",
7502 (DestInfo.VT (LdFrag addr:$src))>,
// byte -> word extend. Note the 128-bit form uses InVecNode (only the low
// input elements participate) while 256/512-bit use OpNode. BWI gates the
// word-element variants.
7507 multiclass avx512_extend_BW<bits<8> opc, string OpcodeStr,
7508 SDPatternOperator OpNode, SDPatternOperator InVecNode,
7509 string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi8")> {
7510 let Predicates = [HasVLX, HasBWI] in {
7511 defm Z128: avx512_extend_common<opc, OpcodeStr, v8i16x_info,
7512 v16i8x_info, i64mem, LdFrag, InVecNode>,
7513 EVEX_CD8<8, CD8VH>, T8PD, EVEX_V128;
7515 defm Z256: avx512_extend_common<opc, OpcodeStr, v16i16x_info,
7516 v16i8x_info, i128mem, LdFrag, OpNode>,
7517 EVEX_CD8<8, CD8VH>, T8PD, EVEX_V256;
7519 let Predicates = [HasBWI] in {
7520 defm Z : avx512_extend_common<opc, OpcodeStr, v32i16_info,
7521 v32i8x_info, i256mem, LdFrag, OpNode>,
7522 EVEX_CD8<8, CD8VH>, T8PD, EVEX_V512;
// byte -> dword extend (quarter-width CD8 tuple).
7526 multiclass avx512_extend_BD<bits<8> opc, string OpcodeStr,
7527 SDPatternOperator OpNode, SDPatternOperator InVecNode,
7528 string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi8")> {
7529 let Predicates = [HasVLX, HasAVX512] in {
7530 defm Z128: avx512_extend_common<opc, OpcodeStr, v4i32x_info,
7531 v16i8x_info, i32mem, LdFrag, InVecNode>,
7532 EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V128;
7534 defm Z256: avx512_extend_common<opc, OpcodeStr, v8i32x_info,
7535 v16i8x_info, i64mem, LdFrag, OpNode>,
7536 EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V256;
7538 let Predicates = [HasAVX512] in {
7539 defm Z : avx512_extend_common<opc, OpcodeStr, v16i32_info,
7540 v16i8x_info, i128mem, LdFrag, OpNode>,
7541 EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V512;
// byte -> qword extend (eighth-width CD8 tuple).
7545 multiclass avx512_extend_BQ<bits<8> opc, string OpcodeStr,
7546 SDPatternOperator OpNode, SDPatternOperator InVecNode,
7547 string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi8")> {
7548 let Predicates = [HasVLX, HasAVX512] in {
7549 defm Z128: avx512_extend_common<opc, OpcodeStr, v2i64x_info,
7550 v16i8x_info, i16mem, LdFrag, InVecNode>,
7551 EVEX_CD8<8, CD8VO>, T8PD, EVEX_V128;
7553 defm Z256: avx512_extend_common<opc, OpcodeStr, v4i64x_info,
7554 v16i8x_info, i32mem, LdFrag, OpNode>,
7555 EVEX_CD8<8, CD8VO>, T8PD, EVEX_V256;
7557 let Predicates = [HasAVX512] in {
7558 defm Z : avx512_extend_common<opc, OpcodeStr, v8i64_info,
7559 v16i8x_info, i64mem, LdFrag, OpNode>,
7560 EVEX_CD8<8, CD8VO>, T8PD, EVEX_V512;
// word -> dword extend.
7564 multiclass avx512_extend_WD<bits<8> opc, string OpcodeStr,
7565 SDPatternOperator OpNode, SDPatternOperator InVecNode,
7566 string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi16")> {
7567 let Predicates = [HasVLX, HasAVX512] in {
7568 defm Z128: avx512_extend_common<opc, OpcodeStr, v4i32x_info,
7569 v8i16x_info, i64mem, LdFrag, InVecNode>,
7570 EVEX_CD8<16, CD8VH>, T8PD, EVEX_V128;
7572 defm Z256: avx512_extend_common<opc, OpcodeStr, v8i32x_info,
7573 v8i16x_info, i128mem, LdFrag, OpNode>,
7574 EVEX_CD8<16, CD8VH>, T8PD, EVEX_V256;
7576 let Predicates = [HasAVX512] in {
7577 defm Z : avx512_extend_common<opc, OpcodeStr, v16i32_info,
7578 v16i16x_info, i256mem, LdFrag, OpNode>,
7579 EVEX_CD8<16, CD8VH>, T8PD, EVEX_V512;
// word -> qword extend.
7583 multiclass avx512_extend_WQ<bits<8> opc, string OpcodeStr,
7584 SDPatternOperator OpNode, SDPatternOperator InVecNode,
7585 string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi16")> {
7586 let Predicates = [HasVLX, HasAVX512] in {
7587 defm Z128: avx512_extend_common<opc, OpcodeStr, v2i64x_info,
7588 v8i16x_info, i32mem, LdFrag, InVecNode>,
7589 EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V128;
7591 defm Z256: avx512_extend_common<opc, OpcodeStr, v4i64x_info,
7592 v8i16x_info, i64mem, LdFrag, OpNode>,
7593 EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V256;
7595 let Predicates = [HasAVX512] in {
7596 defm Z : avx512_extend_common<opc, OpcodeStr, v8i64_info,
7597 v8i16x_info, i128mem, LdFrag, OpNode>,
7598 EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V512;
// dword -> qword extend.
7602 multiclass avx512_extend_DQ<bits<8> opc, string OpcodeStr,
7603 SDPatternOperator OpNode, SDPatternOperator InVecNode,
7604 string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi32")> {
7606 let Predicates = [HasVLX, HasAVX512] in {
7607 defm Z128: avx512_extend_common<opc, OpcodeStr, v2i64x_info,
7608 v4i32x_info, i64mem, LdFrag, InVecNode>,
7609 EVEX_CD8<32, CD8VH>, T8PD, EVEX_V128;
7611 defm Z256: avx512_extend_common<opc, OpcodeStr, v4i64x_info,
7612 v4i32x_info, i128mem, LdFrag, OpNode>,
7613 EVEX_CD8<32, CD8VH>, T8PD, EVEX_V256;
7615 let Predicates = [HasAVX512] in {
7616 defm Z : avx512_extend_common<opc, OpcodeStr, v8i64_info,
7617 v8i32x_info, i256mem, LdFrag, OpNode>,
7618 EVEX_CD8<32, CD8VH>, T8PD, EVEX_V512;
// Instantiate the zero-extending moves.  The trailing "z" string selects the
// zextload PatFrags (e.g. zextloadvi16) as the default memory fragments.
defm VPMOVZXBW : avx512_extend_BW<0x30, "vpmovzxbw", X86vzext, zext_invec, "z">;
defm VPMOVZXBD : avx512_extend_BD<0x31, "vpmovzxbd", X86vzext, zext_invec, "z">;
defm VPMOVZXBQ : avx512_extend_BQ<0x32, "vpmovzxbq", X86vzext, zext_invec, "z">;
defm VPMOVZXWD : avx512_extend_WD<0x33, "vpmovzxwd", X86vzext, zext_invec, "z">;
defm VPMOVZXWQ : avx512_extend_WQ<0x34, "vpmovzxwq", X86vzext, zext_invec, "z">;
defm VPMOVZXDQ : avx512_extend_DQ<0x35, "vpmovzxdq", X86vzext, zext_invec, "z">;

// Instantiate the sign-extending moves; "s" selects the sextload PatFrags.
defm VPMOVSXBW: avx512_extend_BW<0x20, "vpmovsxbw", X86vsext, sext_invec, "s">;
defm VPMOVSXBD: avx512_extend_BD<0x21, "vpmovsxbd", X86vsext, sext_invec, "s">;
defm VPMOVSXBQ: avx512_extend_BQ<0x22, "vpmovsxbq", X86vsext, sext_invec, "s">;
defm VPMOVSXWD: avx512_extend_WD<0x23, "vpmovsxwd", X86vsext, sext_invec, "s">;
defm VPMOVSXWQ: avx512_extend_WQ<0x24, "vpmovsxwq", X86vsext, sext_invec, "s">;
defm VPMOVSXDQ: avx512_extend_DQ<0x25, "vpmovsxdq", X86vsext, sext_invec, "s">;
// EXTLOAD patterns, implemented using vpmovz (zero-extend).  An anyext load
// of type From to type To is lowered to the corresponding VPMOVZX*rm
// instruction; masked (vselect) forms map to the rmk/rmkz variants.
// InstrStr names the instruction stem, e.g. "BWZ128" -> VPMOVZXBWZ128rm.
multiclass avx512_ext_lowering<string InstrStr, X86VectorVTInfo To,
                               X86VectorVTInfo From, PatFrag LdFrag> {
  // Unmasked extending load.
  def : Pat<(To.VT (LdFrag addr:$src)),
            (!cast<Instruction>("VPMOVZX"#InstrStr#"rm") addr:$src)>;
  // Merge-masked: false lanes take $src0.
  def : Pat<(To.VT (vselect To.KRCWM:$mask, (LdFrag addr:$src), To.RC:$src0)),
            (!cast<Instruction>("VPMOVZX"#InstrStr#"rmk") To.RC:$src0,
             To.KRC:$mask, addr:$src)>;
  // Zero-masked: false lanes are zeroed.
  def : Pat<(To.VT (vselect To.KRCWM:$mask, (LdFrag addr:$src),
            (!cast<Instruction>("VPMOVZX"#InstrStr#"rmkz") To.KRC:$mask,
// Instantiate the extload lowerings for every (source, dest) element-width
// combination, gated on the feature set that provides each vector width.
// Byte/word destinations need BWI; VLX additionally enables 128/256-bit forms.
let Predicates = [HasVLX, HasBWI] in {
  defm : avx512_ext_lowering<"BWZ128", v8i16x_info, v16i8x_info, extloadvi8>;
  defm : avx512_ext_lowering<"BWZ256", v16i16x_info, v16i8x_info, extloadvi8>;
let Predicates = [HasBWI] in {
  defm : avx512_ext_lowering<"BWZ", v32i16_info, v32i8x_info, extloadvi8>;
let Predicates = [HasVLX, HasAVX512] in {
  defm : avx512_ext_lowering<"BDZ128", v4i32x_info, v16i8x_info, extloadvi8>;
  defm : avx512_ext_lowering<"BDZ256", v8i32x_info, v16i8x_info, extloadvi8>;
  defm : avx512_ext_lowering<"BQZ128", v2i64x_info, v16i8x_info, extloadvi8>;
  defm : avx512_ext_lowering<"BQZ256", v4i64x_info, v16i8x_info, extloadvi8>;
  defm : avx512_ext_lowering<"WDZ128", v4i32x_info, v8i16x_info, extloadvi16>;
  defm : avx512_ext_lowering<"WDZ256", v8i32x_info, v8i16x_info, extloadvi16>;
  defm : avx512_ext_lowering<"WQZ128", v2i64x_info, v8i16x_info, extloadvi16>;
  defm : avx512_ext_lowering<"WQZ256", v4i64x_info, v8i16x_info, extloadvi16>;
  defm : avx512_ext_lowering<"DQZ128", v2i64x_info, v4i32x_info, extloadvi32>;
  defm : avx512_ext_lowering<"DQZ256", v4i64x_info, v4i32x_info, extloadvi32>;
let Predicates = [HasAVX512] in {
  defm : avx512_ext_lowering<"BDZ", v16i32_info, v16i8x_info, extloadvi8>;
  defm : avx512_ext_lowering<"BQZ", v8i64_info, v16i8x_info, extloadvi8>;
  defm : avx512_ext_lowering<"WDZ", v16i32_info, v16i16x_info, extloadvi16>;
  defm : avx512_ext_lowering<"WQZ", v8i64_info, v8i16x_info, extloadvi16>;
  defm : avx512_ext_lowering<"DQZ", v8i64_info, v8i32x_info, extloadvi32>;
// Load-folding patterns for the pmovsx/pmovzx family: match an extend whose
// operand is a (possibly bitcast / scalar_to_vector / vz*-wrapped) load and
// select the memory form of the corresponding VPMOV{S,Z}X* instruction.
// ExtOp matches full-vector extends; InVecOp matches the 128-bit "in-vector"
// forms where only the low part of the source vector is used.  ExtLoad16 is
// the 16-bit scalar load fragment for the BQ (byte -> qword) 128-bit case.
multiclass AVX512_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
                                 SDNode InVecOp, PatFrag ExtLoad16> {
  // 128-bit destinations: byte -> word needs BWI.
  let Predicates = [HasVLX, HasBWI] in {
  def : Pat<(v8i16 (InVecOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
  def : Pat<(v8i16 (InVecOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
  def : Pat<(v8i16 (InVecOp (v16i8 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
  def : Pat<(v8i16 (InVecOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
  def : Pat<(v8i16 (InVecOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
  // Remaining 128-bit destinations only need VLX.
  let Predicates = [HasVLX] in {
  def : Pat<(v4i32 (InVecOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
            (!cast<I>(OpcPrefix#BDZ128rm) addr:$src)>;
  def : Pat<(v4i32 (InVecOp (v16i8 (vzmovl_v4i32 addr:$src)))),
            (!cast<I>(OpcPrefix#BDZ128rm) addr:$src)>;
  def : Pat<(v4i32 (InVecOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDZ128rm) addr:$src)>;
  def : Pat<(v4i32 (InVecOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDZ128rm) addr:$src)>;

  // Byte -> qword consumes only 16 bits; use the ExtLoad16 fragment.
  def : Pat<(v2i64 (InVecOp (bc_v16i8 (v4i32 (scalar_to_vector (ExtLoad16 addr:$src)))))),
            (!cast<I>(OpcPrefix#BQZ128rm) addr:$src)>;
  def : Pat<(v2i64 (InVecOp (v16i8 (vzmovl_v4i32 addr:$src)))),
            (!cast<I>(OpcPrefix#BQZ128rm) addr:$src)>;
  def : Pat<(v2i64 (InVecOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQZ128rm) addr:$src)>;
  def : Pat<(v2i64 (InVecOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQZ128rm) addr:$src)>;

  def : Pat<(v4i32 (InVecOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;
  def : Pat<(v4i32 (InVecOp (bc_v8i16 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;
  def : Pat<(v4i32 (InVecOp (v8i16 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;
  def : Pat<(v4i32 (InVecOp (v8i16 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;
  def : Pat<(v4i32 (InVecOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;

  def : Pat<(v2i64 (InVecOp (bc_v8i16 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
            (!cast<I>(OpcPrefix#WQZ128rm) addr:$src)>;
  def : Pat<(v2i64 (InVecOp (v8i16 (vzmovl_v4i32 addr:$src)))),
            (!cast<I>(OpcPrefix#WQZ128rm) addr:$src)>;
  def : Pat<(v2i64 (InVecOp (v8i16 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQZ128rm) addr:$src)>;
  def : Pat<(v2i64 (InVecOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQZ128rm) addr:$src)>;

  def : Pat<(v2i64 (InVecOp (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
  def : Pat<(v2i64 (InVecOp (bc_v4i32 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
  def : Pat<(v2i64 (InVecOp (v4i32 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
  def : Pat<(v2i64 (InVecOp (v4i32 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
  def : Pat<(v2i64 (InVecOp (bc_v4i32 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
  // 256-bit destinations: these use the full-vector extend node ExtOp.
  let Predicates = [HasVLX, HasBWI] in {
  def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWZ256rm) addr:$src)>;
  def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWZ256rm) addr:$src)>;
  def : Pat<(v16i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWZ256rm) addr:$src)>;
  let Predicates = [HasVLX] in {
  def : Pat<(v8i32 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BDZ256rm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDZ256rm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDZ256rm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDZ256rm) addr:$src)>;

  def : Pat<(v4i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
            (!cast<I>(OpcPrefix#BQZ256rm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
            (!cast<I>(OpcPrefix#BQZ256rm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQZ256rm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQZ256rm) addr:$src)>;

  def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDZ256rm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDZ256rm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDZ256rm) addr:$src)>;

  def : Pat<(v4i64 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WQZ256rm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQZ256rm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQZ256rm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQZ256rm) addr:$src)>;

  def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQZ256rm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQZ256rm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQZ256rm) addr:$src)>;
  // 512-bit destinations.
  let Predicates = [HasBWI] in {
  def : Pat<(v32i16 (ExtOp (bc_v32i8 (loadv4i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWZrm) addr:$src)>;
  let Predicates = [HasAVX512] in {
  def : Pat<(v16i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDZrm) addr:$src)>;

  def : Pat<(v8i64 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BQZrm) addr:$src)>;
  def : Pat<(v8i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQZrm) addr:$src)>;

  def : Pat<(v16i32 (ExtOp (bc_v16i16 (loadv4i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDZrm) addr:$src)>;

  def : Pat<(v8i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQZrm) addr:$src)>;

  def : Pat<(v8i64 (ExtOp (bc_v8i32 (loadv4i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQZrm) addr:$src)>;
// Hook up the load-folding patterns for both the sign- and zero-extending
// instruction families.
defm : AVX512_pmovx_patterns<"VPMOVSX", X86vsext, sext_invec, extloadi32i16>;
defm : AVX512_pmovx_patterns<"VPMOVZX", X86vzext, zext_invec, loadi16_anyext>;
7821 //===----------------------------------------------------------------------===//
7822 // GATHER - SCATTER Operations
// Single masked gather instruction.  The destination is both read and
// written ($src1 = $dst, early-clobber) and the mask register is written
// back ($mask = $mask_wb), reflecting the ISA's completion-mask semantics.
multiclass avx512_gather<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
                         X86MemOperand memop, PatFrag GatherNode> {
  let Constraints = "@earlyclobber $dst, $src1 = $dst, $mask = $mask_wb",
      ExeDomain = _.ExeDomain in
  def rm  : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst, _.KRCWM:$mask_wb),
            (ins _.RC:$src1, _.KRCWM:$mask, memop:$src2),
            !strconcat(OpcodeStr#_.Suffix,
            "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
            [(set _.RC:$dst, _.KRCWM:$mask_wb,
              (GatherNode  (_.VT _.RC:$src1), _.KRCWM:$mask,
                     vectoraddr:$src2))]>, EVEX, EVEX_K,
             EVEX_CD8<_.EltSize, CD8VT1>;
// Gathers with 64-bit data elements (pd / q): the "D" forms use 32-bit
// indices, the "Q" forms 64-bit indices; all carry VEX_W for 64-bit data.
multiclass avx512_gather_q_pd<bits<8> dopc, bits<8> qopc,
                        AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
  defm NAME##D##SUFF##Z: avx512_gather<dopc, OpcodeStr##"d", _.info512,
                                      vy512mem, mgatherv8i32>, EVEX_V512, VEX_W;
  defm NAME##Q##SUFF##Z: avx512_gather<qopc, OpcodeStr##"q", _.info512,
                                      vz512mem, mgatherv8i64>, EVEX_V512, VEX_W;
let Predicates = [HasVLX] in {
  defm NAME##D##SUFF##Z256: avx512_gather<dopc, OpcodeStr##"d", _.info256,
                                          vx256xmem, mgatherv4i32>, EVEX_V256, VEX_W;
  defm NAME##Q##SUFF##Z256: avx512_gather<qopc, OpcodeStr##"q", _.info256,
                                          vy256xmem, mgatherv4i64>, EVEX_V256, VEX_W;
  defm NAME##D##SUFF##Z128: avx512_gather<dopc, OpcodeStr##"d", _.info128,
                                          vx128xmem, mgatherv4i32>, EVEX_V128, VEX_W;
  defm NAME##Q##SUFF##Z128: avx512_gather<qopc, OpcodeStr##"q", _.info128,
                                          vx128xmem, mgatherv2i64>, EVEX_V128, VEX_W;
// Gathers with 32-bit data elements (ps / d).  Note the "Q" (64-bit index)
// forms produce a result one vector-width class smaller than the index
// vector, e.g. the 512-bit-index form yields a 256-bit (_.info256) result.
multiclass avx512_gather_d_ps<bits<8> dopc, bits<8> qopc,
                       AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
  defm NAME##D##SUFF##Z: avx512_gather<dopc, OpcodeStr##"d", _.info512, vz512mem,
                                       mgatherv16i32>, EVEX_V512;
  defm NAME##Q##SUFF##Z: avx512_gather<qopc, OpcodeStr##"q", _.info256, vz256xmem,
                                       mgatherv8i64>, EVEX_V512;
let Predicates = [HasVLX] in {
  defm NAME##D##SUFF##Z256: avx512_gather<dopc, OpcodeStr##"d", _.info256,
                                          vy256xmem, mgatherv8i32>, EVEX_V256;
  defm NAME##Q##SUFF##Z256: avx512_gather<qopc, OpcodeStr##"q", _.info128,
                                          vy128xmem, mgatherv4i64>, EVEX_V256;
  defm NAME##D##SUFF##Z128: avx512_gather<dopc, OpcodeStr##"d", _.info128,
                                          vx128xmem, mgatherv4i32>, EVEX_V128;
  defm NAME##Q##SUFF##Z128: avx512_gather<qopc, OpcodeStr##"q", _.info128,
                                          vx64xmem, mgatherv2i64>, EVEX_V128;
// FP and integer gather instantiations (VGATHER{D,Q}{PS,PD} /
// VPGATHER{D,Q}{D,Q}).
defm VGATHER : avx512_gather_q_pd<0x92, 0x93, avx512vl_f64_info, "vgather", "PD">,
               avx512_gather_d_ps<0x92, 0x93, avx512vl_f32_info, "vgather", "PS">;

defm VPGATHER : avx512_gather_q_pd<0x90, 0x91, avx512vl_i64_info, "vpgather", "Q">,
                avx512_gather_d_ps<0x90, 0x91, avx512vl_i32_info, "vpgather", "D">;
// Single masked scatter instruction.  Only the completion mask is an output
// ($mask = $mask_wb); the stored data and the vector address are inputs.
multiclass avx512_scatter<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
                          X86MemOperand memop, PatFrag ScatterNode> {

let mayStore = 1, Constraints = "$mask = $mask_wb", ExeDomain = _.ExeDomain in

  def mr  : AVX5128I<opc, MRMDestMem, (outs _.KRCWM:$mask_wb),
            (ins memop:$dst, _.KRCWM:$mask, _.RC:$src),
            !strconcat(OpcodeStr#_.Suffix,
            "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
            [(set _.KRCWM:$mask_wb, (ScatterNode (_.VT _.RC:$src),
                                     _.KRCWM:$mask,  vectoraddr:$dst))]>,
            EVEX, EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
// Scatters with 64-bit data elements; mirrors avx512_gather_q_pd above
// ("D" = 32-bit indices, "Q" = 64-bit indices, VEX_W for 64-bit data).
multiclass avx512_scatter_q_pd<bits<8> dopc, bits<8> qopc,
                        AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
  defm NAME##D##SUFF##Z: avx512_scatter<dopc, OpcodeStr##"d", _.info512,
                                      vy512mem, mscatterv8i32>, EVEX_V512, VEX_W;
  defm NAME##Q##SUFF##Z: avx512_scatter<qopc, OpcodeStr##"q", _.info512,
                                      vz512mem, mscatterv8i64>, EVEX_V512, VEX_W;
let Predicates = [HasVLX] in {
  defm NAME##D##SUFF##Z256: avx512_scatter<dopc, OpcodeStr##"d", _.info256,
                                          vx256xmem, mscatterv4i32>, EVEX_V256, VEX_W;
  defm NAME##Q##SUFF##Z256: avx512_scatter<qopc, OpcodeStr##"q", _.info256,
                                          vy256xmem, mscatterv4i64>, EVEX_V256, VEX_W;
  defm NAME##D##SUFF##Z128: avx512_scatter<dopc, OpcodeStr##"d", _.info128,
                                          vx128xmem, mscatterv4i32>, EVEX_V128, VEX_W;
  defm NAME##Q##SUFF##Z128: avx512_scatter<qopc, OpcodeStr##"q", _.info128,
                                          vx128xmem, mscatterv2i64>, EVEX_V128, VEX_W;
// Scatters with 32-bit data elements; mirrors avx512_gather_d_ps above,
// including the narrower data width on the 64-bit-index ("Q") forms.
multiclass avx512_scatter_d_ps<bits<8> dopc, bits<8> qopc,
                       AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
  defm NAME##D##SUFF##Z: avx512_scatter<dopc, OpcodeStr##"d", _.info512, vz512mem,
                                        mscatterv16i32>, EVEX_V512;
  defm NAME##Q##SUFF##Z: avx512_scatter<qopc, OpcodeStr##"q", _.info256, vz256xmem,
                                        mscatterv8i64>, EVEX_V512;
let Predicates = [HasVLX] in {
  defm NAME##D##SUFF##Z256: avx512_scatter<dopc, OpcodeStr##"d", _.info256,
                                           vy256xmem, mscatterv8i32>, EVEX_V256;
  defm NAME##Q##SUFF##Z256: avx512_scatter<qopc, OpcodeStr##"q", _.info128,
                                           vy128xmem, mscatterv4i64>, EVEX_V256;
  defm NAME##D##SUFF##Z128: avx512_scatter<dopc, OpcodeStr##"d", _.info128,
                                           vx128xmem, mscatterv4i32>, EVEX_V128;
  defm NAME##Q##SUFF##Z128: avx512_scatter<qopc, OpcodeStr##"q", _.info128,
                                           vx64xmem, mscatterv2i64>, EVEX_V128;
// FP and integer scatter instantiations (VSCATTER{D,Q}{PS,PD} /
// VPSCATTER{D,Q}{D,Q}).
defm VSCATTER : avx512_scatter_q_pd<0xA2, 0xA3, avx512vl_f64_info, "vscatter", "PD">,
                avx512_scatter_d_ps<0xA2, 0xA3, avx512vl_f32_info, "vscatter", "PS">;

defm VPSCATTER : avx512_scatter_q_pd<0xA0, 0xA1, avx512vl_i64_info, "vpscatter", "Q">,
                 avx512_scatter_d_ps<0xA0, 0xA1, avx512vl_i32_info, "vpscatter", "D">;
// Gather/scatter prefetch instruction (PFI feature).  Takes only a mask and
// a vector memory operand; hasSideEffects keeps it from being dropped since
// it has no pattern or outputs.
multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
                                          RegisterClass KRC, X86MemOperand memop> {
  let Predicates = [HasPFI], hasSideEffects = 1 in
  def m  : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
            !strconcat(OpcodeStr, "\t{$src {${mask}}|{${mask}}, $src}"),
// Prefetch instantiations.  Hint 0/1 selects the ModRM reg field (MRM1m/MRM2m
// for gathers, MRM5m/MRM6m for scatters); the D/Q and PS/PD suffixes select
// index width (32/64-bit) and element size (CD8 scale, VEX_W for 64-bit).
defm VGATHERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dps",
                     VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qps",
                     VK8WM, vz256xmem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
                     VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
                     VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
                     VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qps",
                     VK8WM, vz256xmem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VGATHERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dpd",
                     VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qpd",
                     VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dps",
                     VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qps",
                     VK8WM, vz256xmem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dpd",
                     VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qpd",
                     VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dps",
                     VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qps",
                     VK8WM, vz256xmem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dpd",
                     VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qpd",
                     VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
// Helper fragments to match sext vXi1 to vXiY.
// NOTE(review): the v64i8 leaf below appears truncated in this extract —
// confirm the full pattern against the surrounding file before editing it.
def v64i1sextv64i8  : PatLeaf<(v64i8
                               (bc_v64i8 (v16i32 immAllZerosV)),
// The remaining leaves match an arithmetic shift-right by (EltBits - 1),
// i.e. a vector whose lanes are the sign-extended sign bits.
def v32i1sextv32i16 : PatLeaf<(v32i16 (X86vsrai VR512:$src, (i8 15)))>;
def v16i1sextv16i32 : PatLeaf<(v16i32 (X86vsrai VR512:$src, (i8 31)))>;
def v8i1sextv8i64   : PatLeaf<(v8i64 (X86vsrai VR512:$src, (i8 63)))>;
// vpmovm2* register form: sign-extend each mask bit into a full vector lane.
multiclass cvt_by_vec_width<bits<8> opc, X86VectorVTInfo Vec, string OpcodeStr > {
def rr : AVX512XS8I<opc, MRMSrcReg, (outs Vec.RC:$dst), (ins Vec.KRC:$src),
                  !strconcat(OpcodeStr##Vec.Suffix, "\t{$src, $dst|$dst, $src}"),
                  [(set Vec.RC:$dst, (Vec.VT (X86vsext Vec.KRC:$src)))]>, EVEX;
// Use the 512-bit version to implement the 128/256-bit forms when VLX is not
// available: widen the mask into the 512-bit register class, run the Z
// instruction, then extract the low subregister of the result.
multiclass avx512_convert_mask_to_vector_lowering<X86VectorVTInfo X86Info,
                                                  X86VectorVTInfo _> {

  def : Pat<(X86Info.VT (X86vsext (X86Info.KVT X86Info.KRC:$src))),
            (X86Info.VT (EXTRACT_SUBREG
                         (_.VT (!cast<Instruction>(NAME#"Zrr")
                          (_.KVT (COPY_TO_REGCLASS X86Info.KRC:$src,_.KRC)))),
                         X86Info.SubRegIdx))>;
// Instantiate vpmovm2* for one element width: native Z form under prd,
// native 128/256 forms with VLX, and the widen-then-extract lowering
// (Z256_Alt/Z128_Alt) when VLX is absent.
multiclass cvt_mask_by_elt_width<bits<8> opc, AVX512VLVectorVTInfo VTInfo,
                                 string OpcodeStr, Predicate prd> {
let Predicates = [prd] in
  defm Z : cvt_by_vec_width<opc, VTInfo.info512, OpcodeStr>, EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : cvt_by_vec_width<opc, VTInfo.info256, OpcodeStr>, EVEX_V256;
    defm Z128 : cvt_by_vec_width<opc, VTInfo.info128, OpcodeStr>, EVEX_V128;
  let Predicates = [prd, NoVLX] in {
    defm Z256_Alt : avx512_convert_mask_to_vector_lowering<VTInfo.info256,VTInfo.info512>;
    defm Z128_Alt : avx512_convert_mask_to_vector_lowering<VTInfo.info128,VTInfo.info512>;
// Mask -> vector: byte/word forms need BWI, dword/qword forms need DQI;
// VEX_W distinguishes the wider element size within each opcode pair.
defm VPMOVM2B : cvt_mask_by_elt_width<0x28, avx512vl_i8_info, "vpmovm2" , HasBWI>;
defm VPMOVM2W : cvt_mask_by_elt_width<0x28, avx512vl_i16_info, "vpmovm2", HasBWI> , VEX_W;
defm VPMOVM2D : cvt_mask_by_elt_width<0x38, avx512vl_i32_info, "vpmovm2", HasDQI>;
defm VPMOVM2Q : cvt_mask_by_elt_width<0x38, avx512vl_i64_info, "vpmovm2", HasDQI> , VEX_W;
// vpmov*2m register form: compress a vector into a mask register
// (one bit per lane), matched via the X86cvt2mask node.
multiclass convert_vector_to_mask_common<bits<8> opc, X86VectorVTInfo _, string OpcodeStr > {
def rr : AVX512XS8I<opc, MRMSrcReg, (outs _.KRC:$dst), (ins _.RC:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set _.KRC:$dst, (X86cvt2mask (_.VT _.RC:$src)))]>, EVEX;
// Use the 512-bit version to implement the 128/256-bit forms when VLX is not
// available: insert the narrow source into an undef 512-bit register, run
// the Z instruction, and copy the resulting mask to the narrow mask class.
multiclass convert_vector_to_mask_lowering<X86VectorVTInfo ExtendInfo,
                                           X86VectorVTInfo _> {

  def : Pat<(_.KVT (X86cvt2mask (_.VT _.RC:$src))),
            (_.KVT (COPY_TO_REGCLASS
                    (!cast<Instruction>(NAME#"Zrr")
                     (INSERT_SUBREG (ExtendInfo.VT (IMPLICIT_DEF)),
                                    _.RC:$src, _.SubRegIdx)),
// Instantiate vpmov*2m for one element width; same Z / VLX / NoVLX-fallback
// structure as cvt_mask_by_elt_width above.
multiclass avx512_convert_vector_to_mask<bits<8> opc, string OpcodeStr,
                                         AVX512VLVectorVTInfo VTInfo, Predicate prd> {
let Predicates = [prd] in
  defm Z : convert_vector_to_mask_common <opc, VTInfo.info512, OpcodeStr>,

  let Predicates = [prd, HasVLX] in {
    defm Z256 : convert_vector_to_mask_common<opc, VTInfo.info256, OpcodeStr>,
    defm Z128 : convert_vector_to_mask_common<opc, VTInfo.info128, OpcodeStr>,
  // Without VLX, lower narrow forms through the 512-bit instruction.
  let Predicates = [prd, NoVLX] in {
    defm Z256_Alt : convert_vector_to_mask_lowering<VTInfo.info512, VTInfo.info256>;
    defm Z128_Alt : convert_vector_to_mask_lowering<VTInfo.info512, VTInfo.info128>;
// Vector -> mask: byte/word forms need BWI, dword/qword forms need DQI.
defm VPMOVB2M : avx512_convert_vector_to_mask<0x29, "vpmovb2m",
                                              avx512vl_i8_info, HasBWI>;
defm VPMOVW2M : avx512_convert_vector_to_mask<0x29, "vpmovw2m",
                                              avx512vl_i16_info, HasBWI>, VEX_W;
defm VPMOVD2M : avx512_convert_vector_to_mask<0x39, "vpmovd2m",
                                              avx512vl_i32_info, HasDQI>;
defm VPMOVQ2M : avx512_convert_vector_to_mask<0x39, "vpmovq2m",
                                              avx512vl_i64_info, HasDQI>, VEX_W;
8087 //===----------------------------------------------------------------------===//
8088 // AVX-512 - COMPRESS and EXPAND
// Compress: masked reg-reg form (with the usual k/kz variants via
// AVX512_maskable) plus pattern-less store forms; the masked store (mrk) is
// matched separately by compress_by_vec_width_lowering below.
multiclass compress_by_vec_width_common<bits<8> opc, X86VectorVTInfo _,
  defm rr : AVX512_maskable<opc, MRMDestReg, _, (outs _.RC:$dst),
              (ins _.RC:$src1), OpcodeStr, "$src1", "$src1",
              (_.VT (X86compress _.RC:$src1))>, AVX5128IBase;

  // Plain store form has no pattern; mayStore/hasSideEffects describe it.
  let mayStore = 1, hasSideEffects = 0 in
  def mr : AVX5128I<opc, MRMDestMem, (outs),
              (ins _.MemOp:$dst, _.RC:$src),
              OpcodeStr # "\t{$src, $dst|$dst, $src}",
              []>, EVEX_CD8<_.EltSize, CD8VT1>;

  def mrk : AVX5128I<opc, MRMDestMem, (outs),
              (ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
              OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}",
              EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
// Lower a masked compressing store to the mrk form defined above.
multiclass compress_by_vec_width_lowering<X86VectorVTInfo _ > {

  def : Pat<(X86mCompressingStore addr:$dst, _.KRCWM:$mask,
              (!cast<Instruction>(NAME#_.ZSuffix##mrk)
                            addr:$dst, _.KRCWM:$mask, _.RC:$src)>;
// Instantiate compress at all three vector widths (512 always; 128/256
// under VLX), pairing each instruction with its store-lowering patterns.
multiclass compress_by_elt_width<bits<8> opc, string OpcodeStr,
                                 AVX512VLVectorVTInfo VTInfo> {
  defm Z : compress_by_vec_width_common<opc, VTInfo.info512, OpcodeStr>,
           compress_by_vec_width_lowering<VTInfo.info512>, EVEX_V512;

  let Predicates = [HasVLX] in {
    defm Z256 : compress_by_vec_width_common<opc, VTInfo.info256, OpcodeStr>,
                compress_by_vec_width_lowering<VTInfo.info256>, EVEX_V256;
    defm Z128 : compress_by_vec_width_common<opc, VTInfo.info128, OpcodeStr>,
                compress_by_vec_width_lowering<VTInfo.info128>, EVEX_V128;
// Integer and FP compress instantiations.
defm VPCOMPRESSD : compress_by_elt_width <0x8B, "vpcompressd", avx512vl_i32_info>,
defm VPCOMPRESSQ : compress_by_elt_width <0x8B, "vpcompressq", avx512vl_i64_info>,
defm VCOMPRESSPS : compress_by_elt_width <0x8A, "vcompressps", avx512vl_f32_info>,
defm VCOMPRESSPD : compress_by_elt_width <0x8A, "vcompresspd", avx512vl_f64_info>,
// Expand: masked reg-reg and reg-mem forms via AVX512_maskable; the masked
// expanding-load patterns are added by expand_by_vec_width_lowering below.
multiclass expand_by_vec_width<bits<8> opc, X86VectorVTInfo _,
  defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
              (ins _.RC:$src1), OpcodeStr, "$src1", "$src1",
              (_.VT (X86expand _.RC:$src1))>, AVX5128IBase;

  defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
              (ins _.MemOp:$src1), OpcodeStr, "$src1", "$src1",
              (_.VT (X86expand (_.VT (bitconvert
                                      (_.LdFrag addr:$src1)))))>,
            AVX5128IBase, EVEX_CD8<_.EltSize, CD8VT1>;
// Lower masked expanding loads: undef pass-through selects the zero-masked
// (rmkz) form, an explicit $src0 pass-through selects the merge (rmk) form.
multiclass expand_by_vec_width_lowering<X86VectorVTInfo _ > {

  def : Pat<(_.VT (X86mExpandingLoad addr:$src, _.KRCWM:$mask, undef)),
            (!cast<Instruction>(NAME#_.ZSuffix##rmkz)
                                        _.KRCWM:$mask, addr:$src)>;

  def : Pat<(_.VT (X86mExpandingLoad addr:$src, _.KRCWM:$mask,
                                     (_.VT _.RC:$src0))),
            (!cast<Instruction>(NAME#_.ZSuffix##rmk)
                            _.RC:$src0, _.KRCWM:$mask, addr:$src)>;
// Instantiate expand at all three vector widths (512 always; 128/256 under
// VLX), pairing each instruction with its load-lowering patterns.
multiclass expand_by_elt_width<bits<8> opc, string OpcodeStr,
                               AVX512VLVectorVTInfo VTInfo> {
  defm Z : expand_by_vec_width<opc, VTInfo.info512, OpcodeStr>,
           expand_by_vec_width_lowering<VTInfo.info512>, EVEX_V512;

  let Predicates = [HasVLX] in {
    defm Z256 : expand_by_vec_width<opc, VTInfo.info256, OpcodeStr>,
                expand_by_vec_width_lowering<VTInfo.info256>, EVEX_V256;
    defm Z128 : expand_by_vec_width<opc, VTInfo.info128, OpcodeStr>,
                expand_by_vec_width_lowering<VTInfo.info128>, EVEX_V128;
// Integer and FP expand instantiations.
defm VPEXPANDD : expand_by_elt_width <0x89, "vpexpandd", avx512vl_i32_info>,
defm VPEXPANDQ : expand_by_elt_width <0x89, "vpexpandq", avx512vl_i64_info>,
defm VEXPANDPS : expand_by_elt_width <0x88, "vexpandps", avx512vl_f32_info>,
defm VEXPANDPD : expand_by_elt_width <0x88, "vexpandpd", avx512vl_f64_info>,
// Handles the unary packed-FP-with-immediate forms:
//   reg_vec1 = op(reg_vec, imm)
//   reg_vec1 = op(mem_vec, imm)
//   reg_vec1 = op(broadcast(eltVt), imm)
// All forms are created with the FROUND_CURRENT rounding-mode token.
multiclass avx512_unary_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
  let ExeDomain = _.ExeDomain in {
  defm rri : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                      (ins _.RC:$src1, i32u8imm:$src2),
                      OpcodeStr##_.Suffix, "$src2, $src1", "$src1, $src2",
                      (OpNode (_.VT _.RC:$src1),
                              (i32 FROUND_CURRENT))>;
  defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                    (ins _.MemOp:$src1, i32u8imm:$src2),
                    OpcodeStr##_.Suffix, "$src2, $src1", "$src1, $src2",
                    (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
                            (i32 FROUND_CURRENT))>;
  // Broadcast form: the scalar element is splatted before the operation.
  defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                    (ins _.ScalarMemOp:$src1, i32u8imm:$src2),
                    OpcodeStr##_.Suffix, "$src2, ${src1}"##_.BroadcastStr,
                    "${src1}"##_.BroadcastStr##", $src2",
                    (OpNode (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src1))),
                            (i32 FROUND_CURRENT))>, EVEX_B;
// Handles the unary packed-FP-with-immediate {sae} (suppress-all-exceptions)
// form; created with the FROUND_NO_EXC token and marked EVEX_B.
multiclass avx512_unary_fp_sae_packed_imm<bits<8> opc, string OpcodeStr,
                                SDNode OpNode, X86VectorVTInfo _>{
let ExeDomain = _.ExeDomain in
  defm rrib : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                      (ins _.RC:$src1, i32u8imm:$src2),
                      OpcodeStr##_.Suffix, "$src2, {sae}, $src1",
                      "$src1, {sae}, $src2",
                      (OpNode (_.VT _.RC:$src1),
                              (i32 FROUND_NO_EXC))>, EVEX_B;
// Instantiate the unary packed-imm forms: 512-bit (with the {sae} variant)
// under prd, 128/256-bit (no {sae}) additionally requiring VLX.
multiclass avx512_common_unary_fp_sae_packed_imm<string OpcodeStr,
            AVX512VLVectorVTInfo _, bits<8> opc, SDNode OpNode, Predicate prd>{
  let Predicates = [prd] in {
    defm Z    : avx512_unary_fp_packed_imm<opc, OpcodeStr, OpNode, _.info512>,
                avx512_unary_fp_sae_packed_imm<opc, OpcodeStr, OpNode, _.info512>,
  let Predicates = [prd, HasVLX] in {
    defm Z128 : avx512_unary_fp_packed_imm<opc, OpcodeStr, OpNode, _.info128>,
    defm Z256 : avx512_unary_fp_packed_imm<opc, OpcodeStr, OpNode, _.info256>,
// Handles the binary packed-FP-with-immediate forms:
//   reg_vec1 = op(reg_vec2, reg_vec3, imm)
//   reg_vec1 = op(reg_vec2, mem_vec,  imm)
//   reg_vec1 = op(reg_vec2, broadcast(eltVt), imm)
// All forms are created with the FROUND_CURRENT rounding-mode token.
multiclass avx512_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
  let ExeDomain = _.ExeDomain in {
  defm rri : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                      (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3),
                      OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
                      (OpNode (_.VT _.RC:$src1),
                              (i32 FROUND_CURRENT))>;
  defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                    (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3),
                    OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
                    (OpNode (_.VT _.RC:$src1),
                            (_.VT (bitconvert (_.LdFrag addr:$src2))),
                            (i32 FROUND_CURRENT))>;
  // Broadcast form: second operand is a splatted scalar from memory.
  defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                    (ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
                    OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
                    "$src1, ${src2}"##_.BroadcastStr##", $src3",
                    (OpNode (_.VT _.RC:$src1),
                            (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
                            (i32 FROUND_CURRENT))>, EVEX_B;
// Handles the three-operand immediate forms (no broadcast, no rounding):
//   reg_vec1 = op(reg_vec2, reg_vec3, imm)
//   reg_vec1 = op(reg_vec2, mem_vec,  imm)
// Source and destination may use different VT infos.
multiclass avx512_3Op_rm_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo>{
  let ExeDomain = DestInfo.ExeDomain in {
  defm rri : AVX512_maskable<opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
                  (ins SrcInfo.RC:$src1, SrcInfo.RC:$src2, u8imm:$src3),
                  OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
                  (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1),
                               (SrcInfo.VT SrcInfo.RC:$src2),
  defm rmi : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
                (ins SrcInfo.RC:$src1, SrcInfo.MemOp:$src2, u8imm:$src3),
                OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
                (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1),
                             (SrcInfo.VT (bitconvert
                                          (SrcInfo.LdFrag addr:$src2))),
// Handles the three-operand immediate forms including broadcast:
//   reg_vec1 = op(reg_vec2, reg_vec3, imm)
//   reg_vec1 = op(reg_vec2, mem_vec,  imm)
//   reg_vec1 = op(reg_vec2, broadcast(eltVt), imm)
// Inherits the rri/rmi forms from avx512_3Op_rm_imm8 and adds rmbi.
multiclass avx512_3Op_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           avx512_3Op_rm_imm8<opc, OpcodeStr, OpNode, _, _>{

  let ExeDomain = _.ExeDomain in
  defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
                OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
                "$src1, ${src2}"##_.BroadcastStr##", $src3",
                (OpNode (_.VT _.RC:$src1),
                        (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
                        (i8 imm:$src3))>, EVEX_B;
// Handles the scalar-FP-with-immediate forms:
//   reg_vec1 = op(reg_vec2, reg_vec3, imm)
//   reg_vec1 = op(reg_vec2, mem_scalar, imm)
// All forms are created with the FROUND_CURRENT rounding-mode token.
multiclass avx512_fp_scalar_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                X86VectorVTInfo _> {
  let ExeDomain = _.ExeDomain in {
  defm rri : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
                      (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3),
                      OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
                      (OpNode (_.VT _.RC:$src1),
                              (i32 FROUND_CURRENT))>;
  // Scalar memory operand is wrapped in scalar_to_vector to feed the node.
  defm rmi : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
                    (ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
                    OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
                    (OpNode (_.VT _.RC:$src1),
                            (_.VT (scalar_to_vector
                                      (_.ScalarLdFrag addr:$src2))),
                            (i32 FROUND_CURRENT))>;
// Handles the packed {sae} form: reg_vec1 = op(reg_vec2, reg_vec3, imm),{sae}.
// Created with FROUND_NO_EXC and marked EVEX_B.
multiclass avx512_fp_sae_packed_imm<bits<8> opc, string OpcodeStr,
                                    SDNode OpNode, X86VectorVTInfo _>{
let ExeDomain = _.ExeDomain in
  defm rrib : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                      (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3),
                      OpcodeStr, "$src3, {sae}, $src2, $src1",
                      "$src1, $src2, {sae}, $src3",
                      (OpNode (_.VT _.RC:$src1),
                              (i32 FROUND_NO_EXC))>, EVEX_B;
8352 //handle scalar instruction reg_vec1 = op(reg_vec2,reg_vec3,imm),{sae}
// Scalar SAE form, analogous to avx512_fp_sae_packed_imm but using the
// scalar maskable wrapper and an explicit NAME# prefix on the defm.
8353 multiclass avx512_fp_sae_scalar_imm<bits<8> opc, string OpcodeStr,
8354 SDNode OpNode, X86VectorVTInfo _> {
8355 let ExeDomain = _.ExeDomain in
8356 defm NAME#rrib : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
8357 (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3),
8358 OpcodeStr, "$src3, {sae}, $src2, $src1",
8359 "$src1, $src2, {sae}, $src3",
8360 (OpNode (_.VT _.RC:$src1),
8363 (i32 FROUND_NO_EXC))>, EVEX_B;
// Instantiates the 512-bit packed+SAE forms under `prd`, and the 128/256-bit
// packed forms additionally gated on HasVLX.
8366 multiclass avx512_common_fp_sae_packed_imm<string OpcodeStr,
8367 AVX512VLVectorVTInfo _, bits<8> opc, SDNode OpNode, Predicate prd>{
8368 let Predicates = [prd] in {
8369 defm Z : avx512_fp_packed_imm<opc, OpcodeStr, OpNode, _.info512>,
8370 avx512_fp_sae_packed_imm<opc, OpcodeStr, OpNode, _.info512>,
8374 let Predicates = [prd, HasVLX] in {
8375 defm Z128 : avx512_fp_packed_imm<opc, OpcodeStr, OpNode, _.info128>,
8377 defm Z256 : avx512_fp_packed_imm<opc, OpcodeStr, OpNode, _.info256>,
// Instantiates avx512_3Op_rm_imm8 at all three vector widths; gated on BWI
// (and VLX for the 128/256-bit variants) with separate dest/source VT infos.
8382 multiclass avx512_common_3Op_rm_imm8<bits<8> opc, SDNode OpNode, string OpStr,
8383 AVX512VLVectorVTInfo DestInfo, AVX512VLVectorVTInfo SrcInfo>{
8384 let Predicates = [HasBWI] in {
8385 defm Z : avx512_3Op_rm_imm8<opc, OpStr, OpNode, DestInfo.info512,
8386 SrcInfo.info512>, EVEX_V512, AVX512AIi8Base, EVEX_4V;
8388 let Predicates = [HasBWI, HasVLX] in {
8389 defm Z128 : avx512_3Op_rm_imm8<opc, OpStr, OpNode, DestInfo.info128,
8390 SrcInfo.info128>, EVEX_V128, AVX512AIi8Base, EVEX_4V;
8391 defm Z256 : avx512_3Op_rm_imm8<opc, OpStr, OpNode, DestInfo.info256,
8392 SrcInfo.info256>, EVEX_V256, AVX512AIi8Base, EVEX_4V;
// Instantiates avx512_3Op_imm8 (reg/mem/broadcast forms) at 512 bits under
// HasAVX512, plus 128/256-bit variants under HasVLX.
8396 multiclass avx512_common_3Op_imm8<string OpcodeStr, AVX512VLVectorVTInfo _,
8397 bits<8> opc, SDNode OpNode>{
8398 let Predicates = [HasAVX512] in {
8399 defm Z : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
8401 let Predicates = [HasAVX512, HasVLX] in {
8402 defm Z128 : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info128>, EVEX_V128;
8403 defm Z256 : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
// Combines the current-rounding and SAE scalar forms under one predicate.
// Scalar ops use the Z128 suffix (they live in XMM registers).
8407 multiclass avx512_common_fp_sae_scalar_imm<string OpcodeStr,
8408 X86VectorVTInfo _, bits<8> opc, SDNode OpNode, Predicate prd>{
8409 let Predicates = [prd] in {
8410 defm Z128 : avx512_fp_scalar_imm<opc, OpcodeStr, OpNode, _>,
8411 avx512_fp_sae_scalar_imm<opc, OpcodeStr, OpNode, _>;
// Emits both the PS (f32, opcPs) and PD (f64, opcPd, VEX.W) flavors of a
// unary packed FP+SAE instruction with an immediate.
8415 multiclass avx512_common_unary_fp_sae_packed_imm_all<string OpcodeStr,
8416 bits<8> opcPs, bits<8> opcPd, SDNode OpNode, Predicate prd>{
8417 defm PS : avx512_common_unary_fp_sae_packed_imm<OpcodeStr, avx512vl_f32_info,
8418 opcPs, OpNode, prd>, EVEX_CD8<32, CD8VF>;
8419 defm PD : avx512_common_unary_fp_sae_packed_imm<OpcodeStr, avx512vl_f64_info,
8420 opcPd, OpNode, prd>, EVEX_CD8<64, CD8VF>, VEX_W;
// Unary packed imm8 FP ops: VREDUCE (DQI), VRNDSCALE and VGETMANT (AVX512).
// Note VRNDSCALE uses distinct PS/PD opcodes (0x08/0x09).
8424 defm VREDUCE : avx512_common_unary_fp_sae_packed_imm_all<"vreduce", 0x56, 0x56,
8425 X86VReduce, HasDQI>, AVX512AIi8Base, EVEX;
8426 defm VRNDSCALE : avx512_common_unary_fp_sae_packed_imm_all<"vrndscale", 0x08, 0x09,
8427 X86VRndScale, HasAVX512>, AVX512AIi8Base, EVEX;
8428 defm VGETMANT : avx512_common_unary_fp_sae_packed_imm_all<"vgetmant", 0x26, 0x26,
8429 X86VGetMant, HasAVX512>, AVX512AIi8Base, EVEX;
// Binary packed imm8 FP ops (VRANGE, DQI-only).
8432 defm VRANGEPD : avx512_common_fp_sae_packed_imm<"vrangepd", avx512vl_f64_info,
8433 0x50, X86VRange, HasDQI>,
8434 AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VF>, VEX_W;
8435 defm VRANGEPS : avx512_common_fp_sae_packed_imm<"vrangeps", avx512vl_f32_info,
8436 0x50, X86VRange, HasDQI>,
8437 AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VF>;
// Scalar imm8 FP ops; all use VEX_LIG and single-element CD8 tuples.
8439 defm VRANGESD: avx512_common_fp_sae_scalar_imm<"vrangesd", f64x_info,
8440 0x51, X86VRange, HasDQI>,
8441 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
8442 defm VRANGESS: avx512_common_fp_sae_scalar_imm<"vrangess", f32x_info,
8443 0x51, X86VRange, HasDQI>,
8444 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<32, CD8VT1>;
8446 defm VREDUCESD: avx512_common_fp_sae_scalar_imm<"vreducesd", f64x_info,
8447 0x57, X86Reduces, HasDQI>,
8448 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
8449 defm VREDUCESS: avx512_common_fp_sae_scalar_imm<"vreducess", f32x_info,
8450 0x57, X86Reduces, HasDQI>,
8451 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<32, CD8VT1>;
8453 defm VGETMANTSD: avx512_common_fp_sae_scalar_imm<"vgetmantsd", f64x_info,
8454 0x27, X86GetMants, HasAVX512>,
8455 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
8456 defm VGETMANTSS: avx512_common_fp_sae_scalar_imm<"vgetmantss", f32x_info,
8457 0x27, X86GetMants, HasAVX512>,
8458 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<32, CD8VT1>;
// 128-bit-lane shuffles (VSHUFF/I32X4, VSHUFF/I64X2). Only 256/512-bit
// versions exist -- a 128-bit vector has a single lane, so no Z128 defm.
8460 multiclass avx512_shuff_packed_128<string OpcodeStr, AVX512VLVectorVTInfo _,
8461 bits<8> opc, SDNode OpNode = X86Shuf128>{
8462 let Predicates = [HasAVX512] in {
8463 defm Z : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
8466 let Predicates = [HasAVX512, HasVLX] in {
8467 defm Z256 : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
// Lower the generic FP rounding nodes to VRNDSCALE with the matching
// immediate: 0x1 = floor, 0x2 = ceil, 0x3 = trunc, 0x4 = current mode
// (rint), 0xC = current mode with precision exception suppressed (nearbyint).
8470 let Predicates = [HasAVX512] in {
8471 def : Pat<(v16f32 (ffloor VR512:$src)),
8472 (VRNDSCALEPSZrri VR512:$src, (i32 0x1))>;
8473 def : Pat<(v16f32 (fnearbyint VR512:$src)),
8474 (VRNDSCALEPSZrri VR512:$src, (i32 0xC))>;
8475 def : Pat<(v16f32 (fceil VR512:$src)),
8476 (VRNDSCALEPSZrri VR512:$src, (i32 0x2))>;
8477 def : Pat<(v16f32 (frint VR512:$src)),
8478 (VRNDSCALEPSZrri VR512:$src, (i32 0x4))>;
8479 def : Pat<(v16f32 (ftrunc VR512:$src)),
8480 (VRNDSCALEPSZrri VR512:$src, (i32 0x3))>;
8482 def : Pat<(v8f64 (ffloor VR512:$src)),
8483 (VRNDSCALEPDZrri VR512:$src, (i32 0x1))>;
8484 def : Pat<(v8f64 (fnearbyint VR512:$src)),
8485 (VRNDSCALEPDZrri VR512:$src, (i32 0xC))>;
8486 def : Pat<(v8f64 (fceil VR512:$src)),
8487 (VRNDSCALEPDZrri VR512:$src, (i32 0x2))>;
8488 def : Pat<(v8f64 (frint VR512:$src)),
8489 (VRNDSCALEPDZrri VR512:$src, (i32 0x4))>;
8490 def : Pat<(v8f64 (ftrunc VR512:$src)),
8491 (VRNDSCALEPDZrri VR512:$src, (i32 0x3))>;
// 128-bit-lane shuffle instructions: F-variants for FP element types,
// I-variants for integer; opcode 0x23 vs 0x43, VEX.W selects 64-bit elts.
8494 defm VSHUFF32X4 : avx512_shuff_packed_128<"vshuff32x4",avx512vl_f32_info, 0x23>,
8495 AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VF>;
8496 defm VSHUFF64X2 : avx512_shuff_packed_128<"vshuff64x2",avx512vl_f64_info, 0x23>,
8497 AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VF>, VEX_W;
8498 defm VSHUFI32X4 : avx512_shuff_packed_128<"vshufi32x4",avx512vl_i32_info, 0x43>,
8499 AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VF>;
8500 defm VSHUFI64X2 : avx512_shuff_packed_128<"vshufi64x2",avx512vl_i64_info, 0x43>,
8501 AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VF>, VEX_W;
// Lower 128->512-bit subvector broadcast via VSHUFF/I with the source
// inserted into an undef 512-bit register as both shuffle inputs.
// v32i16/v64i8 reuse the i32 variant since no 16/8-bit lane shuffle exists.
8503 let Predicates = [HasAVX512] in {
8504 // Provide fallback in case the load node that is used in the broadcast
8505 // patterns above is used by additional users, which prevents the pattern
8507 def : Pat<(v8f64 (X86SubVBroadcast (v2f64 VR128X:$src))),
8508 (VSHUFF64X2Zrri (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
8509 (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
8511 def : Pat<(v8i64 (X86SubVBroadcast (v2i64 VR128X:$src))),
8512 (VSHUFI64X2Zrri (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
8513 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
8516 def : Pat<(v16f32 (X86SubVBroadcast (v4f32 VR128X:$src))),
8517 (VSHUFF32X4Zrri (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
8518 (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
8520 def : Pat<(v16i32 (X86SubVBroadcast (v4i32 VR128X:$src))),
8521 (VSHUFI32X4Zrri (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
8522 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
8525 def : Pat<(v32i16 (X86SubVBroadcast (v8i16 VR128X:$src))),
8526 (VSHUFI32X4Zrri (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
8527 (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
8530 def : Pat<(v64i8 (X86SubVBroadcast (v16i8 VR128X:$src))),
8531 (VSHUFI32X4Zrri (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
8532 (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
// VALIGND/VALIGNQ: element-granular two-source align, opcode 0x03.
8536 multiclass avx512_valign<string OpcodeStr, AVX512VLVectorVTInfo VTInfo_I> {
8537 defm NAME: avx512_common_3Op_imm8<OpcodeStr, VTInfo_I, 0x03, X86VAlign>,
8538 AVX512AIi8Base, EVEX_4V;
8541 defm VALIGND: avx512_valign<"valignd", avx512vl_i32_info>,
8542 EVEX_CD8<32, CD8VF>;
8543 defm VALIGNQ: avx512_valign<"valignq", avx512vl_i64_info>,
8544 EVEX_CD8<64, CD8VF>, VEX_W;
// Extra patterns lowering X86PAlignr on non-i8 element types onto the
// byte-typed VPALIGNR instruction of the matching width (via ZSuffix).
8546 multiclass avx512_vpalignr_lowering<X86VectorVTInfo _ , list<Predicate> p>{
8547 let Predicates = p in
8548 def NAME#_.VTName#rri:
8549 Pat<(_.VT (X86PAlignr _.RC:$src1, _.RC:$src2, (i8 imm:$imm))),
8550 (!cast<Instruction>(NAME#_.ZSuffix#rri)
8551 _.RC:$src1, _.RC:$src2, imm:$imm)>;
// Instantiates the lowering at all widths: 512-bit needs BWI, 128/256 VLX.
8554 multiclass avx512_vpalignr_lowering_common<AVX512VLVectorVTInfo _>:
8555 avx512_vpalignr_lowering<_.info512, [HasBWI]>,
8556 avx512_vpalignr_lowering<_.info128, [HasBWI, HasVLX]>,
8557 avx512_vpalignr_lowering<_.info256, [HasBWI, HasVLX]>;
// VPALIGNR is defined on i8 vectors; the lowering_common mixins add
// patterns so the same instruction handles wider element types too.
8559 defm VPALIGNR: avx512_common_3Op_rm_imm8<0x0F, X86PAlignr, "vpalignr" ,
8560 avx512vl_i8_info, avx512vl_i8_info>,
8561 avx512_vpalignr_lowering_common<avx512vl_i16_info>,
8562 avx512_vpalignr_lowering_common<avx512vl_i32_info>,
8563 avx512_vpalignr_lowering_common<avx512vl_f32_info>,
8564 avx512_vpalignr_lowering_common<avx512vl_i64_info>,
8565 avx512_vpalignr_lowering_common<avx512vl_f64_info>,
// VDBPSADBW: i8 sources produce i16 results (dest/src infos differ).
8568 defm VDBPSADBW: avx512_common_3Op_rm_imm8<0x42, X86dbpsadbw, "vdbpsadbw" ,
8569 avx512vl_i16_info, avx512vl_i8_info>, EVEX_CD8<8, CD8VF>;
// Unary op, reg-reg and reg-mem forms, with masking support.
8571 multiclass avx512_unary_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
8572 X86VectorVTInfo _> {
8573 let ExeDomain = _.ExeDomain in {
8574 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
8575 (ins _.RC:$src1), OpcodeStr,
8577 (_.VT (OpNode _.RC:$src1))>, EVEX, AVX5128IBase;
8579 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
8580 (ins _.MemOp:$src1), OpcodeStr,
8582 (_.VT (OpNode (bitconvert (_.LdFrag addr:$src1))))>,
8583 EVEX, AVX5128IBase, EVEX_CD8<_.EltSize, CD8VF>;
// Adds an element-broadcast-from-memory (rmb) form on top of avx512_unary_rm.
8587 multiclass avx512_unary_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
8588 X86VectorVTInfo _> :
8589 avx512_unary_rm<opc, OpcodeStr, OpNode, _> {
8590 defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
8591 (ins _.ScalarMemOp:$src1), OpcodeStr,
8592 "${src1}"##_.BroadcastStr,
8593 "${src1}"##_.BroadcastStr,
8594 (_.VT (OpNode (X86VBroadcast
8595 (_.ScalarLdFrag addr:$src1))))>,
8596 EVEX, AVX5128IBase, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
// 512/256/128-bit instantiation of avx512_unary_rm under `prd` (+HasVLX).
8599 multiclass avx512_unary_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
8600 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
8601 let Predicates = [prd] in
8602 defm Z : avx512_unary_rm<opc, OpcodeStr, OpNode, VTInfo.info512>, EVEX_V512;
8604 let Predicates = [prd, HasVLX] in {
8605 defm Z256 : avx512_unary_rm<opc, OpcodeStr, OpNode, VTInfo.info256>,
8607 defm Z128 : avx512_unary_rm<opc, OpcodeStr, OpNode, VTInfo.info128>,
// Same but with the broadcast-capable avx512_unary_rmb base.
8612 multiclass avx512_unary_rmb_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
8613 AVX512VLVectorVTInfo VTInfo, Predicate prd> {
8614 let Predicates = [prd] in
8615 defm Z : avx512_unary_rmb<opc, OpcodeStr, OpNode, VTInfo.info512>,
8618 let Predicates = [prd, HasVLX] in {
8619 defm Z256 : avx512_unary_rmb<opc, OpcodeStr, OpNode, VTInfo.info256>,
8621 defm Z128 : avx512_unary_rmb<opc, OpcodeStr, OpNode, VTInfo.info128>,
// D/Q element sizes get broadcast forms; B/W element sizes do not
// (no 8/16-bit embedded broadcast exists in EVEX).
8626 multiclass avx512_unary_rm_vl_dq<bits<8> opc_d, bits<8> opc_q, string OpcodeStr,
8627 SDNode OpNode, Predicate prd> {
8628 defm Q : avx512_unary_rmb_vl<opc_q, OpcodeStr#"q", OpNode, avx512vl_i64_info,
8630 defm D : avx512_unary_rmb_vl<opc_d, OpcodeStr#"d", OpNode, avx512vl_i32_info,
8634 multiclass avx512_unary_rm_vl_bw<bits<8> opc_b, bits<8> opc_w, string OpcodeStr,
8635 SDNode OpNode, Predicate prd> {
8636 defm W : avx512_unary_rm_vl<opc_w, OpcodeStr#"w", OpNode, avx512vl_i16_info, prd>;
8637 defm B : avx512_unary_rm_vl<opc_b, OpcodeStr#"b", OpNode, avx512vl_i8_info, prd>;
// All four element sizes (B/W/D/Q) with per-size opcodes.
8640 multiclass avx512_unary_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
8641 bits<8> opc_d, bits<8> opc_q,
8642 string OpcodeStr, SDNode OpNode> {
8643 defm NAME : avx512_unary_rm_vl_dq<opc_d, opc_q, OpcodeStr, OpNode,
8645 avx512_unary_rm_vl_bw<opc_b, opc_w, OpcodeStr, OpNode,
8649 defm VPABS : avx512_unary_rm_vl_all<0x1C, 0x1D, 0x1E, 0x1F, "vpabs", abs>;
8651 // VPABS: Use 512bit version to implement 128/256 bit in case NoVLX.
// The narrow source is inserted into an undef ZMM; the wide VPABSQ result's
// low subregister then holds the desired 128/256-bit abs.
8652 let Predicates = [HasAVX512, NoVLX] in {
8653 def : Pat<(v4i64 (abs VR256X:$src)),
8656 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)),
8658 def : Pat<(v2i64 (abs VR128X:$src)),
8661 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)),
// CDI leading-zero count; same D/Q opcode for both element sizes.
8665 multiclass avx512_ctlz<bits<8> opc, string OpcodeStr, Predicate prd>{
8667 defm NAME : avx512_unary_rm_vl_dq<opc, opc, OpcodeStr, ctlz, prd>;
8670 defm VPLZCNT : avx512_ctlz<0x44, "vplzcnt", HasCDI>;
8671 defm VPCONFLICT : avx512_unary_rm_vl_dq<0xC4, 0xC4, "vpconflict", X86Conflict, HasCDI>;
8673 // VPLZCNT: Use 512bit version to implement 128/256 bit in case NoVLX.
// Same widen-through-ZMM trick as the VPABS NoVLX patterns above.
8674 let Predicates = [HasCDI, NoVLX] in {
8675 def : Pat<(v4i64 (ctlz VR256X:$src)),
8678 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)),
8680 def : Pat<(v2i64 (ctlz VR128X:$src)),
8683 (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)),
8686 def : Pat<(v8i32 (ctlz VR256X:$src)),
8689 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)),
8691 def : Pat<(v4i32 (ctlz VR128X:$src)),
8694 (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)),
8698 //===---------------------------------------------------------------------===//
8699 // Counts number of ones - VPOPCNTD and VPOPCNTQ
8700 //===---------------------------------------------------------------------===//
// Only a 512-bit instruction exists (AVX512_VPOPCNTDQ); narrower vectors are
// handled by the avx512_unary_lowering patterns below.
8702 multiclass avx512_unary_rmb_popcnt<bits<8> opc, string OpcodeStr, X86VectorVTInfo VTInfo> {
8703 let Predicates = [HasVPOPCNTDQ] in
8704 defm Z : avx512_unary_rmb<opc, OpcodeStr, ctpop, VTInfo>, EVEX_V512;
8707 // Use 512bit version to implement 128/256 bit.
8708 multiclass avx512_unary_lowering<SDNode OpNode, AVX512VLVectorVTInfo _, Predicate prd> {
8709 let Predicates = [prd] in {
8710 def Z256_Alt : Pat<(_.info256.VT(OpNode _.info256.RC:$src1)),
8712 (!cast<Instruction>(NAME # "Zrr")
8713 (INSERT_SUBREG(_.info512.VT(IMPLICIT_DEF)),
8715 _.info256.SubRegIdx)),
8716 _.info256.SubRegIdx)>;
8718 def Z128_Alt : Pat<(_.info128.VT(OpNode _.info128.RC:$src1)),
8720 (!cast<Instruction>(NAME # "Zrr")
8721 (INSERT_SUBREG(_.info512.VT(IMPLICIT_DEF)),
8723 _.info128.SubRegIdx)),
8724 _.info128.SubRegIdx)>;
8728 defm VPOPCNTD : avx512_unary_rmb_popcnt<0x55, "vpopcntd", v16i32_info>,
8729 avx512_unary_lowering<ctpop, avx512vl_i32_info, HasVPOPCNTDQ>;
8730 defm VPOPCNTQ : avx512_unary_rmb_popcnt<0x55, "vpopcntq", v8i64_info>,
8731 avx512_unary_lowering<ctpop, avx512vl_i64_info, HasVPOPCNTDQ>, VEX_W;
8733 //===---------------------------------------------------------------------===//
8734 // Replicate Single FP - MOVSHDUP and MOVSLDUP
8735 //===---------------------------------------------------------------------===//
// f32-only unary replicate ops across all vector widths.
8736 multiclass avx512_replicate<bits<8> opc, string OpcodeStr, SDNode OpNode>{
8737 defm NAME: avx512_unary_rm_vl<opc, OpcodeStr, OpNode, avx512vl_f32_info,
8741 defm VMOVSHDUP : avx512_replicate<0x16, "vmovshdup", X86Movshdup>;
8742 defm VMOVSLDUP : avx512_replicate<0x12, "vmovsldup", X86Movsldup>;
8744 //===----------------------------------------------------------------------===//
8745 // AVX-512 - MOVDDUP
8746 //===----------------------------------------------------------------------===//
// 128-bit MOVDDUP is special-cased: its memory form loads only one f64
// (scalar_to_vector) rather than a full vector, hence CD8VH tuple.
8748 multiclass avx512_movddup_128<bits<8> opc, string OpcodeStr, SDNode OpNode,
8749 X86VectorVTInfo _> {
8750 let ExeDomain = _.ExeDomain in {
8751 defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
8752 (ins _.RC:$src), OpcodeStr, "$src", "$src",
8753 (_.VT (OpNode (_.VT _.RC:$src)))>, EVEX;
8754 defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
8755 (ins _.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
8756 (_.VT (OpNode (_.VT (scalar_to_vector
8757 (_.ScalarLdFrag addr:$src)))))>,
8758 EVEX, EVEX_CD8<_.EltSize, CD8VH>;
// 512/256-bit use the generic unary_rm forms; 128-bit uses the
// scalar-load variant above.
8762 multiclass avx512_movddup_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
8763 AVX512VLVectorVTInfo VTInfo> {
8765 defm Z : avx512_unary_rm<opc, OpcodeStr, OpNode, VTInfo.info512>, EVEX_V512;
8767 let Predicates = [HasAVX512, HasVLX] in {
8768 defm Z256 : avx512_unary_rm<opc, OpcodeStr, OpNode, VTInfo.info256>,
8770 defm Z128 : avx512_movddup_128<opc, OpcodeStr, OpNode, VTInfo.info128>,
8775 multiclass avx512_movddup<bits<8> opc, string OpcodeStr, SDNode OpNode>{
8776 defm NAME: avx512_movddup_common<opc, OpcodeStr, OpNode,
8777 avx512vl_f64_info>, XD, VEX_W;
8780 defm VMOVDDUP : avx512_movddup<0x12, "vmovddup", X86Movddup>;
// Select 128-bit VMOVDDUP for f64 broadcast and for Movddup of a loaded
// vector, including the masked (k) and zero-masked (kz) vselect forms.
8782 let Predicates = [HasVLX] in {
8783 def : Pat<(X86Movddup (loadv2f64 addr:$src)),
8784 (VMOVDDUPZ128rm addr:$src)>;
8785 def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
8786 (VMOVDDUPZ128rm addr:$src)>;
8787 def : Pat<(v2f64 (X86VBroadcast f64:$src)),
8788 (VMOVDDUPZ128rr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
8790 def : Pat<(vselect (v2i1 VK2WM:$mask), (X86Movddup (loadv2f64 addr:$src)),
8791 (v2f64 VR128X:$src0)),
8792 (VMOVDDUPZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>;
8793 def : Pat<(vselect (v2i1 VK2WM:$mask), (X86Movddup (loadv2f64 addr:$src)),
8794 (bitconvert (v4i32 immAllZerosV))),
8795 (VMOVDDUPZ128rmkz VK2WM:$mask, addr:$src)>;
8797 def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast f64:$src)),
8798 (v2f64 VR128X:$src0)),
8799 (VMOVDDUPZ128rrk VR128X:$src0, VK2WM:$mask,
8800 (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
8801 def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast f64:$src)),
8802 (bitconvert (v4i32 immAllZerosV))),
8803 (VMOVDDUPZ128rrkz VK2WM:$mask, (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
8805 def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast (loadf64 addr:$src))),
8806 (v2f64 VR128X:$src0)),
8807 (VMOVDDUPZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>;
8808 def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast (loadf64 addr:$src))),
8809 (bitconvert (v4i32 immAllZerosV))),
8810 (VMOVDDUPZ128rmkz VK2WM:$mask, addr:$src)>;
8813 //===----------------------------------------------------------------------===//
8814 // AVX-512 - Unpack Instructions
8815 //===----------------------------------------------------------------------===//
8816 defm VUNPCKH : avx512_fp_binop_p<0x15, "vunpckh", X86Unpckh, HasAVX512,
8818 defm VUNPCKL : avx512_fp_binop_p<0x14, "vunpckl", X86Unpckl, HasAVX512,
// Integer unpacks: B/W variants need BWI; D/Q variants only AVX512F.
8821 defm VPUNPCKLBW : avx512_binop_rm_vl_b<0x60, "vpunpcklbw", X86Unpckl,
8822 SSE_INTALU_ITINS_P, HasBWI>;
8823 defm VPUNPCKHBW : avx512_binop_rm_vl_b<0x68, "vpunpckhbw", X86Unpckh,
8824 SSE_INTALU_ITINS_P, HasBWI>;
8825 defm VPUNPCKLWD : avx512_binop_rm_vl_w<0x61, "vpunpcklwd", X86Unpckl,
8826 SSE_INTALU_ITINS_P, HasBWI>;
8827 defm VPUNPCKHWD : avx512_binop_rm_vl_w<0x69, "vpunpckhwd", X86Unpckh,
8828 SSE_INTALU_ITINS_P, HasBWI>;
8830 defm VPUNPCKLDQ : avx512_binop_rm_vl_d<0x62, "vpunpckldq", X86Unpckl,
8831 SSE_INTALU_ITINS_P, HasAVX512>;
8832 defm VPUNPCKHDQ : avx512_binop_rm_vl_d<0x6A, "vpunpckhdq", X86Unpckh,
8833 SSE_INTALU_ITINS_P, HasAVX512>;
8834 defm VPUNPCKLQDQ : avx512_binop_rm_vl_q<0x6C, "vpunpcklqdq", X86Unpckl,
8835 SSE_INTALU_ITINS_P, HasAVX512>;
8836 defm VPUNPCKHQDQ : avx512_binop_rm_vl_q<0x6D, "vpunpckhqdq", X86Unpckh,
8837 SSE_INTALU_ITINS_P, HasAVX512>;
8839 //===----------------------------------------------------------------------===//
8840 // AVX-512 - Extract & Insert Integer Instructions
8841 //===----------------------------------------------------------------------===//
// Extract-to-memory form shared by VPEXTRB/VPEXTRW: truncate+store of the
// zero-extended extracted element.
8843 multiclass avx512_extract_elt_bw_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
8844 X86VectorVTInfo _> {
8845 def mr : AVX512Ii8<opc, MRMDestMem, (outs),
8846 (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
8847 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8848 [(store (_.EltVT (trunc (assertzext (OpNode (_.VT _.RC:$src1),
8851 EVEX, EVEX_CD8<_.EltSize, CD8VT1>;
// VPEXTRB: extract byte into GR32/GR64 (zero-extended), plus memory form.
8854 multiclass avx512_extract_elt_b<string OpcodeStr, X86VectorVTInfo _> {
8855 let Predicates = [HasBWI] in {
8856 def rr : AVX512Ii8<0x14, MRMDestReg, (outs GR32orGR64:$dst),
8857 (ins _.RC:$src1, u8imm:$src2),
8858 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8859 [(set GR32orGR64:$dst,
8860 (X86pextrb (_.VT _.RC:$src1), imm:$src2))]>,
8863 defm NAME : avx512_extract_elt_bw_m<0x14, OpcodeStr, X86pextrb, _>, TAPD;
// VPEXTRW: legacy MRMSrcReg encoding (0xC5) plus a no-pattern rr_REV form
// for the alternate MRMDestReg encoding (0x15), used for load folding info.
8867 multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
8868 let Predicates = [HasBWI] in {
8869 def rr : AVX512Ii8<0xC5, MRMSrcReg, (outs GR32orGR64:$dst),
8870 (ins _.RC:$src1, u8imm:$src2),
8871 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8872 [(set GR32orGR64:$dst,
8873 (X86pextrw (_.VT _.RC:$src1), imm:$src2))]>,
8876 let hasSideEffects = 0 in
8877 def rr_REV : AVX512Ii8<0x15, MRMDestReg, (outs GR32orGR64:$dst),
8878 (ins _.RC:$src1, u8imm:$src2),
8879 OpcodeStr#".s\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
8880 EVEX, TAPD, FoldGenData<NAME#rr>;
8882 defm NAME : avx512_extract_elt_bw_m<0x15, OpcodeStr, X86pextrw, _>, TAPD;
// VPEXTRD/VPEXTRQ: extract dword/qword into a GPR or to memory; DQI-only.
8886 multiclass avx512_extract_elt_dq<string OpcodeStr, X86VectorVTInfo _,
8887 RegisterClass GRC> {
8888 let Predicates = [HasDQI] in {
8889 def rr : AVX512Ii8<0x16, MRMDestReg, (outs GRC:$dst),
8890 (ins _.RC:$src1, u8imm:$src2),
8891 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8893 (extractelt (_.VT _.RC:$src1), imm:$src2))]>,
8896 def mr : AVX512Ii8<0x16, MRMDestMem, (outs),
8897 (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
8898 OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8899 [(store (extractelt (_.VT _.RC:$src1),
8900 imm:$src2),addr:$dst)]>,
8901 EVEX, EVEX_CD8<_.EltSize, CD8VT1>, TAPD;
8905 defm VPEXTRBZ : avx512_extract_elt_b<"vpextrb", v16i8x_info>;
8906 defm VPEXTRWZ : avx512_extract_elt_w<"vpextrw", v8i16x_info>;
8907 defm VPEXTRDZ : avx512_extract_elt_dq<"vpextrd", v4i32x_info, GR32>;
8908 defm VPEXTRQZ : avx512_extract_elt_dq<"vpextrq", v2i64x_info, GR64>, VEX_W;
// Insert-from-memory form shared by all VPINSR* variants.
8910 multiclass avx512_insert_elt_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
8911 X86VectorVTInfo _, PatFrag LdFrag> {
8912 def rm : AVX512Ii8<opc, MRMSrcMem, (outs _.RC:$dst),
8913 (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
8914 OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8916 (_.VT (OpNode _.RC:$src1, (LdFrag addr:$src2), imm:$src3)))]>,
8917 EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
// VPINSRB/VPINSRW: insert from GR32/GR64 (low bits used); BWI-only.
8920 multiclass avx512_insert_elt_bw<bits<8> opc, string OpcodeStr, SDNode OpNode,
8921 X86VectorVTInfo _, PatFrag LdFrag> {
8922 let Predicates = [HasBWI] in {
8923 def rr : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
8924 (ins _.RC:$src1, GR32orGR64:$src2, u8imm:$src3),
8925 OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8927 (OpNode _.RC:$src1, GR32orGR64:$src2, imm:$src3))]>, EVEX_4V;
8929 defm NAME : avx512_insert_elt_m<opc, OpcodeStr, OpNode, _, LdFrag>;
// VPINSRD/VPINSRQ: insert dword/qword from a GPR; DQI-only, generic
// insertelt node (no target-specific SDNode needed).
8933 multiclass avx512_insert_elt_dq<bits<8> opc, string OpcodeStr,
8934 X86VectorVTInfo _, RegisterClass GRC> {
8935 let Predicates = [HasDQI] in {
8936 def rr : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
8937 (ins _.RC:$src1, GRC:$src2, u8imm:$src3),
8938 OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8940 (_.VT (insertelt _.RC:$src1, GRC:$src2, imm:$src3)))]>,
8943 defm NAME : avx512_insert_elt_m<opc, OpcodeStr, insertelt, _,
8944 _.ScalarLdFrag>, TAPD;
8948 defm VPINSRBZ : avx512_insert_elt_bw<0x20, "vpinsrb", X86pinsrb, v16i8x_info,
8950 defm VPINSRWZ : avx512_insert_elt_bw<0xC4, "vpinsrw", X86pinsrw, v8i16x_info,
8952 defm VPINSRDZ : avx512_insert_elt_dq<0x22, "vpinsrd", v4i32x_info, GR32>;
8953 defm VPINSRQZ : avx512_insert_elt_dq<0x22, "vpinsrq", v2i64x_info, GR64>, VEX_W;
8954 //===----------------------------------------------------------------------===//
8955 // VSHUFPS - VSHUFPD Operations
8956 //===----------------------------------------------------------------------===//
// VTInfo_I is the integer twin of VTInfo_FP; the instruction itself is
// defined over the FP types (opcode 0xC6).
8957 multiclass avx512_shufp<string OpcodeStr, AVX512VLVectorVTInfo VTInfo_I,
8958 AVX512VLVectorVTInfo VTInfo_FP>{
8959 defm NAME: avx512_common_3Op_imm8<OpcodeStr, VTInfo_FP, 0xC6, X86Shufp>,
8960 EVEX_CD8<VTInfo_FP.info512.EltSize, CD8VF>,
8961 AVX512AIi8Base, EVEX_4V;
8964 defm VSHUFPS: avx512_shufp<"vshufps", avx512vl_i32_info, avx512vl_f32_info>, PS;
8965 defm VSHUFPD: avx512_shufp<"vshufpd", avx512vl_i64_info, avx512vl_f64_info>, PD, VEX_W;
8966 //===----------------------------------------------------------------------===//
8967 // AVX-512 - Byte shift Left/Right
8968 //===----------------------------------------------------------------------===//
// Whole-register byte shifts (PSLLDQ/PSRLDQ style). The opcode-extension
// Format arguments (MRMr/MRMm) select the /7 or /3 encoding group.
8970 multiclass avx512_shift_packed<bits<8> opc, SDNode OpNode, Format MRMr,
8971 Format MRMm, string OpcodeStr, X86VectorVTInfo _>{
8972 def rr : AVX512<opc, MRMr,
8973 (outs _.RC:$dst), (ins _.RC:$src1, u8imm:$src2),
8974 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8975 [(set _.RC:$dst,(_.VT (OpNode _.RC:$src1, (i8 imm:$src2))))]>;
8976 def rm : AVX512<opc, MRMm,
8977 (outs _.RC:$dst), (ins _.MemOp:$src1, u8imm:$src2),
8978 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8979 [(set _.RC:$dst,(_.VT (OpNode
8980 (_.VT (bitconvert (_.LdFrag addr:$src1))),
8981 (i8 imm:$src2))))]>;
// Instantiates the byte-shift at all widths over i8 vectors.
8984 multiclass avx512_shift_packed_all<bits<8> opc, SDNode OpNode, Format MRMr,
8985 Format MRMm, string OpcodeStr, Predicate prd>{
8986 let Predicates = [prd] in
8987 defm Z512 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
8988 OpcodeStr, v64i8_info>, EVEX_V512;
8989 let Predicates = [prd, HasVLX] in {
8990 defm Z256 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
8991 OpcodeStr, v32i8x_info>, EVEX_V256;
8992 defm Z128 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
8993 OpcodeStr, v16i8x_info>, EVEX_V128;
8996 defm VPSLLDQ : avx512_shift_packed_all<0x73, X86vshldq, MRM7r, MRM7m, "vpslldq",
8997 HasBWI>, AVX512PDIi8Base, EVEX_4V;
8998 defm VPSRLDQ : avx512_shift_packed_all<0x73, X86vshrdq, MRM3r, MRM3m, "vpsrldq",
8999 HasBWI>, AVX512PDIi8Base, EVEX_4V;
// PSADBW: i8 sources, i64 accumulated results, hence distinct _dst/_src
// VT infos. No masking (matches the hardware, which has none for PSADBW).
9002 multiclass avx512_psadbw_packed<bits<8> opc, SDNode OpNode,
9003 string OpcodeStr, X86VectorVTInfo _dst,
9004 X86VectorVTInfo _src>{
9005 def rr : AVX512BI<opc, MRMSrcReg,
9006 (outs _dst.RC:$dst), (ins _src.RC:$src1, _src.RC:$src2),
9007 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
9008 [(set _dst.RC:$dst,(_dst.VT
9009 (OpNode (_src.VT _src.RC:$src1),
9010 (_src.VT _src.RC:$src2))))]>;
9011 def rm : AVX512BI<opc, MRMSrcMem,
9012 (outs _dst.RC:$dst), (ins _src.RC:$src1, _src.MemOp:$src2),
9013 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
9014 [(set _dst.RC:$dst,(_dst.VT
9015 (OpNode (_src.VT _src.RC:$src1),
9016 (_src.VT (bitconvert
9017 (_src.LdFrag addr:$src2))))))]>;
// All-width instantiation of PSADBW (i64 dest / i8 source pairs per width).
9020 multiclass avx512_psadbw_packed_all<bits<8> opc, SDNode OpNode,
9021 string OpcodeStr, Predicate prd> {
9022 let Predicates = [prd] in
9023 defm Z512 : avx512_psadbw_packed<opc, OpNode, OpcodeStr, v8i64_info,
9024 v64i8_info>, EVEX_V512;
9025 let Predicates = [prd, HasVLX] in {
9026 defm Z256 : avx512_psadbw_packed<opc, OpNode, OpcodeStr, v4i64x_info,
9027 v32i8x_info>, EVEX_V256;
9028 defm Z128 : avx512_psadbw_packed<opc, OpNode, OpcodeStr, v2i64x_info,
9029 v16i8x_info>, EVEX_V128;
9033 defm VPSADBW : avx512_psadbw_packed_all<0xf6, X86psadbw, "vpsadbw",
9036 // Transforms to swizzle an immediate to enable better matching when
9037 // memory operand isn't in the right place.
// Bit i of a VPTERNLOG immediate is the truth-table output for the input
// combination whose binary encoding is i (op0 = bit 2, op1 = bit 1,
// op2 = bit 0); permuting the operands therefore permutes these index bits.
9038 def VPTERNLOG321_imm8 : SDNodeXForm<imm, [{
9039 // Convert a VPTERNLOG immediate by swapping operand 0 and operand 2.
9040 uint8_t Imm = N->getZExtValue();
9041 // Swap bits 1/4 and 3/6.
9042 uint8_t NewImm = Imm & 0xa5;
9043 if (Imm & 0x02) NewImm |= 0x10;
9044 if (Imm & 0x10) NewImm |= 0x02;
9045 if (Imm & 0x08) NewImm |= 0x40;
9046 if (Imm & 0x40) NewImm |= 0x08;
9047 return getI8Imm(NewImm, SDLoc(N));
9049 def VPTERNLOG213_imm8 : SDNodeXForm<imm, [{
  // Convert a VPTERNLOG immediate by swapping operand 0 and operand 1.
  // (The bit transform below keeps index bits 0/1/6/7 — mask 0xc3 — and
  // exchanges truth-table entries 2<->4 and 3<->5, which corresponds to
  // exchanging the first two operands, not operands 1 and 2.)
9051 uint8_t Imm = N->getZExtValue();
9052 // Swap bits 2/4 and 3/5.
9053 uint8_t NewImm = Imm & 0xc3;
9054 if (Imm & 0x04) NewImm |= 0x10;
9055 if (Imm & 0x10) NewImm |= 0x04;
9056 if (Imm & 0x08) NewImm |= 0x20;
9057 if (Imm & 0x20) NewImm |= 0x08;
9058 return getI8Imm(NewImm, SDLoc(N));
9060 def VPTERNLOG132_imm8 : SDNodeXForm<imm, [{
9061 // Convert a VPTERNLOG immediate by swapping operand 1 and operand 2.
9062 uint8_t Imm = N->getZExtValue();
9063 // Swap bits 1/2 and 5/6.
// Keeps index bits 0/3/4/7 (mask 0x99); exchanging truth-table entries
// 1<->2 and 5<->6 swaps the roles of the last two operands.
9064 uint8_t NewImm = Imm & 0x99;
9065 if (Imm & 0x02) NewImm |= 0x04;
9066 if (Imm & 0x04) NewImm |= 0x02;
9067 if (Imm & 0x20) NewImm |= 0x40;
9068 if (Imm & 0x40) NewImm |= 0x20;
9069 return getI8Imm(NewImm, SDLoc(N));
9071 def VPTERNLOG231_imm8 : SDNodeXForm<imm, [{
9072 // Convert a VPTERNLOG immediate by moving operand 1 to the end.
9073 uint8_t Imm = N->getZExtValue();
9074 // Move bits 1->2, 2->4, 3->6, 4->1, 5->3, 6->5
// Rotates the three operand index bits (fixed points 0 and 7 kept by 0x81).
9075 uint8_t NewImm = Imm & 0x81;
9076 if (Imm & 0x02) NewImm |= 0x04;
9077 if (Imm & 0x04) NewImm |= 0x10;
9078 if (Imm & 0x08) NewImm |= 0x40;
9079 if (Imm & 0x10) NewImm |= 0x02;
9080 if (Imm & 0x20) NewImm |= 0x08;
9081 if (Imm & 0x40) NewImm |= 0x20;
9082 return getI8Imm(NewImm, SDLoc(N));
9084 def VPTERNLOG312_imm8 : SDNodeXForm<imm, [{
9085 // Convert a VPTERNLOG immediate by moving operand 2 to the beginning.
9086 uint8_t Imm = N->getZExtValue();
9087 // Move bits 1->4, 2->1, 3->5, 4->2, 5->6, 6->3
// Inverse rotation of VPTERNLOG231_imm8 (fixed points 0 and 7 via 0x81).
9088 uint8_t NewImm = Imm & 0x81;
9089 if (Imm & 0x02) NewImm |= 0x10;
9090 if (Imm & 0x04) NewImm |= 0x02;
9091 if (Imm & 0x08) NewImm |= 0x20;
9092 if (Imm & 0x10) NewImm |= 0x04;
9093 if (Imm & 0x20) NewImm |= 0x40;
9094 if (Imm & 0x40) NewImm |= 0x08;
9095 return getI8Imm(NewImm, SDLoc(N));
// VPTERNLOG: three-source ternary bitwise logic, selected by an 8-bit
// truth-table immediate ($src4).  Defines the reg/reg, reg/mem and
// broadcast-mem forms (with $src1 tied to $dst), followed by extra
// selection patterns that match commuted operand orders, loads/broadcasts
// in non-canonical positions, and masked / zero-masked variants.
// NOTE(review): the VPTERNLOG132/213/231/312/321_imm8 transforms used below
// presumably permute the truth-table immediate to compensate for the swapped
// operand order -- they are defined elsewhere in this file; verify there.
9098 multiclass avx512_ternlog<bits<8> opc, string OpcodeStr, SDNode OpNode,
9100 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
9101 defm rri : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
9102 (ins _.RC:$src2, _.RC:$src3, u8imm:$src4),
9103 OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src4",
9104 (OpNode (_.VT _.RC:$src1),
9107 (i8 imm:$src4)), 1, 1>, AVX512AIi8Base, EVEX_4V;
9108 defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
9109 (ins _.RC:$src2, _.MemOp:$src3, u8imm:$src4),
9110 OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src4",
9111 (OpNode (_.VT _.RC:$src1),
9113 (_.VT (bitconvert (_.LdFrag addr:$src3))),
9114 (i8 imm:$src4)), 1, 0>,
9115 AVX512AIi8Base, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
9116 defm rmbi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
9117 (ins _.RC:$src2, _.ScalarMemOp:$src3, u8imm:$src4),
9118 OpcodeStr, "$src4, ${src3}"##_.BroadcastStr##", $src2",
9119 "$src2, ${src3}"##_.BroadcastStr##", $src4",
9120 (OpNode (_.VT _.RC:$src1),
9122 (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
9123 (i8 imm:$src4)), 1, 0>, EVEX_B,
9124 AVX512AIi8Base, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
9125 }// Constraints = "$src1 = $dst"
// Each pattern below selects the already-defined instruction via
// !cast<Instruction>(NAME#...) while rewriting the immediate with the
// matching VPTERNLOGnnn_imm8 transform for the commuted operand order.
9127 // Additional patterns for matching passthru operand in other positions.
9128 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9129 (OpNode _.RC:$src3, _.RC:$src2, _.RC:$src1, (i8 imm:$src4)),
9131 (!cast<Instruction>(NAME#_.ZSuffix#rrik) _.RC:$src1, _.KRCWM:$mask,
9132 _.RC:$src2, _.RC:$src3, (VPTERNLOG321_imm8 imm:$src4))>;
9133 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9134 (OpNode _.RC:$src2, _.RC:$src1, _.RC:$src3, (i8 imm:$src4)),
9136 (!cast<Instruction>(NAME#_.ZSuffix#rrik) _.RC:$src1, _.KRCWM:$mask,
9137 _.RC:$src2, _.RC:$src3, (VPTERNLOG213_imm8 imm:$src4))>;
9139 // Additional patterns for matching loads in other positions.
9140 def : Pat<(_.VT (OpNode (bitconvert (_.LdFrag addr:$src3)),
9141 _.RC:$src2, _.RC:$src1, (i8 imm:$src4))),
9142 (!cast<Instruction>(NAME#_.ZSuffix#rmi) _.RC:$src1, _.RC:$src2,
9143 addr:$src3, (VPTERNLOG321_imm8 imm:$src4))>;
9144 def : Pat<(_.VT (OpNode _.RC:$src1,
9145 (bitconvert (_.LdFrag addr:$src3)),
9146 _.RC:$src2, (i8 imm:$src4))),
9147 (!cast<Instruction>(NAME#_.ZSuffix#rmi) _.RC:$src1, _.RC:$src2,
9148 addr:$src3, (VPTERNLOG132_imm8 imm:$src4))>;
9150 // Additional patterns for matching zero masking with loads in other
9152 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9153 (OpNode (bitconvert (_.LdFrag addr:$src3)),
9154 _.RC:$src2, _.RC:$src1, (i8 imm:$src4)),
9156 (!cast<Instruction>(NAME#_.ZSuffix#rmikz) _.RC:$src1, _.KRCWM:$mask,
9157 _.RC:$src2, addr:$src3, (VPTERNLOG321_imm8 imm:$src4))>;
9158 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9159 (OpNode _.RC:$src1, (bitconvert (_.LdFrag addr:$src3)),
9160 _.RC:$src2, (i8 imm:$src4)),
9162 (!cast<Instruction>(NAME#_.ZSuffix#rmikz) _.RC:$src1, _.KRCWM:$mask,
9163 _.RC:$src2, addr:$src3, (VPTERNLOG132_imm8 imm:$src4))>;
9165 // Additional patterns for matching masked loads with different
9167 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9168 (OpNode _.RC:$src1, (bitconvert (_.LdFrag addr:$src3)),
9169 _.RC:$src2, (i8 imm:$src4)),
9171 (!cast<Instruction>(NAME#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask,
9172 _.RC:$src2, addr:$src3, (VPTERNLOG132_imm8 imm:$src4))>;
9173 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9174 (OpNode (bitconvert (_.LdFrag addr:$src3)),
9175 _.RC:$src2, _.RC:$src1, (i8 imm:$src4)),
9177 (!cast<Instruction>(NAME#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask,
9178 _.RC:$src2, addr:$src3, (VPTERNLOG321_imm8 imm:$src4))>;
9179 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9180 (OpNode _.RC:$src2, _.RC:$src1,
9181 (bitconvert (_.LdFrag addr:$src3)), (i8 imm:$src4)),
9183 (!cast<Instruction>(NAME#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask,
9184 _.RC:$src2, addr:$src3, (VPTERNLOG213_imm8 imm:$src4))>;
9185 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9186 (OpNode _.RC:$src2, (bitconvert (_.LdFrag addr:$src3)),
9187 _.RC:$src1, (i8 imm:$src4)),
9189 (!cast<Instruction>(NAME#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask,
9190 _.RC:$src2, addr:$src3, (VPTERNLOG231_imm8 imm:$src4))>;
9191 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9192 (OpNode (bitconvert (_.LdFrag addr:$src3)),
9193 _.RC:$src1, _.RC:$src2, (i8 imm:$src4)),
9195 (!cast<Instruction>(NAME#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask,
9196 _.RC:$src2, addr:$src3, (VPTERNLOG312_imm8 imm:$src4))>;
9198 // Additional patterns for matching broadcasts in other positions.
9199 def : Pat<(_.VT (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
9200 _.RC:$src2, _.RC:$src1, (i8 imm:$src4))),
9201 (!cast<Instruction>(NAME#_.ZSuffix#rmbi) _.RC:$src1, _.RC:$src2,
9202 addr:$src3, (VPTERNLOG321_imm8 imm:$src4))>;
9203 def : Pat<(_.VT (OpNode _.RC:$src1,
9204 (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
9205 _.RC:$src2, (i8 imm:$src4))),
9206 (!cast<Instruction>(NAME#_.ZSuffix#rmbi) _.RC:$src1, _.RC:$src2,
9207 addr:$src3, (VPTERNLOG132_imm8 imm:$src4))>;
9209 // Additional patterns for matching zero masking with broadcasts in other
9211 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9212 (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
9213 _.RC:$src2, _.RC:$src1, (i8 imm:$src4)),
9215 (!cast<Instruction>(NAME#_.ZSuffix#rmbikz) _.RC:$src1,
9216 _.KRCWM:$mask, _.RC:$src2, addr:$src3,
9217 (VPTERNLOG321_imm8 imm:$src4))>;
9218 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9220 (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
9221 _.RC:$src2, (i8 imm:$src4)),
9223 (!cast<Instruction>(NAME#_.ZSuffix#rmbikz) _.RC:$src1,
9224 _.KRCWM:$mask, _.RC:$src2, addr:$src3,
9225 (VPTERNLOG132_imm8 imm:$src4))>;
9227 // Additional patterns for matching masked broadcasts with different
9229 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9231 (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
9232 _.RC:$src2, (i8 imm:$src4)),
9234 (!cast<Instruction>(NAME#_.ZSuffix#rmbik) _.RC:$src1, _.KRCWM:$mask,
9235 _.RC:$src2, addr:$src3, (VPTERNLOG132_imm8 imm:$src4))>;
9236 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9237 (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
9238 _.RC:$src2, _.RC:$src1, (i8 imm:$src4)),
9240 (!cast<Instruction>(NAME#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask,
9241 _.RC:$src2, addr:$src3, (VPTERNLOG321_imm8 imm:$src4))>;
9242 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9243 (OpNode _.RC:$src2, _.RC:$src1,
9244 (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
9245 (i8 imm:$src4)), _.RC:$src1)),
9246 (!cast<Instruction>(NAME#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask,
9247 _.RC:$src2, addr:$src3, (VPTERNLOG213_imm8 imm:$src4))>;
9248 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9250 (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
9251 _.RC:$src1, (i8 imm:$src4)),
9253 (!cast<Instruction>(NAME#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask,
9254 _.RC:$src2, addr:$src3, (VPTERNLOG231_imm8 imm:$src4))>;
9255 def : Pat<(_.VT (vselect _.KRCWM:$mask,
9256 (OpNode (X86VBroadcast (_.ScalarLdFrag addr:$src3)),
9257 _.RC:$src1, _.RC:$src2, (i8 imm:$src4)),
9259 (!cast<Instruction>(NAME#_.ZSuffix#rmik) _.RC:$src1, _.KRCWM:$mask,
9260 _.RC:$src2, addr:$src3, (VPTERNLOG312_imm8 imm:$src4))>;
// Instantiates avx512_ternlog (opcode 0x25) at all three vector widths:
// the 512-bit form only requires AVX512F, while the 128/256-bit forms
// additionally require VLX.
9263 multiclass avx512_common_ternlog<string OpcodeStr, AVX512VLVectorVTInfo _>{
9264 let Predicates = [HasAVX512] in
9265 defm Z : avx512_ternlog<0x25, OpcodeStr, X86vpternlog, _.info512>, EVEX_V512;
9266 let Predicates = [HasAVX512, HasVLX] in {
9267 defm Z128 : avx512_ternlog<0x25, OpcodeStr, X86vpternlog, _.info128>, EVEX_V128;
9268 defm Z256 : avx512_ternlog<0x25, OpcodeStr, X86vpternlog, _.info256>, EVEX_V256;
// Concrete dword/qword ternary-logic instructions; the qword variant is
// distinguished by VEX_W.
9272 defm VPTERNLOGD : avx512_common_ternlog<"vpternlogd", avx512vl_i32_info>;
9273 defm VPTERNLOGQ : avx512_common_ternlog<"vpternlogq", avx512vl_i64_info>, VEX_W;
9275 //===----------------------------------------------------------------------===//
9276 // AVX-512 - FixupImm
9277 //===----------------------------------------------------------------------===//
// VFIXUPIMM (packed): fixes up special floating-point values in $src2 under
// control of an integer table operand ($src3) and an immediate ($src4),
// with $src1 tied to $dst.  Defines reg/reg, reg/mem and broadcast-mem
// forms, all using the current rounding mode (FROUND_CURRENT).
9279 multiclass avx512_fixupimm_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
9281 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
9282 defm rri : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
9283 (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
9284 OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
9285 (OpNode (_.VT _.RC:$src1),
9287 (_.IntVT _.RC:$src3),
9289 (i32 FROUND_CURRENT))>;
9290 defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
9291 (ins _.RC:$src2, _.MemOp:$src3, i32u8imm:$src4),
9292 OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
9293 (OpNode (_.VT _.RC:$src1),
9295 (_.IntVT (bitconvert (_.LdFrag addr:$src3))),
9297 (i32 FROUND_CURRENT))>;
9298 defm rmbi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
9299 (ins _.RC:$src2, _.ScalarMemOp:$src3, i32u8imm:$src4),
9300 OpcodeStr##_.Suffix, "$src4, ${src3}"##_.BroadcastStr##", $src2",
9301 "$src2, ${src3}"##_.BroadcastStr##", $src4",
9302 (OpNode (_.VT _.RC:$src1),
9304 (_.IntVT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
9306 (i32 FROUND_CURRENT))>, EVEX_B;
9307 } // Constraints = "$src1 = $dst"
// Suppress-all-exceptions variant of packed VFIXUPIMM: register-only form
// with the {sae} modifier in the assembly string and FROUND_NO_EXC as the
// rounding-control argument.  EVEX_B encodes {sae} for register operands.
9310 multiclass avx512_fixupimm_packed_sae<bits<8> opc, string OpcodeStr,
9311 SDNode OpNode, X86VectorVTInfo _>{
9312 let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
9313 defm rrib : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
9314 (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
9315 OpcodeStr##_.Suffix, "$src4, {sae}, $src3, $src2",
9316 "$src2, $src3, {sae}, $src4",
9317 (OpNode (_.VT _.RC:$src1),
9319 (_.IntVT _.RC:$src3),
9321 (i32 FROUND_NO_EXC))>, EVEX_B;
// Scalar VFIXUPIMM (SS/SD): reg/reg, reg/reg-with-{sae}, and reg/mem forms.
// _src3VT describes the integer table operand's vector type (e.g. v4i32 for
// the SS form); for the memory form the scalar load is wrapped with
// scalar_to_vector to produce it.
9325 multiclass avx512_fixupimm_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
9326 X86VectorVTInfo _, X86VectorVTInfo _src3VT> {
9327 let Constraints = "$src1 = $dst" , Predicates = [HasAVX512],
9328 ExeDomain = _.ExeDomain in {
9329 defm rri : AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
9330 (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
9331 OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
9332 (OpNode (_.VT _.RC:$src1),
9334 (_src3VT.VT _src3VT.RC:$src3),
9336 (i32 FROUND_CURRENT))>;
9338 defm rrib : AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
9339 (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
9340 OpcodeStr##_.Suffix, "$src4, {sae}, $src3, $src2",
9341 "$src2, $src3, {sae}, $src4",
9342 (OpNode (_.VT _.RC:$src1),
9344 (_src3VT.VT _src3VT.RC:$src3),
9346 (i32 FROUND_NO_EXC))>, EVEX_B;
9347 defm rmi : AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
9348 (ins _.RC:$src2, _.ScalarMemOp:$src3, i32u8imm:$src4),
9349 OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
9350 (OpNode (_.VT _.RC:$src1),
9352 (_src3VT.VT (scalar_to_vector
9353 (_src3VT.ScalarLdFrag addr:$src3))),
9355 (i32 FROUND_CURRENT))>;
// Instantiates packed VFIXUPIMM (opcode 0x54) at all three widths.  Only
// the 512-bit form also gets the {sae} variant; 128/256-bit forms require
// VLX in addition to AVX512F.
9359 multiclass avx512_fixupimm_packed_all<AVX512VLVectorVTInfo _Vec>{
9360 let Predicates = [HasAVX512] in
9361 defm Z : avx512_fixupimm_packed<0x54, "vfixupimm", X86VFixupimm, _Vec.info512>,
9362 avx512_fixupimm_packed_sae<0x54, "vfixupimm", X86VFixupimm, _Vec.info512>,
9363 AVX512AIi8Base, EVEX_4V, EVEX_V512;
9364 let Predicates = [HasAVX512, HasVLX] in {
9365 defm Z128 : avx512_fixupimm_packed<0x54, "vfixupimm", X86VFixupimm, _Vec.info128>,
9366 AVX512AIi8Base, EVEX_4V, EVEX_V128;
9367 defm Z256 : avx512_fixupimm_packed<0x54, "vfixupimm", X86VFixupimm, _Vec.info256>,
9368 AVX512AIi8Base, EVEX_4V, EVEX_V256;
// Concrete VFIXUPIMM instructions: scalar SS/SD forms (opcode 0x55, tuple
// type CD8VT1) and packed PS/PD forms (full-vector CD8VF displacement
// scaling); 64-bit-element variants carry VEX_W.
9372 defm VFIXUPIMMSS : avx512_fixupimm_scalar<0x55, "vfixupimm", X86VFixupimmScalar,
9373 f32x_info, v4i32x_info>,
9374 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<32, CD8VT1>;
9375 defm VFIXUPIMMSD : avx512_fixupimm_scalar<0x55, "vfixupimm", X86VFixupimmScalar,
9376 f64x_info, v2i64x_info>,
9377 AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
9378 defm VFIXUPIMMPS : avx512_fixupimm_packed_all<avx512vl_f32_info>,
9379 EVEX_CD8<32, CD8VF>;
9380 defm VFIXUPIMMPD : avx512_fixupimm_packed_all<avx512vl_f64_info>,
9381 EVEX_CD8<64, CD8VF>, VEX_W;
9385 // Patterns used to select SSE scalar fp arithmetic instructions from
9388 // (1) a scalar fp operation followed by a blend
9390 // The effect is that the backend no longer emits unnecessary vector
9391 // insert instructions immediately after SSE scalar fp instructions
9392 // like addss or mulss.
9394 // For example, given the following code:
9395 // __m128 foo(__m128 A, __m128 B) {
9400 // Previously we generated:
9401 //   addss %xmm0, %xmm1
9402 //   movss %xmm1, %xmm0
// We now generate:
9405 //   addss %xmm1, %xmm0
9407 // (2) a vector packed single/double fp operation followed by a vector insert
9409 // The effect is that the backend converts the packed fp instruction
9410 // followed by a vector insert into a single SSE scalar fp instruction.
9412 // For example, given the following code:
9413 // __m128 foo(__m128 A, __m128 B) {
9414 // __m128 C = A + B;
9415 //     return (__m128) {C[0], A[1], A[2], A[3]};
9418 // Previously we generated:
9419 //   addps %xmm0, %xmm1
9420 //   movss %xmm1, %xmm0
// We now generate:
9423 //   addss %xmm1, %xmm0
9425 // TODO: Some canonicalization in lowering would simplify the number of
9426 // patterns we have to try to match.
// Selects the AVX512 scalar single-precision instruction ("V"#OpcPrefix#SS)
// for f32 math feeding an element-0 insert (movss or blend with imm 1),
// including a masked variant via X86selects.  COPY_TO_REGCLASS bridges the
// FR32X scalar operand into the VR128X class the instruction expects.
9427 multiclass AVX512_scalar_math_f32_patterns<SDNode Op, string OpcPrefix> {
9428 let Predicates = [HasAVX512] in {
9429 // extracted scalar math op with insert via movss
9430 def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst), (v4f32 (scalar_to_vector
9431 (Op (f32 (extractelt (v4f32 VR128X:$dst), (iPTR 0))),
9433 (!cast<I>("V"#OpcPrefix#SSZrr_Int) v4f32:$dst,
9434 (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
9436 // extracted scalar math op with insert via blend
9437 def : Pat<(v4f32 (X86Blendi (v4f32 VR128X:$dst), (v4f32 (scalar_to_vector
9438 (Op (f32 (extractelt (v4f32 VR128X:$dst), (iPTR 0))),
9439 FR32X:$src))), (i8 1))),
9440 (!cast<I>("V"#OpcPrefix#SSZrr_Int) v4f32:$dst,
9441 (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
9443 // vector math op with insert via movss
9444 def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst),
9445 (Op (v4f32 VR128X:$dst), (v4f32 VR128X:$src)))),
9446 (!cast<I>("V"#OpcPrefix#SSZrr_Int) v4f32:$dst, v4f32:$src)>;
9448 // vector math op with insert via blend
9449 def : Pat<(v4f32 (X86Blendi (v4f32 VR128X:$dst),
9450 (Op (v4f32 VR128X:$dst), (v4f32 VR128X:$src)), (i8 1))),
9451 (!cast<I>("V"#OpcPrefix#SSZrr_Int) v4f32:$dst, v4f32:$src)>;
9453 // extracted masked scalar math op with insert via movss
9454 def : Pat<(X86Movss (v4f32 VR128X:$src1),
9456 (X86selects VK1WM:$mask,
9457 (Op (f32 (extractelt (v4f32 VR128X:$src1), (iPTR 0))),
9460 (!cast<I>("V"#OpcPrefix#SSZrr_Intk) (COPY_TO_REGCLASS FR32X:$src0, VR128X),
9461 VK1WM:$mask, v4f32:$src1,
9462 (COPY_TO_REGCLASS FR32X:$src2, VR128X))>;
// Apply the f32 scalar-math patterns to the four basic arithmetic ops.
9466 defm : AVX512_scalar_math_f32_patterns<fadd, "ADD">;
9467 defm : AVX512_scalar_math_f32_patterns<fsub, "SUB">;
9468 defm : AVX512_scalar_math_f32_patterns<fmul, "MUL">;
9469 defm : AVX512_scalar_math_f32_patterns<fdiv, "DIV">;
// Double-precision counterpart of AVX512_scalar_math_f32_patterns: selects
// "V"#OpcPrefix#SD for f64 math feeding an element-0 insert (movsd or blend
// with imm 1), plus the masked variant via X86selects.
9471 multiclass AVX512_scalar_math_f64_patterns<SDNode Op, string OpcPrefix> {
9472 let Predicates = [HasAVX512] in {
9473 // extracted scalar math op with insert via movsd
9474 def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst), (v2f64 (scalar_to_vector
9475 (Op (f64 (extractelt (v2f64 VR128X:$dst), (iPTR 0))),
9477 (!cast<I>("V"#OpcPrefix#SDZrr_Int) v2f64:$dst,
9478 (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
9480 // extracted scalar math op with insert via blend
9481 def : Pat<(v2f64 (X86Blendi (v2f64 VR128X:$dst), (v2f64 (scalar_to_vector
9482 (Op (f64 (extractelt (v2f64 VR128X:$dst), (iPTR 0))),
9483 FR64X:$src))), (i8 1))),
9484 (!cast<I>("V"#OpcPrefix#SDZrr_Int) v2f64:$dst,
9485 (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
9487 // vector math op with insert via movsd
9488 def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst),
9489 (Op (v2f64 VR128X:$dst), (v2f64 VR128X:$src)))),
9490 (!cast<I>("V"#OpcPrefix#SDZrr_Int) v2f64:$dst, v2f64:$src)>;
9492 // vector math op with insert via blend
9493 def : Pat<(v2f64 (X86Blendi (v2f64 VR128X:$dst),
9494 (Op (v2f64 VR128X:$dst), (v2f64 VR128X:$src)), (i8 1))),
9495 (!cast<I>("V"#OpcPrefix#SDZrr_Int) v2f64:$dst, v2f64:$src)>;
9497 // extracted masked scalar math op with insert via movsd
9498 def : Pat<(X86Movsd (v2f64 VR128X:$src1),
9500 (X86selects VK1WM:$mask,
9501 (Op (f64 (extractelt (v2f64 VR128X:$src1), (iPTR 0))),
9504 (!cast<I>("V"#OpcPrefix#SDZrr_Intk) (COPY_TO_REGCLASS FR64X:$src0, VR128X),
9505 VK1WM:$mask, v2f64:$src1,
9506 (COPY_TO_REGCLASS FR64X:$src2, VR128X))>;
// Apply the f64 scalar-math patterns to the four basic arithmetic ops.
9510 defm : AVX512_scalar_math_f64_patterns<fadd, "ADD">;
9511 defm : AVX512_scalar_math_f64_patterns<fsub, "SUB">;
9512 defm : AVX512_scalar_math_f64_patterns<fmul, "MUL">;
9513 defm : AVX512_scalar_math_f64_patterns<fdiv, "DIV">;