//===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  // InstrSchedModel info.
  X86FoldableSchedWrite Sched = WriteFAdd;
}

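// Illustrative sketch only (FOO is a hypothetical name, not a def from this
// file): an instruction class consumes an OpndItins bundle through its rr/rm
// itinerary slots together with its Sched entry, in the same way the
// multiclasses below do:
//   def FOOrr : ... , itins.rr>, Sched<[itins.Sched]>;
//   def FOOrm : ... , itins.rm>, Sched<[itins.Sched.Folded]>;
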
class SizeItins<OpndItins arg_s, OpndItins arg_d> {
  OpndItins s = arg_s;
  OpndItins d = arg_d;
}

class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm,
                     InstrItinClass arg_ri> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  InstrItinClass ri = arg_ri;
}

// scalar

let Sched = WriteFAdd in {
def SSE_ALU_F32S : OpndItins<
  IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM
>;

def SSE_ALU_F64S : OpndItins<
  IIC_SSE_ALU_F64S_RR, IIC_SSE_ALU_F64S_RM
>;
}

def SSE_ALU_ITINS_S : SizeItins<
  SSE_ALU_F32S, SSE_ALU_F64S
>;

let Sched = WriteFMul in {
def SSE_MUL_F32S : OpndItins<
  IIC_SSE_MUL_F32S_RR, IIC_SSE_MUL_F32S_RM
>;

def SSE_MUL_F64S : OpndItins<
  IIC_SSE_MUL_F64S_RR, IIC_SSE_MUL_F64S_RM
>;
}

def SSE_MUL_ITINS_S : SizeItins<
  SSE_MUL_F32S, SSE_MUL_F64S
>;

let Sched = WriteFDiv in {
def SSE_DIV_F32S : OpndItins<
  IIC_SSE_DIV_F32S_RR, IIC_SSE_DIV_F32S_RM
>;

def SSE_DIV_F64S : OpndItins<
  IIC_SSE_DIV_F64S_RR, IIC_SSE_DIV_F64S_RM
>;
}

def SSE_DIV_ITINS_S : SizeItins<
  SSE_DIV_F32S, SSE_DIV_F64S
>;

// parallel

let Sched = WriteFAdd in {
def SSE_ALU_F32P : OpndItins<
  IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
>;

def SSE_ALU_F64P : OpndItins<
  IIC_SSE_ALU_F64P_RR, IIC_SSE_ALU_F64P_RM
>;
}

def SSE_ALU_ITINS_P : SizeItins<
  SSE_ALU_F32P, SSE_ALU_F64P
>;

let Sched = WriteFMul in {
def SSE_MUL_F32P : OpndItins<
  IIC_SSE_MUL_F32P_RR, IIC_SSE_MUL_F32P_RM
>;

def SSE_MUL_F64P : OpndItins<
  IIC_SSE_MUL_F64P_RR, IIC_SSE_MUL_F64P_RM
>;
}

def SSE_MUL_ITINS_P : SizeItins<
  SSE_MUL_F32P, SSE_MUL_F64P
>;

let Sched = WriteFDiv in {
def SSE_DIV_F32P : OpndItins<
  IIC_SSE_DIV_F32P_RR, IIC_SSE_DIV_F32P_RM
>;

def SSE_DIV_F64P : OpndItins<
  IIC_SSE_DIV_F64P_RR, IIC_SSE_DIV_F64P_RM
>;
}

def SSE_DIV_ITINS_P : SizeItins<
  SSE_DIV_F32P, SSE_DIV_F64P
>;

let Sched = WriteVecLogic in
def SSE_VEC_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

def SSE_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

let Sched = WriteVecALU in {
def SSE_INTALU_ITINS_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

def SSE_INTALUQ_ITINS_P : OpndItins<
  IIC_SSE_INTALUQ_P_RR, IIC_SSE_INTALUQ_P_RM
>;
}

let Sched = WriteVecIMul in
def SSE_INTMUL_ITINS_P : OpndItins<
  IIC_SSE_INTMUL_P_RR, IIC_SSE_INTMUL_P_RM
>;

def SSE_INTSHIFT_ITINS_P : ShiftOpndItins<
  IIC_SSE_INTSH_P_RR, IIC_SSE_INTSH_P_RM, IIC_SSE_INTSH_P_RI
>;

def SSE_MOVA_ITINS : OpndItins<
  IIC_SSE_MOVA_P_RR, IIC_SSE_MOVA_P_RM
>;

def SSE_MOVU_ITINS : OpndItins<
  IIC_SSE_MOVU_P_RR, IIC_SSE_MOVU_P_RM
>;

def SSE_DPPD_ITINS : OpndItins<
  IIC_SSE_DPPD_RR, IIC_SSE_DPPD_RM
>;

def SSE_DPPS_ITINS : OpndItins<
  IIC_SSE_DPPS_RR, IIC_SSE_DPPS_RM
>;

def DEFAULT_ITINS : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

def SSE_EXTRACT_ITINS : OpndItins<
  IIC_SSE_EXTRACTPS_RR, IIC_SSE_EXTRACTPS_RM
>;

def SSE_INSERT_ITINS : OpndItins<
  IIC_SSE_INSERTPS_RR, IIC_SSE_INSERTPS_RM
>;

let Sched = WriteMPSAD in
def SSE_MPSADBW_ITINS : OpndItins<
  IIC_SSE_MPSADBW_RR, IIC_SSE_MPSADBW_RM
>;

let Sched = WriteVecIMul in
def SSE_PMULLD_ITINS : OpndItins<
  IIC_SSE_PMULLD_RR, IIC_SSE_PMULLD_RM
>;

// Definitions for backward compatibility.
// The instructions mapped onto these definitions use a different itinerary
// than the actual scheduling model.
let Sched = WriteShuffle in
def DEFAULT_ITINS_SHUFFLESCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteVecIMul in
def DEFAULT_ITINS_VECIMULSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteShuffle in
def SSE_INTALU_ITINS_SHUFF_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

let Sched = WriteMPSAD in
def DEFAULT_ITINS_MPSADSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteFBlend in
def DEFAULT_ITINS_FBLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteBlend in
def DEFAULT_ITINS_BLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteVarBlend in
def DEFAULT_ITINS_VARBLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteFBlend in
def SSE_INTALU_ITINS_FBLEND_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

let Sched = WriteBlend in
def SSE_INTALU_ITINS_BLEND_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//

/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCommutable = 1 in {
  def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], itins.rr, d>,
              Sched<[itins.Sched]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], itins.rm, d>,
              Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

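// Illustrative sketch only (the real instantiations go through the binop
// multiclasses later in this file): a defm such as
//   defm ADD : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem,
//                              SSEPackedSingle, SSE_ALU_F32S>;
// would produce ADDrr and ADDrm records, with the default Is2Addr = 1
// selecting the two-operand "$src2, $dst" asm string rather than the
// three-operand AVX form.
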
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCodeGenOnly = 1 in {
  def rr_Int : SI_Int<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
               !if(Is2Addr,
                   !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                   !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
               [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
                 RC:$src1, RC:$src2))], itins.rr, d>,
               Sched<[itins.Sched]>;
  def rm_Int : SI_Int<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
               !if(Is2Addr,
                   !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                   !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
               [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                 SSEVer, "_", OpcodeStr, FPSizeStr))
                 RC:$src1, mem_cpat:$src2))], itins.rm, d>,
               Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
}

/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
              Sched<[itins.Sched]>;

  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
              itins.rm, d>,
              Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1, hasSideEffects = 0 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, NoItinerary, d>,
       Sched<[WriteVecLogic]>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rm, NoItinerary, d>,
       Sched<[WriteVecLogicLd, ReadAfterLd]>;
}

//===----------------------------------------------------------------------===//
// Non-instruction patterns
//===----------------------------------------------------------------------===//

// A vector extract of the first f32/f64 position is a subregister copy
def : Pat<(f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
def : Pat<(f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;

// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (iPTR 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;

def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (iPTR 0))),
          (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (iPTR 0))),
          (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;

// A 128-bit subvector insert to the first 256-bit vector position
// is a subregister copy that needs no instruction.
let AddedComplexity = 25 in { // to give priority over vinsertf128rm
def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (iPTR 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
}

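// The IMPLICIT_DEF here stands for the undef upper half of the
// insert_subvector node: the upper 128 bits stay undefined, so the insert
// lowers to a plain subregister copy with no vinsertf128 or zeroing emitted.
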
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;

// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion.
def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
def : Pat<(f128  (bitconvert (i128  FR128:$src))), (f128  FR128:$src)>;
def : Pat<(i128  (bitconvert (f128  FR128:$src))), (i128  FR128:$src)>;

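// For example, a (v4i32 (bitconvert (v2f64 VR128:$src))) node selects to no
// instruction at all: the same XMM register is simply reinterpreted with the
// new element type by the patterns above.
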
// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion.
def : Pat<(v4i64  (bitconvert (v8i32 VR256:$src))),  (v4i64 VR256:$src)>;
def : Pat<(v4i64  (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
def : Pat<(v4i64  (bitconvert (v32i8 VR256:$src))),  (v4i64 VR256:$src)>;
def : Pat<(v4i64  (bitconvert (v8f32 VR256:$src))),  (v4i64 VR256:$src)>;
def : Pat<(v4i64  (bitconvert (v4f64 VR256:$src))),  (v4i64 VR256:$src)>;
def : Pat<(v8i32  (bitconvert (v4i64 VR256:$src))),  (v8i32 VR256:$src)>;
def : Pat<(v8i32  (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
def : Pat<(v8i32  (bitconvert (v32i8 VR256:$src))),  (v8i32 VR256:$src)>;
def : Pat<(v8i32  (bitconvert (v4f64 VR256:$src))),  (v8i32 VR256:$src)>;
def : Pat<(v8i32  (bitconvert (v8f32 VR256:$src))),  (v8i32 VR256:$src)>;
def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))),  (v16i16 VR256:$src)>;
def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))),  (v16i16 VR256:$src)>;
def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))),  (v16i16 VR256:$src)>;
def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))),  (v16i16 VR256:$src)>;
def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))),  (v16i16 VR256:$src)>;
def : Pat<(v32i8  (bitconvert (v4i64 VR256:$src))),  (v32i8 VR256:$src)>;
def : Pat<(v32i8  (bitconvert (v8i32 VR256:$src))),  (v32i8 VR256:$src)>;
def : Pat<(v32i8  (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
def : Pat<(v32i8  (bitconvert (v4f64 VR256:$src))),  (v32i8 VR256:$src)>;
def : Pat<(v32i8  (bitconvert (v8f32 VR256:$src))),  (v32i8 VR256:$src)>;
def : Pat<(v8f32  (bitconvert (v4i64 VR256:$src))),  (v8f32 VR256:$src)>;
def : Pat<(v8f32  (bitconvert (v8i32 VR256:$src))),  (v8f32 VR256:$src)>;
def : Pat<(v8f32  (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
def : Pat<(v8f32  (bitconvert (v32i8 VR256:$src))),  (v8f32 VR256:$src)>;
def : Pat<(v8f32  (bitconvert (v4f64 VR256:$src))),  (v8f32 VR256:$src)>;
def : Pat<(v4f64  (bitconvert (v4i64 VR256:$src))),  (v4f64 VR256:$src)>;
def : Pat<(v4f64  (bitconvert (v8i32 VR256:$src))),  (v4f64 VR256:$src)>;
def : Pat<(v4f64  (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
def : Pat<(v4f64  (bitconvert (v32i8 VR256:$src))),  (v4f64 VR256:$src)>;
def : Pat<(v4f64  (bitconvert (v8f32 VR256:$src))),  (v4f64 VR256:$src)>;

// Alias instructions that map fld0 to xorps for sse or vxorps for avx.
// This is expanded by ExpandPostRAPseudos.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
                   [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>;
  def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
                   [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2]>;
}

//===----------------------------------------------------------------------===//
// AVX & SSE - Zero/One Vectors
//===----------------------------------------------------------------------===//

// Alias instruction that maps zero vector to pxor / xorp* for sse.
// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
// swizzled by ExecutionDepsFix to pxor.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [NoVLX], SchedRW = [WriteZero] in {
def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
               [(set VR128:$dst, (v4f32 immAllZerosV))]>;
}

let Predicates = [NoVLX] in
def : Pat<(v4i32 immAllZerosV), (V_SET0)>;

// The same as done above but for AVX. The 256-bit AVX1 ISA doesn't support PI,
// and doesn't need it because on Sandy Bridge the register is set to zero
// at the rename stage without using any execution unit, so SET0PSY
// and SET0PDY can be used for vector int instructions without penalty.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX, NoVLX], SchedRW = [WriteZero] in {
def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                 [(set VR256:$dst, (v8i32 immAllZerosV))]>;
}

// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                       [(set VR128:$dst, (v4i32 immAllOnesV))]>;
  let Predicates = [HasAVX2] in
  def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                          [(set VR256:$dst, (v8i32 immAllOnesV))]>;
}

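// These pseudos are rewritten after register allocation; the all-ones value
// is materialized with a pcmpeqd (vpcmpeqd for the AVX2 version) of a
// register against itself, which sets every bit without touching memory.
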
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move FP Scalar Instructions
//
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; register-to-register
// movss/movsd is not modeled as an INSERT_SUBREG because INSERT_SUBREG requires
// that the insert be implementable in terms of a copy, and, as just mentioned,
// we don't use movss/movsd for copies.
//===----------------------------------------------------------------------===//

multiclass sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt,
                         X86MemOperand x86memop, string base_opc,
                         string asm_opr, Domain d = GenericDomain> {
  def rr : SI<0x10, MRMSrcReg, (outs VR128:$dst),
              (ins VR128:$src1, RC:$src2),
              !strconcat(base_opc, asm_opr),
              [(set VR128:$dst, (vt (OpNode VR128:$src1,
                                 (scalar_to_vector RC:$src2))))],
              IIC_SSE_MOV_S_RR, d>, Sched<[WriteFShuffle]>;

  // For the disassembler
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                  (ins VR128:$src1, RC:$src2),
                  !strconcat(base_opc, asm_opr),
                  [], IIC_SSE_MOV_S_RR>, Sched<[WriteFShuffle]>;
}

multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
                      X86MemOperand x86memop, string OpcodeStr,
                      Domain d = GenericDomain> {
  // AVX
  defm V#NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}", d>,
                              VEX_4V, VEX_LIG;

  def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
                     VEX, VEX_LIG, Sched<[WriteStore]>;
  // SSE1 & 2
  let Constraints = "$src1 = $dst" in {
    defm NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
                              "\t{$src2, $dst|$dst, $src2}", d>;
  }

  def NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
                   Sched<[WriteStore]>;
}

// Loading from memory automatically zeroing upper bits.
multiclass sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                         PatFrag mem_pat, string OpcodeStr,
                         Domain d = GenericDomain> {
  def V#NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set RC:$dst, (mem_pat addr:$src))],
                     IIC_SSE_MOV_S_RM, d>, VEX, VEX_LIG, Sched<[WriteLoad]>;
  def NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set RC:$dst, (mem_pat addr:$src))],
                   IIC_SSE_MOV_S_RM, d>, Sched<[WriteLoad]>;
}

defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss",
                        SSEPackedSingle>, XS;
defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd",
                        SSEPackedDouble>, XD;

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  defm MOVSS : sse12_move_rm<FR32, f32mem, loadf32, "movss",
                             SSEPackedSingle>, XS;

  let AddedComplexity = 20 in
  defm MOVSD : sse12_move_rm<FR64, f64mem, loadf64, "movsd",
                             SSEPackedDouble>, XD;
}

let Predicates = [UseAVX] in {
  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;

  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
  def : Pat<(v4f64 (X86vzload addr:$src)),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;

  // Shuffle with VMOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4i32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4i32 VR128:$src2), FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4f32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4f32 VR128:$src2), FR32))>;

  // 256-bit variants
  def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // Shuffle with VMOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // 256-bit variants
  def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
  // is during lowering, where it's not possible to recognize the fold because
  // it has two uses through a bitcast. One use disappears at isel time and the
  // fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}

let Predicates = [UseSSE1] in {
  let Predicates = [NoSSE41], AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSS to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (MOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  }

  let AddedComplexity = 20 in {
  // MOVSSrm already zeros the high parts of the register.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSSmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR32))>;

  // Shuffle with MOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
}

let Predicates = [UseSSE2] in {
  let Predicates = [NoSSE41], AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSD to the lower bits.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
  }

  let AddedComplexity = 20 in {
  // MOVSDrm already zeros the high parts of the register.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  }

  // Shuffle with MOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
  // is during lowering, where it's not possible to recognize the fold because
  // it has two uses through a bitcast. One use disappears at isel time and the
  // fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}

// Aliases to help the assembler pick two-byte VEX encodings by swapping the
// operands relative to the normal instructions to use VEX.R instead of VEX.B.
def : InstAlias<"vmovss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                (VMOVSSrr_REV VR128L:$dst, VR128:$src1, VR128H:$src2), 0>;
def : InstAlias<"vmovsd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                (VMOVSDrr_REV VR128L:$dst, VR128:$src1, VR128H:$src2), 0>;

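// Background for the trick above: the two-byte C5 VEX prefix can encode
// VEX.R but not VEX.B, so a high register (xmm8-xmm15) in the ModRM.rm slot
// forces the three-byte C4 form. Emitting the reversed (MRMDestReg) opcode
// moves that register into the ModRM.reg slot, where VEX.R covers it and the
// shorter prefix still applies.
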
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            OpndItins itins,
                            bit IsReMaterializable = 1> {
let hasSideEffects = 0 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>,
              Sched<[WriteFShuffle]>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], itins.rm, d>,
              Sched<[WriteLoad]>;
}

let Predicates = [HasAVX, NoVLX] in {
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                PS, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                                "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                PD, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                                "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                PS, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                                "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                PD, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                                 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                 PS, VEX, VEX_L;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                                 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                 PD, VEX, VEX_L;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                                 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                 PS, VEX, VEX_L;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                                 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                 PD, VEX, VEX_L;
}

let Predicates = [UseSSE1] in {
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                               "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                               PS;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                               "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                               PS;
}
let Predicates = [UseSSE2] in {
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                               "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                               PD;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                               "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                               PD;
}

let SchedRW = [WriteStore], Predicates = [HasAVX, NoVLX] in {
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVA_P_MR>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVA_P_MR>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movups\t{$src, $dst|$dst, $src}",
                     [(store (v4f32 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVU_P_MR>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movupd\t{$src, $dst|$dst, $src}",
                     [(store (v2f64 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVU_P_MR>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movaps\t{$src, $dst|$dst, $src}",
                      [(alignedstore256 (v8f32 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movapd\t{$src, $dst|$dst, $src}",
                      [(alignedstore256 (v4f64 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movups\t{$src, $dst|$dst, $src}",
                      [(store (v8f32 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movupd\t{$src, $dst|$dst, $src}",
                      [(store (v4f64 VR256:$src), addr:$dst)],
                      IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
} // SchedRW

// For disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteFShuffle] in {
  def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movaps\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movapd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movups\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movupd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movaps\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movapd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movups\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
  def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movupd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}

// Aliases to help the assembler pick two-byte VEX encodings by swapping the
// operands relative to the normal instructions to use VEX.R instead of VEX.B.
def : InstAlias<"vmovaps\t{$src, $dst|$dst, $src}",
                (VMOVAPSrr_REV VR128L:$dst, VR128H:$src), 0>;
def : InstAlias<"vmovapd\t{$src, $dst|$dst, $src}",
                (VMOVAPDrr_REV VR128L:$dst, VR128H:$src), 0>;
def : InstAlias<"vmovups\t{$src, $dst|$dst, $src}",
                (VMOVUPSrr_REV VR128L:$dst, VR128H:$src), 0>;
def : InstAlias<"vmovupd\t{$src, $dst|$dst, $src}",
                (VMOVUPDrr_REV VR128L:$dst, VR128H:$src), 0>;
def : InstAlias<"vmovaps\t{$src, $dst|$dst, $src}",
                (VMOVAPSYrr_REV VR256L:$dst, VR256H:$src), 0>;
def : InstAlias<"vmovapd\t{$src, $dst|$dst, $src}",
                (VMOVAPDYrr_REV VR256L:$dst, VR256H:$src), 0>;
def : InstAlias<"vmovups\t{$src, $dst|$dst, $src}",
                (VMOVUPSYrr_REV VR256L:$dst, VR256H:$src), 0>;
def : InstAlias<"vmovupd\t{$src, $dst|$dst, $src}",
                (VMOVUPDYrr_REV VR256L:$dst, VR256H:$src), 0>;

let SchedRW = [WriteStore] in {
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVA_P_MR>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)],
                   IIC_SSE_MOVU_P_MR>;
} // SchedRW

// For disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteFShuffle] in {
  def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movaps\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movapd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movups\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
  def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movupd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
}

// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX, NoVLX] in {
  // 128-bit load/store
  def : Pat<(alignedloadv2i64 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (VMOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore256 (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;

  // Special patterns for storing subvector extracts of lower 128-bits
  // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
  def : Pat<(alignedstore (v2f64 (extract_subvector
                                  (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v4f32 (extract_subvector
                                  (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v2i64 (extract_subvector
                                  (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v4i32 (extract_subvector
                                  (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v8i16 (extract_subvector
                                  (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(alignedstore (v16i8 (extract_subvector
                                  (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;

  def : Pat<(store (v2f64 (extract_subvector
                           (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v4f32 (extract_subvector
                           (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v2i64 (extract_subvector
                           (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v4i32 (extract_subvector
                           (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v8i16 (extract_subvector
                           (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
  def : Pat<(store (v16i8 (extract_subvector
                           (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
}

let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  // 128-bit load/store
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
}

// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
let Predicates = [UseSSE1] in {
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}

// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
let isCodeGenOnly = 1 in {
  def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                         "movaps\t{$src, $dst|$dst, $src}",
                         [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
  def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                         "movapd\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
  def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                       "movaps\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                       IIC_SSE_MOVA_P_RM>;
  def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                       "movapd\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                       IIC_SSE_MOVA_P_RM>;
}
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low packed FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDNode psnode, SDNode pdnode,
                                      string base_opc, string asm_opr,
                                      InstrItinClass itin> {
  def PSrm : PI<opc, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                !strconcat(base_opc, "s", asm_opr),
                [(set VR128:$dst,
                  (psnode VR128:$src1,
                    (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
                itin, SSEPackedSingle>, PS,
                Sched<[WriteFShuffleLd, ReadAfterLd]>;

  def PDrm : PI<opc, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                !strconcat(base_opc, "d", asm_opr),
                [(set VR128:$dst, (v2f64 (pdnode VR128:$src1,
                  (scalar_to_vector (loadf64 addr:$src2)))))],
                itin, SSEPackedDouble>, PD,
                Sched<[WriteFShuffleLd, ReadAfterLd]>;
}

multiclass sse12_mov_hilo_packed<bits<8>opc, SDNode psnode, SDNode pdnode,
                                 string base_opc, InstrItinClass itin> {
  let Predicates = [UseAVX] in
  defm V#NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
                                "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                                itin>, VEX_4V;

  let Constraints = "$src1 = $dst" in
  defm NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
                                "\t{$src2, $dst|$dst, $src2}",
                                itin>;
}

let AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, X86Movlps, X86Movlpd, "movlp",
                                    IIC_SSE_MOV_LH>;
}

let SchedRW = [WriteStore] in {
let Predicates = [UseAVX] in {
def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128:$src)),
                                   (iPTR 0))), addr:$dst)],
                     IIC_SSE_MOV_LH>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (extractelt (v2f64 VR128:$src),
                                   (iPTR 0))), addr:$dst)],
                     IIC_SSE_MOV_LH>, VEX;
}
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (extractelt (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)],
                   IIC_SSE_MOV_LH>;
} // SchedRW

let Predicates = [UseAVX] in {
  // Shuffle with VMOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;

  // Shuffle with VMOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1,
                    (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
}

let Predicates = [UseSSE1] in {
  // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
  def : Pat<(store (i64 (extractelt (bc_v2i64 (v4f32 VR128:$src2)),
                                 (iPTR 0))), addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;

  // Shuffle with MOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlps VR128:$src1,
                  (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                   (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
}

let Predicates = [UseSSE2] in {
  // Shuffle with MOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1,
                    (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Hi packed FP Instructions
//===----------------------------------------------------------------------===//

let AddedComplexity = 20 in {
  defm MOVH : sse12_mov_hilo_packed<0x16, X86Movlhps, X86Movlhpd, "movhp",
                                    IIC_SSE_MOV_LH>;
}

let SchedRW = [WriteStore] in {
// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
let Predicates = [UseAVX] in {
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (extractelt
                                   (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                              (bc_v2f64 (v4f32 VR128:$src))),
                                   (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (extractelt
                                   (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                   (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
}
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (extractelt
                                 (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                            (bc_v2f64 (v4f32 VR128:$src))),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (extractelt
                                 (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                 (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
} // SchedRW

let Predicates = [UseAVX] in {
  // VMOVHPS patterns
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  // VMOVHPD patterns

  // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
  // is during lowering, where it's not possible to recognize the load fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;
  // Also handle an i64 load because that may get selected as a faster way to
  // load f64.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (f64 (extractelt
                          (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
                          (iPTR 0))), addr:$dst),
            (VMOVHPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [UseSSE1] in {
  // MOVHPS patterns
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
}

let Predicates = [UseSSE2] in {
  // MOVHPD patterns

  // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
  // is during lowering, where it's not possible to recognize the load fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (scalar_to_vector (loadf64 addr:$src2)))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;
  // Also handle an i64 load because that may get selected as a faster way to
  // load f64.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (f64 (extractelt
                          (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
                          (iPTR 0))), addr:$dst),
            (MOVHPDmr addr:$dst, VR128:$src)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
//===----------------------------------------------------------------------===//

let AddedComplexity = 20, Predicates = [UseAVX] in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
                        IIC_SSE_MOV_LH>,
                        VEX_4V, Sched<[WriteFShuffle]>;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
                        IIC_SSE_MOV_LH>,
                        VEX_4V, Sched<[WriteFShuffle]>;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
                      IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
                      IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
}

let Predicates = [UseAVX] in {
  // MOVLHPS patterns
  def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
            (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
            (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

  // MOVHLPS patterns
  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
}

let Predicates = [UseSSE1] in {
  // MOVLHPS patterns
  def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
            (MOVLHPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
            (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

  // MOVHLPS patterns
  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (MOVHLPSrr VR128:$src1, VR128:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

def SSE_CVT_PD : OpndItins<
  IIC_SSE_CVT_PD_RR, IIC_SSE_CVT_PD_RM
>;

let Sched = WriteCvtI2F in
def SSE_CVT_PS : OpndItins<
  IIC_SSE_CVT_PS_RR, IIC_SSE_CVT_PS_RM
>;

let Sched = WriteCvtI2F in
def SSE_CVT_Scalar : OpndItins<
  IIC_SSE_CVT_Scalar_RR, IIC_SSE_CVT_Scalar_RM
>;

let Sched = WriteCvtF2I in
def SSE_CVT_SS2SI_32 : OpndItins<
  IIC_SSE_CVT_SS2SI32_RR, IIC_SSE_CVT_SS2SI32_RM
>;

let Sched = WriteCvtF2I in
def SSE_CVT_SS2SI_64 : OpndItins<
  IIC_SSE_CVT_SS2SI64_RR, IIC_SSE_CVT_SS2SI64_RM
>;

let Sched = WriteCvtF2I in
def SSE_CVT_SD2SI : OpndItins<
  IIC_SSE_CVT_SD2SI_RR, IIC_SSE_CVT_SD2SI_RM
>;

1439 // FIXME: We probably want to match the rm form only when optimizing for
1440 // size, to avoid false dependencies (see sse_fp_unop_s for details).
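// (Roughly: the folded form partially updates its destination register, so it
// inherits a false dependence on whatever last wrote that register; a separate
// load plus a reg-reg convert does not.)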
1441 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1442 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
1443 string asm, OpndItins itins> {
1444 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1445 [(set DstRC:$dst, (OpNode SrcRC:$src))],
1446 itins.rr>, Sched<[itins.Sched]>;
1447 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1448 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
1449 itins.rm>, Sched<[itins.Sched.Folded]>;
1452 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1453 X86MemOperand x86memop, string asm, Domain d,
1454 OpndItins itins> {
1455 let hasSideEffects = 0 in {
1456 def rr : I<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1457 [], itins.rr, d>, Sched<[itins.Sched]>;
1459 def rm : I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1460 [], itins.rm, d>, Sched<[itins.Sched.Folded]>;
1464 // FIXME: We probably want to match the rm form only when optimizing for
1465 // size, to avoid false dependencies (see sse_fp_unop_s for details).
1466 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1467 X86MemOperand x86memop, string asm> {
1468 let hasSideEffects = 0, Predicates = [UseAVX] in {
1469 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
1470 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
1471 Sched<[WriteCvtI2F]>;
1473 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1474 (ins DstRC:$src1, x86memop:$src),
1475 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
1476 Sched<[WriteCvtI2FLd, ReadAfterLd]>;
1477 } // hasSideEffects = 0
1480 let Predicates = [UseAVX] in {
1481 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1482 "cvttss2si\t{$src, $dst|$dst, $src}",
1485 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1486 "cvttss2si\t{$src, $dst|$dst, $src}",
1487 SSE_CVT_SS2SI_64>,
1488 XS, VEX, VEX_W, VEX_LIG;
1489 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1490 "cvttsd2si\t{$src, $dst|$dst, $src}",
1493 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1494 "cvttsd2si\t{$src, $dst|$dst, $src}",
1495 SSE_CVT_SD2SI>,
1496 XD, VEX, VEX_W, VEX_LIG;
1498 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
1499 (VCVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
1500 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
1501 (VCVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
1502 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
1503 (VCVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
1504 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
1505 (VCVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
1506 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
1507 (VCVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
1508 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
1509 (VCVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
1510 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
1511 (VCVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
1512 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
1513 (VCVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
1515 // The assembler can recognize rr 64-bit instructions by seeing an rxx
1516 // register, but it cannot when only memory operands are used; provide
1517 // explicit "l" and "q" assembly forms below to address this where
1518 // appropriate.
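// For example, "vcvtsi2ss (%rax), %xmm0, %xmm0" leaves the integer operand
// width ambiguous, whereas the suffixed "vcvtsi2ssl"/"vcvtsi2ssq" spellings
// defined below pin the memory operand to 32 or 64 bits.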
1519 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss{l}">,
1520 XS, VEX_4V, VEX_LIG;
1521 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">,
1522 XS, VEX_4V, VEX_W, VEX_LIG;
1523 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">,
1524 XD, VEX_4V, VEX_LIG;
1525 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
1526 XD, VEX_4V, VEX_W, VEX_LIG;
1528 let Predicates = [UseAVX] in {
1529 def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1530 (VCVTSI2SSrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
1531 def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1532 (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
1534 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
1535 (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
1536 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
1537 (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
1538 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
1539 (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
1540 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
1541 (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;
1543 def : Pat<(f32 (sint_to_fp GR32:$src)),
1544 (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
1545 def : Pat<(f32 (sint_to_fp GR64:$src)),
1546 (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
1547 def : Pat<(f64 (sint_to_fp GR32:$src)),
1548 (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
1549 def : Pat<(f64 (sint_to_fp GR64:$src)),
1550 (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
1553 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1554 "cvttss2si\t{$src, $dst|$dst, $src}",
1555 SSE_CVT_SS2SI_32>, XS;
1556 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1557 "cvttss2si\t{$src, $dst|$dst, $src}",
1558 SSE_CVT_SS2SI_64>, XS, REX_W;
1559 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1560 "cvttsd2si\t{$src, $dst|$dst, $src}",
1562 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1563 "cvttsd2si\t{$src, $dst|$dst, $src}",
1564 SSE_CVT_SD2SI>, XD, REX_W;
1565 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
1566 "cvtsi2ss{l}\t{$src, $dst|$dst, $src}",
1567 SSE_CVT_Scalar>, XS;
1568 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
1569 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1570 SSE_CVT_Scalar>, XS, REX_W;
1571 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
1572 "cvtsi2sd{l}\t{$src, $dst|$dst, $src}",
1573 SSE_CVT_Scalar>, XD;
1574 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
1575 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1576 SSE_CVT_Scalar>, XD, REX_W;
1578 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
1579 (CVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
1580 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
1581 (CVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
1582 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
1583 (CVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
1584 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
1585 (CVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
1586 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
1587 (CVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
1588 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
1589 (CVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
1590 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1591 (CVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
1592 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1593 (CVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
1595 def : InstAlias<"cvtsi2ss\t{$src, $dst|$dst, $src}",
1596 (CVTSI2SSrm FR64:$dst, i32mem:$src), 0>;
1597 def : InstAlias<"cvtsi2sd\t{$src, $dst|$dst, $src}",
1598 (CVTSI2SDrm FR64:$dst, i32mem:$src), 0>;
1600 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
1601 // and/or XMM operand(s).
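// For example, int_x86_sse2_cvtsd2si takes its source as a v2f64 in an XMM
// register rather than as a plain f64, which is why the variants below use
// VR128 sources and sse_load_f64 memory operands.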
1603 // FIXME: We probably want to match the rm form only when optimizing for
1604 // size, to avoid false dependencies (see sse_fp_unop_s for details).
1605 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1606 Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
1607 string asm, OpndItins itins> {
1608 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
1609 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1610 [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>,
1611 Sched<[itins.Sched]>;
1612 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
1613 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1614 [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>,
1615 Sched<[itins.Sched.Folded]>;
1618 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
1619 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
1620 PatFrag ld_frag, string asm, OpndItins itins,
1621 bit Is2Addr = 1> {
1622 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
1623 !if(Is2Addr,
1624 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1625 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1626 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
1627 itins.rr>, Sched<[itins.Sched]>;
1628 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1629 (ins DstRC:$src1, x86memop:$src2),
1630 !if(Is2Addr,
1631 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1632 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1633 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
1634 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
1637 let Predicates = [UseAVX] in {
1638 defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32,
1639 int_x86_sse2_cvtsd2si, sdmem, sse_load_f64, "cvtsd2si",
1640 SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
1641 defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
1642 int_x86_sse2_cvtsd2si64, sdmem, sse_load_f64, "cvtsd2si",
1643 SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
1645 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
1646 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD;
1647 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
1648 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD, REX_W;
1651 let isCodeGenOnly = 1 in {
1652 let Predicates = [UseAVX] in {
1653 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1654 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
1655 SSE_CVT_Scalar, 0>, XS, VEX_4V;
1656 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1657 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
1658 SSE_CVT_Scalar, 0>, XS, VEX_4V,
1659 VEX_W;
1660 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1661 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
1662 SSE_CVT_Scalar, 0>, XD, VEX_4V;
1663 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1664 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
1665 SSE_CVT_Scalar, 0>, XD,
1666 VEX_4V, VEX_W;
1668 let Constraints = "$src1 = $dst" in {
1669 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1670 int_x86_sse_cvtsi2ss, i32mem, loadi32,
1671 "cvtsi2ss{l}", SSE_CVT_Scalar>, XS;
1672 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1673 int_x86_sse_cvtsi642ss, i64mem, loadi64,
1674 "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
1675 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1676 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
1677 "cvtsi2sd{l}", SSE_CVT_Scalar>, XD;
1678 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1679 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
1680 "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
1682 } // isCodeGenOnly = 1
1686 // Aliases for intrinsics
1687 let isCodeGenOnly = 1 in {
1688 let Predicates = [UseAVX] in {
1689 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1690 ssmem, sse_load_f32, "cvttss2si",
1691 SSE_CVT_SS2SI_32>, XS, VEX;
1692 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1693 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1694 "cvttss2si", SSE_CVT_SS2SI_64>,
1696 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1697 sdmem, sse_load_f64, "cvttsd2si",
1698 SSE_CVT_SD2SI>, XD, VEX;
1699 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1700 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1701 "cvttsd2si", SSE_CVT_SD2SI>,
1704 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1705 ssmem, sse_load_f32, "cvttss2si",
1706 SSE_CVT_SS2SI_32>, XS;
1707 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1708 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1709 "cvttss2si", SSE_CVT_SS2SI_64>, XS, REX_W;
1710 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1711 sdmem, sse_load_f64, "cvttsd2si",
1712 SSE_CVT_SD2SI>, XD;
1713 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1714 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1715 "cvttsd2si", SSE_CVT_SD2SI>, XD, REX_W;
1716 } // isCodeGenOnly = 1
1718 let Predicates = [UseAVX] in {
1719 defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1720 ssmem, sse_load_f32, "cvtss2si",
1721 SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
1722 defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1723 ssmem, sse_load_f32, "cvtss2si",
1724 SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
1726 defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1727 ssmem, sse_load_f32, "cvtss2si",
1728 SSE_CVT_SS2SI_32>, XS;
1729 defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1730 ssmem, sse_load_f32, "cvtss2si",
1731 SSE_CVT_SS2SI_64>, XS, REX_W;
1733 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1734 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1735 SSEPackedSingle, SSE_CVT_PS>,
1736 PS, VEX, Requires<[HasAVX]>;
1737 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
1738 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1739 SSEPackedSingle, SSE_CVT_PS>,
1740 PS, VEX, VEX_L, Requires<[HasAVX]>;
1742 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1743 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1744 SSEPackedSingle, SSE_CVT_PS>,
1745 PS, Requires<[UseSSE2]>;
1747 let Predicates = [UseAVX] in {
1748 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1749 (VCVTSS2SIrr GR32:$dst, VR128:$src), 0>;
1750 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1751 (VCVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
1752 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1753 (VCVTSD2SIrr GR32:$dst, VR128:$src), 0>;
1754 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1755 (VCVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
1756 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1757 (VCVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
1758 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1759 (VCVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
1760 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1761 (VCVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
1762 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1763 (VCVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
1766 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1767 (CVTSS2SIrr GR32:$dst, VR128:$src), 0>;
1768 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1769 (CVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
1770 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1771 (CVTSD2SIrr GR32:$dst, VR128:$src), 0>;
1772 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1773 (CVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
1774 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1775 (CVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
1776 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1777 (CVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
1778 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1779 (CVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
1780 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1781 (CVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
1785 // Convert scalar double to scalar single
1786 let hasSideEffects = 0, Predicates = [UseAVX] in {
1787 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
1788 (ins FR64:$src1, FR64:$src2),
1789 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1790 IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG,
1791 Sched<[WriteCvtF2F]>;
1793 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
1794 (ins FR64:$src1, f64mem:$src2),
1795 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1796 [], IIC_SSE_CVT_Scalar_RM>,
1797 XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG,
1798 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1801 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
1802 Requires<[UseAVX]>;
1804 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1805 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1806 [(set FR32:$dst, (fround FR64:$src))],
1807 IIC_SSE_CVT_Scalar_RR>, Sched<[WriteCvtF2F]>;
1808 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1809 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1810 [(set FR32:$dst, (fround (loadf64 addr:$src)))],
1811 IIC_SSE_CVT_Scalar_RM>,
1813 Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
1815 let isCodeGenOnly = 1 in {
1816 def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
1817 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1818 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1819 [(set VR128:$dst,
1820 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1821 IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[HasAVX]>,
1822 Sched<[WriteCvtF2F]>;
1823 def Int_VCVTSD2SSrm: I<0x5A, MRMSrcMem,
1824 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1825 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1826 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1827 VR128:$src1, sse_load_f64:$src2))],
1828 IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[HasAVX]>,
1829 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1831 let Constraints = "$src1 = $dst" in {
1832 def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
1833 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1834 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1835 [(set VR128:$dst,
1836 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1837 IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>,
1838 Sched<[WriteCvtF2F]>;
1839 def Int_CVTSD2SSrm: I<0x5A, MRMSrcMem,
1840 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1841 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1842 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1843 VR128:$src1, sse_load_f64:$src2))],
1844 IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>,
1845 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1847 } // isCodeGenOnly = 1
1849 // Convert scalar single to scalar double
1850 // SSE2 instructions with XS prefix
1851 let hasSideEffects = 0, Predicates = [UseAVX] in {
1852 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1853 (ins FR32:$src1, FR32:$src2),
1854 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1855 [], IIC_SSE_CVT_Scalar_RR>,
1856 XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG,
1857 Sched<[WriteCvtF2F]>;
1859 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1860 (ins FR32:$src1, f32mem:$src2),
1861 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1862 [], IIC_SSE_CVT_Scalar_RM>,
1863 XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>,
1864 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1867 def : Pat<(f64 (fextend FR32:$src)),
1868 (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[UseAVX]>;
1869 def : Pat<(fextend (loadf32 addr:$src)),
1870 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[UseAVX]>;
1872 def : Pat<(extloadf32 addr:$src),
1873 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
1874 Requires<[UseAVX, OptForSize]>;
1875 def : Pat<(extloadf32 addr:$src),
1876 (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
1877 Requires<[UseAVX, OptForSpeed]>;
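// Note the size/speed split above: under OptForSize the load is folded into
// the convert, while under OptForSpeed an explicit VMOVSSrm keeps the convert
// reading a freshly written register.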
1879 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1880 "cvtss2sd\t{$src, $dst|$dst, $src}",
1881 [(set FR64:$dst, (fextend FR32:$src))],
1882 IIC_SSE_CVT_Scalar_RR>, XS,
1883 Requires<[UseSSE2]>, Sched<[WriteCvtF2F]>;
1884 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1885 "cvtss2sd\t{$src, $dst|$dst, $src}",
1886 [(set FR64:$dst, (extloadf32 addr:$src))],
1887 IIC_SSE_CVT_Scalar_RM>, XS,
1888 Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
1890 // extload f32 -> f64. This matches load+fextend because we have a hack in
1891 // the isel (PreprocessForFPConvert) that can introduce loads after dag
1892 // combine.
1893 // Since these loads aren't folded into the fextend, we have to match it
1894 // explicitly here.
1895 def : Pat<(fextend (loadf32 addr:$src)),
1896 (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2]>;
1897 def : Pat<(extloadf32 addr:$src),
1898 (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
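// So, when optimizing for speed this selects a "movss" load followed by a
// reg-reg "cvtss2sd", while the OptForSize form above folds the load into a
// single "cvtss2sd (mem)".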
1900 let isCodeGenOnly = 1 in {
1901 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1902 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1903 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1904 [(set VR128:$dst,
1905 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1906 IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[HasAVX]>,
1907 Sched<[WriteCvtF2F]>;
1908 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1909 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1910 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1911 [(set VR128:$dst,
1912 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1913 IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[HasAVX]>,
1914 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1915 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1916 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1917 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1918 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1919 [(set VR128:$dst,
1920 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1921 IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>,
1922 Sched<[WriteCvtF2F]>;
1923 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1924 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1925 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1926 [(set VR128:$dst,
1927 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1928 IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>,
1929 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1931 } // isCodeGenOnly = 1
1933 // Convert packed single/double fp to doubleword
1934 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1935 "cvtps2dq\t{$src, $dst|$dst, $src}",
1936 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1937 IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
1938 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1939 "cvtps2dq\t{$src, $dst|$dst, $src}",
1940 [(set VR128:$dst,
1941 (int_x86_sse2_cvtps2dq (loadv4f32 addr:$src)))],
1942 IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
1943 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1944 "cvtps2dq\t{$src, $dst|$dst, $src}",
1945 [(set VR256:$dst,
1946 (int_x86_avx_cvt_ps2dq_256 VR256:$src))],
1947 IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
1948 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1949 "cvtps2dq\t{$src, $dst|$dst, $src}",
1950 [(set VR256:$dst,
1951 (int_x86_avx_cvt_ps2dq_256 (loadv8f32 addr:$src)))],
1952 IIC_SSE_CVT_PS_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
1953 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1954 "cvtps2dq\t{$src, $dst|$dst, $src}",
1955 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1956 IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
1957 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1958 "cvtps2dq\t{$src, $dst|$dst, $src}",
1959 [(set VR128:$dst,
1960 (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
1961 IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
1964 // Convert Packed Double FP to Packed DW Integers
1965 let Predicates = [HasAVX] in {
1966 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1967 // register, but the same isn't true when using memory operands instead.
1968 // Provide other assembly rr and rm forms to address this explicitly.
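// For example, a bare "vcvtpd2dq (%rax), %xmm0" does not say whether the
// memory source is 128 or 256 bits wide; the "x"- and "y"-suffixed forms
// below make the width explicit.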
1969 def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1970 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1971 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1972 VEX, Sched<[WriteCvtF2I]>;
1975 def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
1976 (VCVTPD2DQrr VR128:$dst, VR128:$src), 0>;
1977 def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1978 "vcvtpd2dqx\t{$src, $dst|$dst, $src}",
1979 [(set VR128:$dst,
1980 (int_x86_sse2_cvtpd2dq (loadv2f64 addr:$src)))]>, VEX,
1981 Sched<[WriteCvtF2ILd]>;
1984 def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1985 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
1986 [(set VR128:$dst,
1987 (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L,
1988 Sched<[WriteCvtF2I]>;
1989 def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1990 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
1991 [(set VR128:$dst,
1992 (int_x86_avx_cvt_pd2dq_256 (loadv4f64 addr:$src)))]>,
1993 VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
1994 def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
1995 (VCVTPD2DQYrr VR128:$dst, VR256:$src), 0>;
1998 def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1999 "cvtpd2dq\t{$src, $dst|$dst, $src}",
2000 [(set VR128:$dst,
2001 (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))],
2002 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2ILd]>;
2003 def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2004 "cvtpd2dq\t{$src, $dst|$dst, $src}",
2005 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
2006 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
2008 // Convert with truncation packed single/double fp to doubleword
2009 // SSE2 packed instructions with XS prefix
2010 def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2011 "cvttps2dq\t{$src, $dst|$dst, $src}",
2012 [(set VR128:$dst,
2013 (int_x86_sse2_cvttps2dq VR128:$src))],
2014 IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
2015 def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2016 "cvttps2dq\t{$src, $dst|$dst, $src}",
2017 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
2018 (loadv4f32 addr:$src)))],
2019 IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
2020 def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2021 "cvttps2dq\t{$src, $dst|$dst, $src}",
2022 [(set VR256:$dst,
2023 (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
2024 IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
2025 def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2026 "cvttps2dq\t{$src, $dst|$dst, $src}",
2027 [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
2028 (loadv8f32 addr:$src)))],
2029 IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
2030 Sched<[WriteCvtF2ILd]>;
2032 def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2033 "cvttps2dq\t{$src, $dst|$dst, $src}",
2034 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
2035 IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
2036 def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2037 "cvttps2dq\t{$src, $dst|$dst, $src}",
2038 [(set VR128:$dst,
2039 (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
2040 IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
2042 let Predicates = [HasAVX] in {
2043 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
2044 (VCVTDQ2PSrr VR128:$src)>;
2045 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (loadv2i64 addr:$src))),
2046 (VCVTDQ2PSrm addr:$src)>;
2049 let Predicates = [HasAVX, NoVLX] in {
2050 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
2051 (VCVTDQ2PSrr VR128:$src)>;
2052 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
2053 (VCVTDQ2PSrm addr:$src)>;
2055 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
2056 (VCVTTPS2DQrr VR128:$src)>;
2057 def : Pat<(v4i32 (fp_to_sint (loadv4f32 addr:$src))),
2058 (VCVTTPS2DQrm addr:$src)>;
2060 def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
2061 (VCVTDQ2PSYrr VR256:$src)>;
2062 def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (loadv4i64 addr:$src)))),
2063 (VCVTDQ2PSYrm addr:$src)>;
2065 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
2066 (VCVTTPS2DQYrr VR256:$src)>;
2067 def : Pat<(v8i32 (fp_to_sint (loadv8f32 addr:$src))),
2068 (VCVTTPS2DQYrm addr:$src)>;
2071 let Predicates = [UseSSE2] in {
2072 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
2073 (CVTDQ2PSrr VR128:$src)>;
2074 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
2075 (CVTDQ2PSrm addr:$src)>;
2077 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
2078 (CVTDQ2PSrr VR128:$src)>;
2079 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
2080 (CVTDQ2PSrm addr:$src)>;
2082 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
2083 (CVTTPS2DQrr VR128:$src)>;
2084 def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
2085 (CVTTPS2DQrm addr:$src)>;
2088 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2089 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2090 [(set VR128:$dst,
2091 (int_x86_sse2_cvttpd2dq VR128:$src))],
2092 IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2I]>;
2094 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2095 // register, but the same isn't true when using memory operands instead.
2096 // Provide other assembly rr and rm forms to address this explicitly.
2099 def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
2100 (VCVTTPD2DQrr VR128:$dst, VR128:$src), 0>;
2101 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2102 "cvttpd2dqx\t{$src, $dst|$dst, $src}",
2103 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
2104 (loadv2f64 addr:$src)))],
2105 IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2ILd]>;
2108 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2109 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
2110 [(set VR128:$dst,
2111 (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
2112 IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
2113 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2114 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
2115 [(set VR128:$dst,
2116 (int_x86_avx_cvtt_pd2dq_256 (loadv4f64 addr:$src)))],
2117 IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
2118 def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
2119 (VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;
2121 let Predicates = [HasAVX, NoVLX] in {
2122 def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
2123 (VCVTTPD2DQYrr VR256:$src)>;
2124 def : Pat<(v4i32 (fp_to_sint (loadv4f64 addr:$src))),
2125 (VCVTTPD2DQYrm addr:$src)>;
2126 } // Predicates = [HasAVX, NoVLX]
2128 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2129 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2130 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
2131 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
2132 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
2133 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2134 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
2135 (memopv2f64 addr:$src)))],
2136 IIC_SSE_CVT_PD_RM>,
2137 Sched<[WriteCvtF2ILd]>;
2139 // Convert packed single to packed double
2140 let Predicates = [HasAVX] in {
2141 // SSE2 instructions without OpSize prefix
2142 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2143 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2144 [], IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
2145 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2146 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2147 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2148 IIC_SSE_CVT_PD_RM>, PS, VEX, Sched<[WriteCvtF2FLd]>;
2149 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2150 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2151 [], IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
2152 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
2153 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2154 [], IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
2157 let Predicates = [UseSSE2] in {
2158 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2159 "cvtps2pd\t{$src, $dst|$dst, $src}",
2160 [], IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
2161 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2162 "cvtps2pd\t{$src, $dst|$dst, $src}",
2163 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2164 IIC_SSE_CVT_PD_RM>, PS, Sched<[WriteCvtF2FLd]>;
2167 // Convert Packed DW Integers to Packed Double FP
2168 let Predicates = [HasAVX] in {
2169 let hasSideEffects = 0, mayLoad = 1 in
2170 def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2171 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2172 []>, VEX, Sched<[WriteCvtI2FLd]>;
2173 def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2174 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2175 []>, VEX, Sched<[WriteCvtI2F]>;
2176 def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
2177 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2178 []>, VEX, VEX_L, Sched<[WriteCvtI2FLd]>;
2179 def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2180 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2181 []>, VEX, VEX_L, Sched<[WriteCvtI2F]>;
2184 let hasSideEffects = 0, mayLoad = 1 in
2185 def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2186 "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
2187 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtI2FLd]>;
2188 def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2189 "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
2190 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtI2F]>;
2192 // AVX register conversion intrinsics
2193 let Predicates = [HasAVX] in {
2194 def : Pat<(v2f64 (X86cvtdq2pd (v4i32 VR128:$src))),
2195 (VCVTDQ2PDrr VR128:$src)>;
2196 def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
2197 (VCVTDQ2PDrm addr:$src)>;
2198 def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
2199 (VCVTDQ2PDrm addr:$src)>;
2201 def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
2202 (VCVTDQ2PDYrr VR128:$src)>;
2203 def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
2204 (VCVTDQ2PDYrm addr:$src)>;
2205 } // Predicates = [HasAVX]
2207 // SSE2 register conversion intrinsics
2208 let Predicates = [HasSSE2] in {
2209 def : Pat<(v2f64 (X86cvtdq2pd (v4i32 VR128:$src))),
2210 (CVTDQ2PDrr VR128:$src)>;
2211 def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
2212 (CVTDQ2PDrm addr:$src)>;
2213 def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
2214 (CVTDQ2PDrm addr:$src)>;
2215 } // Predicates = [HasSSE2]
2217 // Convert packed double to packed single
2218 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2219 // register, but the same isn't true when using memory operands instead.
2220 // Provide other assembly rr and rm forms to address this explicitly.
2221 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2222 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2223 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2224 IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2F]>;
2227 def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
2228 (VCVTPD2PSrr VR128:$dst, VR128:$src), 0>;
2229 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2230 "cvtpd2psx\t{$src, $dst|$dst, $src}",
2231 [(set VR128:$dst,
2232 (int_x86_sse2_cvtpd2ps (loadv2f64 addr:$src)))],
2233 IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2FLd]>;
2236 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2237 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2238 [(set VR128:$dst,
2239 (int_x86_avx_cvt_pd2_ps_256 VR256:$src))],
2240 IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2F]>;
2241 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2242 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2243 [(set VR128:$dst,
2244 (int_x86_avx_cvt_pd2_ps_256 (loadv4f64 addr:$src)))],
2245 IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
2246 def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
2247 (VCVTPD2PSYrr VR128:$dst, VR256:$src), 0>;
2249 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2250 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2251 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2252 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2F]>;
2253 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2254 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2255 [(set VR128:$dst,
2256 (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
2257 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2FLd]>;
2260 // AVX 256-bit register conversion intrinsics
2261 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
2262 // whenever possible to avoid declaring two versions of each one.
2263 let Predicates = [HasAVX] in {
2264 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
2265 (VCVTDQ2PSYrr VR256:$src)>;
2266 def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (loadv4i64 addr:$src))),
2267 (VCVTDQ2PSYrm addr:$src)>;
2270 let Predicates = [HasAVX, NoVLX] in {
2271 // Match fround and fextend for 128/256-bit conversions
2272 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2273 (VCVTPD2PSrr VR128:$src)>;
2274 def : Pat<(v4f32 (X86vfpround (loadv2f64 addr:$src))),
2275 (VCVTPD2PSXrm addr:$src)>;
2276 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
2277 (VCVTPD2PSYrr VR256:$src)>;
2278 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
2279 (VCVTPD2PSYrm addr:$src)>;
2281 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2282 (VCVTPS2PDrr VR128:$src)>;
2283 def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
2284 (VCVTPS2PDYrr VR128:$src)>;
2285 def : Pat<(v4f64 (extloadv4f32 addr:$src)),
2286 (VCVTPS2PDYrm addr:$src)>;
2289 let Predicates = [UseSSE2] in {
2290 // Match fround and fextend for 128-bit conversions
2291 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2292 (CVTPD2PSrr VR128:$src)>;
2293 def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
2294 (CVTPD2PSrm addr:$src)>;
2296 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2297 (CVTPS2PDrr VR128:$src)>;
2300 //===----------------------------------------------------------------------===//
2301 // SSE 1 & 2 - Compare Instructions
2302 //===----------------------------------------------------------------------===//
2304 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
2305 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
2306 Operand CC, SDNode OpNode, ValueType VT,
2307 PatFrag ld_frag, string asm, string asm_alt,
2308 OpndItins itins, ImmLeaf immLeaf> {
2309 def rr : SIi8<0xC2, MRMSrcReg,
2310 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2311 [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, immLeaf:$cc))],
2312 itins.rr>, Sched<[itins.Sched]>;
2313 def rm : SIi8<0xC2, MRMSrcMem,
2314 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2315 [(set RC:$dst, (OpNode (VT RC:$src1),
2316 (ld_frag addr:$src2), immLeaf:$cc))],
2317 itins.rm>,
2318 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2320 // Accept explicit immediate argument form instead of comparison code.
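// For example, "cmpss $3, %xmm1, %xmm0" assembles to the same encoding as the
// mnemonic form "cmpunordss %xmm1, %xmm0" matched above.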
2321 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2322 def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
2323 (ins RC:$src1, RC:$src2, u8imm:$cc), asm_alt, [],
2324 IIC_SSE_ALU_F32S_RR>, Sched<[itins.Sched]>;
2326 def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
2327 (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm_alt, [],
2328 IIC_SSE_ALU_F32S_RM>,
2329 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2333 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmps, f32, loadf32,
2334 "cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2335 "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2336 SSE_ALU_F32S, i8immZExt5>, XS, VEX_4V, VEX_LIG;
2337 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmps, f64, loadf64,
2338 "cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2339 "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2340 SSE_ALU_F32S, i8immZExt5>, // same latency as 32 bit compare
2341 XD, VEX_4V, VEX_LIG;
2343 let Constraints = "$src1 = $dst" in {
2344 defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmps, f32, loadf32,
2345 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
2346 "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S,
2347 i8immZExt3>, XS;
2348 defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmps, f64, loadf64,
2349 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
2350 "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2351 SSE_ALU_F64S, i8immZExt3>, XD;
2354 multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
2355 Intrinsic Int, string asm, OpndItins itins,
2356 ImmLeaf immLeaf> {
2357 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
2358 (ins VR128:$src1, VR128:$src, CC:$cc), asm,
2359 [(set VR128:$dst, (Int VR128:$src1,
2360 VR128:$src, immLeaf:$cc))],
2361 itins.rr>,
2362 Sched<[itins.Sched]>;
2363 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
2364 (ins VR128:$src1, x86memop:$src, CC:$cc), asm,
2365 [(set VR128:$dst, (Int VR128:$src1,
2366 (load addr:$src), immLeaf:$cc))],
2367 itins.rm>,
2368 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2371 let isCodeGenOnly = 1 in {
2372 // Aliases to match intrinsics which expect XMM operand(s).
2373 defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
2374 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
2375 SSE_ALU_F32S, i8immZExt5>,
2376 XS, VEX_4V;
2377 defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
2378 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
2379 SSE_ALU_F32S, i8immZExt5>, // same latency as f32
2380 XD, VEX_4V;
2381 let Constraints = "$src1 = $dst" in {
2382 defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
2383 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
2384 SSE_ALU_F32S, i8immZExt3>, XS;
2385 defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
2386 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
2387 SSE_ALU_F64S, i8immZExt3>,
2388 XD;
2393 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
2394 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
2395 ValueType vt, X86MemOperand x86memop,
2396 PatFrag ld_frag, string OpcodeStr> {
2397 def rr: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
2398 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2399 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))],
2400 IIC_SSE_COMIS_RR>,
2401 Sched<[WriteFAdd]>;
2402 def rm: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
2403 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2404 [(set EFLAGS, (OpNode (vt RC:$src1),
2405 (ld_frag addr:$src2)))],
2406 IIC_SSE_COMIS_RM>,
2407 Sched<[WriteFAddLd, ReadAfterLd]>;
2410 let Defs = [EFLAGS] in {
2411 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2412 "ucomiss">, PS, VEX, VEX_LIG;
2413 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2414 "ucomisd">, PD, VEX, VEX_LIG;
2415 let Pattern = []<dag> in {
2416 defm VCOMISS : sse12_ord_cmp<0x2F, FR32, undef, f32, f32mem, loadf32,
2417 "comiss">, PS, VEX, VEX_LIG;
2418 defm VCOMISD : sse12_ord_cmp<0x2F, FR64, undef, f64, f64mem, loadf64,
2419 "comisd">, PD, VEX, VEX_LIG;
2422 let isCodeGenOnly = 1 in {
2423 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2424 load, "ucomiss">, PS, VEX;
2425 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2426 load, "ucomisd">, PD, VEX;
2428 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
2429 load, "comiss">, PS, VEX;
2430 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
2431 load, "comisd">, PD, VEX;
2433 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2434 "ucomiss">, PS;
2435 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2436 "ucomisd">, PD;
2438 let Pattern = []<dag> in {
2439 defm COMISS : sse12_ord_cmp<0x2F, FR32, undef, f32, f32mem, loadf32,
2440 "comiss">, PS;
2441 defm COMISD : sse12_ord_cmp<0x2F, FR64, undef, f64, f64mem, loadf64,
2442 "comisd">, PD;
2445 let isCodeGenOnly = 1 in {
2446 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2447 load, "ucomiss">, PS;
2448 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2449 load, "ucomisd">, PD;
2451 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
2452 "comiss">, PS;
2453 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
2454 "comisd">, PD;
2456 } // Defs = [EFLAGS]
2458 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
2459 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
2460 Operand CC, Intrinsic Int, string asm,
2461 string asm_alt, Domain d, ImmLeaf immLeaf,
2462 PatFrag ld_frag, OpndItins itins = SSE_ALU_F32P> {
2463 let isCommutable = 1 in
2464 def rri : PIi8<0xC2, MRMSrcReg,
2465 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2466 [(set RC:$dst, (Int RC:$src1, RC:$src2, immLeaf:$cc))],
2467 itins.rr, d>,
2468 Sched<[WriteFAdd]>;
2469 def rmi : PIi8<0xC2, MRMSrcMem,
2470 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2471 [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2), immLeaf:$cc))],
2472 itins.rm, d>,
2473 Sched<[WriteFAddLd, ReadAfterLd]>;
2475 // Accept explicit immediate argument form instead of comparison code.
2476 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2477 def rri_alt : PIi8<0xC2, MRMSrcReg,
2478 (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc),
2479 asm_alt, [], itins.rr, d>, Sched<[WriteFAdd]>;
2481 def rmi_alt : PIi8<0xC2, MRMSrcMem,
2482 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc),
2483 asm_alt, [], itins.rm, d>,
2484 Sched<[WriteFAddLd, ReadAfterLd]>;
2488 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
2489 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2490 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2491 SSEPackedSingle, i8immZExt5, loadv4f32>, PS, VEX_4V;
2492 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
2493 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2494 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2495 SSEPackedDouble, i8immZExt5, loadv2f64>, PD, VEX_4V;
2496 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
2497 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2498 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2499 SSEPackedSingle, i8immZExt5, loadv8f32>, PS, VEX_4V, VEX_L;
2500 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
2501 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2502 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2503 SSEPackedDouble, i8immZExt5, loadv4f64>, PD, VEX_4V, VEX_L;
2504 let Constraints = "$src1 = $dst" in {
2505 defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
2506 "cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
2507 "cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2508 SSEPackedSingle, i8immZExt5, memopv4f32, SSE_ALU_F32P>, PS;
2509 defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
2510 "cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
2511 "cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2512 SSEPackedDouble, i8immZExt5, memopv2f64, SSE_ALU_F64P>, PD;
2515 let Predicates = [HasAVX] in {
2516 def : Pat<(v4f32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2517 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2518 def : Pat<(v4f32 (X86cmpp (v4f32 VR128:$src1), (loadv4f32 addr:$src2), imm:$cc)),
2519 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2520 def : Pat<(v2f64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2521 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2522 def : Pat<(v2f64 (X86cmpp (v2f64 VR128:$src1), (loadv2f64 addr:$src2), imm:$cc)),
2523 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2525 def : Pat<(v8f32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
2526 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
2527 def : Pat<(v8f32 (X86cmpp (v8f32 VR256:$src1), (loadv8f32 addr:$src2), imm:$cc)),
2528 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
2529 def : Pat<(v4f64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
2530 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
2531 def : Pat<(v4f64 (X86cmpp (v4f64 VR256:$src1), (loadv4f64 addr:$src2), imm:$cc)),
2532 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
2535 let Predicates = [UseSSE1] in {
2536 def : Pat<(v4f32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2537 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2538 def : Pat<(v4f32 (X86cmpp (v4f32 VR128:$src1), (memopv4f32 addr:$src2), imm:$cc)),
2539 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2542 let Predicates = [UseSSE2] in {
2543 def : Pat<(v2f64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2544 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2545 def : Pat<(v2f64 (X86cmpp (v2f64 VR128:$src1), (memopv2f64 addr:$src2), imm:$cc)),
2546 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2549 //===----------------------------------------------------------------------===//
2550 // SSE 1 & 2 - Shuffle Instructions
2551 //===----------------------------------------------------------------------===//
2553 /// sse12_shuffle - sse 1 & 2 fp shuffle instructions
2554 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
2555 ValueType vt, string asm, PatFrag mem_frag,
2556 Domain d> {
2557 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
2558 (ins RC:$src1, x86memop:$src2, u8imm:$src3), asm,
2559 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
2560 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
2561 Sched<[WriteFShuffleLd, ReadAfterLd]>;
2562 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
2563 (ins RC:$src1, RC:$src2, u8imm:$src3), asm,
2564 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
2565 (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
2566 Sched<[WriteFShuffle]>;
2569 let Predicates = [HasAVX, NoVLX] in {
2570 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2571 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2572 loadv4f32, SSEPackedSingle>, PS, VEX_4V;
2573 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
2574 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2575 loadv8f32, SSEPackedSingle>, PS, VEX_4V, VEX_L;
2576 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2577 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2578 loadv2f64, SSEPackedDouble>, PD, VEX_4V;
2579 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
2580 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2581 loadv4f64, SSEPackedDouble>, PD, VEX_4V, VEX_L;
2583 let Constraints = "$src1 = $dst" in {
2584 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
2585 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2586 memopv4f32, SSEPackedSingle>, PS;
2587 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
2588 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2589 memopv2f64, SSEPackedDouble>, PD;
2592 let Predicates = [HasAVX, NoVLX] in {
2593 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2594 (bc_v4i32 (loadv2i64 addr:$src2)), (i8 imm:$imm))),
2595 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2596 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2597 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2599 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2600 (loadv2i64 addr:$src2), (i8 imm:$imm))),
2601 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2602 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2603 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2606 def : Pat<(v8i32 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2607 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2608 def : Pat<(v8i32 (X86Shufp VR256:$src1,
2609 (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
2610 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2612 def : Pat<(v4i64 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
2613 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
2614 def : Pat<(v4i64 (X86Shufp VR256:$src1,
2615 (loadv4i64 addr:$src2), (i8 imm:$imm))),
2616 (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
2619 let Predicates = [UseSSE1] in {
2620 def : Pat<(v4i32 (X86Shufp VR128:$src1,
2621 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
2622 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
2623 def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2624 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
2627 let Predicates = [UseSSE2] in {
2628 // Generic SHUFPD patterns
2629 def : Pat<(v2i64 (X86Shufp VR128:$src1,
2630 (memopv2i64 addr:$src2), (i8 imm:$imm))),
2631 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
2632 def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
2633 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
2636 //===----------------------------------------------------------------------===//
2637 // SSE 1 & 2 - Unpack FP Instructions
2638 //===----------------------------------------------------------------------===//
2640 /// sse12_unpack_interleave - sse 1 & 2 fp unpack and interleave
2641 multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
2642 PatFrag mem_frag, RegisterClass RC,
2643 X86MemOperand x86memop, string asm,
2644 Domain d> {
2645 def rr : PI<opc, MRMSrcReg,
2646 (outs RC:$dst), (ins RC:$src1, RC:$src2),
2647 asm, [(set RC:$dst,
2648 (vt (OpNode RC:$src1, RC:$src2)))],
2649 IIC_SSE_UNPCK, d>, Sched<[WriteFShuffle]>;
2650 def rm : PI<opc, MRMSrcMem,
2651 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2652 asm, [(set RC:$dst,
2653 (vt (OpNode RC:$src1,
2654 (mem_frag addr:$src2))))],
2655 IIC_SSE_UNPCK, d>,
2656 Sched<[WriteFShuffleLd, ReadAfterLd]>;
2659 let Predicates = [HasAVX, NoVLX] in {
2660 defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, loadv4f32,
2661 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2662 SSEPackedSingle>, PS, VEX_4V;
2663 defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, loadv2f64,
2664 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2665 SSEPackedDouble>, PD, VEX_4V;
2666 defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, loadv4f32,
2667 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2668 SSEPackedSingle>, PS, VEX_4V;
2669 defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, loadv2f64,
2670 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2671 SSEPackedDouble>, PD, VEX_4V;
2673 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, loadv8f32,
2674 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2675 SSEPackedSingle>, PS, VEX_4V, VEX_L;
2676 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, loadv4f64,
2677 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2678 SSEPackedDouble>, PD, VEX_4V, VEX_L;
2679 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, loadv8f32,
2680 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2681 SSEPackedSingle>, PS, VEX_4V, VEX_L;
2682 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, loadv4f64,
2683 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2684 SSEPackedDouble>, PD, VEX_4V, VEX_L;
2685 } // Predicates = [HasAVX, NoVLX]
2686 let Constraints = "$src1 = $dst" in {
2687 defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
2688 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
2689 SSEPackedSingle>, PS;
2690 defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
2691 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
2692 SSEPackedDouble>, PD;
2693 defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
2694 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
2695 SSEPackedSingle>, PS;
2696 defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
2697 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
2698 SSEPackedDouble>, PD;
2699 } // Constraints = "$src1 = $dst"
2701 let Predicates = [HasAVX1Only] in {
2702 def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
2703 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
2704 def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
2705 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
2706 def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
2707 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
2708 def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
2709 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
2711 def : Pat<(v4i64 (X86Unpckl VR256:$src1, (loadv4i64 addr:$src2))),
2712 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
2713 def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
2714 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
2715 def : Pat<(v4i64 (X86Unpckh VR256:$src1, (loadv4i64 addr:$src2))),
2716 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
2717 def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
2718 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
2721 //===----------------------------------------------------------------------===//
2722 // SSE 1 & 2 - Extract Floating-Point Sign mask
2723 //===----------------------------------------------------------------------===//
2725 /// sse12_extr_sign_mask - sse 1 & 2 extract fp sign mask
2726 multiclass sse12_extr_sign_mask<RegisterClass RC, ValueType vt,
2727 string asm, Domain d> {
2728 def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
2729 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
2730 [(set GR32orGR64:$dst, (X86movmsk (vt RC:$src)))], IIC_SSE_MOVMSK, d>,
2731 Sched<[WriteVecLogic]>;
2732 }
2734 let Predicates = [HasAVX] in {
2735 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
2736 SSEPackedSingle>, PS, VEX;
2737 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
2738 SSEPackedDouble>, PD, VEX;
2739 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, v8f32, "movmskps",
2740 SSEPackedSingle>, PS, VEX, VEX_L;
2741 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, v4f64, "movmskpd",
2742 SSEPackedDouble>, PD, VEX, VEX_L;
2743 }
2745 defm MOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
2746 SSEPackedSingle>, PS;
2747 defm MOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
2748 SSEPackedDouble>, PD;
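// A minimal C sketch (function name illustrative) of what X86movmsk selects:
//   #include <xmmintrin.h>
//   // movmskps packs the sign bit of each f32 lane into bits 0-3 of a GPR.
//   int sign_mask(__m128 v) { return _mm_movemask_ps(v); }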
2750 //===---------------------------------------------------------------------===//
2751 // SSE2 - Packed Integer Logical Instructions
2752 //===---------------------------------------------------------------------===//
2754 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2756 /// PDI_binop_rm - Simple SSE2 binary operator.
2757 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2758 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
2759 X86MemOperand x86memop, OpndItins itins,
2760 bit IsCommutable, bit Is2Addr> {
2761 let isCommutable = IsCommutable in
2762 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
2763 (ins RC:$src1, RC:$src2),
2764 !if(Is2Addr,
2765 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2766 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2767 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
2768 Sched<[itins.Sched]>;
2769 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
2770 (ins RC:$src1, x86memop:$src2),
2771 !if(Is2Addr,
2772 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2773 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2774 [(set RC:$dst, (OpVT (OpNode RC:$src1,
2775 (bitconvert (memop_frag addr:$src2)))))],
2776 itins.rm>,
2777 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2778 }
2779 } // ExeDomain = SSEPackedInt
2781 multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
2782 ValueType OpVT128, ValueType OpVT256,
2783 OpndItins itins, bit IsCommutable = 0, Predicate prd> {
2784 let Predicates = [HasAVX, prd] in
2785 defm V#NAME : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
2786 VR128, loadv2i64, i128mem, itins, IsCommutable, 0>, VEX_4V;
2788 let Constraints = "$src1 = $dst" in
2789 defm NAME : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128,
2790 memopv2i64, i128mem, itins, IsCommutable, 1>;
2792 let Predicates = [HasAVX2, prd] in
2793 defm V#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
2794 OpVT256, VR256, loadv4i64, i256mem, itins,
2795 IsCommutable, 0>, VEX_4V, VEX_L;
2796 }
2798 // These are ordered here for pattern ordering requirements with the fp versions
2800 defm PAND : PDI_binop_all<0xDB, "pand", and, v2i64, v4i64,
2801 SSE_VEC_BIT_ITINS_P, 1, NoVLX>;
2802 defm POR : PDI_binop_all<0xEB, "por", or, v2i64, v4i64,
2803 SSE_VEC_BIT_ITINS_P, 1, NoVLX>;
2804 defm PXOR : PDI_binop_all<0xEF, "pxor", xor, v2i64, v4i64,
2805 SSE_VEC_BIT_ITINS_P, 1, NoVLX>;
2806 defm PANDN : PDI_binop_all<0xDF, "pandn", X86andnp, v2i64, v4i64,
2807 SSE_VEC_BIT_ITINS_P, 0, NoVLX>;
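// Note that pandn is not commutative: X86andnp computes (~src1) & src2, which
// is why PANDN alone is defined with IsCommutable = 0. In intrinsic terms
// (sketch, names illustrative):
//   #include <emmintrin.h>
//   __m128i andnot(__m128i a, __m128i b) {
//     return _mm_andnot_si128(a, b);  // (~a) & b, a single pandn
//   }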
2809 //===----------------------------------------------------------------------===//
2810 // SSE 1 & 2 - Logical Instructions
2811 //===----------------------------------------------------------------------===//
2813 // Multiclass for scalars using the X86 logical operation aliases for FP.
2814 multiclass sse12_fp_packed_scalar_logical_alias<
2815 bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
2816 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2817 FR32, f32, f128mem, loadf32_128, SSEPackedSingle, itins, 0>,
2818 PS, VEX_4V;
2820 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2821 FR64, f64, f128mem, loadf64_128, SSEPackedDouble, itins, 0>,
2822 PD, VEX_4V;
2824 let Constraints = "$src1 = $dst" in {
2825 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
2826 f32, f128mem, memopfsf32_128, SSEPackedSingle, itins>, PS;
2828 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
2829 f64, f128mem, memopfsf64_128, SSEPackedDouble, itins>, PD;
2830 }
2831 }
2833 let isCodeGenOnly = 1 in {
2834 defm FsAND : sse12_fp_packed_scalar_logical_alias<0x54, "and", X86fand,
2835 SSE_BIT_ITINS_P>;
2836 defm FsOR : sse12_fp_packed_scalar_logical_alias<0x56, "or", X86for,
2837 SSE_BIT_ITINS_P>;
2838 defm FsXOR : sse12_fp_packed_scalar_logical_alias<0x57, "xor", X86fxor,
2839 SSE_BIT_ITINS_P>;
2841 let isCommutable = 0 in
2842 defm FsANDN : sse12_fp_packed_scalar_logical_alias<0x55, "andn", X86fandn,
2843 SSE_BIT_ITINS_P>;
2844 }
2846 // Multiclass for vectors using the X86 logical operation aliases for FP.
2847 multiclass sse12_fp_packed_vector_logical_alias<
2848 bits<8> opc, string OpcodeStr, SDNode OpNode, OpndItins itins> {
2849 let Predicates = [HasAVX, NoVLX_Or_NoDQI] in {
2850 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2851 VR128, v4f32, f128mem, loadv4f32, SSEPackedSingle, itins, 0>,
2852 PS, VEX_4V;
2854 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2855 VR128, v2f64, f128mem, loadv2f64, SSEPackedDouble, itins, 0>,
2856 PD, VEX_4V;
2858 defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2859 VR256, v8f32, f256mem, loadv8f32, SSEPackedSingle, itins, 0>,
2860 PS, VEX_4V, VEX_L;
2862 defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2863 VR256, v4f64, f256mem, loadv4f64, SSEPackedDouble, itins, 0>,
2864 PD, VEX_4V, VEX_L;
2865 }
2867 let Constraints = "$src1 = $dst" in {
2868 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
2869 v4f32, f128mem, memopv4f32, SSEPackedSingle, itins>,
2870 PS;
2872 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
2873 v2f64, f128mem, memopv2f64, SSEPackedDouble, itins>,
2874 PD;
2875 }
2876 }
2878 let isCodeGenOnly = 1 in {
2879 defm FvAND : sse12_fp_packed_vector_logical_alias<0x54, "and", X86fand,
2880 SSE_BIT_ITINS_P>;
2881 defm FvOR : sse12_fp_packed_vector_logical_alias<0x56, "or", X86for,
2882 SSE_BIT_ITINS_P>;
2883 defm FvXOR : sse12_fp_packed_vector_logical_alias<0x57, "xor", X86fxor,
2884 SSE_BIT_ITINS_P>;
2886 let isCommutable = 0 in
2887 defm FvANDN : sse12_fp_packed_vector_logical_alias<0x55, "andn", X86fandn,
2888 SSE_BIT_ITINS_P>;
2889 }
2891 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
2893 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
2894 SDNode OpNode> {
2895 let Predicates = [HasAVX, NoVLX] in {
2896 defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
2897 !strconcat(OpcodeStr, "ps"), f256mem,
2898 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
2899 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
2900 (loadv4i64 addr:$src2)))], 0>, PS, VEX_4V, VEX_L;
2902 defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
2903 !strconcat(OpcodeStr, "pd"), f256mem,
2904 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2905 (bc_v4i64 (v4f64 VR256:$src2))))],
2906 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2907 (loadv4i64 addr:$src2)))], 0>,
2908 PD, VEX_4V, VEX_L;
2910 // In AVX no need to add a pattern for 128-bit logical rr ps, because they
2911 // are all promoted to v2i64, and the patterns are covered by the int
2912 // version. This is needed in SSE only, because v2i64 isn't supported on
2913 // SSE1, but only on SSE2.
2914 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2915 !strconcat(OpcodeStr, "ps"), f128mem, [],
2916 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2917 (loadv2i64 addr:$src2)))], 0>, PS, VEX_4V;
2919 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2920 !strconcat(OpcodeStr, "pd"), f128mem,
2921 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2922 (bc_v2i64 (v2f64 VR128:$src2))))],
2923 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2924 (loadv2i64 addr:$src2)))], 0>,
2925 PD, VEX_4V;
2926 }
2928 let Constraints = "$src1 = $dst" in {
2929 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2930 !strconcat(OpcodeStr, "ps"), f128mem,
2931 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
2932 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2933 (memopv2i64 addr:$src2)))]>, PS;
2935 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2936 !strconcat(OpcodeStr, "pd"), f128mem,
2937 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2938 (bc_v2i64 (v2f64 VR128:$src2))))],
2939 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2940 (memopv2i64 addr:$src2)))]>, PD;
2941 }
2942 }
2944 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
2945 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
2946 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
2947 let isCommutable = 0 in
2948 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
2950 // AVX1 requires type coercions in order to fold loads directly into logical
2951 // ops.
2952 let Predicates = [HasAVX1Only] in {
2953 def : Pat<(bc_v8f32 (and VR256:$src1, (loadv4i64 addr:$src2))),
2954 (VANDPSYrm VR256:$src1, addr:$src2)>;
2955 def : Pat<(bc_v8f32 (or VR256:$src1, (loadv4i64 addr:$src2))),
2956 (VORPSYrm VR256:$src1, addr:$src2)>;
2957 def : Pat<(bc_v8f32 (xor VR256:$src1, (loadv4i64 addr:$src2))),
2958 (VXORPSYrm VR256:$src1, addr:$src2)>;
2959 def : Pat<(bc_v8f32 (X86andnp VR256:$src1, (loadv4i64 addr:$src2))),
2960 (VANDNPSYrm VR256:$src1, addr:$src2)>;
2961 }
2963 //===----------------------------------------------------------------------===//
2964 // SSE 1 & 2 - Arithmetic Instructions
2965 //===----------------------------------------------------------------------===//
2967 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
2968 /// vector forms.
2970 /// In addition, we also have a special variant of the scalar form here to
2971 /// represent the associated intrinsic operation. This form is unlike the
2972 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
2973 /// and leaves the top elements unmodified (therefore these cannot be commuted).
2975 /// These three forms can each be reg+reg or reg+mem.
2978 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
2979 /// classes.
2980 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
2981 SDNode OpNode, SizeItins itins> {
2982 let Predicates = [HasAVX, NoVLX] in {
2983 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2984 VR128, v4f32, f128mem, loadv4f32,
2985 SSEPackedSingle, itins.s, 0>, PS, VEX_4V;
2986 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2987 VR128, v2f64, f128mem, loadv2f64,
2988 SSEPackedDouble, itins.d, 0>, PD, VEX_4V;
2990 defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
2991 OpNode, VR256, v8f32, f256mem, loadv8f32,
2992 SSEPackedSingle, itins.s, 0>, PS, VEX_4V, VEX_L;
2993 defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
2994 OpNode, VR256, v4f64, f256mem, loadv4f64,
2995 SSEPackedDouble, itins.d, 0>, PD, VEX_4V, VEX_L;
2996 }
2998 let Constraints = "$src1 = $dst" in {
2999 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
3000 v4f32, f128mem, memopv4f32, SSEPackedSingle,
3001 itins.s>, PS;
3002 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
3003 v2f64, f128mem, memopv2f64, SSEPackedDouble,
3004 itins.d>, PD;
3005 }
3006 }
3008 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3009 SizeItins itins> {
3010 defm V#NAME#SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
3011 OpNode, FR32, f32mem, SSEPackedSingle, itins.s, 0>,
3012 XS, VEX_4V, VEX_LIG;
3013 defm V#NAME#SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
3014 OpNode, FR64, f64mem, SSEPackedDouble, itins.d, 0>,
3015 XD, VEX_4V, VEX_LIG;
3017 let Constraints = "$src1 = $dst" in {
3018 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
3019 OpNode, FR32, f32mem, SSEPackedSingle,
3020 itins.s>, XS;
3021 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
3022 OpNode, FR64, f64mem, SSEPackedDouble,
3023 itins.d>, XD;
3024 }
3025 }
3027 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
3028 SizeItins itins> {
3029 defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3030 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
3031 SSEPackedSingle, itins.s, 0>, XS, VEX_4V, VEX_LIG;
3032 defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3033 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
3034 SSEPackedDouble, itins.d, 0>, XD, VEX_4V, VEX_LIG;
3036 let Constraints = "$src1 = $dst" in {
3037 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3038 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
3039 SSEPackedSingle, itins.s>, XS;
3040 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
3041 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
3042 SSEPackedDouble, itins.d>, XD;
3043 }
3044 }
3046 // Binary Arithmetic instructions
3047 defm ADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>,
3048 basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
3049 basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
3050 defm MUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
3051 basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
3052 basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;
3053 let isCommutable = 0 in {
3054 defm SUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
3055 basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>,
3056 basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S>;
3057 defm DIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
3058 basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S>,
3059 basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S>;
3060 defm MAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
3061 basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>,
3062 basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S>;
3063 defm MIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
3064 basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>,
3065 basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S>;
3066 }
3068 let isCodeGenOnly = 1 in {
3069 defm MAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>,
3070 basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>;
3071 defm MINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>,
3072 basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>;
3073 }
3075 // Patterns used to select SSE scalar fp arithmetic instructions from
3076 // either:
3077 //
3078 // (1) a scalar fp operation followed by a blend
3079 //
3080 // The effect is that the backend no longer emits unnecessary vector
3081 // insert instructions immediately after SSE scalar fp instructions
3082 // like addss or mulss.
3083 //
3084 // For example, given the following code:
3085 //   __m128 foo(__m128 A, __m128 B) {
3086 //     A[0] += B[0];
3087 //     return A;
3088 //   }
3089 //
3090 // Previously we generated:
3091 //   addss %xmm0, %xmm1
3092 //   movss %xmm1, %xmm0
3093 //
3094 // We now generate:
3095 //   addss %xmm1, %xmm0
3096 //
3097 // (2) a vector packed single/double fp operation followed by a vector insert
3098 //
3099 // The effect is that the backend converts the packed fp instruction
3100 // followed by a vector insert into a single SSE scalar fp instruction.
3101 //
3102 // For example, given the following code:
3103 //   __m128 foo(__m128 A, __m128 B) {
3104 //     __m128 C = A + B;
3105 //     return (__m128) {C[0], A[1], A[2], A[3]};
3106 //   }
3107 //
3108 // Previously we generated:
3109 //   addps %xmm0, %xmm1
3110 //   movss %xmm1, %xmm0
3111 //
3112 // We now generate:
3113 //   addss %xmm1, %xmm0
3114 //
3115 // TODO: Some canonicalization in lowering would simplify the number of
3116 // patterns we have to try to match.
3117 multiclass scalar_math_f32_patterns<SDNode Op, string OpcPrefix> {
3118 let Predicates = [UseSSE1] in {
3119 // extracted scalar math op with insert via movss
3120 def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3121 (Op (f32 (extractelt (v4f32 VR128:$dst), (iPTR 0))),
3122 FR32:$src))))),
3123 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
3124 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3126 // vector math op with insert via movss
3127 def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
3128 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
3129 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3130 }
3132 // With SSE 4.1, blendi is preferred to movsd, so match that too.
3133 let Predicates = [UseSSE41] in {
3134 // extracted scalar math op with insert via blend
3135 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3136 (Op (f32 (extractelt (v4f32 VR128:$dst), (iPTR 0))),
3137 FR32:$src))), (i8 1))),
3138 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
3139 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3141 // vector math op with insert via blend
3142 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
3143 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
3144 (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3145 }
3148 // Repeat everything for AVX, except for the movss + scalar combo...
3149 // because that one shouldn't occur with AVX codegen?
3150 let Predicates = [HasAVX] in {
3151 // extracted scalar math op with insert via blend
3152 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
3153 (Op (f32 (extractelt (v4f32 VR128:$dst), (iPTR 0))),
3154 FR32:$src))), (i8 1))),
3155 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst,
3156 (COPY_TO_REGCLASS FR32:$src, VR128))>;
3158 // vector math op with insert via movss
3159 def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
3160 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
3161 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3163 // vector math op with insert via blend
3164 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
3165 (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
3166 (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
3170 defm : scalar_math_f32_patterns<fadd, "ADD">;
3171 defm : scalar_math_f32_patterns<fsub, "SUB">;
3172 defm : scalar_math_f32_patterns<fmul, "MUL">;
3173 defm : scalar_math_f32_patterns<fdiv, "DIV">;
3175 multiclass scalar_math_f64_patterns<SDNode Op, string OpcPrefix> {
3176 let Predicates = [UseSSE2] in {
3177 // extracted scalar math op with insert via movsd
3178 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3179 (Op (f64 (extractelt (v2f64 VR128:$dst), (iPTR 0))),
3180 FR64:$src))))),
3181 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
3182 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3184 // vector math op with insert via movsd
3185 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
3186 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
3187 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3188 }
3190 // With SSE 4.1, blendi is preferred to movsd, so match those too.
3191 let Predicates = [UseSSE41] in {
3192 // extracted scalar math op with insert via blend
3193 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3194 (Op (f64 (extractelt (v2f64 VR128:$dst), (iPTR 0))),
3195 FR64:$src))), (i8 1))),
3196 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
3197 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3199 // vector math op with insert via blend
3200 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
3201 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
3202 (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3203 }
3205 // Repeat everything for AVX.
3206 let Predicates = [HasAVX] in {
3207 // extracted scalar math op with insert via movsd
3208 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3209 (Op (f64 (extractelt (v2f64 VR128:$dst), (iPTR 0))),
3210 FR64:$src))))),
3211 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
3212 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3214 // extracted scalar math op with insert via blend
3215 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
3216 (Op (f64 (extractelt (v2f64 VR128:$dst), (iPTR 0))),
3217 FR64:$src))), (i8 1))),
3218 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
3219 (COPY_TO_REGCLASS FR64:$src, VR128))>;
3221 // vector math op with insert via movsd
3222 def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
3223 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
3224 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3226 // vector math op with insert via blend
3227 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
3228 (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
3229 (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
3233 defm : scalar_math_f64_patterns<fadd, "ADD">;
3234 defm : scalar_math_f64_patterns<fsub, "SUB">;
3235 defm : scalar_math_f64_patterns<fmul, "MUL">;
3236 defm : scalar_math_f64_patterns<fdiv, "DIV">;
3240 /// In addition, we also have a special variant of the scalar form here to
3241 /// represent the associated intrinsic operation. This form is unlike the
3242 /// plain scalar form, in that it takes an entire vector (instead of a
3243 /// scalar) and leaves the top elements undefined.
3245 /// And, we have a special variant form for a full-vector intrinsic form.
3247 let Sched = WriteFSqrt in {
3248 def SSE_SQRTPS : OpndItins<
3249 IIC_SSE_SQRTPS_RR, IIC_SSE_SQRTPS_RM
3250 >;
3252 def SSE_SQRTSS : OpndItins<
3253 IIC_SSE_SQRTSS_RR, IIC_SSE_SQRTSS_RM
3254 >;
3256 def SSE_SQRTPD : OpndItins<
3257 IIC_SSE_SQRTPD_RR, IIC_SSE_SQRTPD_RM
3258 >;
3260 def SSE_SQRTSD : OpndItins<
3261 IIC_SSE_SQRTSD_RR, IIC_SSE_SQRTSD_RM
3262 >;
3263 }
3265 let Sched = WriteFRsqrt in {
3266 def SSE_RSQRTPS : OpndItins<
3267 IIC_SSE_RSQRTPS_RR, IIC_SSE_RSQRTPS_RM
3268 >;
3270 def SSE_RSQRTSS : OpndItins<
3271 IIC_SSE_RSQRTSS_RR, IIC_SSE_RSQRTSS_RM
3272 >;
3273 }
3275 let Sched = WriteFRcp in {
3276 def SSE_RCPP : OpndItins<
3277 IIC_SSE_RCPP_RR, IIC_SSE_RCPP_RM
3278 >;
3280 def SSE_RCPS : OpndItins<
3281 IIC_SSE_RCPS_RR, IIC_SSE_RCPS_RM
3282 >;
3283 }
3285 /// sse_fp_unop_s - SSE1 unops in scalar form
3286 /// For the non-AVX defs, we need $src1 to be tied to $dst because
3287 /// the HW instructions are 2 operand / destructive.
3288 multiclass sse_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
3289 ValueType vt, ValueType ScalarVT,
3290 X86MemOperand x86memop, Operand vec_memop,
3291 ComplexPattern mem_cpat, Intrinsic Intr,
3292 SDNode OpNode, Domain d, OpndItins itins,
3293 Predicate target, string Suffix> {
3294 let hasSideEffects = 0 in {
3295 def r : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1),
3296 !strconcat(OpcodeStr, "\t{$src1, $dst|$dst, $src1}"),
3297 [(set RC:$dst, (OpNode RC:$src1))], itins.rr, d>, Sched<[itins.Sched]>,
3298 Requires<[target]>;
3300 def m : I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1),
3301 !strconcat(OpcodeStr, "\t{$src1, $dst|$dst, $src1}"),
3302 [(set RC:$dst, (OpNode (load addr:$src1)))], itins.rm, d>,
3303 Sched<[itins.Sched.Folded, ReadAfterLd]>,
3304 Requires<[target, OptForSize]>;
3306 let isCodeGenOnly = 1, Constraints = "$src1 = $dst" in {
3307 def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3308 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3309 []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3311 def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, vec_memop:$src2),
3312 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3313 []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3314 }
3315 }
3317 let Predicates = [target] in {
3318 def : Pat<(vt (OpNode mem_cpat:$src)),
3319 (vt (COPY_TO_REGCLASS (vt (!cast<Instruction>(NAME#Suffix##m_Int)
3320 (vt (IMPLICIT_DEF)), mem_cpat:$src)), RC))>;
3321 // These are unary operations, but they are modeled as having 2 source operands
3322 // because the high elements of the destination are unchanged in SSE.
3323 def : Pat<(Intr VR128:$src),
3324 (!cast<Instruction>(NAME#Suffix##r_Int) VR128:$src, VR128:$src)>;
3325 def : Pat<(Intr (load addr:$src)),
3326 (vt (COPY_TO_REGCLASS(!cast<Instruction>(NAME#Suffix##m)
3327 addr:$src), VR128))>;
3329 // We don't want to fold scalar loads into these instructions unless
3330 // optimizing for size. This is because the folded instruction will have a
3331 // partial register update, while the unfolded sequence will not, e.g.
3332 // movss mem, %xmm0
3333 // rcpss %xmm0, %xmm0
3334 // which has a clobber before the rcp, vs.
3335 // rcpss mem, %xmm0
3336 let Predicates = [target, OptForSize] in {
3337 def : Pat<(Intr mem_cpat:$src),
3338 (!cast<Instruction>(NAME#Suffix##m_Int)
3339 (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
3340 }
3341 }
3343 multiclass avx_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
3344 ValueType vt, ValueType ScalarVT,
3345 X86MemOperand x86memop, Operand vec_memop,
3346 ComplexPattern mem_cpat,
3347 Intrinsic Intr, SDNode OpNode, Domain d,
3348 OpndItins itins, string Suffix> {
3349 let hasSideEffects = 0 in {
3350 def r : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3351 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3352 [], itins.rr, d>, Sched<[itins.Sched]>;
3354 def m : I<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3355 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3356 [], itins.rm, d>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3357 let isCodeGenOnly = 1 in {
3358 def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst),
3359 (ins VR128:$src1, VR128:$src2),
3360 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3361 []>, Sched<[itins.Sched.Folded]>;
3363 def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst),
3364 (ins VR128:$src1, vec_memop:$src2),
3365 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3366 []>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3367 }
3368 }
3370 // We don't want to fold scalar loads into these instructions unless
3371 // optimizing for size. This is because the folded instruction will have a
3372 // partial register update, while the unfolded sequence will not, e.g.
3373 // vmovss mem, %xmm0
3374 // vrcpss %xmm0, %xmm0, %xmm0
3375 // which has a clobber before the rcp, vs.
3376 // vrcpss mem, %xmm0, %xmm0
3377 // TODO: In theory, we could fold the load, and avoid the stall caused by
3378 // the partial register store, either in ExeDepFix or with smarter RA.
3379 let Predicates = [UseAVX] in {
3380 def : Pat<(OpNode RC:$src), (!cast<Instruction>("V"#NAME#Suffix##r)
3381 (ScalarVT (IMPLICIT_DEF)), RC:$src)>;
3382 }
3383 let Predicates = [HasAVX] in {
3384 def : Pat<(Intr VR128:$src),
3385 (!cast<Instruction>("V"#NAME#Suffix##r_Int) (vt (IMPLICIT_DEF)),
3386 VR128:$src)>;
3387 }
3388 let Predicates = [HasAVX, OptForSize] in {
3389 def : Pat<(Intr mem_cpat:$src),
3390 (!cast<Instruction>("V"#NAME#Suffix##m_Int)
3391 (vt (IMPLICIT_DEF)), mem_cpat:$src)>;
3392 }
3393 let Predicates = [UseAVX, OptForSize] in {
3394 def : Pat<(ScalarVT (OpNode (load addr:$src))),
3395 (!cast<Instruction>("V"#NAME#Suffix##m) (ScalarVT (IMPLICIT_DEF)),
3396 addr:$src)>;
3397 def : Pat<(vt (OpNode mem_cpat:$src)),
3398 (!cast<Instruction>("V"#NAME#Suffix##m_Int) (vt (IMPLICIT_DEF)),
3399 mem_cpat:$src)>;
3400 }
3401 }
3403 /// sse1_fp_unop_p - SSE1 unops in packed form.
3404 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
3405 OpndItins itins, list<Predicate> prds> {
3406 let Predicates = prds in {
3407 def V#NAME#PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3408 !strconcat("v", OpcodeStr,
3409 "ps\t{$src, $dst|$dst, $src}"),
3410 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))],
3411 itins.rr>, VEX, Sched<[itins.Sched]>;
3412 def V#NAME#PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3413 !strconcat("v", OpcodeStr,
3414 "ps\t{$src, $dst|$dst, $src}"),
3415 [(set VR128:$dst, (OpNode (loadv4f32 addr:$src)))],
3416 itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
3417 def V#NAME#PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3418 !strconcat("v", OpcodeStr,
3419 "ps\t{$src, $dst|$dst, $src}"),
3420 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))],
3421 itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
3422 def V#NAME#PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3423 !strconcat("v", OpcodeStr,
3424 "ps\t{$src, $dst|$dst, $src}"),
3425 [(set VR256:$dst, (OpNode (loadv8f32 addr:$src)))],
3426 itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
3427 }
3429 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3430 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3431 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))], itins.rr>,
3432 Sched<[itins.Sched]>;
3433 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3434 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
3435 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))], itins.rm>,
3436 Sched<[itins.Sched.Folded]>;
3437 }
3439 /// sse2_fp_unop_p - SSE2 unops in vector forms.
3440 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
3441 SDNode OpNode, OpndItins itins> {
3442 let Predicates = [HasAVX] in {
3443 def V#NAME#PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3444 !strconcat("v", OpcodeStr,
3445 "pd\t{$src, $dst|$dst, $src}"),
3446 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))],
3447 itins.rr>, VEX, Sched<[itins.Sched]>;
3448 def V#NAME#PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3449 !strconcat("v", OpcodeStr,
3450 "pd\t{$src, $dst|$dst, $src}"),
3451 [(set VR128:$dst, (OpNode (loadv2f64 addr:$src)))],
3452 itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
3453 def V#NAME#PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3454 !strconcat("v", OpcodeStr,
3455 "pd\t{$src, $dst|$dst, $src}"),
3456 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))],
3457 itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
3458 def V#NAME#PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3459 !strconcat("v", OpcodeStr,
3460 "pd\t{$src, $dst|$dst, $src}"),
3461 [(set VR256:$dst, (OpNode (loadv4f64 addr:$src)))],
3462 itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
3463 }
3465 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3466 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3467 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))], itins.rr>,
3468 Sched<[itins.Sched]>;
3469 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3470 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
3471 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))], itins.rm>,
3472 Sched<[itins.Sched.Folded]>;
3473 }
3475 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3476 OpndItins itins> {
3477 defm SS : sse_fp_unop_s<opc, OpcodeStr##ss, FR32, v4f32, f32, f32mem,
3478 ssmem, sse_load_f32,
3479 !cast<Intrinsic>("int_x86_sse_"##OpcodeStr##_ss), OpNode,
3480 SSEPackedSingle, itins, UseSSE1, "SS">, XS;
3481 defm V#NAME#SS : avx_fp_unop_s<opc, "v"#OpcodeStr##ss, FR32, v4f32, f32,
3482 f32mem, ssmem, sse_load_f32,
3483 !cast<Intrinsic>("int_x86_sse_"##OpcodeStr##_ss), OpNode,
3484 SSEPackedSingle, itins, "SS">, XS, VEX_4V, VEX_LIG;
3485 }
3487 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
3488 OpndItins itins> {
3489 defm SD : sse_fp_unop_s<opc, OpcodeStr##sd, FR64, v2f64, f64, f64mem,
3490 sdmem, sse_load_f64,
3491 !cast<Intrinsic>("int_x86_sse2_"##OpcodeStr##_sd),
3492 OpNode, SSEPackedDouble, itins, UseSSE2, "SD">, XD;
3493 defm V#NAME#SD : avx_fp_unop_s<opc, "v"#OpcodeStr##sd, FR64, v2f64, f64,
3494 f64mem, sdmem, sse_load_f64,
3495 !cast<Intrinsic>("int_x86_sse2_"##OpcodeStr##_sd),
3496 OpNode, SSEPackedDouble, itins, "SD">,
3497 XD, VEX_4V, VEX_LIG;
3498 }
3500 // Square root.
3501 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, SSE_SQRTSS>,
3502 sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPS, [HasAVX]>,
3503 sse2_fp_unop_s<0x51, "sqrt", fsqrt, SSE_SQRTSD>,
3504 sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPD>;
3506 // Reciprocal approximations. Note that these typically require refinement
3507 // in order to obtain suitable precision.
3508 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, SSE_RSQRTSS>,
3509 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_RSQRTPS, [HasAVX, NoVLX] >;
3510 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, SSE_RCPS>,
3511 sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPP, [HasAVX, NoVLX]>;
3513 // There is no f64 version of the reciprocal approximation instructions.
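// A sketch of the usual refinement (illustrative, not part of this file):
// one Newton-Raphson step, x1 = x0 * (2 - a * x0), roughly doubles the ~12
// bits of precision the hardware rcpps estimate provides:
//   #include <xmmintrin.h>
//   __m128 refined_recip(__m128 a) {
//     __m128 x0 = _mm_rcp_ps(a);  // hardware estimate
//     return _mm_mul_ps(x0, _mm_sub_ps(_mm_set1_ps(2.0f), _mm_mul_ps(a, x0)));
//   }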
3515 // TODO: We should add *scalar* op patterns for these just like we have for
3516 // the binops above. If the binop and unop patterns could all be unified
3517 // that would be even better.
3519 multiclass scalar_unary_math_patterns<Intrinsic Intr, string OpcPrefix,
3520 SDNode Move, ValueType VT,
3521 Predicate BasePredicate> {
3522 let Predicates = [BasePredicate] in {
3523 def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
3524 (!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3525 }
3527 // With SSE 4.1, blendi is preferred to movs*, so match that too.
3528 let Predicates = [UseSSE41] in {
3529 def : Pat<(VT (X86Blendi VT:$dst, (Intr VT:$src), (i8 1))),
3530 (!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3531 }
3533 // Repeat for AVX versions of the instructions.
3534 let Predicates = [HasAVX] in {
3535 def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
3536 (!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3538 def : Pat<(VT (X86Blendi VT:$dst, (Intr VT:$src), (i8 1))),
3539 (!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
3543 defm : scalar_unary_math_patterns<int_x86_sse_rcp_ss, "RCPSS", X86Movss,
3545 defm : scalar_unary_math_patterns<int_x86_sse_rsqrt_ss, "RSQRTSS", X86Movss,
3547 defm : scalar_unary_math_patterns<int_x86_sse_sqrt_ss, "SQRTSS", X86Movss,
3549 defm : scalar_unary_math_patterns<int_x86_sse2_sqrt_sd, "SQRTSD", X86Movsd,
3553 //===----------------------------------------------------------------------===//
3554 // SSE 1 & 2 - Non-temporal stores
3555 //===----------------------------------------------------------------------===//
3557 let AddedComplexity = 400 in { // Prefer non-temporal versions
3558 let SchedRW = [WriteStore] in {
3559 let Predicates = [HasAVX, NoVLX] in {
3560 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
3561 (ins f128mem:$dst, VR128:$src),
3562 "movntps\t{$src, $dst|$dst, $src}",
3563 [(alignednontemporalstore (v4f32 VR128:$src),
3564 addr:$dst)],
3565 IIC_SSE_MOVNT>, VEX;
3566 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
3567 (ins f128mem:$dst, VR128:$src),
3568 "movntpd\t{$src, $dst|$dst, $src}",
3569 [(alignednontemporalstore (v2f64 VR128:$src),
3570 addr:$dst)],
3571 IIC_SSE_MOVNT>, VEX;
3573 let ExeDomain = SSEPackedInt in
3574 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
3575 (ins f128mem:$dst, VR128:$src),
3576 "movntdq\t{$src, $dst|$dst, $src}",
3577 [(alignednontemporalstore (v2i64 VR128:$src),
3578 addr:$dst)],
3579 IIC_SSE_MOVNT>, VEX;
3581 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
3582 (ins f256mem:$dst, VR256:$src),
3583 "movntps\t{$src, $dst|$dst, $src}",
3584 [(alignednontemporalstore (v8f32 VR256:$src),
3585 addr:$dst)],
3586 IIC_SSE_MOVNT>, VEX, VEX_L;
3587 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
3588 (ins f256mem:$dst, VR256:$src),
3589 "movntpd\t{$src, $dst|$dst, $src}",
3590 [(alignednontemporalstore (v4f64 VR256:$src),
3591 addr:$dst)],
3592 IIC_SSE_MOVNT>, VEX, VEX_L;
3593 let ExeDomain = SSEPackedInt in
3594 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
3595 (ins f256mem:$dst, VR256:$src),
3596 "movntdq\t{$src, $dst|$dst, $src}",
3597 [(alignednontemporalstore (v4i64 VR256:$src),
3599 IIC_SSE_MOVNT>, VEX, VEX_L;
3600 }
3602 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3603 "movntps\t{$src, $dst|$dst, $src}",
3604 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)],
3605 IIC_SSE_MOVNT>;
3606 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3607 "movntpd\t{$src, $dst|$dst, $src}",
3608 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)],
3609 IIC_SSE_MOVNT>;
3611 let ExeDomain = SSEPackedInt in
3612 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3613 "movntdq\t{$src, $dst|$dst, $src}",
3614 [(alignednontemporalstore (v2i64 VR128:$src), addr:$dst)],
3615 IIC_SSE_MOVNT>;
3617 // There is no AVX form for instructions below this point
3618 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
3619 "movnti{l}\t{$src, $dst|$dst, $src}",
3620 [(nontemporalstore (i32 GR32:$src), addr:$dst)],
3621 IIC_SSE_MOVNT>,
3622 PS, Requires<[HasSSE2]>;
3623 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
3624 "movnti{q}\t{$src, $dst|$dst, $src}",
3625 [(nontemporalstore (i64 GR64:$src), addr:$dst)],
3626 IIC_SSE_MOVNT>,
3627 PS, Requires<[HasSSE2]>;
3628 } // SchedRW = [WriteStore]
3630 let Predicates = [HasAVX, NoVLX] in {
3631 def : Pat<(alignednontemporalstore (v8i32 VR256:$src), addr:$dst),
3632 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3633 def : Pat<(alignednontemporalstore (v16i16 VR256:$src), addr:$dst),
3634 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3635 def : Pat<(alignednontemporalstore (v32i8 VR256:$src), addr:$dst),
3636 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
3638 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3639 (VMOVNTDQmr addr:$dst, VR128:$src)>;
3640 def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
3641 (VMOVNTDQmr addr:$dst, VR128:$src)>;
3642 def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
3643 (VMOVNTDQmr addr:$dst, VR128:$src)>;
3644 }
3646 let Predicates = [UseSSE2] in {
3647 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3648 (MOVNTDQmr addr:$dst, VR128:$src)>;
3649 def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
3650 (MOVNTDQmr addr:$dst, VR128:$src)>;
3651 def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
3652 (MOVNTDQmr addr:$dst, VR128:$src)>;
3653 }
3655 } // AddedComplexity
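// Usage sketch (illustrative): the stores above are what _mm_stream_ps and
// friends select. The destination must be 16-byte aligned, and a trailing
// sfence publishes the streamed data:
//   #include <xmmintrin.h>
//   void stream_fill(float *dst, __m128 v, int n) { // dst 16-byte aligned
//     for (int i = 0; i + 4 <= n; i += 4)
//       _mm_stream_ps(dst + i, v);  // movntps, bypasses the cache
//     _mm_sfence();
//   }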
3657 //===----------------------------------------------------------------------===//
3658 // SSE 1 & 2 - Prefetch and memory fence
3659 //===----------------------------------------------------------------------===//
3661 // Prefetch intrinsic.
3662 let Predicates = [HasSSE1], SchedRW = [WriteLoad] in {
3663 def PREFETCHT0 : I<0x18, MRM1m, (outs), (ins i8mem:$src),
3664 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))],
3665 IIC_SSE_PREFETCH>, TB;
3666 def PREFETCHT1 : I<0x18, MRM2m, (outs), (ins i8mem:$src),
3667 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))],
3668 IIC_SSE_PREFETCH>, TB;
3669 def PREFETCHT2 : I<0x18, MRM3m, (outs), (ins i8mem:$src),
3670 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))],
3671 IIC_SSE_PREFETCH>, TB;
3672 def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
3673 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))],
3674 IIC_SSE_PREFETCH>, TB;
3675 }
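// The four locality hints map onto _mm_prefetch; a minimal sketch (function
// name illustrative):
//   #include <xmmintrin.h>
//   void warm(const char *p) {
//     _mm_prefetch(p, _MM_HINT_T0);   // prefetcht0: into all cache levels
//     _mm_prefetch(p, _MM_HINT_NTA);  // prefetchnta: minimize cache pollution
//   }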
3677 // FIXME: How should flush instruction be modeled?
3678 let SchedRW = [WriteLoad] in {
3680 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3681 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)],
3682 IIC_SSE_PREFETCH>, PS, Requires<[HasSSE2]>;
3683 }
3685 let SchedRW = [WriteNop] in {
3686 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3687 // was introduced with SSE2, it's backward compatible.
3688 def PAUSE : I<0x90, RawFrm, (outs), (ins),
3689 "pause", [(int_x86_sse2_pause)], IIC_SSE_PAUSE>,
3690 OBXS, Requires<[HasSSE2]>;
3691 }
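// Typical use is a spin-wait loop; a sketch (illustrative):
//   #include <emmintrin.h>
//   void spin_until_set(volatile int *flag) {
//     while (!*flag)
//       _mm_pause();  // "rep; nop" encoding, degrades to a plain nop pre-SSE2
//   }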
3693 let SchedRW = [WriteFence] in {
3694 // Load, store, and memory fence
3695 // TODO: As with mfence, we may want to ease the availability of sfence/lfence
3696 // to include any 64-bit target.
3697 def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
3698 "sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
3699 PS, Requires<[HasSSE1]>;
3700 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3701 "lfence", [(int_x86_sse2_lfence)], IIC_SSE_LFENCE>,
3702 TB, Requires<[HasSSE2]>;
3703 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3704 "mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
3705 TB, Requires<[HasMFence]>;
3706 }
3708 def : Pat<(X86MFence), (MFENCE)>;
3710 //===----------------------------------------------------------------------===//
3711 // SSE 1 & 2 - Load/Store XCSR register
3712 //===----------------------------------------------------------------------===//
3714 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
3715 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
3716 IIC_SSE_LDMXCSR>, VEX, Sched<[WriteLoad]>;
3717 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3718 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
3719 IIC_SSE_STMXCSR>, VEX, Sched<[WriteStore]>;
3721 let Predicates = [UseSSE1] in {
3722 def LDMXCSR : I<0xAE, MRM2m, (outs), (ins i32mem:$src),
3723 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
3724 IIC_SSE_LDMXCSR>, TB, Sched<[WriteLoad]>;
3725 def STMXCSR : I<0xAE, MRM3m, (outs), (ins i32mem:$dst),
3726 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
3727 IIC_SSE_STMXCSR>, TB, Sched<[WriteStore]>;
3728 }
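// ldmxcsr/stmxcsr are reached through _mm_setcsr/_mm_getcsr, e.g. to force
// round-toward-zero (sketch, illustrative):
//   #include <xmmintrin.h>
//   void set_round_toward_zero(void) {
//     unsigned csr = _mm_getcsr();              // stmxcsr
//     _mm_setcsr(csr | _MM_ROUND_TOWARD_ZERO);  // ldmxcsr, sets bits 13-14
//   }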
3730 //===---------------------------------------------------------------------===//
3731 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
3732 //===---------------------------------------------------------------------===//
3734 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3736 let hasSideEffects = 0, SchedRW = [WriteMove] in {
3737 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3738 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
3740 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3741 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
3743 def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3744 "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
3746 def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3747 "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
3752 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
3753 SchedRW = [WriteMove] in {
3754 def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3755 "movdqa\t{$src, $dst|$dst, $src}", [],
3758 def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3759 "movdqa\t{$src, $dst|$dst, $src}", [],
3760 IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
3761 def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3762 "movdqu\t{$src, $dst|$dst, $src}", [],
3765 def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
3766 "movdqu\t{$src, $dst|$dst, $src}", [],
3767 IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
3768 }
3770 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3771 hasSideEffects = 0, SchedRW = [WriteLoad] in {
3772 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3773 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
3775 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3776 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
3778 let Predicates = [HasAVX] in {
3779 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3780 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
3782 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3783 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
3788 let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
3789 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
3790 (ins i128mem:$dst, VR128:$src),
3791 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
3793 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
3794 (ins i256mem:$dst, VR256:$src),
3795 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
3797 let Predicates = [HasAVX] in {
3798 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3799 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
3801 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
3802 "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
3807 let SchedRW = [WriteMove] in {
3808 let hasSideEffects = 0 in
3809 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3810 "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>;
3812 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3813 "movdqu\t{$src, $dst|$dst, $src}",
3814 [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
3816 // For Disassembler
3817 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
3818 def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3819 "movdqa\t{$src, $dst|$dst, $src}", [],
3822 def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
3823 "movdqu\t{$src, $dst|$dst, $src}",
3824 [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
3825 }
3826 }
3828 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
3829 hasSideEffects = 0, SchedRW = [WriteLoad] in {
3830 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3831 "movdqa\t{$src, $dst|$dst, $src}",
3832 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/],
3833 IIC_SSE_MOVA_P_RM>;
3834 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3835 "movdqu\t{$src, $dst|$dst, $src}",
3836 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/],
3837 IIC_SSE_MOVU_P_RM>,
3838 XS, Requires<[UseSSE2]>;
3839 }
3841 let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
3842 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3843 "movdqa\t{$src, $dst|$dst, $src}",
3844 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
3845 IIC_SSE_MOVA_P_MR>;
3846 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
3847 "movdqu\t{$src, $dst|$dst, $src}",
3848 [/*(store (v2i64 VR128:$src), addr:$dst)*/],
3849 IIC_SSE_MOVU_P_MR>,
3850 XS, Requires<[UseSSE2]>;
3851 }
3853 } // ExeDomain = SSEPackedInt
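// The aligned/unaligned split corresponds to the usual intrinsic pair
// (sketch, illustrative): movdqa faults on a misaligned address, movdqu
// accepts any address.
//   #include <emmintrin.h>
//   __m128i load_a(const __m128i *p) { return _mm_load_si128(p); }  // movdqa
//   __m128i load_u(const void *p) {
//     return _mm_loadu_si128((const __m128i *)p);                   // movdqu
//   }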
3855 // Aliases to help the assembler pick two byte VEX encodings by swapping the
3856 // operands relative to the normal instructions to use VEX.R instead of VEX.B.
3857 def : InstAlias<"vmovdqa\t{$src, $dst|$dst, $src}",
3858 (VMOVDQArr_REV VR128L:$dst, VR128H:$src), 0>;
3859 def : InstAlias<"vmovdqa\t{$src, $dst|$dst, $src}",
3860 (VMOVDQAYrr_REV VR256L:$dst, VR256H:$src), 0>;
3861 def : InstAlias<"vmovdqu\t{$src, $dst|$dst, $src}",
3862 (VMOVDQUrr_REV VR128L:$dst, VR128H:$src), 0>;
3863 def : InstAlias<"vmovdqu\t{$src, $dst|$dst, $src}",
3864 (VMOVDQUYrr_REV VR256L:$dst, VR256H:$src), 0>;
3866 //===---------------------------------------------------------------------===//
3867 // SSE2 - Packed Integer Arithmetic Instructions
3868 //===---------------------------------------------------------------------===//
3870 let Sched = WriteVecIMul in
3871 def SSE_PMADD : OpndItins<
3872 IIC_SSE_PMADD, IIC_SSE_PMADD
3873 >;
3875 let ExeDomain = SSEPackedInt in { // SSE integer instructions
3877 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
3878 RegisterClass RC, PatFrag memop_frag,
3879 X86MemOperand x86memop,
3880 OpndItins itins,
3881 bit IsCommutable = 0,
3882 bit Is2Addr = 1> {
3883 let isCommutable = IsCommutable in
3884 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3885 (ins RC:$src1, RC:$src2),
3886 !if(Is2Addr,
3887 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3888 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3889 [(set RC:$dst, (IntId RC:$src1, RC:$src2))], itins.rr>,
3890 Sched<[itins.Sched]>;
3891 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3892 (ins RC:$src1, x86memop:$src2),
3893 !if(Is2Addr,
3894 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3895 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3896 [(set RC:$dst, (IntId RC:$src1, (bitconvert (memop_frag addr:$src2))))],
3897 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
3898 }
3900 multiclass PDI_binop_all_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
3901 Intrinsic IntId256, OpndItins itins,
3902 bit IsCommutable = 0> {
3903 let Predicates = [HasAVX] in
3904 defm V#NAME : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId128,
3905 VR128, loadv2i64, i128mem, itins,
3906 IsCommutable, 0>, VEX_4V;
3908 let Constraints = "$src1 = $dst" in
3909 defm NAME : PDI_binop_rm_int<opc, OpcodeStr, IntId128, VR128, memopv2i64,
3910 i128mem, itins, IsCommutable, 1>;
3912 let Predicates = [HasAVX2] in
3913 defm V#NAME#Y : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId256,
3914 VR256, loadv4i64, i256mem, itins,
3915 IsCommutable, 0>, VEX_4V, VEX_L;
3916 }
3918 multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
3919 string OpcodeStr, SDNode OpNode,
3920 SDNode OpNode2, RegisterClass RC,
3921 ValueType DstVT, ValueType SrcVT,
3922 PatFrag ld_frag, ShiftOpndItins itins,
3923 bit Is2Addr = 1> {
3924 // src2 is always 128-bit
3925 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3926 (ins RC:$src1, VR128:$src2),
3927 !if(Is2Addr,
3928 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3929 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3930 [(set RC:$dst, (DstVT (OpNode RC:$src1, (SrcVT VR128:$src2))))],
3931 itins.rr>, Sched<[WriteVecShift]>;
3932 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3933 (ins RC:$src1, i128mem:$src2),
3934 !if(Is2Addr,
3935 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3936 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3937 [(set RC:$dst, (DstVT (OpNode RC:$src1,
3938 (SrcVT (bitconvert (ld_frag addr:$src2))))))], itins.rm>,
3939 Sched<[WriteVecShiftLd, ReadAfterLd]>;
3940 def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
3941 (ins RC:$src1, u8imm:$src2),
3942 !if(Is2Addr,
3943 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3944 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3945 [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i8 imm:$src2))))], itins.ri>,
3946 Sched<[WriteVecShift]>;
3947 }
3949 /// PDI_binop_rm2 - Simple SSE2 binary operator with different src and dst types
3950 multiclass PDI_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
3951 ValueType DstVT, ValueType SrcVT, RegisterClass RC,
3952 PatFrag memop_frag, X86MemOperand x86memop,
3953 OpndItins itins,
3954 bit IsCommutable = 0, bit Is2Addr = 1> {
3955 let isCommutable = IsCommutable in
3956 def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
3957 (ins RC:$src1, RC:$src2),
3958 !if(Is2Addr,
3959 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3960 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3961 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
3962 Sched<[itins.Sched]>;
3963 def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
3964 (ins RC:$src1, x86memop:$src2),
3965 !if(Is2Addr,
3966 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3967 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3968 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
3969 (bitconvert (memop_frag addr:$src2)))))]>,
3970 Sched<[itins.Sched.Folded, ReadAfterLd]>;
3971 }
3972 } // ExeDomain = SSEPackedInt
3974 defm PADDB : PDI_binop_all<0xFC, "paddb", add, v16i8, v32i8,
3975 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
3976 defm PADDW : PDI_binop_all<0xFD, "paddw", add, v8i16, v16i16,
3977 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
3978 defm PADDD : PDI_binop_all<0xFE, "paddd", add, v4i32, v8i32,
3979 SSE_INTALU_ITINS_P, 1, NoVLX>;
3980 defm PADDQ : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
3981 SSE_INTALUQ_ITINS_P, 1, NoVLX>;
3982 defm PADDSB : PDI_binop_all<0xEC, "paddsb", X86adds, v16i8, v32i8,
3983 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
3984 defm PADDSW : PDI_binop_all<0xED, "paddsw", X86adds, v8i16, v16i16,
3985 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
3986 defm PADDUSB : PDI_binop_all<0xDC, "paddusb", X86addus, v16i8, v32i8,
3987 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
3988 defm PADDUSW : PDI_binop_all<0xDD, "paddusw", X86addus, v8i16, v16i16,
3989 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
3990 defm PMULLW : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
3991 SSE_INTMUL_ITINS_P, 1, NoVLX_Or_NoBWI>;
3992 defm PMULHUW : PDI_binop_all<0xE4, "pmulhuw", mulhu, v8i16, v16i16,
3993 SSE_INTMUL_ITINS_P, 1, NoVLX_Or_NoBWI>;
3994 defm PMULHW : PDI_binop_all<0xE5, "pmulhw", mulhs, v8i16, v16i16,
3995 SSE_INTMUL_ITINS_P, 1, NoVLX_Or_NoBWI>;
3996 defm PSUBB : PDI_binop_all<0xF8, "psubb", sub, v16i8, v32i8,
3997 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
3998 defm PSUBW : PDI_binop_all<0xF9, "psubw", sub, v8i16, v16i16,
3999 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
4000 defm PSUBD : PDI_binop_all<0xFA, "psubd", sub, v4i32, v8i32,
4001 SSE_INTALU_ITINS_P, 0, NoVLX>;
4002 defm PSUBQ : PDI_binop_all<0xFB, "psubq", sub, v2i64, v4i64,
4003 SSE_INTALUQ_ITINS_P, 0, NoVLX>;
4004 defm PSUBSB : PDI_binop_all<0xE8, "psubsb", X86subs, v16i8, v32i8,
4005 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
4006 defm PSUBSW : PDI_binop_all<0xE9, "psubsw", X86subs, v8i16, v16i16,
4007 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
4008 defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", X86subus, v16i8, v32i8,
4009 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
4010 defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", X86subus, v8i16, v16i16,
4011 SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
4012 defm PMINUB : PDI_binop_all<0xDA, "pminub", umin, v16i8, v32i8,
4013 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4014 defm PMINSW : PDI_binop_all<0xEA, "pminsw", smin, v8i16, v16i16,
4015 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4016 defm PMAXUB : PDI_binop_all<0xDE, "pmaxub", umax, v16i8, v32i8,
4017 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4018 defm PMAXSW : PDI_binop_all<0xEE, "pmaxsw", smax, v8i16, v16i16,
4019 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4020 defm PAVGB : PDI_binop_all<0xE0, "pavgb", X86avg, v16i8, v32i8,
4021 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4022 defm PAVGW : PDI_binop_all<0xE3, "pavgw", X86avg, v8i16, v16i16,
4023 SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
4026 defm PMADDWD : PDI_binop_all_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
4027 int_x86_avx2_pmadd_wd, SSE_PMADD, 1>;
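// pmaddwd multiplies eight i16 pairs and adds adjacent products into four
// i32 lanes, r[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1]; a sketch (illustrative):
//   #include <emmintrin.h>
//   __m128i dot_step(__m128i a, __m128i b) { return _mm_madd_epi16(a, b); }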
4029 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
4030 defm VPSADBW : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v2i64, v16i8, VR128,
4031 loadv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
4032 VEX_4V;
4033 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
4034 defm VPSADBWY : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v4i64, v32i8, VR256,
4035 loadv4i64, i256mem, SSE_INTMUL_ITINS_P, 1, 0>,
4036 VEX_4V, VEX_L;
4037 let Constraints = "$src1 = $dst" in
4038 defm PSADBW : PDI_binop_rm2<0xF6, "psadbw", X86psadbw, v2i64, v16i8, VR128,
4039 memopv2i64, i128mem, SSE_INTALU_ITINS_P, 1>;
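// psadbw takes absolute differences of sixteen u8 lanes and accumulates them
// into one u16 sum per 8-byte half; a sketch (illustrative):
//   #include <emmintrin.h>
//   __m128i sad16(__m128i a, __m128i b) { return _mm_sad_epu8(a, b); }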
4041 let Predicates = [HasAVX, NoVLX] in
4042 defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32, VR128,
4043 loadv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
4044 VEX_4V;
4045 let Predicates = [HasAVX2, NoVLX] in
4046 defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
4047 VR256, loadv4i64, i256mem,
4048 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
4049 let Constraints = "$src1 = $dst" in
4050 defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
4051 memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1>;
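// pmuludq multiplies the even (0 and 2) u32 lanes into two full u64 products;
// a sketch (illustrative):
//   #include <emmintrin.h>
//   __m128i widening_mul(__m128i a, __m128i b) { return _mm_mul_epu32(a, b); }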
4053 //===---------------------------------------------------------------------===//
4054 // SSE2 - Packed Integer Logical Instructions
4055 //===---------------------------------------------------------------------===//
4057 let Predicates = [HasAVX, NoVLX] in {
4058 defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
4059 VR128, v4i32, v4i32, loadv2i64,
4060 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4061 defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
4062 VR128, v2i64, v2i64, loadv2i64,
4063 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4065 defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
4066 VR128, v4i32, v4i32, loadv2i64,
4067 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4068 defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
4069 VR128, v2i64, v2i64, loadv2i64,
4070 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4072 defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
4073 VR128, v4i32, v4i32, loadv2i64,
4074 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4075 } // Predicates = [HasAVX, NoVLX]
4077 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
4078 defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
4079 VR128, v8i16, v8i16, loadv2i64,
4080 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4081 defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
4082 VR128, v8i16, v8i16, loadv2i64,
4083 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4084 defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
4085 VR128, v8i16, v8i16, loadv2i64,
4086 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
4087 } // Predicates = [HasAVX, NoVLX_Or_NoBWI]
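// Note the rr/rm forms take the shift count in the low 64 bits of an xmm
// register while the ri form takes an immediate; in intrinsic terms (sketch,
// illustrative):
//   #include <emmintrin.h>
//   __m128i shl_imm(__m128i v)            { return _mm_slli_epi32(v, 3); }
//   __m128i shl_reg(__m128i v, __m128i c) { return _mm_sll_epi32(v, c); }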
4090 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift],
4091 Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
4092 // 128-bit logical shifts.
4093 def VPSLLDQri : PDIi8<0x73, MRM7r,
4094 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4095 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4097 (v16i8 (X86vshldq VR128:$src1, (i8 imm:$src2))))]>,
4099 def VPSRLDQri : PDIi8<0x73, MRM3r,
4100 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4101 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4103 (v16i8 (X86vshrdq VR128:$src1, (i8 imm:$src2))))]>,
4105 // PSRADQri doesn't exist in SSE[1-3].
4106 } // Predicates = [HasAVX, NoVLX_Or_NoBWI]
4108 let Predicates = [HasAVX2, NoVLX] in {
4109 defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
4110 VR256, v8i32, v4i32, loadv2i64,
4111 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4112 defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
4113 VR256, v4i64, v2i64, loadv2i64,
4114 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4116 defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
4117 VR256, v8i32, v4i32, loadv2i64,
4118 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4119 defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
4120 VR256, v4i64, v2i64, loadv2i64,
4121 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4123 defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
4124 VR256, v8i32, v4i32, loadv2i64,
4125 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4126 } // Predicates = [HasAVX2, NoVLX]
4128 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
4129 defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
4130 VR256, v16i16, v8i16, loadv2i64,
4131 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4132 defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
4133 VR256, v16i16, v8i16, loadv2i64,
4134 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4135 defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
4136 VR256, v16i16, v8i16, loadv2i64,
4137 SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
4138 } // Predicates = [HasAVX2, NoVLX_Or_NoBWI]
4140 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 ,
4141 Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
4142 // 256-bit logical shifts.
4143 def VPSLLDQYri : PDIi8<0x73, MRM7r,
4144 (outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
4145 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4146 [(set VR256:$dst,
4147 (v32i8 (X86vshldq VR256:$src1, (i8 imm:$src2))))]>,
4148 VEX_4V, VEX_L;
4149 def VPSRLDQYri : PDIi8<0x73, MRM3r,
4150 (outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
4151 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4152 [(set VR256:$dst,
4153 (v32i8 (X86vshrdq VR256:$src1, (i8 imm:$src2))))]>,
4154 VEX_4V, VEX_L;
4155 // PSRADQYri doesn't exist in SSE[1-3].
4156 } // Predicates = [HasAVX2, NoVLX_Or_NoBWI]
4158 let Constraints = "$src1 = $dst" in {
4159 defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
4160 VR128, v8i16, v8i16, memopv2i64,
4161 SSE_INTSHIFT_ITINS_P>;
4162 defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
4163 VR128, v4i32, v4i32, memopv2i64,
4164 SSE_INTSHIFT_ITINS_P>;
4165 defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
4166 VR128, v2i64, v2i64, memopv2i64,
4167 SSE_INTSHIFT_ITINS_P>;
4169 defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
4170 VR128, v8i16, v8i16, memopv2i64,
4171 SSE_INTSHIFT_ITINS_P>;
4172 defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
4173 VR128, v4i32, v4i32, memopv2i64,
4174 SSE_INTSHIFT_ITINS_P>;
4175 defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
4176 VR128, v2i64, v2i64, memopv2i64,
4177 SSE_INTSHIFT_ITINS_P>;
4179 defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
4180 VR128, v8i16, v8i16, memopv2i64,
4181 SSE_INTSHIFT_ITINS_P>;
4182 defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
4183 VR128, v4i32, v4i32, memopv2i64,
4184 SSE_INTSHIFT_ITINS_P>;
4186 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
4187 // 128-bit logical shifts.
4188 def PSLLDQri : PDIi8<0x73, MRM7r,
4189 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4190 "pslldq\t{$src2, $dst|$dst, $src2}",
4191 [(set VR128:$dst,
4192 (v16i8 (X86vshldq VR128:$src1, (i8 imm:$src2))))],
4193 IIC_SSE_INTSHDQ_P_RI>;
4194 def PSRLDQri : PDIi8<0x73, MRM3r,
4195 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4196 "psrldq\t{$src2, $dst|$dst, $src2}",
4197 [(set VR128:$dst,
4198 (v16i8 (X86vshrdq VR128:$src1, (i8 imm:$src2))))],
4199 IIC_SSE_INTSHDQ_P_RI>;
4200 // PSRADQri doesn't exist in SSE[1-3].
4201 }
4202 } // Constraints = "$src1 = $dst"
4204 //===---------------------------------------------------------------------===//
4205 // SSE2 - Packed Integer Comparison Instructions
4206 //===---------------------------------------------------------------------===//
4208 defm PCMPEQB : PDI_binop_all<0x74, "pcmpeqb", X86pcmpeq, v16i8, v32i8,
4209 SSE_INTALU_ITINS_P, 1, TruePredicate>;
4210 defm PCMPEQW : PDI_binop_all<0x75, "pcmpeqw", X86pcmpeq, v8i16, v16i16,
4211 SSE_INTALU_ITINS_P, 1, TruePredicate>;
4212 defm PCMPEQD : PDI_binop_all<0x76, "pcmpeqd", X86pcmpeq, v4i32, v8i32,
4213 SSE_INTALU_ITINS_P, 1, TruePredicate>;
4214 defm PCMPGTB : PDI_binop_all<0x64, "pcmpgtb", X86pcmpgt, v16i8, v32i8,
4215 SSE_INTALU_ITINS_P, 0, TruePredicate>;
4216 defm PCMPGTW : PDI_binop_all<0x65, "pcmpgtw", X86pcmpgt, v8i16, v16i16,
4217 SSE_INTALU_ITINS_P, 0, TruePredicate>;
4218 defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
4219 SSE_INTALU_ITINS_P, 0, TruePredicate>;
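// For reference (illustrative only): the packed compares produce an all-ones
// element where the predicate holds and zero otherwise, e.g. pcmpgtd on
//   a = <5, -1, 7, 0>, b = <3, 2, 7, -4>  ==>  <-1, 0, 0, -1>
// so the result doubles as a select mask for later pand/pandn/por logic.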
4221 //===---------------------------------------------------------------------===//
4222 // SSE2 - Packed Integer Shuffle Instructions
4223 //===---------------------------------------------------------------------===//
4225 let ExeDomain = SSEPackedInt in {
4226 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt128, ValueType vt256,
4227 SDNode OpNode, Predicate prd> {
4228 let Predicates = [HasAVX, prd] in {
4229 def V#NAME#ri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst),
4230 (ins VR128:$src1, u8imm:$src2),
4231 !strconcat("v", OpcodeStr,
4232 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4233 [(set VR128:$dst,
4234 (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
4235 IIC_SSE_PSHUF_RI>, VEX, Sched<[WriteShuffle]>;
4236 def V#NAME#mi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst),
4237 (ins i128mem:$src1, u8imm:$src2),
4238 !strconcat("v", OpcodeStr,
4239 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4240 [(set VR128:$dst,
4241 (vt128 (OpNode (bitconvert (loadv2i64 addr:$src1)),
4242 (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>, VEX,
4243 Sched<[WriteShuffleLd]>;
4244 }
4246 let Predicates = [HasAVX2, prd] in {
4247 def V#NAME#Yri : Ii8<0x70, MRMSrcReg, (outs VR256:$dst),
4248 (ins VR256:$src1, u8imm:$src2),
4249 !strconcat("v", OpcodeStr,
4250 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4251 [(set VR256:$dst,
4252 (vt256 (OpNode VR256:$src1, (i8 imm:$src2))))],
4253 IIC_SSE_PSHUF_RI>, VEX, VEX_L, Sched<[WriteShuffle]>;
4254 def V#NAME#Ymi : Ii8<0x70, MRMSrcMem, (outs VR256:$dst),
4255 (ins i256mem:$src1, u8imm:$src2),
4256 !strconcat("v", OpcodeStr,
4257 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4258 [(set VR256:$dst,
4259 (vt256 (OpNode (bitconvert (loadv4i64 addr:$src1)),
4260 (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>, VEX, VEX_L,
4261 Sched<[WriteShuffleLd]>;
4262 }
4264 let Predicates = [UseSSE2] in {
4265 def ri : Ii8<0x70, MRMSrcReg,
4266 (outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
4267 !strconcat(OpcodeStr,
4268 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4269 [(set VR128:$dst,
4270 (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
4271 IIC_SSE_PSHUF_RI>, Sched<[WriteShuffle]>;
4272 def mi : Ii8<0x70, MRMSrcMem,
4273 (outs VR128:$dst), (ins i128mem:$src1, u8imm:$src2),
4274 !strconcat(OpcodeStr,
4275 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4276 [(set VR128:$dst,
4277 (vt128 (OpNode (bitconvert (memopv2i64 addr:$src1)),
4278 (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>,
4279 Sched<[WriteShuffleLd, ReadAfterLd]>;
4280 }
4281 }
4282 } // ExeDomain = SSEPackedInt
4284 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd, NoVLX>, PD;
4285 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw,
4286 NoVLX_Or_NoBWI>, XS;
4287 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw,
4288 NoVLX_Or_NoBWI>, XD;
4290 let Predicates = [HasAVX] in {
4291 def : Pat<(v4f32 (X86PShufd (loadv4f32 addr:$src1), (i8 imm:$imm))),
4292 (VPSHUFDmi addr:$src1, imm:$imm)>;
4293 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
4294 (VPSHUFDri VR128:$src1, imm:$imm)>;
4295 }
4297 let Predicates = [UseSSE2] in {
4298 def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
4299 (PSHUFDmi addr:$src1, imm:$imm)>;
4300 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
4301 (PSHUFDri VR128:$src1, imm:$imm)>;
4302 }
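// For reference (illustrative only): each 2-bit field of the pshufd immediate
// picks the source dword for the corresponding result lane, e.g.
//   pshufd $0x1B, %xmm0, %xmm1   ; 0x1B = 0b00011011, lanes <3,2,1,0> reversed
// and the v4f32 patterns above reuse the integer shuffle for float data.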
4304 //===---------------------------------------------------------------------===//
4305 // Packed Integer Pack Instructions (SSE & AVX)
4306 //===---------------------------------------------------------------------===//
4308 let ExeDomain = SSEPackedInt in {
4309 multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
4310 ValueType ArgVT, SDNode OpNode, PatFrag ld_frag,
4311 bit Is2Addr = 1> {
4312 def rr : PDI<opc, MRMSrcReg,
4313 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4314 !if(Is2Addr,
4315 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4316 !strconcat(OpcodeStr,
4317 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4318 [(set VR128:$dst,
4319 (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
4320 Sched<[WriteShuffle]>;
4321 def rm : PDI<opc, MRMSrcMem,
4322 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4323 !if(Is2Addr,
4324 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4325 !strconcat(OpcodeStr,
4326 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4327 [(set VR128:$dst,
4328 (OutVT (OpNode (ArgVT VR128:$src1),
4329 (bitconvert (ld_frag addr:$src2)))))]>,
4330 Sched<[WriteShuffleLd, ReadAfterLd]>;
4331 }
4333 multiclass sse2_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
4334 ValueType ArgVT, SDNode OpNode> {
4335 def Yrr : PDI<opc, MRMSrcReg,
4336 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4337 !strconcat(OpcodeStr,
4338 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4340 (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
4341 Sched<[WriteShuffle]>;
4342 def Yrm : PDI<opc, MRMSrcMem,
4343 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4344 !strconcat(OpcodeStr,
4345 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4347 (OutVT (OpNode (ArgVT VR256:$src1),
4348 (bitconvert (loadv4i64 addr:$src2)))))]>,
4349 Sched<[WriteShuffleLd, ReadAfterLd]>;
4352 multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
4353 ValueType ArgVT, SDNode OpNode, PatFrag ld_frag,
4354 bit Is2Addr = 1> {
4355 def rr : SS48I<opc, MRMSrcReg,
4356 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4357 !if(Is2Addr,
4358 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4359 !strconcat(OpcodeStr,
4360 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4361 [(set VR128:$dst,
4362 (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
4363 Sched<[WriteShuffle]>;
4364 def rm : SS48I<opc, MRMSrcMem,
4365 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4366 !if(Is2Addr,
4367 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4368 !strconcat(OpcodeStr,
4369 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4370 [(set VR128:$dst,
4371 (OutVT (OpNode (ArgVT VR128:$src1),
4372 (bitconvert (ld_frag addr:$src2)))))]>,
4373 Sched<[WriteShuffleLd, ReadAfterLd]>;
4374 }
4376 multiclass sse4_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
4377 ValueType ArgVT, SDNode OpNode> {
4378 def Yrr : SS48I<opc, MRMSrcReg,
4379 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4380 !strconcat(OpcodeStr,
4381 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4383 (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
4384 Sched<[WriteShuffle]>;
4385 def Yrm : SS48I<opc, MRMSrcMem,
4386 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4387 !strconcat(OpcodeStr,
4388 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4390 (OutVT (OpNode (ArgVT VR256:$src1),
4391 (bitconvert (loadv4i64 addr:$src2)))))]>,
4392 Sched<[WriteShuffleLd, ReadAfterLd]>;
4395 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
4396 defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss,
4397 loadv2i64, 0>, VEX_4V;
4398 defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss,
4399 loadv2i64, 0>, VEX_4V;
4401 defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus,
4402 loadv2i64, 0>, VEX_4V;
4403 defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus,
4404 loadv2i64, 0>, VEX_4V;
4405 }
4407 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
4408 defm VPACKSSWB : sse2_pack_y<0x63, "vpacksswb", v32i8, v16i16, X86Packss>,
4409 VEX_4V, VEX_L;
4410 defm VPACKSSDW : sse2_pack_y<0x6B, "vpackssdw", v16i16, v8i32, X86Packss>,
4411 VEX_4V, VEX_L;
4413 defm VPACKUSWB : sse2_pack_y<0x67, "vpackuswb", v32i8, v16i16, X86Packus>,
4414 VEX_4V, VEX_L;
4415 defm VPACKUSDW : sse4_pack_y<0x2B, "vpackusdw", v16i16, v8i32, X86Packus>,
4416 VEX_4V, VEX_L;
4417 }
4419 let Constraints = "$src1 = $dst" in {
4420 defm PACKSSWB : sse2_pack<0x63, "packsswb", v16i8, v8i16, X86Packss,
4421 memopv2i64>;
4422 defm PACKSSDW : sse2_pack<0x6B, "packssdw", v8i16, v4i32, X86Packss,
4423 memopv2i64>;
4425 defm PACKUSWB : sse2_pack<0x67, "packuswb", v16i8, v8i16, X86Packus,
4426 memopv2i64>;
4428 defm PACKUSDW : sse4_pack<0x2B, "packusdw", v8i16, v4i32, X86Packus,
4429 memopv2i64>;
4430 }
4431 } // ExeDomain = SSEPackedInt
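// For reference (illustrative only): the pack instructions narrow with
// saturation, e.g. packsswb clamps each i16 to the signed i8 range:
//   <300, -200, 7, ...>  ==>  <127, -128, 7, ...>
// while packuswb/packusdw clamp to the unsigned range; X86Packss/X86Packus
// model exactly that.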
4433 //===---------------------------------------------------------------------===//
4434 // SSE2 - Packed Integer Unpack Instructions
4435 //===---------------------------------------------------------------------===//
4437 let ExeDomain = SSEPackedInt in {
4438 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
4439 SDNode OpNode, PatFrag ld_frag, bit Is2Addr = 1> {
4440 def rr : PDI<opc, MRMSrcReg,
4441 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
4442 !if(Is2Addr,
4443 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
4444 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4445 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))],
4446 IIC_SSE_UNPCK>, Sched<[WriteShuffle]>;
4447 def rm : PDI<opc, MRMSrcMem,
4448 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
4449 !if(Is2Addr,
4450 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
4451 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4452 [(set VR128:$dst, (vt (OpNode VR128:$src1,
4453 (bitconvert (ld_frag addr:$src2)))))],
4454 IIC_SSE_UNPCK>,
4455 Sched<[WriteShuffleLd, ReadAfterLd]>;
4456 }
4458 multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
4459 SDNode OpNode> {
4460 def Yrr : PDI<opc, MRMSrcReg,
4461 (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
4462 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4463 [(set VR256:$dst, (vt (OpNode VR256:$src1, VR256:$src2)))]>,
4464 Sched<[WriteShuffle]>;
4465 def Yrm : PDI<opc, MRMSrcMem,
4466 (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
4467 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4468 [(set VR256:$dst, (vt (OpNode VR256:$src1,
4469 (bitconvert (loadv4i64 addr:$src2)))))]>,
4470 Sched<[WriteShuffleLd, ReadAfterLd]>;
4471 }
4474 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
4475 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
4476 loadv2i64, 0>, VEX_4V;
4477 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
4478 loadv2i64, 0>, VEX_4V;
4479 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
4480 loadv2i64, 0>, VEX_4V;
4481 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
4482 loadv2i64, 0>, VEX_4V;
4483 }
4484 let Predicates = [HasAVX, NoVLX] in {
4485 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
4486 loadv2i64, 0>, VEX_4V;
4487 defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
4488 loadv2i64, 0>, VEX_4V;
4489 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
4490 loadv2i64, 0>, VEX_4V;
4491 defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
4492 loadv2i64, 0>, VEX_4V;
4493 }
4495 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
4496 defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl>,
4497 VEX_4V, VEX_L;
4498 defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl>,
4499 VEX_4V, VEX_L;
4500 defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh>,
4501 VEX_4V, VEX_L;
4502 defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh>,
4503 VEX_4V, VEX_L;
4504 }
4505 let Predicates = [HasAVX2, NoVLX] in {
4506 defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl>,
4507 VEX_4V, VEX_L;
4508 defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl>,
4509 VEX_4V, VEX_L;
4510 defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh>,
4511 VEX_4V, VEX_L;
4512 defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh>,
4513 VEX_4V, VEX_L;
4514 }
4516 let Constraints = "$src1 = $dst" in {
4517 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
4518 memopv2i64>;
4519 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
4520 memopv2i64>;
4521 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
4522 memopv2i64>;
4523 defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
4524 memopv2i64>;
4526 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
4527 memopv2i64>;
4528 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
4529 memopv2i64>;
4530 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
4531 memopv2i64>;
4532 defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
4533 memopv2i64>;
4534 }
4535 } // ExeDomain = SSEPackedInt
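// For reference (illustrative only): the unpacks interleave the low or high
// halves of their two sources, e.g. punpcklbw on
//   a = <a0..a15>, b = <b0..b15>  ==>  <a0, b0, a1, b1, ..., a7, b7>
// which is the shuffle modeled by the X86Unpckl/X86Unpckh nodes above.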
4537 //===---------------------------------------------------------------------===//
4538 // SSE2 - Packed Integer Extract and Insert
4539 //===---------------------------------------------------------------------===//
4541 let ExeDomain = SSEPackedInt in {
4542 multiclass sse2_pinsrw<bit Is2Addr = 1> {
4543 def rri : Ii8<0xC4, MRMSrcReg,
4544 (outs VR128:$dst), (ins VR128:$src1,
4545 GR32orGR64:$src2, u8imm:$src3),
4546 !if(Is2Addr,
4547 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4548 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4549 [(set VR128:$dst,
4550 (X86pinsrw VR128:$src1, GR32orGR64:$src2, imm:$src3))],
4551 IIC_SSE_PINSRW>, Sched<[WriteShuffle]>;
4552 def rmi : Ii8<0xC4, MRMSrcMem,
4553 (outs VR128:$dst), (ins VR128:$src1,
4554 i16mem:$src2, u8imm:$src3),
4555 !if(Is2Addr,
4556 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
4557 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4558 [(set VR128:$dst,
4559 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
4560 imm:$src3))], IIC_SSE_PINSRW>,
4561 Sched<[WriteShuffleLd, ReadAfterLd]>;
4562 }
4565 let Predicates = [HasAVX, NoBWI] in
4566 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
4567 (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
4568 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4569 [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
4570 imm:$src2))]>, PD, VEX,
4571 Sched<[WriteShuffle]>;
4572 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
4573 (outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
4574 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
4575 [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
4576 imm:$src2))], IIC_SSE_PEXTRW>,
4577 Sched<[WriteShuffleLd, ReadAfterLd]>;
4580 let Predicates = [HasAVX, NoBWI] in
4581 defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V;
4583 let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
4584 defm PINSRW : sse2_pinsrw, PD;
4586 } // ExeDomain = SSEPackedInt
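// For reference (illustrative only): pextrw/pinsrw move a single 16-bit lane
// between a GPR and an xmm register, e.g.
//   pextrw $3, %xmm0, %eax   ; eax = zero-extended word 3 of xmm0
//   pinsrw $3, %eax, %xmm0   ; word 3 of xmm0 = ax, other lanes unchanged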
4588 //===---------------------------------------------------------------------===//
4589 // SSE2 - Packed Mask Creation
4590 //===---------------------------------------------------------------------===//
4592 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
4594 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
4595 (ins VR128:$src),
4596 "pmovmskb\t{$src, $dst|$dst, $src}",
4597 [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))],
4598 IIC_SSE_MOVMSK>, VEX;
4600 let Predicates = [HasAVX2] in {
4601 def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
4602 (ins VR256:$src),
4603 "pmovmskb\t{$src, $dst|$dst, $src}",
4604 [(set GR32orGR64:$dst, (X86movmsk (v32i8 VR256:$src)))]>,
4605 VEX, VEX_L;
4606 }
4608 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
4609 "pmovmskb\t{$src, $dst|$dst, $src}",
4610 [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))],
4611 IIC_SSE_MOVMSK>;
4613 } // ExeDomain = SSEPackedInt
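// For reference (illustrative only): pmovmskb gathers the sign bit of every
// byte into the low bits of a GPR, so the xmm form yields a 16-bit mask:
//   pmovmskb %xmm0, %eax   ; bit i of eax = bit 7 of byte i of xmm0
// a common idiom for branching on the result of a vector compare.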
4615 //===---------------------------------------------------------------------===//
4616 // SSE2 - Conditional Store
4617 //===---------------------------------------------------------------------===//
4619 let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {
4621 let Uses = [EDI], Predicates = [HasAVX,Not64BitMode] in
4622 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
4623 (ins VR128:$src, VR128:$mask),
4624 "maskmovdqu\t{$mask, $src|$src, $mask}",
4625 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
4626 IIC_SSE_MASKMOV>, VEX;
4627 let Uses = [RDI], Predicates = [HasAVX,In64BitMode] in
4628 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
4629 (ins VR128:$src, VR128:$mask),
4630 "maskmovdqu\t{$mask, $src|$src, $mask}",
4631 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
4632 IIC_SSE_MASKMOV>, VEX;
4634 let Uses = [EDI], Predicates = [UseSSE2,Not64BitMode] in
4635 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4636 "maskmovdqu\t{$mask, $src|$src, $mask}",
4637 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
4638 IIC_SSE_MASKMOV>;
4639 let Uses = [RDI], Predicates = [UseSSE2,In64BitMode] in
4640 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
4641 "maskmovdqu\t{$mask, $src|$src, $mask}",
4642 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
4643 IIC_SSE_MASKMOV>;
4645 } // ExeDomain = SSEPackedInt
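// For reference (illustrative only): maskmovdqu stores only the bytes of $src
// whose corresponding $mask byte has its high bit set, to the address held
// implicitly in EDI/RDI, e.g.
//   maskmovdqu %xmm1, %xmm0   ; store selected xmm0 bytes to [rdi]
// hence the explicit Uses = [EDI]/[RDI] on the defs above.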
4647 //===---------------------------------------------------------------------===//
4648 // SSE2 - Move Doubleword/Quadword
4649 //===---------------------------------------------------------------------===//
4651 //===---------------------------------------------------------------------===//
4652 // Move Int Doubleword to Packed Double Int
4654 def VMOVDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4655 "movd\t{$src, $dst|$dst, $src}",
4657 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
4658 VEX, Sched<[WriteMove]>;
4659 def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4660 "movd\t{$src, $dst|$dst, $src}",
4662 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
4664 VEX, Sched<[WriteLoad]>;
4665 def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4666 "movq\t{$src, $dst|$dst, $src}",
4668 (v2i64 (scalar_to_vector GR64:$src)))],
4669 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4670 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
4671 def VMOV64toPQIrm : VRS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4672 "movq\t{$src, $dst|$dst, $src}",
4673 [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteLoad]>;
4674 let isCodeGenOnly = 1 in
4675 def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4676 "movq\t{$src, $dst|$dst, $src}",
4677 [(set FR64:$dst, (bitconvert GR64:$src))],
4678 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4680 def MOVDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
4681 "movd\t{$src, $dst|$dst, $src}",
4683 (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
4685 def MOVDI2PDIrm : S2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4686 "movd\t{$src, $dst|$dst, $src}",
4688 (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
4689 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4690 def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
4691 "mov{d|q}\t{$src, $dst|$dst, $src}",
4693 (v2i64 (scalar_to_vector GR64:$src)))],
4694 IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
4695 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
4696 def MOV64toPQIrm : RS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4697 "mov{d|q}\t{$src, $dst|$dst, $src}",
4698 [], IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4699 let isCodeGenOnly = 1 in
4700 def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
4701 "mov{d|q}\t{$src, $dst|$dst, $src}",
4702 [(set FR64:$dst, (bitconvert GR64:$src))],
4703 IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
4705 //===---------------------------------------------------------------------===//
4706 // Move Int Doubleword to Single Scalar
4708 let isCodeGenOnly = 1 in {
4709 def VMOVDI2SSrr : VS2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4710 "movd\t{$src, $dst|$dst, $src}",
4711 [(set FR32:$dst, (bitconvert GR32:$src))],
4712 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4714 def VMOVDI2SSrm : VS2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4715 "movd\t{$src, $dst|$dst, $src}",
4716 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
4717 IIC_SSE_MOVDQ>,
4718 VEX, Sched<[WriteLoad]>;
4719 def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
4720 "movd\t{$src, $dst|$dst, $src}",
4721 [(set FR32:$dst, (bitconvert GR32:$src))],
4722 IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
4724 def MOVDI2SSrm : S2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
4725 "movd\t{$src, $dst|$dst, $src}",
4726 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
4727 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4728 }
4730 //===---------------------------------------------------------------------===//
4731 // Move Packed Doubleword Int to Packed Double Int
4733 def VMOVPDI2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4734 "movd\t{$src, $dst|$dst, $src}",
4735 [(set GR32:$dst, (extractelt (v4i32 VR128:$src),
4736 (iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX,
4737 Sched<[WriteMove]>;
4738 def VMOVPDI2DImr : VS2I<0x7E, MRMDestMem, (outs),
4739 (ins i32mem:$dst, VR128:$src),
4740 "movd\t{$src, $dst|$dst, $src}",
4741 [(store (i32 (extractelt (v4i32 VR128:$src),
4742 (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
4743 VEX, Sched<[WriteStore]>;
4744 def MOVPDI2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
4745 "movd\t{$src, $dst|$dst, $src}",
4746 [(set GR32:$dst, (extractelt (v4i32 VR128:$src),
4747 (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
4748 Sched<[WriteMove]>;
4749 def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
4750 "movd\t{$src, $dst|$dst, $src}",
4751 [(store (i32 (extractelt (v4i32 VR128:$src),
4752 (iPTR 0))), addr:$dst)],
4753 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4755 def : Pat<(v8i32 (X86Vinsert (v8i32 immAllZerosV), GR32:$src2, (iPTR 0))),
4756 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;
4758 def : Pat<(v4i64 (X86Vinsert (bc_v4i64 (v8i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
4759 (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;
4761 def : Pat<(v8i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
4762 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;
4764 def : Pat<(v4i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
4765 (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;
4767 //===---------------------------------------------------------------------===//
4768 // Move Packed Doubleword Int first element to Doubleword Int
4770 let SchedRW = [WriteMove] in {
4771 def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4772 "movq\t{$src, $dst|$dst, $src}",
4773 [(set GR64:$dst, (extractelt (v2i64 VR128:$src),
4774 (iPTR 0)))],
4775 IIC_SSE_MOVD_ToGP>,
4776 VEX;
4778 def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
4779 "mov{d|q}\t{$src, $dst|$dst, $src}",
4780 [(set GR64:$dst, (extractelt (v2i64 VR128:$src),
4781 (iPTR 0)))],
4782 IIC_SSE_MOVD_ToGP>;
4783 } // SchedRW
4785 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
4786 def VMOVPQIto64rm : VRS2I<0x7E, MRMDestMem, (outs),
4787 (ins i64mem:$dst, VR128:$src),
4788 "movq\t{$src, $dst|$dst, $src}",
4789 [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
4790 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
4791 def MOVPQIto64rm : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4792 "mov{d|q}\t{$src, $dst|$dst, $src}",
4793 [], IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4795 //===---------------------------------------------------------------------===//
4796 // Bitcast FR64 <-> GR64
4798 let isCodeGenOnly = 1 in {
4799 let Predicates = [UseAVX] in
4800 def VMOV64toSDrm : VS2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4801 "movq\t{$src, $dst|$dst, $src}",
4802 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
4803 VEX, Sched<[WriteLoad]>;
4804 def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4805 "movq\t{$src, $dst|$dst, $src}",
4806 [(set GR64:$dst, (bitconvert FR64:$src))],
4807 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
4808 def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4809 "movq\t{$src, $dst|$dst, $src}",
4810 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
4811 IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
4813 def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
4814 "movq\t{$src, $dst|$dst, $src}",
4815 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
4816 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
4817 def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
4818 "mov{d|q}\t{$src, $dst|$dst, $src}",
4819 [(set GR64:$dst, (bitconvert FR64:$src))],
4820 IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
4821 def MOVSDto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
4822 "movq\t{$src, $dst|$dst, $src}",
4823 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
4824 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4825 }
4827 //===---------------------------------------------------------------------===//
4828 // Move Scalar Single to Double Int
4830 let isCodeGenOnly = 1 in {
4831 def VMOVSS2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4832 "movd\t{$src, $dst|$dst, $src}",
4833 [(set GR32:$dst, (bitconvert FR32:$src))],
4834 IIC_SSE_MOVD_ToGP>, VEX, Sched<[WriteMove]>;
4835 def VMOVSS2DImr : VS2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4836 "movd\t{$src, $dst|$dst, $src}",
4837 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
4838 IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
4839 def MOVSS2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
4840 "movd\t{$src, $dst|$dst, $src}",
4841 [(set GR32:$dst, (bitconvert FR32:$src))],
4842 IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
4843 def MOVSS2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
4844 "movd\t{$src, $dst|$dst, $src}",
4845 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
4846 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
4847 }
4849 let Predicates = [UseAVX] in {
4850 let AddedComplexity = 15 in {
4851 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
4852 (VMOVDI2PDIrr GR32:$src)>;
4854 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
4855 (VMOV64toPQIrr GR64:$src)>;
4857 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
4858 (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
4859 (SUBREG_TO_REG (i64 0), (VMOV64toPQIrr GR64:$src), sub_xmm)>;
4860 }
4861 // AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
4862 // These instructions also write zeros in the high part of a 256-bit register.
4863 let AddedComplexity = 20 in {
4864 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
4865 (VMOVDI2PDIrm addr:$src)>;
4866 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
4867 (VMOVDI2PDIrm addr:$src)>;
4868 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
4869 (VMOVDI2PDIrm addr:$src)>;
4870 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
4871 (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
4872 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrm addr:$src), sub_xmm)>;
4873 }
4874 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
4875 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
4876 (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
4877 (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src), sub_xmm)>;
4878 }
4880 let Predicates = [UseSSE2] in {
4881 let AddedComplexity = 15 in {
4882 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
4883 (MOVDI2PDIrr GR32:$src)>;
4885 def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
4886 (MOV64toPQIrr GR64:$src)>;
4887 }
4888 let AddedComplexity = 20 in {
4889 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
4890 (MOVDI2PDIrm addr:$src)>;
4891 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
4892 (MOVDI2PDIrm addr:$src)>;
4893 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
4894 (MOVDI2PDIrm addr:$src)>;
4895 }
4896 }
4898 // These are the correct encodings of the instructions so that we know how to
4899 // read correct assembly, even though we continue to emit the wrong ones for
4900 // compatibility with Darwin's buggy assembler.
4901 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4902 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
4903 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
4904 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
4905 // Allow "vmovd" but print "vmovq" since we don't need compatibility for AVX.
4906 def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
4907 (VMOV64toPQIrr VR128:$dst, GR64:$src), 0>;
4908 def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
4909 (VMOVPQIto64rr GR64:$dst, VR128:$src), 0>;
4911 //===---------------------------------------------------------------------===//
4912 // SSE2 - Move Quadword
4913 //===---------------------------------------------------------------------===//
4915 //===---------------------------------------------------------------------===//
4916 // Move Quadword Int to Packed Quadword Int
4919 let ExeDomain = SSEPackedInt, SchedRW = [WriteLoad] in {
4920 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4921 "vmovq\t{$src, $dst|$dst, $src}",
4923 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
4924 VEX, Requires<[UseAVX]>;
4925 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4926 "movq\t{$src, $dst|$dst, $src}",
4928 (v2i64 (scalar_to_vector (loadi64 addr:$src))))],
4930 Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
4931 } // ExeDomain, SchedRW
4933 //===---------------------------------------------------------------------===//
4934 // Move Packed Quadword Int to Quadword Int
4936 let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {
4937 def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4938 "movq\t{$src, $dst|$dst, $src}",
4939 [(store (i64 (extractelt (v2i64 VR128:$src),
4940 (iPTR 0))), addr:$dst)],
4941 IIC_SSE_MOVDQ>, VEX;
4942 def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
4943 "movq\t{$src, $dst|$dst, $src}",
4944 [(store (i64 (extractelt (v2i64 VR128:$src),
4945 (iPTR 0))), addr:$dst)],
4946 IIC_SSE_MOVDQ>;
4947 } // ExeDomain, SchedRW
4949 // For disassembler only
4950 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
4951 SchedRW = [WriteVecLogic] in {
4952 def VMOVPQI2QIrr : VS2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
4953 "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, VEX;
4954 def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
4955 "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>;
4958 // Aliases to help the assembler pick two byte VEX encodings by swapping the
4959 // operands relative to the normal instructions to use VEX.R instead of VEX.B.
4960 def : InstAlias<"vmovq\t{$src, $dst|$dst, $src}",
4961 (VMOVPQI2QIrr VR128L:$dst, VR128H:$src), 0>;
4963 //===---------------------------------------------------------------------===//
4964 // Store / copy lower 64-bits of a XMM register.
4966 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1, AddedComplexity = 20 in {
4967 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4968 "vmovq\t{$src, $dst|$dst, $src}",
4970 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
4971 (loadi64 addr:$src))))))],
4973 XS, VEX, Requires<[UseAVX]>, Sched<[WriteLoad]>;
4975 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4976 "movq\t{$src, $dst|$dst, $src}",
4978 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
4979 (loadi64 addr:$src))))))],
4981 XS, Requires<[UseSSE2]>, Sched<[WriteLoad]>;
4982 } // ExeDomain, isCodeGenOnly, AddedComplexity
4984 let Predicates = [UseAVX], AddedComplexity = 20 in {
4985 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
4986 (VMOVZQI2PQIrm addr:$src)>;
4987 def : Pat<(v2i64 (X86vzload addr:$src)),
4988 (VMOVZQI2PQIrm addr:$src)>;
4989 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
4990 (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
4991 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrm addr:$src), sub_xmm)>;
4992 def : Pat<(v4i64 (X86vzload addr:$src)),
4993 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrm addr:$src), sub_xmm)>;
4994 }
4996 let Predicates = [UseSSE2], AddedComplexity = 20 in {
4997 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
4998 (MOVZQI2PQIrm addr:$src)>;
4999 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
5000 }
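// For reference (illustrative only): X86vzmovl/X86vzload model "load or move
// the low element and zero the rest", e.g. movq (%rax), %xmm0 fills lane 0
// and clears lane 1, which is why the same load also satisfies the 256-bit
// zero-extending inserts via SUBREG_TO_REG above.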
5002 //===---------------------------------------------------------------------===//
5003 // Moving from XMM to XMM and clearing the upper 64 bits. Note that the
5004 // IA-32 manual is misleading here: movq xmm1, xmm2 does clear the high bits.
5006 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
5007 let AddedComplexity = 15 in
5008 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5009 "vmovq\t{$src, $dst|$dst, $src}",
5010 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
5011 IIC_SSE_MOVQ_RR>,
5012 XS, VEX, Requires<[UseAVX]>;
5013 let AddedComplexity = 15 in
5014 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5015 "movq\t{$src, $dst|$dst, $src}",
5016 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
5017 IIC_SSE_MOVQ_RR>,
5018 XS, Requires<[UseSSE2]>;
5019 } // ExeDomain, SchedRW
5021 let ExeDomain = SSEPackedInt, isCodeGenOnly = 1, SchedRW = [WriteVecLogicLd] in {
5022 let AddedComplexity = 20 in
5023 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5024 "vmovq\t{$src, $dst|$dst, $src}",
5025 [(set VR128:$dst, (v2i64 (X86vzmovl
5026 (loadv2i64 addr:$src))))],
5027 IIC_SSE_MOVDQ>,
5028 XS, VEX, Requires<[UseAVX]>;
5029 let AddedComplexity = 20 in {
5030 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5031 "movq\t{$src, $dst|$dst, $src}",
5032 [(set VR128:$dst, (v2i64 (X86vzmovl
5033 (loadv2i64 addr:$src))))],
5034 IIC_SSE_MOVDQ>,
5035 XS, Requires<[UseSSE2]>;
5036 }
5037 } // ExeDomain, isCodeGenOnly, SchedRW
5039 let AddedComplexity = 20 in {
5040 let Predicates = [UseAVX] in {
5041 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
5042 (VMOVZPQILo2PQIrr VR128:$src)>;
5043 }
5044 let Predicates = [UseSSE2] in {
5045 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
5046 (MOVZPQILo2PQIrr VR128:$src)>;
5047 }
5048 }
5050 //===---------------------------------------------------------------------===//
5051 // SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
5052 //===---------------------------------------------------------------------===//
5053 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
5054 ValueType vt, RegisterClass RC, PatFrag mem_frag,
5055 X86MemOperand x86memop> {
5056 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
5057 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5058 [(set RC:$dst, (vt (OpNode RC:$src)))],
5059 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
5060 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5061 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5062 [(set RC:$dst, (OpNode (mem_frag addr:$src)))],
5063 IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
5064 }
5066 let Predicates = [HasAVX, NoVLX] in {
5067 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
5068 v4f32, VR128, loadv4f32, f128mem>, VEX;
5069 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
5070 v4f32, VR128, loadv4f32, f128mem>, VEX;
5071 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
5072 v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
5073 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
5074 v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
5075 }
5076 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
5077 memopv4f32, f128mem>;
5078 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
5079 memopv4f32, f128mem>;
5081 let Predicates = [HasAVX, NoVLX] in {
5082 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5083 (VMOVSHDUPrr VR128:$src)>;
5084 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (loadv2i64 addr:$src)))),
5085 (VMOVSHDUPrm addr:$src)>;
5086 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5087 (VMOVSLDUPrr VR128:$src)>;
5088 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (loadv2i64 addr:$src)))),
5089 (VMOVSLDUPrm addr:$src)>;
5090 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
5091 (VMOVSHDUPYrr VR256:$src)>;
5092 def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (loadv4i64 addr:$src)))),
5093 (VMOVSHDUPYrm addr:$src)>;
5094 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
5095 (VMOVSLDUPYrr VR256:$src)>;
5096 def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (loadv4i64 addr:$src)))),
5097 (VMOVSLDUPYrm addr:$src)>;
5098 }
5100 let Predicates = [UseSSE3] in {
5101 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5102 (MOVSHDUPrr VR128:$src)>;
5103 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
5104 (MOVSHDUPrm addr:$src)>;
5105 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5106 (MOVSLDUPrr VR128:$src)>;
5107 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
5108 (MOVSLDUPrm addr:$src)>;
5109 }
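// For reference (illustrative only): movshdup/movsldup duplicate the odd or
// even float lanes, e.g. movshdup on <a0, a1, a2, a3> gives <a1, a1, a3, a3>,
// and the patterns above reuse the same shuffles for v4i32/v8i32 data.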
5111 //===---------------------------------------------------------------------===//
5112 // SSE3 - Replicate Double FP - MOVDDUP
5113 //===---------------------------------------------------------------------===//
5115 multiclass sse3_replicate_dfp<string OpcodeStr> {
5116 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5117 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5118 [(set VR128:$dst, (v2f64 (X86Movddup VR128:$src)))],
5119 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
5120 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
5121 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5122 [(set VR128:$dst,
5123 (v2f64 (X86Movddup
5124 (scalar_to_vector (loadf64 addr:$src)))))],
5125 IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
5126 }
5128 // FIXME: Merge with the above class when there are patterns for the ymm version.
5129 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
5130 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
5131 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5132 [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>,
5133 Sched<[WriteFShuffle]>;
5134 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
5135 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5136 [(set VR256:$dst,
5137 (v4f64 (X86Movddup (loadv4f64 addr:$src))))]>,
5138 Sched<[WriteLoad]>;
5139 }
5141 let Predicates = [HasAVX, NoVLX] in {
5142 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
5143 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX, VEX_L;
5144 }
5146 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
5149 let Predicates = [HasAVX, NoVLX] in {
5150 def : Pat<(X86Movddup (loadv2f64 addr:$src)),
5151 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5153 // 256-bit version
5154 def : Pat<(X86Movddup (loadv4i64 addr:$src)),
5155 (VMOVDDUPYrm addr:$src)>;
5156 def : Pat<(X86Movddup (v4i64 VR256:$src)),
5157 (VMOVDDUPYrr VR256:$src)>;
5158 }
5160 let Predicates = [HasAVX] in {
5161 def : Pat<(X86Movddup (bc_v2f64 (loadv4f32 addr:$src))),
5162 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5163 def : Pat<(X86Movddup (bc_v2f64 (loadv2i64 addr:$src))),
5164 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5165 def : Pat<(X86Movddup (bc_v2f64
5166 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5167 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5168 }
5170 let Predicates = [UseAVX, OptForSize] in {
5171 def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
5172 (VMOVDDUPrm addr:$src)>;
5173 def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
5174 (VMOVDDUPrm addr:$src)>;
5175 }
5177 let Predicates = [UseSSE3] in {
5178 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5179 (MOVDDUPrm addr:$src)>;
5180 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5181 (MOVDDUPrm addr:$src)>;
5182 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5183 (MOVDDUPrm addr:$src)>;
5184 def : Pat<(X86Movddup (bc_v2f64
5185 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5186 (MOVDDUPrm addr:$src)>;
5187 }
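// For reference (illustrative only): movddup broadcasts the low double, e.g.
//   movddup %xmm0, %xmm1   ; xmm1 = <xmm0[0], xmm0[0]>
// which is why a 64-bit broadcast from memory can be selected to VMOVDDUPrm
// when optimizing for size, as in the patterns above.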
5189 //===---------------------------------------------------------------------===//
5190 // SSE3 - Move Unaligned Integer
5191 //===---------------------------------------------------------------------===//
5193 let SchedRW = [WriteLoad] in {
5194 let Predicates = [HasAVX] in {
5195 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5196 "vlddqu\t{$src, $dst|$dst, $src}",
5197 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
5198 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
5199 "vlddqu\t{$src, $dst|$dst, $src}",
5200 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
5201 VEX, VEX_L;
5202 }
5203 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5204 "lddqu\t{$src, $dst|$dst, $src}",
5205 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))],
5206 IIC_SSE_LDDQU>;
5207 } // SchedRW
5209 //===---------------------------------------------------------------------===//
5210 // SSE3 - Arithmetic
5211 //===---------------------------------------------------------------------===//
5213 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
5214 X86MemOperand x86memop, OpndItins itins,
5215 PatFrag ld_frag, bit Is2Addr = 1> {
5216 def rr : I<0xD0, MRMSrcReg,
5217 (outs RC:$dst), (ins RC:$src1, RC:$src2),
5218 !if(Is2Addr,
5219 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5220 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5221 [(set RC:$dst, (Int RC:$src1, RC:$src2))], itins.rr>,
5222 Sched<[itins.Sched]>;
5223 def rm : I<0xD0, MRMSrcMem,
5224 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5225 !if(Is2Addr,
5226 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5227 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5228 [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2)))], itins.rm>,
5229 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5230 }
5232 let Predicates = [HasAVX] in {
5233 let ExeDomain = SSEPackedSingle in {
5234 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
5235 f128mem, SSE_ALU_F32P, loadv4f32, 0>, XD, VEX_4V;
5236 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
5237 f256mem, SSE_ALU_F32P, loadv8f32, 0>, XD, VEX_4V, VEX_L;
5238 }
5239 let ExeDomain = SSEPackedDouble in {
5240 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
5241 f128mem, SSE_ALU_F64P, loadv2f64, 0>, PD, VEX_4V;
5242 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
5243 f256mem, SSE_ALU_F64P, loadv4f64, 0>, PD, VEX_4V, VEX_L;
5244 }
5245 }
5246 let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
5247 let ExeDomain = SSEPackedSingle in
5248 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
5249 f128mem, SSE_ALU_F32P, memopv4f32>, XD;
5250 let ExeDomain = SSEPackedDouble in
5251 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
5252 f128mem, SSE_ALU_F64P, memopv2f64>, PD;
5253 }
5255 // Patterns used to select 'addsub' instructions.
5256 let Predicates = [HasAVX] in {
5257 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
5258 (VADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
5259 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (loadv4f32 addr:$rhs))),
5260 (VADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
5261 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
5262 (VADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
5263 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (loadv2f64 addr:$rhs))),
5264 (VADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
5266 def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (v8f32 VR256:$rhs))),
5267 (VADDSUBPSYrr VR256:$lhs, VR256:$rhs)>;
5268 def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (loadv8f32 addr:$rhs))),
5269 (VADDSUBPSYrm VR256:$lhs, f256mem:$rhs)>;
5270 def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (v4f64 VR256:$rhs))),
5271 (VADDSUBPDYrr VR256:$lhs, VR256:$rhs)>;
5272 def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (loadv4f64 addr:$rhs))),
5273 (VADDSUBPDYrm VR256:$lhs, f256mem:$rhs)>;
5274 }
5276 let Predicates = [UseSSE3] in {
5277 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
5278 (ADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
5279 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (memopv4f32 addr:$rhs))),
5280 (ADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
5281 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
5282 (ADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
5283 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (memopv2f64 addr:$rhs))),
5284 (ADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
5285 }
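// For reference (illustrative only): addsub alternates subtract/add per lane,
// r[i] = a[i] - b[i] for even i and a[i] + b[i] for odd i, e.g. addsubps on
//   a = <1.0, 2.0, 3.0, 4.0>, b = <10.0, 10.0, 10.0, 10.0>
//   ==>  <-9.0, 12.0, -7.0, 14.0>
// which is exactly what the X86Addsub node selected above models.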
5287 //===---------------------------------------------------------------------===//
5288 // SSE3 Instructions
5289 //===---------------------------------------------------------------------===//
5291 // Horizontal ops
5292 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5293 X86MemOperand x86memop, SDNode OpNode, PatFrag ld_frag,
5294 bit Is2Addr = 1> {
5295 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5296 !if(Is2Addr,
5297 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5298 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5299 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>,
5300 Sched<[WriteFAdd]>;
5302 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5303 !if(Is2Addr,
5304 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5305 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5306 [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))],
5307 IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
5308 }
5309 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5310 X86MemOperand x86memop, SDNode OpNode, PatFrag ld_frag,
5311 bit Is2Addr = 1> {
5312 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5313 !if(Is2Addr,
5314 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5315 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5316 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>,
5317 Sched<[WriteFAdd]>;
5319 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5320 !if(Is2Addr,
5321 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5322 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5323 [(set RC:$dst, (vt (OpNode RC:$src1, (ld_frag addr:$src2))))],
5324 IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
5325 }
5327 let Predicates = [HasAVX] in {
5328 let ExeDomain = SSEPackedSingle in {
5329 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
5330 X86fhadd, loadv4f32, 0>, VEX_4V;
5331 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
5332 X86fhsub, loadv4f32, 0>, VEX_4V;
5333 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
5334 X86fhadd, loadv8f32, 0>, VEX_4V, VEX_L;
5335 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
5336 X86fhsub, loadv8f32, 0>, VEX_4V, VEX_L;
5337 }
5338 let ExeDomain = SSEPackedDouble in {
5339 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
5340 X86fhadd, loadv2f64, 0>, VEX_4V;
5341 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
5342 X86fhsub, loadv2f64, 0>, VEX_4V;
5343 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
5344 X86fhadd, loadv4f64, 0>, VEX_4V, VEX_L;
5345 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
5346 X86fhsub, loadv4f64, 0>, VEX_4V, VEX_L;
5347 }
5348 }
5350 let Constraints = "$src1 = $dst" in {
5351 let ExeDomain = SSEPackedSingle in {
5352 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd,
5353 memopv4f32>;
5354 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub,
5355 memopv4f32>;
5356 }
5357 let ExeDomain = SSEPackedDouble in {
5358 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd,
5359 memopv2f64>;
5360 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub,
5361 memopv2f64>;
5362 }
5363 }
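// For reference (illustrative only): the horizontal ops add or subtract
// adjacent pairs across both sources, e.g. haddps on
//   a = <a0, a1, a2, a3>, b = <b0, b1, b2, b3>
//   ==>  <a0+a1, a2+a3, b0+b1, b2+b3>
// hsub has the same shape with subtraction.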
5365 //===---------------------------------------------------------------------===//
5366 // SSSE3 - Packed Absolute Instructions
5367 //===---------------------------------------------------------------------===//
5370 /// SS3I_unop_rm - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5371 multiclass SS3I_unop_rm<bits<8> opc, string OpcodeStr, ValueType vt,
5372 SDNode OpNode, PatFrag ld_frag> {
5373 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5374 (ins VR128:$src),
5375 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5376 [(set VR128:$dst, (vt (OpNode VR128:$src)))],
5377 IIC_SSE_PABS_RR>, Sched<[WriteVecALU]>;
5379 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5380 (ins i128mem:$src),
5381 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5382 [(set VR128:$dst,
5383 (vt (OpNode (bitconvert (ld_frag addr:$src)))))],
5384 IIC_SSE_PABS_RM>, Sched<[WriteVecALULd]>;
5385 }
5387 /// SS3I_unop_rm_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5388 multiclass SS3I_unop_rm_y<bits<8> opc, string OpcodeStr, ValueType vt,
5389 SDNode OpNode> {
5390 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5391 (ins VR256:$src),
5392 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5393 [(set VR256:$dst, (vt (OpNode VR256:$src)))]>,
5394 Sched<[WriteVecALU]>;
5396 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5397 (ins i256mem:$src),
5398 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5399 [(set VR256:$dst,
5400 (vt (OpNode (bitconvert (loadv4i64 addr:$src)))))]>,
5401 Sched<[WriteVecALULd]>;
5402 }
5404 // Helper fragments to match sext vXi1 to vXiY.
5405 def v16i1sextv16i8 : PatLeaf<(v16i8 (X86pcmpgt (bc_v16i8 (v4i32 immAllZerosV)),
5406 VR128:$src))>;
5407 def v8i1sextv8i16 : PatLeaf<(v8i16 (X86vsrai VR128:$src, (i8 15)))>;
5408 def v4i1sextv4i32 : PatLeaf<(v4i32 (X86vsrai VR128:$src, (i8 31)))>;
5409 def v32i1sextv32i8 : PatLeaf<(v32i8 (X86pcmpgt (bc_v32i8 (v8i32 immAllZerosV)),
5410 VR256:$src))>;
5411 def v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256:$src, (i8 15)))>;
5412 def v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256:$src, (i8 31)))>;
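// For reference (illustrative only): these sign-mask leaves support the
// integer abs identity
//   abs(x) = (x + (x >>s N-1)) xor (x >>s N-1)
// e.g. x = -5: sign = -1, (-5 + -1) xor -1 = 5, which is the add/xor shape
// matched against X86Abs in the patterns below.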
5414 let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
5415 defm VPABSB : SS3I_unop_rm<0x1C, "vpabsb", v16i8, X86Abs, loadv2i64>, VEX;
5416 defm VPABSW : SS3I_unop_rm<0x1D, "vpabsw", v8i16, X86Abs, loadv2i64>, VEX;
5417 }
5418 let Predicates = [HasAVX, NoVLX] in {
5419 defm VPABSD : SS3I_unop_rm<0x1E, "vpabsd", v4i32, X86Abs, loadv2i64>, VEX;
5420 }
5422 let Predicates = [HasAVX] in {
5423 def : Pat<(xor
5424 (bc_v2i64 (v16i1sextv16i8)),
5425 (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
5426 (VPABSBrr128 VR128:$src)>;
5427 def : Pat<(xor
5428 (bc_v2i64 (v8i1sextv8i16)),
5429 (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))),
5430 (VPABSWrr128 VR128:$src)>;
5431 def : Pat<(xor
5432 (bc_v2i64 (v4i1sextv4i32)),
5433 (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))),
5434 (VPABSDrr128 VR128:$src)>;
5435 }
5437 let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
5438 defm VPABSB : SS3I_unop_rm_y<0x1C, "vpabsb", v32i8, X86Abs>, VEX, VEX_L;
5439 defm VPABSW : SS3I_unop_rm_y<0x1D, "vpabsw", v16i16, X86Abs>, VEX, VEX_L;
5440 }
5441 let Predicates = [HasAVX2, NoVLX] in {
5442 defm VPABSD : SS3I_unop_rm_y<0x1E, "vpabsd", v8i32, X86Abs>, VEX, VEX_L;
5443 }
5445 let Predicates = [HasAVX2] in {
5446 def : Pat<(xor
5447 (bc_v4i64 (v32i1sextv32i8)),
5448 (bc_v4i64 (add (v32i8 VR256:$src), (v32i1sextv32i8)))),
5449 (VPABSBrr256 VR256:$src)>;
5450 def : Pat<(xor
5451 (bc_v4i64 (v16i1sextv16i16)),
5452 (bc_v4i64 (add (v16i16 VR256:$src), (v16i1sextv16i16)))),
5453 (VPABSWrr256 VR256:$src)>;
5454 def : Pat<(xor
5455 (bc_v4i64 (v8i1sextv8i32)),
5456 (bc_v4i64 (add (v8i32 VR256:$src), (v8i1sextv8i32)))),
5457 (VPABSDrr256 VR256:$src)>;
5458 }
5460 defm PABSB : SS3I_unop_rm<0x1C, "pabsb", v16i8, X86Abs, memopv2i64>;
5461 defm PABSW : SS3I_unop_rm<0x1D, "pabsw", v8i16, X86Abs, memopv2i64>;
5462 defm PABSD : SS3I_unop_rm<0x1E, "pabsd", v4i32, X86Abs, memopv2i64>;
5464 let Predicates = [UseSSSE3] in {
5465 def : Pat<(xor
5466 (bc_v2i64 (v16i1sextv16i8)),
5467 (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
5468 (PABSBrr128 VR128:$src)>;
5469 def : Pat<(xor
5470 (bc_v2i64 (v8i1sextv8i16)),
5471 (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))),
5472 (PABSWrr128 VR128:$src)>;
5473 def : Pat<(xor
5474 (bc_v2i64 (v4i1sextv4i32)),
5475 (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))),
5476 (PABSDrr128 VR128:$src)>;
5477 }
5479 //===---------------------------------------------------------------------===//
5480 // SSSE3 - Packed Binary Operator Instructions
5481 //===---------------------------------------------------------------------===//
5483 let Sched = WriteVecALU in {
5484 def SSE_PHADDSUBD : OpndItins<
5485 IIC_SSE_PHADDSUBD_RR, IIC_SSE_PHADDSUBD_RM
5486 >;
5487 def SSE_PHADDSUBSW : OpndItins<
5488 IIC_SSE_PHADDSUBSW_RR, IIC_SSE_PHADDSUBSW_RM
5489 >;
5490 def SSE_PHADDSUBW : OpndItins<
5491 IIC_SSE_PHADDSUBW_RR, IIC_SSE_PHADDSUBW_RM
5492 >;
5493 }
5494 let Sched = WriteShuffle in
5495 def SSE_PSHUFB : OpndItins<
5496 IIC_SSE_PSHUFB_RR, IIC_SSE_PSHUFB_RM
5497 >;
5498 let Sched = WriteVecALU in
5499 def SSE_PSIGN : OpndItins<
5500 IIC_SSE_PSIGN_RR, IIC_SSE_PSIGN_RM
5501 >;
5502 let Sched = WriteVecIMul in
5503 def SSE_PMULHRSW : OpndItins<
5504 IIC_SSE_PMULHRSW, IIC_SSE_PMULHRSW
5505 >;
5507 /// SS3I_binop_rm - Simple SSSE3 bin op
5508 multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5509 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
5510 X86MemOperand x86memop, OpndItins itins,
5511 bit Is2Addr = 1> {
5512 let isCommutable = 1 in
5513 def rr : SS38I<opc, MRMSrcReg, (outs RC:$dst),
5514 (ins RC:$src1, RC:$src2),
5515 !if(Is2Addr,
5516 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5517 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5518 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
5519 Sched<[itins.Sched]>;
5520 def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
5521 (ins RC:$src1, x86memop:$src2),
5522 !if(Is2Addr,
5523 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5524 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5525 [(set RC:$dst,
5526 (OpVT (OpNode RC:$src1,
5527 (bitconvert (memop_frag addr:$src2)))))], itins.rm>,
5528 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5529 }
5531 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
5532 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
5533 Intrinsic IntId128, OpndItins itins,
5534 PatFrag ld_frag, bit Is2Addr = 1> {
5535 let isCommutable = 1 in
5536 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5537 (ins VR128:$src1, VR128:$src2),
5538 !if(Is2Addr,
5539 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5540 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5541 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5542 Sched<[itins.Sched]>;
5543 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5544 (ins VR128:$src1, i128mem:$src2),
5545 !if(Is2Addr,
5546 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5547 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5548 [(set VR128:$dst,
5549 (IntId128 VR128:$src1,
5550 (bitconvert (ld_frag addr:$src2))))]>,
5551 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5552 }
5554 multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
5556 X86FoldableSchedWrite Sched> {
5557 let isCommutable = 1 in
5558 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5559 (ins VR256:$src1, VR256:$src2),
5560 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5561 [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
5563 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5564 (ins VR256:$src1, i256mem:$src2),
5565 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5567 (IntId256 VR256:$src1, (bitconvert (loadv4i64 addr:$src2))))]>,
5568 Sched<[Sched.Folded, ReadAfterLd]>;
let ImmT = NoImm, Predicates = [HasAVX] in {
let isCommutable = 0 in {
  defm VPHADDW    : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPHADDD    : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PHADDSUBD, 0>, VEX_4V;
  defm VPHSUBW    : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V;
  defm VPHSUBD    : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PHADDSUBD, 0>, VEX_4V;
  defm VPSIGNB    : SS3I_binop_rm_int<0x08, "vpsignb",
                                      int_x86_ssse3_psign_b_128,
                                      SSE_PSIGN, loadv2i64, 0>, VEX_4V;
  defm VPSIGNW    : SS3I_binop_rm_int<0x09, "vpsignw",
                                      int_x86_ssse3_psign_w_128,
                                      SSE_PSIGN, loadv2i64, 0>, VEX_4V;
  defm VPSIGND    : SS3I_binop_rm_int<0x0A, "vpsignd",
                                      int_x86_ssse3_psign_d_128,
                                      SSE_PSIGN, loadv2i64, 0>, VEX_4V;
  defm VPSHUFB    : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
                                  loadv2i64, i128mem,
                                  SSE_PSHUFB, 0>, VEX_4V;
  defm VPHADDSW   : SS3I_binop_rm_int<0x03, "vphaddsw",
                                      int_x86_ssse3_phadd_sw_128,
                                      SSE_PHADDSUBSW, loadv2i64, 0>, VEX_4V;
  defm VPHSUBSW   : SS3I_binop_rm_int<0x07, "vphsubsw",
                                      int_x86_ssse3_phsub_sw_128,
                                      SSE_PHADDSUBSW, loadv2i64, 0>, VEX_4V;
  defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw",
                                      int_x86_ssse3_pmadd_ub_sw_128,
                                      SSE_PMADD, loadv2i64, 0>, VEX_4V;
}
defm VPMULHRSW    : SS3I_binop_rm_int<0x0B, "vpmulhrsw",
                                      int_x86_ssse3_pmul_hr_sw_128,
                                      SSE_PMULHRSW, loadv2i64, 0>, VEX_4V;
}
let ImmT = NoImm, Predicates = [HasAVX2] in {
let isCommutable = 0 in {
  defm VPHADDWY   : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPHADDDY   : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPHSUBWY   : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPHSUBDY   : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
  defm VPSIGNBY   : SS3I_binop_rm_int_y<0x08, "vpsignb", int_x86_avx2_psign_b,
                                        WriteVecALU>, VEX_4V, VEX_L;
  defm VPSIGNWY   : SS3I_binop_rm_int_y<0x09, "vpsignw", int_x86_avx2_psign_w,
                                        WriteVecALU>, VEX_4V, VEX_L;
  defm VPSIGNDY   : SS3I_binop_rm_int_y<0x0A, "vpsignd", int_x86_avx2_psign_d,
                                        WriteVecALU>, VEX_4V, VEX_L;
  defm VPSHUFBY   : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
                                  loadv4i64, i256mem,
                                  SSE_PSHUFB, 0>, VEX_4V, VEX_L;
  defm VPHADDSW   : SS3I_binop_rm_int_y<0x03, "vphaddsw",
                                        int_x86_avx2_phadd_sw,
                                        WriteVecALU>, VEX_4V, VEX_L;
  defm VPHSUBSW   : SS3I_binop_rm_int_y<0x07, "vphsubsw",
                                        int_x86_avx2_phsub_sw,
                                        WriteVecALU>, VEX_4V, VEX_L;
  defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw",
                                        int_x86_avx2_pmadd_ub_sw,
                                        WriteVecIMul>, VEX_4V, VEX_L;
}
defm VPMULHRSW    : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw",
                                        int_x86_avx2_pmul_hr_sw,
                                        WriteVecIMul>, VEX_4V, VEX_L;
}
// None of these have i8 immediate fields.
let ImmT = NoImm, Constraints = "$src1 = $dst" in {
let isCommutable = 0 in {
  defm PHADDW    : SS3I_binop_rm<0x01, "phaddw", X86hadd, v8i16, VR128,
                                 memopv2i64, i128mem, SSE_PHADDSUBW>;
  defm PHADDD    : SS3I_binop_rm<0x02, "phaddd", X86hadd, v4i32, VR128,
                                 memopv2i64, i128mem, SSE_PHADDSUBD>;
  defm PHSUBW    : SS3I_binop_rm<0x05, "phsubw", X86hsub, v8i16, VR128,
                                 memopv2i64, i128mem, SSE_PHADDSUBW>;
  defm PHSUBD    : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, VR128,
                                 memopv2i64, i128mem, SSE_PHADDSUBD>;
  defm PSIGNB    : SS3I_binop_rm_int<0x08, "psignb", int_x86_ssse3_psign_b_128,
                                     SSE_PSIGN, memopv2i64>;
  defm PSIGNW    : SS3I_binop_rm_int<0x09, "psignw", int_x86_ssse3_psign_w_128,
                                     SSE_PSIGN, memopv2i64>;
  defm PSIGND    : SS3I_binop_rm_int<0x0A, "psignd", int_x86_ssse3_psign_d_128,
                                     SSE_PSIGN, memopv2i64>;
  defm PSHUFB    : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, VR128,
                                 memopv2i64, i128mem, SSE_PSHUFB>;
  defm PHADDSW   : SS3I_binop_rm_int<0x03, "phaddsw",
                                     int_x86_ssse3_phadd_sw_128,
                                     SSE_PHADDSUBSW, memopv2i64>;
  defm PHSUBSW   : SS3I_binop_rm_int<0x07, "phsubsw",
                                     int_x86_ssse3_phsub_sw_128,
                                     SSE_PHADDSUBSW, memopv2i64>;
  defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw",
                                     int_x86_ssse3_pmadd_ub_sw_128,
                                     SSE_PMADD, memopv2i64>;
}
defm PMULHRSW    : SS3I_binop_rm_int<0x0B, "pmulhrsw",
                                     int_x86_ssse3_pmul_hr_sw_128,
                                     SSE_PMULHRSW, memopv2i64>;
}
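// A quick semantic reminder for the two multiply forms above (per the Intel
// SDM; the TableGen here only wires up intrinsics and encodings):
//   pmaddubsw: multiplies unsigned bytes of the first operand by the
//              corresponding signed bytes of the second, then adds adjacent
//              pairs into signed words with saturation.
//   pmulhrsw:  per i16 element computes (a * b + 0x4000) >> 15, i.e. a
//              rounded fixed-point "multiply high".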
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Align Instruction Patterns
//===---------------------------------------------------------------------===//

multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
  let hasSideEffects = 0 in {
  def rri : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [], IIC_SSE_PALIGNRR>, Sched<[WriteShuffle]>;
  let mayLoad = 1 in
  def rmi : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [], IIC_SSE_PALIGNRM>, Sched<[WriteShuffleLd, ReadAfterLd]>;
  }
}

multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> {
  let hasSideEffects = 0 in {
  def Yrri : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
      (ins VR256:$src1, VR256:$src2, u8imm:$src3),
      !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
      []>, Sched<[WriteShuffle]>;
  let mayLoad = 1 in
  def Yrmi : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
      (ins VR256:$src1, i256mem:$src2, u8imm:$src3),
      !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
      []>, Sched<[WriteShuffleLd, ReadAfterLd]>;
  }
}

let Predicates = [HasAVX] in
  defm VPALIGNR : ssse3_palignr<"vpalignr", 0>, VEX_4V;
let Predicates = [HasAVX2] in
  defm VPALIGNR : ssse3_palignr_y<"vpalignr", 0>, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
  defm PALIGNR : ssse3_palignr<"palignr">;
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
def : Pat<(v8i32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPALIGNRYrri VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v8f32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPALIGNRYrri VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPALIGNRYrri VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPALIGNRYrri VR256:$src1, VR256:$src2, imm:$imm)>;
}

let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
}

let Predicates = [UseSSSE3] in {
def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
}
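// For orientation (a sketch of the semantics being matched, per the Intel
// SDM): palignr concatenates $src1:$src2 as one double-width value and, per
// 128-bit lane, extracts 16 bytes starting $imm bytes above the low end.
// A single X86PAlignr node therefore covers every element type above; only
// the byte shuffle itself matters, not how the vector is typed.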
//===---------------------------------------------------------------------===//
// SSE3 - Thread synchronization
//===---------------------------------------------------------------------===//

let SchedRW = [WriteSystem] in {
let usesCustomInserter = 1 in {
def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
                [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>,
                Requires<[HasSSE3]>;
}

let Uses = [EAX, ECX, EDX] in
def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
                 TB, Requires<[HasSSE3]>;
let Uses = [ECX, EAX] in
def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
                [(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>,
                TB, Requires<[HasSSE3]>;
} // SchedRW

def : InstAlias<"mwait\t{%eax, %ecx|ecx, eax}", (MWAITrr)>, Requires<[Not64BitMode]>;
def : InstAlias<"mwait\t{%rax, %rcx|rcx, rax}", (MWAITrr)>, Requires<[In64BitMode]>;

def : InstAlias<"monitor\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORrrr)>,
      Requires<[Not64BitMode]>;
def : InstAlias<"monitor\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORrrr)>,
      Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Move with Sign/Zero Extend
//===----------------------------------------------------------------------===//

multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
                            RegisterClass OutRC, RegisterClass InRC,
                            OpndItins itins> {
  def rr : SS48I<opc, MRMSrcReg, (outs OutRC:$dst), (ins InRC:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [], itins.rr>,
                 Sched<[itins.Sched]>;

  def rm : SS48I<opc, MRMSrcMem, (outs OutRC:$dst), (ins MemOp:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [],
                 itins.rm>, Sched<[itins.Sched.Folded]>;
}

multiclass SS41I_pmovx_rm_all<bits<8> opc, string OpcodeStr,
                              X86MemOperand MemOp, X86MemOperand MemYOp,
                              OpndItins SSEItins, OpndItins AVXItins,
                              OpndItins AVX2Itins, Predicate prd> {
  defm NAME : SS41I_pmovx_rrrm<opc, OpcodeStr, MemOp, VR128, VR128, SSEItins>;
  let Predicates = [HasAVX, prd] in
    defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
                                   VR128, VR128, AVXItins>, VEX;
  let Predicates = [HasAVX2, prd] in
    defm V#NAME#Y : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemYOp,
                                     VR256, VR128, AVX2Itins>, VEX, VEX_L;
}

multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
                          X86MemOperand MemYOp, Predicate prd> {
  defm PMOVSX#NAME : SS41I_pmovx_rm_all<opc, !strconcat("pmovsx", OpcodeStr),
                                        MemOp, MemYOp,
                                        SSE_INTALU_ITINS_SHUFF_P,
                                        DEFAULT_ITINS_SHUFFLESCHED,
                                        DEFAULT_ITINS_SHUFFLESCHED, prd>;
  defm PMOVZX#NAME : SS41I_pmovx_rm_all<!add(opc, 0x10),
                                        !strconcat("pmovzx", OpcodeStr),
                                        MemOp, MemYOp,
                                        SSE_INTALU_ITINS_SHUFF_P,
                                        DEFAULT_ITINS_SHUFFLESCHED,
                                        DEFAULT_ITINS_SHUFFLESCHED, prd>;
}

defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem, NoVLX_Or_NoBWI>;
defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem, NoVLX>;
defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem, NoVLX>;

defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem, NoVLX>;
defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem, NoVLX>;

defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem, NoVLX>;
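// For orientation, one instantiation sketch: "defm BW" above ultimately
// defines PMOVSXBWrr/rm, VPMOVSXBWrr/rm and VPMOVSXBWYrr/rm, plus the
// matching PMOVZX* forms at opcode+0x10. The 128-bit memory forms take the
// narrower operand (i64mem here) since only the low source bits are read.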
// AVX2 Patterns
multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtOp> {
  // Register-Register patterns
  let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
  }
  let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v8i32 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BDYrr) VR128:$src)>;
  def : Pat<(v4i64 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BQYrr) VR128:$src)>;

  def : Pat<(v8i32 (ExtOp (v8i16 VR128:$src))),
            (!cast<I>(OpcPrefix#WDYrr) VR128:$src)>;
  def : Pat<(v4i64 (ExtOp (v8i16 VR128:$src))),
            (!cast<I>(OpcPrefix#WQYrr) VR128:$src)>;

  def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
            (!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;
  }

  // Simple Register-Memory patterns
  let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  def : Pat<(v16i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
  }
  let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
  def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;

  def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
            (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
  def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;

  def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
            (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
  }
  // AVX2 Register-Memory patterns. The full-width source cases (BW, WD, DQ)
  // only need the plain-load form plus the zero-extended load fragments.
  let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
  def : Pat<(v16i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
  def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
  }
  let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v8i32 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;

  def : Pat<(v4i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;

  def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
  def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;

  def : Pat<(v4i64 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;

  def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
  def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
  }
}

defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", X86vsext>;
defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", X86vzext>;
// SSE4.1/AVX patterns.
multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
                                SDNode ExtOp, PatFrag ExtLoad16> {
  let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  def : Pat<(v8i16 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BWrr) VR128:$src)>;
  }
  let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4i32 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BDrr) VR128:$src)>;
  def : Pat<(v2i64 (ExtOp (v16i8 VR128:$src))),
            (!cast<I>(OpcPrefix#BQrr) VR128:$src)>;

  def : Pat<(v4i32 (ExtOp (v8i16 VR128:$src))),
            (!cast<I>(OpcPrefix#WDrr) VR128:$src)>;
  def : Pat<(v2i64 (ExtOp (v8i16 VR128:$src))),
            (!cast<I>(OpcPrefix#WQrr) VR128:$src)>;

  def : Pat<(v2i64 (ExtOp (v4i32 VR128:$src))),
            (!cast<I>(OpcPrefix#DQrr) VR128:$src)>;
  }
  let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  def : Pat<(v8i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  }
  let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
  def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;

  def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;

  def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  }
  let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  def : Pat<(v8i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  def : Pat<(v8i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  def : Pat<(v8i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
  }
  let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;

  def : Pat<(v2i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (ExtLoad16 addr:$src)))))),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;

  def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
  def : Pat<(v4i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;

  def : Pat<(v2i64 (ExtOp (bc_v8i16 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (v8i16 (vzmovl_v4i32 addr:$src)))),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;

  def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  def : Pat<(v2i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
  }
}

defm : SS41I_pmovx_patterns<"VPMOVSX", "s", X86vsext, extloadi32i16>;
defm : SS41I_pmovx_patterns<"VPMOVZX", "z", X86vzext, loadi16_anyext>;

let Predicates = [UseSSE41] in {
  defm : SS41I_pmovx_patterns<"PMOVSX", "s", X86vsext, extloadi32i16>;
  defm : SS41I_pmovx_patterns<"PMOVZX", "z", X86vzext, loadi16_anyext>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Extract Instructions
//===----------------------------------------------------------------------===//

/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                 (ins VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32orGR64:$dst, (X86pextrb (v16i8 VR128:$src1),
                                         imm:$src2))]>,
                  Sched<[WriteShuffle]>;
  let hasSideEffects = 0, mayStore = 1,
      SchedRW = [WriteShuffleLd, WriteRMW] in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i8mem:$dst, VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (i8 (trunc (assertzext (X86pextrb (v16i8 VR128:$src1),
                                                 imm:$src2)))), addr:$dst)]>;
}

let Predicates = [HasAVX, NoBWI] in
  defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;

defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                   (ins VR128:$src1, u8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   []>, Sched<[WriteShuffle]>;

  let hasSideEffects = 0, mayStore = 1,
      SchedRW = [WriteShuffleLd, WriteRMW] in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i16mem:$dst, VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (i16 (trunc (assertzext (X86pextrw (v8i16 VR128:$src1),
                                                  imm:$src2)))), addr:$dst)]>;
}

let Predicates = [HasAVX, NoBWI] in
  defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;

defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                   (extractelt (v4i32 VR128:$src1), imm:$src2))]>,
                  Sched<[WriteShuffle]>;
  let SchedRW = [WriteShuffleLd, WriteRMW] in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i32mem:$dst, VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                          addr:$dst)]>;
}

let Predicates = [HasAVX, NoDQI] in
  defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;

defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                 (ins VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR64:$dst,
                   (extractelt (v2i64 VR128:$src1), imm:$src2))]>,
                  Sched<[WriteShuffle]>, REX_W;
  let SchedRW = [WriteShuffleLd, WriteRMW] in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i64mem:$dst, VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                          addr:$dst)]>, REX_W;
}

let Predicates = [HasAVX, NoDQI] in
  defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;

defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr,
                            OpndItins itins = DEFAULT_ITINS> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
                 (ins VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32orGR64:$dst,
                    (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))],
                    itins.rr>, Sched<[WriteFBlend]>;
  let SchedRW = [WriteFBlendLd, WriteRMW] in
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins f32mem:$dst, VR128:$src1, u8imm:$src2),
                 !strconcat(OpcodeStr,
                            "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
                          addr:$dst)], itins.rm>;
}

let ExeDomain = SSEPackedSingle in {
  let Predicates = [UseAVX] in
    defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
  defm EXTRACTPS   : SS41I_extractf32<0x17, "extractps", SSE_EXTRACT_ITINS>;
}

// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
                                              imm:$src2))),
                 addr:$dst),
          (VEXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
          Requires<[HasAVX]>;
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
                                              imm:$src2))),
                 addr:$dst),
          (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
          Requires<[UseSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Insert Instructions
//===----------------------------------------------------------------------===//

multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32orGR64:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, GR32orGR64:$src2, imm:$src3))]>,
      Sched<[WriteShuffle]>;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i8mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
                   imm:$src3))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}

let Predicates = [HasAVX, NoBWI] in
  defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRB : SS41I_insert8<0x20, "pinsrb">;

multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
      Sched<[WriteShuffle]>;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i32mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
                          imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}

let Predicates = [HasAVX, NoDQI] in
  defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRD : SS41I_insert32<0x22, "pinsrd">;

multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR64:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
      Sched<[WriteShuffle]>;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i64mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                          imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}

let Predicates = [HasAVX, NoDQI] in
  defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
let Constraints = "$src1 = $dst" in
  defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
// insertps has a few different modes; the first two below are optimized
// inserts that won't zero arbitrary elements in the destination vector.
// The next one matches the intrinsic and can zero arbitrary elements in
// the target vector.
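// Immediate layout reminder (per the Intel SDM; not encoded in TableGen):
//   imm[7:6] = count_s, the source element selected from $src2
//   imm[5:4] = count_d, the destination slot written in $src1
//   imm[3:0] = zmask, destination elements forced to zero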
multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1,
                           OpndItins itins = DEFAULT_ITINS> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insertps VR128:$src1, VR128:$src2, imm:$src3))], itins.rr>,
      Sched<[WriteFShuffle]>;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, f32mem:$src2, u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insertps VR128:$src1,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                    imm:$src3))], itins.rm>,
      Sched<[WriteFShuffleLd, ReadAfterLd]>;
}

let ExeDomain = SSEPackedSingle in {
  let Predicates = [UseAVX] in
    defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
  let Constraints = "$src1 = $dst" in
    defm INSERTPS : SS41I_insertf32<0x21, "insertps", 1, SSE_INSERT_ITINS>;
}

let Predicates = [UseSSE41] in {
  // If we're inserting an element from a load or a null pshuf of a load,
  // fold the load into the insertps instruction.
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd (v4f32
                   (scalar_to_vector (loadf32 addr:$src2))), (i8 0)),
                   imm:$src3)),
            (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd
                   (loadv4f32 addr:$src2), (i8 0)), imm:$src3)),
            (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
}

let Predicates = [UseAVX] in {
  // If we're inserting an element from a vbroadcast of a load, fold the
  // load into the X86insertps instruction.
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
                  (X86VBroadcast (loadf32 addr:$src2)), imm:$src3)),
            (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
                  (X86VBroadcast (loadv4f32 addr:$src2)), imm:$src3)),
            (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Round Instructions
//===----------------------------------------------------------------------===//

multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
                            X86MemOperand x86memop, RegisterClass RC,
                            PatFrag mem_frag32, PatFrag mem_frag64,
                            Intrinsic V4F32Int, Intrinsic V2F64Int> {
let ExeDomain = SSEPackedSingle in {
  // Vector intrinsic operation, reg
  def PSr : SS4AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))],
                    IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;

  // Vector intrinsic operation, mem
  def PSm : SS4AIi8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V4F32Int (mem_frag32 addr:$src1), imm:$src2))],
                    IIC_SSE_ROUNDPS_MEM>, Sched<[WriteFAddLd]>;
} // ExeDomain = SSEPackedSingle

let ExeDomain = SSEPackedDouble in {
  // Vector intrinsic operation, reg
  def PDr : SS4AIi8<opcpd, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32u8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))],
                    IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;

  // Vector intrinsic operation, mem
  def PDm : SS4AIi8<opcpd, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32u8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V2F64Int (mem_frag64 addr:$src1), imm:$src2))],
                    IIC_SSE_ROUNDPS_MEM>, Sched<[WriteFAddLd]>;
} // ExeDomain = SSEPackedDouble
}

multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
                             string OpcodeStr,
                             Intrinsic F32Int,
                             Intrinsic F64Int, bit Is2Addr = 1> {
let ExeDomain = GenericDomain in {
  // Operation, reg.
  let hasSideEffects = 0 in
  def SSr : SS4AIi8<opcss, MRMSrcReg,
      (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, Sched<[WriteFAdd]>;

  // Intrinsic operation, reg.
  let isCodeGenOnly = 1 in
  def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
      Sched<[WriteFAdd]>;

  // Intrinsic operation, mem.
  def SSm : SS4AIi8<opcss, MRMSrcMem,
      (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
           (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
      Sched<[WriteFAddLd, ReadAfterLd]>;

  // Operation, reg.
  let hasSideEffects = 0 in
  def SDr : SS4AIi8<opcsd, MRMSrcReg,
      (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, Sched<[WriteFAdd]>;

  // Intrinsic operation, reg.
  let isCodeGenOnly = 1 in
  def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
      Sched<[WriteFAdd]>;

  // Intrinsic operation, mem.
  def SDm : SS4AIi8<opcsd, MRMSrcMem,
      (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(OpcodeStr,
              "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
            (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
      Sched<[WriteFAddLd, ReadAfterLd]>;
} // ExeDomain = GenericDomain
}
// FP round - roundss, roundps, roundsd, roundpd
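// Rounding-control immediate used by the patterns below (per the Intel SDM;
// bits [1:0] select the mode, bit 2 selects MXCSR.RC instead, and bit 3
// suppresses the precision exception):
//   0x4 = use MXCSR.RC, may raise precision  -> frint
//   0x9 = round down (floor), no exception   -> ffloor
//   0xA = round up (ceil), no exception      -> fceil
//   0xB = truncate, no exception             -> ftrunc
//   0xC = use MXCSR.RC, no exception         -> fnearbyint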
let Predicates = [HasAVX] in {
  // Intrinsic form
  defm VROUND  : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
                                  loadv4f32, loadv2f64,
                                  int_x86_sse41_round_ps,
                                  int_x86_sse41_round_pd>, VEX;
  defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
                                  loadv8f32, loadv4f64,
                                  int_x86_avx_round_ps_256,
                                  int_x86_avx_round_pd_256>, VEX, VEX_L;
  defm VROUND  : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
                                   int_x86_sse41_round_ss,
                                   int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG;
}

let Predicates = [UseAVX] in {
  def : Pat<(ffloor FR32:$src),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x9))>;
  def : Pat<(f64 (ffloor FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x9))>;
  def : Pat<(f32 (fnearbyint FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
  def : Pat<(f64 (fnearbyint FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
  def : Pat<(f32 (fceil FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xA))>;
  def : Pat<(f64 (fceil FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xA))>;
  def : Pat<(f32 (frint FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
  def : Pat<(f64 (frint FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
  def : Pat<(f32 (ftrunc FR32:$src)),
            (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xB))>;
  def : Pat<(f64 (ftrunc FR64:$src)),
            (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xB))>;
}

let Predicates = [HasAVX] in {
  def : Pat<(v4f32 (ffloor VR128:$src)),
            (VROUNDPSr VR128:$src, (i32 0x9))>;
  def : Pat<(v4f32 (fnearbyint VR128:$src)),
            (VROUNDPSr VR128:$src, (i32 0xC))>;
  def : Pat<(v4f32 (fceil VR128:$src)),
            (VROUNDPSr VR128:$src, (i32 0xA))>;
  def : Pat<(v4f32 (frint VR128:$src)),
            (VROUNDPSr VR128:$src, (i32 0x4))>;
  def : Pat<(v4f32 (ftrunc VR128:$src)),
            (VROUNDPSr VR128:$src, (i32 0xB))>;

  def : Pat<(v2f64 (ffloor VR128:$src)),
            (VROUNDPDr VR128:$src, (i32 0x9))>;
  def : Pat<(v2f64 (fnearbyint VR128:$src)),
            (VROUNDPDr VR128:$src, (i32 0xC))>;
  def : Pat<(v2f64 (fceil VR128:$src)),
            (VROUNDPDr VR128:$src, (i32 0xA))>;
  def : Pat<(v2f64 (frint VR128:$src)),
            (VROUNDPDr VR128:$src, (i32 0x4))>;
  def : Pat<(v2f64 (ftrunc VR128:$src)),
            (VROUNDPDr VR128:$src, (i32 0xB))>;

  def : Pat<(v8f32 (ffloor VR256:$src)),
            (VROUNDYPSr VR256:$src, (i32 0x9))>;
  def : Pat<(v8f32 (fnearbyint VR256:$src)),
            (VROUNDYPSr VR256:$src, (i32 0xC))>;
  def : Pat<(v8f32 (fceil VR256:$src)),
            (VROUNDYPSr VR256:$src, (i32 0xA))>;
  def : Pat<(v8f32 (frint VR256:$src)),
            (VROUNDYPSr VR256:$src, (i32 0x4))>;
  def : Pat<(v8f32 (ftrunc VR256:$src)),
            (VROUNDYPSr VR256:$src, (i32 0xB))>;

  def : Pat<(v4f64 (ffloor VR256:$src)),
            (VROUNDYPDr VR256:$src, (i32 0x9))>;
  def : Pat<(v4f64 (fnearbyint VR256:$src)),
            (VROUNDYPDr VR256:$src, (i32 0xC))>;
  def : Pat<(v4f64 (fceil VR256:$src)),
            (VROUNDYPDr VR256:$src, (i32 0xA))>;
  def : Pat<(v4f64 (frint VR256:$src)),
            (VROUNDYPDr VR256:$src, (i32 0x4))>;
  def : Pat<(v4f64 (ftrunc VR256:$src)),
            (VROUNDYPDr VR256:$src, (i32 0xB))>;
}
defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
                              memopv4f32, memopv2f64,
                              int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
let Constraints = "$src1 = $dst" in
defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
                               int_x86_sse41_round_ss, int_x86_sse41_round_sd>;

let Predicates = [UseSSE41] in {
  def : Pat<(ffloor FR32:$src),
            (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x9))>;
  def : Pat<(f64 (ffloor FR64:$src)),
            (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x9))>;
  def : Pat<(f32 (fnearbyint FR32:$src)),
            (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
  def : Pat<(f64 (fnearbyint FR64:$src)),
            (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
  def : Pat<(f32 (fceil FR32:$src)),
            (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xA))>;
  def : Pat<(f64 (fceil FR64:$src)),
            (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xA))>;
  def : Pat<(f32 (frint FR32:$src)),
            (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
  def : Pat<(f64 (frint FR64:$src)),
            (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
  def : Pat<(f32 (ftrunc FR32:$src)),
            (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xB))>;
  def : Pat<(f64 (ftrunc FR64:$src)),
            (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xB))>;

  def : Pat<(v4f32 (ffloor VR128:$src)),
            (ROUNDPSr VR128:$src, (i32 0x9))>;
  def : Pat<(v4f32 (fnearbyint VR128:$src)),
            (ROUNDPSr VR128:$src, (i32 0xC))>;
  def : Pat<(v4f32 (fceil VR128:$src)),
            (ROUNDPSr VR128:$src, (i32 0xA))>;
  def : Pat<(v4f32 (frint VR128:$src)),
            (ROUNDPSr VR128:$src, (i32 0x4))>;
  def : Pat<(v4f32 (ftrunc VR128:$src)),
            (ROUNDPSr VR128:$src, (i32 0xB))>;

  def : Pat<(v2f64 (ffloor VR128:$src)),
            (ROUNDPDr VR128:$src, (i32 0x9))>;
  def : Pat<(v2f64 (fnearbyint VR128:$src)),
            (ROUNDPDr VR128:$src, (i32 0xC))>;
  def : Pat<(v2f64 (fceil VR128:$src)),
            (ROUNDPDr VR128:$src, (i32 0xA))>;
  def : Pat<(v2f64 (frint VR128:$src)),
            (ROUNDPDr VR128:$src, (i32 0x4))>;
  def : Pat<(v2f64 (ftrunc VR128:$src)),
            (ROUNDPDr VR128:$src, (i32 0xB))>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Bit Test
//===----------------------------------------------------------------------===//

// PTEST: we lower the corresponding Intel intrinsic to X86ptest in
// X86ISelLowering; these definitions provide the instruction forms.
let Defs = [EFLAGS], Predicates = [HasAVX] in {
def VPTESTrr  : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
                Sched<[WriteVecLogic]>, VEX;
def VPTESTrm  : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR128:$src1, (loadv2i64 addr:$src2)))]>,
                Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;

def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
                Sched<[WriteVecLogic]>, VEX, VEX_L;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR256:$src1, (loadv4i64 addr:$src2)))]>,
                Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX, VEX_L;
}

let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
              "ptest\t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
              Sched<[WriteVecLogic]>;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
              "ptest\t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
              Sched<[WriteVecLogicLd, ReadAfterLd]>;
}
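// Flag semantics for reference (Intel SDM): ptest sets ZF when
// ($src1 & $src2) == 0 and CF when (~$src1 & $src2) == 0, which is what the
// EFLAGS-producing X86ptest node models.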
// The bit test instructions below are AVX only
multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
  def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>,
            Sched<[WriteVecLogic]>, VEX;
  def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
            Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;
}

let Defs = [EFLAGS], Predicates = [HasAVX] in {
let ExeDomain = SSEPackedSingle in {
defm VTESTPS  : avx_bittest<0x0E, "vtestps", VR128, f128mem, loadv4f32, v4f32>;
defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, loadv8f32, v8f32>,
                            VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VTESTPD  : avx_bittest<0x0F, "vtestpd", VR128, f128mem, loadv2f64, v2f64>;
defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, loadv4f64, v4f64>,
                            VEX_L;
}
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Misc Instructions
//===----------------------------------------------------------------------===//

let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
  def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
                     "popcnt{w}\t{$src, $dst|$dst, $src}",
                     [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)],
                     IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
                     OpSize16, XS;
  def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                     "popcnt{w}\t{$src, $dst|$dst, $src}",
                     [(set GR16:$dst, (ctpop (loadi16 addr:$src))),
                      (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
                      Sched<[WriteFAddLd]>, OpSize16, XS;

  def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
                     "popcnt{l}\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)],
                     IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
                     OpSize32, XS;
  def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                     "popcnt{l}\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (ctpop (loadi32 addr:$src))),
                      (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
                      Sched<[WriteFAddLd]>, OpSize32, XS;

  def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                      "popcnt{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)],
                      IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>, XS;
  def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                      "popcnt{q}\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, (ctpop (loadi64 addr:$src))),
                       (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
                       Sched<[WriteFAddLd]>, XS;
}
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128, PatFrag ld_frag,
                                 X86FoldableSchedWrite Sched> {
  def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
                    Sched<[Sched]>;
  def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128 (bitconvert (ld_frag addr:$src))))]>,
                    Sched<[Sched.Folded]>;
}

// PHMIN has the same profile as PSAD, thus we use the same scheduling
// model, although the naming is misleading.
let Predicates = [HasAVX] in
defm VPHMINPOSUW : SS41I_unop_rm_int_v16<0x41, "vphminposuw",
                                         int_x86_sse41_phminposuw, loadv2i64,
                                         WriteVecIMul>, VEX;
defm PHMINPOSUW  : SS41I_unop_rm_int_v16<0x41, "phminposuw",
                                         int_x86_sse41_phminposuw, memopv2i64,
                                         WriteVecIMul>;
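// Semantics note: phminposuw scans the eight unsigned words of its source,
// writing the minimum value to dst[15:0], its index to dst[18:16], and
// zeroing the remaining bits of the destination register.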
/// SS48I_binop_rm - Simple SSE41 binary operator.
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop, bit Is2Addr = 1,
                          OpndItins itins = SSE_INTALU_ITINS_P> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
       Sched<[itins.Sched]>;
  def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1, (bitconvert (memop_frag addr:$src2)))))]>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

/// SS48I_binop_rm2 - Simple SSE41 binary operator with different src and dst
/// types.
multiclass SS48I_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           ValueType DstVT, ValueType SrcVT, RegisterClass RC,
                           PatFrag memop_frag, X86MemOperand x86memop,
                           OpndItins itins,
                           bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
       Sched<[itins.Sched]>;
  def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
                                     (bitconvert (memop_frag addr:$src2)))))]>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
let Predicates = [HasAVX, NoVLX] in {
  defm VPMINSD  : SS48I_binop_rm<0x39, "vpminsd", smin, v4i32, VR128,
                                 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V;
  defm VPMINUD  : SS48I_binop_rm<0x3B, "vpminud", umin, v4i32, VR128,
                                 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V;
  defm VPMAXSD  : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v4i32, VR128,
                                 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V;
  defm VPMAXUD  : SS48I_binop_rm<0x3F, "vpmaxud", umax, v4i32, VR128,
                                 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V;
  defm VPMULDQ  : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v2i64, v4i32,
                                  VR128, loadv2i64, i128mem,
                                  SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
}
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
  defm VPMINSB  : SS48I_binop_rm<0x38, "vpminsb", smin, v16i8, VR128,
                                 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V;
  defm VPMINUW  : SS48I_binop_rm<0x3A, "vpminuw", umin, v8i16, VR128,
                                 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V;
  defm VPMAXSB  : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v16i8, VR128,
                                 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V;
  defm VPMAXUW  : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v8i16, VR128,
                                 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V;
}

let Predicates = [HasAVX2, NoVLX] in {
  defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", smin, v8i32, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", umin, v8i32, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v8i32, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", umax, v8i32, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMULDQY : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v4i64, v8i32,
                                  VR256, loadv4i64, i256mem,
                                  SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
  defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", smin, v32i8, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", umin, v16i16, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v32i8, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
  defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v16i16, VR256,
                                 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V, VEX_L;
}

let Constraints = "$src1 = $dst" in {
  defm PMINSB : SS48I_binop_rm<0x38, "pminsb", smin, v16i8, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMINSD : SS48I_binop_rm<0x39, "pminsd", smin, v4i32, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMINUD : SS48I_binop_rm<0x3B, "pminud", umin, v4i32, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMINUW : SS48I_binop_rm<0x3A, "pminuw", umin, v8i16, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMAXSB : SS48I_binop_rm<0x3C, "pmaxsb", smax, v16i8, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMAXSD : SS48I_binop_rm<0x3D, "pmaxsd", smax, v4i32, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMAXUD : SS48I_binop_rm<0x3F, "pmaxud", umax, v4i32, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMAXUW : SS48I_binop_rm<0x3E, "pmaxuw", umax, v8i16, VR128,
                               memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
  defm PMULDQ : SS48I_binop_rm2<0x28, "pmuldq", X86pmuldq, v2i64, v4i32,
                                VR128, memopv2i64, i128mem,
                                SSE_INTMUL_ITINS_P, 1>;
}
let Predicates = [HasAVX, NoVLX] in {
  defm VPMULLD  : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
                                 memopv2i64, i128mem, 0, SSE_PMULLD_ITINS>,
                                 VEX_4V;
  defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
                                 memopv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
                                 VEX_4V;
}
let Predicates = [HasAVX2] in {
  defm VPMULLDY  : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
                                  loadv4i64, i256mem, 0, SSE_PMULLD_ITINS>,
                                  VEX_4V, VEX_L;
  defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
                                  loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
                                  VEX_4V, VEX_L;
}

let Constraints = "$src1 = $dst" in {
  defm PMULLD  : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
                                memopv2i64, i128mem, 1, SSE_PMULLD_ITINS>;
  defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
                                memopv2i64, i128mem, 1, SSE_INTALUQ_ITINS_P>;
}
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
                 X86MemOperand x86memop, bit Is2Addr = 1,
                 OpndItins itins = DEFAULT_ITINS> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))], itins.rr>,
        Sched<[itins.Sched]>;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (IntId RC:$src1,
           (bitconvert (memop_frag addr:$src2)), imm:$src3))], itins.rm>,
        Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

/// SS41I_binop_rmi - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                           X86MemOperand x86memop, bit Is2Addr = 1,
                           OpndItins itins = DEFAULT_ITINS> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, imm:$src3)))],
        itins.rr>, Sched<[itins.Sched]>;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (OpVT (OpNode RC:$src1,
                 (bitconvert (memop_frag addr:$src2)), imm:$src3)))], itins.rm>,
        Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
  let isCommutable = 0 in {
    defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
                                        VR128, loadv2i64, i128mem, 0,
                                        DEFAULT_ITINS_MPSADSCHED>, VEX_4V;
  }

  let ExeDomain = SSEPackedSingle in {
    defm VBLENDPS : SS41I_binop_rmi<0x0C, "vblendps", X86Blendi, v4f32,
                                    VR128, loadv4f32, f128mem, 0,
                                    DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
    defm VBLENDPSY : SS41I_binop_rmi<0x0C, "vblendps", X86Blendi, v8f32,
                                     VR256, loadv8f32, f256mem, 0,
                                     DEFAULT_ITINS_FBLENDSCHED>, VEX_4V, VEX_L;
  }
  let ExeDomain = SSEPackedDouble in {
    defm VBLENDPD : SS41I_binop_rmi<0x0D, "vblendpd", X86Blendi, v2f64,
                                    VR128, loadv2f64, f128mem, 0,
                                    DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
    defm VBLENDPDY : SS41I_binop_rmi<0x0D, "vblendpd", X86Blendi, v4f64,
                                     VR256, loadv4f64, f256mem, 0,
                                     DEFAULT_ITINS_FBLENDSCHED>, VEX_4V, VEX_L;
  }
  defm VPBLENDW : SS41I_binop_rmi<0x0E, "vpblendw", X86Blendi, v8i16,
                                  VR128, loadv2i64, i128mem, 0,
                                  DEFAULT_ITINS_BLENDSCHED>, VEX_4V;

  let ExeDomain = SSEPackedSingle in
  defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
                                   VR128, loadv4f32, f128mem, 0,
                                   SSE_DPPS_ITINS>, VEX_4V;
  let ExeDomain = SSEPackedDouble in
  defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
                                   VR128, loadv2f64, f128mem, 0,
                                   SSE_DPPD_ITINS>, VEX_4V;
  let ExeDomain = SSEPackedSingle in
  defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
                                    VR256, loadv8f32, i256mem, 0,
                                    SSE_DPPS_ITINS>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX2] in {
  let isCommutable = 0 in {
    defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
                                         VR256, loadv4i64, i256mem, 0,
                                         DEFAULT_ITINS_MPSADSCHED>, VEX_4V, VEX_L;
  }
  defm VPBLENDWY : SS41I_binop_rmi<0x0E, "vpblendw", X86Blendi, v16i16,
                                   VR256, loadv4i64, i256mem, 0,
                                   DEFAULT_ITINS_BLENDSCHED>, VEX_4V, VEX_L;
}

let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in {
    defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
                                       VR128, memopv2i64, i128mem,
                                       1, SSE_MPSADBW_ITINS>;
  }
  let ExeDomain = SSEPackedSingle in
  defm BLENDPS : SS41I_binop_rmi<0x0C, "blendps", X86Blendi, v4f32,
                                 VR128, memopv4f32, f128mem,
                                 1, SSE_INTALU_ITINS_FBLEND_P>;
  let ExeDomain = SSEPackedDouble in
  defm BLENDPD : SS41I_binop_rmi<0x0D, "blendpd", X86Blendi, v2f64,
                                 VR128, memopv2f64, f128mem,
                                 1, SSE_INTALU_ITINS_FBLEND_P>;
  defm PBLENDW : SS41I_binop_rmi<0x0E, "pblendw", X86Blendi, v8i16,
                                 VR128, memopv2i64, i128mem,
                                 1, SSE_INTALU_ITINS_BLEND_P>;
  let ExeDomain = SSEPackedSingle in
  defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
                                  VR128, memopv4f32, f128mem, 1,
                                  SSE_DPPS_ITINS>;
  let ExeDomain = SSEPackedDouble in
  defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
                                  VR128, memopv2f64, f128mem, 1,
                                  SSE_DPPD_ITINS>;
}
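// For the dot-product instructions above, the dpps immediate holds two 4-bit
// masks: bits 7:4 select which input elements enter the dot product and bits
// 3:0 select which result lanes receive the sum (all other lanes are zeroed);
// dppd works the same way with two 2-bit masks. For example,
//   dpps $0xf1, %xmm1, %xmm0
// multiplies all four lane pairs but writes the sum only to lane 0.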
/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
                                    RegisterClass RC, X86MemOperand x86memop,
                                    PatFrag mem_frag, Intrinsic IntId,
                                    X86FoldableSchedWrite Sched> {
  def rr : Ii8<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
               NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
               Sched<[Sched]>;

  def rm : Ii8<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, x86memop:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set RC:$dst,
                 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
                        RC:$src3))],
               NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
               Sched<[Sched.Folded, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedDouble in {
defm VBLENDVPD  : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
                                           loadv2f64, int_x86_sse41_blendvpd,
                                           WriteFVarBlend>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
                                           loadv4f64, int_x86_avx_blendv_pd_256,
                                           WriteFVarBlend>, VEX_L;
} // ExeDomain = SSEPackedDouble
let ExeDomain = SSEPackedSingle in {
defm VBLENDVPS  : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
                                           loadv4f32, int_x86_sse41_blendvps,
                                           WriteFVarBlend>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
                                           loadv8f32, int_x86_avx_blendv_ps_256,
                                           WriteFVarBlend>, VEX_L;
} // ExeDomain = SSEPackedSingle
defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
                                          loadv2i64, int_x86_sse41_pblendvb,
                                          WriteVarBlend>;
} // Predicates = [HasAVX]

let Predicates = [HasAVX2] in {
defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
                                           loadv4i64, int_x86_avx2_pblendvb,
                                           WriteVarBlend>, VEX_L;
}
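// The vselect patterns below look backwards at first sight: blendv copies an
// element from the second (non-tied) source when the corresponding mask sign
// bit is 1 and from the destination otherwise, whereas ISD::VSELECT picks its
// first operand on a true mask bit. Swapping $src1/$src2 in the output
// instruction reconciles the two conventions.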
let Predicates = [HasAVX] in {
  def : Pat<(v16i8 (vselect (v16i8 VR128:$mask), (v16i8 VR128:$src1),
                            (v16i8 VR128:$src2))),
            (VPBLENDVBrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v4i32 (vselect (v4i32 VR128:$mask), (v4i32 VR128:$src1),
                            (v4i32 VR128:$src2))),
            (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v4f32 (vselect (v4i32 VR128:$mask), (v4f32 VR128:$src1),
                            (v4f32 VR128:$src2))),
            (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v2i64 (vselect (v2i64 VR128:$mask), (v2i64 VR128:$src1),
                            (v2i64 VR128:$src2))),
            (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v2f64 (vselect (v2i64 VR128:$mask), (v2f64 VR128:$src1),
                            (v2f64 VR128:$src2))),
            (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
  def : Pat<(v8i32 (vselect (v8i32 VR256:$mask), (v8i32 VR256:$src1),
                            (v8i32 VR256:$src2))),
            (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v8f32 (vselect (v8i32 VR256:$mask), (v8f32 VR256:$src1),
                            (v8f32 VR256:$src2))),
            (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v4i64 (vselect (v4i64 VR256:$mask), (v4i64 VR256:$src1),
                            (v4i64 VR256:$src2))),
            (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
  def : Pat<(v4f64 (vselect (v4i64 VR256:$mask), (v4f64 VR256:$src1),
                            (v4f64 VR256:$src2))),
            (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
}

let Predicates = [HasAVX2] in {
  def : Pat<(v32i8 (vselect (v32i8 VR256:$mask), (v32i8 VR256:$src1),
                            (v32i8 VR256:$src2))),
            (VPBLENDVBYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
}
// FIXME: Prefer a movss or movsd over a blendps when optimizing for size or
// on targets where they have equal performance. These were changed to use
// blends because blends have better throughput on SandyBridge and Haswell, but
// movs[s/d] are 1-2 byte shorter instructions.
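// For example, for the register-register forms (byte counts are for the
// legacy SSE encodings; the VEX forms differ by a similar margin):
//   movss   %xmm1, %xmm0       = F3 0F 10 C1        (4 bytes)
//   blendps $1, %xmm1, %xmm0   = 66 0F 3A 0C C1 01  (6 bytes)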
let Predicates = [UseAVX] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVS{S,D} to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (VPBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;

  // Move low f32 and clear high bits.
  def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
            (VBLENDPSYrri (v8f32 (AVX_SET0)), VR256:$src, (i8 1))>;

  // Move low f64 and clear high bits.
  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
            (VBLENDPDYrri (v4f64 (AVX_SET0)), VR256:$src, (i8 1))>;
  }

  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                               (v4f32 (scalar_to_vector FR32:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0),
                           (v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)),
                           sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                               (v2f64 (scalar_to_vector FR64:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0),
                           (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
                           sub_xmm)>;

  // These will incur an FP/int domain crossing penalty, but it may be the only
  // way without AVX2. Do not add any complexity because we may be able to match
  // more optimal patterns defined earlier in this file.
  def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
            (VBLENDPSYrri (v8i32 (AVX_SET0)), VR256:$src, (i8 1))>;
  def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
            (VBLENDPDYrri (v4i64 (AVX_SET0)), VR256:$src, (i8 1))>;
}
// FIXME: Prefer a movss or movsd over a blendps when optimizing for size or
// on targets where they have equal performance. These were changed to use
// blends because blends have better throughput on SandyBridge and Haswell, but
// movs[s/d] are 1-2 byte shorter instructions.
let Predicates = [UseSSE41], AddedComplexity = 15 in {
  // With SSE41 we can use blends for these patterns.
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
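  // pblendw operates on 16-bit lanes, so zero-extending the low i32 element
  // needs the two low words from $src: mask 0b00000011 = 3. (blendps above
  // only needs bit 0 since its lanes are 32 bits wide.)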
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (PBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
}
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                               X86MemOperand x86memop, Intrinsic IntId,
                               OpndItins itins = DEFAULT_ITINS> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                               "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))],
                    itins.rr>, Sched<[itins.Sched]>;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, x86memop:$src2),
                    !strconcat(OpcodeStr,
                               "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId VR128:$src1,
                       (bitconvert (mem_frag addr:$src2)), XMM0))],
                    itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
}
let ExeDomain = SSEPackedDouble in
defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", memopv2f64, f128mem,
                                  int_x86_sse41_blendvpd,
                                  DEFAULT_ITINS_FBLENDSCHED>;
let ExeDomain = SSEPackedSingle in
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", memopv4f32, f128mem,
                                  int_x86_sse41_blendvps,
                                  DEFAULT_ITINS_FBLENDSCHED>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64, i128mem,
                                  int_x86_sse41_pblendvb,
                                  DEFAULT_ITINS_VARBLENDSCHED>;

// Aliases with the implicit xmm0 argument
def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (BLENDVPDrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (BLENDVPDrm0 VR128:$dst, f128mem:$src2)>;
def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (BLENDVPSrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (BLENDVPSrm0 VR128:$dst, f128mem:$src2)>;
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (PBLENDVBrr0 VR128:$dst, VR128:$src2)>;
def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (PBLENDVBrm0 VR128:$dst, i128mem:$src2)>;
let Predicates = [UseSSE41] in {
  def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
                            (v16i8 VR128:$src2))),
            (PBLENDVBrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v4i32 (vselect (v4i32 XMM0), (v4i32 VR128:$src1),
                            (v4i32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v4f32 (vselect (v4i32 XMM0), (v4f32 VR128:$src1),
                            (v4f32 VR128:$src2))),
            (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2i64 (vselect (v2i64 XMM0), (v2i64 VR128:$src1),
                            (v2i64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
  def : Pat<(v2f64 (vselect (v2i64 XMM0), (v2f64 VR128:$src1),
                            (v2f64 VR128:$src2))),
            (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
}
let AddedComplexity = 400 in { // Prefer non-temporal versions
let SchedRW = [WriteLoad] in {
let Predicates = [HasAVX, NoVLX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        VEX;
let Predicates = [HasAVX2, NoVLX] in
def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                         "vmovntdqa\t{$src, $dst|$dst, $src}",
                         [(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
                         VEX, VEX_L;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;
} // SchedRW
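// (v)movntdqa faults on a misaligned operand, so only aligned non-temporal
// loads may select it below; unaligned non-temporal loads fall back to
// ordinary load selection.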
let Predicates = [HasAVX2, NoVLX] in {
  def : Pat<(v8f32 (alignednontemporalload addr:$src)),
            (VMOVNTDQAYrm addr:$src)>;
  def : Pat<(v4f64 (alignednontemporalload addr:$src)),
            (VMOVNTDQAYrm addr:$src)>;
  def : Pat<(v4i64 (alignednontemporalload addr:$src)),
            (VMOVNTDQAYrm addr:$src)>;
}

let Predicates = [HasAVX, NoVLX] in {
  def : Pat<(v4f32 (alignednontemporalload addr:$src)),
            (VMOVNTDQArm addr:$src)>;
  def : Pat<(v2f64 (alignednontemporalload addr:$src)),
            (VMOVNTDQArm addr:$src)>;
  def : Pat<(v2i64 (alignednontemporalload addr:$src)),
            (VMOVNTDQArm addr:$src)>;
}

let Predicates = [UseSSE41] in {
  def : Pat<(v4f32 (alignednontemporalload addr:$src)),
            (MOVNTDQArm addr:$src)>;
  def : Pat<(v2f64 (alignednontemporalload addr:$src)),
            (MOVNTDQArm addr:$src)>;
  def : Pat<(v2i64 (alignednontemporalload addr:$src)),
            (MOVNTDQArm addr:$src)>;
}

} // AddedComplexity
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//

/// SS42I_binop_rm - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop, bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>;
  def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst,
         (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>;
}

let Predicates = [HasAVX] in
  defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
                                 loadv2i64, i128mem, 0>, VEX_4V;

let Predicates = [HasAVX2] in
  defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
                                  loadv4i64, i256mem, 0>, VEX_4V, VEX_L;

let Constraints = "$src1 = $dst" in
  defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
                                memopv2i64, i128mem>;
//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//

// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm, PatFrag ld_frag> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, u8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
                                                  imm:$src3))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1,
                       (bc_v16i8 (ld_frag addr:$src2)), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128", loadv2i64>,
                       Requires<[HasAVX]>;
  defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128", memopv2i64>,
                      Requires<[UseSSE42]>;
}
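// The pseudos above carry the selection patterns; they are expanded late by
// the custom inserter, which (roughly speaking) emits the real instruction
// defined below and copies its implicit XMM0/ECX result into the pattern's
// virtual register. The real instructions therefore have no patterns of
// their own.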
multiclass pcmpistrm_SS42AI<string asm> {
  def rr : SS42AI<0x62, MRMSrcReg, (outs),
            (ins VR128:$src1, VR128:$src2, u8imm:$src3),
            !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
            []>, Sched<[WritePCmpIStrM]>;

  def rm : SS42AI<0x62, MRMSrcMem, (outs),
            (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
            !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
            []>, Sched<[WritePCmpIStrMLd, ReadAfterLd]>;
}

let Defs = [XMM0, EFLAGS], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPISTRM128 : pcmpistrm_SS42AI<"vpcmpistrm">, VEX;
  defm PCMPISTRM128 : pcmpistrm_SS42AI<"pcmpistrm">;
}
// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm, PatFrag ld_frag> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src3, u8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX,
                       (bc_v16i8 (ld_frag addr:$src3)), EDX, imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128", loadv2i64>,
                       Requires<[HasAVX]>;
  defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128", memopv2i64>,
                      Requires<[UseSSE42]>;
}

multiclass SS42AI_pcmpestrm<string asm> {
  def rr : SS42AI<0x60, MRMSrcReg, (outs),
            (ins VR128:$src1, VR128:$src3, u8imm:$src5),
            !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
            []>, Sched<[WritePCmpEStrM]>;

  def rm : SS42AI<0x60, MRMSrcMem, (outs),
            (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
            !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
            []>, Sched<[WritePCmpEStrMLd, ReadAfterLd]>;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPESTRM128 : SS42AI_pcmpestrm<"vpcmpestrm">, VEX;
  defm PCMPESTRM128 : SS42AI_pcmpestrm<"pcmpestrm">;
}
// Packed Compare Implicit Length Strings, Return Index
multiclass pseudo_pcmpistri<string asm, PatFrag ld_frag> {
  def REG : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, VR128:$src2, u8imm:$src3),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpistri VR128:$src1, VR128:$src2, imm:$src3))]>;
  def MEM : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
    [(set GR32:$dst, EFLAGS, (X86pcmpistri VR128:$src1,
      (bc_v16i8 (ld_frag addr:$src2)), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm VPCMPISTRI : pseudo_pcmpistri<"#VPCMPISTRI", loadv2i64>,
                    Requires<[HasAVX]>;
  defm PCMPISTRI : pseudo_pcmpistri<"#PCMPISTRI", memopv2i64>,
                   Requires<[UseSSE42]>;
}

multiclass SS42AI_pcmpistri<string asm> {
  def rr : SS42AI<0x63, MRMSrcReg, (outs),
            (ins VR128:$src1, VR128:$src2, u8imm:$src3),
            !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
            []>, Sched<[WritePCmpIStrI]>;

  def rm : SS42AI<0x63, MRMSrcMem, (outs),
            (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
            !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
            []>, Sched<[WritePCmpIStrILd, ReadAfterLd]>;
}

let Defs = [ECX, EFLAGS], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
  defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
}
// Packed Compare Explicit Length Strings, Return Index
multiclass pseudo_pcmpestri<string asm, PatFrag ld_frag> {
  def REG : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, VR128:$src3, u8imm:$src5),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpestri VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs GR32:$dst),
                    (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
    [(set GR32:$dst, EFLAGS,
      (X86pcmpestri VR128:$src1, EAX, (bc_v16i8 (ld_frag addr:$src3)), EDX,
       imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm VPCMPESTRI : pseudo_pcmpestri<"#VPCMPESTRI", loadv2i64>,
                    Requires<[HasAVX]>;
  defm PCMPESTRI : pseudo_pcmpestri<"#PCMPESTRI", memopv2i64>,
                   Requires<[UseSSE42]>;
}

multiclass SS42AI_pcmpestri<string asm> {
  def rr : SS42AI<0x61, MRMSrcReg, (outs),
            (ins VR128:$src1, VR128:$src3, u8imm:$src5),
            !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
            []>, Sched<[WritePCmpEStrI]>;

  def rm : SS42AI<0x61, MRMSrcMem, (outs),
            (ins VR128:$src1, i128mem:$src3, u8imm:$src5),
            !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
            []>, Sched<[WritePCmpEStrILd, ReadAfterLd]>;
}

let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
  let Predicates = [HasAVX] in
  defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
  defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
}
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents
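// Note that these compute CRC-32C (the Castagnoli polynomial, 0x11EDC6F41,
// used by iSCSI and ext4), not the CRC-32 polynomial used by gzip/zlib, so
// the results are not interchangeable with the latter.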
// crc intrinsic instruction
// These instructions come only in rr/rm forms; the only difference is the
// size of r and m.
class SS42I_crc32r<bits<8> opc, string asm, RegisterClass RCOut,
                   RegisterClass RCIn, SDPatternOperator Int> :
  SS42FI<opc, MRMSrcReg, (outs RCOut:$dst), (ins RCOut:$src1, RCIn:$src2),
         !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
         [(set RCOut:$dst, (Int RCOut:$src1, RCIn:$src2))], IIC_CRC32_REG>,
         Sched<[WriteFAdd]>;

class SS42I_crc32m<bits<8> opc, string asm, RegisterClass RCOut,
                   X86MemOperand x86memop, SDPatternOperator Int> :
  SS42FI<opc, MRMSrcMem, (outs RCOut:$dst), (ins RCOut:$src1, x86memop:$src2),
         !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
         [(set RCOut:$dst, (Int RCOut:$src1, (load addr:$src2)))],
         IIC_CRC32_MEM>, Sched<[WriteFAddLd, ReadAfterLd]>;
let Constraints = "$src1 = $dst" in {
  def CRC32r32m8  : SS42I_crc32m<0xF0, "crc32{b}", GR32, i8mem,
                                 int_x86_sse42_crc32_32_8>;
  def CRC32r32r8  : SS42I_crc32r<0xF0, "crc32{b}", GR32, GR8,
                                 int_x86_sse42_crc32_32_8>;
  def CRC32r32m16 : SS42I_crc32m<0xF1, "crc32{w}", GR32, i16mem,
                                 int_x86_sse42_crc32_32_16>, OpSize16;
  def CRC32r32r16 : SS42I_crc32r<0xF1, "crc32{w}", GR32, GR16,
                                 int_x86_sse42_crc32_32_16>, OpSize16;
  def CRC32r32m32 : SS42I_crc32m<0xF1, "crc32{l}", GR32, i32mem,
                                 int_x86_sse42_crc32_32_32>, OpSize32;
  def CRC32r32r32 : SS42I_crc32r<0xF1, "crc32{l}", GR32, GR32,
                                 int_x86_sse42_crc32_32_32>, OpSize32;
  def CRC32r64m64 : SS42I_crc32m<0xF1, "crc32{q}", GR64, i64mem,
                                 int_x86_sse42_crc32_64_64>, REX_W;
  def CRC32r64r64 : SS42I_crc32r<0xF1, "crc32{q}", GR64, GR64,
                                 int_x86_sse42_crc32_64_64>, REX_W;
  let hasSideEffects = 0 in {
    let mayLoad = 1 in
    def CRC32r64m8 : SS42I_crc32m<0xF0, "crc32{b}", GR64, i8mem,
                                  null_frag>, REX_W;
    def CRC32r64r8 : SS42I_crc32r<0xF0, "crc32{b}", GR64, GR8,
                                  null_frag>, REX_W;
  }
}
//===----------------------------------------------------------------------===//
// SHA-NI Instructions
//===----------------------------------------------------------------------===//
multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
                      bit UsesXMM0 = 0> {
  def rr : I<Opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
             [!if(UsesXMM0,
                  (set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0)),
                  (set VR128:$dst, (IntId VR128:$src1, VR128:$src2)))]>, T8;

  def rm : I<Opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
             [!if(UsesXMM0,
                  (set VR128:$dst, (IntId VR128:$src1,
                    (bc_v4i32 (memopv2i64 addr:$src2)), XMM0)),
                  (set VR128:$dst, (IntId VR128:$src1,
                    (bc_v4i32 (memopv2i64 addr:$src2)))))]>, T8;
}
let Constraints = "$src1 = $dst", Predicates = [HasSHA] in {
  def SHA1RNDS4rri : Ii8<0xCC, MRMSrcReg, (outs VR128:$dst),
                         (ins VR128:$src1, VR128:$src2, u8imm:$src3),
                         "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                         [(set VR128:$dst,
                           (int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
                            (i8 imm:$src3)))]>, TA;
  def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
                         (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
                         "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                         [(set VR128:$dst,
                           (int_x86_sha1rnds4 VR128:$src1,
                            (bc_v4i32 (memopv2i64 addr:$src2)),
                            (i8 imm:$src3)))]>, TA;

  defm SHA1NEXTE : SHAI_binop<0xC8, "sha1nexte", int_x86_sha1nexte>;
  defm SHA1MSG1  : SHAI_binop<0xC9, "sha1msg1", int_x86_sha1msg1>;
  defm SHA1MSG2  : SHAI_binop<0xCA, "sha1msg2", int_x86_sha1msg2>;

  let Uses = [XMM0] in
  defm SHA256RNDS2 : SHAI_binop<0xCB, "sha256rnds2", int_x86_sha256rnds2, 1>;

  defm SHA256MSG1 : SHAI_binop<0xCC, "sha256msg1", int_x86_sha256msg1>;
  defm SHA256MSG2 : SHAI_binop<0xCD, "sha256msg2", int_x86_sha256msg2>;
}
// Aliases with explicit %xmm0
def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (SHA256RNDS2rr VR128:$dst, VR128:$src2)>;
def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
                (SHA256RNDS2rm VR128:$dst, i128mem:$src2)>;
//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//

multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
                             PatFrag ld_frag, bit Is2Addr = 1> {
  def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       Sched<[WriteAESDecEnc]>;
  def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1, (ld_frag addr:$src2)))]>,
       Sched<[WriteAESDecEncLd, ReadAfterLd]>;
}
// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, HasAES] in {
  defm VAESENC     : AESI_binop_rm_int<0xDC, "vaesenc",
                       int_x86_aesni_aesenc, loadv2i64, 0>, VEX_4V;
  defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
                       int_x86_aesni_aesenclast, loadv2i64, 0>, VEX_4V;
  defm VAESDEC     : AESI_binop_rm_int<0xDE, "vaesdec",
                       int_x86_aesni_aesdec, loadv2i64, 0>, VEX_4V;
  defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
                       int_x86_aesni_aesdeclast, loadv2i64, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm AESENC     : AESI_binop_rm_int<0xDC, "aesenc",
                      int_x86_aesni_aesenc, memopv2i64>;
  defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
                      int_x86_aesni_aesenclast, memopv2i64>;
  defm AESDEC     : AESI_binop_rm_int<0xDE, "aesdec",
                      int_x86_aesni_aesdec, memopv2i64>;
  defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
                      int_x86_aesni_aesdeclast, memopv2i64>;
}
// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
  def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>,
      VEX;
  def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst, (int_x86_aesni_aesimc (loadv2i64 addr:$src1)))]>,
      Sched<[WriteAESIMCLd]>, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
  (ins VR128:$src1),
  "aesimc\t{$src1, $dst|$dst, $src1}",
  [(set VR128:$dst,
    (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
  (ins i128mem:$src1),
  "aesimc\t{$src1, $dst|$dst, $src1}",
  [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
  Sched<[WriteAESIMCLd]>;
// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
  def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, u8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
      Sched<[WriteAESKeyGen]>, VEX;
  def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1, u8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist (loadv2i64 addr:$src1), imm:$src2))]>,
      Sched<[WriteAESKeyGenLd]>, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
  (ins VR128:$src1, u8imm:$src2),
  "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
  [(set VR128:$dst,
    (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
  Sched<[WriteAESKeyGen]>;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
  (ins i128mem:$src1, u8imm:$src2),
  "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
  [(set VR128:$dst,
    (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
  Sched<[WriteAESKeyGenLd]>;
//===----------------------------------------------------------------------===//
// PCLMUL Instructions
//===----------------------------------------------------------------------===//

// AVX carry-less Multiplication instructions
let isCommutable = 1 in
def VPCLMULQDQrr : AVXPCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, u8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           [(set VR128:$dst,
             (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>,
           Sched<[WriteCLMul]>;

def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
                              (loadv2i64 addr:$src2), imm:$src3))]>,
           Sched<[WriteCLMulLd, ReadAfterLd]>;

// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, u8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           [(set VR128:$dst,
             (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))],
           IIC_SSE_PCLMULQDQ_RR>, Sched<[WriteCLMul]>;

def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
                              (memopv2i64 addr:$src2), imm:$src3))],
           IIC_SSE_PCLMULQDQ_RM>,
           Sched<[WriteCLMulLd, ReadAfterLd]>;
} // Constraints = "$src1 = $dst"
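// The pclmulqdq immediate selects which 64-bit half of each source enters the
// 64x64->128-bit carry-less multiply: bit 0 picks the high (1) or low (0)
// quadword of the destination/first source and bit 4 does the same for the
// second source. The aliases below name the four combinations.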
multiclass pclmul_alias<string asm, int immop> {
  def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop), 0>;

  def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop), 0>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop),
                  0>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop),
                  0>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
//===----------------------------------------------------------------------===//
// SSE4A Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasSSE4A] in {

let Constraints = "$src = $dst" in {
def EXTRQI : Ii8<0x78, MRMXr, (outs VR128:$dst),
                 (ins VR128:$src, u8imm:$len, u8imm:$idx),
                 "extrq\t{$idx, $len, $src|$src, $len, $idx}",
                 [(set VR128:$dst, (X86extrqi VR128:$src, imm:$len,
                                    imm:$idx))]>, PD;
def EXTRQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
               (ins VR128:$src, VR128:$mask),
               "extrq\t{$mask, $src|$src, $mask}",
               [(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
                                  VR128:$mask))]>, PD;

def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src, VR128:$src2, u8imm:$len, u8imm:$idx),
                   "insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
                   [(set VR128:$dst, (X86insertqi VR128:$src, VR128:$src2,
                                      imm:$len, imm:$idx))]>, XD;
def INSERTQ  : I<0x79, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src, VR128:$mask),
                 "insertq\t{$mask, $src|$src, $mask}",
                 [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
                                    VR128:$mask))]>, XD;
}
// Non-temporal (unaligned) scalar stores.
let AddedComplexity = 400 in { // Prefer non-temporal versions
let mayStore = 1, SchedRW = [WriteStore] in {
def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
                "movntss\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVNT>, XS;

def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                "movntsd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVNT>, XD;
} // SchedRW

def : Pat<(nontemporalstore FR32:$src, addr:$dst),
          (MOVNTSS addr:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;

def : Pat<(nontemporalstore FR64:$src, addr:$dst),
          (MOVNTSD addr:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;

} // AddedComplexity
} // HasSSE4A
//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VBROADCAST - Load from memory and broadcast to all elements of the
//              destination operand
//
class avx_broadcast_rm<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, ValueType VT,
                       PatFrag ld_frag, SchedWrite Sched> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (VT (X86VBroadcast (ld_frag addr:$src))))]>,
        Sched<[Sched]>, VEX;

// AVX2 adds register forms
class avx2_broadcast_rr<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType ResVT, ValueType OpVT, SchedWrite Sched> :
  AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (ResVT (X86VBroadcast (OpVT VR128:$src))))]>,
         Sched<[Sched]>, VEX;

let ExeDomain = SSEPackedSingle, Predicates = [HasAVX, NoVLX] in {
  def VBROADCASTSSrm  : avx_broadcast_rm<0x18, "vbroadcastss", VR128,
                                         f32mem, v4f32, loadf32, WriteLoad>;
  def VBROADCASTSSYrm : avx_broadcast_rm<0x18, "vbroadcastss", VR256,
                                         f32mem, v8f32, loadf32,
                                         WriteFShuffleLd>, VEX_L;
}
let ExeDomain = SSEPackedDouble, Predicates = [HasAVX, NoVLX] in
def VBROADCASTSDYrm : avx_broadcast_rm<0x19, "vbroadcastsd", VR256, f64mem,
                                       v4f64, loadf64, WriteFShuffleLd>, VEX_L;

let ExeDomain = SSEPackedSingle, Predicates = [HasAVX2, NoVLX] in {
  def VBROADCASTSSrr  : avx2_broadcast_rr<0x18, "vbroadcastss", VR128,
                                          v4f32, v4f32, WriteFShuffle>;
  def VBROADCASTSSYrr : avx2_broadcast_rr<0x18, "vbroadcastss", VR256,
                                          v8f32, v4f32, WriteFShuffle256>, VEX_L;
}
let ExeDomain = SSEPackedDouble, Predicates = [HasAVX2, NoVLX] in
def VBROADCASTSDYrr : avx2_broadcast_rr<0x19, "vbroadcastsd", VR256,
                                        v4f64, v2f64, WriteFShuffle256>, VEX_L;

let mayLoad = 1, hasSideEffects = 0, Predicates = [HasAVX2] in
def VBROADCASTI128 : AVX8I<0x5A, MRMSrcMem, (outs VR256:$dst),
                           (ins i128mem:$src),
                           "vbroadcasti128\t{$src, $dst|$dst, $src}", []>,
                           Sched<[WriteLoad]>, VEX, VEX_L;

def VBROADCASTF128 : AVX8I<0x1A, MRMSrcMem, (outs VR256:$dst),
                           (ins f128mem:$src),
                           "vbroadcastf128\t{$src, $dst|$dst, $src}",
                           [(set VR256:$dst,
                             (int_x86_avx_vbroadcastf128_pd_256 addr:$src))]>,
                           Sched<[WriteFShuffleLd]>, VEX, VEX_L;

let Predicates = [HasAVX] in
def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;
//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
//
let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, u8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteFShuffle]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, u8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteFShuffleLd, ReadAfterLd]>, VEX_4V, VEX_L;
}
let Predicates = [HasAVX, NoVLX] in {
def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;

def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (loadv4f32 addr:$src2),
                                  (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (loadv2f64 addr:$src2),
                                  (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
}
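// With AVX1 only, there is no vinserti128, so the 256-bit integer insertions
// below are matched to vinsertf128 as well; the bitcasts in the memory
// patterns fold integer loads into the floating-point-domain instruction.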
let Predicates = [HasAVX1Only] in {
def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;

def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
                                  (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
                                  (bc_v4i32 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
                                  (bc_v16i8 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
                                  (bc_v8i16 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTF128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
}
//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
//
let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, u8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, Sched<[WriteFShuffle]>, VEX, VEX_L;
let mayStore = 1 in
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, u8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, Sched<[WriteStore]>, VEX, VEX_L;
}
let Predicates = [HasAVX, NoVLX] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4f32 (VEXTRACTF128rr
                    (v8f32 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2f64 (VEXTRACTF128rr
                    (v4f64 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;

def : Pat<(store (v4f32 (vextract128_extract:$ext (v8f32 VR256:$src1),
                         (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(store (v2f64 (vextract128_extract:$ext (v4f64 VR256:$src1),
                         (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
}
let Predicates = [HasAVX1Only] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2i64 (VEXTRACTF128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4i32 (VEXTRACTF128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v8i16 (VEXTRACTF128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v16i8 (VEXTRACTF128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextract128_imm VR128:$ext)))>;

def : Pat<(store (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
                         (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(store (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
                         (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(store (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
                         (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(store (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
                         (iPTR imm))), addr:$dst),
          (VEXTRACTF128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
}
//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores
//
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}
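// For these conditional moves, an element is loaded or stored iff the sign
// bit of the corresponding mask element is set; masked-off load lanes read
// as zero, masked-off store lanes are left untouched, and (crucially for
// vectorizers) masked-off lanes cannot fault.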
let ExeDomain = SSEPackedSingle in
defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256>;
let ExeDomain = SSEPackedDouble in
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256>;
//===----------------------------------------------------------------------===//
// VPERMIL - Permute Single and Double Floating-Point Values
//
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag i_frag,
                      ValueType f_vt, ValueType i_vt> {
let Predicates = [HasAVX, NoVLX] in {
  def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (f_vt (X86VPermilpv RC:$src1, (i_vt RC:$src2))))]>, VEX_4V,
             Sched<[WriteFShuffle]>;
  def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (f_vt (X86VPermilpv RC:$src1,
                             (i_vt (bitconvert (i_frag addr:$src2))))))]>, VEX_4V,
             Sched<[WriteFShuffleLd, ReadAfterLd]>;

  def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, u8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (f_vt (X86VPermilpi RC:$src1, (i8 imm:$src2))))]>, VEX,
             Sched<[WriteFShuffle]>;
  def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, u8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst,
               (f_vt (X86VPermilpi (load addr:$src1), (i8 imm:$src2))))]>, VEX,
             Sched<[WriteFShuffleLd]>;
}// Predicates = [HasAVX, NoVLX]
}
let ExeDomain = SSEPackedSingle in {
  defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                               loadv2i64, v4f32, v4i32>;
  defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                               loadv4i64, v8f32, v8i32>, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
  defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                               loadv2i64, v2f64, v2i64>;
  defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                               loadv4i64, v4f64, v4i64>, VEX_L;
}
let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v8f32 (X86VPermilpv VR256:$src1, (v8i32 VR256:$src2))),
          (VPERMILPSYrr VR256:$src1, VR256:$src2)>;
def : Pat<(v8f32 (X86VPermilpv VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
          (VPERMILPSYrm VR256:$src1, addr:$src2)>;
def : Pat<(v4f64 (X86VPermilpv VR256:$src1, (v4i64 VR256:$src2))),
          (VPERMILPDYrr VR256:$src1, VR256:$src2)>;
def : Pat<(v4f64 (X86VPermilpv VR256:$src1, (loadv4i64 addr:$src2))),
          (VPERMILPDYrm VR256:$src1, addr:$src2)>;

def : Pat<(v8i32 (X86VPermilpi VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilpi VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
def : Pat<(v8i32 (X86VPermilpi (bc_v8i32 (loadv4i64 addr:$src1)),
                  (i8 imm:$imm))),
          (VPERMILPSYmi addr:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilpi (loadv4i64 addr:$src1), (i8 imm:$imm))),
          (VPERMILPDYmi addr:$src1, imm:$imm)>;

def : Pat<(v4f32 (X86VPermilpv VR128:$src1, (v4i32 VR128:$src2))),
          (VPERMILPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4f32 (X86VPermilpv VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)))),
          (VPERMILPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (X86VPermilpv VR128:$src1, (v2i64 VR128:$src2))),
          (VPERMILPDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2f64 (X86VPermilpv VR128:$src1, (loadv2i64 addr:$src2))),
          (VPERMILPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2i64 (X86VPermilpi VR128:$src1, (i8 imm:$imm))),
          (VPERMILPDri VR128:$src1, imm:$imm)>;
def : Pat<(v2i64 (X86VPermilpi (loadv2i64 addr:$src1), (i8 imm:$imm))),
          (VPERMILPDmi addr:$src1, imm:$imm)>;
}
//===----------------------------------------------------------------------===//
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//
let ExeDomain = SSEPackedSingle in {
let isCommutable = 1 in
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, u8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                              (i8 imm:$src3))))]>, VEX_4V, VEX_L,
          Sched<[WriteFShuffle]>;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv8f32 addr:$src2),
                             (i8 imm:$src3)))]>, VEX_4V, VEX_L,
          Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
let Predicates = [HasAVX] in {
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
                  (loadv4f64 addr:$src2), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}

let Predicates = [HasAVX1Only] in {
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;

def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
                  (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
                  (loadv4i64 addr:$src2), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
                  (bc_v32i8 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                   (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
//
let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
            YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
  // Zero All YMM registers
  def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                   [(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L, Requires<[HasAVX]>;
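  // Compilers insert vzeroupper (below) before calling into possibly-SSE
  // code: leaving the upper YMM bits live across legacy SSE instructions
  // triggers expensive AVX/SSE transition stalls on Sandy Bridge-class
  // hardware.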
  // Zero Upper bits of YMM registers
  def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                     [(int_x86_avx_vzeroupper)]>, PS, VEX, Requires<[HasAVX]>;
}
//===----------------------------------------------------------------------===//
// Half precision conversion instructions
//===----------------------------------------------------------------------===//
multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
  def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}",
             [(set RC:$dst, (Int VR128:$src))]>,
             T8PD, VEX, Sched<[WriteCvtF2F]>;
  let hasSideEffects = 0, mayLoad = 1 in
  def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8PD, VEX,
             Sched<[WriteCvtF2FLd]>;
}

multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
  def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
               (ins RC:$src1, i32u8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
               [(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
               TAPD, VEX, Sched<[WriteCvtF2F]>;
  let hasSideEffects = 0, mayStore = 1,
      SchedRW = [WriteCvtF2FLd, WriteRMW] in
  def mr : Ii8<0x1D, MRMDestMem, (outs),
               (ins x86memop:$dst, RC:$src1, i32u8imm:$src2),
               "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
               TAPD, VEX;
}
let Predicates = [HasF16C] in {
  defm VCVTPH2PS  : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
  defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>, VEX_L;
  defm VCVTPS2PH  : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
  defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>, VEX_L;

  // Pattern match vcvtph2ps of a scalar i64 load.
  def : Pat<(int_x86_vcvtph2ps_128 (vzmovl_v2i64 addr:$src)),
            (VCVTPH2PSrm addr:$src)>;
  def : Pat<(int_x86_vcvtph2ps_128 (vzload_v2i64 addr:$src)),
            (VCVTPH2PSrm addr:$src)>;
  def : Pat<(int_x86_vcvtph2ps_128 (bitconvert
              (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
            (VCVTPH2PSrm addr:$src)>;

  def : Pat<(store (f64 (extractelt (bc_v2f64 (v8i16
              (int_x86_vcvtps2ph_128 VR128:$src1, i32:$src2))), (iPTR 0))),
            addr:$dst),
            (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>;
  def : Pat<(store (i64 (extractelt (bc_v2i64 (v8i16
              (int_x86_vcvtps2ph_128 VR128:$src1, i32:$src2))), (iPTR 0))),
            addr:$dst),
            (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>;
  def : Pat<(store (v8i16 (int_x86_vcvtps2ph_256 VR256:$src1, i32:$src2)),
            addr:$dst),
            (VCVTPS2PHYmr addr:$dst, VR256:$src1, imm:$src2)>;
}
// Patterns for matching conversions from float to half-float and vice versa.
let Predicates = [HasF16C] in {
  // Use MXCSR.RC for rounding instead of explicitly specifying the default
  // rounding mode (Nearest-Even, encoded as 0). Both are equivalent in the
  // configurations we support (the default). However, falling back to MXCSR is
  // more consistent with other instructions, which are always controlled by it.
  // It's encoded as 0b100.
  def : Pat<(fp_to_f16 FR32:$src),
            (i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (VCVTPS2PHrr
              (COPY_TO_REGCLASS FR32:$src, VR128), 4)), sub_16bit))>;

  def : Pat<(f16_to_fp GR16:$src),
            (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
              (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128)), FR32))>;

  def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32:$src))),
            (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
              (VCVTPS2PHrr (COPY_TO_REGCLASS FR32:$src, VR128), 4)), FR32))>;
}
//===----------------------------------------------------------------------===//
// AVX2 Instructions
//===----------------------------------------------------------------------===//

/// AVX2_binop_rmi - AVX2 binary operator with 8-bit immediate
multiclass AVX2_binop_rmi<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                          X86MemOperand x86memop> {
  let isCommutable = 1 in
  def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u8imm:$src3),
        !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2, imm:$src3)))]>,
        Sched<[WriteBlend]>, VEX_4V;
  def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u8imm:$src3),
        !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        [(set RC:$dst,
          (OpVT (OpNode RC:$src1,
                 (bitconvert (memop_frag addr:$src2)), imm:$src3)))]>,
        Sched<[WriteBlendLd, ReadAfterLd]>, VEX_4V;
}
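// vpblendd is the AVX2 dword-granularity immediate blend. Unlike vpblendw,
// its 256-bit form uses all eight immediate bits across the full register
// rather than replicating a per-lane mask, and on most AVX2 implementations
// it executes on more ports than the word-granularity blend, so it is
// generally preferred when dword granularity suffices.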
defm VPBLENDD  : AVX2_binop_rmi<0x02, "vpblendd", X86Blendi, v4i32,
                                VR128, loadv2i64, i128mem>;
defm VPBLENDDY : AVX2_binop_rmi<0x02, "vpblendd", X86Blendi, v8i32,
                                VR256, loadv4i64, i256mem>, VEX_L;
8291 //===----------------------------------------------------------------------===//
8292 // VPBROADCAST - Load from memory and broadcast to all elements of the
8293 // destination operand
multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          ValueType OpVT128, ValueType OpVT256, Predicate prd> {
  let Predicates = [HasAVX2, prd] in {
    def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (OpVT128 (X86VBroadcast (OpVT128 VR128:$src))))]>,
                    Sched<[WriteShuffle]>, VEX;
    def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (OpVT128 (X86VBroadcast (ld_frag addr:$src))))]>,
                    Sched<[WriteLoad]>, VEX;
    def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst,
                       (OpVT256 (X86VBroadcast (OpVT128 VR128:$src))))]>,
                     Sched<[WriteShuffle256]>, VEX, VEX_L;
    def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst,
                       (OpVT256 (X86VBroadcast (ld_frag addr:$src))))]>,
                     Sched<[WriteLoad]>, VEX, VEX_L;

    // Provide aliases for broadcast from the same register class that
    // automatically does the extract.
    def : Pat<(OpVT256 (X86VBroadcast (OpVT256 VR256:$src))),
              (!cast<Instruction>(NAME#"Yrr")
                 (OpVT128 (EXTRACT_SUBREG (OpVT256 VR256:$src), sub_xmm)))>;
  }
}
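
// Naming convention from the multiclass above: rr/rm are the 128-bit forms
// and Yrr/Yrm the 256-bit ones, so e.g. VPBROADCASTW below yields
// VPBROADCASTWrr/VPBROADCASTWrm and VPBROADCASTWYrr/VPBROADCASTWYrm.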
defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
                                   v16i8, v32i8, NoVLX_Or_NoBWI>;
defm VPBROADCASTW : avx2_broadcast<0x79, "vpbroadcastw", i16mem, loadi16,
                                   v8i16, v16i16, NoVLX_Or_NoBWI>;
defm VPBROADCASTD : avx2_broadcast<0x58, "vpbroadcastd", i32mem, loadi32,
                                   v4i32, v8i32, NoVLX>;
defm VPBROADCASTQ : avx2_broadcast<0x59, "vpbroadcastq", i64mem, loadi64,
                                   v2i64, v4i64, NoVLX>;

let Predicates = [HasAVX2] in {
  // loadi16 is tricky to fold because i16 is not a desirable operation type
  // (!isTypeDesirableForOp), justifiably so. This means we'll encounter
  // truncated i32 loads instead; match them here.
  def : Pat<(v8i16 (X86VBroadcast (i16 (trunc (i32 (load addr:$src)))))),
            (VPBROADCASTWrm addr:$src)>;
  def : Pat<(v16i16 (X86VBroadcast (i16 (trunc (i32 (load addr:$src)))))),
            (VPBROADCASTWYrm addr:$src)>;
  def : Pat<(v8i16 (X86VBroadcast
                    (i16 (trunc (i32 (zextloadi16 addr:$src)))))),
            (VPBROADCASTWrm addr:$src)>;
  def : Pat<(v16i16 (X86VBroadcast
                     (i16 (trunc (i32 (zextloadi16 addr:$src)))))),
            (VPBROADCASTWYrm addr:$src)>;
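
  // In other words, a broadcast of a 16-bit load reaches instruction
  // selection as (i16 (trunc (i32 (load ...)))); the patterns above fold
  // that whole subtree back into a single VPBROADCASTWrm/VPBROADCASTWYrm.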

  // Provide aliases for broadcast from the same register class that
  // automatically does the extract.
  def : Pat<(v8f32 (X86VBroadcast (v8f32 VR256:$src))),
            (VBROADCASTSSYrr (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src),
                                                    sub_xmm)))>;
  def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256:$src))),
            (VBROADCASTSDYrr (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src),
                                                    sub_xmm)))>;
}

let Predicates = [HasAVX2, NoVLX] in {
  // Provide fallback in case the load node that is used in the patterns above
  // is used by additional users, which prevents the pattern selection.
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
              (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
  }
}

let Predicates = [HasAVX2, NoVLX_Or_NoBWI], AddedComplexity = 20 in {
  def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
            (VPBROADCASTBrr (COPY_TO_REGCLASS
                             (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
                             VR128))>;
  def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
            (VPBROADCASTBYrr (COPY_TO_REGCLASS
                              (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
                              VR128))>;

  def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
            (VPBROADCASTWrr (COPY_TO_REGCLASS
                             (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
                             VR128))>;
  def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
            (VPBROADCASTWYrr (COPY_TO_REGCLASS
                              (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
                              VR128))>;
}

let Predicates = [HasAVX2, NoVLX], AddedComplexity = 20 in {
  def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
            (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
  def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
            (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
  def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
            (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;

  // The patterns for VPBROADCASTD are not needed because they would match
  // the exact same thing as the VBROADCASTSS patterns.

  def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
            (VPBROADCASTQrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
  // The v4i64 pattern is not needed because VBROADCASTSDYrr already matches.
}

// AVX1 broadcast patterns
let Predicates = [HasAVX1Only] in {
def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
          (VBROADCASTSSYrm addr:$src)>;
def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
          (VBROADCASTSDYrm addr:$src)>;
def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
          (VBROADCASTSSrm addr:$src)>;
}

// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
let Predicates = [HasAVX], AddedComplexity = 20 in {
  // 128-bit broadcasts:
  def : Pat<(v2f64 (X86VBroadcast f64:$src)),
            (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
}

let Predicates = [HasAVX, NoVLX], AddedComplexity = 20 in {
  def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
            (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
  def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
            (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
              (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), sub_xmm),
              (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), 1)>;
  def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
            (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
              (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), sub_xmm),
              (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), 1)>;

  def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
            (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
  def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
            (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
              (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), sub_xmm),
              (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), 1)>;
  def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
            (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
              (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
              (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;

  def : Pat<(v2i64 (X86VBroadcast i64:$src)),
            (VMOVDDUPrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
}
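
// The 256-bit fallbacks above all use the same idiom: splat the scalar into
// an XMM register with VPSHUFD (0 repeats dword 0; 0x44 repeats the low
// 64 bits), then VINSERTF128 with immediate 1 copies that XMM value into the
// upper 128-bit lane as well.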

//===----------------------------------------------------------------------===//
// VPERM - Permute instructions
//

multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                     ValueType OpVT, X86FoldableSchedWrite Sched> {
  let Predicates = [HasAVX2, NoVLX] in {
    def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
                     (ins VR256:$src1, VR256:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
                     Sched<[Sched]>, VEX_4V, VEX_L;
    def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
                     (ins VR256:$src1, i256mem:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set VR256:$dst,
                       (OpVT (X86VPermv VR256:$src1,
                              (bitconvert (mem_frag addr:$src2)))))]>,
                     Sched<[Sched.Folded, ReadAfterLd]>, VEX_4V, VEX_L;
  }
}
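
// avx2_perm (X86VPermv) takes its permute indices from a register, one index
// per destination element; avx2_perm_imm below (X86VPermi) instead encodes
// the whole permutation in an 8-bit immediate.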

defm VPERMD : avx2_perm<0x36, "vpermd", loadv4i64, v8i32, WriteShuffle256>;
let ExeDomain = SSEPackedSingle in
defm VPERMPS : avx2_perm<0x16, "vpermps", loadv8f32, v8f32, WriteFShuffle256>;

multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
                         ValueType OpVT, X86FoldableSchedWrite Sched> {
  let Predicates = [HasAVX2, NoVLX] in {
    def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
                       (ins VR256:$src1, u8imm:$src2),
                       !strconcat(OpcodeStr,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       [(set VR256:$dst,
                         (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
                       Sched<[Sched]>, VEX, VEX_L;
    def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
                       (ins i256mem:$src1, u8imm:$src2),
                       !strconcat(OpcodeStr,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       [(set VR256:$dst,
                         (OpVT (X86VPermi (mem_frag addr:$src1),
                                (i8 imm:$src2))))]>,
                       Sched<[Sched.Folded, ReadAfterLd]>, VEX, VEX_L;
  }
}

defm VPERMQ : avx2_perm_imm<0x00, "vpermq", loadv4i64, v4i64,
                            WriteShuffle256>, VEX_W;
let ExeDomain = SSEPackedDouble in
defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
                             WriteFShuffle256>, VEX_W;

//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Integer Values in 128-bit chunks
//
let isCommutable = 1 in
def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
                              (i8 imm:$src3))))]>, Sched<[WriteShuffle256]>,
          VEX_4V, VEX_L;
def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, u8imm:$src3),
          "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv4i64 addr:$src2),
                              (i8 imm:$src3)))]>,
          Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;

let Predicates = [HasAVX2] in {
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;

def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, (bc_v32i8 (loadv4i64 addr:$src2)),
                  (i8 imm:$imm))),
          (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
                   (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
          (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)),
                  (i8 imm:$imm))),
          (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
}
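
// For reference, the vperm2i128 immediate selects each destination half
// independently: bits 1:0 pick the source 128-bit half for the low lane and
// bits 5:4 for the high lane (0/1 = $src1 low/high, 2/3 = $src2 low/high);
// setting bit 3 or bit 7 zeroes the corresponding destination lane instead.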

//===----------------------------------------------------------------------===//
// VINSERTI128 - Insert packed integer values
//
let hasSideEffects = 0 in {
def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteShuffle256]>, VEX_4V, VEX_L;
let mayLoad = 1 in
def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, i128mem:$src2, u8imm:$src3),
          "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;
}

let Predicates = [HasAVX2, NoVLX] in {
def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                  (iPTR imm)),
          (VINSERTI128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;

def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
                                  (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
                                  (bc_v4i32 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
                                  (bc_v16i8 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
                                  (bc_v8i16 (loadv2i64 addr:$src2)),
                                  (iPTR imm)),
          (VINSERTI128rm VR256:$src1, addr:$src2,
                         (INSERT_get_vinsert128_imm VR256:$ins))>;
}

//===----------------------------------------------------------------------===//
// VEXTRACTI128 - Extract packed integer values
//
def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, u8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          Sched<[WriteShuffle256]>, VEX, VEX_L;
let hasSideEffects = 0, mayStore = 1 in
def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
          (ins i128mem:$dst, VR256:$src1, u8imm:$src2),
          "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
          Sched<[WriteStore]>, VEX, VEX_L;

let Predicates = [HasAVX2, NoVLX] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v2i64 (VEXTRACTI128rr
                  (v4i64 VR256:$src1),
                  (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v4i32 (VEXTRACTI128rr
                  (v8i32 VR256:$src1),
                  (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v8i16 (VEXTRACTI128rr
                  (v16i16 VR256:$src1),
                  (EXTRACT_get_vextract128_imm VR128:$ext)))>;
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
          (v16i8 (VEXTRACTI128rr
                  (v32i8 VR256:$src1),
                  (EXTRACT_get_vextract128_imm VR128:$ext)))>;

def : Pat<(store (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
                                                  (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(store (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
                                                  (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(store (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
                                                  (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
def : Pat<(store (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
                                                  (iPTR imm))), addr:$dst),
          (VEXTRACTI128mr addr:$dst, VR256:$src1,
                          (EXTRACT_get_vextract128_imm VR128:$ext))>;
}

//===----------------------------------------------------------------------===//
// VPMASKMOV - Conditional SIMD Integer Packed Loads and Stores
//
multiclass avx2_pmovmask<string OpcodeStr,
                         Intrinsic IntLd128, Intrinsic IntLd256,
                         Intrinsic IntSt128, Intrinsic IntSt256> {
  def rm  : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>, VEX_4V;
  def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V, VEX_L;
  def mr  : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
             (ins i256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
}

defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
                                int_x86_avx2_maskload_d,
                                int_x86_avx2_maskload_d_256,
                                int_x86_avx2_maskstore_d,
                                int_x86_avx2_maskstore_d_256>;
defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
                                int_x86_avx2_maskload_q,
                                int_x86_avx2_maskload_q_256,
                                int_x86_avx2_maskstore_q,
                                int_x86_avx2_maskstore_q_256>, VEX_W;

multiclass maskmov_lowering<string InstrStr, RegisterClass RC, ValueType VT,
                            ValueType MaskVT, string BlendStr, ValueType ZeroVT> {
  // masked store
  def: Pat<(X86mstore addr:$ptr, (MaskVT RC:$mask), (VT RC:$src)),
           (!cast<Instruction>(InstrStr#"mr") addr:$ptr, RC:$mask, RC:$src)>;
  // masked load
  def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask), undef)),
           (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)>;
  def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask),
                            (VT (bitconvert (ZeroVT immAllZerosV))))),
           (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)>;
  def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask), (VT RC:$src0))),
           (!cast<Instruction>(BlendStr#"rr")
               RC:$src0,
               (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr),
               RC:$mask)>;
}
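
// The three masked_load cases above: an undef or all-zero pass-through value
// maps directly onto the maskmov load, since masked-off lanes are zeroed by
// the hardware; any other pass-through value needs an explicit variable blend
// of the loaded value with $src0 under the same mask.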

let Predicates = [HasAVX] in {
  defm : maskmov_lowering<"VMASKMOVPS", VR128, v4f32, v4i32, "VBLENDVPS", v4i32>;
  defm : maskmov_lowering<"VMASKMOVPD", VR128, v2f64, v2i64, "VBLENDVPD", v4i32>;
  defm : maskmov_lowering<"VMASKMOVPSY", VR256, v8f32, v8i32, "VBLENDVPSY", v8i32>;
  defm : maskmov_lowering<"VMASKMOVPDY", VR256, v4f64, v4i64, "VBLENDVPDY", v8i32>;
}
let Predicates = [HasAVX1Only] in {
  // i32/i64 masked load/store is not supported on AVX1; use the ps/pd versions.
  defm : maskmov_lowering<"VMASKMOVPSY", VR256, v8i32, v8i32, "VBLENDVPSY", v8i32>;
  defm : maskmov_lowering<"VMASKMOVPDY", VR256, v4i64, v4i64, "VBLENDVPDY", v8i32>;
  defm : maskmov_lowering<"VMASKMOVPS", VR128, v4i32, v4i32, "VBLENDVPS", v4i32>;
  defm : maskmov_lowering<"VMASKMOVPD", VR128, v2i64, v2i64, "VBLENDVPD", v4i32>;
}
let Predicates = [HasAVX2] in {
  defm : maskmov_lowering<"VPMASKMOVDY", VR256, v8i32, v8i32, "VBLENDVPSY", v8i32>;
  defm : maskmov_lowering<"VPMASKMOVQY", VR256, v4i64, v4i64, "VBLENDVPDY", v8i32>;
  defm : maskmov_lowering<"VPMASKMOVD", VR128, v4i32, v4i32, "VBLENDVPS", v4i32>;
  defm : maskmov_lowering<"VPMASKMOVQ", VR128, v2i64, v2i64, "VBLENDVPD", v4i32>;
}

//===----------------------------------------------------------------------===//
// Variable Bit Shifts
//
multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType vt128, ValueType vt256> {
  def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
             (ins VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
             VEX_4V, Sched<[WriteVarVecShift]>;
  def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, i128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst,
               (vt128 (OpNode VR128:$src1,
                       (vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
             VEX_4V, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
  def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
             (ins VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
             VEX_4V, VEX_L, Sched<[WriteVarVecShift]>;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, i256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst,
               (vt256 (OpNode VR256:$src1,
                       (vt256 (bitconvert (loadv4i64 addr:$src2))))))]>,
             VEX_4V, VEX_L, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
}
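
// Unlike the classic SSE shifts, which apply one shift count to the whole
// vector, these AVX2 forms shift each element by the count held in the
// matching element of the second operand.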

let Predicates = [HasAVX2, NoVLX] in {
  defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
  defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
  defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
  defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
  defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
  let isCodeGenOnly = 1 in
    defm VPSRAVD_Int : avx2_var_shift<0x46, "vpsravd", X86vsrav, v4i32, v8i32>;
}

//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations
multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
                       X86MemOperand memop128, X86MemOperand memop256> {
  def rm  : AVX28I<opc, MRMSrcMem, (outs VR128:$dst, VR128:$mask_wb),
            (ins VR128:$src1, memop128:$src2, VR128:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3;
  def Yrm : AVX28I<opc, MRMSrcMem, (outs RC256:$dst, RC256:$mask_wb),
            (ins RC256:$src1, memop256:$src2, RC256:$mask),
            !strconcat(OpcodeStr,
              "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
            []>, VEX_4VOp3, VEX_L;
}

let mayLoad = 1, hasSideEffects = 0, Constraints
  = "@earlyclobber $dst,@earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb"
  in {
  defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx128mem, vx256mem>, VEX_W;
  defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx128mem, vy256mem>, VEX_W;
  defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx128mem, vy256mem>;
  defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx64mem, vy128mem>;

  let ExeDomain = SSEPackedDouble in {
    defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx128mem, vx256mem>, VEX_W;
    defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx128mem, vy256mem>, VEX_W;
  }

  let ExeDomain = SSEPackedSingle in {
    defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx128mem, vy256mem>;
    defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx64mem, vy128mem>;
  }
}
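
// Gathers both read and update their operands: $dst doubles as the
// pass-through source ($src1 = $dst), and the mask register is cleared as
// elements complete ($mask = $mask_wb), so both results are marked
// @earlyclobber to keep them from being allocated over the other inputs.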

//===----------------------------------------------------------------------===//
// Extra selection patterns for FR128, f128, f128mem

// movaps is shorter than movdqa. movaps is in SSE and movdqa is in SSE2.
def : Pat<(store (f128 FR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, (COPY_TO_REGCLASS (f128 FR128:$src), VR128))>;

def : Pat<(loadf128 addr:$src),
          (COPY_TO_REGCLASS (MOVAPSrm addr:$src), FR128)>;

// andps is shorter than andpd or pand. andps is in SSE; andpd/pand are in SSE2.
def : Pat<(X86fand FR128:$src1, (loadf128 addr:$src2)),
          (COPY_TO_REGCLASS
           (ANDPSrm (COPY_TO_REGCLASS FR128:$src1, VR128), f128mem:$src2),
           FR128)>;

def : Pat<(X86fand FR128:$src1, FR128:$src2),
          (COPY_TO_REGCLASS
           (ANDPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
                    (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;

def : Pat<(and FR128:$src1, FR128:$src2),
          (COPY_TO_REGCLASS
           (ANDPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
                    (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;

def : Pat<(X86for FR128:$src1, (loadf128 addr:$src2)),
          (COPY_TO_REGCLASS
           (ORPSrm (COPY_TO_REGCLASS FR128:$src1, VR128), f128mem:$src2),
           FR128)>;

def : Pat<(X86for FR128:$src1, FR128:$src2),
          (COPY_TO_REGCLASS
           (ORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
                   (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;

def : Pat<(or FR128:$src1, FR128:$src2),
          (COPY_TO_REGCLASS
           (ORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
                   (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;

def : Pat<(X86fxor FR128:$src1, (loadf128 addr:$src2)),
          (COPY_TO_REGCLASS
           (XORPSrm (COPY_TO_REGCLASS FR128:$src1, VR128), f128mem:$src2),
           FR128)>;

def : Pat<(X86fxor FR128:$src1, FR128:$src2),
          (COPY_TO_REGCLASS
           (XORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
                    (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;

def : Pat<(xor FR128:$src1, FR128:$src2),
          (COPY_TO_REGCLASS
           (XORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
                    (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;
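
// All of the f128 logic patterns above lower the same way: there are no f128
// logic instructions, so the operands are copied from FR128 into VR128, the
// packed-single bitwise op is used (bitwise results are identical regardless
// of element type), and the result is copied back to FR128.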