1 //===- X86InstrVecCompiler.td - Vector Compiler Patterns ---*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the various vector pseudo instructions used by the
11 // compiler, as well as Pat patterns used during instruction selection.
13 //===----------------------------------------------------------------------===//
15 //===----------------------------------------------------------------------===//
16 // Non-instruction patterns
17 //===----------------------------------------------------------------------===//
// Lane 0 of an XMM register aliases the scalar FP register, so extracting
// element 0 needs no instruction — only a register-class reinterpretation.
19 let Predicates = [NoAVX512] in {
20 // A vector extract of the first f32/f64 position is a subregister copy
21 def : Pat<(f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
22 (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
23 def : Pat<(f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
24 (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;
// Same lane-0 extract lowering as above, but using the AVX-512 (EVEX)
// register classes VR128X/FR32X/FR64X when AVX-512 is available.
27 let Predicates = [HasAVX512] in {
28 // A vector extract of the first f32/f64 position is a subregister copy
29 def : Pat<(f32 (extractelt (v4f32 VR128X:$src), (iPTR 0))),
30 (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X)>;
31 def : Pat<(f64 (extractelt (v2f64 VR128X:$src), (iPTR 0))),
32 (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X)>;
// scalar_to_vector only defines lane 0; the upper lanes are undefined, so a
// plain register-class copy (no instruction) is sufficient.
35 let Predicates = [NoVLX] in {
36 // Implicitly promote a 32-bit scalar to a vector.
37 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
38 (COPY_TO_REGCLASS FR32:$src, VR128)>;
39 // Implicitly promote a 64-bit scalar to a vector.
40 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
41 (COPY_TO_REGCLASS FR64:$src, VR128)>;
// Same scalar-to-vector promotion as above, for the EVEX register classes
// (FR32X/FR64X -> VR128X) when AVX512VL is available.
44 let Predicates = [HasVLX] in {
45 // Implicitly promote a 32-bit scalar to a vector.
46 def : Pat<(v4f32 (scalar_to_vector FR32X:$src)),
47 (COPY_TO_REGCLASS FR32X:$src, VR128X)>;
48 // Implicitly promote a 64-bit scalar to a vector.
49 def : Pat<(v2f64 (scalar_to_vector FR64X:$src)),
50 (COPY_TO_REGCLASS FR64X:$src, VR128X)>;
53 //===----------------------------------------------------------------------===//
55 //===----------------------------------------------------------------------===//
57 // Patterns for insert_subvector/extract_subvector to/from index=0
// Emits two Pats per instantiation:
//  - extract of subVT at index 0 from VT  -> EXTRACT_SUBREG (no instruction)
//  - insert of subRC:$src at index 0 into an undef VT
//                                         -> INSERT_SUBREG into IMPLICIT_DEF
// subRC/subVT describe the subvector, RC/VT the containing vector; the
// subregister index parameter (subIdx) selects the low lane (sub_xmm/sub_ymm).
58 multiclass subvector_subreg_lowering<RegisterClass subRC, ValueType subVT,
59 RegisterClass RC, ValueType VT,
61 def : Pat<(subVT (extract_subvector (VT RC:$src), (iPTR 0))),
62 (subVT (EXTRACT_SUBREG RC:$src, subIdx))>;
64 def : Pat<(VT (insert_subvector undef, subRC:$src, (iPTR 0))),
65 (VT (INSERT_SUBREG (IMPLICIT_DEF), subRC:$src, subIdx))>;
68 // A 128-bit subvector extract from the first 256-bit vector position is a
69 // subregister copy that needs no instruction. Likewise, a 128-bit subvector
70 // insert to the first 256-bit vector position is a subregister copy that needs
// no instruction. One instantiation per legal 128-bit element type.
72 defm : subvector_subreg_lowering<VR128, v4i32, VR256, v8i32, sub_xmm>;
73 defm : subvector_subreg_lowering<VR128, v4f32, VR256, v8f32, sub_xmm>;
74 defm : subvector_subreg_lowering<VR128, v2i64, VR256, v4i64, sub_xmm>;
75 defm : subvector_subreg_lowering<VR128, v2f64, VR256, v4f64, sub_xmm>;
76 defm : subvector_subreg_lowering<VR128, v8i16, VR256, v16i16, sub_xmm>;
77 defm : subvector_subreg_lowering<VR128, v16i8, VR256, v32i8, sub_xmm>;
79 // A 128-bit subvector extract from the first 512-bit vector position is a
80 // subregister copy that needs no instruction. Likewise, a 128-bit subvector
81 // insert to the first 512-bit vector position is a subregister copy that needs
// no instruction. One instantiation per legal 128-bit element type.
83 defm : subvector_subreg_lowering<VR128, v4i32, VR512, v16i32, sub_xmm>;
84 defm : subvector_subreg_lowering<VR128, v4f32, VR512, v16f32, sub_xmm>;
85 defm : subvector_subreg_lowering<VR128, v2i64, VR512, v8i64, sub_xmm>;
86 defm : subvector_subreg_lowering<VR128, v2f64, VR512, v8f64, sub_xmm>;
87 defm : subvector_subreg_lowering<VR128, v8i16, VR512, v32i16, sub_xmm>;
88 defm : subvector_subreg_lowering<VR128, v16i8, VR512, v64i8, sub_xmm>;
// Comment corrected: these instantiations operate on 256-bit subvectors
// (VR256 / sub_ymm), not 128-bit — the old text was copy-pasted from above.
90 // A 256-bit subvector extract from the first 512-bit vector position is a
91 // subregister copy that needs no instruction. Likewise, a 256-bit subvector
92 // insert to the first 512-bit vector position is a subregister copy that needs
// no instruction.
94 defm : subvector_subreg_lowering<VR256, v8i32, VR512, v16i32, sub_ymm>;
95 defm : subvector_subreg_lowering<VR256, v8f32, VR512, v16f32, sub_ymm>;
96 defm : subvector_subreg_lowering<VR256, v4i64, VR512, v8i64, sub_ymm>;
97 defm : subvector_subreg_lowering<VR256, v4f64, VR512, v8f64, sub_ymm>;
98 defm : subvector_subreg_lowering<VR256, v16i16, VR512, v32i16, sub_ymm>;
99 defm : subvector_subreg_lowering<VR256, v32i8, VR512, v64i8, sub_ymm>;
// Lowers a store of the low subvector of a wider vector to a plain vector
// move-to-memory instruction: EXTRACT_SUBREG pulls out the low lane for free,
// then "VMOV"#AlignedStr#"mr" (for alignedstore) or "VMOV"#UnalignedStr#"mr"
// (for plain store) writes it. The instruction name is assembled with !cast
// from the mnemonic fragments passed in.
102 multiclass subvector_store_lowering<string AlignedStr, string UnalignedStr,
103 RegisterClass RC, ValueType DstTy,
104 ValueType SrcTy, SubRegIndex SubIdx> {
// Aligned store -> aligned move (e.g. VMOVAPSmr).
105 def : Pat<(alignedstore (DstTy (extract_subvector
106 (SrcTy RC:$src), (iPTR 0))), addr:$dst),
107 (!cast<Instruction>("VMOV"#AlignedStr#"mr") addr:$dst,
108 (DstTy (EXTRACT_SUBREG RC:$src, SubIdx)))>;
// Unaligned (generic) store -> unaligned move (e.g. VMOVUPSmr).
110 def : Pat<(store (DstTy (extract_subvector
111 (SrcTy RC:$src), (iPTR 0))), addr:$dst),
112 (!cast<Instruction>("VMOV"#UnalignedStr#"mr") addr:$dst,
113 (DstTy (EXTRACT_SUBREG RC:$src, SubIdx)))>;
// Store the low 128 bits of a 256-bit vector with a plain VEX-encoded
// VMOVAPS/VMOVUPS-family store instead of an explicit extract instruction.
// NOTE(review): VR256X looks inconsistent here — every other
// [HasAVX, NoVLX] group in this file uses the non-EVEX classes
// (VR128/VR256), and the selected VMOV*mr instructions are VEX-encoded,
// which cannot address ymm16-31. Confirm whether VR256 was intended.
116 let Predicates = [HasAVX, NoVLX] in {
117 defm : subvector_store_lowering<"APD", "UPD", VR256X, v2f64, v4f64, sub_xmm>;
118 defm : subvector_store_lowering<"APS", "UPS", VR256X, v4f32, v8f32, sub_xmm>;
119 defm : subvector_store_lowering<"DQA", "DQU", VR256X, v2i64, v4i64, sub_xmm>;
120 defm : subvector_store_lowering<"DQA", "DQU", VR256X, v4i32, v8i32, sub_xmm>;
121 defm : subvector_store_lowering<"DQA", "DQU", VR256X, v8i16, v16i16, sub_xmm>;
122 defm : subvector_store_lowering<"DQA", "DQU", VR256X, v16i8, v32i8, sub_xmm>;
// With AVX512VL, use the Z128 (EVEX 128-bit) move forms for storing the low
// 128 bits of a 256-bit EVEX register.
125 let Predicates = [HasVLX] in {
126 // Special patterns for storing subvector extracts of lower 128-bits
127 // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
128 defm : subvector_store_lowering<"APDZ128", "UPDZ128", VR256X, v2f64, v4f64,
130 defm : subvector_store_lowering<"APSZ128", "UPSZ128", VR256X, v4f32, v8f32,
132 defm : subvector_store_lowering<"DQA64Z128", "DQU64Z128", VR256X, v2i64,
134 defm : subvector_store_lowering<"DQA64Z128", "DQU64Z128", VR256X, v4i32,
136 defm : subvector_store_lowering<"DQA64Z128", "DQU64Z128", VR256X, v8i16,
138 defm : subvector_store_lowering<"DQA64Z128", "DQU64Z128", VR256X, v16i8,
141 // Special patterns for storing subvector extracts of lower 128-bits of 512.
142 // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
143 defm : subvector_store_lowering<"APDZ128", "UPDZ128", VR512, v2f64, v8f64,
145 defm : subvector_store_lowering<"APSZ128", "UPSZ128", VR512, v4f32, v16f32,
147 defm : subvector_store_lowering<"DQA64Z128", "DQU64Z128", VR512, v2i64,
149 defm : subvector_store_lowering<"DQA64Z128", "DQU64Z128", VR512, v4i32,
151 defm : subvector_store_lowering<"DQA64Z128", "DQU64Z128", VR512, v8i16,
153 defm : subvector_store_lowering<"DQA64Z128", "DQU64Z128", VR512, v16i8,
156 // Special patterns for storing subvector extracts of lower 256-bits of 512.
157 // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
// NOTE(review): these are 256-bit extracts, so the instruction being avoided
// would be a 256-bit vextract store (VEXTRACT*64x4mr), not VEXTRACTF128mr —
// the comment above appears copy-pasted from the 128-bit sections; confirm.
158 defm : subvector_store_lowering<"APDZ256", "UPDZ256", VR512, v4f64, v8f64,
160 defm : subvector_store_lowering<"APSZ256", "UPSZ256", VR512, v8f32, v16f32,
162 defm : subvector_store_lowering<"DQA64Z256", "DQU64Z256", VR512, v4i64,
164 defm : subvector_store_lowering<"DQA64Z256", "DQU64Z256", VR512, v8i32,
166 defm : subvector_store_lowering<"DQA64Z256", "DQU64Z256", VR512, v16i16,
168 defm : subvector_store_lowering<"DQA64Z256", "DQU64Z256", VR512, v32i8,
172 // If we're inserting into an all zeros vector, just use a plain move which
173 // will zero the upper bits. A post-isel hook will take care of removing
174 // any moves that we can prove are unnecessary.
// Parameters: MoveStr selects the register-to-register move instruction
// ("VMOV"#MoveStr#"rr"); RC/SrcTy describe the inserted subvector; DstTy is
// the full-width result; ZeroTy is the type the all-zeros constant is
// materialized as before the bitconvert; SubIdx is the low-lane subregister.
// SUBREG_TO_REG asserts the move already zeroed the bits above SubIdx.
175 multiclass subvec_zero_lowering<string MoveStr,
176 RegisterClass RC, ValueType DstTy,
177 ValueType SrcTy, ValueType ZeroTy,
178 SubRegIndex SubIdx> {
179 def : Pat<(DstTy (insert_subvector (bitconvert (ZeroTy immAllZerosV)),
180 (SrcTy RC:$src), (iPTR 0))),
181 (SUBREG_TO_REG (i64 0),
182 (SrcTy (!cast<Instruction>("VMOV"#MoveStr#"rr") RC:$src)), SubIdx)>;
// AVX without AVX512VL: zero-extend a 128-bit value into a zeroed 256-bit
// vector with a VEX-encoded xmm move (which zeroes the upper ymm bits).
185 let Predicates = [HasAVX, NoVLX] in {
186 defm : subvec_zero_lowering<"APD", VR128, v4f64, v2f64, v8i32, sub_xmm>;
187 defm : subvec_zero_lowering<"APS", VR128, v8f32, v4f32, v8i32, sub_xmm>;
188 defm : subvec_zero_lowering<"DQA", VR128, v4i64, v2i64, v8i32, sub_xmm>;
189 defm : subvec_zero_lowering<"DQA", VR128, v8i32, v4i32, v8i32, sub_xmm>;
190 defm : subvec_zero_lowering<"DQA", VR128, v16i16, v8i16, v8i32, sub_xmm>;
191 defm : subvec_zero_lowering<"DQA", VR128, v32i8, v16i8, v8i32, sub_xmm>;
// AVX512VL: EVEX Z128/Z256 moves. Three groups: 128-into-256 (ZeroTy v8i32),
// 128-into-512 and 256-into-512 (ZeroTy v16i32).
194 let Predicates = [HasVLX] in {
195 defm : subvec_zero_lowering<"APDZ128", VR128X, v4f64, v2f64, v8i32, sub_xmm>;
196 defm : subvec_zero_lowering<"APSZ128", VR128X, v8f32, v4f32, v8i32, sub_xmm>;
197 defm : subvec_zero_lowering<"DQA64Z128", VR128X, v4i64, v2i64, v8i32, sub_xmm>;
198 defm : subvec_zero_lowering<"DQA64Z128", VR128X, v8i32, v4i32, v8i32, sub_xmm>;
199 defm : subvec_zero_lowering<"DQA64Z128", VR128X, v16i16, v8i16, v8i32, sub_xmm>;
200 defm : subvec_zero_lowering<"DQA64Z128", VR128X, v32i8, v16i8, v8i32, sub_xmm>;
202 defm : subvec_zero_lowering<"APDZ128", VR128X, v8f64, v2f64, v16i32, sub_xmm>;
203 defm : subvec_zero_lowering<"APSZ128", VR128X, v16f32, v4f32, v16i32, sub_xmm>;
204 defm : subvec_zero_lowering<"DQA64Z128", VR128X, v8i64, v2i64, v16i32, sub_xmm>;
205 defm : subvec_zero_lowering<"DQA64Z128", VR128X, v16i32, v4i32, v16i32, sub_xmm>;
206 defm : subvec_zero_lowering<"DQA64Z128", VR128X, v32i16, v8i16, v16i32, sub_xmm>;
207 defm : subvec_zero_lowering<"DQA64Z128", VR128X, v64i8, v16i8, v16i32, sub_xmm>;
209 defm : subvec_zero_lowering<"APDZ256", VR256X, v8f64, v4f64, v16i32, sub_ymm>;
210 defm : subvec_zero_lowering<"APSZ256", VR256X, v16f32, v8f32, v16i32, sub_ymm>;
211 defm : subvec_zero_lowering<"DQA64Z256", VR256X, v8i64, v4i64, v16i32, sub_ymm>;
212 defm : subvec_zero_lowering<"DQA64Z256", VR256X, v16i32, v8i32, v16i32, sub_ymm>;
213 defm : subvec_zero_lowering<"DQA64Z256", VR256X, v32i16, v16i16, v16i32, sub_ymm>;
214 defm : subvec_zero_lowering<"DQA64Z256", VR256X, v64i8, v32i8, v16i32, sub_ymm>;
// AVX512F without VL: must fall back to VEX-encoded moves (plain/Y forms)
// with the non-EVEX register classes VR128/VR256 for 512-bit results.
217 let Predicates = [HasAVX512, NoVLX] in {
218 defm : subvec_zero_lowering<"APD", VR128, v8f64, v2f64, v16i32, sub_xmm>;
219 defm : subvec_zero_lowering<"APS", VR128, v16f32, v4f32, v16i32, sub_xmm>;
220 defm : subvec_zero_lowering<"DQA", VR128, v8i64, v2i64, v16i32, sub_xmm>;
221 defm : subvec_zero_lowering<"DQA", VR128, v16i32, v4i32, v16i32, sub_xmm>;
222 defm : subvec_zero_lowering<"DQA", VR128, v32i16, v8i16, v16i32, sub_xmm>;
223 defm : subvec_zero_lowering<"DQA", VR128, v64i8, v16i8, v16i32, sub_xmm>;
225 defm : subvec_zero_lowering<"APDY", VR256, v8f64, v4f64, v16i32, sub_ymm>;
226 defm : subvec_zero_lowering<"APSY", VR256, v16f32, v8f32, v16i32, sub_ymm>;
227 defm : subvec_zero_lowering<"DQAY", VR256, v8i64, v4i64, v16i32, sub_ymm>;
228 defm : subvec_zero_lowering<"DQAY", VR256, v16i32, v8i32, v16i32, sub_ymm>;
229 defm : subvec_zero_lowering<"DQAY", VR256, v32i16, v16i16, v16i32, sub_ymm>;
230 defm : subvec_zero_lowering<"DQAY", VR256, v64i8, v32i8, v16i32, sub_ymm>;
// PatLeaf that matches a mask-register value only when the C++ helper
// isMaskZeroExtended(N) can prove the bits above the narrow mask's width are
// already zero; used below to elide explicit zeroing on mask inserts.
233 class maskzeroupper<ValueType vt, RegisterClass RC> :
234 PatLeaf<(vt RC:$src), [{
235 return isMaskZeroExtended(N);
238 def maskzeroupperv1i1 : maskzeroupper<v1i1, VK1>;
239 def maskzeroupperv2i1 : maskzeroupper<v2i1, VK2>;
240 def maskzeroupperv4i1 : maskzeroupper<v4i1, VK4>;
241 def maskzeroupperv8i1 : maskzeroupper<v8i1, VK8>;
242 def maskzeroupperv16i1 : maskzeroupper<v16i1, VK16>;
243 def maskzeroupperv32i1 : maskzeroupper<v32i1, VK32>;
245 // The patterns determine if we can depend on the upper bits of a mask register
246 // being zeroed by the previous operation so that we can skip explicit
// zeroing: when the maskzeroupper leaf matches, inserting a narrow mask into
// an all-zeros v32i1/v64i1 is just a mask-register-class copy.
248 let Predicates = [HasBWI] in {
249 def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
250 maskzeroupperv1i1:$src, (iPTR 0))),
251 (COPY_TO_REGCLASS VK1:$src, VK32)>;
252 def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
253 maskzeroupperv8i1:$src, (iPTR 0))),
254 (COPY_TO_REGCLASS VK8:$src, VK32)>;
255 def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
256 maskzeroupperv16i1:$src, (iPTR 0))),
257 (COPY_TO_REGCLASS VK16:$src, VK32)>;
259 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
260 maskzeroupperv1i1:$src, (iPTR 0))),
261 (COPY_TO_REGCLASS VK1:$src, VK64)>;
262 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
263 maskzeroupperv8i1:$src, (iPTR 0))),
264 (COPY_TO_REGCLASS VK8:$src, VK64)>;
265 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
266 maskzeroupperv16i1:$src, (iPTR 0))),
267 (COPY_TO_REGCLASS VK16:$src, VK64)>;
268 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
269 maskzeroupperv32i1:$src, (iPTR 0))),
270 (COPY_TO_REGCLASS VK32:$src, VK64)>;
// Same zero-extended-mask insert elision as above, for v16i1/v8i1 results;
// each group is gated on the feature that makes the narrow mask type legal.
273 let Predicates = [HasAVX512] in {
274 def : Pat<(v16i1 (insert_subvector (v16i1 immAllZerosV),
275 maskzeroupperv1i1:$src, (iPTR 0))),
276 (COPY_TO_REGCLASS VK1:$src, VK16)>;
277 def : Pat<(v16i1 (insert_subvector (v16i1 immAllZerosV),
278 maskzeroupperv8i1:$src, (iPTR 0))),
279 (COPY_TO_REGCLASS VK8:$src, VK16)>;
282 let Predicates = [HasDQI] in {
283 def : Pat<(v8i1 (insert_subvector (v8i1 immAllZerosV),
284 maskzeroupperv1i1:$src, (iPTR 0))),
285 (COPY_TO_REGCLASS VK1:$src, VK8)>;
288 let Predicates = [HasVLX, HasDQI] in {
289 def : Pat<(v8i1 (insert_subvector (v8i1 immAllZerosV),
290 maskzeroupperv2i1:$src, (iPTR 0))),
291 (COPY_TO_REGCLASS VK2:$src, VK8)>;
292 def : Pat<(v8i1 (insert_subvector (v8i1 immAllZerosV),
293 maskzeroupperv4i1:$src, (iPTR 0))),
294 (COPY_TO_REGCLASS VK4:$src, VK8)>;
297 let Predicates = [HasVLX] in {
298 def : Pat<(v16i1 (insert_subvector (v16i1 immAllZerosV),
299 maskzeroupperv2i1:$src, (iPTR 0))),
300 (COPY_TO_REGCLASS VK2:$src, VK16)>;
301 def : Pat<(v16i1 (insert_subvector (v16i1 immAllZerosV),
302 maskzeroupperv4i1:$src, (iPTR 0))),
303 (COPY_TO_REGCLASS VK4:$src, VK16)>;
// v2i1/v4i1 sources (VLX types) inserted into zeroed v32i1/v64i1 (BWI types):
// still just a mask-register-class copy when the upper bits are known zero.
306 let Predicates = [HasBWI, HasVLX] in {
307 def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
308 maskzeroupperv2i1:$src, (iPTR 0))),
309 (COPY_TO_REGCLASS VK2:$src, VK32)>;
310 def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
311 maskzeroupperv4i1:$src, (iPTR 0))),
312 (COPY_TO_REGCLASS VK4:$src, VK32)>;
313 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
314 maskzeroupperv2i1:$src, (iPTR 0))),
315 (COPY_TO_REGCLASS VK2:$src, VK64)>;
316 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
317 maskzeroupperv4i1:$src, (iPTR 0))),
318 (COPY_TO_REGCLASS VK4:$src, VK64)>;
321 // If the bits are not zero we have to fall back to explicitly zeroing by
// shifting the mask up and back down (a KSHIFTL/KSHIFTR pair clears the
// bits above the inserted width); with DQI a v8i1 insert into v16i1 can
// instead round-trip through KMOVB, which only writes the low 8 bits.
323 let Predicates = [HasAVX512] in {
324 def : Pat<(v16i1 (insert_subvector (v16i1 immAllZerosV),
325 (v1i1 VK1:$mask), (iPTR 0))),
326 (KSHIFTRWri (KSHIFTLWri (COPY_TO_REGCLASS VK1:$mask, VK16),
329 def : Pat<(v16i1 (insert_subvector (v16i1 immAllZerosV),
330 (v2i1 VK2:$mask), (iPTR 0))),
331 (KSHIFTRWri (KSHIFTLWri (COPY_TO_REGCLASS VK2:$mask, VK16),
334 def : Pat<(v16i1 (insert_subvector (v16i1 immAllZerosV),
335 (v4i1 VK4:$mask), (iPTR 0))),
336 (KSHIFTRWri (KSHIFTLWri (COPY_TO_REGCLASS VK4:$mask, VK16),
// Without DQI there is no KMOVB, so v8i1 also takes the shift-pair path.
340 let Predicates = [HasAVX512, NoDQI] in {
341 def : Pat<(v16i1 (insert_subvector (v16i1 immAllZerosV),
342 (v8i1 VK8:$mask), (iPTR 0))),
343 (KSHIFTRWri (KSHIFTLWri (COPY_TO_REGCLASS VK8:$mask, VK16),
347 let Predicates = [HasDQI] in {
348 def : Pat<(v16i1 (insert_subvector (v16i1 immAllZerosV),
349 (v8i1 VK8:$mask), (iPTR 0))),
350 (COPY_TO_REGCLASS (KMOVBkk VK8:$mask), VK16)>;
352 def : Pat<(v8i1 (insert_subvector (v8i1 immAllZerosV),
353 (v1i1 VK1:$mask), (iPTR 0))),
354 (KSHIFTRBri (KSHIFTLBri (COPY_TO_REGCLASS VK1:$mask, VK8),
356 def : Pat<(v8i1 (insert_subvector (v8i1 immAllZerosV),
357 (v2i1 VK2:$mask), (iPTR 0))),
358 (KSHIFTRBri (KSHIFTLBri (COPY_TO_REGCLASS VK2:$mask, VK8),
360 def : Pat<(v8i1 (insert_subvector (v8i1 immAllZerosV),
361 (v4i1 VK4:$mask), (iPTR 0))),
362 (KSHIFTRBri (KSHIFTLBri (COPY_TO_REGCLASS VK4:$mask, VK8),
// v32i1/v64i1 inserts: KMOVW/KMOVD/KMOVB only write the low bits of the
// destination mask, so a move plus regclass copy implements insert-into-zero;
// the v8i1 cases without DQI fall back to KSHIFTL/KSHIFTR pairs.
366 let Predicates = [HasBWI] in {
367 def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
368 (v16i1 VK16:$mask), (iPTR 0))),
369 (COPY_TO_REGCLASS (KMOVWkk VK16:$mask), VK32)>;
371 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
372 (v16i1 VK16:$mask), (iPTR 0))),
373 (COPY_TO_REGCLASS (KMOVWkk VK16:$mask), VK64)>;
374 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
375 (v32i1 VK32:$mask), (iPTR 0))),
376 (COPY_TO_REGCLASS (KMOVDkk VK32:$mask), VK64)>;
379 let Predicates = [HasBWI, NoDQI] in {
380 def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
381 (v8i1 VK8:$mask), (iPTR 0))),
382 (KSHIFTRDri (KSHIFTLDri (COPY_TO_REGCLASS VK8:$mask, VK32),
385 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
386 (v8i1 VK8:$mask), (iPTR 0))),
387 (KSHIFTRQri (KSHIFTLQri (COPY_TO_REGCLASS VK8:$mask, VK64),
391 let Predicates = [HasBWI, HasDQI] in {
392 def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
393 (v8i1 VK8:$mask), (iPTR 0))),
394 (COPY_TO_REGCLASS (KMOVBkk VK8:$mask), VK32)>;
396 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
397 (v8i1 VK8:$mask), (iPTR 0))),
398 (COPY_TO_REGCLASS (KMOVBkk VK8:$mask), VK64)>;
// Narrow VLX mask types (v1i1/v2i1/v4i1) into v32i1/v64i1: no KMOV form is
// narrow enough, so always zero the upper bits with a KSHIFTL/KSHIFTR pair.
401 let Predicates = [HasBWI, HasVLX] in {
402 def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
403 (v1i1 VK1:$mask), (iPTR 0))),
404 (KSHIFTRDri (KSHIFTLDri (COPY_TO_REGCLASS VK1:$mask, VK32),
406 def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
407 (v2i1 VK2:$mask), (iPTR 0))),
408 (KSHIFTRDri (KSHIFTLDri (COPY_TO_REGCLASS VK2:$mask, VK32),
410 def : Pat<(v32i1 (insert_subvector (v32i1 immAllZerosV),
411 (v4i1 VK4:$mask), (iPTR 0))),
412 (KSHIFTRDri (KSHIFTLDri (COPY_TO_REGCLASS VK4:$mask, VK32),
415 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
416 (v1i1 VK1:$mask), (iPTR 0))),
417 (KSHIFTRQri (KSHIFTLQri (COPY_TO_REGCLASS VK1:$mask, VK64),
419 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
420 (v2i1 VK2:$mask), (iPTR 0))),
421 (KSHIFTRQri (KSHIFTLQri (COPY_TO_REGCLASS VK2:$mask, VK64),
423 def : Pat<(v64i1 (insert_subvector (v64i1 immAllZerosV),
424 (v4i1 VK4:$mask), (iPTR 0))),
425 (KSHIFTRQri (KSHIFTLQri (COPY_TO_REGCLASS VK4:$mask, VK64),
429 //===----------------------------------------------------------------------===//
430 // Extra selection patterns for f128, f128mem
// f128 values live in XMM registers, so loads/stores lower to the plain
// 128-bit vector moves; MOVAPS/MOVUPS are chosen over MOVDQA/MOVDQU because:
432 // movaps is shorter than movdqa. movaps is in SSE and movdqa is in SSE2.
433 let Predicates = [NoAVX] in {
434 def : Pat<(alignedstore (f128 VR128:$src), addr:$dst),
435 (MOVAPSmr addr:$dst, VR128:$src)>;
436 def : Pat<(store (f128 VR128:$src), addr:$dst),
437 (MOVUPSmr addr:$dst, VR128:$src)>;
439 def : Pat<(alignedloadf128 addr:$src),
440 (MOVAPSrm addr:$src)>;
441 def : Pat<(loadf128 addr:$src),
442 (MOVUPSrm addr:$src)>;
// Same f128 load/store lowering for the VEX (HasAVX, NoVLX) and EVEX
// (HasVLX, Z128) encodings of MOVAPS/MOVUPS.
445 let Predicates = [HasAVX, NoVLX] in {
446 def : Pat<(alignedstore (f128 VR128:$src), addr:$dst),
447 (VMOVAPSmr addr:$dst, VR128:$src)>;
448 def : Pat<(store (f128 VR128:$src), addr:$dst),
449 (VMOVUPSmr addr:$dst, VR128:$src)>;
451 def : Pat<(alignedloadf128 addr:$src),
452 (VMOVAPSrm addr:$src)>;
453 def : Pat<(loadf128 addr:$src),
454 (VMOVUPSrm addr:$src)>;
457 let Predicates = [HasVLX] in {
458 def : Pat<(alignedstore (f128 VR128X:$src), addr:$dst),
459 (VMOVAPSZ128mr addr:$dst, VR128X:$src)>;
460 def : Pat<(store (f128 VR128X:$src), addr:$dst),
461 (VMOVUPSZ128mr addr:$dst, VR128X:$src)>;
463 def : Pat<(alignedloadf128 addr:$src),
464 (VMOVAPSZ128rm addr:$src)>;
465 def : Pat<(loadf128 addr:$src),
466 (VMOVUPSZ128rm addr:$src)>;
// f128 bitwise logic (X86fand/X86for/X86fxor) lowers to the packed-single
// logic ops; memory operands use memopf128 (SSE requires aligned folds).
469 let Predicates = [UseSSE1] in {
470 // andps is shorter than andpd or pand. andps is SSE and andpd/pand are in SSE2
471 def : Pat<(f128 (X86fand VR128:$src1, (memopf128 addr:$src2))),
472 (ANDPSrm VR128:$src1, f128mem:$src2)>;
474 def : Pat<(f128 (X86fand VR128:$src1, VR128:$src2)),
475 (ANDPSrr VR128:$src1, VR128:$src2)>;
477 def : Pat<(f128 (X86for VR128:$src1, (memopf128 addr:$src2))),
478 (ORPSrm VR128:$src1, f128mem:$src2)>;
480 def : Pat<(f128 (X86for VR128:$src1, VR128:$src2)),
481 (ORPSrr VR128:$src1, VR128:$src2)>;
483 def : Pat<(f128 (X86fxor VR128:$src1, (memopf128 addr:$src2))),
484 (XORPSrm VR128:$src1, f128mem:$src2)>;
486 def : Pat<(f128 (X86fxor VR128:$src1, VR128:$src2)),
487 (XORPSrr VR128:$src1, VR128:$src2)>;
// AVX form of the f128 logic lowering. Note the memory patterns use loadf128
// rather than memopf128 — presumably because AVX permits folding unaligned
// loads; confirm against the load fragment definitions.
490 let Predicates = [HasAVX] in {
491 // andps is shorter than andpd or pand. andps is SSE and andpd/pand are in SSE2
492 def : Pat<(f128 (X86fand VR128:$src1, (loadf128 addr:$src2))),
493 (VANDPSrm VR128:$src1, f128mem:$src2)>;
495 def : Pat<(f128 (X86fand VR128:$src1, VR128:$src2)),
496 (VANDPSrr VR128:$src1, VR128:$src2)>;
498 def : Pat<(f128 (X86for VR128:$src1, (loadf128 addr:$src2))),
499 (VORPSrm VR128:$src1, f128mem:$src2)>;
501 def : Pat<(f128 (X86for VR128:$src1, VR128:$src2)),
502 (VORPSrr VR128:$src1, VR128:$src2)>;
504 def : Pat<(f128 (X86fxor VR128:$src1, (loadf128 addr:$src2))),
505 (VXORPSrm VR128:$src1, f128mem:$src2)>;
507 def : Pat<(f128 (X86fxor VR128:$src1, VR128:$src2)),
508 (VXORPSrr VR128:$src1, VR128:$src2)>;