]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAArch64.td
Merge llvm-project main llvmorg-14-init-17616-g024a1fab5c35
[FreeBSD/FreeBSD.git] / contrib / llvm-project / llvm / include / llvm / IR / IntrinsicsAArch64.td
//===- IntrinsicsAArch64.td - Defines AArch64 intrinsics ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the AArch64-specific intrinsics.
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "aarch64" in {

// Load-/store-exclusive intrinsics. The data operand/result is modelled as
// i64 (the full register width); the pointer operand is polymorphic so the
// same intrinsic covers byte/half/word/dword accesses.
def int_aarch64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
                                 [IntrNoFree, IntrWillReturn]>;
def int_aarch64_ldaxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
                                  [IntrNoFree, IntrWillReturn]>;
// Store-exclusive returns an i32 status (0 on success).
def int_aarch64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty],
                                 [IntrNoFree, IntrWillReturn]>;
def int_aarch64_stlxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty],
                                  [IntrNoFree, IntrWillReturn]>;

// Paired (two-register, 128-bit) exclusive variants.
def int_aarch64_ldxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty],
                                 [IntrNoFree, IntrWillReturn]>;
def int_aarch64_ldaxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty],
                                  [IntrNoFree, IntrWillReturn]>;
def int_aarch64_stxp : Intrinsic<[llvm_i32_ty],
                               [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty],
                               [IntrNoFree, IntrWillReturn]>;
def int_aarch64_stlxp : Intrinsic<[llvm_i32_ty],
                                  [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty],
                                  [IntrNoFree, IntrWillReturn]>;

// Clear the local monitor's exclusive state.
def int_aarch64_clrex : Intrinsic<[]>;

def int_aarch64_sdiv : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                LLVMMatchType<0>], [IntrNoMem]>;
def int_aarch64_udiv : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                LLVMMatchType<0>], [IntrNoMem]>;

// FJCVTZS (Armv8.3-A): double -> i32 conversion.
def int_aarch64_fjcvtzs : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;

// Count leading sign bits (32- and 64-bit inputs, i32 result).
def int_aarch64_cls: DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_aarch64_cls64: DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem]>;

// Armv8.5-A scalar FP rounding to 32-/64-bit integral values.
def int_aarch64_frint32z
    : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], [ LLVMMatchType<0> ],
                            [ IntrNoMem ]>;
def int_aarch64_frint64z
    : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], [ LLVMMatchType<0> ],
                            [ IntrNoMem ]>;
def int_aarch64_frint32x
    : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], [ LLVMMatchType<0> ],
                            [ IntrNoMem ]>;
def int_aarch64_frint64x
    : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], [ LLVMMatchType<0> ],
                            [ IntrNoMem ]>;

//===----------------------------------------------------------------------===//
// HINT

// HINT #imm; the i32 operand selects the hint encoding.
def int_aarch64_hint : DefaultAttrsIntrinsic<[], [llvm_i32_ty]>;

//===----------------------------------------------------------------------===//
// Data Barrier Instructions

// The i32 operand is the barrier-option immediate.
def int_aarch64_dmb : GCCBuiltin<"__builtin_arm_dmb">, MSBuiltin<"__dmb">,
                      Intrinsic<[], [llvm_i32_ty], [IntrNoFree, IntrWillReturn]>;
def int_aarch64_dsb : GCCBuiltin<"__builtin_arm_dsb">, MSBuiltin<"__dsb">,
                      Intrinsic<[], [llvm_i32_ty], [IntrNoFree, IntrWillReturn]>;
def int_aarch64_isb : GCCBuiltin<"__builtin_arm_isb">, MSBuiltin<"__isb">,
                      Intrinsic<[], [llvm_i32_ty], [IntrNoFree, IntrWillReturn]>;

// A space-consuming intrinsic primarily for testing block and jump table
// placements. The first argument is the number of bytes this "instruction"
// takes up, the second and return value are essentially chains, used to force
// ordering during ISel.
def int_aarch64_space : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty], []>;

}
82
//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  // Helper classes describing the common NEON intrinsic signatures.  Naming
  // scheme: <N>Arg = operand count; Long/Wide/Narrow describe element-width
  // relationships between result and operands (LLVMTruncatedType /
  // LLVMExtendedType); Across = cross-lane reduction to a scalar.
  class AdvSIMD_2Scalar_Float_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_FPToIntRounding_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;

  class AdvSIMD_1IntArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1FloatArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Expand_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
  class AdvSIMD_1IntArg_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Int_Across_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Float_Across_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;

  class AdvSIMD_2IntArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2FloatArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Compare_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
  class AdvSIMD_2Arg_FloatCompare_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Wide_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMExtendedType<0>, LLVMExtendedType<0>],
                [IntrNoMem]>;
  // The trailing llvm_i32_ty in the *Scalar* classes is a shift-amount /
  // immediate operand.
  class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                [LLVMExtendedType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_anyvector_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                [LLVMMatchType<0>, llvm_anyint_ty, llvm_i32_ty],
                [IntrNoMem]>;

  class AdvSIMD_3IntArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Intrinsic
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
               [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
               [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Intrinsic
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
               [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
               [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
               [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty,
                LLVMMatchType<1>], [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty, llvm_i32_ty],
                [IntrNoMem]>;
  // Fixed-point <-> floating-point conversions; the i32 operand is the number
  // of fractional bits.
  class AdvSIMD_CvtFxToFP_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_CvtFPToFx_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
                [IntrNoMem]>;

  class AdvSIMD_1Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrNoMem]>;

  class AdvSIMD_Dot_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;

  class AdvSIMD_FP16FML_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;

  class AdvSIMD_MatMul_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;

  class AdvSIMD_FML_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;

  class AdvSIMD_BF16FML_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                [llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty],
                [IntrNoMem]>;
}

// Arithmetic ops

// Every def in this group is IntrNoMem via the let; individual defs may
// override IntrProperties (see fmulx below).
let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
  // Vector Add Across Lanes
  def int_aarch64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_faddv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Long Add Across Lanes
  def int_aarch64_neon_saddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;

  // Vector Halving Add
  def int_aarch64_neon_shadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Rounding Halving Add
  def int_aarch64_neon_srhadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_urhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Add
  def int_aarch64_neon_sqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_suqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_usqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqadd : AdvSIMD_2IntArg_Intrinsic;

  // Vector Add High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_addhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Add High-Half
  def int_aarch64_neon_raddhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Saturating Doubling Multiply High
  def int_aarch64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_sqdmulh_lane : AdvSIMD_2VectorArg_Lane_Intrinsic;
  def int_aarch64_neon_sqdmulh_laneq : AdvSIMD_2VectorArg_Lane_Intrinsic;

  // Vector Saturating Rounding Doubling Multiply High
  def int_aarch64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_sqrdmulh_lane : AdvSIMD_2VectorArg_Lane_Intrinsic;
  def int_aarch64_neon_sqrdmulh_laneq : AdvSIMD_2VectorArg_Lane_Intrinsic;

  def int_aarch64_neon_sqrdmlah : AdvSIMD_3IntArg_Intrinsic;
  def int_aarch64_neon_sqrdmlsh : AdvSIMD_3IntArg_Intrinsic;

  // Vector Polynominal Multiply
  def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Long Multiply
  def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;

  // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
  // it with a v16i8.
  def int_aarch64_neon_pmull64 :
        DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;

  // Vector Extending Multiply
  def int_aarch64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic {
    let IntrProperties = [IntrNoMem, Commutative];
  }

  // Vector Saturating Doubling Long Multiply
  def int_aarch64_neon_sqdmull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_sqdmulls_scalar
    : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

  // Vector Halving Subtract
  def int_aarch64_neon_shsub : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhsub : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Subtract
  def int_aarch64_neon_sqsub : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqsub : AdvSIMD_2IntArg_Intrinsic;

  // Vector Subtract High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_subhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Subtract High-Half
  def int_aarch64_neon_rsubhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Compare Absolute Greater-than-or-equal
  def int_aarch64_neon_facge : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Compare Absolute Greater-than
  def int_aarch64_neon_facgt : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Absolute Difference
  def int_aarch64_neon_sabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fabd : AdvSIMD_2VectorArg_Intrinsic;

  // Scalar Absolute Difference
  def int_aarch64_sisd_fabd : AdvSIMD_2Scalar_Float_Intrinsic;

  // Vector Max
  def int_aarch64_neon_smax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmax : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fmaxnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Max Across Lanes
  def int_aarch64_neon_smaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_umaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fmaxv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fmaxnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Min
  def int_aarch64_neon_smin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmin : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fminnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Min/Max Number
  def int_aarch64_neon_fminnm : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fmaxnm : AdvSIMD_2FloatArg_Intrinsic;

  // Vector Min Across Lanes
  def int_aarch64_neon_sminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fminv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fminnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Pairwise Add
  def int_aarch64_neon_addp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_faddp : AdvSIMD_2VectorArg_Intrinsic;

  // Long Pairwise Add
  // FIXME: In theory, we shouldn't need intrinsics for saddlp or
  // uaddlp, but tblgen's type inference currently can't handle the
  // pattern fragments this ends up generating.
  def int_aarch64_neon_saddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
  def int_aarch64_neon_uaddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Folding Maximum
  def int_aarch64_neon_smaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmaxp : AdvSIMD_2VectorArg_Intrinsic;

  // Folding Minimum
  def int_aarch64_neon_sminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fminp : AdvSIMD_2VectorArg_Intrinsic;

  // Reciprocal Estimate/Step
  def int_aarch64_neon_frecps : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_frsqrts : AdvSIMD_2FloatArg_Intrinsic;

  // Reciprocal Exponent
  def int_aarch64_neon_frecpx : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Saturating Shift Left
  def int_aarch64_neon_sqshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Rounding Shift Left
  def int_aarch64_neon_srshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_urshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Saturating Rounding Shift Left
  def int_aarch64_neon_sqrshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqrshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Shift Left by Constant
  def int_aarch64_neon_sqshlu : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Signed->Unsigned Rounding Narrowing Saturating Shift Right by Const
  def int_aarch64_neon_sqrshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Narrowing Shift Right by Constant
  def int_aarch64_neon_sqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Shift Right by Constant
  def int_aarch64_neon_rshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Shift Left
  def int_aarch64_neon_sshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_ushl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Widening Shift Left by Constant
  def int_aarch64_neon_shll : AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic;
  def int_aarch64_neon_sshll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
  def int_aarch64_neon_ushll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;

  // Vector Shift Right by Constant and Insert
  def int_aarch64_neon_vsri : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Shift Left by Constant and Insert
  def int_aarch64_neon_vsli : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Saturating Narrow
  def int_aarch64_neon_scalar_sqxtn: AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_scalar_uqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
  def int_aarch64_neon_uqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Saturating Extract and Unsigned Narrow
  def int_aarch64_neon_scalar_sqxtun : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtun : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Absolute Value
  def int_aarch64_neon_abs : AdvSIMD_1Arg_Intrinsic;

  // Vector Saturating Absolute Value
  def int_aarch64_neon_sqabs : AdvSIMD_1IntArg_Intrinsic;

  // Vector Saturating Negation
  def int_aarch64_neon_sqneg : AdvSIMD_1IntArg_Intrinsic;

  // Vector Count Leading Sign Bits
  def int_aarch64_neon_cls : AdvSIMD_1VectorArg_Intrinsic;

  // Vector Reciprocal Estimate
  def int_aarch64_neon_urecpe : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frecpe : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Square Root Estimate
  def int_aarch64_neon_ursqrte : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frsqrte : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Conversions Between Half-Precision and Single-Precision.
  def int_aarch64_neon_vcvtfp2hf
    : DefaultAttrsIntrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_aarch64_neon_vcvthf2fp
    : DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;

  // Vector Conversions Between Floating-point and Fixed-point.
  def int_aarch64_neon_vcvtfp2fxs : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfp2fxu : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfxs2fp : AdvSIMD_CvtFxToFP_Intrinsic;
  def int_aarch64_neon_vcvtfxu2fp : AdvSIMD_CvtFxToFP_Intrinsic;

  // Vector FP->Int Conversions
  def int_aarch64_neon_fcvtas : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtau : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtms : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtmu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtns : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtnu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtps : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtpu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzs : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzu : AdvSIMD_FPToIntRounding_Intrinsic;

  // v8.5-A Vector FP Rounding
  def int_aarch64_neon_frint32x : AdvSIMD_1FloatArg_Intrinsic;
  def int_aarch64_neon_frint32z : AdvSIMD_1FloatArg_Intrinsic;
  def int_aarch64_neon_frint64x : AdvSIMD_1FloatArg_Intrinsic;
  def int_aarch64_neon_frint64z : AdvSIMD_1FloatArg_Intrinsic;

  // Scalar FP->Int conversions

  // Vector FP Inexact Narrowing
  def int_aarch64_neon_fcvtxn : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Scalar FP Inexact Narrowing
  def int_aarch64_sisd_fcvtxn : DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_double_ty],
                                        [IntrNoMem]>;

  // v8.2-A Dot Product
  def int_aarch64_neon_udot : AdvSIMD_Dot_Intrinsic;
  def int_aarch64_neon_sdot : AdvSIMD_Dot_Intrinsic;

  // v8.6-A Matrix Multiply Intrinsics
  def int_aarch64_neon_ummla : AdvSIMD_MatMul_Intrinsic;
  def int_aarch64_neon_smmla : AdvSIMD_MatMul_Intrinsic;
  def int_aarch64_neon_usmmla : AdvSIMD_MatMul_Intrinsic;
  def int_aarch64_neon_usdot : AdvSIMD_Dot_Intrinsic;
  def int_aarch64_neon_bfdot : AdvSIMD_Dot_Intrinsic;
  def int_aarch64_neon_bfmmla
    : DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                [llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty],
                [IntrNoMem]>;
  def int_aarch64_neon_bfmlalb : AdvSIMD_BF16FML_Intrinsic;
  def int_aarch64_neon_bfmlalt : AdvSIMD_BF16FML_Intrinsic;


  // v8.6-A Bfloat Intrinsics
  def int_aarch64_neon_bfcvt
    : DefaultAttrsIntrinsic<[llvm_bfloat_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_aarch64_neon_bfcvtn
    : DefaultAttrsIntrinsic<[llvm_v8bf16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_aarch64_neon_bfcvtn2
    : DefaultAttrsIntrinsic<[llvm_v8bf16_ty],
                [llvm_v8bf16_ty, llvm_v4f32_ty],
                [IntrNoMem]>;

  // v8.2-A FP16 Fused Multiply-Add Long
  def int_aarch64_neon_fmlal : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlsl : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlal2 : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlsl2 : AdvSIMD_FP16FML_Intrinsic;

  // v8.3-A Floating-point complex add
  def int_aarch64_neon_vcadd_rot90  : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_vcadd_rot270 : AdvSIMD_2VectorArg_Intrinsic;

  def int_aarch64_neon_vcmla_rot0   : AdvSIMD_3VectorArg_Intrinsic;
  def int_aarch64_neon_vcmla_rot90  : AdvSIMD_3VectorArg_Intrinsic;
  def int_aarch64_neon_vcmla_rot180 : AdvSIMD_3VectorArg_Intrinsic;
  def int_aarch64_neon_vcmla_rot270 : AdvSIMD_3VectorArg_Intrinsic;
}
536
let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  // (dst vector, dst index, src vector, src index) -> result vector.
  // The two i64 operands are the destination and source lane indices.
  class AdvSIMD_2Vector2Index_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_anyvector_ty, llvm_i64_ty, LLVMMatchType<0>, llvm_i64_ty],
                [IntrNoMem]>;
}
543
// Vector element to element moves
// NOTE(review): defined outside the let block above; the "aarch64" target
// prefix is presumably inherited through the class — confirm against tblgen.
def int_aarch64_neon_vcopy_lane: AdvSIMD_2Vector2Index_Intrinsic;
546
let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  // Structured load/store helper classes (ld1xN/ldN/stN and lane variants).
  // Loads are IntrReadMem + IntrArgMemOnly; stores are IntrArgMemOnly with
  // NoCapture on the pointer operand (its index grows with the operand count).
  class AdvSIMD_1Vec_Load_Intrinsic
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
                  [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_1Vec_Store_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;

  class AdvSIMD_2Vec_Load_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, llvm_anyvector_ty],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_2Vec_Load_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, llvm_anyvector_ty,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_2Vec_Store_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
  class AdvSIMD_2Vec_Store_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;

  class AdvSIMD_3Vec_Load_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_3Vec_Load_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_3Vec_Store_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
  class AdvSIMD_3Vec_Store_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;

  class AdvSIMD_4Vec_Load_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, llvm_anyvector_ty],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_4Vec_Load_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, llvm_anyvector_ty,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_4Vec_Store_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
  class AdvSIMD_4Vec_Store_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<5>>]>;
}
615
// Memory ops

// Multi-vector contiguous loads/stores (LD1 {vN..}), de-interleaving
// loads/stores (LDn/STn), lane and replicate (LDnR) variants.
def int_aarch64_neon_ld1x2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st1x2 : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st1x3 : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st1x4 : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_ld2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_ld2lane : AdvSIMD_2Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld3lane : AdvSIMD_3Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld4lane : AdvSIMD_4Vec_Load_Lane_Intrinsic;

def int_aarch64_neon_ld2r : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3r : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4r : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st2  : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st3  : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st4  : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_st2lane  : AdvSIMD_2Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st3lane  : AdvSIMD_3Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st4lane  : AdvSIMD_4Vec_Store_Lane_Intrinsic;
645
let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  // Table lookup (TBL) classes: 1-4 v16i8 table registers plus an index
  // vector whose type also fixes the result type.
  class AdvSIMD_Tbl1_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbl2_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_Tbl3_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbl4_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  // Table extension (TBX) classes: like TBL but the first operand is the
  // fall-back vector preserved for out-of-range indices.
  class AdvSIMD_Tbx1_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx2_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx3_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx4_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
}
// Table lookup (TBL) with 1-4 table registers.
def int_aarch64_neon_tbl1 : AdvSIMD_Tbl1_Intrinsic;
def int_aarch64_neon_tbl2 : AdvSIMD_Tbl2_Intrinsic;
def int_aarch64_neon_tbl3 : AdvSIMD_Tbl3_Intrinsic;
def int_aarch64_neon_tbl4 : AdvSIMD_Tbl4_Intrinsic;

// Table lookup extension (TBX) with 1-4 table registers.
def int_aarch64_neon_tbx1 : AdvSIMD_Tbx1_Intrinsic;
def int_aarch64_neon_tbx2 : AdvSIMD_Tbx2_Intrinsic;
def int_aarch64_neon_tbx3 : AdvSIMD_Tbx3_Intrinsic;
def int_aarch64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;
693
let TargetPrefix = "aarch64" in {
  // FPCR access. IntrNoMem + IntrHasSideEffects keeps the reads/writes from
  // being CSE'd or dead-code eliminated while still not aliasing IR memory.
  class FPCR_Get_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects]>;
  class FPCR_Set_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_i64_ty], [IntrNoMem, IntrHasSideEffects]>;
  // Returns the random value plus an i1 success flag (NZCV-derived).
  class RNDR_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty, llvm_i1_ty], [], [IntrNoMem, IntrHasSideEffects]>;
}
702
// FPCR (floating-point control register) read/write.
def int_aarch64_get_fpcr : FPCR_Get_Intrinsic;
def int_aarch64_set_fpcr : FPCR_Set_Intrinsic;

// Armv8.5-A random number generation intrinsics (RNDR / RNDRRS).
def int_aarch64_rndr : RNDR_Intrinsic;
def int_aarch64_rndrrs : RNDR_Intrinsic;
710
let TargetPrefix = "aarch64" in {
  // AES round taking data and round key (v16i8 each).
  class Crypto_AES_DataKey_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

  // AES (inverse) mix-columns taking only the data vector.
  class Crypto_AES_Data_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;

  // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
  // (v4i32).
  class Crypto_SHA_5Hash4Schedule_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA intrinsic taking a single hash word (i32) and returning one (i32),
  // e.g. SHA1H.
  class Crypto_SHA_1Hash_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the schedule (2 x v4i32).
  class Crypto_SHA_8Schedule_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 12 words of the schedule (3 x v4i32).
  class Crypto_SHA_12Schedule_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the hash and 4 of the schedule.
  class Crypto_SHA_8Hash4Schedule_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA512 intrinsic taking 2 arguments (v2i64 each).
  class Crypto_SHA512_2Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

  // SHA512 intrinsic taking 3 arguments (v2i64 each).
  class Crypto_SHA512_3Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                [IntrNoMem]>;

  // SHA3 intrinsics taking 3 vector arguments of a common (any) vector type.
  class Crypto_SHA3_3Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
               [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
               [IntrNoMem]>;

  // SHA3 intrinsic taking 2 vector arguments (v2i64 each).
  class Crypto_SHA3_2Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
               [IntrNoMem]>;

  // SHA3 intrinsic taking 2 vector arguments and 1 immediate (e.g. XAR's
  // rotate amount).
  class Crypto_SHA3_2ArgImm_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i64_ty],
               [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  // SM3 intrinsic taking 3 vectors (v4i32 each).
  class Crypto_SM3_3Vector_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SM3 intrinsic taking 3 vectors plus an immediate lane index.
  class Crypto_SM3_3VectorIndexed_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i64_ty],
                [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  // SM4 intrinsic taking 2 vectors (v4i32 each).
  class Crypto_SM4_2Vector_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
}
779
// AES
def int_aarch64_crypto_aese   : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesd   : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesmc  : Crypto_AES_Data_Intrinsic;
def int_aarch64_crypto_aesimc : Crypto_AES_Data_Intrinsic;

// SHA1
def int_aarch64_crypto_sha1c  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1p  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1m  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1h  : Crypto_SHA_1Hash_Intrinsic;

// SHA1 schedule update instructions.
def int_aarch64_crypto_sha1su0 : Crypto_SHA_12Schedule_Intrinsic;
def int_aarch64_crypto_sha1su1 : Crypto_SHA_8Schedule_Intrinsic;

// SHA256
def int_aarch64_crypto_sha256h   : Crypto_SHA_8Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha256h2  : Crypto_SHA_8Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha256su0 : Crypto_SHA_8Schedule_Intrinsic;
def int_aarch64_crypto_sha256su1 : Crypto_SHA_12Schedule_Intrinsic;

// SHA3
def int_aarch64_crypto_eor3s : Crypto_SHA3_3Arg_Intrinsic;
def int_aarch64_crypto_eor3u : Crypto_SHA3_3Arg_Intrinsic;
def int_aarch64_crypto_bcaxs : Crypto_SHA3_3Arg_Intrinsic;
def int_aarch64_crypto_bcaxu : Crypto_SHA3_3Arg_Intrinsic;
def int_aarch64_crypto_rax1 : Crypto_SHA3_2Arg_Intrinsic;
def int_aarch64_crypto_xar : Crypto_SHA3_2ArgImm_Intrinsic;

// SHA512
def int_aarch64_crypto_sha512h : Crypto_SHA512_3Arg_Intrinsic;
def int_aarch64_crypto_sha512h2 : Crypto_SHA512_3Arg_Intrinsic;
def int_aarch64_crypto_sha512su0 : Crypto_SHA512_2Arg_Intrinsic;
def int_aarch64_crypto_sha512su1 : Crypto_SHA512_3Arg_Intrinsic;

// SM3 & SM4
def int_aarch64_crypto_sm3partw1 : Crypto_SM3_3Vector_Intrinsic;
def int_aarch64_crypto_sm3partw2 : Crypto_SM3_3Vector_Intrinsic;
def int_aarch64_crypto_sm3ss1    : Crypto_SM3_3Vector_Intrinsic;
def int_aarch64_crypto_sm3tt1a   : Crypto_SM3_3VectorIndexed_Intrinsic;
def int_aarch64_crypto_sm3tt1b   : Crypto_SM3_3VectorIndexed_Intrinsic;
def int_aarch64_crypto_sm3tt2a   : Crypto_SM3_3VectorIndexed_Intrinsic;
def int_aarch64_crypto_sm3tt2b   : Crypto_SM3_3VectorIndexed_Intrinsic;
def int_aarch64_crypto_sm4e      : Crypto_SM4_2Vector_Intrinsic;
def int_aarch64_crypto_sm4ekey   : Crypto_SM4_2Vector_Intrinsic;
825
//===----------------------------------------------------------------------===//
// CRC32
//
// All forms accumulate into an i32 CRC; the second operand carries the data
// and is i32 for the b/h/w forms and i64 for the x forms. The 'c' variants
// are the CRC32C (Castagnoli polynomial) instructions.

let TargetPrefix = "aarch64" in {

def int_aarch64_crc32b  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cb : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32h  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32ch : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32w  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cw : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32x  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cx : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
    [IntrNoMem]>;
}
848
//===----------------------------------------------------------------------===//
// Memory Tagging Extensions (MTE) Intrinsics
let TargetPrefix = "aarch64" in {
// irg: IntrNoMem + IntrHasSideEffects prevents CSE/DCE of the (random) tag
// generation while keeping it from aliasing IR memory.
def int_aarch64_irg   : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem, IntrHasSideEffects]>;
def int_aarch64_addg  : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem]>;
def int_aarch64_gmi   : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem]>;
// ldg/stg read/write allocation tags, modeled as plain memory reads/writes.
def int_aarch64_ldg   : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty],
    [IntrReadMem]>;
def int_aarch64_stg   : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
    [IntrWriteMem]>;
def int_aarch64_subp :  DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_ptr_ty],
    [IntrNoMem]>;

// The following are codegen-only intrinsics for stack instrumentation.

// Generate a randomly tagged stack base pointer.
def int_aarch64_irg_sp   : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_i64_ty],
    [IntrNoMem, IntrHasSideEffects]>;

// Transfer pointer tag with offset.
// ptr1 = tagp(ptr0, baseptr, tag_offset) returns a pointer where
// * address is the address in ptr0
// * tag is a function of (tag in baseptr, tag_offset).
// ** Beware, this is not the same function as implemented by the ADDG instruction!
//    Backend optimizations may change tag_offset; the only guarantee is that calls
//    to tagp with the same pair of (baseptr, tag_offset) will produce pointers
//    with the same tag value, assuming the set of excluded tags has not changed.
// Address bits in baseptr and tag bits in ptr0 are ignored.
// When offset between ptr0 and baseptr is a compile time constant, this can be emitted as
//   ADDG ptr1, baseptr, (ptr0 - baseptr), tag_offset
// It is intended that ptr0 is an alloca address, and baseptr is the direct output of llvm.aarch64.irg.sp.
def int_aarch64_tagp : DefaultAttrsIntrinsic<[llvm_anyptr_ty], [LLVMMatchType<0>, llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem, ImmArg<ArgIndex<2>>]>;

// Update allocation tags for the memory range to match the tag in the pointer argument.
def int_aarch64_settag  : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;

// Update allocation tags for the memory range to match the tag in the pointer argument,
// and set memory contents to zero.
def int_aarch64_settag_zero  : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;

// Update allocation tags for 16-aligned, 16-sized memory region, and store a pair of 8-byte values.
def int_aarch64_stgp  : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;
}
899
// Transactional Memory Extension (TME) Intrinsics
let TargetPrefix = "aarch64" in {
// tstart returns 0 on successful transaction start, non-zero on failure.
// None of the TME intrinsics are IntrNoMem: they must order against memory.
def int_aarch64_tstart  : GCCBuiltin<"__builtin_arm_tstart">,
                         Intrinsic<[llvm_i64_ty], [], [IntrWillReturn]>;

def int_aarch64_tcommit : GCCBuiltin<"__builtin_arm_tcommit">, Intrinsic<[], [], [IntrWillReturn]>;

// tcancel takes an immediate cancellation reason.
def int_aarch64_tcancel : GCCBuiltin<"__builtin_arm_tcancel">,
                          Intrinsic<[], [llvm_i64_ty], [IntrWillReturn, ImmArg<ArgIndex<0>>]>;

def int_aarch64_ttest   : GCCBuiltin<"__builtin_arm_ttest">,
                          Intrinsic<[llvm_i64_ty], [],
                                    [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;

// Armv8.7-A load/store 64-byte intrinsics: the 64-byte block is modeled as
// eight i64 values; the st64bv/st64bv0 forms also return a status in i64.
defvar data512 = !listsplat(llvm_i64_ty, 8);
def int_aarch64_ld64b: Intrinsic<data512, [llvm_ptr_ty]>;
def int_aarch64_st64b: Intrinsic<[], !listconcat([llvm_ptr_ty], data512)>;
def int_aarch64_st64bv: Intrinsic<[llvm_i64_ty], !listconcat([llvm_ptr_ty], data512)>;
def int_aarch64_st64bv0: Intrinsic<[llvm_i64_ty], !listconcat([llvm_ptr_ty], data512)>;

}
922
// Scalable (nxv = "n x vscale") vector types used by the SVE intrinsic
// signatures below.
def llvm_nxv2i1_ty  : LLVMType<nxv2i1>;
def llvm_nxv4i1_ty  : LLVMType<nxv4i1>;
def llvm_nxv8i1_ty  : LLVMType<nxv8i1>;
def llvm_nxv16i1_ty : LLVMType<nxv16i1>;
def llvm_nxv16i8_ty : LLVMType<nxv16i8>;
def llvm_nxv4i32_ty : LLVMType<nxv4i32>;
def llvm_nxv2i64_ty : LLVMType<nxv2i64>;
def llvm_nxv8f16_ty : LLVMType<nxv8f16>;
def llvm_nxv8bf16_ty : LLVMType<nxv8bf16>;
def llvm_nxv4f32_ty : LLVMType<nxv4f32>;
def llvm_nxv2f64_ty : LLVMType<nxv2f64>;
934
935 let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
936
937   class AdvSIMD_SVE_Create_2Vector_Tuple
938     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
939                 [llvm_anyvector_ty, LLVMMatchType<1>],
940                 [IntrReadMem]>;
941
942   class AdvSIMD_SVE_Create_3Vector_Tuple
943     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
944                 [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>],
945                 [IntrReadMem]>;
946
947   class AdvSIMD_SVE_Create_4Vector_Tuple
948     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
949                 [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>,
950                  LLVMMatchType<1>],
951                 [IntrReadMem]>;
952
953   class AdvSIMD_SVE_Set_Vector_Tuple
954     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
955                 [LLVMMatchType<0>, llvm_i32_ty, llvm_anyvector_ty],
956                 [IntrReadMem, ImmArg<ArgIndex<1>>]>;
957
958   class AdvSIMD_SVE_Get_Vector_Tuple
959     : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty],
960                 [IntrReadMem, IntrArgMemOnly, ImmArg<ArgIndex<1>>]>;
961
962   class AdvSIMD_ManyVec_PredLoad_Intrinsic
963     : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMPointerToElt<0>],
964                 [IntrReadMem, IntrArgMemOnly]>;
965
966   class AdvSIMD_1Vec_PredLoad_Intrinsic
967     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
968                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
969                  LLVMPointerToElt<0>],
970                 [IntrReadMem, IntrArgMemOnly]>;
971
972   class AdvSIMD_2Vec_PredLoad_Intrinsic
973     : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
974                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
975                  LLVMPointerToElt<0>],
976                 [IntrReadMem, IntrArgMemOnly]>;
977
978   class AdvSIMD_3Vec_PredLoad_Intrinsic
979     : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
980                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
981                  LLVMPointerToElt<0>],
982                 [IntrReadMem, IntrArgMemOnly]>;
983
984   class AdvSIMD_4Vec_PredLoad_Intrinsic
985     : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>,
986                  LLVMMatchType<0>],
987                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
988                  LLVMPointerToElt<0>],
989                 [IntrReadMem, IntrArgMemOnly]>;
990
991   class AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic
992     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
993                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
994                  LLVMPointerToElt<0>],
995                 [IntrInaccessibleMemOrArgMemOnly]>;
996
997   class AdvSIMD_1Vec_PredStore_Intrinsic
998     : DefaultAttrsIntrinsic<[],
999                 [llvm_anyvector_ty,
1000                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1001                  LLVMPointerToElt<0>],
1002                 [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
1003
1004   class AdvSIMD_2Vec_PredStore_Intrinsic
1005       : DefaultAttrsIntrinsic<[],
1006                   [llvm_anyvector_ty, LLVMMatchType<0>,
1007                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerToElt<0>],
1008                   [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
1009
1010   class AdvSIMD_3Vec_PredStore_Intrinsic
1011       : DefaultAttrsIntrinsic<[],
1012                   [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>,
1013                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerToElt<0>],
1014                   [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
1015
1016   class AdvSIMD_4Vec_PredStore_Intrinsic
1017       : DefaultAttrsIntrinsic<[],
1018                   [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>,
1019                    LLVMMatchType<0>,
1020                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMPointerToElt<0>],
1021                   [IntrArgMemOnly, NoCapture<ArgIndex<5>>]>;
1022
1023   class AdvSIMD_SVE_Index_Intrinsic
1024     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1025                 [LLVMVectorElementType<0>,
1026                  LLVMVectorElementType<0>],
1027                 [IntrNoMem]>;
1028
1029   class AdvSIMD_Merged1VectorArg_Intrinsic
1030     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1031                 [LLVMMatchType<0>,
1032                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1033                  LLVMMatchType<0>],
1034                 [IntrNoMem]>;
1035
1036   class AdvSIMD_2VectorArgIndexed_Intrinsic
1037     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1038                 [LLVMMatchType<0>,
1039                  LLVMMatchType<0>,
1040                  llvm_i32_ty],
1041                 [IntrNoMem, ImmArg<ArgIndex<2>>]>;
1042
1043   class AdvSIMD_3VectorArgIndexed_Intrinsic
1044     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1045                 [LLVMMatchType<0>,
1046                  LLVMMatchType<0>,
1047                  LLVMMatchType<0>,
1048                  llvm_i32_ty],
1049                 [IntrNoMem, ImmArg<ArgIndex<3>>]>;
1050
1051   class AdvSIMD_Pred1VectorArg_Intrinsic
1052     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1053                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1054                  LLVMMatchType<0>],
1055                 [IntrNoMem]>;
1056
1057   class AdvSIMD_Pred2VectorArg_Intrinsic
1058     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1059                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1060                  LLVMMatchType<0>,
1061                  LLVMMatchType<0>],
1062                 [IntrNoMem]>;
1063
1064   class AdvSIMD_Pred3VectorArg_Intrinsic
1065     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1066                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1067                  LLVMMatchType<0>,
1068                  LLVMMatchType<0>,
1069                  LLVMMatchType<0>],
1070                 [IntrNoMem]>;
1071
1072   class AdvSIMD_SVE_Compare_Intrinsic
1073     : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
1074                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1075                  llvm_anyvector_ty,
1076                  LLVMMatchType<0>],
1077                 [IntrNoMem]>;
1078
1079   class AdvSIMD_SVE_CompareWide_Intrinsic
1080     : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
1081                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1082                  llvm_anyvector_ty,
1083                  llvm_nxv2i64_ty],
1084                 [IntrNoMem]>;
1085
1086   class AdvSIMD_SVE_Saturating_Intrinsic
1087     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1088                 [LLVMMatchType<0>,
1089                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
1090                 [IntrNoMem]>;
1091
1092   class AdvSIMD_SVE_SaturatingWithPattern_Intrinsic
1093     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1094                 [LLVMMatchType<0>,
1095                  llvm_i32_ty,
1096                  llvm_i32_ty],
1097                 [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
1098
1099   class AdvSIMD_SVE_Saturating_N_Intrinsic<LLVMType T>
1100     : DefaultAttrsIntrinsic<[T],
1101                 [T, llvm_anyvector_ty],
1102                 [IntrNoMem]>;
1103
1104   class AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<LLVMType T>
1105     : DefaultAttrsIntrinsic<[T],
1106                 [T, llvm_i32_ty, llvm_i32_ty],
1107                 [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
1108
1109   class AdvSIMD_SVE_CNT_Intrinsic
1110     : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
1111                 [LLVMVectorOfBitcastsToInt<0>,
1112                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1113                  llvm_anyvector_ty],
1114                 [IntrNoMem]>;
1115
1116   class AdvSIMD_SVE_ReduceWithInit_Intrinsic
1117     : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
1118                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1119                  LLVMVectorElementType<0>,
1120                  llvm_anyvector_ty],
1121                 [IntrNoMem]>;
1122
1123   class AdvSIMD_SVE_ShiftByImm_Intrinsic
1124     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1125                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1126                  LLVMMatchType<0>,
1127                  llvm_i32_ty],
1128                 [IntrNoMem, ImmArg<ArgIndex<2>>]>;
1129
1130   class AdvSIMD_SVE_ShiftWide_Intrinsic
1131     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1132                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1133                  LLVMMatchType<0>,
1134                  llvm_nxv2i64_ty],
1135                 [IntrNoMem]>;
1136
1137   class AdvSIMD_SVE_Unpack_Intrinsic
1138     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1139                [LLVMSubdivide2VectorType<0>],
1140                [IntrNoMem]>;
1141
1142   class AdvSIMD_SVE_CADD_Intrinsic
1143     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1144                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1145                  LLVMMatchType<0>,
1146                  LLVMMatchType<0>,
1147                  llvm_i32_ty],
1148                 [IntrNoMem, ImmArg<ArgIndex<3>>]>;
1149
1150   class AdvSIMD_SVE_CMLA_Intrinsic
1151     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1152                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1153                  LLVMMatchType<0>,
1154                  LLVMMatchType<0>,
1155                  LLVMMatchType<0>,
1156                  llvm_i32_ty],
1157                 [IntrNoMem, ImmArg<ArgIndex<4>>]>;
1158
1159   class AdvSIMD_SVE_CMLA_LANE_Intrinsic
1160     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1161                 [LLVMMatchType<0>,
1162                  LLVMMatchType<0>,
1163                  LLVMMatchType<0>,
1164                  llvm_i32_ty,
1165                  llvm_i32_ty],
1166                 [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
1167
1168   class AdvSIMD_SVE_DUP_Intrinsic
1169     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1170                 [LLVMMatchType<0>,
1171                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1172                  LLVMVectorElementType<0>],
1173                 [IntrNoMem]>;
1174
1175   class AdvSIMD_SVE_DUP_Unpred_Intrinsic
1176     : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMVectorElementType<0>],
1177                 [IntrNoMem]>;
1178
1179   class AdvSIMD_SVE_DUPQ_Intrinsic
1180     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1181                 [LLVMMatchType<0>,
1182                  llvm_i64_ty],
1183                 [IntrNoMem]>;
1184
1185   class AdvSIMD_SVE_EXPA_Intrinsic
1186     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1187                 [LLVMVectorOfBitcastsToInt<0>],
1188                 [IntrNoMem]>;
1189
1190   class AdvSIMD_SVE_FCVT_Intrinsic
1191     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1192                 [LLVMMatchType<0>,
1193                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1194                  llvm_anyvector_ty],
1195                 [IntrNoMem]>;
1196
1197   class AdvSIMD_SVE_FCVTZS_Intrinsic
1198     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1199                 [LLVMVectorOfBitcastsToInt<0>,
1200                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1201                  llvm_anyvector_ty],
1202                 [IntrNoMem]>;
1203
1204   class AdvSIMD_SVE_INSR_Intrinsic
1205     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1206                 [LLVMMatchType<0>,
1207                  LLVMVectorElementType<0>],
1208                 [IntrNoMem]>;
1209
1210   class AdvSIMD_SVE_PTRUE_Intrinsic
1211     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1212                 [llvm_i32_ty],
1213                 [IntrNoMem, ImmArg<ArgIndex<0>>]>;
1214
1215   class AdvSIMD_SVE_PUNPKHI_Intrinsic
1216     : DefaultAttrsIntrinsic<[LLVMHalfElementsVectorType<0>],
1217                 [llvm_anyvector_ty],
1218                 [IntrNoMem]>;
1219
1220   class AdvSIMD_SVE_SCALE_Intrinsic
1221     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1222                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1223                  LLVMMatchType<0>,
1224                  LLVMVectorOfBitcastsToInt<0>],
1225                 [IntrNoMem]>;
1226
1227   class AdvSIMD_SVE_SCVTF_Intrinsic
1228     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1229                 [LLVMMatchType<0>,
1230                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1231                  llvm_anyvector_ty],
1232                 [IntrNoMem]>;
1233
1234   class AdvSIMD_SVE_TSMUL_Intrinsic
1235     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1236                 [LLVMMatchType<0>,
1237                  LLVMVectorOfBitcastsToInt<0>],
1238                 [IntrNoMem]>;
1239
1240   class AdvSIMD_SVE_CNTB_Intrinsic
1241     : DefaultAttrsIntrinsic<[llvm_i64_ty],
1242                 [llvm_i32_ty],
1243                 [IntrNoMem, ImmArg<ArgIndex<0>>]>;
1244
1245   class AdvSIMD_SVE_CNTP_Intrinsic
1246     : DefaultAttrsIntrinsic<[llvm_i64_ty],
1247                 [llvm_anyvector_ty, LLVMMatchType<0>],
1248                 [IntrNoMem]>;
1249
1250   class AdvSIMD_SVE_DOT_Intrinsic
1251     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1252                 [LLVMMatchType<0>,
1253                  LLVMSubdivide4VectorType<0>,
1254                  LLVMSubdivide4VectorType<0>],
1255                 [IntrNoMem]>;
1256
1257   class AdvSIMD_SVE_DOT_Indexed_Intrinsic
1258     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1259                 [LLVMMatchType<0>,
1260                  LLVMSubdivide4VectorType<0>,
1261                  LLVMSubdivide4VectorType<0>,
1262                  llvm_i32_ty],
1263                 [IntrNoMem, ImmArg<ArgIndex<3>>]>;
1264
1265   class AdvSIMD_SVE_PTEST_Intrinsic
1266     : DefaultAttrsIntrinsic<[llvm_i1_ty],
1267                 [llvm_anyvector_ty,
1268                  LLVMMatchType<0>],
1269                 [IntrNoMem]>;
1270
1271   class AdvSIMD_SVE_TBL_Intrinsic
1272     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1273                 [LLVMMatchType<0>,
1274                  LLVMVectorOfBitcastsToInt<0>],
1275                 [IntrNoMem]>;
1276
1277   class AdvSIMD_SVE2_TBX_Intrinsic
1278     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1279                 [LLVMMatchType<0>,
1280                  LLVMMatchType<0>,
1281                  LLVMVectorOfBitcastsToInt<0>],
1282                 [IntrNoMem]>;
1283
1284   class SVE2_1VectorArg_Long_Intrinsic
1285     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1286                 [LLVMSubdivide2VectorType<0>,
1287                  llvm_i32_ty],
1288                 [IntrNoMem, ImmArg<ArgIndex<1>>]>;
1289
1290   class SVE2_2VectorArg_Long_Intrinsic
1291     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1292                 [LLVMSubdivide2VectorType<0>,
1293                  LLVMSubdivide2VectorType<0>],
1294                 [IntrNoMem]>;
1295
1296   class SVE2_2VectorArgIndexed_Long_Intrinsic
1297   : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1298               [LLVMSubdivide2VectorType<0>,
1299                LLVMSubdivide2VectorType<0>,
1300                llvm_i32_ty],
1301               [IntrNoMem, ImmArg<ArgIndex<2>>]>;
1302
1303   class SVE2_2VectorArg_Wide_Intrinsic
1304     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1305                 [LLVMMatchType<0>,
1306                  LLVMSubdivide2VectorType<0>],
1307                 [IntrNoMem]>;
1308
1309   class SVE2_2VectorArg_Pred_Long_Intrinsic
1310     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1311                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1312                  LLVMMatchType<0>,
1313                  LLVMSubdivide2VectorType<0>],
1314                 [IntrNoMem]>;
1315
1316   class SVE2_3VectorArg_Long_Intrinsic
1317     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1318                 [LLVMMatchType<0>,
1319                  LLVMSubdivide2VectorType<0>,
1320                  LLVMSubdivide2VectorType<0>],
1321                 [IntrNoMem]>;
1322
1323   class SVE2_3VectorArgIndexed_Long_Intrinsic
1324     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1325                 [LLVMMatchType<0>,
1326                  LLVMSubdivide2VectorType<0>,
1327                  LLVMSubdivide2VectorType<0>,
1328                  llvm_i32_ty],
1329                 [IntrNoMem, ImmArg<ArgIndex<3>>]>;
1330
1331   class SVE2_1VectorArg_Narrowing_Intrinsic
1332     : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
1333                 [llvm_anyvector_ty],
1334                 [IntrNoMem]>;
1335
1336   class SVE2_Merged1VectorArg_Narrowing_Intrinsic
1337     : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
1338                 [LLVMSubdivide2VectorType<0>,
1339                  llvm_anyvector_ty],
1340                 [IntrNoMem]>;
1341   class SVE2_2VectorArg_Narrowing_Intrinsic
1342       : DefaultAttrsIntrinsic<
1343             [LLVMSubdivide2VectorType<0>],
1344             [llvm_anyvector_ty, LLVMMatchType<0>],
1345             [IntrNoMem]>;
1346
1347   class SVE2_Merged2VectorArg_Narrowing_Intrinsic
1348       : DefaultAttrsIntrinsic<
1349             [LLVMSubdivide2VectorType<0>],
1350             [LLVMSubdivide2VectorType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
1351             [IntrNoMem]>;
1352
1353   class SVE2_1VectorArg_Imm_Narrowing_Intrinsic
1354       : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
1355                   [llvm_anyvector_ty, llvm_i32_ty],
1356                   [IntrNoMem, ImmArg<ArgIndex<1>>]>;
1357
// Merged variant of the immediate-narrowing op: operand 0 is the existing
// narrow vector, operand 1 the wide input, operand 2 the immediate.
1358   class SVE2_2VectorArg_Imm_Narrowing_Intrinsic
1359       : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
1360                   [LLVMSubdivide2VectorType<0>, llvm_anyvector_ty,
1361                    llvm_i32_ty],
1362                   [IntrNoMem, ImmArg<ArgIndex<2>>]>;
1363
// Takes two pointers of the same (overloaded) pointee type and produces a
// predicate-style result vector.
// NOTE(review): no memory attribute (e.g. IntrNoMem) is listed here, unlike
// the surrounding classes — confirm whether that is intentional.
1364   class SVE2_CONFLICT_DETECT_Intrinsic
1365     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1366                 [LLVMAnyPointerType<llvm_any_ty>,
1367                  LLVMMatchType<1>]>;
1368
// Structurally identical to SVE2_3VectorArgIndexed_Long_Intrinsic above:
// full-width accumulator, two half-width operands, immediate lane index.
// Kept as a distinct class name for users with different semantics.
1369   class SVE2_3VectorArg_Indexed_Intrinsic
1370     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1371                 [LLVMMatchType<0>,
1372                  LLVMSubdivide2VectorType<0>,
1373                  LLVMSubdivide2VectorType<0>,
1374                  llvm_i32_ty],
1375                 [IntrNoMem, ImmArg<ArgIndex<3>>]>;
1376
// Complex dot product by lane: accumulator at full element width, two
// quarter-width operands, plus two immediates (operands 3 and 4 — lane
// index and rotation, both required to be compile-time constants).
1377   class AdvSIMD_SVE_CDOT_LANE_Intrinsic
1378     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1379                 [LLVMMatchType<0>,
1380                  LLVMSubdivide4VectorType<0>,
1381                  LLVMSubdivide4VectorType<0>,
1382                  llvm_i32_ty,
1383                  llvm_i32_ty],
1384                 [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
1385
1386   // NOTE: There is no relationship between these intrinsics beyond an attempt
1387   // to reuse currently identical class definitions.
1388   // If the parent classes ever need to diverge, give these standalone
1389   // definitions rather than repurposing the shared parent.
1388   class AdvSIMD_SVE_LOGB_Intrinsic  : AdvSIMD_SVE_CNT_Intrinsic;
1389   class AdvSIMD_SVE2_CADD_Intrinsic : AdvSIMD_2VectorArgIndexed_Intrinsic;
1390   class AdvSIMD_SVE2_CMLA_Intrinsic : AdvSIMD_3VectorArgIndexed_Intrinsic;
1391
1392   // This class of intrinsics is not intended to be useful within LLVM IR but
1393   // is instead here to support some of the more rigid parts of the ACLE.
1394   // OUT is both the result type and the merge/passthrough operand type,
1395   // PRED the governing predicate type, IN the source vector type.
1394   class Builtin_SVCVT<LLVMType OUT, LLVMType PRED, LLVMType IN>
1395       : DefaultAttrsIntrinsic<[OUT], [OUT, PRED, IN], [IntrNoMem]>;
1396 }
1397
1398 //===----------------------------------------------------------------------===//
1399 // SVE
1400
1401 let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
1402
// Predicated horizontal reduction: predicate + vector in, one scalar of the
// vector's element type out.
1403 class AdvSIMD_SVE_Reduce_Intrinsic
1404   : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
1405               [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1406                llvm_anyvector_ty],
1407               [IntrNoMem]>;
1408
// Add-across reduction: like AdvSIMD_SVE_Reduce_Intrinsic but the scalar
// result is always i64, regardless of the input element type.
1409 class AdvSIMD_SVE_SADDV_Reduce_Intrinsic
1410   : DefaultAttrsIntrinsic<[llvm_i64_ty],
1411               [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1412                llvm_anyvector_ty],
1413               [IntrNoMem]>;
1414
// While-style predicate generation: two scalar integers of the same
// (overloaded) type in, predicate vector out.
1415 class AdvSIMD_SVE_WHILE_Intrinsic
1416     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1417                 [llvm_anyint_ty, LLVMMatchType<1>],
1418                 [IntrNoMem]>;
1419
// Gather load, scalar base + vector of 64-bit offsets:
// (predicate, pointer-to-element, offset vector) -> loaded vector.
1420 class AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic
1421     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1422                 [
1423                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1424                   LLVMPointerToElt<0>,
1425                   LLVMScalarOrSameVectorWidth<0, llvm_i64_ty>
1426                 ],
1427                 [IntrReadMem, IntrArgMemOnly]>;
1428
// Same shape as AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic, but uses
// IntrInaccessibleMemOrArgMemOnly because the FFR update is modelled as a
// write to inaccessible memory.
1429 class AdvSIMD_GatherLoad_SV_64b_Offsets_WriteFFR_Intrinsic
1430     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1431                 [
1432                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1433                   LLVMPointerToElt<0>,
1434                   LLVMScalarOrSameVectorWidth<0, llvm_i64_ty>
1435                 ],
1436                 [IntrInaccessibleMemOrArgMemOnly]>;
1437
// Gather load, scalar base + vector of 32-bit offsets.
1438 class AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic
1439     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1440                 [
1441                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1442                   LLVMPointerToElt<0>,
1443                   LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>
1444                 ],
1445                 [IntrReadMem, IntrArgMemOnly]>;
1446
// FFR-writing variant of the 32-bit-offset gather load (see the 64-bit
// WriteFFR class above for the rationale behind the memory attribute).
1447 class AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic
1448     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1449                 [
1450                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1451                   LLVMPointerToElt<0>,
1452                   LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>
1453                 ],
1454                 [IntrInaccessibleMemOrArgMemOnly]>;
1455
// Gather load, vector of base addresses + scalar i64 offset. Only
// IntrReadMem here: the addresses are vector elements, not a pointer
// argument, so IntrArgMemOnly does not apply.
1456 class AdvSIMD_GatherLoad_VS_Intrinsic
1457     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1458                 [
1459                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1460                   llvm_anyvector_ty,
1461                   llvm_i64_ty
1462                 ],
1463                 [IntrReadMem]>;
1464
// FFR-writing variant of the vector-base + scalar-offset gather load.
1465 class AdvSIMD_GatherLoad_VS_WriteFFR_Intrinsic
1466     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1467                 [
1468                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1469                   llvm_anyvector_ty,
1470                   llvm_i64_ty
1471                 ],
1472                 [IntrInaccessibleMemOrArgMemOnly]>;
1473
// Scatter store, scalar base + vector of 64-bit offsets:
// (data, predicate, pointer-to-element, offset vector), no result.
1474 class AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic
1475     : DefaultAttrsIntrinsic<[],
1476                [
1477                  llvm_anyvector_ty,
1478                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1479                  LLVMPointerToElt<0>,
1480                  LLVMScalarOrSameVectorWidth<0, llvm_i64_ty>
1481                ],
1482                [IntrWriteMem, IntrArgMemOnly]>;
1483
// Scatter store, scalar base + vector of 32-bit offsets.
1484 class AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic
1485     : DefaultAttrsIntrinsic<[],
1486                [
1487                  llvm_anyvector_ty,
1488                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1489                  LLVMPointerToElt<0>,
1490                  LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>
1491                ],
1492                [IntrWriteMem, IntrArgMemOnly]>;
1493
// Scatter store, vector of base addresses + scalar i64 offset. As with the
// VS gather load, only IntrWriteMem (addresses live in a vector operand).
1494 class AdvSIMD_ScatterStore_VS_Intrinsic
1495     : DefaultAttrsIntrinsic<[],
1496                [
1497                  llvm_anyvector_ty,
1498                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1499                  llvm_anyvector_ty, llvm_i64_ty
1500                ],
1501                [IntrWriteMem]>;
1502
1503
// Gather prefetch, scalar base + vector of offsets. The prefetch operation
// (Prfop) must be an immediate; the base pointer is not captured.
1504 class SVE_gather_prf_SV
1505     : DefaultAttrsIntrinsic<[],
1506                 [
1507                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, // Predicate
1508                   llvm_ptr_ty, // Base address
1509                   llvm_anyvector_ty, // Offsets
1510                   llvm_i32_ty // Prfop
1511                 ],
1512                 [IntrInaccessibleMemOrArgMemOnly, NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<3>>]>;
1513
// Gather prefetch, vector of base addresses + scalar offset; Prfop is an
// immediate (operand 3).
1514 class SVE_gather_prf_VS
1515     : DefaultAttrsIntrinsic<[],
1516                 [
1517                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, // Predicate
1518                   llvm_anyvector_ty, // Base addresses
1519                   llvm_i64_ty, // Scalar offset
1520                   llvm_i32_ty // Prfop
1521                 ],
1522                 [IntrInaccessibleMemOrArgMemOnly, ImmArg<ArgIndex<3>>]>;
1523
// Matrix-multiply-accumulate shape: full-width accumulator plus two
// quarter-element-width multiplicand vectors.
1524 class SVE_MatMul_Intrinsic
1525     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
1526                 [LLVMMatchType<0>, LLVMSubdivide4VectorType<0>, LLVMSubdivide4VectorType<0>],
1527                 [IntrNoMem]>;
1528
// BF16 op: f32 accumulator with two bf16 operands, fixed (non-overloaded)
// scalable types.
1529 class SVE_4Vec_BF16
1530     : DefaultAttrsIntrinsic<[llvm_nxv4f32_ty],
1531                 [llvm_nxv4f32_ty, llvm_nxv8bf16_ty, llvm_nxv8bf16_ty],
1532                 [IntrNoMem]>;
1533
// Indexed BF16 op: as SVE_4Vec_BF16 with a trailing immediate lane index
// (note the index is i64 here, unlike the i32 indices used elsewhere).
1534 class SVE_4Vec_BF16_Indexed
1535     : DefaultAttrsIntrinsic<[llvm_nxv4f32_ty],
1536                 [llvm_nxv4f32_ty, llvm_nxv8bf16_ty, llvm_nxv8bf16_ty, llvm_i64_ty],
1537                 [IntrNoMem, ImmArg<ArgIndex<3>>]>;
1538
1539 //
1540 // Vector tuple creation intrinsics (ACLE)
1541 //
1542
1543 def int_aarch64_sve_tuple_create2 : AdvSIMD_SVE_Create_2Vector_Tuple;
1544 def int_aarch64_sve_tuple_create3 : AdvSIMD_SVE_Create_3Vector_Tuple;
1545 def int_aarch64_sve_tuple_create4 : AdvSIMD_SVE_Create_4Vector_Tuple;
1546
1547 //
1548 // Vector tuple insertion/extraction intrinsics (ACLE)
1549 //
1550
1551 def int_aarch64_sve_tuple_get : AdvSIMD_SVE_Get_Vector_Tuple;
1552 def int_aarch64_sve_tuple_set : AdvSIMD_SVE_Set_Vector_Tuple;
1553
1554 //
1555 // Loads
1556 //
1557
1558 def int_aarch64_sve_ld1   : AdvSIMD_1Vec_PredLoad_Intrinsic;
1559
// NOTE(review): ld2/ld3/ld4 and the ld{2,3,4}_sret forms coexist; the _sret
// variants return each vector as a separate result — confirm against the
// lowering code which form new frontend code should prefer.
1560 def int_aarch64_sve_ld2 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
1561 def int_aarch64_sve_ld3 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
1562 def int_aarch64_sve_ld4 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
1563
1564 def int_aarch64_sve_ld2_sret : AdvSIMD_2Vec_PredLoad_Intrinsic;
1565 def int_aarch64_sve_ld3_sret : AdvSIMD_3Vec_PredLoad_Intrinsic;
1566 def int_aarch64_sve_ld4_sret : AdvSIMD_4Vec_PredLoad_Intrinsic;
1567
// ldnf1/ldff1 use the WriteFFR class: they update the first-fault register.
1568 def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
1569 def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic;
1570 def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic;
1571
1572 def int_aarch64_sve_ld1rq : AdvSIMD_1Vec_PredLoad_Intrinsic;
1573 def int_aarch64_sve_ld1ro : AdvSIMD_1Vec_PredLoad_Intrinsic;
1574
1575 //
1576 // Stores
1577 //
1578
1579 def int_aarch64_sve_st1  : AdvSIMD_1Vec_PredStore_Intrinsic;
1580 def int_aarch64_sve_st2  : AdvSIMD_2Vec_PredStore_Intrinsic;
1581 def int_aarch64_sve_st3  : AdvSIMD_3Vec_PredStore_Intrinsic;
1582 def int_aarch64_sve_st4  : AdvSIMD_4Vec_PredStore_Intrinsic;
1583
// Non-temporal store.
1584 def int_aarch64_sve_stnt1 : AdvSIMD_1Vec_PredStore_Intrinsic;
1585
1586 //
1587 // Prefetches
1588 //
1589
// Contiguous prefetch: (predicate, base pointer, immediate Prfop).
1590 def int_aarch64_sve_prf
1591   : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, llvm_ptr_ty, llvm_i32_ty],
1592                   [IntrArgMemOnly, ImmArg<ArgIndex<2>>]>;
1593
1594 // Scalar + 32-bit scaled offset vector, zero extend, packed and
1595 // unpacked.
1596 def int_aarch64_sve_prfb_gather_uxtw_index : SVE_gather_prf_SV;
1597 def int_aarch64_sve_prfh_gather_uxtw_index : SVE_gather_prf_SV;
1598 def int_aarch64_sve_prfw_gather_uxtw_index : SVE_gather_prf_SV;
1599 def int_aarch64_sve_prfd_gather_uxtw_index : SVE_gather_prf_SV;
1600
1601 // Scalar + 32-bit scaled offset vector, sign extend, packed and
1602 // unpacked.
1603 def int_aarch64_sve_prfb_gather_sxtw_index : SVE_gather_prf_SV;
1604 def int_aarch64_sve_prfw_gather_sxtw_index : SVE_gather_prf_SV;
1605 def int_aarch64_sve_prfh_gather_sxtw_index : SVE_gather_prf_SV;
1606 def int_aarch64_sve_prfd_gather_sxtw_index : SVE_gather_prf_SV;
1607
1608 // Scalar + 64-bit scaled offset vector.
1609 def int_aarch64_sve_prfb_gather_index : SVE_gather_prf_SV;
1610 def int_aarch64_sve_prfh_gather_index : SVE_gather_prf_SV;
1611 def int_aarch64_sve_prfw_gather_index : SVE_gather_prf_SV;
1612 def int_aarch64_sve_prfd_gather_index : SVE_gather_prf_SV;
1613
1614 // Vector + scalar.
1615 def int_aarch64_sve_prfb_gather_scalar_offset : SVE_gather_prf_VS;
1616 def int_aarch64_sve_prfh_gather_scalar_offset : SVE_gather_prf_VS;
1617 def int_aarch64_sve_prfw_gather_scalar_offset : SVE_gather_prf_VS;
1618 def int_aarch64_sve_prfd_gather_scalar_offset : SVE_gather_prf_VS;
1619
1620 //
1621 // Scalar to vector operations
1622 //
1623
// dup is the predicated (merging) form; dup_x is unpredicated.
1624 def int_aarch64_sve_dup : AdvSIMD_SVE_DUP_Intrinsic;
1625 def int_aarch64_sve_dup_x : AdvSIMD_SVE_DUP_Unpred_Intrinsic;
1626
1627 def int_aarch64_sve_index : AdvSIMD_SVE_Index_Intrinsic;
1628
1629 //
1630 // Address calculation
1631 //
1632
// adr{b,h,w,d}: vector address computation at the four element sizes.
1633 def int_aarch64_sve_adrb : AdvSIMD_2VectorArg_Intrinsic;
1634 def int_aarch64_sve_adrh : AdvSIMD_2VectorArg_Intrinsic;
1635 def int_aarch64_sve_adrw : AdvSIMD_2VectorArg_Intrinsic;
1636 def int_aarch64_sve_adrd : AdvSIMD_2VectorArg_Intrinsic;
1637
1638 //
1639 // Integer arithmetic
1640 //
1641
1642 def int_aarch64_sve_add   : AdvSIMD_Pred2VectorArg_Intrinsic;
1643 def int_aarch64_sve_sub   : AdvSIMD_Pred2VectorArg_Intrinsic;
1644 def int_aarch64_sve_subr  : AdvSIMD_Pred2VectorArg_Intrinsic;
1645
// pmul is unpredicated, unlike the other multiplies below.
1646 def int_aarch64_sve_pmul       : AdvSIMD_2VectorArg_Intrinsic;
1647
1648 def int_aarch64_sve_mul        : AdvSIMD_Pred2VectorArg_Intrinsic;
1649 def int_aarch64_sve_mul_lane   : AdvSIMD_2VectorArgIndexed_Intrinsic;
1650 def int_aarch64_sve_smulh      : AdvSIMD_Pred2VectorArg_Intrinsic;
1651 def int_aarch64_sve_umulh      : AdvSIMD_Pred2VectorArg_Intrinsic;
1652
1653 def int_aarch64_sve_sdiv       : AdvSIMD_Pred2VectorArg_Intrinsic;
1654 def int_aarch64_sve_udiv       : AdvSIMD_Pred2VectorArg_Intrinsic;
1655 def int_aarch64_sve_sdivr      : AdvSIMD_Pred2VectorArg_Intrinsic;
1656 def int_aarch64_sve_udivr      : AdvSIMD_Pred2VectorArg_Intrinsic;
1657
1658 def int_aarch64_sve_smax       : AdvSIMD_Pred2VectorArg_Intrinsic;
1659 def int_aarch64_sve_umax       : AdvSIMD_Pred2VectorArg_Intrinsic;
1660 def int_aarch64_sve_smin       : AdvSIMD_Pred2VectorArg_Intrinsic;
1661 def int_aarch64_sve_umin       : AdvSIMD_Pred2VectorArg_Intrinsic;
1662 def int_aarch64_sve_sabd       : AdvSIMD_Pred2VectorArg_Intrinsic;
1663 def int_aarch64_sve_uabd       : AdvSIMD_Pred2VectorArg_Intrinsic;
1664
1665 def int_aarch64_sve_mad        : AdvSIMD_Pred3VectorArg_Intrinsic;
1666 def int_aarch64_sve_msb        : AdvSIMD_Pred3VectorArg_Intrinsic;
1667 def int_aarch64_sve_mla        : AdvSIMD_Pred3VectorArg_Intrinsic;
1668 def int_aarch64_sve_mla_lane   : AdvSIMD_3VectorArgIndexed_Intrinsic;
1669 def int_aarch64_sve_mls        : AdvSIMD_Pred3VectorArg_Intrinsic;
1670 def int_aarch64_sve_mls_lane   : AdvSIMD_3VectorArgIndexed_Intrinsic;
1671
// Add-across reductions always produce an i64 scalar (SADDV class).
1672 def int_aarch64_sve_saddv      : AdvSIMD_SVE_SADDV_Reduce_Intrinsic;
1673 def int_aarch64_sve_uaddv      : AdvSIMD_SVE_SADDV_Reduce_Intrinsic;
1674
1675 def int_aarch64_sve_smaxv      : AdvSIMD_SVE_Reduce_Intrinsic;
1676 def int_aarch64_sve_umaxv      : AdvSIMD_SVE_Reduce_Intrinsic;
1677 def int_aarch64_sve_sminv      : AdvSIMD_SVE_Reduce_Intrinsic;
1678 def int_aarch64_sve_uminv      : AdvSIMD_SVE_Reduce_Intrinsic;
1679
1680 def int_aarch64_sve_orv        : AdvSIMD_SVE_Reduce_Intrinsic;
1681 def int_aarch64_sve_eorv       : AdvSIMD_SVE_Reduce_Intrinsic;
1682 def int_aarch64_sve_andv       : AdvSIMD_SVE_Reduce_Intrinsic;
1683
1684 def int_aarch64_sve_abs : AdvSIMD_Merged1VectorArg_Intrinsic;
1685 def int_aarch64_sve_neg : AdvSIMD_Merged1VectorArg_Intrinsic;
1686
1687 def int_aarch64_sve_sdot      : AdvSIMD_SVE_DOT_Intrinsic;
1688 def int_aarch64_sve_sdot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
1689
1690 def int_aarch64_sve_udot      : AdvSIMD_SVE_DOT_Intrinsic;
1691 def int_aarch64_sve_udot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
1692
// _x suffix: unpredicated forms.
1693 def int_aarch64_sve_sqadd_x   : AdvSIMD_2VectorArg_Intrinsic;
1694 def int_aarch64_sve_sqsub_x   : AdvSIMD_2VectorArg_Intrinsic;
1695 def int_aarch64_sve_uqadd_x   : AdvSIMD_2VectorArg_Intrinsic;
1696 def int_aarch64_sve_uqsub_x   : AdvSIMD_2VectorArg_Intrinsic;
1697
1698 // Shifts
1699
// _wide variants use the ShiftWide class (64-bit shift-amount elements);
// asrd takes its shift amount as an immediate (ShiftByImm class).
1700 def int_aarch64_sve_asr      : AdvSIMD_Pred2VectorArg_Intrinsic;
1701 def int_aarch64_sve_asr_wide : AdvSIMD_SVE_ShiftWide_Intrinsic;
1702 def int_aarch64_sve_asrd     : AdvSIMD_SVE_ShiftByImm_Intrinsic;
1703 def int_aarch64_sve_insr     : AdvSIMD_SVE_INSR_Intrinsic;
1704 def int_aarch64_sve_lsl      : AdvSIMD_Pred2VectorArg_Intrinsic;
1705 def int_aarch64_sve_lsl_wide : AdvSIMD_SVE_ShiftWide_Intrinsic;
1706 def int_aarch64_sve_lsr      : AdvSIMD_Pred2VectorArg_Intrinsic;
1707 def int_aarch64_sve_lsr_wide : AdvSIMD_SVE_ShiftWide_Intrinsic;
1708
1709 //
1710 // Integer comparisons
1711 //
1712
1713 def int_aarch64_sve_cmpeq : AdvSIMD_SVE_Compare_Intrinsic;
1714 def int_aarch64_sve_cmpge : AdvSIMD_SVE_Compare_Intrinsic;
1715 def int_aarch64_sve_cmpgt : AdvSIMD_SVE_Compare_Intrinsic;
1716 def int_aarch64_sve_cmphi : AdvSIMD_SVE_Compare_Intrinsic;
1717 def int_aarch64_sve_cmphs : AdvSIMD_SVE_Compare_Intrinsic;
1718 def int_aarch64_sve_cmpne : AdvSIMD_SVE_Compare_Intrinsic;
1719
// _wide forms compare against a wider (64-bit element) second operand.
1720 def int_aarch64_sve_cmpeq_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1721 def int_aarch64_sve_cmpge_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1722 def int_aarch64_sve_cmpgt_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1723 def int_aarch64_sve_cmphi_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1724 def int_aarch64_sve_cmphs_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1725 def int_aarch64_sve_cmple_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1726 def int_aarch64_sve_cmplo_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1727 def int_aarch64_sve_cmpls_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1728 def int_aarch64_sve_cmplt_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1729 def int_aarch64_sve_cmpne_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1730
1731 //
1732 // Counting bits
1733 //
1734
1735 def int_aarch64_sve_cls : AdvSIMD_Merged1VectorArg_Intrinsic;
1736 def int_aarch64_sve_clz : AdvSIMD_Merged1VectorArg_Intrinsic;
1737 def int_aarch64_sve_cnt : AdvSIMD_SVE_CNT_Intrinsic;
1738
1739 //
1740 // Counting elements
1741 //
1742
// cnt{b,h,w,d} count elements per pattern; cntp counts active predicate lanes.
1743 def int_aarch64_sve_cntb : AdvSIMD_SVE_CNTB_Intrinsic;
1744 def int_aarch64_sve_cnth : AdvSIMD_SVE_CNTB_Intrinsic;
1745 def int_aarch64_sve_cntw : AdvSIMD_SVE_CNTB_Intrinsic;
1746 def int_aarch64_sve_cntd : AdvSIMD_SVE_CNTB_Intrinsic;
1747
1748 def int_aarch64_sve_cntp : AdvSIMD_SVE_CNTP_Intrinsic;
1749
1750 //
1751 // FFR manipulation
1752 //
1753
// The first-fault register is modelled as inaccessible memory: reads carry
// IntrReadMem, writes IntrWriteMem, both with IntrInaccessibleMemOnly.
1754 def int_aarch64_sve_rdffr   : GCCBuiltin<"__builtin_sve_svrdffr">,   DefaultAttrsIntrinsic<[llvm_nxv16i1_ty], [], [IntrReadMem, IntrInaccessibleMemOnly]>;
1755 def int_aarch64_sve_rdffr_z : GCCBuiltin<"__builtin_sve_svrdffr_z">, DefaultAttrsIntrinsic<[llvm_nxv16i1_ty], [llvm_nxv16i1_ty], [IntrReadMem, IntrInaccessibleMemOnly]>;
1756 def int_aarch64_sve_setffr  : GCCBuiltin<"__builtin_sve_svsetffr">,  DefaultAttrsIntrinsic<[], [], [IntrWriteMem, IntrInaccessibleMemOnly]>;
1757 def int_aarch64_sve_wrffr   : GCCBuiltin<"__builtin_sve_svwrffr">,   DefaultAttrsIntrinsic<[], [llvm_nxv16i1_ty], [IntrWriteMem, IntrInaccessibleMemOnly]>;
1758
1759 //
1760 // Saturating scalar arithmetic
1761 //
1762
// Naming: {s,u}q{dec,inc}{b,h,w,d,p} — signed/unsigned saturating
// decrement/increment by element-count granule (b/h/w/d) or by active
// predicate lanes (p). The _n32/_n64 defs operate on i32/i64 scalars.
1763 def int_aarch64_sve_sqdech : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1764 def int_aarch64_sve_sqdecw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1765 def int_aarch64_sve_sqdecd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1766 def int_aarch64_sve_sqdecp : AdvSIMD_SVE_Saturating_Intrinsic;
1767
1768 def int_aarch64_sve_sqdecb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1769 def int_aarch64_sve_sqdecb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1770 def int_aarch64_sve_sqdech_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1771 def int_aarch64_sve_sqdech_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1772 def int_aarch64_sve_sqdecw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1773 def int_aarch64_sve_sqdecw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1774 def int_aarch64_sve_sqdecd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1775 def int_aarch64_sve_sqdecd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1776 def int_aarch64_sve_sqdecp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
1777 def int_aarch64_sve_sqdecp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;
1778
1779 def int_aarch64_sve_sqinch : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1780 def int_aarch64_sve_sqincw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1781 def int_aarch64_sve_sqincd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1782 def int_aarch64_sve_sqincp : AdvSIMD_SVE_Saturating_Intrinsic;
1783
1784 def int_aarch64_sve_sqincb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1785 def int_aarch64_sve_sqincb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1786 def int_aarch64_sve_sqinch_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1787 def int_aarch64_sve_sqinch_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1788 def int_aarch64_sve_sqincw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1789 def int_aarch64_sve_sqincw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1790 def int_aarch64_sve_sqincd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1791 def int_aarch64_sve_sqincd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1792 def int_aarch64_sve_sqincp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
1793 def int_aarch64_sve_sqincp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;
1794
1795 def int_aarch64_sve_uqdech : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1796 def int_aarch64_sve_uqdecw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1797 def int_aarch64_sve_uqdecd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1798 def int_aarch64_sve_uqdecp : AdvSIMD_SVE_Saturating_Intrinsic;
1799
1800 def int_aarch64_sve_uqdecb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1801 def int_aarch64_sve_uqdecb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1802 def int_aarch64_sve_uqdech_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1803 def int_aarch64_sve_uqdech_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1804 def int_aarch64_sve_uqdecw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1805 def int_aarch64_sve_uqdecw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1806 def int_aarch64_sve_uqdecd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1807 def int_aarch64_sve_uqdecd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1808 def int_aarch64_sve_uqdecp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
1809 def int_aarch64_sve_uqdecp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;
1810
1811 def int_aarch64_sve_uqinch : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1812 def int_aarch64_sve_uqincw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1813 def int_aarch64_sve_uqincd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1814 def int_aarch64_sve_uqincp : AdvSIMD_SVE_Saturating_Intrinsic;
1815
1816 def int_aarch64_sve_uqincb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1817 def int_aarch64_sve_uqincb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1818 def int_aarch64_sve_uqinch_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1819 def int_aarch64_sve_uqinch_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1820 def int_aarch64_sve_uqincw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1821 def int_aarch64_sve_uqincw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1822 def int_aarch64_sve_uqincd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1823 def int_aarch64_sve_uqincd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1824 def int_aarch64_sve_uqincp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
1825 def int_aarch64_sve_uqincp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;
1826
1827 //
1828 // Reversal
1829 //
1830
1831 def int_aarch64_sve_rbit : AdvSIMD_Merged1VectorArg_Intrinsic;
1832 def int_aarch64_sve_revb : AdvSIMD_Merged1VectorArg_Intrinsic;
1833 def int_aarch64_sve_revh : AdvSIMD_Merged1VectorArg_Intrinsic;
1834 def int_aarch64_sve_revw : AdvSIMD_Merged1VectorArg_Intrinsic;
1835
1836 //
1837 // Permutations and selection
1838 //
1839
// The *_n clasta/clastb forms take a scalar initial value (ReduceWithInit).
// The *q trn/uzp/zip variants operate on 128-bit quadword granules.
1840 def int_aarch64_sve_clasta    : AdvSIMD_Pred2VectorArg_Intrinsic;
1841 def int_aarch64_sve_clasta_n  : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
1842 def int_aarch64_sve_clastb    : AdvSIMD_Pred2VectorArg_Intrinsic;
1843 def int_aarch64_sve_clastb_n  : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
1844 def int_aarch64_sve_compact   : AdvSIMD_Pred1VectorArg_Intrinsic;
1845 def int_aarch64_sve_dupq_lane : AdvSIMD_SVE_DUPQ_Intrinsic;
1846 def int_aarch64_sve_ext       : AdvSIMD_2VectorArgIndexed_Intrinsic;
1847 def int_aarch64_sve_sel       : AdvSIMD_Pred2VectorArg_Intrinsic;
1848 def int_aarch64_sve_lasta     : AdvSIMD_SVE_Reduce_Intrinsic;
1849 def int_aarch64_sve_lastb     : AdvSIMD_SVE_Reduce_Intrinsic;
1850 def int_aarch64_sve_rev       : AdvSIMD_1VectorArg_Intrinsic;
1851 def int_aarch64_sve_splice    : AdvSIMD_Pred2VectorArg_Intrinsic;
1852 def int_aarch64_sve_sunpkhi   : AdvSIMD_SVE_Unpack_Intrinsic;
1853 def int_aarch64_sve_sunpklo   : AdvSIMD_SVE_Unpack_Intrinsic;
1854 def int_aarch64_sve_tbl       : AdvSIMD_SVE_TBL_Intrinsic;
1855 def int_aarch64_sve_trn1      : AdvSIMD_2VectorArg_Intrinsic;
1856 def int_aarch64_sve_trn2      : AdvSIMD_2VectorArg_Intrinsic;
1857 def int_aarch64_sve_trn1q     : AdvSIMD_2VectorArg_Intrinsic;
1858 def int_aarch64_sve_trn2q     : AdvSIMD_2VectorArg_Intrinsic;
1859 def int_aarch64_sve_uunpkhi   : AdvSIMD_SVE_Unpack_Intrinsic;
1860 def int_aarch64_sve_uunpklo   : AdvSIMD_SVE_Unpack_Intrinsic;
1861 def int_aarch64_sve_uzp1      : AdvSIMD_2VectorArg_Intrinsic;
1862 def int_aarch64_sve_uzp2      : AdvSIMD_2VectorArg_Intrinsic;
1863 def int_aarch64_sve_uzp1q     : AdvSIMD_2VectorArg_Intrinsic;
1864 def int_aarch64_sve_uzp2q     : AdvSIMD_2VectorArg_Intrinsic;
1865 def int_aarch64_sve_zip1      : AdvSIMD_2VectorArg_Intrinsic;
1866 def int_aarch64_sve_zip2      : AdvSIMD_2VectorArg_Intrinsic;
1867 def int_aarch64_sve_zip1q     : AdvSIMD_2VectorArg_Intrinsic;
1868 def int_aarch64_sve_zip2q     : AdvSIMD_2VectorArg_Intrinsic;
1869
1870 //
1871 // Logical operations
1872 //
1873
// cnot/not are merging unary ops; the rest are predicated binary ops.
1874 def int_aarch64_sve_and  : AdvSIMD_Pred2VectorArg_Intrinsic;
1875 def int_aarch64_sve_bic  : AdvSIMD_Pred2VectorArg_Intrinsic;
1876 def int_aarch64_sve_cnot : AdvSIMD_Merged1VectorArg_Intrinsic;
1877 def int_aarch64_sve_eor  : AdvSIMD_Pred2VectorArg_Intrinsic;
1878 def int_aarch64_sve_not  : AdvSIMD_Merged1VectorArg_Intrinsic;
1879 def int_aarch64_sve_orr  : AdvSIMD_Pred2VectorArg_Intrinsic;
1880
1881 //
1882 // Conversion
1883 //
1884
// Merging sign/zero extensions of the low b/h/w part of each element.
1885 def int_aarch64_sve_sxtb : AdvSIMD_Merged1VectorArg_Intrinsic;
1886 def int_aarch64_sve_sxth : AdvSIMD_Merged1VectorArg_Intrinsic;
1887 def int_aarch64_sve_sxtw : AdvSIMD_Merged1VectorArg_Intrinsic;
1888 def int_aarch64_sve_uxtb : AdvSIMD_Merged1VectorArg_Intrinsic;
1889 def int_aarch64_sve_uxth : AdvSIMD_Merged1VectorArg_Intrinsic;
1890 def int_aarch64_sve_uxtw : AdvSIMD_Merged1VectorArg_Intrinsic;
1891
1892 //
1893 // While comparisons
1894 //
1895
1896 def int_aarch64_sve_whilele : AdvSIMD_SVE_WHILE_Intrinsic;
1897 def int_aarch64_sve_whilelo : AdvSIMD_SVE_WHILE_Intrinsic;
1898 def int_aarch64_sve_whilels : AdvSIMD_SVE_WHILE_Intrinsic;
1899 def int_aarch64_sve_whilelt : AdvSIMD_SVE_WHILE_Intrinsic;
1900 def int_aarch64_sve_whilege : AdvSIMD_SVE_WHILE_Intrinsic;
1901 def int_aarch64_sve_whilegt : AdvSIMD_SVE_WHILE_Intrinsic;
1902 def int_aarch64_sve_whilehs : AdvSIMD_SVE_WHILE_Intrinsic;
1903 def int_aarch64_sve_whilehi : AdvSIMD_SVE_WHILE_Intrinsic;
1904
1905 //
1906 // Floating-point arithmetic
1907 //
1908
// _x suffix: unpredicated; _lane suffix: indexed (immediate lane operand).
1909 def int_aarch64_sve_fabd       : AdvSIMD_Pred2VectorArg_Intrinsic;
1910 def int_aarch64_sve_fabs       : AdvSIMD_Merged1VectorArg_Intrinsic;
1911 def int_aarch64_sve_fadd       : AdvSIMD_Pred2VectorArg_Intrinsic;
1912 def int_aarch64_sve_fcadd      : AdvSIMD_SVE_CADD_Intrinsic;
1913 def int_aarch64_sve_fcmla      : AdvSIMD_SVE_CMLA_Intrinsic;
1914 def int_aarch64_sve_fcmla_lane : AdvSIMD_SVE_CMLA_LANE_Intrinsic;
1915 def int_aarch64_sve_fdiv       : AdvSIMD_Pred2VectorArg_Intrinsic;
1916 def int_aarch64_sve_fdivr      : AdvSIMD_Pred2VectorArg_Intrinsic;
1917 def int_aarch64_sve_fexpa_x    : AdvSIMD_SVE_EXPA_Intrinsic;
1918 def int_aarch64_sve_fmad       : AdvSIMD_Pred3VectorArg_Intrinsic;
1919 def int_aarch64_sve_fmax       : AdvSIMD_Pred2VectorArg_Intrinsic;
1920 def int_aarch64_sve_fmaxnm     : AdvSIMD_Pred2VectorArg_Intrinsic;
1921 def int_aarch64_sve_fmin       : AdvSIMD_Pred2VectorArg_Intrinsic;
1922 def int_aarch64_sve_fminnm     : AdvSIMD_Pred2VectorArg_Intrinsic;
1923 def int_aarch64_sve_fmla       : AdvSIMD_Pred3VectorArg_Intrinsic;
1924 def int_aarch64_sve_fmla_lane  : AdvSIMD_3VectorArgIndexed_Intrinsic;
1925 def int_aarch64_sve_fmls       : AdvSIMD_Pred3VectorArg_Intrinsic;
1926 def int_aarch64_sve_fmls_lane  : AdvSIMD_3VectorArgIndexed_Intrinsic;
1927 def int_aarch64_sve_fmsb       : AdvSIMD_Pred3VectorArg_Intrinsic;
1928 def int_aarch64_sve_fmul       : AdvSIMD_Pred2VectorArg_Intrinsic;
1929 def int_aarch64_sve_fmulx      : AdvSIMD_Pred2VectorArg_Intrinsic;
1930 def int_aarch64_sve_fneg       : AdvSIMD_Merged1VectorArg_Intrinsic;
1931 def int_aarch64_sve_fmul_lane  : AdvSIMD_2VectorArgIndexed_Intrinsic;
1932 def int_aarch64_sve_fnmad      : AdvSIMD_Pred3VectorArg_Intrinsic;
1933 def int_aarch64_sve_fnmla      : AdvSIMD_Pred3VectorArg_Intrinsic;
1934 def int_aarch64_sve_fnmls      : AdvSIMD_Pred3VectorArg_Intrinsic;
1935 def int_aarch64_sve_fnmsb      : AdvSIMD_Pred3VectorArg_Intrinsic;
1936 def int_aarch64_sve_frecpe_x   : AdvSIMD_1VectorArg_Intrinsic;
1937 def int_aarch64_sve_frecps_x   : AdvSIMD_2VectorArg_Intrinsic;
1938 def int_aarch64_sve_frecpx     : AdvSIMD_Merged1VectorArg_Intrinsic;
1939 def int_aarch64_sve_frinta     : AdvSIMD_Merged1VectorArg_Intrinsic;
1940 def int_aarch64_sve_frinti     : AdvSIMD_Merged1VectorArg_Intrinsic;
1941 def int_aarch64_sve_frintm     : AdvSIMD_Merged1VectorArg_Intrinsic;
1942 def int_aarch64_sve_frintn     : AdvSIMD_Merged1VectorArg_Intrinsic;
1943 def int_aarch64_sve_frintp     : AdvSIMD_Merged1VectorArg_Intrinsic;
1944 def int_aarch64_sve_frintx     : AdvSIMD_Merged1VectorArg_Intrinsic;
1945 def int_aarch64_sve_frintz     : AdvSIMD_Merged1VectorArg_Intrinsic;
1946 def int_aarch64_sve_frsqrte_x  : AdvSIMD_1VectorArg_Intrinsic;
1947 def int_aarch64_sve_frsqrts_x  : AdvSIMD_2VectorArg_Intrinsic;
1948 def int_aarch64_sve_fscale     : AdvSIMD_SVE_SCALE_Intrinsic;
1949 def int_aarch64_sve_fsqrt      : AdvSIMD_Merged1VectorArg_Intrinsic;
1950 def int_aarch64_sve_fsub       : AdvSIMD_Pred2VectorArg_Intrinsic;
1951 def int_aarch64_sve_fsubr      : AdvSIMD_Pred2VectorArg_Intrinsic;
1952 def int_aarch64_sve_ftmad_x    : AdvSIMD_2VectorArgIndexed_Intrinsic;
1953 def int_aarch64_sve_ftsmul_x   : AdvSIMD_SVE_TSMUL_Intrinsic;
1954 def int_aarch64_sve_ftssel_x   : AdvSIMD_SVE_TSMUL_Intrinsic;
1955
1956 //
1957 // Floating-point reductions
1958 //
1959
// fadda is the ordered (strictly sequential) reduction with an initial value.
1960 def int_aarch64_sve_fadda   : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
1961 def int_aarch64_sve_faddv   : AdvSIMD_SVE_Reduce_Intrinsic;
1962 def int_aarch64_sve_fmaxv   : AdvSIMD_SVE_Reduce_Intrinsic;
1963 def int_aarch64_sve_fmaxnmv : AdvSIMD_SVE_Reduce_Intrinsic;
1964 def int_aarch64_sve_fminv   : AdvSIMD_SVE_Reduce_Intrinsic;
1965 def int_aarch64_sve_fminnmv : AdvSIMD_SVE_Reduce_Intrinsic;
1966
1967 //
1968 // Floating-point conversions
1969 //
1970
1971 def int_aarch64_sve_fcvt   : AdvSIMD_SVE_FCVT_Intrinsic;
1972 def int_aarch64_sve_fcvtzs : AdvSIMD_SVE_FCVTZS_Intrinsic;
1973 def int_aarch64_sve_fcvtzu : AdvSIMD_SVE_FCVTZS_Intrinsic;
1974 def int_aarch64_sve_scvtf  : AdvSIMD_SVE_SCVTF_Intrinsic;
1975 def int_aarch64_sve_ucvtf  : AdvSIMD_SVE_SCVTF_Intrinsic;
1976
1977 //
1978 // Floating-point comparisons
1979 //
1980
1981 def int_aarch64_sve_facge : AdvSIMD_SVE_Compare_Intrinsic;
1982 def int_aarch64_sve_facgt : AdvSIMD_SVE_Compare_Intrinsic;
1983
1984 def int_aarch64_sve_fcmpeq : AdvSIMD_SVE_Compare_Intrinsic;
1985 def int_aarch64_sve_fcmpge : AdvSIMD_SVE_Compare_Intrinsic;
1986 def int_aarch64_sve_fcmpgt : AdvSIMD_SVE_Compare_Intrinsic;
1987 def int_aarch64_sve_fcmpne : AdvSIMD_SVE_Compare_Intrinsic;
1988 def int_aarch64_sve_fcmpuo : AdvSIMD_SVE_Compare_Intrinsic;
1989
// Builtin_SVCVT<Result, Predicate, Operand>. The _<to><from> name suffix
// encodes the conversion: e.g. fcvtzs_i32f16 takes an nxv8f16 operand and
// produces an nxv4i32 result under the given governing-predicate type.
1990 def int_aarch64_sve_fcvtzs_i32f16   : Builtin_SVCVT<llvm_nxv4i32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
1991 def int_aarch64_sve_fcvtzs_i32f64   : Builtin_SVCVT<llvm_nxv4i32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
1992 def int_aarch64_sve_fcvtzs_i64f16   : Builtin_SVCVT<llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv8f16_ty>;
1993 def int_aarch64_sve_fcvtzs_i64f32   : Builtin_SVCVT<llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;
1994
1995 def int_aarch64_sve_fcvt_bf16f32    : Builtin_SVCVT<llvm_nxv8bf16_ty, llvm_nxv8i1_ty, llvm_nxv4f32_ty>;
1996 def int_aarch64_sve_fcvtnt_bf16f32  : Builtin_SVCVT<llvm_nxv8bf16_ty, llvm_nxv8i1_ty, llvm_nxv4f32_ty>;
1997
1998 def int_aarch64_sve_fcvtzu_i32f16   : Builtin_SVCVT<llvm_nxv4i32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
1999 def int_aarch64_sve_fcvtzu_i32f64   : Builtin_SVCVT<llvm_nxv4i32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
2000 def int_aarch64_sve_fcvtzu_i64f16   : Builtin_SVCVT<llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv8f16_ty>;
2001 def int_aarch64_sve_fcvtzu_i64f32   : Builtin_SVCVT<llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;
2002
// Float-to-float narrowing converts (result narrower than operand)...
2003 def int_aarch64_sve_fcvt_f16f32     : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4f32_ty>;
2004 def int_aarch64_sve_fcvt_f16f64     : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
2005 def int_aarch64_sve_fcvt_f32f64     : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
2006
// ...and the corresponding widening converts.
2007 def int_aarch64_sve_fcvt_f32f16     : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
2008 def int_aarch64_sve_fcvt_f64f16     : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv8f16_ty>;
2009 def int_aarch64_sve_fcvt_f64f32     : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;
2010
2011 def int_aarch64_sve_fcvtlt_f32f16   : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
2012 def int_aarch64_sve_fcvtlt_f64f32   : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;
2013 def int_aarch64_sve_fcvtnt_f16f32   : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4f32_ty>;
2014 def int_aarch64_sve_fcvtnt_f32f64   : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
2015
2016 def int_aarch64_sve_fcvtx_f32f64    : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
2017 def int_aarch64_sve_fcvtxnt_f32f64  : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
2018
2019 def int_aarch64_sve_scvtf_f16i32    : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4i32_ty>;
2020 def int_aarch64_sve_scvtf_f16i64    : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
2021 def int_aarch64_sve_scvtf_f32i64    : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
2022 def int_aarch64_sve_scvtf_f64i32    : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4i32_ty>;
2023
2024 def int_aarch64_sve_ucvtf_f16i32    : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4i32_ty>;
2025 def int_aarch64_sve_ucvtf_f16i64    : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
2026 def int_aarch64_sve_ucvtf_f32i64    : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
2027 def int_aarch64_sve_ucvtf_f64i32    : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4i32_ty>;
2028
2029 //
2030 // Predicate creation
2031 //
2032
2033 def int_aarch64_sve_ptrue : AdvSIMD_SVE_PTRUE_Intrinsic;
2034
2035 //
2036 // Predicate operations
2037 //
2038
// Predicate logical ops, break ops (brk*), and scan ops (pfirst/pnext).
// punpklo reuses the PUNPKHI signature class (identical type profile).
2039 def int_aarch64_sve_and_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
2040 def int_aarch64_sve_bic_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
2041 def int_aarch64_sve_brka    : AdvSIMD_Merged1VectorArg_Intrinsic;
2042 def int_aarch64_sve_brka_z  : AdvSIMD_Pred1VectorArg_Intrinsic;
2043 def int_aarch64_sve_brkb    : AdvSIMD_Merged1VectorArg_Intrinsic;
2044 def int_aarch64_sve_brkb_z  : AdvSIMD_Pred1VectorArg_Intrinsic;
2045 def int_aarch64_sve_brkn_z  : AdvSIMD_Pred2VectorArg_Intrinsic;
2046 def int_aarch64_sve_brkpa_z : AdvSIMD_Pred2VectorArg_Intrinsic;
2047 def int_aarch64_sve_brkpb_z : AdvSIMD_Pred2VectorArg_Intrinsic;
2048 def int_aarch64_sve_eor_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
2049 def int_aarch64_sve_nand_z  : AdvSIMD_Pred2VectorArg_Intrinsic;
2050 def int_aarch64_sve_nor_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
2051 def int_aarch64_sve_orn_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
2052 def int_aarch64_sve_orr_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
2053 def int_aarch64_sve_pfirst  : AdvSIMD_Pred1VectorArg_Intrinsic;
2054 def int_aarch64_sve_pnext   : AdvSIMD_Pred1VectorArg_Intrinsic;
2055 def int_aarch64_sve_punpkhi : AdvSIMD_SVE_PUNPKHI_Intrinsic;
2056 def int_aarch64_sve_punpklo : AdvSIMD_SVE_PUNPKHI_Intrinsic;
2057
2058 //
2059 // Testing predicates
2060 //
2061
2062 def int_aarch64_sve_ptest_any   : AdvSIMD_SVE_PTEST_Intrinsic;
2063 def int_aarch64_sve_ptest_first : AdvSIMD_SVE_PTEST_Intrinsic;
2064 def int_aarch64_sve_ptest_last  : AdvSIMD_SVE_PTEST_Intrinsic;
2065
2066 //
2067 // Reinterpreting data
2068 //
2069
// Reinterpret between the generic nxv16i1 svbool predicate type and any
// other vector type (overloaded via llvm_anyvector_ty); IntrNoMem.
2070 def int_aarch64_sve_convert_from_svbool : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
2071                                                     [llvm_nxv16i1_ty],
2072                                                     [IntrNoMem]>;
2073
2074 def int_aarch64_sve_convert_to_svbool : DefaultAttrsIntrinsic<[llvm_nxv16i1_ty],
2075                                                   [llvm_anyvector_ty],
2076                                                   [IntrNoMem]>;
2077
2078 //
2079 // Gather loads: scalar base + vector offsets
2080 //
2081
2082 // 64 bit unscaled offsets
2083 def int_aarch64_sve_ld1_gather : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;
2084
2085 // 64 bit scaled offsets
2086 def int_aarch64_sve_ld1_gather_index : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;
2087
2088 // 32 bit unscaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
2089 def int_aarch64_sve_ld1_gather_sxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
2090 def int_aarch64_sve_ld1_gather_uxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
2091
2092 // 32 bit scaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
2093 def int_aarch64_sve_ld1_gather_sxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
2094 def int_aarch64_sve_ld1_gather_uxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
2095
2096 //
2097 // Gather loads: vector base + scalar offset
2098 //
2099
2100 def int_aarch64_sve_ld1_gather_scalar_offset : AdvSIMD_GatherLoad_VS_Intrinsic;
2101
2102
2103 //
2104 // First-faulting gather loads: scalar base + vector offsets
2105 //
2106
// The _WriteFFR signature classes mark these intrinsics as updating the
// first-fault register (FFR) state, unlike the plain gather loads above.
2107 // 64 bit unscaled offsets
2108 def int_aarch64_sve_ldff1_gather : AdvSIMD_GatherLoad_SV_64b_Offsets_WriteFFR_Intrinsic;
2109
2110 // 64 bit scaled offsets
2111 def int_aarch64_sve_ldff1_gather_index : AdvSIMD_GatherLoad_SV_64b_Offsets_WriteFFR_Intrinsic;
2112
2113 // 32 bit unscaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
2114 def int_aarch64_sve_ldff1_gather_sxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic;
2115 def int_aarch64_sve_ldff1_gather_uxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic;
2116
2117 // 32 bit scaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
2118 def int_aarch64_sve_ldff1_gather_sxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic;
2119 def int_aarch64_sve_ldff1_gather_uxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic;
2120
2121 //
2122 // First-faulting gather loads: vector base + scalar offset
2123 //
2124
2125 def int_aarch64_sve_ldff1_gather_scalar_offset : AdvSIMD_GatherLoad_VS_WriteFFR_Intrinsic;
2126
2127
2128 //
2129 // Non-temporal gather loads: scalar base + vector offsets
2130 //
2131
// Note: unlike ld1_gather, only the uxtw 32-bit form is defined here;
// there are no sxtw or scaled 32-bit variants in this group.
2132 // 64 bit unscaled offsets
2133 def int_aarch64_sve_ldnt1_gather : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;
2134
2135 // 64 bit indices
2136 def int_aarch64_sve_ldnt1_gather_index : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;
2137
2138 // 32 bit unscaled offsets, zero (uxtw) extended to 64 bits
2139 def int_aarch64_sve_ldnt1_gather_uxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
2140
2141 //
2142 // Non-temporal gather loads: vector base + scalar offset
2143 //
2144
2145 def int_aarch64_sve_ldnt1_gather_scalar_offset  : AdvSIMD_GatherLoad_VS_Intrinsic;
2146
2147 //
2148 // Scatter stores: scalar base + vector offsets
2149 //
2150
2151 // 64 bit unscaled offsets
2152 def int_aarch64_sve_st1_scatter : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;
2153
2154 // 64 bit scaled offsets
2155 def int_aarch64_sve_st1_scatter_index
2156     : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;
2157
2158 // 32 bit unscaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
2159 def int_aarch64_sve_st1_scatter_sxtw
2160     : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;
2161
2162 def int_aarch64_sve_st1_scatter_uxtw
2163     : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;
2164
2165 // 32 bit scaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
2166 def int_aarch64_sve_st1_scatter_sxtw_index
2167     : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;
2168
2169 def int_aarch64_sve_st1_scatter_uxtw_index
2170     : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;
2171
2172 //
2173 // Scatter stores: vector base + scalar offset
2174 //
2175
2176 def int_aarch64_sve_st1_scatter_scalar_offset : AdvSIMD_ScatterStore_VS_Intrinsic;
2177
2178 //
2179 // Non-temporal scatter stores: scalar base + vector offsets
2180 //
2181
// As with the non-temporal gathers, only the uxtw 32-bit form is defined.
2182 // 64 bit unscaled offsets
2183 def int_aarch64_sve_stnt1_scatter : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;
2184
2185 // 64 bit indices
2186 def int_aarch64_sve_stnt1_scatter_index
2187     : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;
2188
2189 // 32 bit unscaled offsets, zero (uxtw) extended to 64 bits
2190 def int_aarch64_sve_stnt1_scatter_uxtw : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;
2191
2192 //
2193 // Non-temporal scatter stores: vector base + scalar offset
2194 //
2195
2196 def int_aarch64_sve_stnt1_scatter_scalar_offset  : AdvSIMD_ScatterStore_VS_Intrinsic;
2197
2198 //
2199 // SVE2 - Uniform DSP operations
2200 //
2201
// Element-wise DSP ops. Predicated two-operand forms use
// AdvSIMD_Pred2VectorArg; immediate shifts use SVE_ShiftByImm; the
// _lane and shift-insert/accumulate forms take an extra index operand
// (…Indexed classes).
2202 def int_aarch64_sve_saba          : AdvSIMD_3VectorArg_Intrinsic;
2203 def int_aarch64_sve_shadd         : AdvSIMD_Pred2VectorArg_Intrinsic;
2204 def int_aarch64_sve_shsub         : AdvSIMD_Pred2VectorArg_Intrinsic;
2205 def int_aarch64_sve_shsubr        : AdvSIMD_Pred2VectorArg_Intrinsic;
2206 def int_aarch64_sve_sli           : AdvSIMD_2VectorArgIndexed_Intrinsic;
2207 def int_aarch64_sve_sqabs         : AdvSIMD_Merged1VectorArg_Intrinsic;
2208 def int_aarch64_sve_sqadd         : AdvSIMD_Pred2VectorArg_Intrinsic;
2209 def int_aarch64_sve_sqdmulh       : AdvSIMD_2VectorArg_Intrinsic;
2210 def int_aarch64_sve_sqdmulh_lane  : AdvSIMD_2VectorArgIndexed_Intrinsic;
2211 def int_aarch64_sve_sqneg         : AdvSIMD_Merged1VectorArg_Intrinsic;
2212 def int_aarch64_sve_sqrdmlah      : AdvSIMD_3VectorArg_Intrinsic;
2213 def int_aarch64_sve_sqrdmlah_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
2214 def int_aarch64_sve_sqrdmlsh      : AdvSIMD_3VectorArg_Intrinsic;
2215 def int_aarch64_sve_sqrdmlsh_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
2216 def int_aarch64_sve_sqrdmulh      : AdvSIMD_2VectorArg_Intrinsic;
2217 def int_aarch64_sve_sqrdmulh_lane : AdvSIMD_2VectorArgIndexed_Intrinsic;
2218 def int_aarch64_sve_sqrshl        : AdvSIMD_Pred2VectorArg_Intrinsic;
2219 def int_aarch64_sve_sqshl         : AdvSIMD_Pred2VectorArg_Intrinsic;
2220 def int_aarch64_sve_sqshlu        : AdvSIMD_SVE_ShiftByImm_Intrinsic;
2221 def int_aarch64_sve_sqsub         : AdvSIMD_Pred2VectorArg_Intrinsic;
2222 def int_aarch64_sve_sqsubr        : AdvSIMD_Pred2VectorArg_Intrinsic;
2223 def int_aarch64_sve_srhadd        : AdvSIMD_Pred2VectorArg_Intrinsic;
2224 def int_aarch64_sve_sri           : AdvSIMD_2VectorArgIndexed_Intrinsic;
2225 def int_aarch64_sve_srshl         : AdvSIMD_Pred2VectorArg_Intrinsic;
2226 def int_aarch64_sve_srshr         : AdvSIMD_SVE_ShiftByImm_Intrinsic;
2227 def int_aarch64_sve_srsra         : AdvSIMD_2VectorArgIndexed_Intrinsic;
2228 def int_aarch64_sve_ssra          : AdvSIMD_2VectorArgIndexed_Intrinsic;
2229 def int_aarch64_sve_suqadd        : AdvSIMD_Pred2VectorArg_Intrinsic;
2230 def int_aarch64_sve_uaba          : AdvSIMD_3VectorArg_Intrinsic;
2231 def int_aarch64_sve_uhadd         : AdvSIMD_Pred2VectorArg_Intrinsic;
2232 def int_aarch64_sve_uhsub         : AdvSIMD_Pred2VectorArg_Intrinsic;
2233 def int_aarch64_sve_uhsubr        : AdvSIMD_Pred2VectorArg_Intrinsic;
2234 def int_aarch64_sve_uqadd         : AdvSIMD_Pred2VectorArg_Intrinsic;
2235 def int_aarch64_sve_uqrshl        : AdvSIMD_Pred2VectorArg_Intrinsic;
2236 def int_aarch64_sve_uqshl         : AdvSIMD_Pred2VectorArg_Intrinsic;
2237 def int_aarch64_sve_uqsub         : AdvSIMD_Pred2VectorArg_Intrinsic;
2238 def int_aarch64_sve_uqsubr        : AdvSIMD_Pred2VectorArg_Intrinsic;
2239 def int_aarch64_sve_urecpe        : AdvSIMD_Merged1VectorArg_Intrinsic;
2240 def int_aarch64_sve_urhadd        : AdvSIMD_Pred2VectorArg_Intrinsic;
2241 def int_aarch64_sve_urshl         : AdvSIMD_Pred2VectorArg_Intrinsic;
2242 def int_aarch64_sve_urshr         : AdvSIMD_SVE_ShiftByImm_Intrinsic;
2243 def int_aarch64_sve_ursqrte       : AdvSIMD_Merged1VectorArg_Intrinsic;
2244 def int_aarch64_sve_ursra         : AdvSIMD_2VectorArgIndexed_Intrinsic;
2245 def int_aarch64_sve_usqadd        : AdvSIMD_Pred2VectorArg_Intrinsic;
2246 def int_aarch64_sve_usra          : AdvSIMD_2VectorArgIndexed_Intrinsic;
2247
2248 //
2249 // SVE2 - Widening DSP operations
2250 //
2251
// The *b/*t pairs share a signature class: *l* forms use the Long classes,
// *w* forms the Wide class; signed and unsigned variants are identical in
// shape and differ only by name.
2252 def int_aarch64_sve_sabalb : SVE2_3VectorArg_Long_Intrinsic;
2253 def int_aarch64_sve_sabalt : SVE2_3VectorArg_Long_Intrinsic;
2254 def int_aarch64_sve_sabdlb : SVE2_2VectorArg_Long_Intrinsic;
2255 def int_aarch64_sve_sabdlt : SVE2_2VectorArg_Long_Intrinsic;
2256 def int_aarch64_sve_saddlb : SVE2_2VectorArg_Long_Intrinsic;
2257 def int_aarch64_sve_saddlt : SVE2_2VectorArg_Long_Intrinsic;
2258 def int_aarch64_sve_saddwb : SVE2_2VectorArg_Wide_Intrinsic;
2259 def int_aarch64_sve_saddwt : SVE2_2VectorArg_Wide_Intrinsic;
2260 def int_aarch64_sve_sshllb : SVE2_1VectorArg_Long_Intrinsic;
2261 def int_aarch64_sve_sshllt : SVE2_1VectorArg_Long_Intrinsic;
2262 def int_aarch64_sve_ssublb : SVE2_2VectorArg_Long_Intrinsic;
2263 def int_aarch64_sve_ssublt : SVE2_2VectorArg_Long_Intrinsic;
2264 def int_aarch64_sve_ssubwb : SVE2_2VectorArg_Wide_Intrinsic;
2265 def int_aarch64_sve_ssubwt : SVE2_2VectorArg_Wide_Intrinsic;
2266 def int_aarch64_sve_uabalb : SVE2_3VectorArg_Long_Intrinsic;
2267 def int_aarch64_sve_uabalt : SVE2_3VectorArg_Long_Intrinsic;
2268 def int_aarch64_sve_uabdlb : SVE2_2VectorArg_Long_Intrinsic;
2269 def int_aarch64_sve_uabdlt : SVE2_2VectorArg_Long_Intrinsic;
2270 def int_aarch64_sve_uaddlb : SVE2_2VectorArg_Long_Intrinsic;
2271 def int_aarch64_sve_uaddlt : SVE2_2VectorArg_Long_Intrinsic;
2272 def int_aarch64_sve_uaddwb : SVE2_2VectorArg_Wide_Intrinsic;
2273 def int_aarch64_sve_uaddwt : SVE2_2VectorArg_Wide_Intrinsic;
2274 def int_aarch64_sve_ushllb : SVE2_1VectorArg_Long_Intrinsic;
2275 def int_aarch64_sve_ushllt : SVE2_1VectorArg_Long_Intrinsic;
2276 def int_aarch64_sve_usublb : SVE2_2VectorArg_Long_Intrinsic;
2277 def int_aarch64_sve_usublt : SVE2_2VectorArg_Long_Intrinsic;
2278 def int_aarch64_sve_usubwb : SVE2_2VectorArg_Wide_Intrinsic;
2279 def int_aarch64_sve_usubwt : SVE2_2VectorArg_Wide_Intrinsic;
2280
2281 //
2282 // SVE2 - Non-widening pairwise arithmetic
2283 //
2284
2285 def int_aarch64_sve_addp    : AdvSIMD_Pred2VectorArg_Intrinsic;
2286 def int_aarch64_sve_faddp   : AdvSIMD_Pred2VectorArg_Intrinsic;
2287 def int_aarch64_sve_fmaxp   : AdvSIMD_Pred2VectorArg_Intrinsic;
2288 def int_aarch64_sve_fmaxnmp : AdvSIMD_Pred2VectorArg_Intrinsic;
2289 def int_aarch64_sve_fminp   : AdvSIMD_Pred2VectorArg_Intrinsic;
2290 def int_aarch64_sve_fminnmp : AdvSIMD_Pred2VectorArg_Intrinsic;
2291 def int_aarch64_sve_smaxp   : AdvSIMD_Pred2VectorArg_Intrinsic;
2292 def int_aarch64_sve_sminp   : AdvSIMD_Pred2VectorArg_Intrinsic;
2293 def int_aarch64_sve_umaxp   : AdvSIMD_Pred2VectorArg_Intrinsic;
2294 def int_aarch64_sve_uminp   : AdvSIMD_Pred2VectorArg_Intrinsic;
2295
2296 //
2297 // SVE2 - Widening pairwise arithmetic
2298 //
2299
2300 def int_aarch64_sve_sadalp : SVE2_2VectorArg_Pred_Long_Intrinsic;
2301 def int_aarch64_sve_uadalp : SVE2_2VectorArg_Pred_Long_Intrinsic;
2302
2303 //
2304 // SVE2 - Uniform complex integer arithmetic
2305 //
2306
// The _x suffix marks the unpredicated forms; _lane_x forms take an
// additional lane index (CMLA_LANE class).
2307 def int_aarch64_sve_cadd_x           : AdvSIMD_SVE2_CADD_Intrinsic;
2308 def int_aarch64_sve_sqcadd_x         : AdvSIMD_SVE2_CADD_Intrinsic;
2309 def int_aarch64_sve_cmla_x           : AdvSIMD_SVE2_CMLA_Intrinsic;
2310 def int_aarch64_sve_cmla_lane_x      : AdvSIMD_SVE_CMLA_LANE_Intrinsic;
2311 def int_aarch64_sve_sqrdcmlah_x      : AdvSIMD_SVE2_CMLA_Intrinsic;
2312 def int_aarch64_sve_sqrdcmlah_lane_x : AdvSIMD_SVE_CMLA_LANE_Intrinsic;
2313
2314 //
2315 // SVE2 - Widening complex integer arithmetic
2316 //
2317
2318 def int_aarch64_sve_saddlbt   : SVE2_2VectorArg_Long_Intrinsic;
2319 def int_aarch64_sve_ssublbt   : SVE2_2VectorArg_Long_Intrinsic;
2320 def int_aarch64_sve_ssubltb   : SVE2_2VectorArg_Long_Intrinsic;
2321
2322 //
2323 // SVE2 - Widening complex integer dot product
2324 //
2325
2326 def int_aarch64_sve_cdot      : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
2327 def int_aarch64_sve_cdot_lane : AdvSIMD_SVE_CDOT_LANE_Intrinsic;
2328
2329 //
2330 // SVE2 - Floating-point widening multiply-accumulate
2331 //
2332
// fmlal*/fmlsl* come in plain and _lane (indexed) forms sharing the
// SVE2 Long signature classes.
2333 def int_aarch64_sve_fmlalb        : SVE2_3VectorArg_Long_Intrinsic;
2334 def int_aarch64_sve_fmlalb_lane   : SVE2_3VectorArgIndexed_Long_Intrinsic;
2335 def int_aarch64_sve_fmlalt        : SVE2_3VectorArg_Long_Intrinsic;
2336 def int_aarch64_sve_fmlalt_lane   : SVE2_3VectorArgIndexed_Long_Intrinsic;
2337 def int_aarch64_sve_fmlslb        : SVE2_3VectorArg_Long_Intrinsic;
2338 def int_aarch64_sve_fmlslb_lane   : SVE2_3VectorArgIndexed_Long_Intrinsic;
2339 def int_aarch64_sve_fmlslt        : SVE2_3VectorArg_Long_Intrinsic;
2340 def int_aarch64_sve_fmlslt_lane   : SVE2_3VectorArgIndexed_Long_Intrinsic;
2341
2342 //
2343 // SVE2 - Floating-point integer binary logarithm
2344 //
2345
2346 def int_aarch64_sve_flogb : AdvSIMD_SVE_LOGB_Intrinsic;
2347
2348 //
2349 // SVE2 - Vector histogram count
2350 //
2351
2352 def int_aarch64_sve_histcnt : AdvSIMD_Pred2VectorArg_Intrinsic;
2353 def int_aarch64_sve_histseg : AdvSIMD_2VectorArg_Intrinsic;
2354
2355 //
2356 // SVE2 - Character match
2357 //
2358
2359 def int_aarch64_sve_match   : AdvSIMD_SVE_Compare_Intrinsic;
2360 def int_aarch64_sve_nmatch  : AdvSIMD_SVE_Compare_Intrinsic;
2361
2362 //
2363 // SVE2 - Unary narrowing operations
2364 //
2365
// *nb forms use the plain Narrowing class; *nt forms use the Merged class
// (they take the destination's existing value as an extra operand).
2366 def int_aarch64_sve_sqxtnb  : SVE2_1VectorArg_Narrowing_Intrinsic;
2367 def int_aarch64_sve_sqxtnt  : SVE2_Merged1VectorArg_Narrowing_Intrinsic;
2368 def int_aarch64_sve_sqxtunb : SVE2_1VectorArg_Narrowing_Intrinsic;
2369 def int_aarch64_sve_sqxtunt : SVE2_Merged1VectorArg_Narrowing_Intrinsic;
2370 def int_aarch64_sve_uqxtnb  : SVE2_1VectorArg_Narrowing_Intrinsic;
2371 def int_aarch64_sve_uqxtnt  : SVE2_Merged1VectorArg_Narrowing_Intrinsic;
2372
2373 //
2374 // SVE2 - Binary narrowing DSP operations
2375 //
2376 def int_aarch64_sve_addhnb    : SVE2_2VectorArg_Narrowing_Intrinsic;
2377 def int_aarch64_sve_addhnt    : SVE2_Merged2VectorArg_Narrowing_Intrinsic;
2378
2379 def int_aarch64_sve_raddhnb   : SVE2_2VectorArg_Narrowing_Intrinsic;
2380 def int_aarch64_sve_raddhnt   : SVE2_Merged2VectorArg_Narrowing_Intrinsic;
2381
2382 def int_aarch64_sve_subhnb    : SVE2_2VectorArg_Narrowing_Intrinsic;
2383 def int_aarch64_sve_subhnt    : SVE2_Merged2VectorArg_Narrowing_Intrinsic;
2384
2385 def int_aarch64_sve_rsubhnb   : SVE2_2VectorArg_Narrowing_Intrinsic;
2386 def int_aarch64_sve_rsubhnt   : SVE2_Merged2VectorArg_Narrowing_Intrinsic;
2387
2388 // Narrowing shift right
2389 def int_aarch64_sve_shrnb     : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
2390 def int_aarch64_sve_shrnt     : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
2391
2392 def int_aarch64_sve_rshrnb    : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
2393 def int_aarch64_sve_rshrnt    : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
2394
2395 // Saturating shift right - signed input/output
2396 def int_aarch64_sve_sqshrnb   : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
2397 def int_aarch64_sve_sqshrnt   : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
2398
2399 def int_aarch64_sve_sqrshrnb  : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
2400 def int_aarch64_sve_sqrshrnt  : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
2401
2402 // Saturating shift right - unsigned input/output
2403 def int_aarch64_sve_uqshrnb   : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
2404 def int_aarch64_sve_uqshrnt   : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
2405
2406 def int_aarch64_sve_uqrshrnb  : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
2407 def int_aarch64_sve_uqrshrnt  : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
2408
2409 // Saturating shift right - signed input, unsigned output
2410 def int_aarch64_sve_sqshrunb  : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
2411 def int_aarch64_sve_sqshrunt  : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
2412
2413 def int_aarch64_sve_sqrshrunb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
2414 def int_aarch64_sve_sqrshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
2415
2416 // SVE2 MLA LANE.
// Indexed (by-lane) widening multiply-add/sub and multiply intrinsics;
// 3-arg forms accumulate, 2-arg forms are plain multiplies.
2417 def int_aarch64_sve_smlalb_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
2418 def int_aarch64_sve_smlalt_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
2419 def int_aarch64_sve_umlalb_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
2420 def int_aarch64_sve_umlalt_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
2421 def int_aarch64_sve_smlslb_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
2422 def int_aarch64_sve_smlslt_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
2423 def int_aarch64_sve_umlslb_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
2424 def int_aarch64_sve_umlslt_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
2425 def int_aarch64_sve_smullb_lane   : SVE2_2VectorArgIndexed_Long_Intrinsic;
2426 def int_aarch64_sve_smullt_lane   : SVE2_2VectorArgIndexed_Long_Intrinsic;
2427 def int_aarch64_sve_umullb_lane   : SVE2_2VectorArgIndexed_Long_Intrinsic;
2428 def int_aarch64_sve_umullt_lane   : SVE2_2VectorArgIndexed_Long_Intrinsic;
2429 def int_aarch64_sve_sqdmlalb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
2430 def int_aarch64_sve_sqdmlalt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
2431 def int_aarch64_sve_sqdmlslb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
2432 def int_aarch64_sve_sqdmlslt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
2433 def int_aarch64_sve_sqdmullb_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
2434 def int_aarch64_sve_sqdmullt_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
2435
2436 // SVE2 MLA Unpredicated.
2437 def int_aarch64_sve_smlalb      : SVE2_3VectorArg_Long_Intrinsic;
2438 def int_aarch64_sve_smlalt      : SVE2_3VectorArg_Long_Intrinsic;
2439 def int_aarch64_sve_umlalb      : SVE2_3VectorArg_Long_Intrinsic;
2440 def int_aarch64_sve_umlalt      : SVE2_3VectorArg_Long_Intrinsic;
2441 def int_aarch64_sve_smlslb      : SVE2_3VectorArg_Long_Intrinsic;
2442 def int_aarch64_sve_smlslt      : SVE2_3VectorArg_Long_Intrinsic;
2443 def int_aarch64_sve_umlslb      : SVE2_3VectorArg_Long_Intrinsic;
2444 def int_aarch64_sve_umlslt      : SVE2_3VectorArg_Long_Intrinsic;
2445 def int_aarch64_sve_smullb      : SVE2_2VectorArg_Long_Intrinsic;
2446 def int_aarch64_sve_smullt      : SVE2_2VectorArg_Long_Intrinsic;
2447 def int_aarch64_sve_umullb      : SVE2_2VectorArg_Long_Intrinsic;
2448 def int_aarch64_sve_umullt      : SVE2_2VectorArg_Long_Intrinsic;
2449
2450 def int_aarch64_sve_sqdmlalb    : SVE2_3VectorArg_Long_Intrinsic;
2451 def int_aarch64_sve_sqdmlalt    : SVE2_3VectorArg_Long_Intrinsic;
2452 def int_aarch64_sve_sqdmlslb    : SVE2_3VectorArg_Long_Intrinsic;
2453 def int_aarch64_sve_sqdmlslt    : SVE2_3VectorArg_Long_Intrinsic;
2454 def int_aarch64_sve_sqdmullb    : SVE2_2VectorArg_Long_Intrinsic;
2455 def int_aarch64_sve_sqdmullt    : SVE2_2VectorArg_Long_Intrinsic;
2456 def int_aarch64_sve_sqdmlalbt   : SVE2_3VectorArg_Long_Intrinsic;
2457 def int_aarch64_sve_sqdmlslbt   : SVE2_3VectorArg_Long_Intrinsic;
2458
2459 // SVE2 ADDSUB Long Unpredicated.
2460 def int_aarch64_sve_adclb       : AdvSIMD_3VectorArg_Intrinsic;
2461 def int_aarch64_sve_adclt       : AdvSIMD_3VectorArg_Intrinsic;
2462 def int_aarch64_sve_sbclb       : AdvSIMD_3VectorArg_Intrinsic;
2463 def int_aarch64_sve_sbclt       : AdvSIMD_3VectorArg_Intrinsic;
2464
2465 //
2466 // SVE2 - Polynomial arithmetic
2467 //
2468 def int_aarch64_sve_eorbt       : AdvSIMD_3VectorArg_Intrinsic;
2469 def int_aarch64_sve_eortb       : AdvSIMD_3VectorArg_Intrinsic;
2470 def int_aarch64_sve_pmullb_pair : AdvSIMD_2VectorArg_Intrinsic;
2471 def int_aarch64_sve_pmullt_pair : AdvSIMD_2VectorArg_Intrinsic;
2472
2473 //
2474 // SVE2 bitwise ternary operations.
2475 //
// Three-input bitwise ops; xar takes a rotate immediate (Indexed class).
2476 def int_aarch64_sve_eor3   : AdvSIMD_3VectorArg_Intrinsic;
2477 def int_aarch64_sve_bcax   : AdvSIMD_3VectorArg_Intrinsic;
2478 def int_aarch64_sve_bsl    : AdvSIMD_3VectorArg_Intrinsic;
2479 def int_aarch64_sve_bsl1n  : AdvSIMD_3VectorArg_Intrinsic;
2480 def int_aarch64_sve_bsl2n  : AdvSIMD_3VectorArg_Intrinsic;
2481 def int_aarch64_sve_nbsl   : AdvSIMD_3VectorArg_Intrinsic;
2482 def int_aarch64_sve_xar    : AdvSIMD_2VectorArgIndexed_Intrinsic;
2483
2484 //
2485 // SVE2 - Optional AES, SHA-3 and SM4
2486 //
2487
// Each intrinsic is tied to a Clang builtin via GCCBuiltin and is a pure
// value computation (IntrNoMem) on fixed scalable-vector element types.
2488 def int_aarch64_sve_aesd    : GCCBuiltin<"__builtin_sve_svaesd_u8">,
2489                               DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
2490                                         [llvm_nxv16i8_ty, llvm_nxv16i8_ty],
2491                                         [IntrNoMem]>;
2492 def int_aarch64_sve_aesimc  : GCCBuiltin<"__builtin_sve_svaesimc_u8">,
2493                               DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
2494                                         [llvm_nxv16i8_ty],
2495                                         [IntrNoMem]>;
2496 def int_aarch64_sve_aese    : GCCBuiltin<"__builtin_sve_svaese_u8">,
2497                               DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
2498                                         [llvm_nxv16i8_ty, llvm_nxv16i8_ty],
2499                                         [IntrNoMem]>;
2500 def int_aarch64_sve_aesmc   : GCCBuiltin<"__builtin_sve_svaesmc_u8">,
2501                               DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
2502                                         [llvm_nxv16i8_ty],
2503                                         [IntrNoMem]>;
2504 def int_aarch64_sve_rax1    : GCCBuiltin<"__builtin_sve_svrax1_u64">,
2505                               DefaultAttrsIntrinsic<[llvm_nxv2i64_ty],
2506                                         [llvm_nxv2i64_ty, llvm_nxv2i64_ty],
2507                                         [IntrNoMem]>;
2508 def int_aarch64_sve_sm4e    : GCCBuiltin<"__builtin_sve_svsm4e_u32">,
2509                               DefaultAttrsIntrinsic<[llvm_nxv4i32_ty],
2510                                         [llvm_nxv4i32_ty, llvm_nxv4i32_ty],
2511                                         [IntrNoMem]>;
2512 def int_aarch64_sve_sm4ekey : GCCBuiltin<"__builtin_sve_svsm4ekey_u32">,
2513                               DefaultAttrsIntrinsic<[llvm_nxv4i32_ty],
2514                                         [llvm_nxv4i32_ty, llvm_nxv4i32_ty],
2515                                         [IntrNoMem]>;
2516 //
2517 // SVE2 - Extended table lookup/permute
2518 //
2519
2520 def int_aarch64_sve_tbl2 : AdvSIMD_SVE2_TBX_Intrinsic;
2521 def int_aarch64_sve_tbx  : AdvSIMD_SVE2_TBX_Intrinsic;
2522
2523 //
2524 // SVE2 - Optional bit permutation
2525 //
2526
// Unpredicated (_x) bit deposit/extract/group operations.
2527 def int_aarch64_sve_bdep_x : AdvSIMD_2VectorArg_Intrinsic;
2528 def int_aarch64_sve_bext_x : AdvSIMD_2VectorArg_Intrinsic;
2529 def int_aarch64_sve_bgrp_x : AdvSIMD_2VectorArg_Intrinsic;
2530
2531
2532 //
2533 // SVE ACLE: 7.3. INT8 matrix multiply extensions
2534 //
2535 def int_aarch64_sve_ummla : SVE_MatMul_Intrinsic;
2536 def int_aarch64_sve_smmla : SVE_MatMul_Intrinsic;
2537 def int_aarch64_sve_usmmla : SVE_MatMul_Intrinsic;
2538
2539 def int_aarch64_sve_usdot : AdvSIMD_SVE_DOT_Intrinsic;
2540 def int_aarch64_sve_usdot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
2541 def int_aarch64_sve_sudot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
2542
2543 //
2544 // SVE ACLE: 7.4/5. FP64/FP32 matrix multiply extensions
2545 //
2546 def int_aarch64_sve_fmmla : AdvSIMD_3VectorArg_Intrinsic;
2547
2548 //
2549 // SVE ACLE: 7.2. BFloat16 extensions
2550 //
2551
// BF16 dot/multiply-accumulate; each has a plain and a _lane (indexed)
// form sharing the SVE_4Vec_BF16 signature classes.
2552 def int_aarch64_sve_bfdot   : SVE_4Vec_BF16;
2553 def int_aarch64_sve_bfmlalb : SVE_4Vec_BF16;
2554 def int_aarch64_sve_bfmlalt : SVE_4Vec_BF16;
2555
2556 def int_aarch64_sve_bfmmla  : SVE_4Vec_BF16;
2557
2558 def int_aarch64_sve_bfdot_lane   : SVE_4Vec_BF16_Indexed;
2559 def int_aarch64_sve_bfmlalb_lane : SVE_4Vec_BF16_Indexed;
2560 def int_aarch64_sve_bfmlalt_lane : SVE_4Vec_BF16_Indexed;
2561 }
2562
2563 //
2564 // SVE2 - Contiguous conflict detection
2565 //
2566
// NOTE(review): these defs sit after the closing brace of the
// `let TargetPrefix = "aarch64"` block above; the prefix is presumably
// carried by SVE2_CONFLICT_DETECT_Intrinsic itself -- confirm.
// The _b/_h/_s/_d variants all share the same signature class.
2567 def int_aarch64_sve_whilerw_b : SVE2_CONFLICT_DETECT_Intrinsic;
2568 def int_aarch64_sve_whilerw_h : SVE2_CONFLICT_DETECT_Intrinsic;
2569 def int_aarch64_sve_whilerw_s : SVE2_CONFLICT_DETECT_Intrinsic;
2570 def int_aarch64_sve_whilerw_d : SVE2_CONFLICT_DETECT_Intrinsic;
2571 def int_aarch64_sve_whilewr_b : SVE2_CONFLICT_DETECT_Intrinsic;
2572 def int_aarch64_sve_whilewr_h : SVE2_CONFLICT_DETECT_Intrinsic;
2573 def int_aarch64_sve_whilewr_s : SVE2_CONFLICT_DETECT_Intrinsic;
2574 def int_aarch64_sve_whilewr_d : SVE2_CONFLICT_DETECT_Intrinsic;