//=- AArch64RegisterInfo.td - Describe the AArch64 Registers -*- tablegen -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
// Base class for all AArch64 registers: carries the hardware encoding,
// the register name, optional sub-registers and optional alternate names.
class AArch64Reg<bits<16> enc, string n, list<Register> subregs = [],
                 list<string> altNames = []>
    : Register<n, altNames> {
  // The 16-bit hardware encoding; without this the `enc` parameter is unused.
  let HWEncoding = enc;
  let Namespace = "AArch64";
  let SubRegs = subregs;
}
// Sub-register indices used throughout this file.
let Namespace = "AArch64" in {
def sub_32 : SubRegIndex<32>;

def bsub : SubRegIndex<8>;
def hsub : SubRegIndex<16>;
def ssub : SubRegIndex<32>;
// dsub is the 64-bit D-register view of a vector register, so its size is
// 64 bits (a 32-bit size here would contradict the D registers it indexes).
def dsub : SubRegIndex<64>;
def sube32 : SubRegIndex<32>;
def subo32 : SubRegIndex<32>;
def qhisub : SubRegIndex<64>;
def qsub : SubRegIndex<64>;
def sube64 : SubRegIndex<64>;
def subo64 : SubRegIndex<64>;
// SVE
def zsub : SubRegIndex<128>;
// Note: zsub_hi should never be used directly because it represents
// the scalable part of the SVE vector and cannot be manipulated as a
// subvector in the same way the lower 128bits can.
def zsub_hi : SubRegIndex<128>;
// Note: Code depends on these having consecutive numbers
def dsub0 : SubRegIndex<64>;
def dsub1 : SubRegIndex<64>;
def dsub2 : SubRegIndex<64>;
def dsub3 : SubRegIndex<64>;
// Note: Code depends on these having consecutive numbers
def qsub0 : SubRegIndex<128>;
def qsub1 : SubRegIndex<128>;
def qsub2 : SubRegIndex<128>;
def qsub3 : SubRegIndex<128>;
}
// Alternate-name indices: "vN" vector syntax and single-element vector lists.
let Namespace = "AArch64" in {
def vreg : RegAltNameIndex;
def vlist1 : RegAltNameIndex;
}
//===----------------------------------------------------------------------===//
// Registers
//===----------------------------------------------------------------------===//
// 32-bit general-purpose registers, with their DWARF numbers.
def W0 : AArch64Reg<0, "w0" >, DwarfRegNum<[0]>;
def W1 : AArch64Reg<1, "w1" >, DwarfRegNum<[1]>;
def W2 : AArch64Reg<2, "w2" >, DwarfRegNum<[2]>;
def W3 : AArch64Reg<3, "w3" >, DwarfRegNum<[3]>;
def W4 : AArch64Reg<4, "w4" >, DwarfRegNum<[4]>;
def W5 : AArch64Reg<5, "w5" >, DwarfRegNum<[5]>;
def W6 : AArch64Reg<6, "w6" >, DwarfRegNum<[6]>;
def W7 : AArch64Reg<7, "w7" >, DwarfRegNum<[7]>;
def W8 : AArch64Reg<8, "w8" >, DwarfRegNum<[8]>;
def W9 : AArch64Reg<9, "w9" >, DwarfRegNum<[9]>;
def W10 : AArch64Reg<10, "w10">, DwarfRegNum<[10]>;
def W11 : AArch64Reg<11, "w11">, DwarfRegNum<[11]>;
def W12 : AArch64Reg<12, "w12">, DwarfRegNum<[12]>;
def W13 : AArch64Reg<13, "w13">, DwarfRegNum<[13]>;
def W14 : AArch64Reg<14, "w14">, DwarfRegNum<[14]>;
def W15 : AArch64Reg<15, "w15">, DwarfRegNum<[15]>;
def W16 : AArch64Reg<16, "w16">, DwarfRegNum<[16]>;
def W17 : AArch64Reg<17, "w17">, DwarfRegNum<[17]>;
def W18 : AArch64Reg<18, "w18">, DwarfRegNum<[18]>;
def W19 : AArch64Reg<19, "w19">, DwarfRegNum<[19]>;
def W20 : AArch64Reg<20, "w20">, DwarfRegNum<[20]>;
def W21 : AArch64Reg<21, "w21">, DwarfRegNum<[21]>;
def W22 : AArch64Reg<22, "w22">, DwarfRegNum<[22]>;
def W23 : AArch64Reg<23, "w23">, DwarfRegNum<[23]>;
def W24 : AArch64Reg<24, "w24">, DwarfRegNum<[24]>;
def W25 : AArch64Reg<25, "w25">, DwarfRegNum<[25]>;
def W26 : AArch64Reg<26, "w26">, DwarfRegNum<[26]>;
def W27 : AArch64Reg<27, "w27">, DwarfRegNum<[27]>;
def W28 : AArch64Reg<28, "w28">, DwarfRegNum<[28]>;
def W29 : AArch64Reg<29, "w29">, DwarfRegNum<[29]>;
def W30 : AArch64Reg<30, "w30">, DwarfRegNum<[30]>;
// WSP and WZR share hardware encoding 31; WZR aliases WSP for DWARF purposes.
def WSP : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>;
def WZR : AArch64Reg<31, "wzr">, DwarfRegAlias<WSP>;
// 64-bit general-purpose registers; each has its W register as the sub_32
// sub-register and inherits the corresponding DWARF number via alias.
let SubRegIndices = [sub_32] in {
def X0 : AArch64Reg<0, "x0", [W0]>, DwarfRegAlias<W0>;
def X1 : AArch64Reg<1, "x1", [W1]>, DwarfRegAlias<W1>;
def X2 : AArch64Reg<2, "x2", [W2]>, DwarfRegAlias<W2>;
def X3 : AArch64Reg<3, "x3", [W3]>, DwarfRegAlias<W3>;
def X4 : AArch64Reg<4, "x4", [W4]>, DwarfRegAlias<W4>;
def X5 : AArch64Reg<5, "x5", [W5]>, DwarfRegAlias<W5>;
def X6 : AArch64Reg<6, "x6", [W6]>, DwarfRegAlias<W6>;
def X7 : AArch64Reg<7, "x7", [W7]>, DwarfRegAlias<W7>;
def X8 : AArch64Reg<8, "x8", [W8]>, DwarfRegAlias<W8>;
def X9 : AArch64Reg<9, "x9", [W9]>, DwarfRegAlias<W9>;
def X10 : AArch64Reg<10, "x10", [W10]>, DwarfRegAlias<W10>;
def X11 : AArch64Reg<11, "x11", [W11]>, DwarfRegAlias<W11>;
def X12 : AArch64Reg<12, "x12", [W12]>, DwarfRegAlias<W12>;
def X13 : AArch64Reg<13, "x13", [W13]>, DwarfRegAlias<W13>;
def X14 : AArch64Reg<14, "x14", [W14]>, DwarfRegAlias<W14>;
def X15 : AArch64Reg<15, "x15", [W15]>, DwarfRegAlias<W15>;
def X16 : AArch64Reg<16, "x16", [W16]>, DwarfRegAlias<W16>;
def X17 : AArch64Reg<17, "x17", [W17]>, DwarfRegAlias<W17>;
def X18 : AArch64Reg<18, "x18", [W18]>, DwarfRegAlias<W18>;
def X19 : AArch64Reg<19, "x19", [W19]>, DwarfRegAlias<W19>;
def X20 : AArch64Reg<20, "x20", [W20]>, DwarfRegAlias<W20>;
def X21 : AArch64Reg<21, "x21", [W21]>, DwarfRegAlias<W21>;
def X22 : AArch64Reg<22, "x22", [W22]>, DwarfRegAlias<W22>;
def X23 : AArch64Reg<23, "x23", [W23]>, DwarfRegAlias<W23>;
def X24 : AArch64Reg<24, "x24", [W24]>, DwarfRegAlias<W24>;
def X25 : AArch64Reg<25, "x25", [W25]>, DwarfRegAlias<W25>;
def X26 : AArch64Reg<26, "x26", [W26]>, DwarfRegAlias<W26>;
def X27 : AArch64Reg<27, "x27", [W27]>, DwarfRegAlias<W27>;
def X28 : AArch64Reg<28, "x28", [W28]>, DwarfRegAlias<W28>;
def FP : AArch64Reg<29, "x29", [W29]>, DwarfRegAlias<W29>;
def LR : AArch64Reg<30, "x30", [W30]>, DwarfRegAlias<W30>;
def SP : AArch64Reg<31, "sp", [WSP]>, DwarfRegAlias<WSP>;
def XZR : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias<WSP>;
}
// Condition code register.
def NZCV : AArch64Reg<0, "nzcv">;

// First fault status register
def FFR : AArch64Reg<0, "ffr">, DwarfRegNum<[47]>;
// GPR register classes with the intersections of GPR32/GPR32sp and
// GPR64/GPR64sp for use by the coalescer.
def GPR32common : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 30)> {
  let AltOrders = [(rotl GPR32common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64common : RegisterClass<"AArch64", [i64], 64,
                                (add (sequence "X%u", 0, 28), FP, LR)> {
  let AltOrders = [(rotl GPR64common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
// GPR register classes which exclude SP/WSP.
def GPR32 : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR)> {
  let AltOrders = [(rotl GPR32, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64 : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR)> {
  let AltOrders = [(rotl GPR64, 8)];
  let AltOrderSelect = [{ return 1; }];
}
// GPR register classes which include SP/WSP.
def GPR32sp : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WSP)> {
  let AltOrders = [(rotl GPR32sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64sp : RegisterClass<"AArch64", [i64], 64, (add GPR64common, SP)> {
  let AltOrders = [(rotl GPR64sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}

// Single-register classes containing only the stack pointer.
def GPR32sponly : RegisterClass<"AArch64", [i32], 32, (add WSP)>;
def GPR64sponly : RegisterClass<"AArch64", [i64], 64, (add SP)>;
// Operand that parses "x0" etc. or the literal "0" for the stack pointer
// with an optional ", #0" suffix (e.g. for exclusive loads/stores).
def GPR64spPlus0Operand : AsmOperandClass {
  let Name = "GPR64sp0";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR64<AArch64::GPR64spRegClassID>";
  let ParserMethod = "tryParseGPR64sp0Operand";
}

def GPR64sp0 : RegisterOperand<GPR64sp> {
  let ParserMatchClass = GPR64spPlus0Operand;
}
// GPR32/GPR64 but with zero-register substitution enabled.
// TODO: Roll this out to GPR32/GPR64/GPR32all/GPR64all.
def GPR32z : RegisterOperand<GPR32> {
  let GIZeroRegister = WZR;
}
def GPR64z : RegisterOperand<GPR64> {
  let GIZeroRegister = XZR;
}
// GPR register classes which include WZR/XZR AND SP/WSP. This is not a
// constraint used by any instructions, it is used as a common super-class.
def GPR32all : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR, WSP)>;
def GPR64all : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR, SP)>;
// For tail calls, we can't use callee-saved registers, as they are restored
// to the saved value before the tail call, which would clobber a call address.
// This is for indirect tail calls to store the address of the destination.
def tcGPR64 : RegisterClass<"AArch64", [i64], 64, (sub GPR64common, X19, X20, X21,
                                                     X22, X23, X24, X25, X26,
                                                     X27, X28, FP, LR)>;
// Restricted set of tail call registers, for use when branch target
// enforcement is enabled. These are the only registers which can be used to
// indirectly branch (not call) to the "BTI c" instruction at the start of a
// BTI-protected function.
def rtcGPR64 : RegisterClass<"AArch64", [i64], 64, (add X16, X17)>;
// GPR register classes for post increment amount of vector load/store that
// has alternate printing when Rm=31 and prints a constant immediate value
// equal to the total number of bytes transferred.

// FIXME: TableGen *should* be able to do these itself now. There appears to be
// a bug in counting how many operands a Post-indexed MCInst should have which
// means the aliases don't trigger.
def GPR64pi1 : RegisterOperand<GPR64, "printPostIncOperand<1>">;
def GPR64pi2 : RegisterOperand<GPR64, "printPostIncOperand<2>">;
def GPR64pi3 : RegisterOperand<GPR64, "printPostIncOperand<3>">;
def GPR64pi4 : RegisterOperand<GPR64, "printPostIncOperand<4>">;
def GPR64pi6 : RegisterOperand<GPR64, "printPostIncOperand<6>">;
def GPR64pi8 : RegisterOperand<GPR64, "printPostIncOperand<8>">;
def GPR64pi12 : RegisterOperand<GPR64, "printPostIncOperand<12>">;
def GPR64pi16 : RegisterOperand<GPR64, "printPostIncOperand<16>">;
def GPR64pi24 : RegisterOperand<GPR64, "printPostIncOperand<24>">;
def GPR64pi32 : RegisterOperand<GPR64, "printPostIncOperand<32>">;
def GPR64pi48 : RegisterOperand<GPR64, "printPostIncOperand<48>">;
def GPR64pi64 : RegisterOperand<GPR64, "printPostIncOperand<64>">;
// Condition code regclass.
def CCR : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
  let CopyCost = -1; // Don't allow copying of status registers.

  // CCR is not allocatable.
  let isAllocatable = 0;
}
//===----------------------------------------------------------------------===//
// Floating Point Scalar Registers
//===----------------------------------------------------------------------===//
// 8-bit views of the vector registers, with their DWARF numbers (64-95).
def B0 : AArch64Reg<0, "b0">, DwarfRegNum<[64]>;
def B1 : AArch64Reg<1, "b1">, DwarfRegNum<[65]>;
def B2 : AArch64Reg<2, "b2">, DwarfRegNum<[66]>;
def B3 : AArch64Reg<3, "b3">, DwarfRegNum<[67]>;
def B4 : AArch64Reg<4, "b4">, DwarfRegNum<[68]>;
def B5 : AArch64Reg<5, "b5">, DwarfRegNum<[69]>;
def B6 : AArch64Reg<6, "b6">, DwarfRegNum<[70]>;
def B7 : AArch64Reg<7, "b7">, DwarfRegNum<[71]>;
def B8 : AArch64Reg<8, "b8">, DwarfRegNum<[72]>;
def B9 : AArch64Reg<9, "b9">, DwarfRegNum<[73]>;
def B10 : AArch64Reg<10, "b10">, DwarfRegNum<[74]>;
def B11 : AArch64Reg<11, "b11">, DwarfRegNum<[75]>;
def B12 : AArch64Reg<12, "b12">, DwarfRegNum<[76]>;
def B13 : AArch64Reg<13, "b13">, DwarfRegNum<[77]>;
def B14 : AArch64Reg<14, "b14">, DwarfRegNum<[78]>;
def B15 : AArch64Reg<15, "b15">, DwarfRegNum<[79]>;
def B16 : AArch64Reg<16, "b16">, DwarfRegNum<[80]>;
def B17 : AArch64Reg<17, "b17">, DwarfRegNum<[81]>;
def B18 : AArch64Reg<18, "b18">, DwarfRegNum<[82]>;
def B19 : AArch64Reg<19, "b19">, DwarfRegNum<[83]>;
def B20 : AArch64Reg<20, "b20">, DwarfRegNum<[84]>;
def B21 : AArch64Reg<21, "b21">, DwarfRegNum<[85]>;
def B22 : AArch64Reg<22, "b22">, DwarfRegNum<[86]>;
def B23 : AArch64Reg<23, "b23">, DwarfRegNum<[87]>;
def B24 : AArch64Reg<24, "b24">, DwarfRegNum<[88]>;
def B25 : AArch64Reg<25, "b25">, DwarfRegNum<[89]>;
def B26 : AArch64Reg<26, "b26">, DwarfRegNum<[90]>;
def B27 : AArch64Reg<27, "b27">, DwarfRegNum<[91]>;
def B28 : AArch64Reg<28, "b28">, DwarfRegNum<[92]>;
def B29 : AArch64Reg<29, "b29">, DwarfRegNum<[93]>;
def B30 : AArch64Reg<30, "b30">, DwarfRegNum<[94]>;
def B31 : AArch64Reg<31, "b31">, DwarfRegNum<[95]>;
// 16-bit views; each H register contains the matching B register via bsub.
let SubRegIndices = [bsub] in {
def H0 : AArch64Reg<0, "h0", [B0]>, DwarfRegAlias<B0>;
def H1 : AArch64Reg<1, "h1", [B1]>, DwarfRegAlias<B1>;
def H2 : AArch64Reg<2, "h2", [B2]>, DwarfRegAlias<B2>;
def H3 : AArch64Reg<3, "h3", [B3]>, DwarfRegAlias<B3>;
def H4 : AArch64Reg<4, "h4", [B4]>, DwarfRegAlias<B4>;
def H5 : AArch64Reg<5, "h5", [B5]>, DwarfRegAlias<B5>;
def H6 : AArch64Reg<6, "h6", [B6]>, DwarfRegAlias<B6>;
def H7 : AArch64Reg<7, "h7", [B7]>, DwarfRegAlias<B7>;
def H8 : AArch64Reg<8, "h8", [B8]>, DwarfRegAlias<B8>;
def H9 : AArch64Reg<9, "h9", [B9]>, DwarfRegAlias<B9>;
def H10 : AArch64Reg<10, "h10", [B10]>, DwarfRegAlias<B10>;
def H11 : AArch64Reg<11, "h11", [B11]>, DwarfRegAlias<B11>;
def H12 : AArch64Reg<12, "h12", [B12]>, DwarfRegAlias<B12>;
def H13 : AArch64Reg<13, "h13", [B13]>, DwarfRegAlias<B13>;
def H14 : AArch64Reg<14, "h14", [B14]>, DwarfRegAlias<B14>;
def H15 : AArch64Reg<15, "h15", [B15]>, DwarfRegAlias<B15>;
def H16 : AArch64Reg<16, "h16", [B16]>, DwarfRegAlias<B16>;
def H17 : AArch64Reg<17, "h17", [B17]>, DwarfRegAlias<B17>;
def H18 : AArch64Reg<18, "h18", [B18]>, DwarfRegAlias<B18>;
def H19 : AArch64Reg<19, "h19", [B19]>, DwarfRegAlias<B19>;
def H20 : AArch64Reg<20, "h20", [B20]>, DwarfRegAlias<B20>;
def H21 : AArch64Reg<21, "h21", [B21]>, DwarfRegAlias<B21>;
def H22 : AArch64Reg<22, "h22", [B22]>, DwarfRegAlias<B22>;
def H23 : AArch64Reg<23, "h23", [B23]>, DwarfRegAlias<B23>;
def H24 : AArch64Reg<24, "h24", [B24]>, DwarfRegAlias<B24>;
def H25 : AArch64Reg<25, "h25", [B25]>, DwarfRegAlias<B25>;
def H26 : AArch64Reg<26, "h26", [B26]>, DwarfRegAlias<B26>;
def H27 : AArch64Reg<27, "h27", [B27]>, DwarfRegAlias<B27>;
def H28 : AArch64Reg<28, "h28", [B28]>, DwarfRegAlias<B28>;
def H29 : AArch64Reg<29, "h29", [B29]>, DwarfRegAlias<B29>;
def H30 : AArch64Reg<30, "h30", [B30]>, DwarfRegAlias<B30>;
def H31 : AArch64Reg<31, "h31", [B31]>, DwarfRegAlias<B31>;
}
// 32-bit views; each S register contains the matching H register via hsub.
let SubRegIndices = [hsub] in {
def S0 : AArch64Reg<0, "s0", [H0]>, DwarfRegAlias<B0>;
def S1 : AArch64Reg<1, "s1", [H1]>, DwarfRegAlias<B1>;
def S2 : AArch64Reg<2, "s2", [H2]>, DwarfRegAlias<B2>;
def S3 : AArch64Reg<3, "s3", [H3]>, DwarfRegAlias<B3>;
def S4 : AArch64Reg<4, "s4", [H4]>, DwarfRegAlias<B4>;
def S5 : AArch64Reg<5, "s5", [H5]>, DwarfRegAlias<B5>;
def S6 : AArch64Reg<6, "s6", [H6]>, DwarfRegAlias<B6>;
def S7 : AArch64Reg<7, "s7", [H7]>, DwarfRegAlias<B7>;
def S8 : AArch64Reg<8, "s8", [H8]>, DwarfRegAlias<B8>;
def S9 : AArch64Reg<9, "s9", [H9]>, DwarfRegAlias<B9>;
def S10 : AArch64Reg<10, "s10", [H10]>, DwarfRegAlias<B10>;
def S11 : AArch64Reg<11, "s11", [H11]>, DwarfRegAlias<B11>;
def S12 : AArch64Reg<12, "s12", [H12]>, DwarfRegAlias<B12>;
def S13 : AArch64Reg<13, "s13", [H13]>, DwarfRegAlias<B13>;
def S14 : AArch64Reg<14, "s14", [H14]>, DwarfRegAlias<B14>;
def S15 : AArch64Reg<15, "s15", [H15]>, DwarfRegAlias<B15>;
def S16 : AArch64Reg<16, "s16", [H16]>, DwarfRegAlias<B16>;
def S17 : AArch64Reg<17, "s17", [H17]>, DwarfRegAlias<B17>;
def S18 : AArch64Reg<18, "s18", [H18]>, DwarfRegAlias<B18>;
def S19 : AArch64Reg<19, "s19", [H19]>, DwarfRegAlias<B19>;
def S20 : AArch64Reg<20, "s20", [H20]>, DwarfRegAlias<B20>;
def S21 : AArch64Reg<21, "s21", [H21]>, DwarfRegAlias<B21>;
def S22 : AArch64Reg<22, "s22", [H22]>, DwarfRegAlias<B22>;
def S23 : AArch64Reg<23, "s23", [H23]>, DwarfRegAlias<B23>;
def S24 : AArch64Reg<24, "s24", [H24]>, DwarfRegAlias<B24>;
def S25 : AArch64Reg<25, "s25", [H25]>, DwarfRegAlias<B25>;
def S26 : AArch64Reg<26, "s26", [H26]>, DwarfRegAlias<B26>;
def S27 : AArch64Reg<27, "s27", [H27]>, DwarfRegAlias<B27>;
def S28 : AArch64Reg<28, "s28", [H28]>, DwarfRegAlias<B28>;
def S29 : AArch64Reg<29, "s29", [H29]>, DwarfRegAlias<B29>;
def S30 : AArch64Reg<30, "s30", [H30]>, DwarfRegAlias<B30>;
def S31 : AArch64Reg<31, "s31", [H31]>, DwarfRegAlias<B31>;
}
// 64-bit views; each D register contains the matching S register via ssub
// and also carries the "vN" alternate (vector) names.
let SubRegIndices = [ssub], RegAltNameIndices = [vreg, vlist1] in {
def D0 : AArch64Reg<0, "d0", [S0], ["v0", ""]>, DwarfRegAlias<B0>;
def D1 : AArch64Reg<1, "d1", [S1], ["v1", ""]>, DwarfRegAlias<B1>;
def D2 : AArch64Reg<2, "d2", [S2], ["v2", ""]>, DwarfRegAlias<B2>;
def D3 : AArch64Reg<3, "d3", [S3], ["v3", ""]>, DwarfRegAlias<B3>;
def D4 : AArch64Reg<4, "d4", [S4], ["v4", ""]>, DwarfRegAlias<B4>;
def D5 : AArch64Reg<5, "d5", [S5], ["v5", ""]>, DwarfRegAlias<B5>;
def D6 : AArch64Reg<6, "d6", [S6], ["v6", ""]>, DwarfRegAlias<B6>;
def D7 : AArch64Reg<7, "d7", [S7], ["v7", ""]>, DwarfRegAlias<B7>;
def D8 : AArch64Reg<8, "d8", [S8], ["v8", ""]>, DwarfRegAlias<B8>;
def D9 : AArch64Reg<9, "d9", [S9], ["v9", ""]>, DwarfRegAlias<B9>;
def D10 : AArch64Reg<10, "d10", [S10], ["v10", ""]>, DwarfRegAlias<B10>;
def D11 : AArch64Reg<11, "d11", [S11], ["v11", ""]>, DwarfRegAlias<B11>;
def D12 : AArch64Reg<12, "d12", [S12], ["v12", ""]>, DwarfRegAlias<B12>;
def D13 : AArch64Reg<13, "d13", [S13], ["v13", ""]>, DwarfRegAlias<B13>;
def D14 : AArch64Reg<14, "d14", [S14], ["v14", ""]>, DwarfRegAlias<B14>;
def D15 : AArch64Reg<15, "d15", [S15], ["v15", ""]>, DwarfRegAlias<B15>;
def D16 : AArch64Reg<16, "d16", [S16], ["v16", ""]>, DwarfRegAlias<B16>;
def D17 : AArch64Reg<17, "d17", [S17], ["v17", ""]>, DwarfRegAlias<B17>;
def D18 : AArch64Reg<18, "d18", [S18], ["v18", ""]>, DwarfRegAlias<B18>;
def D19 : AArch64Reg<19, "d19", [S19], ["v19", ""]>, DwarfRegAlias<B19>;
def D20 : AArch64Reg<20, "d20", [S20], ["v20", ""]>, DwarfRegAlias<B20>;
def D21 : AArch64Reg<21, "d21", [S21], ["v21", ""]>, DwarfRegAlias<B21>;
def D22 : AArch64Reg<22, "d22", [S22], ["v22", ""]>, DwarfRegAlias<B22>;
def D23 : AArch64Reg<23, "d23", [S23], ["v23", ""]>, DwarfRegAlias<B23>;
def D24 : AArch64Reg<24, "d24", [S24], ["v24", ""]>, DwarfRegAlias<B24>;
def D25 : AArch64Reg<25, "d25", [S25], ["v25", ""]>, DwarfRegAlias<B25>;
def D26 : AArch64Reg<26, "d26", [S26], ["v26", ""]>, DwarfRegAlias<B26>;
def D27 : AArch64Reg<27, "d27", [S27], ["v27", ""]>, DwarfRegAlias<B27>;
def D28 : AArch64Reg<28, "d28", [S28], ["v28", ""]>, DwarfRegAlias<B28>;
def D29 : AArch64Reg<29, "d29", [S29], ["v29", ""]>, DwarfRegAlias<B29>;
def D30 : AArch64Reg<30, "d30", [S30], ["v30", ""]>, DwarfRegAlias<B30>;
def D31 : AArch64Reg<31, "d31", [S31], ["v31", ""]>, DwarfRegAlias<B31>;
}
// 128-bit views; each Q register contains the matching D register via dsub
// and also carries the "vN" alternate (vector) names.
let SubRegIndices = [dsub], RegAltNameIndices = [vreg, vlist1] in {
def Q0 : AArch64Reg<0, "q0", [D0], ["v0", ""]>, DwarfRegAlias<B0>;
def Q1 : AArch64Reg<1, "q1", [D1], ["v1", ""]>, DwarfRegAlias<B1>;
def Q2 : AArch64Reg<2, "q2", [D2], ["v2", ""]>, DwarfRegAlias<B2>;
def Q3 : AArch64Reg<3, "q3", [D3], ["v3", ""]>, DwarfRegAlias<B3>;
def Q4 : AArch64Reg<4, "q4", [D4], ["v4", ""]>, DwarfRegAlias<B4>;
def Q5 : AArch64Reg<5, "q5", [D5], ["v5", ""]>, DwarfRegAlias<B5>;
def Q6 : AArch64Reg<6, "q6", [D6], ["v6", ""]>, DwarfRegAlias<B6>;
def Q7 : AArch64Reg<7, "q7", [D7], ["v7", ""]>, DwarfRegAlias<B7>;
def Q8 : AArch64Reg<8, "q8", [D8], ["v8", ""]>, DwarfRegAlias<B8>;
def Q9 : AArch64Reg<9, "q9", [D9], ["v9", ""]>, DwarfRegAlias<B9>;
def Q10 : AArch64Reg<10, "q10", [D10], ["v10", ""]>, DwarfRegAlias<B10>;
def Q11 : AArch64Reg<11, "q11", [D11], ["v11", ""]>, DwarfRegAlias<B11>;
def Q12 : AArch64Reg<12, "q12", [D12], ["v12", ""]>, DwarfRegAlias<B12>;
def Q13 : AArch64Reg<13, "q13", [D13], ["v13", ""]>, DwarfRegAlias<B13>;
def Q14 : AArch64Reg<14, "q14", [D14], ["v14", ""]>, DwarfRegAlias<B14>;
def Q15 : AArch64Reg<15, "q15", [D15], ["v15", ""]>, DwarfRegAlias<B15>;
def Q16 : AArch64Reg<16, "q16", [D16], ["v16", ""]>, DwarfRegAlias<B16>;
def Q17 : AArch64Reg<17, "q17", [D17], ["v17", ""]>, DwarfRegAlias<B17>;
def Q18 : AArch64Reg<18, "q18", [D18], ["v18", ""]>, DwarfRegAlias<B18>;
def Q19 : AArch64Reg<19, "q19", [D19], ["v19", ""]>, DwarfRegAlias<B19>;
def Q20 : AArch64Reg<20, "q20", [D20], ["v20", ""]>, DwarfRegAlias<B20>;
def Q21 : AArch64Reg<21, "q21", [D21], ["v21", ""]>, DwarfRegAlias<B21>;
def Q22 : AArch64Reg<22, "q22", [D22], ["v22", ""]>, DwarfRegAlias<B22>;
def Q23 : AArch64Reg<23, "q23", [D23], ["v23", ""]>, DwarfRegAlias<B23>;
def Q24 : AArch64Reg<24, "q24", [D24], ["v24", ""]>, DwarfRegAlias<B24>;
def Q25 : AArch64Reg<25, "q25", [D25], ["v25", ""]>, DwarfRegAlias<B25>;
def Q26 : AArch64Reg<26, "q26", [D26], ["v26", ""]>, DwarfRegAlias<B26>;
def Q27 : AArch64Reg<27, "q27", [D27], ["v27", ""]>, DwarfRegAlias<B27>;
def Q28 : AArch64Reg<28, "q28", [D28], ["v28", ""]>, DwarfRegAlias<B28>;
def Q29 : AArch64Reg<29, "q29", [D29], ["v29", ""]>, DwarfRegAlias<B29>;
def Q30 : AArch64Reg<30, "q30", [D30], ["v30", ""]>, DwarfRegAlias<B30>;
def Q31 : AArch64Reg<31, "q31", [D31], ["v31", ""]>, DwarfRegAlias<B31>;
}
// Scalar FP register classes at each width. FPR8/FPR16 pin their spill size
// explicitly since "untyped"/f16 contents are narrower than a machine word.
def FPR8 : RegisterClass<"AArch64", [untyped], 8, (sequence "B%u", 0, 31)> {
  let Size = 8;
}
def FPR16 : RegisterClass<"AArch64", [f16], 16, (sequence "H%u", 0, 31)> {
  let Size = 16;
}
def FPR32 : RegisterClass<"AArch64", [f32, i32], 32,(sequence "S%u", 0, 31)>;
def FPR64 : RegisterClass<"AArch64", [f64, i64, v2f32, v1f64, v8i8, v4i16, v2i32,
                                    v1i64, v4f16],
                                    64, (sequence "D%u", 0, 31)>;
// We don't (yet) have an f128 legal type, so don't use that here. We
// normalize 128-bit vectors to v2f64 for arg passing and such, so use
// that here.
def FPR128 : RegisterClass<"AArch64",
                           [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128,
                            v8f16],
                           128, (sequence "Q%u", 0, 31)>;
// The lower 16 vector registers. Some instructions can only take registers
// in this range.
def FPR128_lo : RegisterClass<"AArch64",
                              [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16],
                              128, (trunc FPR128, 16)>;
// Pairs, triples, and quads of 64-bit vector registers.
def DSeqPairs : RegisterTuples<[dsub0, dsub1], [(rotl FPR64, 0), (rotl FPR64, 1)]>;
def DSeqTriples : RegisterTuples<[dsub0, dsub1, dsub2],
                                 [(rotl FPR64, 0), (rotl FPR64, 1),
                                  (rotl FPR64, 2)]>;
def DSeqQuads : RegisterTuples<[dsub0, dsub1, dsub2, dsub3],
                               [(rotl FPR64, 0), (rotl FPR64, 1),
                                (rotl FPR64, 2), (rotl FPR64, 3)]>;
def DD : RegisterClass<"AArch64", [untyped], 64, (add DSeqPairs)> {
  let Size = 128;
}
def DDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqTriples)> {
  let Size = 192;
}
def DDDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqQuads)> {
  let Size = 256;
}
// Pairs, triples, and quads of 128-bit vector registers.
def QSeqPairs : RegisterTuples<[qsub0, qsub1], [(rotl FPR128, 0), (rotl FPR128, 1)]>;
def QSeqTriples : RegisterTuples<[qsub0, qsub1, qsub2],
                                 [(rotl FPR128, 0), (rotl FPR128, 1),
                                  (rotl FPR128, 2)]>;
def QSeqQuads : RegisterTuples<[qsub0, qsub1, qsub2, qsub3],
                               [(rotl FPR128, 0), (rotl FPR128, 1),
                                (rotl FPR128, 2), (rotl FPR128, 3)]>;
def QQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqPairs)> {
  let Size = 256;
}
def QQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqTriples)> {
  let Size = 384;
}
def QQQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqQuads)> {
  let Size = 512;
}
// Vector operand versions of the FP registers. Alternate name printing and
// assembler matching.
def VectorReg64AsmOperand : AsmOperandClass {
  let Name = "VectorReg64";
  let PredicateMethod = "isNeonVectorReg";
}
def VectorReg128AsmOperand : AsmOperandClass {
  let Name = "VectorReg128";
  let PredicateMethod = "isNeonVectorReg";
}

def V64 : RegisterOperand<FPR64, "printVRegOperand"> {
  let ParserMatchClass = VectorReg64AsmOperand;
}
def V128 : RegisterOperand<FPR128, "printVRegOperand"> {
  let ParserMatchClass = VectorReg128AsmOperand;
}
// Operand matching only the lower 16 vector registers (v0-v15).
def VectorRegLoAsmOperand : AsmOperandClass {
  let Name = "VectorRegLo";
  let PredicateMethod = "isNeonVectorRegLo";
}
def V128_lo : RegisterOperand<FPR128_lo, "printVRegOperand"> {
  let ParserMatchClass = VectorRegLoAsmOperand;
}
// Asm operand for an explicitly-typed vector list, e.g. "{ v0.8b, v1.8b }":
// `count` registers, `lanes` elements of `eltsize` bits each.
class TypedVecListAsmOperand<int count, string vecty, int lanes, int eltsize>
    : AsmOperandClass {
  let Name       = "TypedVectorList" # count # "_" # lanes # eltsize;

  let PredicateMethod
      = "isTypedVectorList<RegKind::NeonVector, " # count # ", " # lanes # ", " # eltsize # ">";
  let RenderMethod = "addVectorListOperands<" # vecty # ", " # count # ">";
}

// Register operand that prints a typed vector list with the given lane count
// and element-size suffix letter.
class TypedVecListRegOperand<RegisterClass Reg, int lanes, string eltsize>
    : RegisterOperand<Reg, "printTypedVectorList<" # lanes # ", '"
                           # eltsize # "'>">;
// Generates all vector-list operand flavours (implicit and explicit element
// types, 64- and 128-bit registers) for lists of `count` registers.
multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
  // With implicit types (probably on instruction instead). E.g. { v0, v1 }
  def _64AsmOperand : AsmOperandClass {
    let Name = NAME # "64";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_DReg, " # count # ">";
  }

  def "64" : RegisterOperand<Reg64, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_64AsmOperand");
  }

  def _128AsmOperand : AsmOperandClass {
    let Name = NAME # "128";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_QReg, " # count # ">";
  }

  def "128" : RegisterOperand<Reg128, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_128AsmOperand");
  }

  // 64-bit register lists with explicit type.

  // { v0.8b, v1.8b }
  def _8bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 8, 8>;
  def "8b" : TypedVecListRegOperand<Reg64, 8, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8bAsmOperand");
  }

  // { v0.4h, v1.4h }
  def _4hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 4, 16>;
  def "4h" : TypedVecListRegOperand<Reg64, 4, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4hAsmOperand");
  }

  // { v0.2s, v1.2s }
  def _2sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 2, 32>;
  def "2s" : TypedVecListRegOperand<Reg64, 2, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2sAsmOperand");
  }

  // { v0.1d, v1.1d }
  def _1dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 1, 64>;
  def "1d" : TypedVecListRegOperand<Reg64, 1, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_1dAsmOperand");
  }

  // 128-bit register lists with explicit type

  // { v0.16b, v1.16b }
  def _16bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 16, 8>;
  def "16b" : TypedVecListRegOperand<Reg128, 16, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_16bAsmOperand");
  }

  // { v0.8h, v1.8h }
  def _8hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 8, 16>;
  def "8h" : TypedVecListRegOperand<Reg128, 8, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8hAsmOperand");
  }

  // { v0.4s, v1.4s }
  def _4sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 4, 32>;
  def "4s" : TypedVecListRegOperand<Reg128, 4, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4sAsmOperand");
  }

  // { v0.2d, v1.2d }
  def _2dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 2, 64>;
  def "2d" : TypedVecListRegOperand<Reg128, 2, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2dAsmOperand");
  }

  // Lane-indexed lists (lane count 0 means "no lane count printed").

  // { v0.b, v1.b }
  def _bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 8>;
  def "b" : TypedVecListRegOperand<Reg128, 0, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_bAsmOperand");
  }

  // { v0.h, v1.h }
  def _hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 16>;
  def "h" : TypedVecListRegOperand<Reg128, 0, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_hAsmOperand");
  }

  // { v0.s, v1.s }
  def _sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 32>;
  def "s" : TypedVecListRegOperand<Reg128, 0, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_sAsmOperand");
  }

  // { v0.d, v1.d }
  def _dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 64>;
  def "d" : TypedVecListRegOperand<Reg128, 0, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_dAsmOperand");
  }
}
// Instantiate the vector-list operands for 1-4 register lists.
defm VecListOne : VectorList<1, FPR64, FPR128>;
defm VecListTwo : VectorList<2, DD, QQ>;
defm VecListThree : VectorList<3, DDD, QQQ>;
defm VecListFour : VectorList<4, DDDD, QQQQ>;
// Asm operand for a scalar FP register class; the isGPR64 template predicate
// just checks membership in the named register class.
class FPRAsmOperand<string RC> : AsmOperandClass {
  let Name = "FPRAsmOperand" # RC;
  let PredicateMethod = "isGPR64<AArch64::" # RC # "RegClassID>";
  let RenderMethod = "addRegOperands";
}

// Register operand versions of the scalar FP registers.
def FPR8Op : RegisterOperand<FPR8, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR8">;
}

def FPR16Op : RegisterOperand<FPR16, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR16">;
}

def FPR32Op : RegisterOperand<FPR32, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR32">;
}

def FPR64Op : RegisterOperand<FPR64, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR64">;
}

def FPR128Op : RegisterOperand<FPR128, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR128">;
}
//===----------------------------------------------------------------------===//
// ARMv8.1a atomic CASP register operands
//===----------------------------------------------------------------------===//

// Even/odd register pairs: decimate by 2 keeps only even-numbered starts.
def WSeqPairs : RegisterTuples<[sube32, subo32],
                               [(decimate (rotl GPR32, 0), 2),
                                (decimate (rotl GPR32, 1), 2)]>;
def XSeqPairs : RegisterTuples<[sube64, subo64],
                               [(decimate (rotl GPR64, 0), 2),
                                (decimate (rotl GPR64, 1), 2)]>;
// Register classes over the even/odd pair tuples; spill size is two regs.
def WSeqPairsClass : RegisterClass<"AArch64", [untyped], 32,
                                   (add WSeqPairs)> {
  let Size = 64;
}
def XSeqPairsClass : RegisterClass<"AArch64", [untyped], 64,
                                   (add XSeqPairs)> {
  let Size = 128;
}
// Parser/printer glue for the W/X sequential-pair operands.
let RenderMethod = "addRegOperands", ParserMethod="tryParseGPRSeqPair" in {
  def WSeqPairsAsmOperandClass : AsmOperandClass { let Name = "WSeqPair"; }
  def XSeqPairsAsmOperandClass : AsmOperandClass { let Name = "XSeqPair"; }
}

def WSeqPairClassOperand :
    RegisterOperand<WSeqPairsClass, "printGPRSeqPairsClassOperand<32>"> {
  let ParserMatchClass = WSeqPairsAsmOperandClass;
}
def XSeqPairClassOperand :
    RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
  let ParserMatchClass = XSeqPairsAsmOperandClass;
}
//===----- END: v8.1a atomic CASP register operands -----------------------===//

// SVE predicate registers, with their DWARF numbers (48-63).
def P0 : AArch64Reg<0, "p0">, DwarfRegNum<[48]>;
def P1 : AArch64Reg<1, "p1">, DwarfRegNum<[49]>;
def P2 : AArch64Reg<2, "p2">, DwarfRegNum<[50]>;
def P3 : AArch64Reg<3, "p3">, DwarfRegNum<[51]>;
def P4 : AArch64Reg<4, "p4">, DwarfRegNum<[52]>;
def P5 : AArch64Reg<5, "p5">, DwarfRegNum<[53]>;
def P6 : AArch64Reg<6, "p6">, DwarfRegNum<[54]>;
def P7 : AArch64Reg<7, "p7">, DwarfRegNum<[55]>;
def P8 : AArch64Reg<8, "p8">, DwarfRegNum<[56]>;
def P9 : AArch64Reg<9, "p9">, DwarfRegNum<[57]>;
def P10 : AArch64Reg<10, "p10">, DwarfRegNum<[58]>;
def P11 : AArch64Reg<11, "p11">, DwarfRegNum<[59]>;
def P12 : AArch64Reg<12, "p12">, DwarfRegNum<[60]>;
def P13 : AArch64Reg<13, "p13">, DwarfRegNum<[61]>;
def P14 : AArch64Reg<14, "p14">, DwarfRegNum<[62]>;
def P15 : AArch64Reg<15, "p15">, DwarfRegNum<[63]>;
// The part of SVE registers that don't overlap Neon registers.
// These are only used as part of clobber lists.
def Z0_HI : AArch64Reg<0, "z0_hi">;
def Z1_HI : AArch64Reg<1, "z1_hi">;
def Z2_HI : AArch64Reg<2, "z2_hi">;
def Z3_HI : AArch64Reg<3, "z3_hi">;
def Z4_HI : AArch64Reg<4, "z4_hi">;
def Z5_HI : AArch64Reg<5, "z5_hi">;
def Z6_HI : AArch64Reg<6, "z6_hi">;
def Z7_HI : AArch64Reg<7, "z7_hi">;
def Z8_HI : AArch64Reg<8, "z8_hi">;
def Z9_HI : AArch64Reg<9, "z9_hi">;
def Z10_HI : AArch64Reg<10, "z10_hi">;
def Z11_HI : AArch64Reg<11, "z11_hi">;
def Z12_HI : AArch64Reg<12, "z12_hi">;
def Z13_HI : AArch64Reg<13, "z13_hi">;
def Z14_HI : AArch64Reg<14, "z14_hi">;
def Z15_HI : AArch64Reg<15, "z15_hi">;
def Z16_HI : AArch64Reg<16, "z16_hi">;
def Z17_HI : AArch64Reg<17, "z17_hi">;
def Z18_HI : AArch64Reg<18, "z18_hi">;
def Z19_HI : AArch64Reg<19, "z19_hi">;
def Z20_HI : AArch64Reg<20, "z20_hi">;
def Z21_HI : AArch64Reg<21, "z21_hi">;
def Z22_HI : AArch64Reg<22, "z22_hi">;
def Z23_HI : AArch64Reg<23, "z23_hi">;
def Z24_HI : AArch64Reg<24, "z24_hi">;
def Z25_HI : AArch64Reg<25, "z25_hi">;
def Z26_HI : AArch64Reg<26, "z26_hi">;
def Z27_HI : AArch64Reg<27, "z27_hi">;
def Z28_HI : AArch64Reg<28, "z28_hi">;
def Z29_HI : AArch64Reg<29, "z29_hi">;
def Z30_HI : AArch64Reg<30, "z30_hi">;
def Z31_HI : AArch64Reg<31, "z31_hi">;
// SVE variable-size vector registers z0-z31.  Each Z register has the
// corresponding NEON Q register as its fixed 128-bit low part (zsub) and a
// Z*_HI pseudo covering the scalable remainder (zsub_hi).  DWARF numbers
// 96-127 follow the SVE DWARF register mapping.
let SubRegIndices = [zsub,zsub_hi] in {
  foreach i = 0-31 in {
    def Z#i : AArch64Reg<i, "z"#i,
                         [!cast<Register>("Q"#i), !cast<Register>("Z"#i#"_HI")]>,
              DwarfRegNum<[!add(i, 96)]>;
  }
// Enum describing the element size for destructive
777 class ElementSizeEnum<bits<3> val> {
// Concrete element-size tokens consumed by the SVE operand classes below;
// ElementSizeNone marks operands with no fixed element width.
def ElementSizeNone : ElementSizeEnum<0>;
def ElementSizeB : ElementSizeEnum<1>;
def ElementSizeH : ElementSizeEnum<2>;
def ElementSizeS : ElementSizeEnum<3>;
def ElementSizeD : ElementSizeEnum<4>;
def ElementSizeQ : ElementSizeEnum<5>; // Unused
// Base class for SVE register operands: couples a register class with an
// element-size suffix character ('b'/'h'/'s'/'d'/'q', or "" for none) that
// is fed to the print method and recorded in the ElementSize field.
class SVERegOp <string Suffix, AsmOperandClass C,
                ElementSizeEnum Size,
                RegisterClass RC> : RegisterOperand<RC> {
  ElementSizeEnum ElementSize;
793 let ElementSize = Size;
794 let PrintMethod = !if(!eq(Suffix, ""),
796 "printSVERegOp<'" # Suffix # "'>");
797 let ParserMatchClass = C;
// Thin wrappers distinguishing predicate (PPR) and data-vector (ZPR) operand
// flavours; both inherit all behaviour from SVERegOp unchanged.
class PPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}
class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}
805 //******************************************************************************
807 // SVE predicate register classes.
808 class PPRClass<int lastreg> : RegisterClass<
810 [ nxv16i1, nxv8i1, nxv4i1, nxv2i1 ], 16,
811 (sequence "P%u", 0, lastreg)> {
// Full SVE predicate register class (p0-p15) and the p0-p7 subset used by
// encodings with only a 3-bit predicate field.
def PPR : PPRClass<15>;
def PPR_3b : PPRClass<7>; // Restricted 3 bit SVE predicate register class.
// Assembler operand class for an SVE predicate register of a given element
// width (Width == 0 is used for the unsuffixed "any" variants).  RegClass
// selects between the full PPR class and the restricted PPR_3b class.
class PPRAsmOperand <string name, string RegClass, int Width>: AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEPredicateVectorRegOfWidth<"
                            # Width # ", " # "AArch64::" # RegClass # "RegClassID>";
  let DiagnosticType = "InvalidSVE" # name # "Reg";
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseSVEPredicateVector";
// Predicate assembler operand classes over the full PPR class, one per
// element width plus an unsuffixed "any" form.
def PPRAsmOpAny : PPRAsmOperand<"PredicateAny", "PPR", 0>;
def PPRAsmOp8 : PPRAsmOperand<"PredicateB", "PPR", 8>;
def PPRAsmOp16 : PPRAsmOperand<"PredicateH", "PPR", 16>;
def PPRAsmOp32 : PPRAsmOperand<"PredicateS", "PPR", 32>;
def PPRAsmOp64 : PPRAsmOperand<"PredicateD", "PPR", 64>;
// Predicate register operands: PPRAny carries no suffix; the others carry
// the 'b'/'h'/'s'/'d' element suffix and matching ElementSize token.
def PPRAny : PPRRegOp<"", PPRAsmOpAny, ElementSizeNone, PPR>;
def PPR8 : PPRRegOp<"b", PPRAsmOp8, ElementSizeB, PPR>;
def PPR16 : PPRRegOp<"h", PPRAsmOp16, ElementSizeH, PPR>;
def PPR32 : PPRRegOp<"s", PPRAsmOp32, ElementSizeS, PPR>;
def PPR64 : PPRRegOp<"d", PPRAsmOp64, ElementSizeD, PPR>;
// Assembler operand classes restricted to the PPR_3b class (p0-p7).
def PPRAsmOp3bAny : PPRAsmOperand<"Predicate3bAny", "PPR_3b", 0>;
def PPRAsmOp3b8 : PPRAsmOperand<"Predicate3bB", "PPR_3b", 8>;
def PPRAsmOp3b16 : PPRAsmOperand<"Predicate3bH", "PPR_3b", 16>;
def PPRAsmOp3b32 : PPRAsmOperand<"Predicate3bS", "PPR_3b", 32>;
def PPRAsmOp3b64 : PPRAsmOperand<"Predicate3bD", "PPR_3b", 64>;
// Register operands over the restricted p0-p7 predicate class, mirroring the
// full-width PPR* operands above.
def PPR3bAny : PPRRegOp<"", PPRAsmOp3bAny, ElementSizeNone, PPR_3b>;
def PPR3b8 : PPRRegOp<"b", PPRAsmOp3b8, ElementSizeB, PPR_3b>;
def PPR3b16 : PPRRegOp<"h", PPRAsmOp3b16, ElementSizeH, PPR_3b>;
def PPR3b32 : PPRRegOp<"s", PPRAsmOp3b32, ElementSizeS, PPR_3b>;
def PPR3b64 : PPRRegOp<"d", PPRAsmOp3b64, ElementSizeD, PPR_3b>;
851 //******************************************************************************
853 // SVE vector register class
854 def ZPR : RegisterClass<"AArch64",
855 [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
856 nxv2f16, nxv4f16, nxv8f16,
857 nxv1f32, nxv2f32, nxv4f32,
859 128, (sequence "Z%u", 0, 31)> {
863 // SVE restricted 4 bit scalable vector register class
864 def ZPR_4b : RegisterClass<"AArch64",
865 [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
866 nxv2f16, nxv4f16, nxv8f16,
867 nxv1f32, nxv2f32, nxv4f32,
869 128, (sequence "Z%u", 0, 15)> {
873 // SVE restricted 3 bit scalable vector register class
874 def ZPR_3b : RegisterClass<"AArch64",
875 [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
876 nxv2f16, nxv4f16, nxv8f16,
877 nxv1f32, nxv2f32, nxv4f32,
879 128, (sequence "Z%u", 0, 7)> {
883 class ZPRAsmOperand<string name, int Width, string RegClassSuffix = "">
885 let Name = "SVE" # name # "Reg";
886 let PredicateMethod = "isSVEDataVectorRegOfWidth<"
887 # Width # ", AArch64::ZPR"
888 # RegClassSuffix # "RegClassID>";
889 let RenderMethod = "addRegOperands";
890 let DiagnosticType = "InvalidZPR" # RegClassSuffix # Width;
891 let ParserMethod = "tryParseSVEDataVector<false, "
892 # !if(!eq(Width, 0), "false", "true") # ">";
// Data-vector assembler operand classes over the full ZPR class, one per
// element width (0 = any width).
def ZPRAsmOpAny : ZPRAsmOperand<"VectorAny", 0>;
def ZPRAsmOp8 : ZPRAsmOperand<"VectorB", 8>;
def ZPRAsmOp16 : ZPRAsmOperand<"VectorH", 16>;
def ZPRAsmOp32 : ZPRAsmOperand<"VectorS", 32>;
def ZPRAsmOp64 : ZPRAsmOperand<"VectorD", 64>;
def ZPRAsmOp128 : ZPRAsmOperand<"VectorQ", 128>;
// SVE data-vector register operands with their element-size suffixes;
// ZPRAny is the suffix-less form.
def ZPRAny : ZPRRegOp<"", ZPRAsmOpAny, ElementSizeNone, ZPR>;
def ZPR8 : ZPRRegOp<"b", ZPRAsmOp8, ElementSizeB, ZPR>;
def ZPR16 : ZPRRegOp<"h", ZPRAsmOp16, ElementSizeH, ZPR>;
def ZPR32 : ZPRRegOp<"s", ZPRAsmOp32, ElementSizeS, ZPR>;
def ZPR64 : ZPRRegOp<"d", ZPRAsmOp64, ElementSizeD, ZPR>;
def ZPR128 : ZPRRegOp<"q", ZPRAsmOp128, ElementSizeQ, ZPR>;
// Assembler operand classes restricted to the 3-bit ZPR_3b class (z0-z7).
def ZPRAsmOp3b8 : ZPRAsmOperand<"Vector3bB", 8, "_3b">;
def ZPRAsmOp3b16 : ZPRAsmOperand<"Vector3bH", 16, "_3b">;
def ZPRAsmOp3b32 : ZPRAsmOperand<"Vector3bS", 32, "_3b">;
// Register operands over the z0-z7 subset.
def ZPR3b8 : ZPRRegOp<"b", ZPRAsmOp3b8, ElementSizeB, ZPR_3b>;
def ZPR3b16 : ZPRRegOp<"h", ZPRAsmOp3b16, ElementSizeH, ZPR_3b>;
def ZPR3b32 : ZPRRegOp<"s", ZPRAsmOp3b32, ElementSizeS, ZPR_3b>;
// Assembler operand classes restricted to the 4-bit ZPR_4b class (z0-z15).
def ZPRAsmOp4b16 : ZPRAsmOperand<"Vector4bH", 16, "_4b">;
def ZPRAsmOp4b32 : ZPRAsmOperand<"Vector4bS", 32, "_4b">;
def ZPRAsmOp4b64 : ZPRAsmOperand<"Vector4bD", 64, "_4b">;
// Register operands over the z0-z15 subset.
def ZPR4b16 : ZPRRegOp<"h", ZPRAsmOp4b16, ElementSizeH, ZPR_4b>;
def ZPR4b32 : ZPRRegOp<"s", ZPRAsmOp4b32, ElementSizeS, ZPR_4b>;
def ZPR4b64 : ZPRRegOp<"d", ZPRAsmOp4b64, ElementSizeD, ZPR_4b>;
// Assembler operand class accepting a scalar FPR of the given width where a
// Z register is expected; rendered through addFPRasZPRRegOperands<Width>.
class FPRasZPR<int Width> : AsmOperandClass{
  let Name = "FPR" # Width # "asZPR";
  let PredicateMethod = "isFPRasZPR<AArch64::FPR" # Width # "RegClassID>";
  let RenderMethod = "addFPRasZPRRegOperands<" # Width # ">";
// ZPR operand that parses/prints as a scalar FPR of the given width
// (printZPRasFPR<Width>).
class FPRasZPROperand<int Width> : RegisterOperand<ZPR> {
  let ParserMatchClass = FPRasZPR<Width>;
  let PrintMethod = "printZPRasFPR<" # Width # ">";
// Scalar-FPR views of the Z registers, one per FPR width (b/h/s/d/q).
def FPR8asZPR : FPRasZPROperand<8>;
def FPR16asZPR : FPRasZPROperand<16>;
def FPR32asZPR : FPRasZPROperand<32>;
def FPR64asZPR : FPRasZPROperand<64>;
def FPR128asZPR : FPRasZPROperand<128>;
let Namespace = "AArch64" in {
// Sub-register indices for the elements of SVE register tuples.  The offset
// argument is -1 (unknown) because scalable registers have no fixed byte
// spacing at compile time.
def zsub0 : SubRegIndex<128, -1>;
def zsub1 : SubRegIndex<128, -1>;
def zsub2 : SubRegIndex<128, -1>;
def zsub3 : SubRegIndex<128, -1>;
// Pairs, triples, and quads of SVE vector registers.  The rotl operands make
// the tuples wrap around the register file, so e.g. the pair starting at z31
// continues with z0.
def ZSeqPairs : RegisterTuples<[zsub0, zsub1], [(rotl ZPR, 0), (rotl ZPR, 1)]>;
def ZSeqTriples : RegisterTuples<[zsub0, zsub1, zsub2], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2)]>;
def ZSeqQuads : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2), (rotl ZPR, 3)]>;
954 def ZPR2 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqPairs)> {
957 def ZPR3 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqTriples)> {
960 def ZPR4 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqQuads)> {
// Assembler operand class for a list of NumRegs SVE data registers with the
// given element width; parsed by the generic vector-list parser and rendered
// as a Z-register list operand.
class ZPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
  let Name = "SVEVectorList" # NumRegs # ElementWidth;
  let ParserMethod = "tryParseVectorList<RegKind::SVEDataVector>";
  let PredicateMethod =
      "isTypedVectorList<RegKind::SVEDataVector, " #NumRegs #", 0, " #ElementWidth #">";
  let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_ZReg, " # NumRegs # ">";
// SVE vector-list operands.  Naming scheme for this family: the number of
// Zs (Z/ZZ/ZZZ/ZZZZ) gives the list length (1-4 registers) and the suffix
// the element type ('b'/'h'/'s'/'d').
def Z_b : RegisterOperand<ZPR, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 1>;
976 def Z_h : RegisterOperand<ZPR, "printTypedVectorList<0,'h'>"> {
977 let ParserMatchClass = ZPRVectorList<16, 1>;
980 def Z_s : RegisterOperand<ZPR, "printTypedVectorList<0,'s'>"> {
981 let ParserMatchClass = ZPRVectorList<32, 1>;
984 def Z_d : RegisterOperand<ZPR, "printTypedVectorList<0,'d'>"> {
985 let ParserMatchClass = ZPRVectorList<64, 1>;
988 def ZZ_b : RegisterOperand<ZPR2, "printTypedVectorList<0,'b'>"> {
989 let ParserMatchClass = ZPRVectorList<8, 2>;
992 def ZZ_h : RegisterOperand<ZPR2, "printTypedVectorList<0,'h'>"> {
993 let ParserMatchClass = ZPRVectorList<16, 2>;
996 def ZZ_s : RegisterOperand<ZPR2, "printTypedVectorList<0,'s'>"> {
997 let ParserMatchClass = ZPRVectorList<32, 2>;
1000 def ZZ_d : RegisterOperand<ZPR2, "printTypedVectorList<0,'d'>"> {
1001 let ParserMatchClass = ZPRVectorList<64, 2>;
1004 def ZZZ_b : RegisterOperand<ZPR3, "printTypedVectorList<0,'b'>"> {
1005 let ParserMatchClass = ZPRVectorList<8, 3>;
1008 def ZZZ_h : RegisterOperand<ZPR3, "printTypedVectorList<0,'h'>"> {
1009 let ParserMatchClass = ZPRVectorList<16, 3>;
1012 def ZZZ_s : RegisterOperand<ZPR3, "printTypedVectorList<0,'s'>"> {
1013 let ParserMatchClass = ZPRVectorList<32, 3>;
1016 def ZZZ_d : RegisterOperand<ZPR3, "printTypedVectorList<0,'d'>"> {
1017 let ParserMatchClass = ZPRVectorList<64, 3>;
1020 def ZZZZ_b : RegisterOperand<ZPR4, "printTypedVectorList<0,'b'>"> {
1021 let ParserMatchClass = ZPRVectorList<8, 4>;
1024 def ZZZZ_h : RegisterOperand<ZPR4, "printTypedVectorList<0,'h'>"> {
1025 let ParserMatchClass = ZPRVectorList<16, 4>;
1028 def ZZZZ_s : RegisterOperand<ZPR4, "printTypedVectorList<0,'s'>"> {
1029 let ParserMatchClass = ZPRVectorList<32, 4>;
1032 def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
1033 let ParserMatchClass = ZPRVectorList<64, 4>;
// Assembler operand class for an SVE data vector carrying a shift/extend
// modifier (UXTW/SXTW/LSL) scaled by Scale.  ScaleAlwaysSame appends "Only"
// to the class name and is forwarded to the predicate — presumably forcing
// the written scale to match exactly; confirm against AArch64AsmParser.
class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale,
                          bit ScaleAlwaysSame = 0b0> : AsmOperandClass {
  let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale
                # !if(ScaleAlwaysSame, "Only", "");
1041 let PredicateMethod = "isSVEDataVectorRegWithShiftExtend<"
1042 # RegWidth # ", AArch64::ZPRRegClassID, "
1043 # "AArch64_AM::" # ShiftExtend # ", "
1045 # !if(ScaleAlwaysSame, "true", "false")
1047 let DiagnosticType = "InvalidZPR" # RegWidth # ShiftExtend # Scale;
1048 let RenderMethod = "addRegOperands";
1049 let ParserMethod = "tryParseSVEDataVector<true, true>";
// Register operand whose assembler class is looked up by record name
// ("ZPR<RegWidth>AsmOpndExt<Repr><Scale><Suffix>"), printed together with
// its shift/extend modifier.
class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr,
                               int RegWidth, int Scale, string Suffix = "">
    : RegisterOperand<ZPR> {
  let ParserMatchClass =
    !cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale # Suffix);
  let PrintMethod = "printRegWithShiftExtend<"
                        # !if(SignExtend, "true", "false") # ", "
1060 # !if(IsLSL, "'x'", "'w'") # ", "
1061 # !if(!eq(RegWidth, 32), "'s'", "'d'") # ">";
1064 foreach RegWidth = [32, 64] in {
1066 def ZPR#RegWidth#AsmOpndExtUXTW8Only : ZPRExtendAsmOperand<"UXTW", RegWidth, 8, 0b1>;
1067 def ZPR#RegWidth#AsmOpndExtUXTW8 : ZPRExtendAsmOperand<"UXTW", RegWidth, 8>;
1068 def ZPR#RegWidth#AsmOpndExtUXTW16 : ZPRExtendAsmOperand<"UXTW", RegWidth, 16>;
1069 def ZPR#RegWidth#AsmOpndExtUXTW32 : ZPRExtendAsmOperand<"UXTW", RegWidth, 32>;
1070 def ZPR#RegWidth#AsmOpndExtUXTW64 : ZPRExtendAsmOperand<"UXTW", RegWidth, 64>;
1072 def ZPR#RegWidth#ExtUXTW8Only : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8, "Only">;
1073 def ZPR#RegWidth#ExtUXTW8 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8>;
1074 def ZPR#RegWidth#ExtUXTW16 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 16>;
1075 def ZPR#RegWidth#ExtUXTW32 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 32>;
1076 def ZPR#RegWidth#ExtUXTW64 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 64>;
1079 def ZPR#RegWidth#AsmOpndExtSXTW8Only : ZPRExtendAsmOperand<"SXTW", RegWidth, 8, 0b1>;
1080 def ZPR#RegWidth#AsmOpndExtSXTW8 : ZPRExtendAsmOperand<"SXTW", RegWidth, 8>;
1081 def ZPR#RegWidth#AsmOpndExtSXTW16 : ZPRExtendAsmOperand<"SXTW", RegWidth, 16>;
1082 def ZPR#RegWidth#AsmOpndExtSXTW32 : ZPRExtendAsmOperand<"SXTW", RegWidth, 32>;
1083 def ZPR#RegWidth#AsmOpndExtSXTW64 : ZPRExtendAsmOperand<"SXTW", RegWidth, 64>;
1085 def ZPR#RegWidth#ExtSXTW8Only : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8, "Only">;
1086 def ZPR#RegWidth#ExtSXTW8 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8>;
1087 def ZPR#RegWidth#ExtSXTW16 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 16>;
1088 def ZPR#RegWidth#ExtSXTW32 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 32>;
1089 def ZPR#RegWidth#ExtSXTW64 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 64>;
1092 def ZPR#RegWidth#AsmOpndExtLSL8 : ZPRExtendAsmOperand<"LSL", RegWidth, 8>;
1093 def ZPR#RegWidth#AsmOpndExtLSL16 : ZPRExtendAsmOperand<"LSL", RegWidth, 16>;
1094 def ZPR#RegWidth#AsmOpndExtLSL32 : ZPRExtendAsmOperand<"LSL", RegWidth, 32>;
1095 def ZPR#RegWidth#AsmOpndExtLSL64 : ZPRExtendAsmOperand<"LSL", RegWidth, 64>;
1096 def ZPR#RegWidth#ExtLSL8 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>;
1097 def ZPR#RegWidth#ExtLSL16 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>;
1098 def ZPR#RegWidth#ExtLSL32 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>;
1099 def ZPR#RegWidth#ExtLSL64 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>;
// Assembler operand class for a 64-bit GPR with a shift/extend scaled by
// Scale.  RegClass names the underlying register class (e.g. GPR64 vs
// GPR64common for the NoXZR variants defined below).
class GPR64ShiftExtendAsmOperand <string AsmOperandName, int Scale, string RegClass> : AsmOperandClass {
  let Name = AsmOperandName # Scale;
  let PredicateMethod = "isGPR64WithShiftExtend<AArch64::"#RegClass#"RegClassID, " # Scale # ">";
  let DiagnosticType = "Invalid" # AsmOperandName # Scale;
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseGPROperand<true>";
// Register operand bound to a named GPR64ShiftExtendAsmOperand record and
// printed as an 'x' register with its shift/extend modifier.
class GPR64ExtendRegisterOperand<string Name, int Scale, RegisterClass RegClass> : RegisterOperand<RegClass>{
  let ParserMatchClass = !cast<AsmOperandClass>(Name);
  let PrintMethod = "printRegWithShiftExtend<false, " # Scale # ", 'x', 0>";
1115 foreach Scale = [8, 16, 32, 64] in {
1116 def GPR64shiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64shifted", Scale, "GPR64">;
1117 def GPR64shifted # Scale : GPR64ExtendRegisterOperand<"GPR64shiftedAsmOpnd" # Scale, Scale, GPR64>;
1119 def GPR64NoXZRshiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64NoXZRshifted", Scale, "GPR64common">;
1120 def GPR64NoXZRshifted # Scale : GPR64ExtendRegisterOperand<"GPR64NoXZRshiftedAsmOpnd" # Scale, Scale, GPR64common>;