1 //===- PPCCallingConv.td - Calling Conventions for PowerPC -*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This describes the calling conventions for the PowerPC 32- and 64-bit architectures.
13 //===----------------------------------------------------------------------===//
15 /// CCIfSubtarget - Match if the current subtarget has a feature F.
// The !strconcat builds a C++ predicate string that casts the CCState's
// subtarget to PPCSubtarget and invokes the feature query named by F
// (e.g. "isPPC64()"), letting the conventions below gate actions on
// subtarget features.
16 class CCIfSubtarget<string F, CCAction A>
17 : CCIf<!strconcat("static_cast<const PPCSubtarget&>"
18 "(State.getMachineFunction().getSubtarget()).",
// CCIfNotSubtarget - Inverse of CCIfSubtarget: apply action A only when
// the current subtarget does NOT have feature F (note the leading '!').
21 class CCIfNotSubtarget<string F, CCAction A>
22 : CCIf<!strconcat("!static_cast<const PPCSubtarget&>"
23 "(State.getMachineFunction().getSubtarget()).",
// CCIfOrigArgWasNotPPCF128 - Apply A only if the original
// (pre-legalization) argument was not ppcf128, as recorded in the
// custom PPCCCState for value number ValNo.
26 class CCIfOrigArgWasNotPPCF128<CCAction A>
27 : CCIf<"!static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
// CCIfOrigArgWasPPCF128 - Apply A only if the original argument was
// ppcf128 (queried from the same PPCCCState flag).
29 class CCIfOrigArgWasPPCF128<CCAction A>
30 : CCIf<"static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
33 //===----------------------------------------------------------------------===//
34 // Return Value Calling Convention
35 //===----------------------------------------------------------------------===//
37 // PPC64 AnyReg return-value convention. No explicit register is specified for
38 // the return-value. The register allocator is allowed and expected to choose
41 // This calling convention is currently only supported by the stackmap and
42 // patchpoint intrinsics. All other uses will result in an assert on Debug
43 // builds. On Release builds we fallback to the PPC C calling convention.
44 def RetCC_PPC64_AnyReg : CallingConv<[
// Every value is routed to the custom handler, which rejects any use
// outside stackmap/patchpoint (see the comment above).
45 CCCustom<"CC_PPC_AnyReg_Error">
48 // Return-value convention for PowerPC coldcc.
// Each register class below offers exactly one return register (R3/X3,
// F1, V2, QF1), so coldcc keeps as many registers callee-saved as
// possible (see the CSR_*_ColdCC sets later in this file).
49 def RetCC_PPC_Cold : CallingConv<[
50 // Use the same return registers as RetCC_PPC, but limited to only
51 // one return value. The remaining return values will be saved to
// Promote small integers to the native GPR width first.
53 CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
54 CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,
56 CCIfType<[i32], CCAssignToReg<[R3]>>,
57 CCIfType<[i64], CCAssignToReg<[X3]>>,
58 CCIfType<[i128], CCAssignToReg<[X3]>>,
60 CCIfType<[f32], CCAssignToReg<[F1]>>,
61 CCIfType<[f64], CCAssignToReg<[F1]>>,
// f128 returns require Power9 vector support and use a vector register.
62 CCIfType<[f128], CCIfSubtarget<"hasP9Vector()", CCAssignToReg<[V2]>>>,
64 CCIfType<[v4f64, v4f32, v4i1],
65 CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1]>>>,
// AltiVec vector returns.
67 CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
68 CCIfSubtarget<"hasAltivec()",
72 // Return-value convention for PowerPC
73 def RetCC_PPC : CallingConv<[
// AnyReg calls bypass the normal convention entirely.
74 CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,
76 // On PPC64, integer return values are always promoted to i64
77 CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
78 CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,
// GPR return registers: R3-R10 for 32-bit values; i64/i128 share the
// X3-X6 pool on 64-bit targets.
80 CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
81 CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
82 CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
84 // Floating point types returned as "direct" go into F1 .. F8; note that
85 // only the ELFv2 ABI fully utilizes all these registers.
// With SPE there are no FPRs: f32 is returned in GPRs and f64 in the
// 64-bit SPE (S) registers instead.
86 CCIfNotSubtarget<"hasSPE()",
87 CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
88 CCIfNotSubtarget<"hasSPE()",
89 CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
90 CCIfSubtarget<"hasSPE()",
91 CCIfType<[f32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,
92 CCIfSubtarget<"hasSPE()",
93 CCIfType<[f64], CCAssignToReg<[S3, S4, S5, S6, S7, S8, S9, S10]>>>,
95 // For P9, f128 are passed in vector registers.
97 CCIfSubtarget<"hasP9Vector()",
98 CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
100 // QPX vectors are returned in QF1 and QF2.
101 CCIfType<[v4f64, v4f32, v4i1],
102 CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1, QF2]>>>,
104 // Vector types returned as "direct" go into V2 .. V9; note that only the
105 // ELFv2 ABI fully utilizes all these registers.
106 CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
107 CCIfSubtarget<"hasAltivec()",
108 CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
111 // No explicit register is specified for the AnyReg calling convention. The
112 // register allocator may assign the arguments to any free register.
114 // This calling convention is currently only supported by the stackmap and
115 // patchpoint intrinsics. All other uses will result in an assert on Debug
116 // builds. On Release builds we fallback to the PPC C calling convention.
// Argument-side counterpart of RetCC_PPC64_AnyReg above.
117 def CC_PPC64_AnyReg : CallingConv<[
118 CCCustom<"CC_PPC_AnyReg_Error">
121 // Note that we don't currently have calling conventions for 64-bit
122 // PowerPC, but handle all the complexities of the ABI in the lowering
123 // logic. FIXME: See if the logic can be simplified with use of CCs.
124 // This may require some extensions to current table generation.
126 // Simple calling convention for 64-bit ELF PowerPC fast isel.
127 // Only handle ints and floats. All ints are promoted to i64.
128 // Vector types and quadword ints are not handled.
129 def CC_PPC64_ELF_FIS : CallingConv<[
// AnyReg calls are delegated to the error-reporting convention above.
130 CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_PPC64_AnyReg>>,
// Widen every small integer to the 64-bit GPR width before assignment.
132 CCIfType<[i1], CCPromoteToType<i64>>,
133 CCIfType<[i8], CCPromoteToType<i64>>,
134 CCIfType<[i16], CCPromoteToType<i64>>,
135 CCIfType<[i32], CCPromoteToType<i64>>,
// Eight GPR argument registers and eight FPR argument registers.
136 CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6, X7, X8, X9, X10]>>,
137 CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>
140 // Simple return-value convention for 64-bit ELF PowerPC fast isel.
141 // All small ints are promoted to i64. Vector types, quadword ints,
142 // and multiple register returns are "supported" to avoid compile
143 // errors, but none are handled by the fast selector.
144 def RetCC_PPC64_ELF_FIS : CallingConv<[
145 CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,
// Widen small integer returns to i64.
147 CCIfType<[i1], CCPromoteToType<i64>>,
148 CCIfType<[i8], CCPromoteToType<i64>>,
149 CCIfType<[i16], CCPromoteToType<i64>>,
150 CCIfType<[i32], CCPromoteToType<i64>>,
// Same register pools as RetCC_PPC (X3-X6, F1-F8, V2-V9, QF1-QF2).
151 CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
152 CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
153 CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
154 CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
156 CCIfSubtarget<"hasP9Vector()",
157 CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
158 CCIfType<[v4f64, v4f32, v4i1],
159 CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1, QF2]>>>,
160 CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
161 CCIfSubtarget<"hasAltivec()",
162 CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
165 //===----------------------------------------------------------------------===//
166 // PowerPC System V Release 4 32-bit ABI
167 //===----------------------------------------------------------------------===//
// Shared argument-assignment rules for the 32-bit SVR4 ABI; both
// CC_PPC32_SVR4 and CC_PPC32_SVR4_VarArg delegate here after handling
// vector arguments their own way.
169 def CC_PPC32_SVR4_Common : CallingConv<[
170 CCIfType<[i1], CCPromoteToType<i32>>,
172 // The ABI requires i64 to be passed in two adjacent registers with the first
173 // register having an odd register number.
// Custom handlers enforce that pairing; soft-float additionally needs
// the PPCCCState ppcf128 flag to decide how split halves are treated.
175 CCIfSplit<CCIfSubtarget<"useSoftFloat()",
176 CCIfOrigArgWasNotPPCF128<
177 CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>>,
180 CCIfSplit<CCIfNotSubtarget<"useSoftFloat()",
181 CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>,
182 CCIfSplit<CCIfSubtarget<"useSoftFloat()",
183 CCIfOrigArgWasPPCF128<CCCustom<
184 "CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128">>>>,
186 // The 'nest' parameter, if any, is passed in R11.
187 CCIfNest<CCAssignToReg<[R11]>>,
189 // The first 8 integer arguments are passed in integer registers.
190 CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
192 // Make sure the i64 words from a long double are either both passed in
193 // registers or both passed on the stack.
194 CCIfType<[f64], CCIfSplit<CCCustom<"CC_PPC32_SVR4_Custom_AlignFPArgRegs">>>,
196 // FP values are passed in F1 - F8.
// With SPE, f64 goes in the 64-bit SPE (S) registers and f32 in GPRs.
198 CCIfNotSubtarget<"hasSPE()",
199 CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
201 CCIfSubtarget<"hasSPE()",
202 CCAssignToReg<[S3, S4, S5, S6, S7, S8, S9, S10]>>>,
204 CCIfSubtarget<"hasSPE()",
205 CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,
207 // Split arguments have an alignment of 8 bytes on the stack.
208 CCIfType<[i32], CCIfSplit<CCAssignToStack<4, 8>>>,
// Remaining i32 arguments overflow to 4-byte-aligned stack slots.
210 CCIfType<[i32], CCAssignToStack<4, 4>>,
212 // Floats are stored in double precision format, thus they have the same
213 // alignment and size as doubles.
214 // With SPE floats are stored as single precision, so have alignment and
216 CCIfType<[f32,f64], CCIfNotSubtarget<"hasSPE()", CCAssignToStack<8, 8>>>,
217 CCIfType<[f32], CCIfSubtarget<"hasSPE()", CCAssignToStack<4, 4>>>,
218 CCIfType<[f64], CCIfSubtarget<"hasSPE()", CCAssignToStack<8, 8>>>,
220 // QPX vectors that are stored in double precision need 32-byte alignment.
221 CCIfType<[v4f64, v4i1], CCAssignToStack<32, 32>>,
223 // Vectors and float128 get 16-byte stack slots that are 16-byte aligned.
224 CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>,
225 CCIfType<[f128], CCIfSubtarget<"hasP9Vector()", CCAssignToStack<16, 16>>>
228 // This calling convention puts vector arguments always on the stack. It is used
229 // to assign vector arguments which belong to the variable portion of the
230 // parameter list of a variable argument function.
// No vector-register rules of its own: by delegating straight to
// CC_PPC32_SVR4_Common, vectors fall through to the stack rules there.
231 def CC_PPC32_SVR4_VarArg : CallingConv<[
232 CCDelegateTo<CC_PPC32_SVR4_Common>
235 // In contrast to CC_PPC32_SVR4_VarArg, this calling convention first tries to
236 // put vector arguments in vector registers before putting them on the stack.
237 def CC_PPC32_SVR4 : CallingConv<[
238 // QPX vectors mirror the scalar FP convention.
239 CCIfType<[v4f64, v4f32, v4i1], CCIfSubtarget<"hasQPX()",
240 CCAssignToReg<[QF1, QF2, QF3, QF4, QF5, QF6, QF7, QF8]>>>,
242 // The first 12 Vector arguments are passed in AltiVec registers.
243 CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
244 CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2, V3, V4, V5, V6, V7,
245 V8, V9, V10, V11, V12, V13]>>>,
246 
247 // Float128 types treated as vector arguments.
// Same V2-V13 pool as AltiVec vectors, gated on Power9 vector support.
249 CCIfSubtarget<"hasP9Vector()", CCAssignToReg<[V2, V3, V4, V5, V6, V7,
250 V8, V9, V10, V11, V12, V13]>>>,
// Everything else (integers, FP, stack overflow) is shared logic.
252 CCDelegateTo<CC_PPC32_SVR4_Common>
255 // Helper "calling convention" to handle aggregate by value arguments.
256 // Aggregate by value arguments are always placed in the local variable space
257 // of the caller. This calling convention is only used to assign those stack
258 // offsets in the callers stack frame.
260 // Still, the address of the aggregate copy in the callers stack frame is passed
261 // in a GPR (or in the parameter list area if all GPRs are allocated) from the
262 // caller to the callee. The location for the address argument is assigned by
263 // the CC_PPC32_SVR4 calling convention.
265 // The only purpose of CC_PPC32_SVR4_Custom_Dummy is to skip arguments which are
266 // not passed by value.
268 def CC_PPC32_SVR4_ByVal : CallingConv<[
// By-value aggregates get a 4-byte-aligned, 4-byte-granular stack copy.
269 CCIfByVal<CCPassByVal<4, 4>>,
271 CCCustom<"CC_PPC32_SVR4_Custom_Dummy">
// Callee-saved AltiVec registers (V20-V31); appended to the base CSR
// sets below for Altivec-enabled variants.
274 def CSR_Altivec : CalleeSavedRegs<(add V20, V21, V22, V23, V24, V25, V26, V27,
275 V28, V29, V30, V31)>;
// Darwin 32-bit callee-saved set: R13-R31, F14-F31, CR2-CR4.
277 def CSR_Darwin32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
278 R21, R22, R23, R24, R25, R26, R27, R28,
279 R29, R30, R31, F14, F15, F16, F17, F18,
280 F19, F20, F21, F22, F23, F24, F25, F26,
281 F27, F28, F29, F30, F31, CR2, CR3, CR4
284 def CSR_Darwin32_Altivec : CalleeSavedRegs<(add CSR_Darwin32, CSR_Altivec)>;
286 // SPE does not use FPRs, so break out the common register set as base.
// GPR/CR portion shared by the FPR (CSR_SVR432) and SPE (CSR_SVR432_SPE)
// variants; note R13 is excluded here, unlike the Darwin set above.
287 def CSR_SVR432_COMM : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20,
288 R21, R22, R23, R24, R25, R26, R27,
289 R28, R29, R30, R31, CR2, CR3, CR4
// Standard SVR4 32-bit set: common base plus the FPRs F14-F31.
291 def CSR_SVR432 : CalleeSavedRegs<(add CSR_SVR432_COMM, F14, F15, F16, F17, F18,
292 F19, F20, F21, F22, F23, F24, F25, F26,
293 F27, F28, F29, F30, F31
// Callee-saved 64-bit SPE registers S14-S31 (mirrors the F14-F31 range).
295 def CSR_SPE : CalleeSavedRegs<(add S14, S15, S16, S17, S18, S19, S20, S21, S22,
296 S23, S24, S25, S26, S27, S28, S29, S30, S31
299 def CSR_SVR432_Altivec : CalleeSavedRegs<(add CSR_SVR432, CSR_Altivec)>;
301 def CSR_SVR432_SPE : CalleeSavedRegs<(add CSR_SVR432_COMM, CSR_SPE)>;
// Darwin 64-bit callee-saved set: X13-X31, F14-F31, CR2-CR4.
303 def CSR_Darwin64 : CalleeSavedRegs<(add X13, X14, X15, X16, X17, X18, X19, X20,
304 X21, X22, X23, X24, X25, X26, X27, X28,
305 X29, X30, X31, F14, F15, F16, F17, F18,
306 F19, F20, F21, F22, F23, F24, F25, F26,
307 F27, F28, F29, F30, F31, CR2, CR3, CR4
310 def CSR_Darwin64_Altivec : CalleeSavedRegs<(add CSR_Darwin64, CSR_Altivec)>;
// SVR4/ELF 64-bit callee-saved set; starts at X14 (X13 is the thread
// pointer on this ABI and is not treated as a CSR here).
312 def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
313 X21, X22, X23, X24, X25, X26, X27, X28,
314 X29, X30, X31, F14, F15, F16, F17, F18,
315 F19, F20, F21, F22, F23, F24, F25, F26,
316 F27, F28, F29, F30, F31, CR2, CR3, CR4
319 // CSRs that are handled by prologue, epilogue.
// Intentionally empty: the TLS prologue/epilogue code saves what it
// needs itself. NOTE(review): "SRV464" (vs. "SVR464" elsewhere) looks
// like a long-standing typo, but renaming would break external users of
// this CSR set, so the spelling is kept.
320 def CSR_SRV464_TLS_PE : CalleeSavedRegs<(add)>;
// "_ViaCopy" variants hold the same registers as their base sets;
// presumably they select a save-via-register-copy strategy in frame
// lowering rather than a different register list — verify against
// PPCFrameLowering/PPCRegisterInfo.
322 def CSR_SVR464_ViaCopy : CalleeSavedRegs<(add CSR_SVR464)>;
324 def CSR_SVR464_Altivec : CalleeSavedRegs<(add CSR_SVR464, CSR_Altivec)>;
326 def CSR_SVR464_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_Altivec)>;
// "_R2" variants additionally preserve X2 (the TOC pointer).
328 def CSR_SVR464_R2 : CalleeSavedRegs<(add CSR_SVR464, X2)>;
330 def CSR_SVR464_R2_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2)>;
332 def CSR_SVR464_R2_Altivec : CalleeSavedRegs<(add CSR_SVR464_Altivec, X2)>;
334 def CSR_SVR464_R2_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2_Altivec)>;
// Empty set: nothing is callee-saved.
336 def CSR_NoRegs : CalleeSavedRegs<(add)>;
338 // coldcc calling convention marks most registers as non-volatile.
339 // Do not include r1 since the stack pointer is never considered a CSR.
340 // Do not include r2, since it is the TOC register and is added depending
341 // on whether or not the function uses the TOC and is a non-leaf.
342 // Do not include r0,r11,r13 as they are optional in functional linkage
343 // and value may be altered by inter-library calls.
344 // Do not include r12 as it is used as a scratch register.
345 // Do not include return registers r3, f1, v2.
// 32-bit coldcc CSR set: R4-R10 and R14-R31 (gaps per the notes above),
// F0 plus F2-F31 (F1 is the FP return register), and all CR fields.
346 def CSR_SVR32_ColdCC : CalleeSavedRegs<(add (sequence "R%u", 4, 10),
347 (sequence "R%u", 14, 31),
348 F0, (sequence "F%u", 2, 31),
349 (sequence "CR%u", 0, 7))>;
// Altivec variant adds V0-V1 and V3-V31 (V2 is the vector return reg).
351 def CSR_SVR32_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR32_ColdCC,
352 (sequence "V%u", 0, 1),
353 (sequence "V%u", 3, 31))>;
// 64-bit coldcc CSR set, mirroring the 32-bit layout with X registers.
355 def CSR_SVR64_ColdCC : CalleeSavedRegs<(add (sequence "X%u", 4, 10),
356 (sequence "X%u", 14, 31),
357 F0, (sequence "F%u", 2, 31),
358 (sequence "CR%u", 0, 7))>;
// X2 (TOC) is added for the non-leaf/TOC-using case (see note above).
360 def CSR_SVR64_ColdCC_R2: CalleeSavedRegs<(add CSR_SVR64_ColdCC, X2)>;
362 def CSR_SVR64_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC,
363 (sequence "V%u", 0, 1),
364 (sequence "V%u", 3, 31))>;
366 def CSR_SVR64_ColdCC_R2_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC_Altivec, X2)>;
// "AllRegs" sets preserve essentially every allocatable register
// (X0, X3-X10, X14-X31, all FPRs, all CR fields) — presumably paired
// with the AnyReg conventions above, where the register allocator may
// pick any register; confirm against PPCRegisterInfo usage.
368 def CSR_64_AllRegs: CalleeSavedRegs<(add X0, (sequence "X%u", 3, 10),
369 (sequence "X%u", 14, 31),
370 (sequence "F%u", 0, 31),
371 (sequence "CR%u", 0, 7))>;
373 def CSR_64_AllRegs_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
374 (sequence "V%u", 0, 31))>;
// VSX variant additionally preserves the low VSX registers VSL0-VSL31.
376 def CSR_64_AllRegs_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
377 (sequence "VSL%u", 0, 31))>;