//===---- AMDCallingConv.td - Calling Conventions for Radeon GPUs ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the AMD Radeon GPUs.
//
//===----------------------------------------------------------------------===//
// Inversion of CCIfInReg: applies the action A only to arguments that are
// NOT marked 'inreg' (i.e. arguments destined for VGPRs rather than SGPRs).
class CCIfNotInReg<CCAction A> : CCIf<"!ArgFlags.isInReg()", A> {}

// Applies the action A only to arguments that are sign- or zero-extended.
class CCIfExtend<CCAction A>
  : CCIf<"ArgFlags.isSExt() || ArgFlags.isZExt()", A>;
// Calling convention for SI
def CC_SI : CallingConv<[

  // 'inreg' scalar arguments go to the first 40 SGPRs.
  CCIfInReg<CCIfType<[f32, i32, f16] , CCAssignToReg<[
    SGPR0, SGPR1, SGPR2, SGPR3, SGPR4, SGPR5, SGPR6, SGPR7,
    SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15,
    SGPR16, SGPR17, SGPR18, SGPR19, SGPR20, SGPR21, SGPR22, SGPR23,
    SGPR24, SGPR25, SGPR26, SGPR27, SGPR28, SGPR29, SGPR30, SGPR31,
    SGPR32, SGPR33, SGPR34, SGPR35, SGPR36, SGPR37, SGPR38, SGPR39
  ]>>>,

  // We have no way of referring to the generated register tuples
  // here, so use a custom function.
  CCIfInReg<CCIfType<[i64], CCCustom<"allocateSGPRTuple">>>,
  CCIfByVal<CCIfType<[i64], CCCustom<"allocateSGPRTuple">>>,

  // 32*4 + 4 is the minimum for a fetch shader consumer with 32 inputs.
  CCIfNotInReg<CCIfType<[f32, i32, f16] , CCAssignToReg<[
    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31,
    VGPR32, VGPR33, VGPR34, VGPR35, VGPR36, VGPR37, VGPR38, VGPR39,
    VGPR40, VGPR41, VGPR42, VGPR43, VGPR44, VGPR45, VGPR46, VGPR47,
    VGPR48, VGPR49, VGPR50, VGPR51, VGPR52, VGPR53, VGPR54, VGPR55,
    VGPR56, VGPR57, VGPR58, VGPR59, VGPR60, VGPR61, VGPR62, VGPR63,
    VGPR64, VGPR65, VGPR66, VGPR67, VGPR68, VGPR69, VGPR70, VGPR71,
    VGPR72, VGPR73, VGPR74, VGPR75, VGPR76, VGPR77, VGPR78, VGPR79,
    VGPR80, VGPR81, VGPR82, VGPR83, VGPR84, VGPR85, VGPR86, VGPR87,
    VGPR88, VGPR89, VGPR90, VGPR91, VGPR92, VGPR93, VGPR94, VGPR95,
    VGPR96, VGPR97, VGPR98, VGPR99, VGPR100, VGPR101, VGPR102, VGPR103,
    VGPR104, VGPR105, VGPR106, VGPR107, VGPR108, VGPR109, VGPR110, VGPR111,
    VGPR112, VGPR113, VGPR114, VGPR115, VGPR116, VGPR117, VGPR118, VGPR119,
    VGPR120, VGPR121, VGPR122, VGPR123, VGPR124, VGPR125, VGPR126, VGPR127,
    VGPR128, VGPR129, VGPR130, VGPR131, VGPR132, VGPR133, VGPR134, VGPR135
  ]>>>
]>;
// Return-value convention for graphics shaders: i32 results in SGPRs,
// floating-point results in VGPRs.
def RetCC_SI_Shader : CallingConv<[
  CCIfType<[i32] , CCAssignToReg<[
    SGPR0, SGPR1, SGPR2, SGPR3, SGPR4, SGPR5, SGPR6, SGPR7,
    SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15,
    SGPR16, SGPR17, SGPR18, SGPR19, SGPR20, SGPR21, SGPR22, SGPR23,
    SGPR24, SGPR25, SGPR26, SGPR27, SGPR28, SGPR29, SGPR30, SGPR31,
    SGPR32, SGPR33, SGPR34, SGPR35, SGPR36, SGPR37, SGPR38, SGPR39
  ]>>,

  // 32*4 + 4 is the minimum for a fetch shader with 32 outputs.
  CCIfType<[f32, f16] , CCAssignToReg<[
    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31,
    VGPR32, VGPR33, VGPR34, VGPR35, VGPR36, VGPR37, VGPR38, VGPR39,
    VGPR40, VGPR41, VGPR42, VGPR43, VGPR44, VGPR45, VGPR46, VGPR47,
    VGPR48, VGPR49, VGPR50, VGPR51, VGPR52, VGPR53, VGPR54, VGPR55,
    VGPR56, VGPR57, VGPR58, VGPR59, VGPR60, VGPR61, VGPR62, VGPR63,
    VGPR64, VGPR65, VGPR66, VGPR67, VGPR68, VGPR69, VGPR70, VGPR71,
    VGPR72, VGPR73, VGPR74, VGPR75, VGPR76, VGPR77, VGPR78, VGPR79,
    VGPR80, VGPR81, VGPR82, VGPR83, VGPR84, VGPR85, VGPR86, VGPR87,
    VGPR88, VGPR89, VGPR90, VGPR91, VGPR92, VGPR93, VGPR94, VGPR95,
    VGPR96, VGPR97, VGPR98, VGPR99, VGPR100, VGPR101, VGPR102, VGPR103,
    VGPR104, VGPR105, VGPR106, VGPR107, VGPR108, VGPR109, VGPR110, VGPR111,
    VGPR112, VGPR113, VGPR114, VGPR115, VGPR116, VGPR117, VGPR118, VGPR119,
    VGPR120, VGPR121, VGPR122, VGPR123, VGPR124, VGPR125, VGPR126, VGPR127,
    VGPR128, VGPR129, VGPR130, VGPR131, VGPR132, VGPR133, VGPR134, VGPR135
  ]>>
]>;
// Calling convention for R600
def CC_R600 : CallingConv<[
  // 'inreg' vector arguments are assigned to whole T-register tuples.
  CCIfInReg<CCIfType<[v4f32, v4i32] , CCAssignToReg<[
    T0_XYZW, T1_XYZW, T2_XYZW, T3_XYZW, T4_XYZW, T5_XYZW, T6_XYZW, T7_XYZW,
    T8_XYZW, T9_XYZW, T10_XYZW, T11_XYZW, T12_XYZW, T13_XYZW, T14_XYZW, T15_XYZW,
    T16_XYZW, T17_XYZW, T18_XYZW, T19_XYZW, T20_XYZW, T21_XYZW, T22_XYZW,
    T23_XYZW, T24_XYZW, T25_XYZW, T26_XYZW, T27_XYZW, T28_XYZW, T29_XYZW,
    T30_XYZW, T31_XYZW, T32_XYZW
  ]>>>
]>;
// Calling convention for compute kernels: all kernel arguments are placed by
// the target-specific allocateKernArg hook rather than by generic rules.
def CC_AMDGPU_Kernel : CallingConv<[
  CCCustom<"allocateKernArg">
]>;
// Callee-saved registers: VGPR24..VGPR255.
def CSR_AMDGPU_VGPRs_24_255 : CalleeSavedRegs<
  (sequence "VGPR%u", 24, 255)
>;
// Callee-saved registers: VGPR32..VGPR255.
def CSR_AMDGPU_VGPRs_32_255 : CalleeSavedRegs<
  (sequence "VGPR%u", 32, 255)
>;
// Callee-saved registers: SGPR32..SGPR103.
def CSR_AMDGPU_SGPRs_32_103 : CalleeSavedRegs<
  (sequence "SGPR%u", 32, 103)
>;
// Combined callee-saved set: high VGPRs plus high SGPRs.
def CSR_AMDGPU_HighRegs : CalleeSavedRegs<
  (add CSR_AMDGPU_VGPRs_32_255, CSR_AMDGPU_SGPRs_32_103)
>;
// Calling convention for leaf functions
def CC_AMDGPU_Func : CallingConv<[
  // By-value aggregates are passed on the stack, 4-byte size/align units.
  CCIfByVal<CCPassByVal<4, 4>>,
  CCIfType<[i1], CCPromoteToType<i32>>,
  // Small integers marked sext/zext are widened to i32 first.
  CCIfType<[i1, i8, i16], CCIfExtend<CCPromoteToType<i32>>>,
  // Scalar and 2x16-bit vector arguments use the first 32 VGPRs.
  CCIfType<[i32, f32, i16, f16, v2i16, v2f16, i1], CCAssignToReg<[
    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31]>>,
  // Wider types go through the custom VGPR tuple allocator (register
  // tuples cannot be named directly here).
  CCIfType<[i64, f64, v2i32, v2f32, v4i32, v4f32, v8i32, v8f32, v16i32, v16f32, v2i64, v2f64], CCCustom<"allocateVGPRTuple">>,
  // Anything that did not get a register spills to the stack.
  CCIfType<[i32, f32, v2i16, v2f16, i16, f16, i1], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64, v2i32, v2f32], CCAssignToStack<8, 4>>,
  CCIfType<[v4i32, v4f32, v2i64, v2f64], CCAssignToStack<16, 4>>,
  CCIfType<[v8i32, v8f32], CCAssignToStack<32, 4>>,
  CCIfType<[v16i32, v16f32], CCAssignToStack<64, 4>>
]>;
// Return-value convention for leaf functions
def RetCC_AMDGPU_Func : CallingConv<[
  CCIfType<[i1], CCPromoteToType<i32>>,
  CCIfType<[i1, i16], CCIfExtend<CCPromoteToType<i32>>>,
  // Scalar and 2x16-bit vector results use the first 32 VGPRs.
  CCIfType<[i32, f32, i16, f16, v2i16, v2f16], CCAssignToReg<[
    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31]>>,
  // Wider results go through the custom VGPR tuple allocator.
  CCIfType<[i64, f64, v2i32, v2f32, v4i32, v4f32, v8i32, v8f32, v16i32, v16f32, v2i64, v2f64], CCCustom<"allocateVGPRTuple">>
]>;
// Top-level dispatcher: picks a convention based on subtarget generation and
// whether the function is a graphics shader. Non-shaders use the kernel
// convention on every generation; shaders use CC_SI on SI+ and CC_R600 before.
def CC_AMDGPU : CallingConv<[
  CCIf<"static_cast<const AMDGPUSubtarget&>"
         "(State.getMachineFunction().getSubtarget()).getGeneration() >="
         "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
         "!AMDGPU::isShader(State.getCallingConv())",
       CCDelegateTo<CC_AMDGPU_Kernel>>,
  CCIf<"static_cast<const AMDGPUSubtarget&>"
         "(State.getMachineFunction().getSubtarget()).getGeneration() < "
         "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
         "!AMDGPU::isShader(State.getCallingConv())",
       CCDelegateTo<CC_AMDGPU_Kernel>>,
  CCIf<"static_cast<const AMDGPUSubtarget&>"
         "(State.getMachineFunction().getSubtarget()).getGeneration() >= "
         "AMDGPUSubtarget::SOUTHERN_ISLANDS",
       CCDelegateTo<CC_SI>>,
  CCIf<"static_cast<const AMDGPUSubtarget&>"
         "(State.getMachineFunction().getSubtarget()).getGeneration() < "
         "AMDGPUSubtarget::SOUTHERN_ISLANDS",
       CCDelegateTo<CC_R600>>
]>;