; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN %s
; fmul+fadd with all-VGPR operands should select to v_mac_f32 accumulating
; into the addend register [[C]].
; GCN-LABEL: {{^}}mac_vvv:
; GCN: buffer_load_dword [[A:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], 0{{$}}
; GCN: buffer_load_dword [[B:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], 0 offset:4
; GCN: buffer_load_dword [[C:v[0-9]+]], off, s[{{[0-9]+:[0-9]+}}], 0 offset:8
; GCN: v_mac_f32_e32 [[C]], [[B]], [[A]]
; GCN: buffer_store_dword [[C]]
define void @mac_vvv(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %c_ptr = getelementptr float, float addrspace(1)* %in, i32 2

  ; volatile keeps the three loads as separate buffer_load_dwords for the checks
  %a = load volatile float, float addrspace(1)* %in
  %b = load volatile float, float addrspace(1)* %b_ptr
  %c = load volatile float, float addrspace(1)* %c_ptr

  %tmp0 = fmul float %a, %b
  %tmp1 = fadd float %tmp0, %c
  store float %tmp1, float addrspace(1)* %out
  ret void
}
; SGPR multiplicand with two inline-immediate (0.5) operands should still
; form a single v_mad_f32.
; GCN-LABEL: {{^}}mad_inline_sgpr_inline:
; GCN: v_mad_f32 v{{[0-9]}}, s{{[0-9]+}}, 0.5, 0.5
define void @mad_inline_sgpr_inline(float addrspace(1)* %out, float %in) #0 {
entry:
  %tmp0 = fmul float 0.5, %in
  %tmp1 = fadd float %tmp0, 0.5
  store float %tmp1, float addrspace(1)* %out
  ret void
}
; VGPR*VGPR + SGPR addend: v_mac_f32 cannot take an SGPR src2, so this must
; select to v_mad_f32.
; GCN-LABEL: {{^}}mad_vvs:
; GCN: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
define void @mad_vvs(float addrspace(1)* %out, float addrspace(1)* %in, float %c) #0 {
entry:
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1

  %a = load float, float addrspace(1)* %in
  %b = load float, float addrspace(1)* %b_ptr

  %tmp0 = fmul float %a, %b
  %tmp1 = fadd float %tmp0, %c
  store float %tmp1, float addrspace(1)* %out
  ret void
}
; SGPR*SGPR with a VGPR accumulator still works as v_mac, but needs the VOP3
; (_e64) encoding to take two scalar sources.
; GCN-LABEL: {{^}}mac_ssv:
; GCN: v_mac_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
define void @mac_ssv(float addrspace(1)* %out, float addrspace(1)* %in, float %a) #0 {
entry:
  %c = load float, float addrspace(1)* %in

  %tmp0 = fmul float %a, %a
  %tmp1 = fadd float %tmp0, %c
  store float %tmp1, float addrspace(1)* %out
  ret void
}
; Two FMAs share the addend %c; only one can clobber it in-place via v_mac,
; the other must be a v_mad writing a fresh destination.
; GCN-LABEL: {{^}}mac_mad_same_add:
; GCN: v_mad_f32 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD:v[0-9]+]]
; GCN: v_mac_f32_e32 [[ADD]], v{{[0-9]+}}, v{{[0-9]+}}
define void @mac_mad_same_add(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %c_ptr = getelementptr float, float addrspace(1)* %in, i32 2
  %d_ptr = getelementptr float, float addrspace(1)* %in, i32 3
  %e_ptr = getelementptr float, float addrspace(1)* %in, i32 4

  %a = load volatile float, float addrspace(1)* %in
  %b = load volatile float, float addrspace(1)* %b_ptr
  %c = load volatile float, float addrspace(1)* %c_ptr
  %d = load volatile float, float addrspace(1)* %d_ptr
  %e = load volatile float, float addrspace(1)* %e_ptr

  %tmp0 = fmul float %a, %b
  %tmp1 = fadd float %tmp0, %c

  %tmp2 = fmul float %d, %e
  %tmp3 = fadd float %tmp2, %c

  %out1 = getelementptr float, float addrspace(1)* %out, i32 1
  store float %tmp1, float addrspace(1)* %out
  store float %tmp3, float addrspace(1)* %out1
  ret void
}
; There is no advantage to using v_mac when one of the operands is negated
; and v_mad accepts more operand types.

; GCN-LABEL: {{^}}mad_neg_src0:
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
define void @mad_neg_src0(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %c_ptr = getelementptr float, float addrspace(1)* %in, i32 2

  %a = load float, float addrspace(1)* %in
  %b = load float, float addrspace(1)* %b_ptr
  %c = load float, float addrspace(1)* %c_ptr

  ; fsub from -0.0 is the canonical fneg, so src0 gets a source-modifier
  %neg_a = fsub float -0.0, %a
  %tmp0 = fmul float %neg_a, %b
  %tmp1 = fadd float %tmp0, %c

  store float %tmp1, float addrspace(1)* %out
  ret void
}
; With unsafe-fp-math (#1), (0.0 - x) folds to fneg x, so this still becomes
; a v_mad with a negated src0.
; GCN-LABEL: {{^}}unsafe_mad_sub0_src0:
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
define void @unsafe_mad_sub0_src0(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
entry:
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %c_ptr = getelementptr float, float addrspace(1)* %in, i32 2

  %a = load float, float addrspace(1)* %in
  %b = load float, float addrspace(1)* %b_ptr
  %c = load float, float addrspace(1)* %c_ptr

  %neg_a = fsub float 0.0, %a
  %tmp0 = fmul float %neg_a, %b
  %tmp1 = fadd float %tmp0, %c

  store float %tmp1, float addrspace(1)* %out
  ret void
}
; Without unsafe-fp-math (#0), (0.0 - x) is NOT fneg x (differs for x == +0.0),
; so the subtract stays a real v_sub and the FMA is a plain v_mac.
; GCN-LABEL: {{^}}safe_mad_sub0_src0:
; GCN: v_sub_f32_e32 [[SUB0:v[0-9]+]], 0,
; GCN: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[SUB0]]
define void @safe_mad_sub0_src0(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %c_ptr = getelementptr float, float addrspace(1)* %in, i32 2

  %a = load float, float addrspace(1)* %in
  %b = load float, float addrspace(1)* %b_ptr
  %c = load float, float addrspace(1)* %c_ptr

  %neg_a = fsub float 0.0, %a
  %tmp0 = fmul float %neg_a, %b
  %tmp1 = fadd float %tmp0, %c

  store float %tmp1, float addrspace(1)* %out
  ret void
}
; Negation on the second multiplicand: commuted onto a v_mad source modifier.
; GCN-LABEL: {{^}}mad_neg_src1:
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
define void @mad_neg_src1(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %c_ptr = getelementptr float, float addrspace(1)* %in, i32 2

  %a = load float, float addrspace(1)* %in
  %b = load float, float addrspace(1)* %b_ptr
  %c = load float, float addrspace(1)* %c_ptr

  %neg_b = fsub float -0.0, %b
  %tmp0 = fmul float %a, %neg_b
  %tmp1 = fadd float %tmp0, %c

  store float %tmp1, float addrspace(1)* %out
  ret void
}
; unsafe-fp-math variant of mad_neg_src1: (0.0 - b) folds to fneg b.
; GCN-LABEL: {{^}}unsafe_mad_sub0_src1:
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
define void @unsafe_mad_sub0_src1(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
entry:
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %c_ptr = getelementptr float, float addrspace(1)* %in, i32 2

  %a = load float, float addrspace(1)* %in
  %b = load float, float addrspace(1)* %b_ptr
  %c = load float, float addrspace(1)* %c_ptr

  %neg_b = fsub float 0.0, %b
  %tmp0 = fmul float %a, %neg_b
  %tmp1 = fadd float %tmp0, %c

  store float %tmp1, float addrspace(1)* %out
  ret void
}
; Negated addend: needs a src2 modifier, which only v_mad (VOP3) provides.
; GCN-LABEL: {{^}}mad_neg_src2:
; GCN: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[-0-9]}}
define void @mad_neg_src2(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
entry:
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %c_ptr = getelementptr float, float addrspace(1)* %in, i32 2

  %a = load float, float addrspace(1)* %in
  %b = load float, float addrspace(1)* %b_ptr
  %c = load float, float addrspace(1)* %c_ptr

  %neg_c = fsub float -0.0, %c
  %tmp0 = fmul float %a, %b
  %tmp1 = fadd float %tmp0, %neg_c

  store float %tmp1, float addrspace(1)* %out
  ret void
}
; Without special casing the inline constant check for v_mac_f32's
; src2, this fails to fold the 1.0 into a mad.

; GCN-LABEL: {{^}}fold_inline_imm_into_mac_src2_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]

; GCN: v_add_f32_e32 [[TMP2:v[0-9]+]], [[A]], [[A]]
; GCN: v_mad_f32 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
define void @fold_inline_imm_into_mac_src2_f32(float addrspace(1)* %out, float addrspace(1)* %a, float addrspace(1)* %b) #3 {
bb:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.a = getelementptr inbounds float, float addrspace(1)* %a, i64 %tid.ext
  %gep.b = getelementptr inbounds float, float addrspace(1)* %b, i64 %tid.ext
  %gep.out = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
  %tmp = load volatile float, float addrspace(1)* %gep.a
  %tmp1 = load volatile float, float addrspace(1)* %gep.b
  ; 1.0 - (2a * 4.0) -> mad(2a, -4.0, 1.0) if src2 inline-imm handling works
  %tmp2 = fadd float %tmp, %tmp
  %tmp3 = fmul float %tmp2, 4.0
  %tmp4 = fsub float 1.0, %tmp3
  %tmp5 = fadd float %tmp4, %tmp1
  %tmp6 = fadd float %tmp1, %tmp1
  %tmp7 = fmul float %tmp6, %tmp
  %tmp8 = fsub float 1.0, %tmp7
  %tmp9 = fmul float %tmp8, 8.0
  %tmp10 = fadd float %tmp5, %tmp9
  store float %tmp10, float addrspace(1)* %gep.out
  ret void
}
; f16 analogue of the src2 inline-immediate fold; only VI has 16-bit VALU ops,
; SI legalizes through f32 conversions.
; GCN-LABEL: {{^}}fold_inline_imm_into_mac_src2_f16:
; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_ushort [[B:v[0-9]+]]

; FIXME: How is this not folded?
; SI: v_cvt_f32_f16_e32 v{{[0-9]+}}, 0x3c00

; VI: v_add_f16_e32 [[TMP2:v[0-9]+]], [[A]], [[A]]
; VI: v_mad_f16 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
define void @fold_inline_imm_into_mac_src2_f16(half addrspace(1)* %out, half addrspace(1)* %a, half addrspace(1)* %b) #3 {
bb:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
  %gep.out = getelementptr inbounds half, half addrspace(1)* %out, i64 %tid.ext
  %tmp = load volatile half, half addrspace(1)* %gep.a
  %tmp1 = load volatile half, half addrspace(1)* %gep.b
  %tmp2 = fadd half %tmp, %tmp
  %tmp3 = fmul half %tmp2, 4.0
  %tmp4 = fsub half 1.0, %tmp3
  %tmp5 = fadd half %tmp4, %tmp1
  %tmp6 = fadd half %tmp1, %tmp1
  %tmp7 = fmul half %tmp6, %tmp
  %tmp8 = fsub half 1.0, %tmp7
  %tmp9 = fmul half %tmp8, 8.0
  %tmp10 = fadd half %tmp5, %tmp9
  store half %tmp10, half addrspace(1)* %gep.out
  ret void
}
declare i32 @llvm.amdgcn.workitem.id.x() #2

; #0/#1 differ only in unsafe-fp-math, which gates the (0.0 - x) -> fneg fold
; exercised by the safe_/unsafe_ test pairs above.
attributes #0 = { nounwind "unsafe-fp-math"="false" }
attributes #1 = { nounwind "unsafe-fp-math"="true" }
attributes #2 = { nounwind readnone }
attributes #3 = { nounwind }