; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
; Hexagon Programmer's Reference Manual 11.10.8 XTYPE/SHIFT

; Shift by immediate
declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32)
define i64 @S2_asr_i_p(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.asr.i.p(i64 %a, i32 0)
  ret i64 %z
}
; CHECK: r1:0 = asr(r1:0, #0)

declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32)
define i64 @S2_lsr_i_p(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %a, i32 0)
  ret i64 %z
}
; CHECK: r1:0 = lsr(r1:0, #0)

declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32)
define i64 @S2_asl_i_p(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.asl.i.p(i64 %a, i32 0)
  ret i64 %z
}
; CHECK: r1:0 = asl(r1:0, #0)

declare i32 @llvm.hexagon.S2.asr.i.r(i32, i32)
define i32 @S2_asr_i_r(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.asr.i.r(i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = asr(r0, #0)

declare i32 @llvm.hexagon.S2.lsr.i.r(i32, i32)
define i32 @S2_lsr_i_r(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.lsr.i.r(i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = lsr(r0, #0)

declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32)
define i32 @S2_asl_i_r(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = asl(r0, #0)

; Shift by immediate and accumulate
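; The .acc/.nac intrinsics fold the shift into an accumulate: as the CHECK
; lines below show, the destination is updated in place as rx += shift(...)
; (acc) or rx -= shift(...) (nac).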
declare i64 @llvm.hexagon.S2.asr.i.p.nac(i64, i64, i32)
define i64 @S2_asr_i_p_nac(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asr.i.p.nac(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 -= asr(r3:2, #0)

declare i64 @llvm.hexagon.S2.lsr.i.p.nac(i64, i64, i32)
define i64 @S2_lsr_i_p_nac(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.p.nac(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 -= lsr(r3:2, #0)

declare i64 @llvm.hexagon.S2.asl.i.p.nac(i64, i64, i32)
define i64 @S2_asl_i_p_nac(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.i.p.nac(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 -= asl(r3:2, #0)

declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32)
define i64 @S2_asr_i_p_acc(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 += asr(r3:2, #0)

declare i64 @llvm.hexagon.S2.lsr.i.p.acc(i64, i64, i32)
define i64 @S2_lsr_i_p_acc(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.p.acc(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 += lsr(r3:2, #0)

declare i64 @llvm.hexagon.S2.asl.i.p.acc(i64, i64, i32)
define i64 @S2_asl_i_p_acc(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.i.p.acc(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 += asl(r3:2, #0)
declare i32 @llvm.hexagon.S2.asr.i.r.nac(i32, i32, i32)
define i32 @S2_asr_i_r_nac(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asr.i.r.nac(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 -= asr(r1, #0)

declare i32 @llvm.hexagon.S2.lsr.i.r.nac(i32, i32, i32)
define i32 @S2_lsr_i_r_nac(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsr.i.r.nac(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 -= lsr(r1, #0)

declare i32 @llvm.hexagon.S2.asl.i.r.nac(i32, i32, i32)
define i32 @S2_asl_i_r_nac(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r.nac(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 -= asl(r1, #0)

declare i32 @llvm.hexagon.S2.asr.i.r.acc(i32, i32, i32)
define i32 @S2_asr_i_r_acc(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asr.i.r.acc(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 += asr(r1, #0)

declare i32 @llvm.hexagon.S2.lsr.i.r.acc(i32, i32, i32)
define i32 @S2_lsr_i_r_acc(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsr.i.r.acc(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 += lsr(r1, #0)

declare i32 @llvm.hexagon.S2.asl.i.r.acc(i32, i32, i32)
define i32 @S2_asl_i_r_acc(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r.acc(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 += asl(r1, #0)

; Shift by immediate and add
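; These fuse an immediate add or subtract with a shifted register operand
; (rd = add(#imm, shift(rs, #imm))); addasl adds a register to a shifted
; register in a single instruction.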
declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32)
define i32 @S4_addi_asl_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = add(#0, asl(r0, #0))

declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32)
define i32 @S4_subi_asl_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = sub(#0, asl(r0, #0))

declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32)
define i32 @S4_addi_lsr_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = add(#0, lsr(r0, #0))

declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32)
define i32 @S4_subi_lsr_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = sub(#0, lsr(r0, #0))

declare i32 @llvm.hexagon.S2.addasl.rrri(i32, i32, i32)
define i32 @S2_addasl_rrri(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.addasl.rrri(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 = addasl(r0, r1, #0)

; Shift by immediate and logical
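; The .and/.or/.xacc intrinsics combine the shifted value into the
; destination with a bitwise operation: rx &= shift(...), rx |= shift(...),
; or rx ^= shift(...).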
declare i64 @llvm.hexagon.S2.asr.i.p.and(i64, i64, i32)
define i64 @S2_asr_i_p_and(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asr.i.p.and(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 &= asr(r3:2, #0)

declare i64 @llvm.hexagon.S2.lsr.i.p.and(i64, i64, i32)
define i64 @S2_lsr_i_p_and(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.p.and(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 &= lsr(r3:2, #0)

declare i64 @llvm.hexagon.S2.asl.i.p.and(i64, i64, i32)
define i64 @S2_asl_i_p_and(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.i.p.and(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 &= asl(r3:2, #0)

declare i64 @llvm.hexagon.S2.asr.i.p.or(i64, i64, i32)
define i64 @S2_asr_i_p_or(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asr.i.p.or(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 |= asr(r3:2, #0)

declare i64 @llvm.hexagon.S2.lsr.i.p.or(i64, i64, i32)
define i64 @S2_lsr_i_p_or(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.p.or(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 |= lsr(r3:2, #0)

declare i64 @llvm.hexagon.S2.asl.i.p.or(i64, i64, i32)
define i64 @S2_asl_i_p_or(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.i.p.or(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 |= asl(r3:2, #0)

declare i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64, i64, i32)
define i64 @S2_lsr_i_p_xacc(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 ^= lsr(r3:2, #0)

declare i64 @llvm.hexagon.S2.asl.i.p.xacc(i64, i64, i32)
define i64 @S2_asl_i_p_xacc(i64 %a, i64 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.i.p.xacc(i64 %a, i64 %b, i32 0)
  ret i64 %z
}
; CHECK: r1:0 ^= asl(r3:2, #0)
declare i32 @llvm.hexagon.S2.asr.i.r.and(i32, i32, i32)
define i32 @S2_asr_i_r_and(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asr.i.r.and(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 &= asr(r1, #0)

declare i32 @llvm.hexagon.S2.lsr.i.r.and(i32, i32, i32)
define i32 @S2_lsr_i_r_and(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsr.i.r.and(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 &= lsr(r1, #0)

declare i32 @llvm.hexagon.S2.asl.i.r.and(i32, i32, i32)
define i32 @S2_asl_i_r_and(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r.and(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 &= asl(r1, #0)

declare i32 @llvm.hexagon.S2.asr.i.r.or(i32, i32, i32)
define i32 @S2_asr_i_r_or(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asr.i.r.or(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 |= asr(r1, #0)

declare i32 @llvm.hexagon.S2.lsr.i.r.or(i32, i32, i32)
define i32 @S2_lsr_i_r_or(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsr.i.r.or(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 |= lsr(r1, #0)

declare i32 @llvm.hexagon.S2.asl.i.r.or(i32, i32, i32)
define i32 @S2_asl_i_r_or(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r.or(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 |= asl(r1, #0)

declare i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32, i32, i32)
define i32 @S2_lsr_i_r_xacc(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 ^= lsr(r1, #0)

declare i32 @llvm.hexagon.S2.asl.i.r.xacc(i32, i32, i32)
define i32 @S2_asl_i_r_xacc(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r.xacc(i32 %a, i32 %b, i32 0)
  ret i32 %z
}
; CHECK: r0 ^= asl(r1, #0)
declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32)
define i32 @S4_andi_asl_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = and(#0, asl(r0, #0))

declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32)
define i32 @S4_ori_asl_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = or(#0, asl(r0, #0))

declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32)
define i32 @S4_andi_lsr_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = and(#0, lsr(r0, #0))

declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32)
define i32 @S4_ori_lsr_ri(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0, i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = or(#0, lsr(r0, #0))

; Shift right by immediate with rounding
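; The :rnd modifier rounds the shifted result to the nearest value instead
; of truncating it.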
declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32)
define i64 @S2_asr_i_p_rnd(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 %a, i32 0)
  ret i64 %z
}
; CHECK: r1:0 = asr(r1:0, #0):rnd

declare i32 @llvm.hexagon.S2.asr.i.r.rnd(i32, i32)
define i32 @S2_asr_i_r_rnd(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = asr(r0, #0):rnd

; Shift left by immediate with saturation
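; The :sat modifier clamps the result to the signed 32-bit range when the
; shift would overflow.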
declare i32 @llvm.hexagon.S2.asl.i.r.sat(i32, i32)
define i32 @S2_asl_i_r_sat(i32 %a) {
  %z = call i32 @llvm.hexagon.S2.asl.i.r.sat(i32 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = asl(r0, #0):sat

; Shift by register
declare i64 @llvm.hexagon.S2.asr.r.p(i64, i32)
define i64 @S2_asr_r_p(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.asr.r.p(i64 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 = asr(r1:0, r2)

declare i64 @llvm.hexagon.S2.lsr.r.p(i64, i32)
define i64 @S2_lsr_r_p(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.r.p(i64 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 = lsr(r1:0, r2)

declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32)
define i64 @S2_asl_r_p(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.r.p(i64 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 = asl(r1:0, r2)

declare i64 @llvm.hexagon.S2.lsl.r.p(i64, i32)
define i64 @S2_lsl_r_p(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.lsl.r.p(i64 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 = lsl(r1:0, r2)

declare i32 @llvm.hexagon.S2.asr.r.r(i32, i32)
define i32 @S2_asr_r_r(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asr.r.r(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = asr(r0, r1)

declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32)
define i32 @S2_lsr_r_r(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsr.r.r(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = lsr(r0, r1)

declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32)
define i32 @S2_asl_r_r(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.r.r(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = asl(r0, r1)

declare i32 @llvm.hexagon.S2.lsl.r.r(i32, i32)
define i32 @S2_lsl_r_r(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.lsl.r.r(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = lsl(r0, r1)

declare i32 @llvm.hexagon.S4.lsli(i32, i32)
define i32 @S4_lsli(i32 %a) {
  %z = call i32 @llvm.hexagon.S4.lsli(i32 0, i32 %a)
  ret i32 %z
}
; CHECK: r0 = lsl(#0, r0)

; Shift by register and accumulate
declare i64 @llvm.hexagon.S2.asr.r.p.nac(i64, i64, i32)
define i64 @S2_asr_r_p_nac(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.asr.r.p.nac(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 -= asr(r3:2, r4)

declare i64 @llvm.hexagon.S2.lsr.r.p.nac(i64, i64, i32)
define i64 @S2_lsr_r_p_nac(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.lsr.r.p.nac(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 -= lsr(r3:2, r4)

declare i64 @llvm.hexagon.S2.asl.r.p.nac(i64, i64, i32)
define i64 @S2_asl_r_p_nac(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.asl.r.p.nac(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 -= asl(r3:2, r4)

declare i64 @llvm.hexagon.S2.lsl.r.p.nac(i64, i64, i32)
define i64 @S2_lsl_r_p_nac(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.lsl.r.p.nac(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 -= lsl(r3:2, r4)

declare i64 @llvm.hexagon.S2.asr.r.p.acc(i64, i64, i32)
define i64 @S2_asr_r_p_acc(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.asr.r.p.acc(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 += asr(r3:2, r4)

declare i64 @llvm.hexagon.S2.lsr.r.p.acc(i64, i64, i32)
define i64 @S2_lsr_r_p_acc(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.lsr.r.p.acc(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 += lsr(r3:2, r4)

declare i64 @llvm.hexagon.S2.asl.r.p.acc(i64, i64, i32)
define i64 @S2_asl_r_p_acc(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.asl.r.p.acc(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 += asl(r3:2, r4)

declare i64 @llvm.hexagon.S2.lsl.r.p.acc(i64, i64, i32)
define i64 @S2_lsl_r_p_acc(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.lsl.r.p.acc(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 += lsl(r3:2, r4)
declare i32 @llvm.hexagon.S2.asr.r.r.nac(i32, i32, i32)
define i32 @S2_asr_r_r_nac(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.asr.r.r.nac(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 -= asr(r1, r2)

declare i32 @llvm.hexagon.S2.lsr.r.r.nac(i32, i32, i32)
define i32 @S2_lsr_r_r_nac(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.lsr.r.r.nac(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 -= lsr(r1, r2)

declare i32 @llvm.hexagon.S2.asl.r.r.nac(i32, i32, i32)
define i32 @S2_asl_r_r_nac(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.asl.r.r.nac(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 -= asl(r1, r2)

declare i32 @llvm.hexagon.S2.lsl.r.r.nac(i32, i32, i32)
define i32 @S2_lsl_r_r_nac(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.lsl.r.r.nac(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 -= lsl(r1, r2)

declare i32 @llvm.hexagon.S2.asr.r.r.acc(i32, i32, i32)
define i32 @S2_asr_r_r_acc(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.asr.r.r.acc(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 += asr(r1, r2)

declare i32 @llvm.hexagon.S2.lsr.r.r.acc(i32, i32, i32)
define i32 @S2_lsr_r_r_acc(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.lsr.r.r.acc(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 += lsr(r1, r2)

declare i32 @llvm.hexagon.S2.asl.r.r.acc(i32, i32, i32)
define i32 @S2_asl_r_r_acc(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.asl.r.r.acc(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 += asl(r1, r2)

declare i32 @llvm.hexagon.S2.lsl.r.r.acc(i32, i32, i32)
define i32 @S2_lsl_r_r_acc(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.lsl.r.r.acc(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 += lsl(r1, r2)

; Shift by register and logical
declare i64 @llvm.hexagon.S2.asr.r.p.or(i64, i64, i32)
define i64 @S2_asr_r_p_or(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.asr.r.p.or(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 |= asr(r3:2, r4)

declare i64 @llvm.hexagon.S2.lsr.r.p.or(i64, i64, i32)
define i64 @S2_lsr_r_p_or(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.lsr.r.p.or(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 |= lsr(r3:2, r4)

declare i64 @llvm.hexagon.S2.asl.r.p.or(i64, i64, i32)
define i64 @S2_asl_r_p_or(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.asl.r.p.or(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 |= asl(r3:2, r4)

declare i64 @llvm.hexagon.S2.lsl.r.p.or(i64, i64, i32)
define i64 @S2_lsl_r_p_or(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.lsl.r.p.or(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 |= lsl(r3:2, r4)

declare i64 @llvm.hexagon.S2.asr.r.p.and(i64, i64, i32)
define i64 @S2_asr_r_p_and(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.asr.r.p.and(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 &= asr(r3:2, r4)

declare i64 @llvm.hexagon.S2.lsr.r.p.and(i64, i64, i32)
define i64 @S2_lsr_r_p_and(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.lsr.r.p.and(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 &= lsr(r3:2, r4)

declare i64 @llvm.hexagon.S2.asl.r.p.and(i64, i64, i32)
define i64 @S2_asl_r_p_and(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.asl.r.p.and(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 &= asl(r3:2, r4)

declare i64 @llvm.hexagon.S2.lsl.r.p.and(i64, i64, i32)
define i64 @S2_lsl_r_p_and(i64 %a, i64 %b, i32 %c) {
  %z = call i64 @llvm.hexagon.S2.lsl.r.p.and(i64 %a, i64 %b, i32 %c)
  ret i64 %z
}
; CHECK: r1:0 &= lsl(r3:2, r4)
declare i32 @llvm.hexagon.S2.asr.r.r.or(i32, i32, i32)
define i32 @S2_asr_r_r_or(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.asr.r.r.or(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 |= asr(r1, r2)

declare i32 @llvm.hexagon.S2.lsr.r.r.or(i32, i32, i32)
define i32 @S2_lsr_r_r_or(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.lsr.r.r.or(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 |= lsr(r1, r2)

declare i32 @llvm.hexagon.S2.asl.r.r.or(i32, i32, i32)
define i32 @S2_asl_r_r_or(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.asl.r.r.or(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 |= asl(r1, r2)

declare i32 @llvm.hexagon.S2.lsl.r.r.or(i32, i32, i32)
define i32 @S2_lsl_r_r_or(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.lsl.r.r.or(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 |= lsl(r1, r2)

declare i32 @llvm.hexagon.S2.asr.r.r.and(i32, i32, i32)
define i32 @S2_asr_r_r_and(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.asr.r.r.and(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 &= asr(r1, r2)

declare i32 @llvm.hexagon.S2.lsr.r.r.and(i32, i32, i32)
define i32 @S2_lsr_r_r_and(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.lsr.r.r.and(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 &= lsr(r1, r2)

declare i32 @llvm.hexagon.S2.asl.r.r.and(i32, i32, i32)
define i32 @S2_asl_r_r_and(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.asl.r.r.and(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 &= asl(r1, r2)

declare i32 @llvm.hexagon.S2.lsl.r.r.and(i32, i32, i32)
define i32 @S2_lsl_r_r_and(i32 %a, i32 %b, i32 %c) {
  %z = call i32 @llvm.hexagon.S2.lsl.r.r.and(i32 %a, i32 %b, i32 %c)
  ret i32 %z
}
; CHECK: r0 &= lsl(r1, r2)

; Shift by register with saturation
declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32)
define i32 @S2_asr_r_r_sat(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = asr(r0, r1):sat

declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32)
define i32 @S2_asl_r_r_sat(i32 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = asl(r0, r1):sat

; Vector shift halfwords by immediate
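; The vector forms treat the 64-bit register pair as independent lanes;
; vasrh/vlsrh/vaslh shift each of the four 16-bit halfwords separately.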
declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32)
define i64 @S2_asr_i_vh(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.asr.i.vh(i64 %a, i32 0)
  ret i64 %z
}
; CHECK: r1:0 = vasrh(r1:0, #0)

declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32)
define i64 @S2_lsr_i_vh(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %a, i32 0)
  ret i64 %z
}
; CHECK: r1:0 = vlsrh(r1:0, #0)

declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32)
define i64 @S2_asl_i_vh(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.asl.i.vh(i64 %a, i32 0)
  ret i64 %z
}
; CHECK: r1:0 = vaslh(r1:0, #0)

; Vector shift halfwords by register
declare i64 @llvm.hexagon.S2.asr.r.vh(i64, i32)
define i64 @S2_asr_r_vh(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.asr.r.vh(i64 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 = vasrh(r1:0, r2)

declare i64 @llvm.hexagon.S2.lsr.r.vh(i64, i32)
define i64 @S2_lsr_r_vh(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.lsr.r.vh(i64 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 = vlsrh(r1:0, r2)

declare i64 @llvm.hexagon.S2.asl.r.vh(i64, i32)
define i64 @S2_asl_r_vh(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.asl.r.vh(i64 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 = vaslh(r1:0, r2)

declare i64 @llvm.hexagon.S2.lsl.r.vh(i64, i32)
define i64 @S2_lsl_r_vh(i64 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.S2.lsl.r.vh(i64 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 = vlslh(r1:0, r2)

; Vector shift words by immediate
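; vasrw/vlsrw/vaslw shift each of the two 32-bit words of the pair
; separately.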
declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32)
define i64 @S2_asr_i_vw(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.asr.i.vw(i64 %a, i32 0)
  ret i64 %z
}
; CHECK: r1:0 = vasrw(r1:0, #0)

declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32)
define i64 @S2_lsr_i_vw(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %a, i32 0)
  ret i64 %z
}
; CHECK: r1:0 = vlsrw(r1:0, #0)

declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32)
define i64 @S2_asl_i_vw(i64 %a) {
  %z = call i64 @llvm.hexagon.S2.asl.i.vw(i64 %a, i32 0)
  ret i64 %z
}
; CHECK: r1:0 = vaslw(r1:0, #0)

; Vector shift words with truncate and pack
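; These shift each 32-bit word right, truncate each result to 16 bits, and
; pack the two halfwords into a single 32-bit register (hence the scalar r0
; destination in the CHECK lines).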
declare i32 @llvm.hexagon.S2.asr.i.svw.trun(i64, i32)
define i32 @S2_asr_i_svw_trun(i64 %a) {
  %z = call i32 @llvm.hexagon.S2.asr.i.svw.trun(i64 %a, i32 0)
  ret i32 %z
}
; CHECK: r0 = vasrw(r1:0, #0)

declare i32 @llvm.hexagon.S2.asr.r.svw.trun(i64, i32)
define i32 @S2_asr_r_svw_trun(i64 %a, i32 %b) {
  %z = call i32 @llvm.hexagon.S2.asr.r.svw.trun(i64 %a, i32 %b)
  ret i32 %z
}
; CHECK: r0 = vasrw(r1:0, r2)