; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i16 @llvm.fshl.i16(i16, i16, i16)
declare i32 @llvm.fshl.i32(i32, i32, i32)
declare i64 @llvm.fshl.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

declare i8 @llvm.fshr.i8(i8, i8, i8)
declare i16 @llvm.fshr.i16(i16, i16, i16)
declare i32 @llvm.fshr.i32(i32, i32, i32)
declare i64 @llvm.fshr.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
; When first 2 operands match, it's a rotate.
define i8 @rotl_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotl_i8_const_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #5, #3
; CHECK-NEXT:    bfi w8, w0, #3, #29
; CHECK-NEXT:    mov w0, w8
; CHECK-NEXT:    ret
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 3)
  ret i8 %f
}
define i64 @rotl_i64_const_shift(i64 %x) {
; CHECK-LABEL: rotl_i64_const_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ror x0, x0, #61
; CHECK-NEXT:    ret
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 3)
  ret i64 %f
}
; When first 2 operands match, it's a rotate (by variable amount).
define i16 @rotl_i16(i16 %x, i16 %z) {
; CHECK-LABEL: rotl_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr w10, wzr, #0x10
; CHECK-NEXT:    sub w10, w10, w1
; CHECK-NEXT:    and w8, w0, #0xffff
; CHECK-NEXT:    and w9, w1, #0xf
; CHECK-NEXT:    and w10, w10, #0xf
; CHECK-NEXT:    lsl w9, w0, w9
; CHECK-NEXT:    lsr w8, w8, w10
; CHECK-NEXT:    orr w0, w9, w8
; CHECK-NEXT:    ret
  %f = call i16 @llvm.fshl.i16(i16 %x, i16 %x, i16 %z)
  ret i16 %f
}
define i32 @rotl_i32(i32 %x, i32 %z) {
; CHECK-LABEL: rotl_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr w8, wzr, #0x20
; CHECK-NEXT:    sub w8, w8, w1
; CHECK-NEXT:    ror w0, w0, w8
; CHECK-NEXT:    ret
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}
define i64 @rotl_i64(i64 %x, i64 %z) {
; CHECK-LABEL: rotl_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr w9, wzr, #0x40
; CHECK-NEXT:    sub w9, w9, w1
; CHECK-NEXT:    lsl x8, x0, x1
; CHECK-NEXT:    lsr x9, x0, x9
; CHECK-NEXT:    orr x0, x8, x9
; CHECK-NEXT:    ret
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %z)
  ret i64 %f
}
define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) {
; CHECK-LABEL: rotl_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v2.4s, #31
; CHECK-NEXT:    movi v3.4s, #32
; CHECK-NEXT:    and v4.16b, v1.16b, v2.16b
; CHECK-NEXT:    sub v1.4s, v3.4s, v1.4s
; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
; CHECK-NEXT:    neg v1.4s, v1.4s
; CHECK-NEXT:    ushl v3.4s, v0.4s, v4.4s
; CHECK-NEXT:    ushl v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    orr v0.16b, v3.16b, v0.16b
; CHECK-NEXT:    ret
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}
; Vector rotate by constant splat amount.
define <4 x i32> @rotl_v4i32_rotl_const_shift(<4 x i32> %x) {
; CHECK-LABEL: rotl_v4i32_rotl_const_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v1.4s, v0.4s, #29
; CHECK-NEXT:    shl v0.4s, v0.4s, #3
; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  ret <4 x i32> %f
}
; Repeat everything for funnel shift right.

; When first 2 operands match, it's a rotate.
define i8 @rotr_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotr_i8_const_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #3, #5
; CHECK-NEXT:    bfi w8, w0, #5, #27
; CHECK-NEXT:    mov w0, w8
; CHECK-NEXT:    ret
  %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3)
  ret i8 %f
}
define i32 @rotr_i32_const_shift(i32 %x) {
; CHECK-LABEL: rotr_i32_const_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ror w0, w0, #3
; CHECK-NEXT:    ret
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 3)
  ret i32 %f
}
; When first 2 operands match, it's a rotate (by variable amount).
define i16 @rotr_i16(i16 %x, i16 %z) {
; CHECK-LABEL: rotr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w8, w0, #0xffff
; CHECK-NEXT:    and w9, w1, #0xf
; CHECK-NEXT:    orr w10, wzr, #0x10
; CHECK-NEXT:    lsr w8, w8, w9
; CHECK-NEXT:    sub w9, w10, w1
; CHECK-NEXT:    and w9, w9, #0xf
; CHECK-NEXT:    lsl w9, w0, w9
; CHECK-NEXT:    orr w0, w9, w8
; CHECK-NEXT:    ret
  %f = call i16 @llvm.fshr.i16(i16 %x, i16 %x, i16 %z)
  ret i16 %f
}
define i32 @rotr_i32(i32 %x, i32 %z) {
; CHECK-LABEL: rotr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ror w0, w0, w1
; CHECK-NEXT:    ret
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}
define i64 @rotr_i64(i64 %x, i64 %z) {
; CHECK-LABEL: rotr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ror x0, x0, x1
; CHECK-NEXT:    ret
  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %x, i64 %z)
  ret i64 %f
}
define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) {
; CHECK-LABEL: rotr_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v2.4s, #31
; CHECK-NEXT:    movi v3.4s, #32
; CHECK-NEXT:    and v4.16b, v1.16b, v2.16b
; CHECK-NEXT:    sub v1.4s, v3.4s, v1.4s
; CHECK-NEXT:    neg v3.4s, v4.4s
; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
; CHECK-NEXT:    ushl v2.4s, v0.4s, v3.4s
; CHECK-NEXT:    ushl v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
; CHECK-NEXT:    ret
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}
; Vector rotate by constant splat amount.
define <4 x i32> @rotr_v4i32_const_shift(<4 x i32> %x) {
; CHECK-LABEL: rotr_v4i32_const_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v1.4s, v0.4s, #3
; CHECK-NEXT:    shl v0.4s, v0.4s, #29
; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  ret <4 x i32> %f
}
; Shift by a full bitwidth is a no-op modulo the bitwidth, so this folds away.
define i32 @rotl_i32_shift_by_bitwidth(i32 %x) {
; CHECK-LABEL: rotl_i32_shift_by_bitwidth:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 32)
  ret i32 %f
}
define i32 @rotr_i32_shift_by_bitwidth(i32 %x) {
; CHECK-LABEL: rotr_i32_shift_by_bitwidth:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 32)
  ret i32 %f
}
define <4 x i32> @rotl_v4i32_shift_by_bitwidth(<4 x i32> %x) {
; CHECK-LABEL: rotl_v4i32_shift_by_bitwidth:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}
define <4 x i32> @rotr_v4i32_shift_by_bitwidth(<4 x i32> %x) {
; CHECK-LABEL: rotr_v4i32_shift_by_bitwidth:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}