1 ; RUN: llc -march=mipsel -mcpu=mips2 < %s | FileCheck %s
; Declarations of the legacy (pre-LLVM 3.0) atomic intrinsics exercised below.
; These predate the `atomicrmw`/`cmpxchg` IR instructions; each takes the
; address first, then the operand value(s). i32 and i8 widths are covered so
; both word-sized and sub-word atomic expansion paths are tested.
4 declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind
5 declare i32 @llvm.atomic.load.nand.i32.p0i32(i32* nocapture, i32) nounwind
6 declare i32 @llvm.atomic.swap.i32.p0i32(i32* nocapture, i32) nounwind
7 declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* nocapture, i32, i32) nounwind
9 declare i8 @llvm.atomic.load.add.i8.p0i8(i8* nocapture, i8) nounwind
10 declare i8 @llvm.atomic.load.sub.i8.p0i8(i8* nocapture, i8) nounwind
11 declare i8 @llvm.atomic.load.nand.i8.p0i8(i8* nocapture, i8) nounwind
12 declare i8 @llvm.atomic.swap.i8.p0i8(i8* nocapture, i8) nounwind
13 declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* nocapture, i8, i8) nounwind
; Word-aligned i32 cell shared by the 32-bit atomic tests.
16 @x = common global i32 0, align 4
; Atomic fetch-and-add on @x. Expected lowering: load the GOT address of x,
; then an ll/sc retry loop — ll the old value, copy it to the return register
; $2, addu the increment ($4), sc the sum, and branch back on sc failure.
; NOTE(review): the function body appears truncated in this view (no entry
; label, ret, or closing brace visible) — likely an extraction artifact.
18 define i32 @AtomicLoadAdd32(i32 %incr) nounwind {
20 %0 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* @x, i32 %incr)
23 ; CHECK: AtomicLoadAdd32:
24 ; CHECK: lw $[[R0:[0-9]+]], %got(x)($gp)
25 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
26 ; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
27 ; CHECK: or $2, $zero, $[[R1]]
28 ; CHECK: addu $[[R2:[0-9]+]], $[[R1]], $4
29 ; CHECK: sc $[[R2]], 0($[[R0]])
30 ; CHECK: beq $[[R2]], $zero, $[[BB0]]
; Atomic fetch-and-nand on @x. MIPS has no nand instruction, so the loop
; computes it as and followed by nor-with-$zero (bitwise complement) before
; the sc. Old value is returned in $2.
; NOTE(review): body appears truncated in this view (no ret/closing brace).
33 define i32 @AtomicLoadNand32(i32 %incr) nounwind {
35 %0 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* @x, i32 %incr)
38 ; CHECK: AtomicLoadNand32:
39 ; CHECK: lw $[[R0:[0-9]+]], %got(x)($gp)
40 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
41 ; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
42 ; CHECK: or $2, $zero, $[[R1]]
43 ; CHECK: and $[[R1]], $[[R1]], $4
44 ; CHECK: nor $[[R2:[0-9]+]], $zero, $[[R1]]
45 ; CHECK: sc $[[R2]], 0($[[R0]])
46 ; CHECK: beq $[[R2]], $zero, $[[BB0]]
; Atomic exchange on @x. The new value ($4) is spilled to a stack slot before
; the loop and reloaded on every iteration (sc destroys its source register's
; value, so the loop re-materializes it from the spill each retry).
; NOTE(review): body appears truncated in this view (no ret/closing brace).
49 define i32 @AtomicSwap32(i32 %oldval) nounwind {
51 %0 = call i32 @llvm.atomic.swap.i32.p0i32(i32* @x, i32 %oldval)
54 ; CHECK: AtomicSwap32:
55 ; CHECK: lw $[[R0:[0-9]+]], %got(x)($gp)
56 ; CHECK: sw $4, [[OFFSET:[0-9]+]]($sp)
57 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
58 ; CHECK: ll $[[R1:[0-9]+]], 0($[[R0]])
59 ; CHECK: or $2, $zero, $[[R1]]
60 ; CHECK: lw $[[R2:[0-9]+]], [[OFFSET]]($sp)
61 ; CHECK: or $[[R3:[0-9]+]], $zero, $[[R2]]
62 ; CHECK: sc $[[R3]], 0($[[R0]])
63 ; CHECK: beq $[[R3]], $zero, $[[BB0]]
; Atomic compare-and-swap on @x: if *x == %oldval ($4), store %newval ($5).
; The loaded value goes straight into $2 (the return register); a bne exits
; to [[BB1]] when the comparison fails, otherwise the reloaded new value is
; stored with sc and the loop retries on sc failure.
; NOTE(review): body appears truncated in this view (no ret/closing brace).
66 define i32 @AtomicCmpSwap32(i32 %oldval, i32 %newval) nounwind {
68 %0 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* @x, i32 %oldval, i32 %newval)
71 ; CHECK: AtomicCmpSwap32:
72 ; CHECK: lw $[[R0:[0-9]+]], %got(x)($gp)
73 ; CHECK: sw $5, [[OFFSET:[0-9]+]]($sp)
74 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
75 ; CHECK: ll $2, 0($[[R0]])
76 ; CHECK: bne $2, $4, $[[BB1:[A-Z_0-9]+]]
77 ; CHECK: lw $[[R1:[0-9]+]], [[OFFSET]]($sp)
78 ; CHECK: or $[[R2:[0-9]+]], $zero, $[[R1]]
79 ; CHECK: sc $[[R2]], 0($[[R0]])
80 ; CHECK: beq $[[R2]], $zero, $[[BB0]]
; Byte-sized cell shared by the i8 atomic tests. ll/sc only operate on
; aligned words, so all i8 operations below are emulated on the containing
; 32-bit word with shift/mask arithmetic.
86 @y = common global i8 0, align 1
; Atomic i8 fetch-and-add on @y. Setup: round the address down to the word
; ([[R2]] = addr & -4), compute the byte's bit offset ([[R4]] = (addr & 3)*8
; — little-endian placement), and build a mask [[R6]] = 0xff << offset with
; its complement [[R7]]. The loop adds the shifted operand into the word,
; splices the changed byte back with and/and/or, and retries until sc
; succeeds. Epilogue extracts the old byte and sign-extends via sll/sra 24.
; NOTE(review): body appears truncated in this view (no ret/closing brace).
88 define signext i8 @AtomicLoadAdd8(i8 signext %incr) nounwind {
90 %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @y, i8 %incr)
93 ; CHECK: AtomicLoadAdd8:
94 ; CHECK: lw $[[R0:[0-9]+]], %got(y)($gp)
95 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
96 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
97 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
98 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
99 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
100 ; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
101 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
102 ; CHECK: andi $[[R8:[0-9]+]], $4, 255
103 ; CHECK: sll $[[R9:[0-9]+]], $[[R8]], $[[R4]]
105 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
106 ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
107 ; CHECK: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
108 ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
109 ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
110 ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
111 ; CHECK: sc $[[R14]], 0($[[R2]])
112 ; CHECK: beq $[[R14]], $zero, $[[BB0]]
114 ; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
115 ; CHECK: srl $[[R16:[0-9]+]], $[[R15]], $[[R4]]
116 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
117 ; CHECK: sra $2, $[[R17]], 24
; Atomic i8 fetch-and-sub on @y. Identical word-emulation shape to
; AtomicLoadAdd8 except the operand is negated up front (subu $zero, $4) so
; the in-loop operation remains an addu — subtraction lowered as addition of
; the two's-complement negative.
; NOTE(review): body appears truncated in this view (no ret/closing brace).
120 define signext i8 @AtomicLoadSub8(i8 signext %incr) nounwind {
122 %0 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @y, i8 %incr)
125 ; CHECK: AtomicLoadSub8:
126 ; CHECK: lw $[[R0:[0-9]+]], %got(y)($gp)
127 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
128 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
129 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
130 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
131 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
132 ; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
133 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
134 ; CHECK: subu $[[R18:[0-9]+]], $zero, $4
135 ; CHECK: andi $[[R8:[0-9]+]], $[[R18]], 255
136 ; CHECK: sll $[[R9:[0-9]+]], $[[R8]], $[[R4]]
138 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
139 ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
140 ; CHECK: addu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
141 ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
142 ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
143 ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
144 ; CHECK: sc $[[R14]], 0($[[R2]])
145 ; CHECK: beq $[[R14]], $zero, $[[BB0]]
147 ; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
148 ; CHECK: srl $[[R16:[0-9]+]], $[[R15]], $[[R4]]
149 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
150 ; CHECK: sra $2, $[[R17]], 24
; Atomic i8 fetch-and-nand on @y. Same word/mask emulation as the i8 add
; test; the in-loop operation is and of the loaded word with the shifted
; operand, then nor with $zero to complement, before splicing the byte back
; and attempting the sc.
; NOTE(review): body appears truncated in this view (no ret/closing brace).
153 define signext i8 @AtomicLoadNand8(i8 signext %incr) nounwind {
155 %0 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @y, i8 %incr)
158 ; CHECK: AtomicLoadNand8:
159 ; CHECK: lw $[[R0:[0-9]+]], %got(y)($gp)
160 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
161 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
162 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
163 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
164 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
165 ; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
166 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
167 ; CHECK: andi $[[R8:[0-9]+]], $4, 255
168 ; CHECK: sll $[[R9:[0-9]+]], $[[R8]], $[[R4]]
170 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
171 ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
172 ; CHECK: and $[[R18:[0-9]+]], $[[R10]], $[[R9]]
173 ; CHECK: nor $[[R11:[0-9]+]], $zero, $[[R18]]
174 ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
175 ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
176 ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
177 ; CHECK: sc $[[R14]], 0($[[R2]])
178 ; CHECK: beq $[[R14]], $zero, $[[BB0]]
180 ; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
181 ; CHECK: srl $[[R16:[0-9]+]], $[[R15]], $[[R4]]
182 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
183 ; CHECK: sra $2, $[[R17]], 24
; Atomic i8 exchange on @y. Same word/mask setup as the other i8 tests; the
; shifted new byte is spilled to the stack before the loop (like AtomicSwap32)
; and reloaded each iteration, since sc consumes the register it stores.
; Epilogue extracts and sign-extends the old byte into $2.
; NOTE(review): body appears truncated in this view (no ret/closing brace).
186 define signext i8 @AtomicSwap8(i8 signext %oldval) nounwind {
188 %0 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @y, i8 %oldval)
191 ; CHECK: AtomicSwap8:
192 ; CHECK: lw $[[R0:[0-9]+]], %got(y)($gp)
193 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
194 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
195 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
196 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
197 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
198 ; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
199 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
200 ; CHECK: andi $[[R8:[0-9]+]], $4, 255
201 ; CHECK: sll $[[R9:[0-9]+]], $[[R8]], $[[R4]]
202 ; CHECK: sw $[[R9]], [[OFFSET:[0-9]+]]($sp)
204 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
205 ; CHECK: ll $[[R10:[0-9]+]], 0($[[R2]])
206 ; CHECK: lw $[[R18:[0-9]+]], [[OFFSET]]($sp)
207 ; CHECK: or $[[R11:[0-9]+]], $zero, $[[R18]]
208 ; CHECK: and $[[R12:[0-9]+]], $[[R11]], $[[R6]]
209 ; CHECK: and $[[R13:[0-9]+]], $[[R10]], $[[R7]]
210 ; CHECK: or $[[R14:[0-9]+]], $[[R13]], $[[R12]]
211 ; CHECK: sc $[[R14]], 0($[[R2]])
212 ; CHECK: beq $[[R14]], $zero, $[[BB0]]
214 ; CHECK: and $[[R15:[0-9]+]], $[[R10]], $[[R6]]
215 ; CHECK: srl $[[R16:[0-9]+]], $[[R15]], $[[R4]]
216 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
217 ; CHECK: sra $2, $[[R17]], 24
; Atomic i8 compare-and-swap on @y. Both the expected ($4) and new ($5) bytes
; are masked and shifted into word position before the loop. Each iteration
; masks the loaded word down to the target byte and bne-compares it against
; the shifted expected value, exiting to [[BB1]] on mismatch; on match it
; splices in the new byte and attempts the sc. The old byte ([[R13]], still
; in word position) is shifted down and sign-extended into $2 at the end.
; NOTE(review): body appears truncated in this view (no ret/closing brace).
220 define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
222 %0 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @y, i8 %oldval, i8 %newval)
225 ; CHECK: AtomicCmpSwap8:
226 ; CHECK: lw $[[R0:[0-9]+]], %got(y)($gp)
227 ; CHECK: addiu $[[R1:[0-9]+]], $zero, -4
228 ; CHECK: and $[[R2:[0-9]+]], $[[R0]], $[[R1]]
229 ; CHECK: andi $[[R3:[0-9]+]], $[[R0]], 3
230 ; CHECK: sll $[[R4:[0-9]+]], $[[R3]], 3
231 ; CHECK: ori $[[R5:[0-9]+]], $zero, 255
232 ; CHECK: sll $[[R6:[0-9]+]], $[[R5]], $[[R4]]
233 ; CHECK: nor $[[R7:[0-9]+]], $zero, $[[R6]]
234 ; CHECK: andi $[[R8:[0-9]+]], $4, 255
235 ; CHECK: sll $[[R9:[0-9]+]], $[[R8]], $[[R4]]
236 ; CHECK: andi $[[R10:[0-9]+]], $5, 255
237 ; CHECK: sll $[[R11:[0-9]+]], $[[R10]], $[[R4]]
239 ; CHECK: $[[BB0:[A-Z_0-9]+]]:
240 ; CHECK: ll $[[R12:[0-9]+]], 0($[[R2]])
241 ; CHECK: and $[[R13:[0-9]+]], $[[R12]], $[[R6]]
242 ; CHECK: bne $[[R13]], $[[R9]], $[[BB1:[A-Z_0-9]+]]
244 ; CHECK: and $[[R14:[0-9]+]], $[[R12]], $[[R7]]
245 ; CHECK: or $[[R15:[0-9]+]], $[[R14]], $[[R11]]
246 ; CHECK: sc $[[R15]], 0($[[R2]])
247 ; CHECK: beq $[[R15]], $zero, $[[BB0]]
250 ; CHECK: srl $[[R16:[0-9]+]], $[[R13]], $[[R4]]
251 ; CHECK: sll $[[R17:[0-9]+]], $[[R16]], 24
252 ; CHECK: sra $2, $[[R17]], 24