/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#define mb()    __asm __volatile("mf")
#define wmb()   mb()
#define rmb()   mb()

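/*
 * Illustrative only (not part of the original header): a typical
 * producer/consumer pairing of the barriers above.  "data" and "flag"
 * are hypothetical shared variables.
 *
 *	producer:			consumer:
 *		data = 42;			while (flag == 0)
 *		wmb();					;
 *		flag = 1;			rmb();
 *						use(data);
 *
 * On ia64 all three barriers expand to the full "mf" fence, so the
 * read/write distinction is documentary rather than functional here.
 */
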
/*
 * Various simple arithmetic operations on memory, each atomic in the
 * presence of interrupts and safe on SMP systems.
 */

/*
 * Everything is built out of cmpxchg.
 */
#define IA64_CMPXCHG(sz, sem, p, cmpval, newval, ret)                   \
        __asm __volatile (                                              \
                "mov ar.ccv=%2;;\n\t"                                   \
                "cmpxchg" #sz "." #sem " %0=%4,%3,ar.ccv\n\t"           \
                : "=r" (ret), "=m" (*p)                                 \
                : "r" ((uint64_t)cmpval), "r" (newval), "m" (*p)        \
                : "memory")

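/*
 * Illustrative only: the ia64 cmpxchg instruction compares *p against
 * the ar.ccv application register and, if they match, stores newval;
 * either way the old contents of *p land in ret.  For example,
 * IA64_CMPXCHG(4, acq, p, cmpval, newval, ret) emits roughly:
 *
 *	mov ar.ccv=cmpval;;
 *	cmpxchg4.acq ret=[p],newval,ar.ccv
 */
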
/*
 * Some common forms of cmpxchg.
 */
static __inline uint32_t
ia64_cmpxchg_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
        uint32_t ret;
        IA64_CMPXCHG(4, acq, p, cmpval, newval, ret);
        return (ret);
}

static __inline uint32_t
ia64_cmpxchg_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
        uint32_t ret;
        IA64_CMPXCHG(4, rel, p, cmpval, newval, ret);
        return (ret);
}

static __inline uint64_t
ia64_cmpxchg_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
        uint64_t ret;
        IA64_CMPXCHG(8, acq, p, cmpval, newval, ret);
        return (ret);
}

static __inline uint64_t
ia64_cmpxchg_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
        uint64_t ret;
        IA64_CMPXCHG(8, rel, p, cmpval, newval, ret);
        return (ret);
}

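/*
 * Illustrative only: a lock-free increment built on the helpers above.
 * The retry loop is the same pattern the IA64_ATOMIC macro generates
 * below; "example_inc" is a hypothetical function, not part of this
 * header.
 *
 *	static __inline void
 *	example_inc(volatile uint32_t *ctr)
 *	{
 *		uint32_t old;
 *
 *		do {
 *			old = *ctr;
 *		} while (ia64_cmpxchg_acq_32(ctr, old, old + 1) != old);
 *	}
 */
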
#define ATOMIC_STORE_LOAD(type, width, size)                            \
        static __inline uint##width##_t                                 \
        ia64_ld_acq_##width(volatile uint##width##_t* p)                \
        {                                                               \
                uint##width##_t v;                                      \
                __asm __volatile ("ld" size ".acq %0=%1" : "=r" (v)     \
                    : "m" (*p) : "memory");                             \
                return (v);                                             \
        }                                                               \
                                                                        \
        static __inline uint##width##_t                                 \
        atomic_load_acq_##width(volatile uint##width##_t* p)            \
        {                                                               \
                uint##width##_t v;                                      \
                __asm __volatile ("ld" size ".acq %0=%1" : "=r" (v)     \
                    : "m" (*p) : "memory");                             \
                return (v);                                             \
        }                                                               \
                                                                        \
        static __inline uint##width##_t                                 \
        atomic_load_acq_##type(volatile uint##width##_t* p)             \
        {                                                               \
                uint##width##_t v;                                      \
                __asm __volatile ("ld" size ".acq %0=%1" : "=r" (v)     \
                    : "m" (*p) : "memory");                             \
                return (v);                                             \
        }                                                               \
                                                                        \
        static __inline void                                            \
        ia64_st_rel_##width(volatile uint##width##_t* p, uint##width##_t v) \
        {                                                               \
                __asm __volatile ("st" size ".rel %0=%1" : "=m" (*p)    \
                    : "r" (v) : "memory");                              \
        }                                                               \
                                                                        \
        static __inline void                                            \
        atomic_store_rel_##width(volatile uint##width##_t* p,           \
            uint##width##_t v)                                          \
        {                                                               \
                __asm __volatile ("st" size ".rel %0=%1" : "=m" (*p)    \
                    : "r" (v) : "memory");                              \
        }                                                               \
                                                                        \
        static __inline void                                            \
        atomic_store_rel_##type(volatile uint##width##_t* p,            \
            uint##width##_t v)                                          \
        {                                                               \
                __asm __volatile ("st" size ".rel %0=%1" : "=m" (*p)    \
                    : "r" (v) : "memory");                              \
        }

ATOMIC_STORE_LOAD(char,  8,  "1")
ATOMIC_STORE_LOAD(short, 16, "2")
ATOMIC_STORE_LOAD(int,   32, "4")
ATOMIC_STORE_LOAD(long,  64, "8")

#undef ATOMIC_STORE_LOAD

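/*
 * Illustrative only: each ATOMIC_STORE_LOAD invocation above expands
 * into acquire loads and release stores for one width, e.g. for "int":
 *
 *	uint32_t v = atomic_load_acq_int(&shared);	// ld4.acq
 *	atomic_store_rel_int(&shared, v + 1);		// st4.rel
 *
 * "shared" is a hypothetical variable used for the example.
 */
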
#define atomic_load_acq_ptr(p)          \
    ((void *)atomic_load_acq_64((volatile uint64_t *)(p)))

#define atomic_store_rel_ptr(p, v)      \
    atomic_store_rel_64((volatile uint64_t *)(p), (uint64_t)(v))

#define IA64_ATOMIC(sz, type, name, width, op)                          \
        static __inline type                                            \
        atomic_##name##_acq_##width(volatile type *p, type v)           \
        {                                                               \
                type old, ret;                                          \
                do {                                                    \
                        old = *p;                                       \
                        IA64_CMPXCHG(sz, acq, p, old, old op v, ret);   \
                } while (ret != old);                                   \
                return (old);                                           \
        }                                                               \
                                                                        \
        static __inline type                                            \
        atomic_##name##_rel_##width(volatile type *p, type v)           \
        {                                                               \
                type old, ret;                                          \
                do {                                                    \
                        old = *p;                                       \
                        IA64_CMPXCHG(sz, rel, p, old, old op v, ret);   \
                } while (ret != old);                                   \
                return (old);                                           \
        }

IA64_ATOMIC(1, uint8_t,  set, 8,  |)
IA64_ATOMIC(2, uint16_t, set, 16, |)
IA64_ATOMIC(4, uint32_t, set, 32, |)
IA64_ATOMIC(8, uint64_t, set, 64, |)

IA64_ATOMIC(1, uint8_t,  clear, 8,  &~)
IA64_ATOMIC(2, uint16_t, clear, 16, &~)
IA64_ATOMIC(4, uint32_t, clear, 32, &~)
IA64_ATOMIC(8, uint64_t, clear, 64, &~)

IA64_ATOMIC(1, uint8_t,  add, 8,  +)
IA64_ATOMIC(2, uint16_t, add, 16, +)
IA64_ATOMIC(4, uint32_t, add, 32, +)
IA64_ATOMIC(8, uint64_t, add, 64, +)

IA64_ATOMIC(1, uint8_t,  subtract, 8,  -)
IA64_ATOMIC(2, uint16_t, subtract, 16, -)
IA64_ATOMIC(4, uint32_t, subtract, 32, -)
IA64_ATOMIC(8, uint64_t, subtract, 64, -)

#undef IA64_ATOMIC

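/*
 * Illustrative only: IA64_ATOMIC(4, uint32_t, set, 32, |) above defines
 * atomic_set_acq_32() and atomic_set_rel_32(), each applying the
 * operator in a cmpxchg retry loop and returning the previous value:
 *
 *	atomic_set_acq_32(&flags, 0x1);		// flags |= 0x1, acquire
 *	atomic_clear_rel_32(&flags, 0x1);	// flags &= ~0x1, release
 *
 * "flags" is a hypothetical variable used for the example.
 */
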
#define atomic_set_8                    atomic_set_acq_8
#define atomic_clear_8                  atomic_clear_acq_8
#define atomic_add_8                    atomic_add_acq_8
#define atomic_subtract_8               atomic_subtract_acq_8

#define atomic_set_16                   atomic_set_acq_16
#define atomic_clear_16                 atomic_clear_acq_16
#define atomic_add_16                   atomic_add_acq_16
#define atomic_subtract_16              atomic_subtract_acq_16

#define atomic_set_32                   atomic_set_acq_32
#define atomic_clear_32                 atomic_clear_acq_32
#define atomic_add_32                   atomic_add_acq_32
#define atomic_subtract_32              atomic_subtract_acq_32

#define atomic_set_64                   atomic_set_acq_64
#define atomic_clear_64                 atomic_clear_acq_64
#define atomic_add_64                   atomic_add_acq_64
#define atomic_subtract_64              atomic_subtract_acq_64

#define atomic_set_char                 atomic_set_8
#define atomic_clear_char               atomic_clear_8
#define atomic_add_char                 atomic_add_8
#define atomic_subtract_char            atomic_subtract_8
#define atomic_set_acq_char             atomic_set_acq_8
#define atomic_clear_acq_char           atomic_clear_acq_8
#define atomic_add_acq_char             atomic_add_acq_8
#define atomic_subtract_acq_char        atomic_subtract_acq_8
#define atomic_set_rel_char             atomic_set_rel_8
#define atomic_clear_rel_char           atomic_clear_rel_8
#define atomic_add_rel_char             atomic_add_rel_8
#define atomic_subtract_rel_char        atomic_subtract_rel_8

#define atomic_set_short                atomic_set_16
#define atomic_clear_short              atomic_clear_16
#define atomic_add_short                atomic_add_16
#define atomic_subtract_short           atomic_subtract_16
#define atomic_set_acq_short            atomic_set_acq_16
#define atomic_clear_acq_short          atomic_clear_acq_16
#define atomic_add_acq_short            atomic_add_acq_16
#define atomic_subtract_acq_short       atomic_subtract_acq_16
#define atomic_set_rel_short            atomic_set_rel_16
#define atomic_clear_rel_short          atomic_clear_rel_16
#define atomic_add_rel_short            atomic_add_rel_16
#define atomic_subtract_rel_short       atomic_subtract_rel_16

#define atomic_set_int                  atomic_set_32
#define atomic_clear_int                atomic_clear_32
#define atomic_add_int                  atomic_add_32
#define atomic_subtract_int             atomic_subtract_32
#define atomic_set_acq_int              atomic_set_acq_32
#define atomic_clear_acq_int            atomic_clear_acq_32
#define atomic_add_acq_int              atomic_add_acq_32
#define atomic_subtract_acq_int         atomic_subtract_acq_32
#define atomic_set_rel_int              atomic_set_rel_32
#define atomic_clear_rel_int            atomic_clear_rel_32
#define atomic_add_rel_int              atomic_add_rel_32
#define atomic_subtract_rel_int         atomic_subtract_rel_32

#define atomic_set_long                 atomic_set_64
#define atomic_clear_long               atomic_clear_64
#define atomic_add_long                 atomic_add_64
#define atomic_subtract_long            atomic_subtract_64
#define atomic_set_acq_long             atomic_set_acq_64
#define atomic_clear_acq_long           atomic_clear_acq_64
#define atomic_add_acq_long             atomic_add_acq_64
#define atomic_subtract_acq_long        atomic_subtract_acq_64
#define atomic_set_rel_long             atomic_set_rel_64
#define atomic_clear_rel_long           atomic_clear_rel_64
#define atomic_add_rel_long             atomic_add_rel_64
#define atomic_subtract_rel_long        atomic_subtract_rel_64

/* XXX Needs casting. */
#define atomic_set_ptr                  atomic_set_64
#define atomic_clear_ptr                atomic_clear_64
#define atomic_add_ptr                  atomic_add_64
#define atomic_subtract_ptr             atomic_subtract_64
#define atomic_set_acq_ptr              atomic_set_acq_64
#define atomic_clear_acq_ptr            atomic_clear_acq_64
#define atomic_add_acq_ptr              atomic_add_acq_64
#define atomic_subtract_acq_ptr         atomic_subtract_acq_64
#define atomic_set_rel_ptr              atomic_set_rel_64
#define atomic_clear_rel_ptr            atomic_clear_rel_64
#define atomic_add_rel_ptr              atomic_add_rel_64
#define atomic_subtract_rel_ptr         atomic_subtract_rel_64

#undef IA64_CMPXCHG

/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline int
atomic_cmpset_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
        return (ia64_cmpxchg_acq_32(p, cmpval, newval) == cmpval);
}

static __inline int
atomic_cmpset_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
        return (ia64_cmpxchg_rel_32(p, cmpval, newval) == cmpval);
}

/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline int
atomic_cmpset_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
        return (ia64_cmpxchg_acq_64(p, cmpval, newval) == cmpval);
}

static __inline int
atomic_cmpset_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
        return (ia64_cmpxchg_rel_64(p, cmpval, newval) == cmpval);
}

#define atomic_cmpset_32                atomic_cmpset_acq_32
#define atomic_cmpset_64                atomic_cmpset_acq_64
#define atomic_cmpset_int               atomic_cmpset_32
#define atomic_cmpset_long              atomic_cmpset_64
#define atomic_cmpset_acq_int           atomic_cmpset_acq_32
#define atomic_cmpset_rel_int           atomic_cmpset_rel_32
#define atomic_cmpset_acq_long          atomic_cmpset_acq_64
#define atomic_cmpset_rel_long          atomic_cmpset_rel_64

#define atomic_cmpset_acq_ptr(p, o, n)  \
    (atomic_cmpset_acq_64((volatile uint64_t *)(p), (uint64_t)(o), (uint64_t)(n)))

#define atomic_cmpset_ptr               atomic_cmpset_acq_ptr

#define atomic_cmpset_rel_ptr(p, o, n)  \
    (atomic_cmpset_rel_64((volatile uint64_t *)(p), (uint64_t)(o), (uint64_t)(n)))

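/*
 * Illustrative only: a minimal test-and-set style lock built on
 * atomic_cmpset_acq_32() and atomic_store_rel_32().  "lk" is a
 * hypothetical pointer to a word that is 0 when the lock is free.
 *
 *	while (!atomic_cmpset_acq_32(lk, 0, 1))
 *		;				// spin until 0 -> 1 wins
 *	...critical section...
 *	atomic_store_rel_32(lk, 0);		// release the lock
 */
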
static __inline uint32_t
atomic_readandclear_32(volatile uint32_t* p)
{
        uint32_t val;
        do {
                val = *p;
        } while (!atomic_cmpset_32(p, val, 0));
        return (val);
}

static __inline uint64_t
atomic_readandclear_64(volatile uint64_t* p)
{
        uint64_t val;
        do {
                val = *p;
        } while (!atomic_cmpset_64(p, val, 0));
        return (val);
}

#define atomic_readandclear_int         atomic_readandclear_32
#define atomic_readandclear_long        atomic_readandclear_64
#define atomic_readandclear_ptr         atomic_readandclear_64

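/*
 * Illustrative only: atomic_readandclear_32() is handy for harvesting
 * a statistics counter without losing concurrent increments, e.g.
 *
 *	total += atomic_readandclear_32(&pending);
 *
 * "total" and "pending" are hypothetical variables.
 */
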
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 *
 * XXX: Should we use the fetchadd instruction here?
 */
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
        uint32_t value;

        do {
                value = *p;
        } while (!atomic_cmpset_32(p, value, value + v));
        return (value);
}

#define atomic_fetchadd_int             atomic_fetchadd_32

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{
        u_long value;

        do {
                value = *p;
        } while (!atomic_cmpset_64(p, value, value + v));
        return (value);
}

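/*
 * Illustrative only: atomic_fetchadd_int() returns the value *before*
 * the addition, which makes it a natural ticket dispenser:
 *
 *	my_ticket = atomic_fetchadd_int(&next_ticket, 1);
 *
 * "my_ticket" and "next_ticket" are hypothetical variables.
 */
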
#endif /* ! _MACHINE_ATOMIC_H_ */