/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

/*
 * Various simple arithmetic operations on memory, each atomic in the
 * presence of interrupts and safe on SMP systems.
 */

/*
 * Everything is built out of cmpxchg.
 */
#define IA64_CMPXCHG(sz, sem, p, cmpval, newval, ret)                   \
        __asm __volatile (                                              \
                "mov ar.ccv=%2;;\n\t"                                   \
                "cmpxchg" #sz "." #sem " %0=%4,%3,ar.ccv\n\t"           \
                : "=r" (ret), "=m" (*p)                                 \
                : "r" ((uint64_t)cmpval), "r" (newval), "m" (*p)        \
                : "memory")

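/*
 * Usage sketch (illustrative only; 'word' and the values are made up).
 * The macro loads cmpval into ar.ccv, then issues a cmpxchg that stores
 * newval into *p only if *p still equals ar.ccv; the old contents of *p
 * land in ret either way:
 *
 *      volatile uint32_t word;
 *      uint32_t old;
 *
 *      IA64_CMPXCHG(4, acq, &word, 0, 1, old);
 *      if (old == 0)
 *              ;       // the exchange happened; word is now 1
 */
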
/*
 * Some common forms of cmpxchg.
 */
static __inline uint32_t
ia64_cmpxchg_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
        uint32_t ret;
        IA64_CMPXCHG(4, acq, p, cmpval, newval, ret);
        return (ret);
}

static __inline uint32_t
ia64_cmpxchg_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
        uint32_t ret;
        IA64_CMPXCHG(4, rel, p, cmpval, newval, ret);
        return (ret);
}

static __inline uint64_t
ia64_cmpxchg_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
        uint64_t ret;
        IA64_CMPXCHG(8, acq, p, cmpval, newval, ret);
        return (ret);
}

static __inline uint64_t
ia64_cmpxchg_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
        uint64_t ret;
        IA64_CMPXCHG(8, rel, p, cmpval, newval, ret);
        return (ret);
}

#define ATOMIC_STORE_LOAD(type, width, size)                            \
        static __inline uint##width##_t                                 \
        ia64_ld_acq_##width(volatile uint##width##_t* p)                \
        {                                                               \
                uint##width##_t v;                                      \
                __asm __volatile ("ld" size ".acq %0=%1" : "=r" (v)     \
                    : "m" (*p) : "memory");                             \
                return (v);                                             \
        }                                                               \
                                                                        \
        static __inline uint##width##_t                                 \
        atomic_load_acq_##width(volatile uint##width##_t* p)            \
        {                                                               \
                uint##width##_t v;                                      \
                __asm __volatile ("ld" size ".acq %0=%1" : "=r" (v)     \
                    : "m" (*p) : "memory");                             \
                return (v);                                             \
        }                                                               \
                                                                        \
        static __inline uint##width##_t                                 \
        atomic_load_acq_##type(volatile uint##width##_t* p)             \
        {                                                               \
                uint##width##_t v;                                      \
                __asm __volatile ("ld" size ".acq %0=%1" : "=r" (v)     \
                    : "m" (*p) : "memory");                             \
                return (v);                                             \
        }                                                               \
                                                                        \
        static __inline void                                            \
        ia64_st_rel_##width(volatile uint##width##_t* p, uint##width##_t v) \
        {                                                               \
                __asm __volatile ("st" size ".rel %0=%1" : "=m" (*p)    \
                    : "r" (v) : "memory");                              \
        }                                                               \
                                                                        \
        static __inline void                                            \
        atomic_store_rel_##width(volatile uint##width##_t* p,           \
            uint##width##_t v)                                          \
        {                                                               \
                __asm __volatile ("st" size ".rel %0=%1" : "=m" (*p)    \
                    : "r" (v) : "memory");                              \
        }                                                               \
                                                                        \
        static __inline void                                            \
        atomic_store_rel_##type(volatile uint##width##_t* p,            \
            uint##width##_t v)                                          \
        {                                                               \
                __asm __volatile ("st" size ".rel %0=%1" : "=m" (*p)    \
                    : "r" (v) : "memory");                              \
        }

ATOMIC_STORE_LOAD(char,  8,  "1")
ATOMIC_STORE_LOAD(short, 16, "2")
ATOMIC_STORE_LOAD(int,   32, "4")
ATOMIC_STORE_LOAD(long,  64, "8")

#undef ATOMIC_STORE_LOAD

#define atomic_load_acq_ptr(p)          \
    ((void *)atomic_load_acq_64((volatile uint64_t *)p))

#define atomic_store_rel_ptr(p, v)      \
    atomic_store_rel_64((volatile uint64_t *)p, (uint64_t)v)

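/*
 * Usage sketch (illustrative; 'data' and 'ready' are made-up names).
 * A release store publishes everything written before it; an acquire
 * load on another CPU makes those writes visible after it:
 *
 *      // producer:
 *      data = 42;
 *      atomic_store_rel_32(&ready, 1);
 *
 *      // consumer:
 *      while (atomic_load_acq_32(&ready) == 0)
 *              ;
 *      // here 'data' is guaranteed to read as 42
 */
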
#define IA64_ATOMIC(sz, type, name, width, op)                          \
        static __inline type                                            \
        atomic_##name##_acq_##width(volatile type *p, type v)           \
        {                                                               \
                type old, ret;                                          \
                do {                                                    \
                        old = *p;                                       \
                        IA64_CMPXCHG(sz, acq, p, old, old op v, ret);   \
                } while (ret != old);                                   \
                return (old);                                           \
        }                                                               \
                                                                        \
        static __inline type                                            \
        atomic_##name##_rel_##width(volatile type *p, type v)           \
        {                                                               \
                type old, ret;                                          \
                do {                                                    \
                        old = *p;                                       \
                        IA64_CMPXCHG(sz, rel, p, old, old op v, ret);   \
                } while (ret != old);                                   \
                return (old);                                           \
        }

IA64_ATOMIC(1, uint8_t,  set, 8,  |)
IA64_ATOMIC(2, uint16_t, set, 16, |)
IA64_ATOMIC(4, uint32_t, set, 32, |)
IA64_ATOMIC(8, uint64_t, set, 64, |)

IA64_ATOMIC(1, uint8_t,  clear, 8,  &~)
IA64_ATOMIC(2, uint16_t, clear, 16, &~)
IA64_ATOMIC(4, uint32_t, clear, 32, &~)
IA64_ATOMIC(8, uint64_t, clear, 64, &~)

IA64_ATOMIC(1, uint8_t,  add, 8,  +)
IA64_ATOMIC(2, uint16_t, add, 16, +)
IA64_ATOMIC(4, uint32_t, add, 32, +)
IA64_ATOMIC(8, uint64_t, add, 64, +)

IA64_ATOMIC(1, uint8_t,  subtract, 8,  -)
IA64_ATOMIC(2, uint16_t, subtract, 16, -)
IA64_ATOMIC(4, uint32_t, subtract, 32, -)
IA64_ATOMIC(8, uint64_t, subtract, 64, -)

#undef IA64_ATOMIC

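/*
 * Usage sketch (illustrative; 'flags' and the mask are made up).  Each
 * generated routine rereads *p and retries the cmpxchg until no other
 * CPU changed the word in between, then returns the old value:
 *
 *      volatile uint32_t flags;
 *      uint32_t was;
 *
 *      was = atomic_set_acq_32(&flags, 0x01);          // flags |= 0x01
 *      (void)atomic_clear_rel_32(&flags, 0x01);        // flags &= ~0x01
 */
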
#define atomic_set_8                    atomic_set_acq_8
#define atomic_clear_8                  atomic_clear_acq_8
#define atomic_add_8                    atomic_add_acq_8
#define atomic_subtract_8               atomic_subtract_acq_8

#define atomic_set_16                   atomic_set_acq_16
#define atomic_clear_16                 atomic_clear_acq_16
#define atomic_add_16                   atomic_add_acq_16
#define atomic_subtract_16              atomic_subtract_acq_16

#define atomic_set_32                   atomic_set_acq_32
#define atomic_clear_32                 atomic_clear_acq_32
#define atomic_add_32                   atomic_add_acq_32
#define atomic_subtract_32              atomic_subtract_acq_32

#define atomic_set_64                   atomic_set_acq_64
#define atomic_clear_64                 atomic_clear_acq_64
#define atomic_add_64                   atomic_add_acq_64
#define atomic_subtract_64              atomic_subtract_acq_64

#define atomic_set_char                 atomic_set_8
#define atomic_clear_char               atomic_clear_8
#define atomic_add_char                 atomic_add_8
#define atomic_subtract_char            atomic_subtract_8
#define atomic_set_acq_char             atomic_set_acq_8
#define atomic_clear_acq_char           atomic_clear_acq_8
#define atomic_add_acq_char             atomic_add_acq_8
#define atomic_subtract_acq_char        atomic_subtract_acq_8
#define atomic_set_rel_char             atomic_set_rel_8
#define atomic_clear_rel_char           atomic_clear_rel_8
#define atomic_add_rel_char             atomic_add_rel_8
#define atomic_subtract_rel_char        atomic_subtract_rel_8

#define atomic_set_short                atomic_set_16
#define atomic_clear_short              atomic_clear_16
#define atomic_add_short                atomic_add_16
#define atomic_subtract_short           atomic_subtract_16
#define atomic_set_acq_short            atomic_set_acq_16
#define atomic_clear_acq_short          atomic_clear_acq_16
#define atomic_add_acq_short            atomic_add_acq_16
#define atomic_subtract_acq_short       atomic_subtract_acq_16
#define atomic_set_rel_short            atomic_set_rel_16
#define atomic_clear_rel_short          atomic_clear_rel_16
#define atomic_add_rel_short            atomic_add_rel_16
#define atomic_subtract_rel_short       atomic_subtract_rel_16

#define atomic_set_int                  atomic_set_32
#define atomic_clear_int                atomic_clear_32
#define atomic_add_int                  atomic_add_32
#define atomic_subtract_int             atomic_subtract_32
#define atomic_set_acq_int              atomic_set_acq_32
#define atomic_clear_acq_int            atomic_clear_acq_32
#define atomic_add_acq_int              atomic_add_acq_32
#define atomic_subtract_acq_int         atomic_subtract_acq_32
#define atomic_set_rel_int              atomic_set_rel_32
#define atomic_clear_rel_int            atomic_clear_rel_32
#define atomic_add_rel_int              atomic_add_rel_32
#define atomic_subtract_rel_int         atomic_subtract_rel_32

#define atomic_set_long                 atomic_set_64
#define atomic_clear_long               atomic_clear_64
#define atomic_add_long                 atomic_add_64
#define atomic_subtract_long            atomic_subtract_64
#define atomic_set_acq_long             atomic_set_acq_64
#define atomic_clear_acq_long           atomic_clear_acq_64
#define atomic_add_acq_long             atomic_add_acq_64
#define atomic_subtract_acq_long        atomic_subtract_acq_64
#define atomic_set_rel_long             atomic_set_rel_64
#define atomic_clear_rel_long           atomic_clear_rel_64
#define atomic_add_rel_long             atomic_add_rel_64
#define atomic_subtract_rel_long        atomic_subtract_rel_64

/* XXX Needs casting. */
#define atomic_set_ptr                  atomic_set_64
#define atomic_clear_ptr                atomic_clear_64
#define atomic_add_ptr                  atomic_add_64
#define atomic_subtract_ptr             atomic_subtract_64
#define atomic_set_acq_ptr              atomic_set_acq_64
#define atomic_clear_acq_ptr            atomic_clear_acq_64
#define atomic_add_acq_ptr              atomic_add_acq_64
#define atomic_subtract_acq_ptr         atomic_subtract_acq_64
#define atomic_set_rel_ptr              atomic_set_rel_64
#define atomic_clear_rel_ptr            atomic_clear_rel_64
#define atomic_add_rel_ptr              atomic_add_rel_64
#define atomic_subtract_rel_ptr         atomic_subtract_rel_64

#undef IA64_CMPXCHG

/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline int
atomic_cmpset_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
        return (ia64_cmpxchg_acq_32(p, cmpval, newval) == cmpval);
}

static __inline int
atomic_cmpset_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
        return (ia64_cmpxchg_rel_32(p, cmpval, newval) == cmpval);
}

/*
 * As above, but operating on 64-bit values.
 */
static __inline int
atomic_cmpset_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
        return (ia64_cmpxchg_acq_64(p, cmpval, newval) == cmpval);
}

static __inline int
atomic_cmpset_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
        return (ia64_cmpxchg_rel_64(p, cmpval, newval) == cmpval);
}

#define atomic_cmpset_32                atomic_cmpset_acq_32
#define atomic_cmpset_64                atomic_cmpset_acq_64
#define atomic_cmpset_int               atomic_cmpset_32
#define atomic_cmpset_long              atomic_cmpset_64
#define atomic_cmpset_acq_int           atomic_cmpset_acq_32
#define atomic_cmpset_rel_int           atomic_cmpset_rel_32
#define atomic_cmpset_acq_long          atomic_cmpset_acq_64
#define atomic_cmpset_rel_long          atomic_cmpset_rel_64

#define atomic_cmpset_acq_ptr(p, o, n)  \
    (atomic_cmpset_acq_64((volatile uint64_t *)p, (uint64_t)o, (uint64_t)n))

#define atomic_cmpset_ptr               atomic_cmpset_acq_ptr

#define atomic_cmpset_rel_ptr(p, o, n)  \
    (atomic_cmpset_rel_64((volatile uint64_t *)p, (uint64_t)o, (uint64_t)n))

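/*
 * Usage sketch (illustrative; 'lock' and the 0/1 protocol are made up):
 * cmpset succeeds, and returns nonzero, only for the one CPU that sees
 * the expected old value.
 *
 *      volatile uint32_t lock;
 *
 *      while (!atomic_cmpset_acq_32(&lock, 0, 1))
 *              ;                               // spin until we take it
 *      // ... critical section ...
 *      atomic_store_rel_32(&lock, 0);          // release
 */
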
/*
 * Atomically read the value stored at *p and replace it with zero;
 * returns the old value.
 */
static __inline uint32_t
atomic_readandclear_32(volatile uint32_t* p)
{
        uint32_t val;
        do {
                val = *p;
        } while (!atomic_cmpset_32(p, val, 0));
        return (val);
}

static __inline uint64_t
atomic_readandclear_64(volatile uint64_t* p)
{
        uint64_t val;
        do {
                val = *p;
        } while (!atomic_cmpset_64(p, val, 0));
        return (val);
}

#define atomic_readandclear_int         atomic_readandclear_32
#define atomic_readandclear_long        atomic_readandclear_64
#define atomic_readandclear_ptr         atomic_readandclear_64

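/*
 * Usage sketch (illustrative; 'pending' is a made-up name): drain a
 * word of pending-event bits in one atomic step, so bits posted by
 * other CPUs while we work are not lost:
 *
 *      volatile uint32_t pending;
 *      uint32_t work;
 *
 *      work = atomic_readandclear_32(&pending);
 *      // service every bit set in 'work'
 */
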
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 *
 * XXX: Should we use the fetchadd instruction here?
 */
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
        uint32_t value;

        do {
                value = *p;
        } while (!atomic_cmpset_32(p, value, value + v));
        return (value);
}

#define atomic_fetchadd_int             atomic_fetchadd_32

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{
        u_long value;

        do {
                value = *p;
        } while (!atomic_cmpset_64(p, value, value + v));
        return (value);
}

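/*
 * Usage sketch (illustrative; 'ticket' is a made-up name): hand out
 * unique, increasing values to concurrent callers:
 *
 *      volatile uint32_t ticket;
 *      uint32_t mine;
 *
 *      mine = atomic_fetchadd_32(&ticket, 1);  // old value, pre-increment
 */
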
#endif /* ! _MACHINE_ATOMIC_H_ */