/* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */

/*-
 * Copyright (C) 2003-2004 Olivier Houchard
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_ATOMIC_V6_H_
#define	_MACHINE_ATOMIC_V6_H_

#ifndef _MACHINE_ATOMIC_H_
#error Do not include this file directly, use <machine/atomic.h>
#endif

#if __ARM_ARCH >= 7
#define isb()  __asm __volatile("isb" : : : "memory")
#define dsb()  __asm __volatile("dsb" : : : "memory")
#define dmb()  __asm __volatile("dmb" : : : "memory")
#elif __ARM_ARCH >= 6
#define isb()  __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
#define dsb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
#define dmb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
#else
#error Only use this file with ARMv6 and later
#endif

#define mb()   dmb()
#define wmb()  dmb()
#define rmb()  dmb()

#define	ARM_HAVE_ATOMIC64

#define ATOMIC_ACQ_REL_LONG(NAME)					\
static __inline void							\
atomic_##NAME##_acq_long(__volatile u_long *p, u_long v)		\
{									\
	atomic_##NAME##_long(p, v);					\
	dmb();								\
}									\
									\
static __inline void							\
atomic_##NAME##_rel_long(__volatile u_long *p, u_long v)		\
{									\
	dmb();								\
	atomic_##NAME##_long(p, v);					\
}

#define	ATOMIC_ACQ_REL(NAME, WIDTH)					\
static __inline void							\
atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	atomic_##NAME##_##WIDTH(p, v);					\
	dmb();								\
}									\
									\
static __inline void							\
atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	dmb();								\
	atomic_##NAME##_##WIDTH(p, v);					\
}

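/*
 * Illustrative note (not part of the original source): each use of the
 * macro above stamps out an acquire and a release wrapper around the
 * corresponding relaxed primitive.  ATOMIC_ACQ_REL(add, 32), for example,
 * expands to roughly:
 *
 *	static __inline void
 *	atomic_add_acq_32(__volatile uint32_t *p, uint32_t v)
 *	{
 *		atomic_add_32(p, v);
 *		dmb();
 *	}
 *
 *	static __inline void
 *	atomic_add_rel_32(__volatile uint32_t *p, uint32_t v)
 *	{
 *		dmb();
 *		atomic_add_32(p, v);
 *	}
 */
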
static __inline void
atomic_add_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t tmp = 0, tmp2 = 0;

	__asm __volatile(
	    "1: ldrex	%0, [%2]	\n"
	    "   add	%0, %0, %3	\n"
	    "   strex	%1, %0, [%2]	\n"
	    "   cmp	%1, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "=&r" (tmp), "+r" (tmp2)
	    ,"+r" (p), "+r" (val) : : "cc", "memory");
}

static __inline void
atomic_add_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[tmp], %R[tmp], [%[ptr]]		\n"
	    "   adds	%Q[tmp], %Q[val]			\n"
	    "   adc	%R[tmp], %R[tmp], %R[val]		\n"
	    "   strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p),
	      [val] "r"   (val)
	    : "cc", "memory");
}

static __inline void
atomic_add_long(volatile u_long *p, u_long val)
{

	atomic_add_32((volatile uint32_t *)p, val);
}

ATOMIC_ACQ_REL(add, 32)
ATOMIC_ACQ_REL(add, 64)
ATOMIC_ACQ_REL_LONG(add)

static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t setmask)
{
	uint32_t tmp = 0, tmp2 = 0;

	__asm __volatile(
	    "1: ldrex	%0, [%2]	\n"
	    "   bic	%0, %0, %3	\n"
	    "   strex	%1, %0, [%2]	\n"
	    "   cmp	%1, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask)
	    : : "cc", "memory");
}

static __inline void
atomic_clear_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[tmp], %R[tmp], [%[ptr]]		\n"
	    "   bic	%Q[tmp], %Q[val]			\n"
	    "   bic	%R[tmp], %R[val]			\n"
	    "   strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p),
	      [val] "r"   (val)
	    : "cc", "memory");
}

static __inline void
atomic_clear_long(volatile u_long *address, u_long setmask)
{

	atomic_clear_32((volatile uint32_t *)address, setmask);
}

ATOMIC_ACQ_REL(clear, 32)
ATOMIC_ACQ_REL(clear, 64)
ATOMIC_ACQ_REL_LONG(clear)

static __inline uint32_t
atomic_cmpset_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	uint32_t ret;

	__asm __volatile(
	    "1: ldrex	%0, [%1]	\n"
	    "   cmp	%0, %2		\n"
	    "   itt	ne		\n"
	    "   movne	%0, #0		\n"
	    "   bne	2f		\n"
	    "   strex	%0, %3, [%1]	\n"
	    "   cmp	%0, #0		\n"
	    "   ite	eq		\n"
	    "   moveq	%0, #1		\n"
	    "   bne	1b		\n"
	    "2:"
	    : "=&r" (ret), "+r" (p), "+r" (cmpval), "+r" (newval)
	    : : "cc", "memory");
	return (ret);
}

static __inline uint64_t
atomic_cmpset_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	uint64_t tmp;
	uint32_t ret;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[tmp], %R[tmp], [%[ptr]]		\n"
	    "   teq	%Q[tmp], %Q[cmpval]			\n"
	    "   itee	eq					\n"
	    "   teqeq	%R[tmp], %R[cmpval]			\n"
	    "   movne	%[ret], #0				\n"
	    "   bne	2f					\n"
	    "   strexd	%[ret], %Q[newval], %R[newval], [%[ptr]]\n"
	    "   teq	%[ret], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    "   mov	%[ret], #1				\n"
	    "2:							\n"
	    : [ret] "=&r" (ret),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p),
	      [cmpval] "r" (cmpval),
	      [newval] "r" (newval)
	    : "cc", "memory");
	return (ret);
}

static __inline u_long
atomic_cmpset_long(volatile u_long *p, u_long cmpval, u_long newval)
{

	return (atomic_cmpset_32((volatile uint32_t *)p, cmpval, newval));
}

static __inline uint32_t
atomic_cmpset_acq_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	uint32_t ret;

	ret = atomic_cmpset_32(p, cmpval, newval);
	dmb();
	return (ret);
}

static __inline uint64_t
atomic_cmpset_acq_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	uint64_t ret;

	ret = atomic_cmpset_64(p, cmpval, newval);
	dmb();
	return (ret);
}

static __inline u_long
atomic_cmpset_acq_long(volatile u_long *p, u_long cmpval, u_long newval)
{
	u_long ret;

	ret = atomic_cmpset_long(p, cmpval, newval);
	dmb();
	return (ret);
}

static __inline uint32_t
atomic_cmpset_rel_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{

	dmb();
	return (atomic_cmpset_32(p, cmpval, newval));
}

static __inline uint64_t
atomic_cmpset_rel_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{

	dmb();
	return (atomic_cmpset_64(p, cmpval, newval));
}

static __inline u_long
atomic_cmpset_rel_long(volatile u_long *p, u_long cmpval, u_long newval)
{

	dmb();
	return (atomic_cmpset_long(p, cmpval, newval));
}

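/*
 * Usage sketch (illustrative, not part of the original source): cmpset
 * returns nonzero on success and zero when the comparison fails, so a
 * typical lock-free read-modify-write caller retries until its update
 * lands.  counter and FLAG are hypothetical:
 *
 *	uint32_t old;
 *	do {
 *		old = *counter;
 *	} while (atomic_cmpset_32(counter, old, old | FLAG) == 0);
 */
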
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t tmp = 0, tmp2 = 0, ret = 0;

	__asm __volatile(
	    "1: ldrex	%0, [%3]	\n"
	    "   add	%1, %0, %4	\n"
	    "   strex	%2, %1, [%3]	\n"
	    "   cmp	%2, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "+r" (ret), "=&r" (tmp), "+r" (tmp2), "+r" (p), "+r" (val)
	    : : "cc", "memory");
	return (ret);
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t ret, tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[ret], %R[ret], [%[ptr]]		\n"
	    "   adds	%Q[tmp], %Q[ret], %Q[val]		\n"
	    "   adc	%R[tmp], %R[ret], %R[val]		\n"
	    "   strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [ret] "=&r" (ret),
	      [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p),
	      [val] "r"   (val)
	    : "cc", "memory");
	return (ret);
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long val)
{

	return (atomic_fetchadd_32((volatile uint32_t *)p, val));
}

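/*
 * Usage sketch (illustrative, not part of the original source): fetchadd
 * returns the value the location held before the addition, which makes it
 * a natural fit for handing out unique values.  next_ticket is a
 * hypothetical counter:
 *
 *	uint32_t my_ticket = atomic_fetchadd_32(&next_ticket, 1);
 */
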
static __inline uint32_t
atomic_load_acq_32(volatile uint32_t *p)
{
	uint32_t v;

	v = *p;
	dmb();
	return (v);
}

static __inline uint64_t
atomic_load_64(volatile uint64_t *p)
{
	uint64_t ret;

	/*
	 * The only way to atomically load 64 bits is with LDREXD which puts the
	 * exclusive monitor into the exclusive state, so reset it to open state
	 * with CLREX because we don't actually need to store anything.
	 */
	__asm __volatile(
	    "ldrexd	%Q[ret], %R[ret], [%[ptr]]	\n"
	    "clrex					\n"
	    : [ret] "=&r" (ret)
	    : [ptr] "r"   (p)
	    : "cc", "memory");
	return (ret);
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{
	uint64_t ret;

	ret = atomic_load_64(p);
	dmb();
	return (ret);
}

static __inline u_long
atomic_load_acq_long(volatile u_long *p)
{

	return (atomic_load_acq_32((volatile uint32_t *)p));
}

static __inline uint32_t
atomic_readandclear_32(volatile uint32_t *p)
{
	uint32_t ret, tmp = 0, tmp2 = 0;

	__asm __volatile(
	    "1: ldrex	%0, [%3]	\n"
	    "   mov	%1, #0		\n"
	    "   strex	%2, %1, [%3]	\n"
	    "   cmp	%2, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "=r" (ret), "=&r" (tmp), "+r" (tmp2), "+r" (p)
	    : : "cc", "memory");
	return (ret);
}

static __inline uint64_t
atomic_readandclear_64(volatile uint64_t *p)
{
	uint64_t ret, tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[ret], %R[ret], [%[ptr]]		\n"
	    "   mov	%Q[tmp], #0				\n"
	    "   mov	%R[tmp], #0				\n"
	    "   strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [ret] "=&r" (ret),
	      [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p)
	    : "cc", "memory");
	return (ret);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *p)
{

	return (atomic_readandclear_32((volatile uint32_t *)p));
}

static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{
	uint32_t tmp = 0, tmp2 = 0;

	__asm __volatile(
	    "1: ldrex	%0, [%2]	\n"
	    "   orr	%0, %0, %3	\n"
	    "   strex	%1, %0, [%2]	\n"
	    "   cmp	%1, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask)
	    : : "cc", "memory");
}

static __inline void
atomic_set_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[tmp], %R[tmp], [%[ptr]]		\n"
	    "   orr	%Q[tmp], %Q[val]			\n"
	    "   orr	%R[tmp], %R[val]			\n"
	    "   strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p),
	      [val] "r"   (val)
	    : "cc", "memory");
}

static __inline void
atomic_set_long(volatile u_long *address, u_long setmask)
{

	atomic_set_32((volatile uint32_t *)address, setmask);
}

ATOMIC_ACQ_REL(set, 32)
ATOMIC_ACQ_REL(set, 64)
ATOMIC_ACQ_REL_LONG(set)

static __inline void
atomic_subtract_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t tmp = 0, tmp2 = 0;

	__asm __volatile(
	    "1: ldrex	%0, [%2]	\n"
	    "   sub	%0, %0, %3	\n"
	    "   strex	%1, %0, [%2]	\n"
	    "   cmp	%1, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "=&r" (tmp), "+r" (tmp2), "+r" (p), "+r" (val)
	    : : "cc", "memory");
}

static __inline void
atomic_subtract_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[tmp], %R[tmp], [%[ptr]]		\n"
	    "   subs	%Q[tmp], %Q[val]			\n"
	    "   sbc	%R[tmp], %R[tmp], %R[val]		\n"
	    "   strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p),
	      [val] "r"   (val)
	    : "cc", "memory");
}

static __inline void
atomic_subtract_long(volatile u_long *p, u_long val)
{

	atomic_subtract_32((volatile uint32_t *)p, val);
}

ATOMIC_ACQ_REL(subtract, 32)
ATOMIC_ACQ_REL(subtract, 64)
ATOMIC_ACQ_REL_LONG(subtract)

static __inline void
atomic_store_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp;
	uint32_t exflag;

	/*
	 * The only way to atomically store 64 bits is with STREXD, which will
	 * succeed only if paired up with a preceding LDREXD using the same
	 * address, so we read and discard the existing value before storing.
	 */
	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[tmp], %R[tmp], [%[ptr]]		\n"
	    "   strexd	%[exf], %Q[val], %R[val], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [tmp] "=&r" (tmp),
	      [exf] "=&r" (exflag)
	    : [ptr] "r"   (p),
	      [val] "r"   (val)
	    : "cc", "memory");
}

static __inline void
atomic_store_rel_32(volatile uint32_t *p, uint32_t v)
{

	dmb();
	*p = v;
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t val)
{

	dmb();
	atomic_store_64(p, val);
}

static __inline void
atomic_store_rel_long(volatile u_long *p, u_long v)
{

	dmb();
	*p = v;
}

static __inline int
atomic_testandset_32(volatile uint32_t *p, u_int v)
{
	uint32_t tmp, tmp2, res, mask;

	mask = 1u << (v & 0x1f);

	__asm __volatile(
	    "1: ldrex	%0, [%4]	\n"
	    "   orr	%1, %0, %3	\n"
	    "   strex	%2, %1, [%4]	\n"
	    "   cmp	%2, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "=&r" (res), "=&r" (tmp), "=&r" (tmp2)
	    : "r" (mask), "r" (p)
	    : "cc", "memory");
	return ((res & mask) != 0);
}

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{

	return (atomic_testandset_32((volatile uint32_t *)p, v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_32((volatile uint32_t *)p, v));
}

static __inline int
atomic_testandset_64(volatile uint64_t *p, u_int v)
{
	volatile uint32_t *p32;

	p32 = (volatile uint32_t *)p;
	/* Assume little-endian */
	if (v >= 32) {
		v &= 0x1f;
		p32++;
	}
	return (atomic_testandset_32(p32, v));
}

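/*
 * Worked example (illustrative, not part of the original source): on a
 * little-endian machine bit 40 of a 64-bit word lives in bit 8 of the
 * upper 32-bit half, so for v == 40 the code above advances p32 to the
 * second word and masks v down to 40 & 0x1f == 8 before delegating to
 * atomic_testandset_32().
 */
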
static __inline uint32_t
atomic_swap_32(volatile uint32_t *p, uint32_t v)
{
	uint32_t ret, exflag;

	__asm __volatile(
	    "1: ldrex	%[ret], [%[ptr]]		\n"
	    "   strex	%[exf], %[val], [%[ptr]]	\n"
	    "   teq	%[exf], #0			\n"
	    "   it	ne				\n"
	    "   bne	1b				\n"
	    : [ret] "=&r" (ret),
	      [exf] "=&r" (exflag)
	    : [val] "r"   (v),
	      [ptr] "r"   (p)
	    : "cc", "memory");
	return (ret);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{
	uint64_t ret;
	uint32_t exflag;

	__asm __volatile(
	    "1: ldrexd	%Q[ret], %R[ret], [%[ptr]]		\n"
	    "   strexd	%[exf], %Q[val], %R[val], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [ret] "=&r" (ret),
	      [exf] "=&r" (exflag)
	    : [val] "r"   (v),
	      [ptr] "r"   (p)
	    : "cc", "memory");
	return (ret);
}

#undef ATOMIC_ACQ_REL
#undef ATOMIC_ACQ_REL_LONG

static __inline void
atomic_thread_fence_acq(void)
{

	dmb();
}

static __inline void
atomic_thread_fence_rel(void)
{

	dmb();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	dmb();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	dmb();
}

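/*
 * Usage sketch (illustrative, not part of the original source): the acq/rel
 * operations defined in this file pair up in the usual message-passing
 * pattern.  A hypothetical producer publishes data with a release store and
 * a consumer waits with an acquire load, so the write to data is ordered
 * before the write to flag and is visible once flag reads as 1:
 *
 *	// producer			// consumer
 *	data = 42;			while (atomic_load_acq_32(&flag) == 0)
 *	atomic_store_rel_32(&flag, 1);		continue;
 *					// here data == 42
 */
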
#endif /* _MACHINE_ATOMIC_V6_H_ */