/* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */

/*-
 * Copyright (C) 2003-2004 Olivier Houchard
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _MACHINE_ATOMIC_V6_H_
#define	_MACHINE_ATOMIC_V6_H_

#ifndef _MACHINE_ATOMIC_H_
#error Do not include this file directly, use <machine/atomic.h>
#endif
#if __ARM_ARCH >= 7
#define isb()  __asm __volatile("isb" : : : "memory")
#define dsb()  __asm __volatile("dsb" : : : "memory")
#define dmb()  __asm __volatile("dmb" : : : "memory")
#elif __ARM_ARCH >= 6
#define isb()  __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
#define dsb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
#define dmb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
#else
#error Only use this file with ARMv6 and later
#endif

#define mb()   dmb()
#define wmb()  dmb()
#define rmb()  dmb()
#define	ARM_HAVE_ATOMIC64
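/*
 * The _acq (acquire) and _rel (release) variants generated below wrap the
 * plain operation with a dmb barrier: acquire performs the operation and
 * then dmb, ordering the atomic op before all later memory accesses;
 * release performs dmb first, ordering all earlier accesses before the op.
 */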
#define ATOMIC_ACQ_REL_LONG(NAME)					\
static __inline void							\
atomic_##NAME##_acq_long(__volatile u_long *p, u_long v)		\
{									\
	atomic_##NAME##_long(p, v);					\
	dmb();								\
}									\
									\
static __inline void							\
atomic_##NAME##_rel_long(__volatile u_long *p, u_long v)		\
{									\
	dmb();								\
	atomic_##NAME##_long(p, v);					\
}
#define ATOMIC_ACQ_REL(NAME, WIDTH)					\
static __inline void							\
atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	atomic_##NAME##_##WIDTH(p, v);					\
	dmb();								\
}									\
									\
static __inline void							\
atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	dmb();								\
	atomic_##NAME##_##WIDTH(p, v);					\
}
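/*
 * All read-modify-write primitives below share one load-linked/
 * store-conditional shape: ldrex marks the address in the exclusive
 * monitor, the new value is computed in registers, and strex stores it
 * only while the monitor is still exclusive, writing 0 on success or 1
 * if another observer intervened, in which case the loop retries at 1:.
 */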
static __inline void
atomic_add_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t tmp = 0, tmp2 = 0;

	__asm __volatile(
	    "1: ldrex	%0, [%2]	\n"
	    "   add	%0, %0, %3	\n"
	    "   strex	%1, %0, [%2]	\n"
	    "   cmp	%1, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "=&r" (tmp), "+r" (tmp2)
	    ,"+r" (p), "+r" (val) : : "cc", "memory");
}
static __inline void
atomic_add_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[tmp], %R[tmp], [%[ptr]]		\n"
	    "   adds	%Q[tmp], %Q[val]			\n"
	    "   adc	%R[tmp], %R[tmp], %R[val]		\n"
	    "   strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p),
	      [val] "r"   (val)
	    : "cc", "memory");
}
static __inline void
atomic_add_long(volatile u_long *p, u_long val)
{

	atomic_add_32((volatile uint32_t *)p, val);
}
ATOMIC_ACQ_REL(add, 32)
ATOMIC_ACQ_REL(add, 64)
ATOMIC_ACQ_REL_LONG(add)
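/*
 * Illustrative use (a sketch with a hypothetical counter, not part of
 * this header):
 *
 *	static volatile uint32_t nticks;
 *
 *	atomic_add_32(&nticks, 1);	// increment without lost updates
 */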
static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t setmask)
{
	uint32_t tmp = 0, tmp2 = 0;

	__asm __volatile(
	    "1: ldrex	%0, [%2]	\n"
	    "   bic	%0, %0, %3	\n"
	    "   strex	%1, %0, [%2]	\n"
	    "   cmp	%1, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask)
	    : : "cc", "memory");
}
static __inline void
atomic_clear_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[tmp], %R[tmp], [%[ptr]]		\n"
	    "   bic	%Q[tmp], %Q[val]			\n"
	    "   bic	%R[tmp], %R[val]			\n"
	    "   strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p),
	      [val] "r"   (val)
	    : "cc", "memory");
}
static __inline void
atomic_clear_long(volatile u_long *address, u_long setmask)
{

	atomic_clear_32((volatile uint32_t *)address, setmask);
}
ATOMIC_ACQ_REL(clear, 32)
ATOMIC_ACQ_REL(clear, 64)
ATOMIC_ACQ_REL_LONG(clear)
#define ATOMIC_FCMPSET_CODE(RET, TYPE, SUF)                   \
    {                                                         \
	TYPE tmp;                                             \
                                                              \
	__asm __volatile(                                     \
	    "1: ldrex" SUF "   %[tmp], [%[ptr]]          \n"  \
	    "   ldr" SUF "     %[ret], [%[oldv]]         \n"  \
	    "   teq            %[tmp], %[ret]            \n"  \
	    "   ittee          ne                        \n"  \
	    "   str" SUF "ne   %[tmp], [%[oldv]]         \n"  \
	    "   movne          %[ret], #0                \n"  \
	    "   strex" SUF "eq %[ret], %[newv], [%[ptr]] \n"  \
	    "   eorseq         %[ret], #1                \n"  \
	    "   beq            1b                        \n"  \
	    : [ret] "=&r" (RET),                              \
	      [tmp] "=&r" (tmp)                               \
	    : [ptr] "r"   (_ptr),                             \
	      [oldv] "r"  (_old),                             \
	      [newv] "r"  (_new)                              \
	    : "cc", "memory")                                 \
    }
#define ATOMIC_FCMPSET_CODE64(RET)                                 \
    {                                                              \
	uint64_t cmp, tmp;                                         \
                                                                   \
	__asm __volatile(                                          \
	    "1: ldrexd   %Q[tmp], %R[tmp], [%[ptr]]           \n"  \
	    "   ldrd     %Q[cmp], %R[cmp], [%[oldv]]          \n"  \
	    "   teq      %Q[tmp], %Q[cmp]                     \n"  \
	    "   it       eq                                   \n"  \
	    "   teqeq    %R[tmp], %R[cmp]                     \n"  \
	    "   ittee    ne                                   \n"  \
	    "   movne    %[ret], #0                           \n"  \
	    "   strdne   %[cmp], [%[oldv]]                    \n"  \
	    "   strexdeq %[ret], %Q[newv], %R[newv], [%[ptr]] \n"  \
	    "   eorseq   %[ret], #1                           \n"  \
	    "   beq      1b                                   \n"  \
	    : [ret] "=&r" (RET),                                   \
	      [cmp] "=&r" (cmp),                                   \
	      [tmp] "=&r" (tmp)                                    \
	    : [ptr] "r"   (_ptr),                                  \
	      [oldv] "r"  (_old),                                  \
	      [newv] "r"  (_new)                                   \
	    : "cc", "memory")                                      \
    }
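/*
 * fcmpset: if *_ptr holds *_old, store _new and return 1; otherwise write
 * the value actually observed back into *_old and return 0.  Because the
 * failure path reports the current value, callers can retry without a
 * separate re-read, unlike cmpset below.
 */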
static __inline int
atomic_fcmpset_8(volatile uint8_t *_ptr, uint8_t *_old, uint8_t _new)
{
	int ret;

	ATOMIC_FCMPSET_CODE(ret, uint8_t, "b");
	return (ret);
}
#define	atomic_fcmpset_8	atomic_fcmpset_8

static __inline int
atomic_fcmpset_acq_8(volatile uint8_t *_ptr, uint8_t *_old, uint8_t _new)
{
	int ret;

	ATOMIC_FCMPSET_CODE(ret, uint8_t, "b");
	dmb();
	return (ret);
}

static __inline int
atomic_fcmpset_rel_8(volatile uint8_t *_ptr, uint8_t *_old, uint8_t _new)
{
	int ret;

	dmb();
	ATOMIC_FCMPSET_CODE(ret, uint8_t, "b");
	return (ret);
}

static __inline int
atomic_fcmpset_16(volatile uint16_t *_ptr, uint16_t *_old, uint16_t _new)
{
	int ret;

	ATOMIC_FCMPSET_CODE(ret, uint16_t, "h");
	return (ret);
}
#define	atomic_fcmpset_16	atomic_fcmpset_16

static __inline int
atomic_fcmpset_acq_16(volatile uint16_t *_ptr, uint16_t *_old, uint16_t _new)
{
	int ret;

	ATOMIC_FCMPSET_CODE(ret, uint16_t, "h");
	dmb();
	return (ret);
}

static __inline int
atomic_fcmpset_rel_16(volatile uint16_t *_ptr, uint16_t *_old, uint16_t _new)
{
	int ret;

	dmb();
	ATOMIC_FCMPSET_CODE(ret, uint16_t, "h");
	return (ret);
}
static __inline int
atomic_fcmpset_32(volatile uint32_t *_ptr, uint32_t *_old, uint32_t _new)
{
	int ret;

	ATOMIC_FCMPSET_CODE(ret, uint32_t, "");
	return (ret);
}

static __inline int
atomic_fcmpset_acq_32(volatile uint32_t *_ptr, uint32_t *_old, uint32_t _new)
{
	int ret;

	ATOMIC_FCMPSET_CODE(ret, uint32_t, "");
	dmb();
	return (ret);
}

static __inline int
atomic_fcmpset_rel_32(volatile uint32_t *_ptr, uint32_t *_old, uint32_t _new)
{
	int ret;

	dmb();
	ATOMIC_FCMPSET_CODE(ret, uint32_t, "");
	return (ret);
}

static __inline int
atomic_fcmpset_long(volatile u_long *_ptr, u_long *_old, u_long _new)
{
	int ret;

	ATOMIC_FCMPSET_CODE(ret, u_long, "");
	return (ret);
}

static __inline int
atomic_fcmpset_acq_long(volatile u_long *_ptr, u_long *_old, u_long _new)
{
	int ret;

	ATOMIC_FCMPSET_CODE(ret, u_long, "");
	dmb();
	return (ret);
}

static __inline int
atomic_fcmpset_rel_long(volatile u_long *_ptr, u_long *_old, u_long _new)
{
	int ret;

	dmb();
	ATOMIC_FCMPSET_CODE(ret, u_long, "");
	return (ret);
}
static __inline int
atomic_fcmpset_64(volatile uint64_t *_ptr, uint64_t *_old, uint64_t _new)
{
	int ret;

	ATOMIC_FCMPSET_CODE64(ret);
	return (ret);
}

static __inline int
atomic_fcmpset_acq_64(volatile uint64_t *_ptr, uint64_t *_old, uint64_t _new)
{
	int ret;

	ATOMIC_FCMPSET_CODE64(ret);
	dmb();
	return (ret);
}

static __inline int
atomic_fcmpset_rel_64(volatile uint64_t *_ptr, uint64_t *_old, uint64_t _new)
{
	int ret;

	dmb();
	ATOMIC_FCMPSET_CODE64(ret);
	return (ret);
}
#define ATOMIC_CMPSET_CODE(RET, SUF)                         \
    {                                                        \
	__asm __volatile(                                    \
	    "1: ldrex" SUF "   %[ret], [%[ptr]]          \n" \
	    "   teq            %[ret], %[oldv]           \n" \
	    "   itee           ne                        \n" \
	    "   movne          %[ret], #0                \n" \
	    "   strex" SUF "eq %[ret], %[newv], [%[ptr]] \n" \
	    "   eorseq         %[ret], #1                \n" \
	    "   beq            1b                        \n" \
	    : [ret] "=&r" (RET)                              \
	    : [ptr] "r"   (_ptr),                            \
	      [oldv] "r"  (_old),                            \
	      [newv] "r"  (_new)                             \
	    : "cc", "memory")                                \
    }
#define ATOMIC_CMPSET_CODE64(RET)                                  \
    {                                                              \
	uint64_t tmp;                                              \
                                                                   \
	__asm __volatile(                                          \
	    "1: ldrexd   %Q[tmp], %R[tmp], [%[ptr]]           \n"  \
	    "   teq      %Q[tmp], %Q[oldv]                    \n"  \
	    "   it       eq                                   \n"  \
	    "   teqeq    %R[tmp], %R[oldv]                    \n"  \
	    "   itee     ne                                   \n"  \
	    "   movne    %[ret], #0                           \n"  \
	    "   strexdeq %[ret], %Q[newv], %R[newv], [%[ptr]] \n"  \
	    "   eorseq   %[ret], #1                           \n"  \
	    "   beq      1b                                   \n"  \
	    : [ret] "=&r" (RET),                                   \
	      [tmp] "=&r" (tmp)                                    \
	    : [ptr] "r"   (_ptr),                                  \
	      [oldv] "r"  (_old),                                  \
	      [newv] "r"  (_new)                                   \
	    : "cc", "memory")                                      \
    }
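/*
 * cmpset: the classic compare-and-swap; returns 1 if *_ptr matched _old
 * and _new was stored, 0 on mismatch.  The observed value is not passed
 * back, which is what makes fcmpset above the cheaper retry-loop form.
 */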
static __inline int
atomic_cmpset_8(volatile uint8_t *_ptr, uint8_t _old, uint8_t _new)
{
	int ret;

	ATOMIC_CMPSET_CODE(ret, "b");
	return (ret);
}
#define	atomic_cmpset_8	atomic_cmpset_8

static __inline int
atomic_cmpset_acq_8(volatile uint8_t *_ptr, uint8_t _old, uint8_t _new)
{
	int ret;

	ATOMIC_CMPSET_CODE(ret, "b");
	dmb();
	return (ret);
}

static __inline int
atomic_cmpset_rel_8(volatile uint8_t *_ptr, uint8_t _old, uint8_t _new)
{
	int ret;

	dmb();
	ATOMIC_CMPSET_CODE(ret, "b");
	return (ret);
}

static __inline int
atomic_cmpset_16(volatile uint16_t *_ptr, uint16_t _old, uint16_t _new)
{
	int ret;

	ATOMIC_CMPSET_CODE(ret, "h");
	return (ret);
}
#define	atomic_cmpset_16	atomic_cmpset_16

static __inline int
atomic_cmpset_acq_16(volatile uint16_t *_ptr, uint16_t _old, uint16_t _new)
{
	int ret;

	ATOMIC_CMPSET_CODE(ret, "h");
	dmb();
	return (ret);
}

static __inline int
atomic_cmpset_rel_16(volatile uint16_t *_ptr, uint16_t _old, uint16_t _new)
{
	int ret;

	dmb();
	ATOMIC_CMPSET_CODE(ret, "h");
	return (ret);
}
static __inline int
atomic_cmpset_32(volatile uint32_t *_ptr, uint32_t _old, uint32_t _new)
{
	int ret;

	ATOMIC_CMPSET_CODE(ret, "");
	return (ret);
}

static __inline int
atomic_cmpset_acq_32(volatile uint32_t *_ptr, uint32_t _old, uint32_t _new)
{
	int ret;

	ATOMIC_CMPSET_CODE(ret, "");
	dmb();
	return (ret);
}

static __inline int
atomic_cmpset_rel_32(volatile uint32_t *_ptr, uint32_t _old, uint32_t _new)
{
	int ret;

	dmb();
	ATOMIC_CMPSET_CODE(ret, "");
	return (ret);
}

static __inline int
atomic_cmpset_long(volatile u_long *_ptr, u_long _old, u_long _new)
{
	int ret;

	ATOMIC_CMPSET_CODE(ret, "");
	return (ret);
}

static __inline int
atomic_cmpset_acq_long(volatile u_long *_ptr, u_long _old, u_long _new)
{
	int ret;

	ATOMIC_CMPSET_CODE(ret, "");
	dmb();
	return (ret);
}

static __inline int
atomic_cmpset_rel_long(volatile u_long *_ptr, u_long _old, u_long _new)
{
	int ret;

	dmb();
	ATOMIC_CMPSET_CODE(ret, "");
	return (ret);
}
static __inline int
atomic_cmpset_64(volatile uint64_t *_ptr, uint64_t _old, uint64_t _new)
{
	int ret;

	ATOMIC_CMPSET_CODE64(ret);
	return (ret);
}

static __inline int
atomic_cmpset_acq_64(volatile uint64_t *_ptr, uint64_t _old, uint64_t _new)
{
	int ret;

	ATOMIC_CMPSET_CODE64(ret);
	dmb();
	return (ret);
}

static __inline int
atomic_cmpset_rel_64(volatile uint64_t *_ptr, uint64_t _old, uint64_t _new)
{
	int ret;

	dmb();
	ATOMIC_CMPSET_CODE64(ret);
	return (ret);
}
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t tmp = 0, tmp2 = 0, ret = 0;

	__asm __volatile(
	    "1: ldrex	%0, [%3]	\n"
	    "   add	%1, %0, %4	\n"
	    "   strex	%2, %1, [%3]	\n"
	    "   cmp	%2, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "+r" (ret), "=&r" (tmp), "+r" (tmp2), "+r" (p), "+r" (val)
	    : : "cc", "memory");
	return (ret);
}
static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t ret, tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[ret], %R[ret], [%[ptr]]		\n"
	    "   adds	%Q[tmp], %Q[ret], %Q[val]		\n"
	    "   adc	%R[tmp], %R[ret], %R[val]		\n"
	    "   strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [ret] "=&r" (ret),
	      [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p),
	      [val] "r"   (val)
	    : "cc", "memory");
	return (ret);
}
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long val)
{

	return (atomic_fetchadd_32((volatile uint32_t *)p, val));
}
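/*
 * fetchadd returns the value held before the addition.  A sketch of the
 * usual idiom, with a hypothetical sequence counter (not in this file):
 *
 *	uint32_t mine = atomic_fetchadd_32(&seq, 1);	// old value is mine
 */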
static __inline uint32_t
atomic_load_acq_32(volatile uint32_t *p)
{
	uint32_t v;

	v = *p;
	dmb();
	return (v);
}
static __inline uint64_t
atomic_load_64(volatile uint64_t *p)
{
	uint64_t ret;

	/*
	 * The only way to atomically load 64 bits is with LDREXD which puts the
	 * exclusive monitor into the exclusive state, so reset it to open state
	 * with CLREX because we don't actually need to store anything.
	 */
	__asm __volatile(
	    "ldrexd	%Q[ret], %R[ret], [%[ptr]]	\n"
	    "clrex					\n"
	    : [ret] "=&r" (ret)
	    : [ptr] "r"   (p)
	    : "cc", "memory");
	return (ret);
}
static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{
	uint64_t ret;

	ret = atomic_load_64(p);
	dmb();
	return (ret);
}
static __inline u_long
atomic_load_acq_long(volatile u_long *p)
{
	u_long v;

	v = atomic_load_acq_32((volatile uint32_t *)p);
	return (v);
}
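/*
 * An acquire load pairs with a release store: writes made before an
 * atomic_store_rel_*() on one CPU are visible to another CPU once its
 * matching atomic_load_acq_*() observes the stored value.
 */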
static __inline uint32_t
atomic_readandclear_32(volatile uint32_t *p)
{
	uint32_t ret, tmp = 0, tmp2 = 0;

	__asm __volatile(
	    "1: ldrex	%0, [%3]	\n"
	    "   mov	%1, #0		\n"
	    "   strex	%2, %1, [%3]	\n"
	    "   cmp	%2, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "=r" (ret), "=&r" (tmp), "+r" (tmp2), "+r" (p)
	    : : "cc", "memory");
	return (ret);
}
static __inline uint64_t
atomic_readandclear_64(volatile uint64_t *p)
{
	uint64_t ret, tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[ret], %R[ret], [%[ptr]]		\n"
	    "   mov	%Q[tmp], #0				\n"
	    "   mov	%R[tmp], #0				\n"
	    "   strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [ret] "=&r" (ret),
	      [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p)
	    : "cc", "memory");
	return (ret);
}
static __inline u_long
atomic_readandclear_long(volatile u_long *p)
{

	return (atomic_readandclear_32((volatile uint32_t *)p));
}
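/*
 * readandclear atomically swaps in zero and returns the previous
 * contents, e.g. for draining a word of pending-event bits in one shot.
 */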
static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{
	uint32_t tmp = 0, tmp2 = 0;

	__asm __volatile(
	    "1: ldrex	%0, [%2]	\n"
	    "   orr	%0, %0, %3	\n"
	    "   strex	%1, %0, [%2]	\n"
	    "   cmp	%1, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask)
	    : : "cc", "memory");
}
static __inline void
atomic_set_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[tmp], %R[tmp], [%[ptr]]		\n"
	    "   orr	%Q[tmp], %Q[val]			\n"
	    "   orr	%R[tmp], %R[val]			\n"
	    "   strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p),
	      [val] "r"   (val)
	    : "cc", "memory");
}
static __inline void
atomic_set_long(volatile u_long *address, u_long setmask)
{

	atomic_set_32((volatile uint32_t *)address, setmask);
}
ATOMIC_ACQ_REL(set, 32)
ATOMIC_ACQ_REL(set, 64)
ATOMIC_ACQ_REL_LONG(set)
static __inline void
atomic_subtract_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t tmp = 0, tmp2 = 0;

	__asm __volatile(
	    "1: ldrex	%0, [%2]	\n"
	    "   sub	%0, %0, %3	\n"
	    "   strex	%1, %0, [%2]	\n"
	    "   cmp	%1, #0		\n"
	    "   it	ne		\n"
	    "   bne	1b		\n"
	    : "=&r" (tmp), "+r" (tmp2), "+r" (p), "+r" (val)
	    : : "cc", "memory");
}
static __inline void
atomic_subtract_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[tmp], %R[tmp], [%[ptr]]		\n"
	    "   subs	%Q[tmp], %Q[val]			\n"
	    "   sbc	%R[tmp], %R[tmp], %R[val]		\n"
	    "   strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r"   (p),
	      [val] "r"   (val)
	    : "cc", "memory");
}
static __inline void
atomic_subtract_long(volatile u_long *p, u_long val)
{

	atomic_subtract_32((volatile uint32_t *)p, val);
}
ATOMIC_ACQ_REL(subtract, 32)
ATOMIC_ACQ_REL(subtract, 64)
ATOMIC_ACQ_REL_LONG(subtract)
static __inline void
atomic_store_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp;
	uint32_t exflag;

	/*
	 * The only way to atomically store 64 bits is with STREXD, which will
	 * succeed only if paired up with a preceding LDREXD using the same
	 * address, so we read and discard the existing value before storing.
	 */
	__asm __volatile(
	    "1:							\n"
	    "   ldrexd	%Q[tmp], %R[tmp], [%[ptr]]		\n"
	    "   strexd	%[exf], %Q[val], %R[val], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [tmp] "=&r" (tmp),
	      [exf] "=&r" (exflag)
	    : [ptr] "r"   (p),
	      [val] "r"   (val)
	    : "cc", "memory");
}
static __inline void
atomic_store_rel_32(volatile uint32_t *p, uint32_t v)
{

	dmb();
	*p = v;
}
static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t val)
{

	dmb();
	atomic_store_64(p, val);
}
static __inline void
atomic_store_rel_long(volatile u_long *p, u_long v)
{

	dmb();
	*p = v;
}
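/*
 * Aligned 32-bit stores are single-copy atomic on ARMv6+, so release
 * semantics only require the dmb before the plain store; the 64-bit
 * variant delegates to atomic_store_64 for the ldrexd/strexd sequence.
 */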
static __inline int
atomic_testandclear_32(volatile uint32_t *ptr, u_int bit)
{
	int newv, oldv, result;

	__asm __volatile(
	    "   mov	ip, #1					\n"
	    "   lsl	ip, ip, %[bit]				\n"
	    /*  Done with %[bit] as input, reuse below as output. */
	    "1:							\n"
	    "   ldrex	%[oldv], [%[ptr]]			\n"
	    "   bic	%[newv], %[oldv], ip			\n"
	    "   strex	%[bit], %[newv], [%[ptr]]		\n"
	    "   teq	%[bit], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    "   ands	%[bit], %[oldv], ip			\n"
	    "   it	ne					\n"
	    "   movne	%[bit], #1				\n"
	    : [bit]  "=&r"   (result),
	      [oldv] "=&r"   (oldv),
	      [newv] "=&r"   (newv)
	    : [ptr]  "r"     (ptr),
	             "[bit]" (bit)
	    : "cc", "ip", "memory");
	return (result);
}
static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{

	return (atomic_testandclear_32((volatile uint32_t *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

	return (atomic_testandclear_32((volatile uint32_t *)p, v));
}
#define	atomic_testandclear_long	atomic_testandclear_long
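/*
 * testandclear and testandset return the previous value of the tested
 * bit (0 or 1); ip (r12) is used as scratch for the computed bit mask.
 */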
static __inline int
atomic_testandset_32(volatile uint32_t *ptr, u_int bit)
{
	int newv, oldv, result;

	__asm __volatile(
	    "   mov	ip, #1					\n"
	    "   lsl	ip, ip, %[bit]				\n"
	    /*  Done with %[bit] as input, reuse below as output. */
	    "1:							\n"
	    "   ldrex	%[oldv], [%[ptr]]			\n"
	    "   orr	%[newv], %[oldv], ip			\n"
	    "   strex	%[bit], %[newv], [%[ptr]]		\n"
	    "   teq	%[bit], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    "   ands	%[bit], %[oldv], ip			\n"
	    "   it	ne					\n"
	    "   movne	%[bit], #1				\n"
	    : [bit]  "=&r"   (result),
	      [oldv] "=&r"   (oldv),
	      [newv] "=&r"   (newv)
	    : [ptr]  "r"     (ptr),
	             "[bit]" (bit)
	    : "cc", "ip", "memory");
	return (result);
}
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{

	return (atomic_testandset_32((volatile uint32_t *)p, v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_32((volatile uint32_t *)p, v));
}
#define	atomic_testandset_long	atomic_testandset_long
static __inline int
atomic_testandset_64(volatile uint64_t *p, u_int v)
{
	volatile uint32_t *p32;

	p32 = (volatile uint32_t *)p;
	/* Assume little-endian */
	if (v >= 32) {
		v &= 0x1f;
		p32++;
	}
	return (atomic_testandset_32(p32, v));
}
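/*
 * Only word-granularity exclusivity is needed here: bits 32..63 live in
 * the second 32-bit word on little-endian ARM, so the 64-bit case is
 * delegated to atomic_testandset_32 on the proper word.
 */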
static __inline uint32_t
atomic_swap_32(volatile uint32_t *p, uint32_t v)
{
	uint32_t ret, exflag;

	__asm __volatile(
	    "1: ldrex	%[ret], [%[ptr]]		\n"
	    "   strex	%[exf], %[val], [%[ptr]]	\n"
	    "   teq	%[exf], #0			\n"
	    "   it	ne				\n"
	    "   bne	1b				\n"
	    : [ret] "=&r" (ret),
	      [exf] "=&r" (exflag)
	    : [val] "r"  (v),
	      [ptr] "r"  (p)
	    : "cc", "memory");
	return (ret);
}
static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{
	uint64_t ret;
	uint32_t exflag;

	__asm __volatile(
	    "1: ldrexd	%Q[ret], %R[ret], [%[ptr]]		\n"
	    "   strexd	%[exf], %Q[val], %R[val], [%[ptr]]	\n"
	    "   teq	%[exf], #0				\n"
	    "   it	ne					\n"
	    "   bne	1b					\n"
	    : [ret] "=&r" (ret),
	      [exf] "=&r" (exflag)
	    : [val] "r"  (v),
	      [ptr] "r"  (p)
	    : "cc", "memory");
	return (ret);
}
#undef ATOMIC_ACQ_REL
#undef ATOMIC_ACQ_REL_LONG
static __inline void
atomic_thread_fence_acq(void)
{

	dmb();
}

static __inline void
atomic_thread_fence_rel(void)
{

	dmb();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	dmb();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	dmb();
}
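/*
 * Illustrative release/acquire pairing (sketch only, with hypothetical
 * variables; not part of this header):
 *
 *	// producer:				// consumer:
 *	data = 42;				while (atomic_load_acq_32(&flag) == 0)
 *	atomic_store_rel_32(&flag, 1);			continue;
 *						// data reads 42 here
 */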
#endif /* _MACHINE_ATOMIC_V6_H_ */