1 /* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */
4 * SPDX-License-Identifier: BSD-4-Clause
6 * Copyright (C) 2003-2004 Olivier Houchard
7 * Copyright (C) 1994-1997 Mark Brinicombe
8 * Copyright (C) 1994 Brini
11 * This code is derived from software written for Brini by Mark Brinicombe
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 * must display the following acknowledgement:
23 * This product includes software developed by Brini.
24 * 4. The name of Brini may not be used to endorse or promote products
25 * derived from this software without specific prior written permission.
27 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
31 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
33 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
34 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
35 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
36 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #ifndef _MACHINE_ATOMIC_H_
40 #define _MACHINE_ATOMIC_H_
42 #include <sys/atomic_common.h>
/*
 * Memory barrier primitives.
 *
 * NOTE(review): two back-to-back definitions of isb/dsb/dmb are visible
 * here; presumably a preprocessor conditional on the architecture level
 * (not visible in this view of the file) selects between them -- the
 * dedicated barrier instructions on ARMv7+, the equivalent CP15 c7
 * operations on ARMv6.  Confirm against the complete file.
 */
#define isb()  __asm __volatile("isb" : : : "memory")
#define dsb()  __asm __volatile("dsb" : : : "memory")
#define dmb()  __asm __volatile("dmb" : : : "memory")

/* ARMv6 encodings of the same barriers via CP15 c7 (mcr). */
#define isb()  __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
#define dsb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
#define dmb()  __asm __volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")

/* ldrexd/strexd are used below, so native 64-bit atomics are provided. */
#define ARM_HAVE_ATOMIC64
/*
 * Generate atomic_<op>_acq_long() and atomic_<op>_rel_long() wrappers
 * around the plain atomic_<op>_long().
 *
 * NOTE(review): macro-body continuation lines (braces and, presumably,
 * the dmb() barrier calls that give these their acquire/release
 * semantics) are not visible in this view; comments are kept outside the
 * backslash-continued bodies so as not to break the continuations.
 */
#define ATOMIC_ACQ_REL_LONG(NAME)					\
static __inline void							\
atomic_##NAME##_acq_long(__volatile u_long *p, u_long v)		\
	atomic_##NAME##_long(p, v);					\
static __inline void							\
atomic_##NAME##_rel_long(__volatile u_long *p, u_long v)		\
	atomic_##NAME##_long(p, v);					\

/* Same wrapper generator for the fixed-width (uint<WIDTH>_t) operations. */
#define ATOMIC_ACQ_REL(NAME, WIDTH)					\
static __inline void							\
atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
	atomic_##NAME##_##WIDTH(p, v);					\
static __inline void							\
atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
	atomic_##NAME##_##WIDTH(p, v);					\
/*
 * atomic_add_32: *p += val via a ldrex/strex retry loop.
 * tmp holds the loaded value, tmp2 the strex success flag.
 *
 * NOTE(review): function headers, braces, and several asm lines (the add
 * instruction itself and the strex-failure retry branch back to 1:) are
 * missing from this view of the file; only the visible lines are kept.
 */
atomic_add_32(volatile uint32_t *p, uint32_t val)
	uint32_t tmp = 0, tmp2 = 0;
	    "1: ldrex	%0, [%2]	\n"
	    "   strex	%1, %0, [%2]	\n"
	    : "=&r" (tmp), "+r" (tmp2)
	    ,"+r" (p), "+r" (val) : : "cc", "memory");

/*
 * 64-bit add via ldrexd/strexd.  %Q/%R select the low/high word of a
 * 64-bit operand; adds/adc propagate the carry into the high word.
 */
atomic_add_64(volatile uint64_t *p, uint64_t val)
	"	ldrexd	%Q[tmp], %R[tmp], [%[ptr]]	\n"
	"	adds	%Q[tmp], %Q[val]		\n"
	"	adc	%R[tmp], %R[tmp], %R[val]	\n"
	"	strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	: [exf] "=&r" (exflag),

/* u_long add: delegates to the 32-bit op (u_long is 32-bit here). */
atomic_add_long(volatile u_long *p, u_long val)
	atomic_add_32((volatile uint32_t *)p, val);

/* Generate the _acq/_rel variants of the add operations. */
ATOMIC_ACQ_REL(add, 32)
ATOMIC_ACQ_REL(add, 64)
ATOMIC_ACQ_REL_LONG(add)
/*
 * atomic_clear_32: *address &= ~setmask (ldrex/strex retry loop).
 *
 * NOTE(review): the function header, braces, the bic instruction, and the
 * retry branch are missing from this view; only visible lines are kept.
 */
atomic_clear_32(volatile uint32_t *address, uint32_t setmask)
	uint32_t tmp = 0, tmp2 = 0;
	    "1: ldrex	%0, [%2]	\n"
	    "   strex	%1, %0, [%2]	\n"
	    : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask)

/* 64-bit clear: bic each half of the ldrexd'd pair, then strexd. */
atomic_clear_64(volatile uint64_t *p, uint64_t val)
	"	ldrexd	%Q[tmp], %R[tmp], [%[ptr]]	\n"
	"	bic	%Q[tmp], %Q[val]		\n"
	"	bic	%R[tmp], %R[val]		\n"
	"	strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	: [exf] "=&r" (exflag),

/* u_long clear: delegates to the 32-bit op (u_long is 32-bit here). */
atomic_clear_long(volatile u_long *address, u_long setmask)
	atomic_clear_32((volatile uint32_t *)address, setmask);

/* Generate the _acq/_rel variants of the clear operations. */
ATOMIC_ACQ_REL(clear, 32)
ATOMIC_ACQ_REL(clear, 64)
ATOMIC_ACQ_REL_LONG(clear)
/*
 * ATOMIC_FCMPSET_CODE: shared asm body of the fcmpset (fetch-compare-set)
 * entry points.  Loads *_ptr exclusively and compares it with *_old; on a
 * match it stores _new and (via eorseq of the strex status with 1) leaves
 * RET nonzero on success; on a mismatch it writes the observed value back
 * through _old and sets RET to 0.  SUF selects the access width suffix
 * ("b" = byte, "h" = halfword, "" = word).
 *
 * NOTE(review): continuation lines (locals, the retry branch taken when
 * strex fails, and the clobber list) are missing from this view; comments
 * are kept outside the backslash-continued body.
 */
#define ATOMIC_FCMPSET_CODE(RET, TYPE, SUF)			\
	"1: ldrex" SUF "   %[tmp], [%[ptr]]		\n"	\
	"   ldr" SUF "     %[ret], [%[oldv]]		\n"	\
	"   teq            %[tmp], %[ret]		\n"	\
	"   str" SUF "ne   %[tmp], [%[oldv]]		\n"	\
	"   movne          %[ret], #0			\n"	\
	"   strex" SUF "eq %[ret], %[newv], [%[ptr]]	\n"	\
	"   eorseq         %[ret], #1			\n"	\
	: [ret] "=&r" (RET),					\
	: [ptr] "r" (_ptr),					\

/*
 * 64-bit variant: ldrexd/strexd on register pairs; both halves (%Q low,
 * %R high) must match for the store to be attempted.
 */
#define ATOMIC_FCMPSET_CODE64(RET)				\
	"1: ldrexd   %Q[tmp], %R[tmp], [%[ptr]]		\n"	\
	"   ldrd     %Q[cmp], %R[cmp], [%[oldv]]	\n"	\
	"   teq      %Q[tmp], %Q[cmp]			\n"	\
	"   teqeq    %R[tmp], %R[cmp]			\n"	\
	"   movne    %[ret], #0				\n"	\
	"   strdne   %[cmp], [%[oldv]]			\n"	\
	"   strexdeq %[ret], %Q[newv], %R[newv], [%[ptr]]	\n"	\
	"   eorseq   %[ret], #1				\n"	\
	: [ret] "=&r" (RET),					\
	: [ptr] "r" (_ptr),					\
/*
 * fcmpset entry points for 8/16/32-bit, u_long, and 64-bit operands.
 * Each expands the shared asm body above; the _acq/_rel variants
 * presumably add dmb() barriers after/before the operation -- those
 * lines, along with return types, braces, and `ret` declarations, are
 * missing from this view of the file.
 */
atomic_fcmpset_8(volatile uint8_t *_ptr, uint8_t *_old, uint8_t _new)
	ATOMIC_FCMPSET_CODE(ret, uint8_t, "b");
/* Advertise a native 8-bit fcmpset so <sys/_atomic_subword.h> skips its fallback. */
#define	atomic_fcmpset_8	atomic_fcmpset_8
atomic_fcmpset_acq_8(volatile uint8_t *_ptr, uint8_t *_old, uint8_t _new)
	ATOMIC_FCMPSET_CODE(ret, uint8_t, "b");
atomic_fcmpset_rel_8(volatile uint8_t *_ptr, uint8_t *_old, uint8_t _new)
	ATOMIC_FCMPSET_CODE(ret, uint8_t, "b");

/* 16-bit ("h" = halfword access). */
atomic_fcmpset_16(volatile uint16_t *_ptr, uint16_t *_old, uint16_t _new)
	ATOMIC_FCMPSET_CODE(ret, uint16_t, "h");
/* Advertise a native 16-bit fcmpset as well. */
#define	atomic_fcmpset_16	atomic_fcmpset_16
atomic_fcmpset_acq_16(volatile uint16_t *_ptr, uint16_t *_old, uint16_t _new)
	ATOMIC_FCMPSET_CODE(ret, uint16_t, "h");
atomic_fcmpset_rel_16(volatile uint16_t *_ptr, uint16_t *_old, uint16_t _new)
	ATOMIC_FCMPSET_CODE(ret, uint16_t, "h");

/* 32-bit (plain word access, empty suffix). */
atomic_fcmpset_32(volatile uint32_t *_ptr, uint32_t *_old, uint32_t _new)
	ATOMIC_FCMPSET_CODE(ret, uint32_t, "");
atomic_fcmpset_acq_32(volatile uint32_t *_ptr, uint32_t *_old, uint32_t _new)
	ATOMIC_FCMPSET_CODE(ret, uint32_t, "");
atomic_fcmpset_rel_32(volatile uint32_t *_ptr, uint32_t *_old, uint32_t _new)
	ATOMIC_FCMPSET_CODE(ret, uint32_t, "");

/* u_long: same word-sized code path as the 32-bit variants. */
atomic_fcmpset_long(volatile u_long *_ptr, u_long *_old, u_long _new)
	ATOMIC_FCMPSET_CODE(ret, u_long, "");
atomic_fcmpset_acq_long(volatile u_long *_ptr, u_long *_old, u_long _new)
	ATOMIC_FCMPSET_CODE(ret, u_long, "");
atomic_fcmpset_rel_long(volatile u_long *_ptr, u_long *_old, u_long _new)
	ATOMIC_FCMPSET_CODE(ret, u_long, "");

/* 64-bit: uses the ldrexd/strexd body. */
atomic_fcmpset_64(volatile uint64_t *_ptr, uint64_t *_old, uint64_t _new)
	ATOMIC_FCMPSET_CODE64(ret);
atomic_fcmpset_acq_64(volatile uint64_t *_ptr, uint64_t *_old, uint64_t _new)
	ATOMIC_FCMPSET_CODE64(ret);
atomic_fcmpset_rel_64(volatile uint64_t *_ptr, uint64_t *_old, uint64_t _new)
	ATOMIC_FCMPSET_CODE64(ret);
/*
 * ATOMIC_CMPSET_CODE: shared asm body of the cmpset entry points.
 * Like fcmpset, but _old is passed by value and is NOT updated on a
 * mismatch: RET is simply 0 on mismatch, nonzero on a successful store.
 *
 * NOTE(review): continuation lines (the strex-failure retry branch and
 * the clobber list) are missing from this view; comments are kept
 * outside the backslash-continued body.
 */
#define ATOMIC_CMPSET_CODE(RET, SUF)				\
	"1: ldrex" SUF "   %[ret], [%[ptr]]		\n"	\
	"   teq            %[ret], %[oldv]		\n"	\
	"   movne          %[ret], #0			\n"	\
	"   strex" SUF "eq %[ret], %[newv], [%[ptr]]	\n"	\
	"   eorseq         %[ret], #1			\n"	\
	: [ret] "=&r" (RET)					\
	: [ptr] "r" (_ptr),					\

/* 64-bit variant: both halves of the ldrexd'd pair must match. */
#define ATOMIC_CMPSET_CODE64(RET)				\
	"1: ldrexd   %Q[tmp], %R[tmp], [%[ptr]]		\n"	\
	"   teq      %Q[tmp], %Q[oldv]			\n"	\
	"   teqeq    %R[tmp], %R[oldv]			\n"	\
	"   movne    %[ret], #0				\n"	\
	"   strexdeq %[ret], %Q[newv], %R[newv], [%[ptr]]	\n"	\
	"   eorseq   %[ret], #1				\n"	\
	: [ret] "=&r" (RET),					\
	: [ptr] "r" (_ptr),					\
/*
 * cmpset entry points for 8/16/32-bit, u_long, and 64-bit operands.
 * The _acq/_rel variants presumably add dmb() barriers; those lines,
 * along with return types, braces, and `ret` declarations, are missing
 * from this view of the file.
 */
atomic_cmpset_8(volatile uint8_t *_ptr, uint8_t _old, uint8_t _new)
	ATOMIC_CMPSET_CODE(ret, "b");
/* Advertise a native 8-bit cmpset so <sys/_atomic_subword.h> skips its fallback. */
#define	atomic_cmpset_8		atomic_cmpset_8
atomic_cmpset_acq_8(volatile uint8_t *_ptr, uint8_t _old, uint8_t _new)
	ATOMIC_CMPSET_CODE(ret, "b");
atomic_cmpset_rel_8(volatile uint8_t *_ptr, uint8_t _old, uint8_t _new)
	ATOMIC_CMPSET_CODE(ret, "b");

/* 16-bit ("h" = halfword access). */
atomic_cmpset_16(volatile uint16_t *_ptr, uint16_t _old, uint16_t _new)
	ATOMIC_CMPSET_CODE(ret, "h");
/* Advertise a native 16-bit cmpset as well. */
#define	atomic_cmpset_16	atomic_cmpset_16
atomic_cmpset_acq_16(volatile uint16_t *_ptr, uint16_t _old, uint16_t _new)
	ATOMIC_CMPSET_CODE(ret, "h");
atomic_cmpset_rel_16(volatile uint16_t *_ptr, uint16_t _old, uint16_t _new)
	ATOMIC_CMPSET_CODE(ret, "h");

/* 32-bit (plain word access, empty suffix). */
atomic_cmpset_32(volatile uint32_t *_ptr, uint32_t _old, uint32_t _new)
	ATOMIC_CMPSET_CODE(ret, "");
atomic_cmpset_acq_32(volatile uint32_t *_ptr, uint32_t _old, uint32_t _new)
	ATOMIC_CMPSET_CODE(ret, "");
atomic_cmpset_rel_32(volatile uint32_t *_ptr, uint32_t _old, uint32_t _new)
	ATOMIC_CMPSET_CODE(ret, "");

/* u_long: same word-sized code path as the 32-bit variants. */
atomic_cmpset_long(volatile u_long *_ptr, u_long _old, u_long _new)
	ATOMIC_CMPSET_CODE(ret, "");
atomic_cmpset_acq_long(volatile u_long *_ptr, u_long _old, u_long _new)
	ATOMIC_CMPSET_CODE(ret, "");
atomic_cmpset_rel_long(volatile u_long *_ptr, u_long _old, u_long _new)
	ATOMIC_CMPSET_CODE(ret, "");

/* 64-bit: uses the ldrexd/strexd body. */
atomic_cmpset_64(volatile uint64_t *_ptr, uint64_t _old, uint64_t _new)
	ATOMIC_CMPSET_CODE64(ret);
atomic_cmpset_acq_64(volatile uint64_t *_ptr, uint64_t _old, uint64_t _new)
	ATOMIC_CMPSET_CODE64(ret);
atomic_cmpset_rel_64(volatile uint64_t *_ptr, uint64_t _old, uint64_t _new)
	ATOMIC_CMPSET_CODE64(ret);
/*
 * atomic_fetchadd_32: *p += val; returns the PREVIOUS value of *p.
 * ret holds the loaded (old) value, tmp the sum, tmp2 the strex status.
 *
 * NOTE(review): braces, the add instruction, and the retry branch are
 * missing from this view; only the visible lines are kept.
 */
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t val)
	uint32_t tmp = 0, tmp2 = 0, ret = 0;
	    "1: ldrex	%0, [%3]	\n"
	    "   strex	%2, %1, [%3]	\n"
	    : "+r" (ret), "=&r" (tmp), "+r" (tmp2), "+r" (p), "+r" (val)

/*
 * 64-bit fetchadd: the old value is kept in [ret] while the sum is built
 * in [tmp] (adds/adc carry into the high word) and stored with strexd.
 */
static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t val)
	"	ldrexd	%Q[ret], %R[ret], [%[ptr]]	\n"
	"	adds	%Q[tmp], %Q[ret], %Q[val]	\n"
	"	adc	%R[tmp], %R[ret], %R[val]	\n"
	"	strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	[exf] "=&r" (exflag),

/* u_long fetchadd: delegates to the 32-bit op (u_long is 32-bit here). */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long val)
	return (atomic_fetchadd_32((volatile uint32_t *)p, val));
/*
 * atomic_load_acq_32: load with acquire semantics.
 * NOTE(review): the body (presumably a plain load followed by dmb()) is
 * missing from this view of the file.
 */
static __inline uint32_t
atomic_load_acq_32(volatile uint32_t *p)

/* atomic_load_64: atomic 64-bit load via LDREXD. */
static __inline uint64_t
atomic_load_64(volatile uint64_t *p)
	 * The only way to atomically load 64 bits is with LDREXD which puts the
	 * exclusive monitor into the exclusive state, so reset it to open state
	 * with CLREX because we don't actually need to store anything.
	    "ldrexd	%Q[ret], %R[ret], [%[ptr]]	\n"

/* Acquire-load of 64 bits: plain atomic load then (presumably) a dmb(). */
static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
	ret = atomic_load_64(p);

/* u_long acquire-load; body not visible in this view. */
static __inline u_long
atomic_load_acq_long(volatile u_long *p)
/*
 * atomic_readandclear_32: atomically set *p to 0, returning the old value.
 * NOTE(review): braces, the zeroing mov, and the retry branch are missing
 * from this view; only the visible lines are kept.
 */
static __inline uint32_t
atomic_readandclear_32(volatile uint32_t *p)
	uint32_t ret, tmp = 0, tmp2 = 0;
	    "1: ldrex	%0, [%3]	\n"
	    "   strex	%2, %1, [%3]	\n"
	    : "=r" (ret), "=&r" (tmp), "+r" (tmp2), "+r" (p)

/* 64-bit read-and-clear: old value in [ret], zero pair stored with strexd. */
static __inline uint64_t
atomic_readandclear_64(volatile uint64_t *p)
	"	ldrexd	%Q[ret], %R[ret], [%[ptr]]	\n"
	"	mov	%Q[tmp], #0			\n"
	"	mov	%R[tmp], #0			\n"
	"	strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	[exf] "=&r" (exflag),

/* u_long read-and-clear: delegates to the 32-bit op (u_long is 32-bit here). */
static __inline u_long
atomic_readandclear_long(volatile u_long *p)
	return (atomic_readandclear_32((volatile uint32_t *)p));
/*
 * atomic_set_32: *address |= setmask (ldrex/strex retry loop).
 * NOTE(review): the function header, braces, the orr instruction, and the
 * retry branch are missing from this view; only visible lines are kept.
 */
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
	uint32_t tmp = 0, tmp2 = 0;
	    "1: ldrex	%0, [%2]	\n"
	    "   strex	%1, %0, [%2]	\n"
	    : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask)

/* 64-bit set: orr each half of the ldrexd'd pair, then strexd. */
atomic_set_64(volatile uint64_t *p, uint64_t val)
	"	ldrexd	%Q[tmp], %R[tmp], [%[ptr]]	\n"
	"	orr	%Q[tmp], %Q[val]		\n"
	"	orr	%R[tmp], %R[val]		\n"
	"	strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	: [exf] "=&r" (exflag),

/* u_long set: delegates to the 32-bit op (u_long is 32-bit here). */
atomic_set_long(volatile u_long *address, u_long setmask)
	atomic_set_32((volatile uint32_t *)address, setmask);

/* Generate the _acq/_rel variants of the set operations. */
ATOMIC_ACQ_REL(set, 32)
ATOMIC_ACQ_REL(set, 64)
ATOMIC_ACQ_REL_LONG(set)
/*
 * atomic_subtract_32: *p -= val (ldrex/strex retry loop).
 * NOTE(review): the function header, braces, the sub instruction, and the
 * retry branch are missing from this view; only visible lines are kept.
 */
atomic_subtract_32(volatile uint32_t *p, uint32_t val)
	uint32_t tmp = 0, tmp2 = 0;
	    "1: ldrex	%0, [%2]	\n"
	    "   strex	%1, %0, [%2]	\n"
	    : "=&r" (tmp), "+r" (tmp2), "+r" (p), "+r" (val)

/* 64-bit subtract: subs/sbc propagate the borrow into the high word. */
atomic_subtract_64(volatile uint64_t *p, uint64_t val)
	"	ldrexd	%Q[tmp], %R[tmp], [%[ptr]]	\n"
	"	subs	%Q[tmp], %Q[val]		\n"
	"	sbc	%R[tmp], %R[tmp], %R[val]	\n"
	"	strexd	%[exf], %Q[tmp], %R[tmp], [%[ptr]]	\n"
	: [exf] "=&r" (exflag),

/* u_long subtract: delegates to the 32-bit op (u_long is 32-bit here). */
atomic_subtract_long(volatile u_long *p, u_long val)
	atomic_subtract_32((volatile uint32_t *)p, val);

/* Generate the _acq/_rel variants of the subtract operations. */
ATOMIC_ACQ_REL(subtract, 32)
ATOMIC_ACQ_REL(subtract, 64)
ATOMIC_ACQ_REL_LONG(subtract)
/*
 * atomic_store_64: atomic 64-bit store.
 * NOTE(review): the function header, braces, and the strexd-retry lines
 * are missing from this view; only visible lines are kept.
 */
atomic_store_64(volatile uint64_t *p, uint64_t val)
	 * The only way to atomically store 64 bits is with STREXD, which will
	 * succeed only if paired up with a preceeding LDREXD using the same
	 * address, so we read and discard the existing value before storing.
	"	ldrexd	%Q[tmp], %R[tmp], [%[ptr]]	\n"
	"	strexd	%[exf], %Q[val], %R[val], [%[ptr]]	\n"

/*
 * Release stores: presumably a dmb() precedes the plain store in each of
 * these; the bodies are largely missing from this view.
 */
atomic_store_rel_32(volatile uint32_t *p, uint32_t v)
atomic_store_rel_64(volatile uint64_t *p, uint64_t val)
	atomic_store_64(p, val);
atomic_store_rel_long(volatile u_long *p, u_long v)
/*
 * atomic_testandclear_32: atomically clear bit (bit % 32) of *ptr and
 * return its previous value (0 or 1).  The mask is built in ip; the
 * %[bit] operand is reused as the strex status and then as the result.
 * NOTE(review): the function header, braces, the mask setup before the
 * lsl, and the retry branch are missing from this view.
 */
atomic_testandclear_32(volatile uint32_t *ptr, u_int bit)
	int newv, oldv, result;
	"	lsl	ip, ip, %[bit]		\n"
	/* Done with %[bit] as input, reuse below as output. */
	"	ldrex	%[oldv], [%[ptr]]	\n"
	"	bic	%[newv], %[oldv], ip	\n"
	"	strex	%[bit], %[newv], [%[ptr]]	\n"
	"	ands	%[bit], %[oldv], ip	\n"
	"	movne	%[bit], #1		\n"
	: [bit] "=&r" (result),
	: "cc", "ip", "memory");

/* u_int wrapper over the 32-bit test-and-clear. */
atomic_testandclear_int(volatile u_int *p, u_int v)
	return (atomic_testandclear_32((volatile uint32_t *)p, v));

/* u_long wrapper: same 32-bit code path (u_long is 32-bit here). */
atomic_testandclear_long(volatile u_long *p, u_int v)
	return (atomic_testandclear_32((volatile uint32_t *)p, v));
#define	atomic_testandclear_long	atomic_testandclear_long

/*
 * 64-bit test-and-clear: selects the 32-bit half containing the bit and
 * reuses the 32-bit operation on it.
 */
atomic_testandclear_64(volatile uint64_t *p, u_int v)
	volatile uint32_t *p32;
	p32 = (volatile uint32_t *)p;
	 * Assume little-endian,
	 * atomic_testandclear_32() uses only last 5 bits of v
	return (atomic_testandclear_32(p32, v));
/*
 * atomic_testandset_32: atomically set bit (bit % 32) of *ptr and return
 * its previous value (0 or 1).  Mirrors testandclear but uses orr instead
 * of bic.
 * NOTE(review): the function header, braces, the mask setup before the
 * lsl, and the retry branch are missing from this view.
 */
atomic_testandset_32(volatile uint32_t *ptr, u_int bit)
	int newv, oldv, result;
	"	lsl	ip, ip, %[bit]		\n"
	/* Done with %[bit] as input, reuse below as output. */
	"	ldrex	%[oldv], [%[ptr]]	\n"
	"	orr	%[newv], %[oldv], ip	\n"
	"	strex	%[bit], %[newv], [%[ptr]]	\n"
	"	ands	%[bit], %[oldv], ip	\n"
	"	movne	%[bit], #1		\n"
	: [bit] "=&r" (result),
	: "cc", "ip", "memory");

/* u_int wrapper over the 32-bit test-and-set. */
atomic_testandset_int(volatile u_int *p, u_int v)
	return (atomic_testandset_32((volatile uint32_t *)p, v));

/* u_long wrapper: same 32-bit code path (u_long is 32-bit here). */
atomic_testandset_long(volatile u_long *p, u_int v)
	return (atomic_testandset_32((volatile uint32_t *)p, v));
#define	atomic_testandset_long	atomic_testandset_long

/*
 * 64-bit test-and-set: selects the 32-bit half containing the bit and
 * reuses the 32-bit operation on it.
 */
atomic_testandset_64(volatile uint64_t *p, u_int v)
	volatile uint32_t *p32;
	p32 = (volatile uint32_t *)p;
	 * Assume little-endian,
	 * atomic_testandset_32() uses only last 5 bits of v
	return (atomic_testandset_32(p32, v));
/*
 * atomic_swap_32: atomically store v into *p and return the old value.
 * NOTE(review): braces, the strex-status test, and the retry branch are
 * missing from this view; only the visible lines are kept.
 */
static __inline uint32_t
atomic_swap_32(volatile uint32_t *p, uint32_t v)
	uint32_t ret, exflag;
	    "1: ldrex	%[ret], [%[ptr]]	\n"
	    "   strex	%[exf], %[val], [%[ptr]]	\n"

/* u_long swap: delegates to the 32-bit op (u_long is 32-bit here). */
static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
	return (atomic_swap_32((volatile uint32_t *)p, v));

/* 64-bit swap: ldrexd/strexd loop, retried while the strexd flag is set. */
static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
	    "1: ldrexd	%Q[ret], %R[ret], [%[ptr]]	\n"
	    "   strexd	%[exf], %Q[val], %R[val], [%[ptr]]	\n"
	    "   teq	%[exf], #0		\n"
	    : [ret] "=&r" (ret),
	      [exf] "=&r" (exflag)
/* The generator macros are local to this header; drop them. */
#undef ATOMIC_ACQ_REL
#undef ATOMIC_ACQ_REL_LONG

/*
 * Stand-alone thread fences.  The bodies (presumably a single dmb() each;
 * on ARM the full barrier serves all four orderings) are missing from
 * this view of the file -- confirm against the original.
 */
static __inline void
atomic_thread_fence_acq(void)

static __inline void
atomic_thread_fence_rel(void)

static __inline void
atomic_thread_fence_acq_rel(void)

static __inline void
atomic_thread_fence_seq_cst(void)
/*
 * Pointer-sized operations alias the 32-bit implementations
 * (pointers are 32 bits on this platform).
 */
#define atomic_clear_ptr		atomic_clear_32
#define atomic_clear_acq_ptr		atomic_clear_acq_32
#define atomic_clear_rel_ptr		atomic_clear_rel_32
#define atomic_set_ptr			atomic_set_32
#define atomic_set_acq_ptr		atomic_set_acq_32
#define atomic_set_rel_ptr		atomic_set_rel_32
#define atomic_fcmpset_ptr		atomic_fcmpset_32
#define atomic_fcmpset_rel_ptr		atomic_fcmpset_rel_32
#define atomic_fcmpset_acq_ptr		atomic_fcmpset_acq_32
#define atomic_cmpset_ptr		atomic_cmpset_32
#define atomic_cmpset_acq_ptr		atomic_cmpset_acq_32
#define atomic_cmpset_rel_ptr		atomic_cmpset_rel_32
#define atomic_load_acq_ptr		atomic_load_acq_32
#define atomic_store_rel_ptr		atomic_store_rel_32
#define atomic_swap_ptr			atomic_swap_32
#define atomic_readandclear_ptr		atomic_readandclear_32

/* int-sized operations likewise alias the 32-bit implementations. */
#define atomic_add_int			atomic_add_32
#define atomic_add_acq_int		atomic_add_acq_32
#define atomic_add_rel_int		atomic_add_rel_32
#define atomic_subtract_int		atomic_subtract_32
#define atomic_subtract_acq_int		atomic_subtract_acq_32
#define atomic_subtract_rel_int		atomic_subtract_rel_32
#define atomic_clear_int		atomic_clear_32
#define atomic_clear_acq_int		atomic_clear_acq_32
#define atomic_clear_rel_int		atomic_clear_rel_32
#define atomic_set_int			atomic_set_32
#define atomic_set_acq_int		atomic_set_acq_32
#define atomic_set_rel_int		atomic_set_rel_32
#define atomic_fcmpset_int		atomic_fcmpset_32
#define atomic_fcmpset_acq_int		atomic_fcmpset_acq_32
#define atomic_fcmpset_rel_int		atomic_fcmpset_rel_32
#define atomic_cmpset_int		atomic_cmpset_32
#define atomic_cmpset_acq_int		atomic_cmpset_acq_32
#define atomic_cmpset_rel_int		atomic_cmpset_rel_32
#define atomic_fetchadd_int		atomic_fetchadd_32
#define atomic_readandclear_int		atomic_readandclear_32
#define atomic_load_acq_int		atomic_load_acq_32
#define atomic_store_rel_int		atomic_store_rel_32
#define atomic_swap_int			atomic_swap_32
1098 * - atomic_load_acq_8
1099 * - atomic_load_acq_16
1100 * - atomic_testandset_acq_long
1102 #include <sys/_atomic_subword.h>
1104 #endif /* _MACHINE_ATOMIC_H_ */