/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#include <sys/atomic_common.h>

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif
#ifndef __OFFSETOF_MONITORBUF
/*
 * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define __OFFSETOF_MONITORBUF	0x180

static __inline void
__mbk(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}

static __inline void
__mbu(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif
/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
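/*
 * Usage sketch (illustrative only; "refs" and "obj_destroy" are
 * hypothetical names, not part of this header or the kernel API):
 * a reference count maintained with these primitives.
 *
 *	static volatile u_int refs = 1;
 *
 *	atomic_add_int(&refs, 1);		take another reference
 *	...
 *	if (atomic_fetchadd_int(&refs, -1) == 1)
 *		obj_destroy();			last reference dropped
 */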
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built, or if user mode is using these functions.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
int atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
    u_short src);
int atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
u_int atomic_fetchadd_int(volatile u_int *p, u_int v);
int atomic_testandset_int(volatile u_int *p, u_int v);
int atomic_testandclear_int(volatile u_int *p, u_int v);
void atomic_thread_fence_acq(void);
void atomic_thread_fence_acq_rel(void);
void atomic_thread_fence_rel(void);
void atomic_thread_fence_seq_cst(void);
#define ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_fetchadd_64(volatile uint64_t *, uint64_t);
void		atomic_add_64(volatile uint64_t *, uint64_t);
void		atomic_subtract_64(volatile uint64_t *, uint64_t);
#else /* !KLD_MODULE && __GNUCLIKE_ASM */
/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED	"lock ; "
#else
#define MPLOCKED
#endif
/*
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
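/*
 * Usage sketch (illustrative only): the classic compare-and-set retry
 * loop for an arbitrary read-modify-write, here setting the low bit of
 * a hypothetical word "w".  fcmpset is the cheaper primitive to retry
 * with, since on failure it writes the current value of *dst back into
 * "old" instead of requiring a separate reload:
 *
 *	volatile u_int w;
 *	u_int old, new;
 *
 *	old = w;
 *	do {
 *		new = old | 0x1;
 *	} while (atomic_fcmpset_int(&w, &old, new) == 0);
 */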
#define ATOMIC_CMPSET(TYPE, CONS)			\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg	%3,%1 ;	"			\
	"	sete	%0 ;		"		\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: CONS (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg	%3,%1 ;	"			\
	"	sete	%0 ;		"		\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: CONS (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char, "q");
ATOMIC_CMPSET(short, "r");
ATOMIC_CMPSET(int, "r");
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
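/*
 * Usage sketch (illustrative only; "ticket" is a hypothetical name):
 * because xaddl performs the whole read-modify-write as one locked
 * instruction, fetchadd is the natural way to hand out unique values
 * from a shared counter:
 *
 *	static volatile u_int ticket;
 *	u_int mine;
 *
 *	mine = atomic_fetchadd_int(&ticket, 1);
 */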
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;	"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;	"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  In the kernel, we use a private per-cpu
 * cache line for "mem", to avoid introducing false data
 * dependencies.  In user space, we use the word at the top of the
 * stack.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
#ifdef _KERNEL
#ifdef SMP
#define __storeload_barrier()	__mbk()
#else /* _KERNEL && UP */
#define __storeload_barrier()	__compiler_membar()
#endif /* SMP */
#else /* !_KERNEL */
#define __storeload_barrier()	__mbu()
#endif /* _KERNEL */
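/*
 * Illustrative sketch (assumed scenario, not from the original header):
 * the Store/Load barrier matters in Dekker-style handshakes, where each
 * CPU stores its own flag and then loads its peer's.  Without a
 * sequentially consistent fence between the store and the load, both
 * CPUs can observe the peer flag as 0 and proceed together:
 *
 *	flag[self] = 1;
 *	atomic_thread_fence_seq_cst();
 *	if (flag[peer] == 0)
 *		(enter the critical section)
 */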
#define ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack
static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}
#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif
/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}
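/*
 * Added note: the _i386 variants above and below emulate 64-bit
 * atomicity by disabling interrupts around plain 32-bit accesses
 * (pushfl/cli ... popfl).  That is sufficient only because CPUs
 * lacking CMPXCHG8B are assumed to be uniprocessor, so interrupts
 * are the only source of concurrency.
 */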
static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}
static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}
static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%3,%1 ;		"
	"	movl	%4,%2 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}
static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}
static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}
static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}
static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}
static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}
static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}
static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}
static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}
static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{
	uint64_t t;

	for (;;) {
		t = *p;
		if (atomic_cmpset_64(p, t, t + v))
			break;
	}
	return (t);
}

static __inline void
atomic_add_64(volatile uint64_t *p, uint64_t v)
{
	uint64_t t;

	for (;;) {
		t = *p;
		if (atomic_cmpset_64(p, t, t + v))
			break;
	}
}

static __inline void
atomic_subtract_64(volatile uint64_t *p, uint64_t v)
{
	uint64_t t;

	for (;;) {
		t = *p;
		if (atomic_cmpset_64(p, t, t - v))
			break;
	}
}

#endif /* _KERNEL */
#endif /* KLD_MODULE || !__GNUCLIKE_ASM */
ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);
#define ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

	return (atomic_testandclear_int((volatile u_int *)p, v));
}
/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;	"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */
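/*
 * Usage sketch (illustrative only; "pending" is a hypothetical name):
 * xchgl asserts the lock signal implicitly, so atomic_swap_int needs
 * no MPLOCKED prefix.  A common pattern is draining a word of pending
 * event flags in a single step:
 *
 *	static volatile u_int pending;
 *	u_int work;
 *
 *	work = atomic_swap_int(&pending, 0);
 */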
#define atomic_set_acq_char		atomic_set_barr_char
#define atomic_set_rel_char		atomic_set_barr_char
#define atomic_clear_acq_char		atomic_clear_barr_char
#define atomic_clear_rel_char		atomic_clear_barr_char
#define atomic_add_acq_char		atomic_add_barr_char
#define atomic_add_rel_char		atomic_add_barr_char
#define atomic_subtract_acq_char	atomic_subtract_barr_char
#define atomic_subtract_rel_char	atomic_subtract_barr_char
#define atomic_cmpset_acq_char		atomic_cmpset_char
#define atomic_cmpset_rel_char		atomic_cmpset_char
#define atomic_fcmpset_acq_char		atomic_fcmpset_char
#define atomic_fcmpset_rel_char		atomic_fcmpset_char

#define atomic_set_acq_short		atomic_set_barr_short
#define atomic_set_rel_short		atomic_set_barr_short
#define atomic_clear_acq_short		atomic_clear_barr_short
#define atomic_clear_rel_short		atomic_clear_barr_short
#define atomic_add_acq_short		atomic_add_barr_short
#define atomic_add_rel_short		atomic_add_barr_short
#define atomic_subtract_acq_short	atomic_subtract_barr_short
#define atomic_subtract_rel_short	atomic_subtract_barr_short
#define atomic_cmpset_acq_short		atomic_cmpset_short
#define atomic_cmpset_rel_short		atomic_cmpset_short
#define atomic_fcmpset_acq_short	atomic_fcmpset_short
#define atomic_fcmpset_rel_short	atomic_fcmpset_short

#define atomic_set_acq_int		atomic_set_barr_int
#define atomic_set_rel_int		atomic_set_barr_int
#define atomic_clear_acq_int		atomic_clear_barr_int
#define atomic_clear_rel_int		atomic_clear_barr_int
#define atomic_add_acq_int		atomic_add_barr_int
#define atomic_add_rel_int		atomic_add_barr_int
#define atomic_subtract_acq_int		atomic_subtract_barr_int
#define atomic_subtract_rel_int		atomic_subtract_barr_int
#define atomic_cmpset_acq_int		atomic_cmpset_int
#define atomic_cmpset_rel_int		atomic_cmpset_int
#define atomic_fcmpset_acq_int		atomic_fcmpset_int
#define atomic_fcmpset_rel_int		atomic_fcmpset_int

#define atomic_set_acq_long		atomic_set_barr_long
#define atomic_set_rel_long		atomic_set_barr_long
#define atomic_clear_acq_long		atomic_clear_barr_long
#define atomic_clear_rel_long		atomic_clear_barr_long
#define atomic_add_acq_long		atomic_add_barr_long
#define atomic_add_rel_long		atomic_add_barr_long
#define atomic_subtract_acq_long	atomic_subtract_barr_long
#define atomic_subtract_rel_long	atomic_subtract_barr_long
#define atomic_cmpset_acq_long		atomic_cmpset_long
#define atomic_cmpset_rel_long		atomic_cmpset_long
#define atomic_fcmpset_acq_long		atomic_fcmpset_long
#define atomic_fcmpset_rel_long		atomic_fcmpset_long

#define atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define atomic_readandclear_long(p)	atomic_swap_long(p, 0)
/* Operations on 8-bit bytes. */
#define atomic_set_8		atomic_set_char
#define atomic_set_acq_8	atomic_set_acq_char
#define atomic_set_rel_8	atomic_set_rel_char
#define atomic_clear_8		atomic_clear_char
#define atomic_clear_acq_8	atomic_clear_acq_char
#define atomic_clear_rel_8	atomic_clear_rel_char
#define atomic_add_8		atomic_add_char
#define atomic_add_acq_8	atomic_add_acq_char
#define atomic_add_rel_8	atomic_add_rel_char
#define atomic_subtract_8	atomic_subtract_char
#define atomic_subtract_acq_8	atomic_subtract_acq_char
#define atomic_subtract_rel_8	atomic_subtract_rel_char
#define atomic_load_acq_8	atomic_load_acq_char
#define atomic_store_rel_8	atomic_store_rel_char
#define atomic_cmpset_8		atomic_cmpset_char
#define atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define atomic_fcmpset_8	atomic_fcmpset_char
#define atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define atomic_fcmpset_rel_8	atomic_fcmpset_rel_char
/* Operations on 16-bit words. */
#define atomic_set_16		atomic_set_short
#define atomic_set_acq_16	atomic_set_acq_short
#define atomic_set_rel_16	atomic_set_rel_short
#define atomic_clear_16		atomic_clear_short
#define atomic_clear_acq_16	atomic_clear_acq_short
#define atomic_clear_rel_16	atomic_clear_rel_short
#define atomic_add_16		atomic_add_short
#define atomic_add_acq_16	atomic_add_acq_short
#define atomic_add_rel_16	atomic_add_rel_short
#define atomic_subtract_16	atomic_subtract_short
#define atomic_subtract_acq_16	atomic_subtract_acq_short
#define atomic_subtract_rel_16	atomic_subtract_rel_short
#define atomic_load_acq_16	atomic_load_acq_short
#define atomic_store_rel_16	atomic_store_rel_short
#define atomic_cmpset_16	atomic_cmpset_short
#define atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define atomic_fcmpset_16	atomic_fcmpset_short
#define atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define atomic_fcmpset_rel_16	atomic_fcmpset_rel_short
/* Operations on 32-bit double words. */
#define atomic_set_32		atomic_set_int
#define atomic_set_acq_32	atomic_set_acq_int
#define atomic_set_rel_32	atomic_set_rel_int
#define atomic_clear_32		atomic_clear_int
#define atomic_clear_acq_32	atomic_clear_acq_int
#define atomic_clear_rel_32	atomic_clear_rel_int
#define atomic_add_32		atomic_add_int
#define atomic_add_acq_32	atomic_add_acq_int
#define atomic_add_rel_32	atomic_add_rel_int
#define atomic_subtract_32	atomic_subtract_int
#define atomic_subtract_acq_32	atomic_subtract_acq_int
#define atomic_subtract_rel_32	atomic_subtract_rel_int
#define atomic_load_acq_32	atomic_load_acq_int
#define atomic_store_rel_32	atomic_store_rel_int
#define atomic_cmpset_32	atomic_cmpset_int
#define atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define atomic_fcmpset_32	atomic_fcmpset_int
#define atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define atomic_swap_32		atomic_swap_int
#define atomic_readandclear_32	atomic_readandclear_int
#define atomic_fetchadd_32	atomic_fetchadd_int
#define atomic_testandset_32	atomic_testandset_int
#define atomic_testandclear_32	atomic_testandclear_int
/* Operations on 64-bit quad words. */
#define atomic_cmpset_acq_64	atomic_cmpset_64
#define atomic_cmpset_rel_64	atomic_cmpset_64
#define atomic_fetchadd_acq_64	atomic_fetchadd_64
#define atomic_fetchadd_rel_64	atomic_fetchadd_64
#define atomic_add_acq_64	atomic_add_64
#define atomic_add_rel_64	atomic_add_64
#define atomic_subtract_acq_64	atomic_subtract_64
#define atomic_subtract_rel_64	atomic_subtract_64
/* Operations on pointers. */
#define atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define atomic_fcmpset_ptr(dst, old, new) \
	atomic_fcmpset_int((volatile u_int *)(dst), (u_int *)(old), (u_int)(new))
#define atomic_fcmpset_acq_ptr(dst, old, new) \
	atomic_fcmpset_acq_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define atomic_fcmpset_rel_ptr(dst, old, new) \
	atomic_fcmpset_rel_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
#endif /* !WANT_FUNCTIONS */
#if defined(_KERNEL)
#define mb()	__mbk()
#define wmb()	__mbk()
#define rmb()	__mbk()
#else /* !_KERNEL */
#define mb()	__mbu()
#define wmb()	__mbu()
#define rmb()	__mbu()
#endif /* _KERNEL */
#endif /* !_MACHINE_ATOMIC_H_ */