/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif
#ifndef __OFFSETOF_MONITORBUF
/*
 * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	__OFFSETOF_MONITORBUF	0x180

static __inline void
__mbk(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}

static __inline void
__mbu(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif
/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
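/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): the operations above are plain read-modify-write primitives,
 * so a flags word shared between an interrupt handler and top-half code
 * can be updated without a lock.  "pending" is a hypothetical variable:
 *
 *	static volatile u_int pending;
 *
 *	atomic_set_int(&pending, 0x01);		// mark work item 0 pending
 *	atomic_clear_int(&pending, 0x01);	// retire it
 *
 * These unadorned forms imply no acquire or release ordering; use the
 * _acq/_rel variants defined below when ordering matters.
 */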
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built, or if user mode is using these functions.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);
#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif
/*
 * The assembly is volatilized to avoid code chunk removal by the
 * compiler.  GCC aggressively reorders operations and memory clobbering
 * is necessary in order to avoid that for memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
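/*
 * For reference (editorial sketch): with the instantiations near the end
 * of this file, ATOMIC_ASM(set, int, "orl %1,%0", "ir", v) expands to
 * roughly
 *
 *	static __inline void
 *	atomic_set_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; orl %1,%0"
 *		: "+m" (*p) : "ir" (v) : "cc");
 *	}
 *
 * plus an atomic_set_barr_int() twin whose extra "memory" clobber keeps
 * the compiler from moving other memory accesses across the operation.
 */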
/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */
#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%1 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect)			/* 3 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");

	return (res);
}

static __inline int
atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_fcmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (*expect)		/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
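/*
 * Usage sketch (editorial addition): cmpset/fcmpset are the building
 * blocks for arbitrary read-modify-write loops.  fcmpset writes the
 * observed value back through "expect" on failure, saving a reload.
 * "counter" is a hypothetical variable:
 *
 *	static volatile u_int counter;
 *
 *	u_int old = counter;
 *	while (!atomic_fcmpset_int(&counter, &old, old * 2))
 *		continue;	// "old" was refreshed; retry with it
 */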
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  In the kernel, we use a private per-cpu
 * cache line for "mem", to avoid introducing false data
 * dependencies.  In user space, we use the word at the top of the
 * stack.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
#if defined(_KERNEL)
#if defined(SMP)
#define	__storeload_barrier()	__mbk()
#else /* _KERNEL && UP */
#define	__storeload_barrier()	__compiler_membar()
#endif /* SMP */
#else /* !_KERNEL */
#define	__storeload_barrier()	__mbu()
#endif /* _KERNEL */
#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack
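/*
 * Pairing sketch (editorial addition): on x86 the acquire load and the
 * release store compile to ordinary loads and stores plus a compiler
 * barrier, yet they still form the usual producer/consumer protocol.
 * "data", "ready" and the consumer loop are hypothetical:
 *
 *	// producer
 *	data = 42;
 *	atomic_store_rel_int(&ready, 1);
 *
 *	// consumer
 *	while (atomic_load_acq_int(&ready) == 0)
 *		cpu_spinwait();
 *	// "data" is now guaranteed to read 42
 */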
static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}
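/*
 * Example (editorial sketch): only the seq_cst fence emits a real
 * Store/Load barrier; the classic case that needs it is Dekker-style
 * flag checking, where each CPU's load must not pass its own store.
 * my_flag/their_flag are hypothetical:
 *
 *	atomic_store_rel_int(&my_flag, 1);
 *	atomic_thread_fence_seq_cst();		// Store/Load barrier
 *	if (atomic_load_acq_int(&their_flag) == 0) {
 *		// safe to enter the critical section
 *	}
 */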
#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif
/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");

	return (res);
}
static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");

	return (res);
}
static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}
static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */

	return (res);
}
static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");

	return (res);
}
static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");

	return (res);
}
static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}
static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");

	return (v);
}
static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}
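/*
 * Usage sketch (editorial addition): callers use the dispatching
 * wrappers above, never the _i386/_i586 variants directly; the
 * CPUID_CX8 test picks the cmpxchg8b path at run time on capable CPUs.
 * "seq" is a hypothetical variable:
 *
 *	static volatile uint64_t seq;
 *
 *	uint64_t old = atomic_load_acq_64(&seq);
 *	while (!atomic_cmpset_64(&seq, old, old + 1))
 *		old = atomic_load_acq_64(&seq);
 */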
#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */
ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq", v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq", v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq", v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir", v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir", v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir", v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir", v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir", v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir", v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir", v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir", v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir", v);
#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline int
atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src)
{

	return (atomic_fcmpset_int((volatile u_int *)dst, (u_int *)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

	return (atomic_testandclear_int((volatile u_int *)p, v));
}
/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */
#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
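/*
 * Example (editorial sketch): readandclear is a swap with zero, which
 * drains a pending-event mask in one atomic step.  "pending_events" is
 * a hypothetical volatile u_int:
 *
 *	u_int events = atomic_readandclear_int(&pending_events);
 *	while (events != 0) {
 *		int bit = ffs(events) - 1;
 *		events &= ~(1u << bit);
 *		// handle event "bit"
 *	}
 */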
/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int
/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_ptr(dst, old, new) \
	atomic_fcmpset_int((volatile u_int *)(dst), (u_int *)(old), (u_int)(new))
#define	atomic_fcmpset_acq_ptr(dst, old, new) \
	atomic_fcmpset_acq_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_rel_ptr(dst, old, new) \
	atomic_fcmpset_rel_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
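/*
 * Example (editorial sketch): the pointer forms simply reinterpret the
 * 32-bit int primitives, since sizeof(void *) == 4 on i386.  A typical
 * use is a lock-free LIFO push; "head" and "struct node" are
 * hypothetical, and a real reclaiming pop would also need to deal with
 * the ABA problem:
 *
 *	struct node { struct node *next; };
 *
 *	do {
 *		n->next = *head;
 *	} while (!atomic_cmpset_ptr(head, n->next, n));
 */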
#endif /* !WANT_FUNCTIONS */

#if defined(_KERNEL)
#define	mb()	__mbk()
#define	wmb()	__mbk()
#define	rmb()	__mbk()
#else
#define	mb()	__mbu()
#define	wmb()	__mbu()
#define	rmb()	__mbu()
#endif

#endif /* !_MACHINE_ATOMIC_H_ */