/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#define mb() __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define wmb() __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define rmb() __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 *
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
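
/*
 * Usage sketch (illustrative only, not part of the original header; the
 * variable names are hypothetical).  A driver might maintain a word of
 * status flags and a statistics counter as follows:
 *
 *	volatile u_int sc_flags;
 *	volatile u_int sc_rx_packets;
 *
 *	atomic_set_int(&sc_flags, 0x01);		// set a flag bit
 *	atomic_clear_int(&sc_flags, 0x01);		// clear it again
 *	atomic_add_int(&sc_rx_packets, 1);		// bump a counter
 *	pending = atomic_readandclear_int(&sc_flags);	// grab and reset
 */
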
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);
#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED	"lock ; "
#else
#define MPLOCKED
#endif
/*
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
	__asm __volatile(MPLOCKED OP			\

static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
	__asm __volatile(MPLOCKED OP			\
/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */
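
/*
 * Illustrative sketch (not part of the original header): the classic way to
 * use atomic_cmpset_int() is a read-modify-write retry loop.  For example, a
 * hypothetical routine that atomically sets the top bit only if the word is
 * currently non-zero might look like:
 *
 *	static __inline int
 *	example_set_top_bit(volatile u_int *p)
 *	{
 *		u_int old;
 *
 *		do {
 *			old = *p;
 *			if (old == 0)
 *				return (0);
 *		} while (atomic_cmpset_int(p, old, old | 0x80000000) == 0);
 *		return (1);
 *	}
 */
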
#ifdef CPU_DISABLE_CMPXCHG

atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */

#else /* !CPU_DISABLE_CMPXCHG */

atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+a" (expect)			/* 2 */

#endif /* CPU_DISABLE_CMPXCHG */
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
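
/*
 * Illustrative sketch (not part of the original header): because the previous
 * value is returned, atomic_fetchadd_int() can implement a hypothetical
 * reference-count drop that detects the final release:
 *
 *	static __inline int
 *	example_refcount_release(volatile u_int *refs)
 *	{
 *		// fetchadd returns the old value; 1 means we dropped the last ref
 *		return (atomic_fetchadd_int(refs, -1) == 1);
 *	}
 */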
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
	"# atomic_fetchadd_int"

atomic_testandset_int(volatile u_int *p, u_int v)
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	: "Ir" (v & 0x1f)		/* 2 */
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so for atomic_load_acq we introduce a Store/Load barrier
 * before the load in SMP kernels.  We use "lock addl $0,mem", as
 * recommended by the AMD Software Optimization Guide, and not mfence.
 * In the kernel, we use a private per-cpu cache line as the target
 * for the locked addition, to avoid introducing false data
 * dependencies.  In userspace, a word at the top of the stack is
 * utilized.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
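
/*
 * Illustrative sketch (not part of the original header): the Store/Load
 * barrier matters for flag-based handshakes in which each CPU stores its own
 * flag and then loads the other CPU's flag.  With plain stores and loads,
 * both loads could read 0 because each store may still sit in its CPU's
 * store buffer.  On SMP kernels the locked addition issued by
 * atomic_load_acq_int() before the load rules that out:
 *
 *	// CPU 0				// CPU 1
 *	flag0 = 1;				flag1 = 1;
 *	if (atomic_load_acq_int(&flag1) == 0)	if (atomic_load_acq_int(&flag0) == 0)
 *		// at most one CPU can take	//  this branch
 */
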
/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define OFFSETOF_MONITORBUF	0x180
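
/*
 * Sketch of the cross-check mentioned above (the exact form lives in
 * i386/vm_machdep.c and may differ); it simply verifies the open-coded
 * offset against the real pcpu layout:
 *
 *	_Static_assert(OFFSETOF_MONITORBUF ==
 *	    offsetof(struct pcpu, pc_monitorbuf),
 *	    "OFFSETOF_MONITORBUF is out of sync with struct pcpu");
 */
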
__storeload_barrier(void)
	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");

#else /* _KERNEL && UP */

__storeload_barrier(void)

__storeload_barrier(void)
	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
/*
 * C11-standard acq/rel semantics only apply when the variable in the
 * call is the same for acq as it is for rel.  However, our previous
 * (x86) implementations provided much stronger ordering than required
 * (essentially what is called seq_cst order in C11).  This
 * implementation provides the historical strong ordering since some
 * callers depend on it.
 */
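
/*
 * Illustrative sketch (not part of the original header): a typical
 * producer/consumer pairing that relies on the acquire/release semantics
 * provided below; the names are hypothetical:
 *
 *	// producer				// consumer
 *	data = 42;				while (atomic_load_acq_int(&ready) == 0)
 *	atomic_store_rel_int(&ready, 1);		;
 *						// data is guaranteed to be 42 here
 */
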
#define ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
	__storeload_barrier();					\
	__compiler_membar();					\

#define ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
	__compiler_membar();					\

atomic_thread_fence_acq(void)

atomic_thread_fence_rel(void)

atomic_thread_fence_acq_rel(void)

atomic_thread_fence_seq_cst(void)
	__storeload_barrier();
#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif
/* I486 does not support SMP or CMPXCHG8B. */
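
/*
 * Rough C-level equivalent of the i386 (no cmpxchg8b) fallback below, for
 * illustration only; the real code is inline assembly that saves EFLAGS,
 * disables interrupts, performs the 64-bit compare and exchange as two
 * 32-bit halves, and restores EFLAGS.  This is only safe because such CPUs
 * are uniprocessor (the helper names here are hypothetical):
 *
 *	disable_interrupts();
 *	old = *dst;
 *	if (old == expect)
 *		*dst = src;
 *	restore_interrupts();
 *	return (old == expect);
 */
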
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
	volatile uint32_t *p;

	p = (volatile uint32_t *)dst;
	"	orl	%%edx,%%eax ;	"
	: "+A" (expect),		/* 0 */
	  "+m" (*(p + 1)),		/* 2 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	: "=&A" (res)			/* 0 */
	  "m" (*(q + 1))		/* 2 */

atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	  "=m" (*(q + 1))		/* 1 */

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	: "=&A" (res),			/* 0 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
	: "=q" (res),			/* 0 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	: "=&A" (res),			/* 0 */

atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	: : "ebx", "ecx", "memory", "cc");

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	: : "ebx", "ecx", "memory", "cc");
static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{
	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{
	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{
	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */
ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v);
ATOMIC_ASM(clear, char, "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add, char, "addb %b1,%0", "iq", v);
ATOMIC_ASM(subtract, char, "subb %b1,%0", "iq", v);

ATOMIC_ASM(set, short, "orw %w1,%0", "ir", v);
ATOMIC_ASM(clear, short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add, short, "addw %w1,%0", "ir", v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir", v);

ATOMIC_ASM(set, int, "orl %1,%0", "ir", v);
ATOMIC_ASM(clear, int, "andl %1,%0", "ir", ~v);
ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
ATOMIC_ASM(subtract, int, "subl %1,%0", "ir", v);

ATOMIC_ASM(set, long, "orl %1,%0", "ir", v);
ATOMIC_ASM(clear, long, "andl %1,%0", "ir", ~v);
ATOMIC_ASM(add, long, "addl %1,%0", "ir", v);
ATOMIC_ASM(subtract, long, "subl %1,%0", "ir", v);
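
/*
 * For illustration (not part of the original header): in the inline,
 * non-KLD_MODULE case, the invocation ATOMIC_ASM(add, int, "addl %1,%0",
 * "ir", v) above expands, roughly, to a pair of inline functions along
 * these lines:
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; addl %1,%0"
 *		    : "+m" (*p) : "ir" (v) : "cc");
 *	}
 *
 * plus an atomic_add_barr_int() variant that additionally clobbers "memory"
 * so the compiler will not move other accesses across it.
 */
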
#define ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));

atomic_testandset_long(volatile u_long *p, u_int v)
	return (atomic_testandset_int((volatile u_int *)p, v));
/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
	return (atomic_swap_int((volatile u_int *)p, (u_int)v));

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */
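
/*
 * Illustrative sketch (not part of the original header): atomic_swap_int()
 * unconditionally exchanges values, which makes a "grab and reset" pattern
 * easy, e.g. draining a hypothetical word of pending event bits:
 *
 *	static __inline u_int
 *	example_take_pending(volatile u_int *pending)
 *	{
 *		// returns every bit that was set and leaves the word zeroed
 *		return (atomic_swap_int(pending, 0));
 *	}
 *
 * which is exactly what the atomic_readandclear_int() macro below provides.
 */
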
#define atomic_set_acq_char		atomic_set_barr_char
#define atomic_set_rel_char		atomic_set_barr_char
#define atomic_clear_acq_char		atomic_clear_barr_char
#define atomic_clear_rel_char		atomic_clear_barr_char
#define atomic_add_acq_char		atomic_add_barr_char
#define atomic_add_rel_char		atomic_add_barr_char
#define atomic_subtract_acq_char	atomic_subtract_barr_char
#define atomic_subtract_rel_char	atomic_subtract_barr_char

#define atomic_set_acq_short		atomic_set_barr_short
#define atomic_set_rel_short		atomic_set_barr_short
#define atomic_clear_acq_short		atomic_clear_barr_short
#define atomic_clear_rel_short		atomic_clear_barr_short
#define atomic_add_acq_short		atomic_add_barr_short
#define atomic_add_rel_short		atomic_add_barr_short
#define atomic_subtract_acq_short	atomic_subtract_barr_short
#define atomic_subtract_rel_short	atomic_subtract_barr_short

#define atomic_set_acq_int		atomic_set_barr_int
#define atomic_set_rel_int		atomic_set_barr_int
#define atomic_clear_acq_int		atomic_clear_barr_int
#define atomic_clear_rel_int		atomic_clear_barr_int
#define atomic_add_acq_int		atomic_add_barr_int
#define atomic_add_rel_int		atomic_add_barr_int
#define atomic_subtract_acq_int		atomic_subtract_barr_int
#define atomic_subtract_rel_int		atomic_subtract_barr_int
#define atomic_cmpset_acq_int		atomic_cmpset_int
#define atomic_cmpset_rel_int		atomic_cmpset_int

#define atomic_set_acq_long		atomic_set_barr_long
#define atomic_set_rel_long		atomic_set_barr_long
#define atomic_clear_acq_long		atomic_clear_barr_long
#define atomic_clear_rel_long		atomic_clear_barr_long
#define atomic_add_acq_long		atomic_add_barr_long
#define atomic_add_rel_long		atomic_add_barr_long
#define atomic_subtract_acq_long	atomic_subtract_barr_long
#define atomic_subtract_rel_long	atomic_subtract_barr_long
#define atomic_cmpset_acq_long		atomic_cmpset_long
#define atomic_cmpset_rel_long		atomic_cmpset_long

#define atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define atomic_readandclear_long(p)	atomic_swap_long(p, 0)
/* Operations on 8-bit bytes. */
#define atomic_set_8		atomic_set_char
#define atomic_set_acq_8	atomic_set_acq_char
#define atomic_set_rel_8	atomic_set_rel_char
#define atomic_clear_8		atomic_clear_char
#define atomic_clear_acq_8	atomic_clear_acq_char
#define atomic_clear_rel_8	atomic_clear_rel_char
#define atomic_add_8		atomic_add_char
#define atomic_add_acq_8	atomic_add_acq_char
#define atomic_add_rel_8	atomic_add_rel_char
#define atomic_subtract_8	atomic_subtract_char
#define atomic_subtract_acq_8	atomic_subtract_acq_char
#define atomic_subtract_rel_8	atomic_subtract_rel_char
#define atomic_load_acq_8	atomic_load_acq_char
#define atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16		atomic_set_short
#define atomic_set_acq_16	atomic_set_acq_short
#define atomic_set_rel_16	atomic_set_rel_short
#define atomic_clear_16		atomic_clear_short
#define atomic_clear_acq_16	atomic_clear_acq_short
#define atomic_clear_rel_16	atomic_clear_rel_short
#define atomic_add_16		atomic_add_short
#define atomic_add_acq_16	atomic_add_acq_short
#define atomic_add_rel_16	atomic_add_rel_short
#define atomic_subtract_16	atomic_subtract_short
#define atomic_subtract_acq_16	atomic_subtract_acq_short
#define atomic_subtract_rel_16	atomic_subtract_rel_short
#define atomic_load_acq_16	atomic_load_acq_short
#define atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32		atomic_set_int
#define atomic_set_acq_32	atomic_set_acq_int
#define atomic_set_rel_32	atomic_set_rel_int
#define atomic_clear_32		atomic_clear_int
#define atomic_clear_acq_32	atomic_clear_acq_int
#define atomic_clear_rel_32	atomic_clear_rel_int
#define atomic_add_32		atomic_add_int
#define atomic_add_acq_32	atomic_add_acq_int
#define atomic_add_rel_32	atomic_add_rel_int
#define atomic_subtract_32	atomic_subtract_int
#define atomic_subtract_acq_32	atomic_subtract_acq_int
#define atomic_subtract_rel_32	atomic_subtract_rel_int
#define atomic_load_acq_32	atomic_load_acq_int
#define atomic_store_rel_32	atomic_store_rel_int
#define atomic_cmpset_32	atomic_cmpset_int
#define atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define atomic_swap_32		atomic_swap_int
#define atomic_readandclear_32	atomic_readandclear_int
#define atomic_fetchadd_32	atomic_fetchadd_int
#define atomic_testandset_32	atomic_testandset_int
/* Operations on pointers. */
#define atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
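
/*
 * Illustrative sketch (not part of the original header): the pointer
 * variants are commonly used for lock-free linked structures.  A
 * hypothetical singly-linked push using atomic_cmpset_ptr() might read:
 *
 *	struct item { struct item *next; };
 *
 *	static __inline void
 *	example_push(struct item **head, struct item *it)
 *	{
 *		do {
 *			it->next = *head;
 *		} while (atomic_cmpset_ptr(head, it->next, it) == 0);
 *	}
 */
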
#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */