/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif
#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif
#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
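/*
 * All three barriers expand to the same locked add of zero to the top of
 * the stack: a locked read-modify-write is a full fence on x86 and,
 * unlike mfence/lfence/sfence, it is available on every i386-class CPU,
 * including those without SSE2.
 */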
/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
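/*
 * Usage sketch (illustrative only; "pending" and the bit values are
 * hypothetical, not part of this header):
 *
 *	static volatile u_int pending;
 *	u_int old;
 *
 *	atomic_set_int(&pending, 0x01);		  (set bit 0)
 *	atomic_clear_int(&pending, 0x01);	  (clear bit 0)
 *	atomic_add_int(&pending, 1);		  (increment)
 *	old = atomic_readandclear_int(&pending);  (fetch and zero in one step)
 *
 * Each call compiles to a single (possibly lock-prefixed) instruction,
 * so the update cannot be torn by an interrupt or another processor.
 */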
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built, or if user code is using these functions.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);

#define	ATOMIC_LOAD(TYPE, LOP)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */
/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif
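/*
 * MPLOCKED prefixes each read-modify-write below with "lock", making it
 * atomic across processors.  A UP kernel may leave it empty: on a single
 * CPU the un-prefixed instructions are already atomic with respect to
 * interrupts, and skipping the bus lock is cheaper.
 */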
/*
 * The assembly is volatilized to keep the compiler from removing the
 * code chunks.  GCC aggressively reorders operations, so the barrier
 * variants also need a "memory" clobber to prevent that reordering.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "memory", "cc");				\
}							\
struct __hack
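/*
 * For reference, a sketch of the expansion (whitespace aside, with
 * MPLOCKED expanded for an SMP build):
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) produces
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; addl %1,%0"
 *		: "=m" (*p)
 *		: "ir" (v), "m" (*p)
 *		: "cc");
 *	}
 *
 * plus an atomic_add_barr_int twin whose only difference is the extra
 * "memory" clobber, which forbids the compiler from moving other memory
 * accesses across the call.
 */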
#if defined(_KERNEL) && !defined(WANT_FUNCTIONS)

/* I486 does not support SMP or CMPXCHG8B. */
static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *high, *low;
	uint64_t res;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl %1,%%eax ;		"
	"	movl %2,%%edx ;		"
	"	popfl"
	: "=&A" (res)		/* 0 */
	: "m" (*low),		/* 1 */
	  "m" (*high)		/* 2 */
	: "memory");

	return (res);
}
static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *high, *low;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl %%eax,%0 ;		"
	"	movl %%edx,%1 ;		"
	"	popfl"
	: "=m" (*low),		/* 0 */
	  "=m" (*high)		/* 1 */
	: "A" (v)		/* 2 */
	: "memory");
}
static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl %%ebx,%%eax ;	"
	"	movl %%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),		/* 0 */
	  "+m" (*p)		/* 1 */
	: : "memory", "cc");

	return (res);
}
static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl %%eax,%%ebx ;	"
	"	movl %%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne 1b"
	: "+m" (*p),		/* 0 */
	  "+A" (v)		/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

#endif /* _KERNEL && !WANT_FUNCTIONS */
/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */
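/*
 * Usage sketch (illustrative; the lock word and its encoding are
 * hypothetical): a minimal spin loop built on compare-and-set.
 *
 *	static volatile u_int lockword;		(0 = free, 1 = held)
 *
 *	while (atomic_cmpset_acq_int(&lockword, 0, 1) == 0)
 *		;				(spin until we win the race)
 *	... critical section ...
 *	atomic_store_rel_int(&lockword, 0);
 *
 * The _acq/_rel variants keep the critical section from leaking past the
 * lock operations; plain atomic_cmpset_int gives no such ordering.
 */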
#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (expect),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory", "cc");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p)
	: "cc");
	return (v);
}
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2, bit index modulo 32 */
	: "cc");
	return (res);
}
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, loads may pass stores, so for atomic_load_acq we have to
 * ensure a Store/Load barrier to do the load in SMP kernels.  We use
 * "lock cmpxchg" as recommended by the AMD Software Optimization
 * Guide, and not mfence.  For UP kernels, however, the cache of the
 * single processor is always consistent, so we only need to take care
 * of the compiler.
 */
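/*
 * Release/acquire pairing sketch (illustrative; "data" and "ready" are
 * hypothetical):
 *
 *	producer:	data = 42;
 *			atomic_store_rel_int(&ready, 1);
 *
 *	consumer:	while (atomic_load_acq_int(&ready) == 0)
 *				;
 *			use(data);
 *
 * The release store keeps the write of "data" from sinking below the
 * flag update; the acquire load keeps the read of "data" from rising
 * above the flag check.
 */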
#define	ATOMIC_STORE(TYPE)				\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__compiler_membar();				\
	*p = v;						\
}							\
struct __hack
#if defined(_KERNEL) && !defined(SMP)

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__compiler_membar();				\
	return (tmp);					\
}							\
struct __hack
#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory", "cc");				\
							\
	return (res);					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */
#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif
/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");

	return (res);
}
static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%3,%1 ;		"
	"	movl	%4,%2 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */

	return (res);
}
static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");

	return (res);
}
static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl %%eax,%%ebx ;	"
	"	movl %%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne 1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");

	return (v);
}
static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}
static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}
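/*
 * Note on the dispatch above: cpu_feature is the CPUID feature word saved
 * at boot (see machine/md_var.h), and CPUID_CX8 advertises CMPXCHG8B.
 * The i386 fallbacks get away with a cli/popfl bracket around a split
 * 32-bit access only because CPUs lacking CMPXCHG8B are uniprocessor.
 */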
#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */
ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_LOAD(char,  "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int,   "cmpxchgl %0,%1");
ATOMIC_LOAD(long,  "cmpxchgl %0,%1");

ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#ifndef WANT_FUNCTIONS
#ifdef _KERNEL
/* Set at boot time to the _i386 or _i586 variant, keyed on CMPXCHG8B. */
extern uint64_t	(*atomic_load_acq_64)(volatile uint64_t *);
extern void	(*atomic_store_rel_64)(volatile uint64_t *, uint64_t);
#endif
/*
 * On i386, u_long and u_int are both 32 bits wide, so the long variants
 * simply delegate to the int implementations.
 */
static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}
static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}
/* Read the current value and store a zero in the destination. */
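/*
 * Usage sketch (illustrative; "work_pending" is hypothetical): drain a
 * set of flag bits exactly once, even against concurrent setters.
 *
 *	u_int bits;
 *
 *	bits = atomic_readandclear_int(&work_pending);
 *	while (bits != 0)
 *		... handle and clear the lowest set bit of "bits" ...
 *
 * Any bit set after the xchg lands in the next call's return value, so
 * no update is lost and none is handled twice.
 */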
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	/* xchg with a memory operand is implicitly locked. */
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}
static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}
static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}
static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}
#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *addr);
u_long	atomic_readandclear_long(volatile u_long *addr);
u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */
#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
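/*
 * Usage sketch (illustrative; the node type and "head" are hypothetical):
 * lock-free push onto a singly-linked list via the pointer variants.
 *
 *	struct node { struct node *next; };
 *	static struct node *head;
 *
 *	void
 *	push(struct node *n)
 *	{
 *		struct node *h;
 *
 *		do {
 *			h = (struct node *)atomic_load_acq_ptr(&head);
 *			n->next = h;
 *		} while (atomic_cmpset_rel_ptr(&head, h, n) == 0);
 *	}
 *
 * The macros cast through u_int, which is safe here only because i386
 * pointers are exactly 32 bits.  A matching pop would also have to cope
 * with ABA; this sketch covers push alone.
 */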
#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */