/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")
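
/*
 * A minimal usage sketch, with hypothetical variables "data" and "ready":
 * a producer/consumer handoff expressed with the acquire/release functions
 * declared below rather than explicit *mb() fences.
 *
 *	static u_int data, ready;
 *
 *	producer:	data = 42;
 *			atomic_store_rel_int(&ready, 1);
 *	consumer:	while (atomic_load_acq_int(&ready) == 0)
 *				;
 *			(here the consumer is guaranteed to see data == 42)
 */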

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors:
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
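
/*
 * For illustration, a sketch using a hypothetical flags word (the variable
 * and bit value are not part of this interface):
 *
 *	static volatile u_int flags;
 *	#define	F_BUSY	0x01
 *
 *	atomic_set_int(&flags, F_BUSY);		flags |= F_BUSY, atomically
 *	...
 *	atomic_clear_int(&flags, F_BUSY);	flags &= ~F_BUSY, atomically
 */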

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandset_long(volatile u_long *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
int	atomic_testandclear_long(volatile u_long *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 *	if (*dst == expect)
 *		*dst = src;
 *
 * Returns 0 on failure, non-zero on success.  The _int version operates
 * on 32-bit words, the _long version on 64-bit words.
 */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgq %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}
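
/*
 * A sketch of the usual read-modify-CAS loop built on cmpset; the helper
 * name and the saturation policy are hypothetical, not part of this file:
 *
 *	static __inline void
 *	sat_inc_int(volatile u_int *p, u_int limit)
 *	{
 *		u_int old;
 *
 *		do {
 *			old = *p;
 *			if (old == limit)
 *				return;		(already saturated)
 *		} while (atomic_cmpset_int(p, old, old + 1) == 0);
 *	}
 */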

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and
 * return the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0,%1 ;		"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
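
/*
 * Sketch: because fetchadd returns the previous value, it can hand out
 * unique tickets; "next_id" is a hypothetical variable.
 *
 *	static volatile u_int next_id;
 *
 *	id = atomic_fetchadd_int(&next_id, 1);	distinct id per caller
 */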

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
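
/*
 * Sketch of the reordering the Store/Load barrier prevents (flag and
 * result names hypothetical): in the Dekker-style pattern each CPU stores
 * its own flag, then loads the other's.  Without a sequentially consistent
 * fence the load may be satisfied before the store is globally visible,
 * and both CPUs can read 0.
 *
 *	CPU 0				CPU 1
 *	flag0 = 1;			flag1 = 1;
 *	atomic_thread_fence_seq_cst();	atomic_thread_fence_seq_cst();
 *	r0 = flag1;			r1 = flag0;
 *
 * With the fences in place, r0 == 0 && r1 == 0 is impossible.
 */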

#if defined(_KERNEL)

/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x180
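
/*
 * For reference, the vm_machdep.c cross-check is a compile-time assertion
 * along these lines (a sketch, not quoted verbatim):
 *
 *	CTASSERT(OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf));
 */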

#if defined(SMP)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)					\
	ATOMIC_LOAD(TYPE);					\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */
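
/*
 * Sketch: a minimal test-and-set spinlock built on swap; the variable is
 * hypothetical, and kernel code would use the regular locking primitives
 * instead.  Note that xchg carries an implicit lock prefix, which is why
 * the functions above need no MPLOCKED.
 *
 *	static volatile u_int lk;		0 = free, 1 = held
 *
 *	while (atomic_swap_int(&lk, 1) != 0)
 *		;				spin until we take it
 *	(critical section)
 *	atomic_store_rel_int(&lk, 0);		release
 */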

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
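
/*
 * Sketch: readandclear drains an accumulated mask in one atomic step;
 * "events" is a hypothetical variable.
 *
 *	static volatile u_int events;
 *
 *	pending = atomic_readandclear_int(&events);
 *	(handle every bit set in "pending"; new arrivals stay in "events")
 */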

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_testandset_64	atomic_testandset_long
#define	atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_swap_ptr		atomic_swap_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */