/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
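
/*
 * Illustrative sketch (not part of this header's API): setting and
 * clearing flag bits in a shared word with the operations above.  The
 * names "flags", "FLAG_BUSY" and "FLAG_DIRTY" are invented for the
 * example.
 *
 *	volatile u_int flags;
 *
 *	atomic_set_int(&flags, FLAG_BUSY);	(flags |= FLAG_BUSY)
 *	atomic_clear_int(&flags, FLAG_DIRTY);	(flags &= ~FLAG_DIRTY)
 */
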
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);

#define	ATOMIC_LOAD(TYPE, LOP)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized so that the compiler cannot remove it as
 * dead code.  GCC also aggressively reorders operations, so the barrier
 * variants must clobber "memory" to prevent such reordering across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "memory", "cc");				\
}							\
struct __hack

#if defined(_KERNEL) && !defined(WANT_FUNCTIONS)

/* I486 does not support SMP or CMPXCHG8B. */
static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *high, *low;
	uint64_t res;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl %1,%%eax ;		"
	"	movl %2,%%edx ;		"
	"	popfl"
	: "=&A" (res)		/* 0 */
	: "m" (*low),		/* 1 */
	  "m" (*high)		/* 2 */
	: "memory");

	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *high, *low;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl %%eax,%0 ;		"
	"	movl %%edx,%1 ;		"
	"	popfl"
	: "=m" (*low),		/* 0 */
	  "=m" (*high)		/* 1 */
	: "A" (v)		/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl %%ebx,%%eax ;	"
	"	movl %%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %2"
	: "=&A" (res),		/* 0 */
	  "=m" (*p)		/* 1 */
	: "m" (*p)		/* 2 */
	: "memory", "cc");

	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl %%eax,%%ebx ;	"
	"	movl %%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne 1b"
	: "=m" (*p),		/* 0 */
	  "+A" (v)		/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

#endif /* _KERNEL && !WANT_FUNCTIONS */

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */
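
/*
 * Illustrative sketch (not part of this header's API): the typical
 * retry loop built on atomic_cmpset_int().  The loop re-reads the
 * current value and retries until the compare-and-set succeeds;
 * "example_set_bits" is an invented name.
 *
 *	static __inline void
 *	example_set_bits(volatile u_int *p, u_int bits)
 *	{
 *		u_int old;
 *
 *		do {
 *			old = *p;
 *		} while (atomic_cmpset_int(p, old, old | bits) == 0);
 *	}
 */
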
#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),		/* 0 */
	  "=m" (*dst)		/* 1 */
	: "r" (src),		/* 2 */
	  "r" (expect),		/* 3 */
	  "m" (*dst)		/* 4 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=a" (res),		/* 0 */
	  "=m" (*dst)		/* 1 */
	: "r" (src),		/* 2 */
	  "a" (expect),		/* 3 */
	  "m" (*dst)		/* 4 */
	: "memory", "cc");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),		/* 0 (result) */
	  "=m" (*p)		/* 1 */
	: "m" (*p)		/* 2 */
	: "cc");
	return (v);
}
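
/*
 * Illustrative sketch (not part of this header's API): fetchadd is the
 * usual way to hand out unique tickets from a shared counter; "counter"
 * and "ticket" are invented names.
 *
 *	volatile u_int counter;
 *	u_int ticket;
 *
 *	ticket = atomic_fetchadd_int(&counter, 1);
 *
 * ticket holds the pre-increment value, and counter has been
 * incremented atomically.
 */
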
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, loads may pass stores, so for atomic_load_acq we have to
 * ensure a Store/Load barrier to do the load in SMP kernels.  We use
 * "lock cmpxchg" as recommended by the AMD Software Optimization
 * Guide, and not mfence.  For UP kernels, however, the cache of the
 * single processor is always consistent, so we only need to take care
 * of the compiler.
 */
#define	ATOMIC_STORE(TYPE)				\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("" : : : "memory");		\
	*p = v;						\
}							\
struct __hack
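
/*
 * Illustrative sketch of the acquire/release pairing described above;
 * "data" and "ready" are invented names.
 *
 *	(producer)
 *	data = ...;
 *	atomic_store_rel_int(&ready, 1);
 *
 *	(consumer)
 *	while (atomic_load_acq_int(&ready) == 0)
 *		;
 *	... data is now safe to read ...
 */
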
#if defined(_KERNEL) && !defined(SMP)

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__asm __volatile("" : : : "memory");		\
							\
	return (tmp);					\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory", "cc");				\
							\
	return (res);					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);
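
/*
 * For reference (a sketch, not compiler output): with MPLOCKED defined
 * as "lock ; ", ATOMIC_ASM(set, int, "orl %1,%0", "ir", v) above
 * expands in the inline case to roughly
 *
 *	static __inline void
 *	atomic_set_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; orl %1,%0"
 *		: "=m" (*p)
 *		: "ir" (v), "m" (*p)
 *		: "cc");
 *	}
 *
 * plus an atomic_set_barr_int() variant that also clobbers "memory".
 */
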
ATOMIC_LOAD(char,  "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int,   "cmpxchgl %0,%1");
ATOMIC_LOAD(long,  "cmpxchgl %0,%1");

ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE

#ifndef WANT_FUNCTIONS

#ifdef _KERNEL
extern uint64_t (*atomic_load_acq_64)(volatile uint64_t *);
extern void	(*atomic_store_rel_64)(volatile uint64_t *, uint64_t);
#endif

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),		/* 0 */
	  "=m" (*addr)		/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),		/* 0 */
	  "=m" (*addr)		/* 1 */
	: "m" (*addr));

	return (res);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *addr);
u_long	atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */
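
/*
 * Illustrative sketch (not part of this header's API): readandclear is
 * convenient for draining a word of pending-event bits in one atomic
 * step; "pending" and "events" are invented names.
 *
 *	volatile u_int pending;
 *	u_int events;
 *
 *	events = atomic_readandclear_int(&pending);
 *
 * Every bit that was set is now in events, and pending is zero.
 */
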
#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
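
/*
 * Illustrative sketch (not part of this header's API): installing a new
 * list head with atomic_cmpset_ptr(); "head", "old_head" and "new_head"
 * are invented names, and as with any compare-and-set the caller
 * retries on failure.
 *
 *	do {
 *		old_head = head;
 *		new_head->next = old_head;
 *	} while (atomic_cmpset_ptr(&head, old_head, new_head) == 0);
 */
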
#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */