/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_ASM_ATOMIC_H_
#define	_ASM_ATOMIC_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

#define	ATOMIC_INIT(x)	{ .counter = (x) }

typedef struct {
	volatile int counter;
} atomic_t;
/*------------------------------------------------------------------------*
 *	32-bit atomic operations
 *------------------------------------------------------------------------*/
#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_add_and_test(i, v)	(atomic_add_return((i), (v)) == 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))
#define	atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
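/*
 * Usage sketch (illustrative only, not part of this header): the macros
 * above combine into the usual reference-count idiom, where
 * atomic_inc_not_zero() takes a reference only while the object is still
 * live and atomic_dec_and_test() detects the final release.  The
 * "struct obj" type and obj_free() callback below are hypothetical.
 *
 *	struct obj {
 *		atomic_t refcount;	// starts as ATOMIC_INIT(1)
 *	};
 *
 *	static bool
 *	obj_tryget(struct obj *o)
 *	{
 *		// Fails once the count has already dropped to zero.
 *		return (atomic_inc_not_zero(&o->refcount));
 *	}
 *
 *	static void
 *	obj_put(struct obj *o)
 *	{
 *		// True only for the thread dropping the last reference.
 *		if (atomic_dec_and_test(&o->refcount))
 *			obj_free(o);
 *	}
 */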
static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -i) - i;
}
static inline void
atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

static inline void
atomic_set_release(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_set_int(&v->counter, mask);
}

static inline int
atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static inline int
atomic_inc(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -1) - 1;
}
static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
	int c;

	for (;;) {
		c = atomic_read(v);
		if (unlikely(c == u))
			break;
		if (likely(atomic_cmpset_int(&v->counter, c, c + a)))
			break;
	}
	return (c != u);
}
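/*
 * Behavior sketch (illustrative only): atomic_add_unless() returns
 * nonzero iff the addition was performed, i.e. iff the counter value
 * observed at the decision point differed from "u".
 *
 *	atomic_t a = ATOMIC_INIT(3);
 *
 *	atomic_add_unless(&a, 2, 5);	// counter becomes 5, returns nonzero
 *	atomic_add_unless(&a, 2, 5);	// counter stays 5, returns 0
 */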
static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_clear_int(&v->counter, mask);
}
static inline int
atomic_xchg(atomic_t *v, int i)
{
#if !defined(__mips__)
	return (atomic_swap_int(&v->counter, i));
#else
	int ret;

	for (;;) {
		ret = READ_ONCE(v->counter);
		if (atomic_cmpset_int(&v->counter, ret, i))
			break;
	}
	return (ret);
#endif
}
static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret = old;

	for (;;) {
		if (atomic_cmpset_int(&v->counter, old, new))
			break;
		ret = READ_ONCE(v->counter);
		if (ret != old)
			break;
	}
	return (ret);
}
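/*
 * Usage sketch (illustrative only): atomic_cmpxchg() evaluates to the
 * value that was actually observed, which equals "old" on success, so
 * the canonical update loop re-reads until it wins.  The saturating
 * increment below is a hypothetical helper, not part of this header.
 *
 *	static int
 *	atomic_inc_saturated(atomic_t *v, int max)
 *	{
 *		int c, old;
 *
 *		c = atomic_read(v);
 *		while (c < max) {
 *			old = atomic_cmpxchg(v, c, c + 1);
 *			if (old == c)
 *				break;		// our increment won
 *			c = old;		// lost the race; retry
 *		}
 *		return (c);
 *	}
 */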
#if defined(__amd64__) || defined(__arm64__) || defined(__i386__)
#define	LINUXKPI_ATOMIC_8(...) __VA_ARGS__
#define	LINUXKPI_ATOMIC_16(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_8(...)
#define	LINUXKPI_ATOMIC_16(...)
#endif

#if !(defined(i386) || (defined(__mips__) && !(defined(__mips_n32) || \
    defined(__mips_n64))) || (defined(__powerpc__) && \
    !defined(__powerpc64__)))
#define	LINUXKPI_ATOMIC_64(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_64(...)
#endif
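/*
 * The LINUXKPI_ATOMIC_{8,16,64}() macros paste their arguments into the
 * surrounding code only on platforms that provide the matching
 * atomic_fcmpset_<N>() / atomic_swap_<N>() primitives; elsewhere they
 * expand to nothing, compiling the unsupported switch cases out.  A
 * sketch of the effect:
 *
 *	LINUXKPI_ATOMIC_64(case 8: ... break;)
 *	// 64-bit capable:  case 8: ... break;
 *	// otherwise:       (nothing)
 */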
#define	cmpxchg(ptr, old, new) ({					\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret = { .val = (old) }, __new = { .val = (new) };		\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	case 4:								\
		while (!atomic_fcmpset_32((volatile u32 *)(ptr),	\
		    __ret.u32, __new.u32[0]) && __ret.val == (old))	\
			;						\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		while (!atomic_fcmpset_64((volatile u64 *)(ptr),	\
		    __ret.u64, __new.u64[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	}								\
	__ret.val;							\
})

#define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)
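/*
 * Usage sketch (the "state" variable is hypothetical): unlike the
 * atomic_*() helpers, cmpxchg() operates on plain scalars and evaluates
 * to the value observed before the exchange attempt.
 *
 *	static uint32_t state;
 *
 *	// Transition state 0 -> 1 exactly once; racing threads see 1.
 *	if (cmpxchg(&state, 0, 1) == 0) {
 *		// this thread performed the transition
 *	}
 */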
#define	xchg(ptr, new) ({						\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret, __new = { .val = (new) };				\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		__ret.val = READ_ONCE(*ptr);				\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]))				\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		__ret.val = READ_ONCE(*ptr);				\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]))				\
			;						\
		break;							\
	)								\
	case 4:								\
		__ret.u32[0] = atomic_swap_32((volatile u32 *)(ptr),	\
		    __new.u32[0]);					\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		__ret.u64[0] = atomic_swap_64((volatile u64 *)(ptr),	\
		    __new.u64[0]);					\
		break;							\
	)								\
	}								\
	__ret.val;							\
})
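/*
 * Usage sketch (the "pending" variable is hypothetical): xchg() stores
 * the new value and evaluates to the previous one, e.g. to consume a
 * pending flag in one step:
 *
 *	static uint32_t pending;
 *
 *	if (xchg(&pending, 0) != 0) {
 *		// ...handle the work that was pending...
 *	}
 */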
static inline int
atomic_dec_if_positive(atomic_t *v)
{
	int retval, old;

	old = atomic_read(v);
	for (;;) {
		retval = old - 1;
		if (unlikely(retval < 0))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &old, retval)))
			break;
	}
	return (retval);
}
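/*
 * Behavior sketch: the counter is decremented only if the result would
 * be non-negative, and the (possibly negative) would-be result is
 * returned either way.
 *
 *	atomic_t sem = ATOMIC_INIT(1);
 *
 *	atomic_dec_if_positive(&sem);	// counter 1 -> 0, returns 0
 *	atomic_dec_if_positive(&sem);	// counter stays 0, returns -1
 */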
#define	LINUX_ATOMIC_OP(op, c_op)				\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
}

#define	LINUX_ATOMIC_FETCH_OP(op, c_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
								\
	return (c);						\
}
LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(andnot, &~)
LINUX_ATOMIC_OP(xor, ^)

LINUX_ATOMIC_FETCH_OP(or, |)
LINUX_ATOMIC_FETCH_OP(and, &)
LINUX_ATOMIC_FETCH_OP(andnot, &~)
LINUX_ATOMIC_FETCH_OP(xor, ^)
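/*
 * Sketch of the generated interfaces (flag bits are hypothetical): the
 * LINUX_ATOMIC_FETCH_OP() instances return the value seen before the
 * bitwise update, so a caller can tell whether it set a bit first.
 *
 *	atomic_t flags = ATOMIC_INIT(0);
 *
 *	atomic_or(0x1, &flags);				// flags = 0x1
 *	if ((atomic_fetch_or(0x2, &flags) & 0x2) == 0) {
 *		// 0x2 was newly set by this caller; flags is now 0x3
 *	}
 */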
#endif					/* _ASM_ATOMIC_H_ */