/*-
 * Copyright (c) 2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_ASM_ATOMIC64_H_
#define	_ASM_ATOMIC64_H_

#include <sys/cdefs.h>
#include <sys/types.h>
#include <machine/atomic.h>

typedef struct {
	volatile int64_t counter;
} atomic64_t;

/*------------------------------------------------------------------------*
 *	64-bit atomic operations
 *------------------------------------------------------------------------*/

#define	atomic64_add(i, v)		atomic64_add_return((i), (v))
#define	atomic64_sub(i, v)		atomic64_sub_return((i), (v))
#define	atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define	atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)
#define	atomic64_add_and_test(i, v)	(atomic64_add_return((i), (v)) == 0)
#define	atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define	atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define	atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
#define	atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define	atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

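/*
 * The macros above are thin wrappers; the actual memory operations are
 * implemented by the inline functions below on top of the native
 * machine/atomic.h primitives.
 */
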
static inline int64_t
atomic64_add_return(int64_t i, atomic64_t *v)
{
	return i + atomic_fetchadd_64(&v->counter, i);
}

static inline int64_t
atomic64_sub_return(int64_t i, atomic64_t *v)
{
	return atomic_fetchadd_64(&v->counter, -i) - i;
}

static inline void
atomic64_set(atomic64_t *v, int64_t i)
{
	atomic_store_rel_64(&v->counter, i);
}

static inline int64_t
atomic64_read(atomic64_t *v)
{
	return atomic_load_acq_64(&v->counter);
}

static inline int64_t
atomic64_inc(atomic64_t *v)
{
	return atomic_fetchadd_64(&v->counter, 1) + 1;
}

static inline int64_t
atomic64_dec(atomic64_t *v)
{
	return atomic_fetchadd_64(&v->counter, -1) - 1;
}

/*
 * Atomically add 'a' to the counter unless its current value is 'u'.
 * Returns non-zero if the addition was performed.
 */
static inline int64_t
atomic64_add_unless(atomic64_t *v, int64_t a, int64_t u)
{
	int64_t c;

	for (;;) {
		c = atomic64_read(v);
		if (unlikely(c == u))
			break;
		if (likely(atomic_cmpset_64(&v->counter, c, c + a)))
			break;
	}
	return (c != u);
}

/*
 * Atomically exchange the counter with 'i' and return the previous value.
 * Architectures without a native 64-bit swap fall back to a cmpset loop.
 */
static inline int64_t
atomic64_xchg(atomic64_t *v, int64_t i)
{
#if defined(__i386__) || defined(__amd64__) || \
    defined(__arm__) || defined(__aarch64__)
	return (atomic_swap_64(&v->counter, i));
#else
	int64_t ret;

	for (;;) {
		ret = atomic_load_acq_64(&v->counter);
		if (atomic_cmpset_64(&v->counter, ret, i))
			break;
	}
	return (ret);
#endif
}

/*
 * Atomically set the counter to 'new' if it currently equals 'old'.
 * Returns the value observed before the operation (equal to 'old' on
 * success), matching Linux cmpxchg() semantics.
 */
static inline int64_t
atomic64_cmpxchg(atomic64_t *v, int64_t old, int64_t new)
{
	int64_t ret = old;

	for (;;) {
		if (atomic_cmpset_64(&v->counter, old, new))
			break;
		ret = atomic_load_acq_64(&v->counter);
		if (ret != old)
			break;
	}
	return (ret);
}

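/*
 * Illustrative usage sketch (the 'refs' counter and release_object() are
 * hypothetical and not part of this header):
 *
 *	static atomic64_t refs;
 *
 *	atomic64_set(&refs, 1);
 *	atomic64_inc(&refs);
 *	if (atomic64_dec_and_test(&refs))
 *		release_object();
 *	int64_t snap = atomic64_read(&refs);
 */
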
#endif					/* _ASM_ATOMIC64_H_ */