/*===-- atomic.c - Implement support functions for atomic operations.------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 *===----------------------------------------------------------------------===
 *
 *  atomic.c defines a set of functions for performing atomic accesses on
 *  arbitrary-sized memory locations.  This design uses locks that should
 *  be fast in the uncontended case, for two reasons:
 *
 *  1) This code must work with C programs that do not link to anything
 *     (including pthreads) and so it should not depend on any pthread
 *     functions.
 *  2) Atomic operations, rather than explicit mutexes, are most commonly used
 *     on code where contended operations are rare.
 *
 *  To avoid needing a per-object lock, this code allocates an array of
 *  locks and hashes the object pointers to find the one that it should use.
 *  For operations that must be atomic on two locations, the lower lock is
 *  always acquired first, to avoid deadlock (see the sketch below).
 *
 *===----------------------------------------------------------------------===
 */

#include <stdint.h>
#include <string.h>

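// For illustration only (a sketch, not part of this file): an operation that
// had to be atomic across two locations would take both locks in address
// order, lower lock first, along these lines:
//
//   Lock *la = lock_for_pointer(a);
//   Lock *lb = lock_for_pointer(b);
//   if (lb < la) { Lock *tmp = la; la = lb; lb = tmp; }
//   lock(la);
//   if (lb != la)
//     lock(lb);
//   /* ...perform the two-location update... */
//   if (lb != la)
//     unlock(lb);
//   unlock(la);
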
// Clang objects if you redefine a builtin.  This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c __atomic_load
#pragma redefine_extname __atomic_store_c __atomic_store
#pragma redefine_extname __atomic_exchange_c __atomic_exchange
#pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange

/// Number of locks.  This allocates one page on 32-bit platforms, two on
/// 64-bit.  This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1<<10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;

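// For example (a hypothetical build invocation, not part of this file): a
// platform expecting heavier contention could enlarge the lock table when
// compiling this file, at the cost of more static memory:
//
//   cc -c atomic.c -DSPINLOCK_COUNT='(1<<12)'
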
////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation.  Falls back to spinlocks if none is
// defined.  Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
typedef struct _usem Lock;
inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t)*)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t)*)&l->_count,
        &old, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = { [0 ... SPINLOCK_COUNT-1] = {0,1,0} };
#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock.  This is a release operation.
inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
        __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif

/// Returns a lock to use for a given pointer.
static inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits.  We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the
  // same lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use
  return locks + (hash & SPINLOCK_MASK);
}

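// For illustration only (a sketch, not part of this file): because the low
// four bits are discarded, nearby addresses map to the same lock, so a single
// lock() covers an entire small object:
//
//   char buf[16];
//   Lock *a = lock_for_pointer(&buf[0]);
//   Lock *b = lock_for_pointer(&buf[15]);
//   // a == b whenever buf does not straddle a 16-byte boundary
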
/// Macros for determining whether a size is lock free.  Clang cannot yet
/// codegen __atomic_is_lock_free(16), so for now we assume 16-byte values are
/// not lock free.
#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)
#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
#define IS_LOCK_FREE_16 0

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.
#define LOCK_FREE_CASES() \
  do {\
    switch (size) {\
      case 2:\
        if (IS_LOCK_FREE_2) {\
          LOCK_FREE_ACTION(uint16_t);\
        }\
        break;\
      case 4:\
        if (IS_LOCK_FREE_4) {\
          LOCK_FREE_ACTION(uint32_t);\
        }\
        break;\
      case 8:\
        if (IS_LOCK_FREE_8) {\
          LOCK_FREE_ACTION(uint64_t);\
        }\
        break;\
      case 16:\
        if (IS_LOCK_FREE_16) {\
          /* FIXME: __uint128_t isn't available on 32 bit platforms.
          LOCK_FREE_ACTION(__uint128_t);*/\
        }\
        break;\
    }\
  } while (0)

/// An atomic load operation.  This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type) \
    *((type*)dest) = __c11_atomic_load((_Atomic(type)*)src, model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

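// For illustration only (a hypothetical caller, not part of this file): when
// an atomic object is too large for any lock-free path, Clang lowers the
// access to a call to the generic function above, roughly:
//
//   struct Triple { uint32_t a, b, c; };  // 12 bytes: no lock-free case fits
//   _Atomic struct Triple t;
//   struct Triple copy;
//   // `copy = t;` becomes approximately:
//   // __atomic_load_c(sizeof(t), &t, &copy, __ATOMIC_SEQ_CST);
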
/// An atomic store operation.  This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type) \
    __c11_atomic_store((_Atomic(type)*)dest, *(type*)src, model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation.  If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they are not, then this stores the current value from *ptr in
/// *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
    void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type) \
  return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr,\
      (type*)expected, *(type*)desired, success, failure)
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}

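// For illustration only (a sketch reusing the hypothetical struct Triple from
// the comment above): the generic function implements the usual strong
// compare-and-swap contract, so a caller can build a retry loop on top of it:
//
//   struct Triple expected, desired;
//   __atomic_load_c(sizeof(t), &t, &expected, __ATOMIC_SEQ_CST);
//   do {
//     desired = expected;
//     desired.c++;
//   } while (!__atomic_compare_exchange_c(sizeof(t), &t, &expected, &desired,
//                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
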
/// Performs an atomic exchange operation between two pointers.  This is
/// atomic with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type) \
  *(type*)old = __c11_atomic_exchange((_Atomic(type)*)ptr, *(type*)val,\
      model);\
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#define OPTIMISED_CASES\
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
  /* FIXME: __uint128_t isn't available on 32 bit platforms.
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)*/\

#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_load_##n(type *src, int model) {\
  if (lockfree)\
    return __c11_atomic_load((_Atomic(type)*)src, model);\
  Lock *l = lock_for_pointer(src);\
  lock(l);\
  type val = *src;\
  unlock(l);\
  return val;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

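// For illustration only: after preprocessing, the n == 4 instantiation above
// expands to (roughly):
//
//   uint32_t __atomic_load_4(uint32_t *src, int model) {
//     if (IS_LOCK_FREE_4)
//       return __c11_atomic_load((_Atomic(uint32_t)*)src, model);
//     Lock *l = lock_for_pointer(src);
//     lock(l);
//     uint32_t val = *src;
//     unlock(l);
//     return val;
//   }
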
#define OPTIMISED_CASE(n, lockfree, type)\
void __atomic_store_##n(type *dest, type val, int model) {\
  if (lockfree) {\
    __c11_atomic_store((_Atomic(type)*)dest, val, model);\
    return;\
  }\
  Lock *l = lock_for_pointer(dest);\
  lock(l);\
  *dest = val;\
  unlock(l);\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_exchange_##n(type *dest, type val, int model) {\
  if (lockfree)\
    return __c11_atomic_exchange((_Atomic(type)*)dest, val, model);\
  Lock *l = lock_for_pointer(dest);\
  lock(l);\
  type tmp = *dest;\
  *dest = val;\
  unlock(l);\
  return tmp;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)\
int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,\
    int success, int failure) {\
  if (lockfree)\
    return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, expected,\
        desired, success, failure);\
  Lock *l = lock_for_pointer(ptr);\
  lock(l);\
  if (*ptr == *expected) {\
    *ptr = desired;\
    unlock(l);\
    return 1;\
  }\
  *expected = *ptr;\
  unlock(l);\
  return 0;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op) \
type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {\
  if (lockfree)\
    return __c11_atomic_fetch_##opname((_Atomic(type)*)ptr, val, model);\
  Lock *l = lock_for_pointer(ptr);\
  lock(l);\
  type tmp = *ptr;\
  *ptr = tmp op val;\
  unlock(l);\
  return tmp;\
}

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE
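
// For illustration only: each ATOMIC_RMW instantiation above defines a
// fetch-and-op function; the n == 4, add case expands to (roughly):
//
//   uint32_t __atomic_fetch_add_4(uint32_t *ptr, uint32_t val, int model) {
//     if (IS_LOCK_FREE_4)
//       return __c11_atomic_fetch_add((_Atomic(uint32_t)*)ptr, val, model);
//     Lock *l = lock_for_pointer(ptr);
//     lock(l);
//     uint32_t tmp = *ptr;
//     *ptr = tmp + val;
//     unlock(l);
//     return tmp;
//   }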