/*===-- atomic.c - Implement support functions for atomic operations.------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 *===----------------------------------------------------------------------===
 *
 *  atomic.c defines a set of functions for performing atomic accesses on
 *  arbitrary-sized memory locations.  This design uses locks that should
 *  be fast in the uncontended case, for two reasons:
 *
 *  1) This code must work with C programs that do not link to anything
 *     (including pthreads) and so it should not depend on any pthread
 *     functions.
 *  2) Atomic operations, rather than explicit mutexes, are most commonly used
 *     in code where contended operations are rare.
 *
 *  To avoid needing a per-object lock, this code allocates an array of
 *  locks and hashes the object pointers to find the one that it should use.
 *  For operations that must be atomic on two locations, the lower lock is
 *  always acquired first, to avoid deadlock.
 *
 *===----------------------------------------------------------------------===
 */

#include <stdint.h>
#include <string.h>

#include "assembly.h"
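
#if 0
// Usage sketch (not compiled): how these entry points are reached.  Clang
// lowers atomic operations on types with no lock-free instructions into calls
// to the generic __atomic_* functions defined in this file.  The names
// `blob_t`, `shared`, and `snapshot` are hypothetical, for illustration only.
#include <stdatomic.h>
typedef struct { char bytes[32]; } blob_t;
static _Atomic blob_t shared;
static blob_t snapshot(void) {
  // No 32-byte lock-free load exists on common targets, so this call is
  // lowered to the equivalent of
  //   __atomic_load(sizeof(blob_t), &shared, &result, memory_order_seq_cst);
  // which the pragmas below bind to __atomic_load_c.
  return atomic_load(&shared);
}
#endif
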
// Clang objects if you redefine a builtin.  This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(__atomic_compare_exchange)
/// Number of locks.  This allocates one page on 32-bit platforms, two on
/// 64-bit.  This can be specified externally if a different tradeoff between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1<<10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
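// For example, a platform expecting heavier contention could be built with
// -DSPINLOCK_COUNT='(1<<12)' (an illustrative value, not a tuned
// recommendation) to spend four pages of locks on 32-bit platforms, eight on
// 64-bit, in exchange for a lower collision probability.  The value must be a
// power of two for SPINLOCK_MASK to work.
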
////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation.  Falls back to spinlocks if none is
// defined.  Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
typedef struct _usem Lock;
__inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t)*)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
__inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t)*)&l->_count, &old,
        0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = { [0 ... SPINLOCK_COUNT-1] = {0,1,0} };

#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
__inline static void unlock(Lock *l) {
  OSSpinLockUnlock(l);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  OSSpinLockLock(l);
}
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0

#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock.  This is a release operation.
__inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
        __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif

/// Returns the lock to use for a given pointer.
static __inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits.  We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use
  return locks + (hash & SPINLOCK_MASK);
}
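
// Worked example (illustrative): with the default SPINLOCK_COUNT of 1<<10,
// any pointer in the 16-byte granule 0x1000-0x100f hashes as
//   hash = 0x1000 >> 4 = 0x100; low = 0x100; (0x100 >> 16) ^ 0x100 = 0x100
// so all of them share locks[0x100], while a field at 0x1010 falls in the
// next granule and maps to locks[0x101].
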
/// Macros for determining whether a size is lock free.  Clang cannot yet
/// codegen __atomic_is_lock_free(16), so for now we assume 16-byte values are
/// not lock free.
#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)
#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
#define IS_LOCK_FREE_16 0
/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.
#define LOCK_FREE_CASES() \
  do {\
  switch (size) {\
    case 2:\
      if (IS_LOCK_FREE_2) {\
        LOCK_FREE_ACTION(uint16_t);\
      }\
      break;\
    case 4:\
      if (IS_LOCK_FREE_4) {\
        LOCK_FREE_ACTION(uint32_t);\
      }\
      break;\
    case 8:\
      if (IS_LOCK_FREE_8) {\
        LOCK_FREE_ACTION(uint64_t);\
      }\
      break;\
    case 16:\
      if (IS_LOCK_FREE_16) {\
        /* FIXME: __uint128_t isn't available on 32 bit platforms.
        LOCK_FREE_ACTION(__uint128_t);*/\
      }\
      break;\
  }\
  } while (0)

/// An atomic load operation.  This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type) \
    *((type*)dest) = __c11_atomic_load((_Atomic(type)*)src, model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// An atomic store operation.  This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type) \
    __c11_atomic_store((_Atomic(type)*)dest, *(type*)src, model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation.  If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they are not, then this stores the current value from *ptr in
/// *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
    void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type) \
  return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, (type*)expected,\
      *(type*)desired, success, failure)
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}
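
#if 0
// Usage sketch (not compiled): a compare-exchange on an oversized type that
// reaches __atomic_compare_exchange_c.  `pair_t`, `slot`, and `try_publish`
// are hypothetical names for illustration only.
#include <stdatomic.h>
typedef struct { void *key; void *value; } pair_t;
static _Atomic pair_t slot;
static int try_publish(pair_t *expected, pair_t desired) {
  // On targets without a 16-byte lock-free compare-exchange, this lowers to
  // the equivalent of __atomic_compare_exchange(sizeof(pair_t), &slot,
  // expected, &desired, memory_order_seq_cst, memory_order_seq_cst).
  return atomic_compare_exchange_strong(&slot, expected, desired);
}
#endif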

/// Performs an atomic exchange operation between two pointers.  This is atomic
/// with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type) \
    *(type*)old = __c11_atomic_exchange((_Atomic(type)*)ptr, *(type*)val,\
        model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES\
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES\
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif

#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_load_##n(type *src, int model) {\
  if (lockfree)\
    return __c11_atomic_load((_Atomic(type)*)src, model);\
  Lock *l = lock_for_pointer(src);\
  lock(l);\
  type val = *src;\
  unlock(l);\
  return val;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
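
// For illustration, OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) expands the
// macro above into roughly:
//   uint32_t __atomic_load_4(uint32_t *src, int model) {
//     if (IS_LOCK_FREE_4)
//       return __c11_atomic_load((_Atomic(uint32_t)*)src, model);
//     Lock *l = lock_for_pointer(src);
//     lock(l);
//     uint32_t val = *src;
//     unlock(l);
//     return val;
//   }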

#define OPTIMISED_CASE(n, lockfree, type)\
void __atomic_store_##n(type *dest, type val, int model) {\
  if (lockfree) {\
    __c11_atomic_store((_Atomic(type)*)dest, val, model);\
    return;\
  }\
  Lock *l = lock_for_pointer(dest);\
  lock(l);\
  *dest = val;\
  unlock(l);\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_exchange_##n(type *dest, type val, int model) {\
  if (lockfree)\
    return __c11_atomic_exchange((_Atomic(type)*)dest, val, model);\
  Lock *l = lock_for_pointer(dest);\
  lock(l);\
  type tmp = *dest;\
  *dest = val;\
  unlock(l);\
  return tmp;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)\
int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,\
    int success, int failure) {\
  if (lockfree)\
    return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, expected,\
        desired, success, failure);\
  Lock *l = lock_for_pointer(ptr);\
  lock(l);\
  if (*ptr == *expected) {\
    *ptr = desired;\
    unlock(l);\
    return 1;\
  }\
  *expected = *ptr;\
  unlock(l);\
  return 0;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op) \
type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {\
  if (lockfree)\
    return __c11_atomic_fetch_##opname((_Atomic(type)*)ptr, val, model);\
  Lock *l = lock_for_pointer(ptr);\
  lock(l);\
  type tmp = *ptr;\
  *ptr = tmp op val;\
  unlock(l);\
  return tmp;\
}

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE
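
// For illustration, the add/4 instantiation above behaves like:
//   uint32_t __atomic_fetch_add_4(uint32_t *ptr, uint32_t val, int model) {
//     if (IS_LOCK_FREE_4)
//       return __c11_atomic_fetch_add((_Atomic(uint32_t)*)ptr, val, model);
//     Lock *l = lock_for_pointer(ptr);
//     lock(l);
//     uint32_t tmp = *ptr;
//     *ptr = tmp + val;
//     unlock(l);
//     return tmp;  // fetch_* returns the value seen before the update
//   }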