//===-- atomic.c - Implement support functions for atomic operations.------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  atomic.c defines a set of functions for performing atomic accesses on
//  arbitrary-sized memory locations.  This design uses locks that should
//  be fast in the uncontended case, for two reasons:
//
//  1) This code must work with C programs that do not link to anything
//     (including pthreads) and so it should not depend on any pthread
//     functions.
//  2) Atomic operations, rather than explicit mutexes, are most commonly used
//     on code where contended operations are rare.
//
//  To avoid needing a per-object lock, this code allocates an array of
//  locks and hashes the object pointers to find the one that it should use.
//  For operations that must be atomic on two locations, the lower lock is
//  always acquired first, to avoid deadlock.
//
//===----------------------------------------------------------------------===//
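// For illustration only (not part of this file's interface): when an atomic
// object is too large for the target's lock-free instructions, Clang lowers
// accesses into calls to the generic functions defined below. The type `Big`
// here is a hypothetical example:
//
//   struct Big { char bytes[32]; };
//   _Atomic struct Big g;
//   struct Big b = g; // roughly: __atomic_load(32, &g, &b, __ATOMIC_SEQ_CST)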

#include <stdint.h>
#include <string.h>

#include "assembly.h"

// Clang objects if you redefine a builtin.  This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(              \
    __atomic_compare_exchange)

/// Number of locks.  This allocates one page on 32-bit platforms, two on
/// 64-bit.  This can be specified externally if a different trade between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1 << 10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;

////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation.  Falls back to spinlocks if none is
// defined.  Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
typedef struct _usem Lock;
__inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
__inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
                                             &old, 0, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};

#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) { OSSpinLockLock(l); }
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0

#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock.  This is a release operation.
__inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif

/// Returns a lock to use for a given pointer.
static __inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits.  We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use
  return locks + (hash & SPINLOCK_MASK);
}
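
// For illustration only: because the low 4 bits are discarded, any two
// addresses within the same 16-byte-aligned block select the same lock, so a
// single oversized access is guarded by one lock. Assuming `p` is 16-byte
// aligned:
//
//   lock_for_pointer(p) == lock_for_pointer((char *)p + 15)  // always true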

/// Macros for determining whether a size is lock free.
#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)

/// 32 bit PowerPC doesn't support 8-byte lock_free atomics
#if !defined(__powerpc64__) && defined(__powerpc__)
#define IS_LOCK_FREE_8 0
#else
#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
#endif

/// Clang can not yet codegen __atomic_is_lock_free(16), so for now we assume
/// 16-byte values are not lock free.
#define IS_LOCK_FREE_16 0

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.
#define LOCK_FREE_CASES()                                                      \
  do {                                                                         \
    switch (size) {                                                            \
    case 1:                                                                    \
      if (IS_LOCK_FREE_1) {                                                    \
        LOCK_FREE_ACTION(uint8_t);                                             \
      }                                                                        \
      break;                                                                   \
    case 2:                                                                    \
      if (IS_LOCK_FREE_2) {                                                    \
        LOCK_FREE_ACTION(uint16_t);                                            \
      }                                                                        \
      break;                                                                   \
    case 4:                                                                    \
      if (IS_LOCK_FREE_4) {                                                    \
        LOCK_FREE_ACTION(uint32_t);                                            \
      }                                                                        \
      break;                                                                   \
    case 8:                                                                    \
      if (IS_LOCK_FREE_8) {                                                    \
        LOCK_FREE_ACTION(uint64_t);                                            \
      }                                                                        \
      break;                                                                   \
    case 16:                                                                   \
      if (IS_LOCK_FREE_16) {                                                   \
        /* FIXME: __uint128_t isn't available on 32 bit platforms.             \
        LOCK_FREE_ACTION(__uint128_t);*/                                       \
      }                                                                        \
      break;                                                                   \
    }                                                                          \
  } while (0)
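
// Each function below defines LOCK_FREE_ACTION(type) to the lock-free
// operation it wants for a given type, invokes LOCK_FREE_CASES() to dispatch
// on `size`, and falls through to the lock-based slow path when no lock-free
// case matches.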

/// An atomic load operation.  This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model);            \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// An atomic store operation.  This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model);              \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation.  If the value at *ptr is identical
/// to the value at *expected, then this copies value at *desired to *ptr.  If
/// they are not, then this stores the current value from *ptr in *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type)                                                 \
  return __c11_atomic_compare_exchange_strong(                                 \
      (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success,       \
      failure)
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}
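
// Usage sketch (hypothetical; in practice the compiler emits these calls via
// the renamed __atomic_compare_exchange symbol): a CAS-increment loop over a
// 64-bit `counter`. On failure, `expected` is refreshed with the current
// value, so only `desired` needs recomputing:
//
//   uint64_t expected = 0, desired = 1;
//   while (!__atomic_compare_exchange_c(sizeof(uint64_t), &counter, &expected,
//                                       &desired, __ATOMIC_SEQ_CST,
//                                       __ATOMIC_SEQ_CST))
//     desired = expected + 1;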

/// Performs an atomic exchange operation between two pointers.  This is atomic
/// with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type)                                                 \
  *(type *)old =                                                               \
      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model);        \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES                                                        \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                   \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                  \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                  \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)                                  \
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES                                                        \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                   \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                  \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                  \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif
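
// Note: OPTIMISED_CASES instantiates whichever OPTIMISED_CASE macro is defined
// before each use, once per supported size. The load definition below, for
// example, expands into __atomic_load_1, __atomic_load_2, __atomic_load_4,
// __atomic_load_8 (and __atomic_load_16 when __uint128_t is available).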

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  type __atomic_load_##n(type *src, int model) {                               \
    if (lockfree)                                                              \
      return __c11_atomic_load((_Atomic(type) *)src, model);                   \
    Lock *l = lock_for_pointer(src);                                           \
    lock(l);                                                                   \
    type val = *src;                                                           \
    unlock(l);                                                                 \
    return val;                                                                \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  void __atomic_store_##n(type *dest, type val, int model) {                   \
    if (lockfree) {                                                            \
      __c11_atomic_store((_Atomic(type) *)dest, val, model);                   \
      return;                                                                  \
    }                                                                          \
    Lock *l = lock_for_pointer(dest);                                          \
    lock(l);                                                                   \
    *dest = val;                                                               \
    unlock(l);                                                                 \
    return;                                                                    \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  type __atomic_exchange_##n(type *dest, type val, int model) {                \
    if (lockfree)                                                              \
      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model);         \
    Lock *l = lock_for_pointer(dest);                                          \
    lock(l);                                                                   \
    type tmp = *dest;                                                          \
    *dest = val;                                                               \
    unlock(l);                                                                 \
    return tmp;                                                                \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                      \
  int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,   \
                                    int success, int failure) {                \
    if (lockfree)                                                              \
      return __c11_atomic_compare_exchange_strong(                             \
          (_Atomic(type) *)ptr, expected, desired, success, failure);          \
    Lock *l = lock_for_pointer(ptr);                                           \
    lock(l);                                                                   \
    if (*ptr == *expected) {                                                   \
      *ptr = desired;                                                          \
      unlock(l);                                                               \
      return 1;                                                                \
    }                                                                          \
    *expected = *ptr;                                                          \
    unlock(l);                                                                 \
    return 0;                                                                  \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op)                              \
  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {         \
    if (lockfree)                                                              \
      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model);    \
    Lock *l = lock_for_pointer(ptr);                                           \
    lock(l);                                                                   \
    type tmp = *ptr;                                                           \
    *ptr = tmp op val;                                                         \
    unlock(l);                                                                 \
    return tmp;                                                                \
  }

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE
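
// For illustration only: the expansions above define functions such as
//
//   uint32_t __atomic_fetch_add_4(uint32_t *ptr, uint32_t val, int model);
//
// which behaves like C11 atomic_fetch_add on a 4-byte object: it stores
// old + val and returns the old value, taking the lock-array slow path when
// 4-byte atomics are not lock-free on the target.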