//===-- atomic.c - Implement support functions for atomic operations.-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  atomic.c defines a set of functions for performing atomic accesses on
//  arbitrary-sized memory locations.  This design uses locks that should
//  be fast in the uncontended case, for two reasons:
//
//  1) This code must work with C programs that do not link to anything
//     (including pthreads) and so it should not depend on any pthread
//     functions.
//  2) Atomic operations, rather than explicit mutexes, are most commonly used
//     on code where contended operations are rare.
//
//  To avoid needing a per-object lock, this code allocates an array of
//  locks and hashes the object pointers to find the one that it should use.
//  For operations that must be atomic on two locations, the lower lock is
//  always acquired first, to avoid deadlock.
//
//===----------------------------------------------------------------------===//
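
// A sketch of the "lower lock first" rule described above, for a hypothetical
// operation on two locations (no such entry point exists in this file):
//   Lock *l1 = lock_for_pointer(p1);
//   Lock *l2 = lock_for_pointer(p2);
//   if (l2 < l1) { Lock *tmp = l1; l1 = l2; l2 = tmp; } // lower address first
//   lock(l1);
//   if (l2 != l1)
//     lock(l2);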

#include <stdint.h>
#include <string.h>

#include "assembly.h"

// Clang objects if you redefine a builtin.  This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(             \
    __atomic_compare_exchange)
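
// The effect: the function __atomic_load_c defined below is emitted under the
// libcall symbol __atomic_load (SYMBOL_NAME, from assembly.h, prepends the
// target's user-label prefix, e.g. a leading underscore on Mach-O).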

/// Number of locks.  This allocates one page on 32-bit platforms, two on
/// 64-bit.  This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1 << 10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
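
// Sizing arithmetic for the default (assuming 4 KiB pages and a word-sized
// Lock): 1 << 10 = 1024 locks, so 1024 * 4 bytes = 4 KiB (one page) on 32-bit
// and 1024 * 8 bytes = 8 KiB (two pages) on 64-bit.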

////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation.  Falls back to spinlocks if none is
// defined.  Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
typedef struct _usem Lock;
__inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
__inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
                                             &old, 0, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};

#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) { OSSpinLockLock(l); }
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0

#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock.  This is a release operation.
__inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif

/// Returns a lock to use for a given pointer.
static __inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits.  We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use
  return locks + (hash & SPINLOCK_MASK);
}
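
// A worked example of the hash (values are illustrative only): for
// ptr = (void *)0x7f1234567890 with the default SPINLOCK_MASK of 0x3ff,
//   hash  = 0x7f1234567890 >> 4         = 0x7f123456789
//   low   = 0x7f123456789 & 0x3ff       = 0x389
//   hash  = (0x7f123456789 >> 16) ^ low = 0x7f12345 ^ 0x389 = 0x7f120cc
//   index = 0x7f120cc & 0x3ff           = 0xcc
// so this pointer maps to locks[0xcc], and any pointer within the same
// 16-byte block maps to the same slot.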

/// Macros for determining whether a size is lock free.  Clang cannot yet
/// codegen __atomic_is_lock_free(16), so for now we assume 16-byte values are
/// not lock free.
#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)
#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
#define IS_LOCK_FREE_16 0

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist and falls through to the lock-based implementations
/// otherwise.
#define LOCK_FREE_CASES()                                                     \
  do {                                                                        \
    switch (size) {                                                           \
    case 1:                                                                   \
      if (IS_LOCK_FREE_1) { LOCK_FREE_ACTION(uint8_t); }                      \
      break;                                                                  \
    case 2:                                                                   \
      if (IS_LOCK_FREE_2) { LOCK_FREE_ACTION(uint16_t); }                     \
      break;                                                                  \
    case 4:                                                                   \
      if (IS_LOCK_FREE_4) { LOCK_FREE_ACTION(uint32_t); }                     \
      break;                                                                  \
    case 8:                                                                   \
      if (IS_LOCK_FREE_8) { LOCK_FREE_ACTION(uint64_t); }                     \
      break;                                                                  \
    case 16:                                                                  \
      if (IS_LOCK_FREE_16) {                                                  \
        /* FIXME: __uint128_t isn't available on 32 bit platforms.            \
        LOCK_FREE_ACTION(__uint128_t);*/                                      \
      }                                                                       \
      break;                                                                  \
    }                                                                         \
  } while (0)
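
// For instance, in __atomic_load_c below the size == 4 arm expands (roughly)
// to:
//   if (__c11_atomic_is_lock_free(4)) {
//     *((uint32_t *)dest) = __c11_atomic_load((_Atomic(uint32_t) *)src, model);
//     return;
//   }
//   break;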

/// An atomic load operation.  This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type)                                                \
  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model);           \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}
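
// A usage sketch (hypothetical caller; the compiler emits such a call for an
// atomic object with no lock-free path):
//   typedef struct { char bytes[3]; } Blob; // 3 bytes: never lock-free
//   Blob shared, copy;
//   __atomic_load(sizeof(Blob), &shared, &copy, __ATOMIC_SEQ_CST);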

/// An atomic store operation.  This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type)                                                \
  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model);             \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation.  If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they are not, then this stores the current value from *ptr in
/// *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type)                                                \
  return __c11_atomic_compare_exchange_strong(                                \
      (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success,      \
      failure)
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}
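
// A semantics sketch with hypothetical values: if *ptr holds 5 and *expected
// holds 5, the call installs *desired and returns 1; if *ptr holds 7, it
// leaves *ptr untouched, writes 7 into *expected, and returns 0, matching the
// C11 atomic_compare_exchange_strong contract.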

/// Performs an atomic exchange operation between two pointers.  This is
/// atomic with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type)                                                \
  *(type *)old =                                                              \
      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model);       \
  return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}
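
// A usage sketch (hypothetical caller): for the 3-byte Blob above,
//   __atomic_exchange(sizeof(Blob), &shared, &newv, &oldv, __ATOMIC_SEQ_CST);
// copies the previous contents of shared into oldv and installs newv, both
// under the same pointer-hashed lock.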

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES                                                       \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                  \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                 \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                 \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)                                 \
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES                                                       \
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)                                  \
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)                                 \
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)                                 \
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif

#define OPTIMISED_CASE(n, lockfree, type)                                     \
  type __atomic_load_##n(type *src, int model) {                              \
    if (lockfree)                                                             \
      return __c11_atomic_load((_Atomic(type) *)src, model);                  \
    Lock *l = lock_for_pointer(src);                                          \
    lock(l);                                                                  \
    type val = *src;                                                          \
    unlock(l);                                                                \
    return val;                                                               \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE
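
// For instance, OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) above defines
// (roughly):
//   uint32_t __atomic_load_4(uint32_t *src, int model) {
//     if (__c11_atomic_is_lock_free(4))
//       return __c11_atomic_load((_Atomic(uint32_t) *)src, model);
//     ... // otherwise take the pointer-hashed lock and do a plain load
//   }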

#define OPTIMISED_CASE(n, lockfree, type)                                     \
  void __atomic_store_##n(type *dest, type val, int model) {                  \
    if (lockfree) {                                                           \
      __c11_atomic_store((_Atomic(type) *)dest, val, model);                  \
      return;                                                                 \
    }                                                                         \
    Lock *l = lock_for_pointer(dest);                                         \
    lock(l);                                                                  \
    *dest = val;                                                              \
    unlock(l);                                                                \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                     \
  type __atomic_exchange_##n(type *dest, type val, int model) {               \
    if (lockfree)                                                             \
      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model);        \
    Lock *l = lock_for_pointer(dest);                                         \
    lock(l);                                                                  \
    type tmp = *dest;                                                         \
    *dest = val;                                                              \
    unlock(l);                                                                \
    return tmp;                                                               \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)                                     \
  int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,  \
                                    int success, int failure) {               \
    if (lockfree)                                                             \
      return __c11_atomic_compare_exchange_strong(                            \
          (_Atomic(type) *)ptr, expected, desired, success, failure);         \
    Lock *l = lock_for_pointer(ptr);                                          \
    lock(l);                                                                  \
    if (*ptr == *expected) {                                                  \
      *ptr = desired;                                                         \
      unlock(l);                                                              \
      return 1;                                                               \
    }                                                                         \
    *expected = *ptr;                                                         \
    unlock(l);                                                                \
    return 0;                                                                 \
  }
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op)                             \
  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {        \
    if (lockfree)                                                             \
      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model);   \
    Lock *l = lock_for_pointer(ptr);                                          \
    lock(l);                                                                  \
    type tmp = *ptr;                                                          \
    *ptr = tmp op val;                                                        \
    unlock(l);                                                                \
    return tmp;                                                               \
  }
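
// A semantics sketch with hypothetical values: __atomic_fetch_add_4(&x, 2, m)
// with x == 40 stores 42 and returns the old value 40, on both the lock-free
// and the lock-based path.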

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE