//===-- tsan_interceptors_mac.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Mac-specific interceptors.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC

#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_interface_ann.h"

#include <libkern/OSAtomic.h>
#include <xpc/xpc.h>

typedef long long_t;  // NOLINT

namespace __tsan {

// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
// actually aliases of each other, and we cannot have different interceptors
// for them, because they're actually the same function. Thus, we have to stay
// conservative and treat the non-barrier versions as mo_acq_rel.
static const morder kMacOrderBarrier = mo_acq_rel;
static const morder kMacOrderNonBarrier = mo_acq_rel;

#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                 \
    SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                 \
    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo);                \
  }

#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                        \
    SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                        \
    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x;                   \
  }

#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                             \
    SCOPED_TSAN_INTERCEPTOR(f, ptr);                                           \
    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1;                   \
  }

#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
                                     mo)                                    \
  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                          \
    SCOPED_TSAN_INTERCEPTOR(f, ptr);                                        \
    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1;                \
  }

#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m)               \
  m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,          \
    kMacOrderNonBarrier)                                                    \
  m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
    kMacOrderBarrier)                                                       \
  m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f,          \
    kMacOrderNonBarrier)                                                    \
  m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \
    kMacOrderBarrier)

#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig)             \
  m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,            \
    kMacOrderNonBarrier)                                                       \
  m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f,   \
    kMacOrderBarrier)                                                          \
  m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
    kMacOrderNonBarrier)                                                       \
  m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier,                           \
    __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)

OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
                                 OSATOMIC_INTERCEPTOR_PLUS_X)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
                                 OSATOMIC_INTERCEPTOR_PLUS_1)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub,
                                 OSATOMIC_INTERCEPTOR_MINUS_1)
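
// For reference, the OSAtomicAdd line above expands (sketched here by hand)
// to interceptors of this shape:
//   TSAN_INTERCEPTOR(int32_t, OSAtomicAdd32, int32_t x,
//                    volatile int32_t *ptr) {
//     SCOPED_TSAN_INTERCEPTOR(OSAtomicAdd32, x, ptr);
//     return __tsan_atomic32_fetch_add((volatile a32 *)ptr, x,
//                                      kMacOrderNonBarrier) + x;
//   }
// The "+ x" is needed because OSAtomicAdd32 returns the *new* value, while
// __tsan_atomic32_fetch_add returns the value before the addition.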

OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X,
                              OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and,
                              OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
                              OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
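
// Note on the two callback macros used above: the Orig/OrigBarrier variants
// of the OSAtomic bitwise functions return the value the location held
// *before* the update, so they can forward the fetch_or/fetch_and/fetch_xor
// result directly (OSATOMIC_INTERCEPTOR). The plain variants return the new
// value and use OSATOMIC_INTERCEPTOR_PLUS_X's "+ x" adjustment on top of the
// fetched old value.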

#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t)           \
  TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \
    SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr);               \
    return tsan_atomic_f##_compare_exchange_strong(                      \
        (tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value,          \
        kMacOrderNonBarrier, kMacOrderNonBarrier);                       \
  }                                                                      \
                                                                         \
  TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value,           \
                   t volatile *ptr) {                                    \
    SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr);      \
    return tsan_atomic_f##_compare_exchange_strong(                      \
        (tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value,          \
        kMacOrderBarrier, kMacOrderNonBarrier);                          \
  }

OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
                          long_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64,
                          void *)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
                          int32_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
                          int64_t)

#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo)          \
  TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
    SCOPED_TSAN_INTERCEPTOR(f, n, ptr);                       \
    char *byte_ptr = ((char *)ptr) + (n >> 3);                \
    char bit = 0x80u >> (n & 7);                              \
    char mask = clear ? ~bit : bit;                           \
    char orig_byte = op((a8 *)byte_ptr, mask, mo);            \
    return orig_byte & bit;                                   \
  }

#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear)               \
  OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
  OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)

OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
                            true)
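
// Worked example of the bit addressing above: for n = 10, byte_ptr ends up at
// ptr + 1 (10 >> 3) and bit = 0x80 >> 2 = 0x20, matching the OSAtomic
// convention that bit 0 is the most significant bit of the first byte.
// OSAtomicTestAndClear additionally inverts the mask, so the fetch_and clears
// exactly that one bit.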

TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset);
  __tsan_release(item);
  REAL(OSAtomicEnqueue)(list, item, offset);
}

TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
  void *item = REAL(OSAtomicDequeue)(list, offset);
  if (item) __tsan_acquire(item);
  return item;
}
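
// The __tsan_release/__tsan_acquire pair above establishes happens-before
// from the thread that enqueues an item to the thread that dequeues it, so
// user code like this sketch is not flagged (Node/use are illustrative):
//   node->payload = 42;
//   OSAtomicEnqueue(&head, node, offsetof(Node, link));
//   // ... on another thread:
//   Node *n = (Node *)OSAtomicDequeue(&head, offsetof(Node, link));
//   if (n) use(n->payload);  // no race reported on payload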

// OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X.
#if !SANITIZER_IOS

TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset);
  __tsan_release(item);
  REAL(OSAtomicFifoEnqueue)(list, item, offset);
}

TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
  void *item = REAL(OSAtomicFifoDequeue)(list, offset);
  if (item) __tsan_acquire(item);
  return item;
}

#endif

TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockLock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock);
  REAL(OSSpinLockLock)(lock);
  Acquire(thr, pc, (uptr)lock);
}

TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockTry)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock);
  bool result = REAL(OSSpinLockTry)(lock);
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}

TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockUnlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(OSSpinLockUnlock)(lock);
}
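
// All the lock interceptors in this file follow the same shape: Acquire()
// after the real lock is taken (or successfully tried) and Release() before
// the real unlock, so every critical section is bracketed by TSan-visible
// synchronization on the lock address and writes made under the lock
// happen-before reads in later critical sections. The is_inited early-out
// forwards straight to the real function for locks touched before the
// current thread is fully registered with TSan.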

TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_lock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock);
  REAL(os_lock_lock)(lock);
  Acquire(thr, pc, (uptr)lock);
}

TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_trylock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock);
  bool result = REAL(os_lock_trylock)(lock);
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}

TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_unlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(os_lock_unlock)(lock);
}

TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
                 xpc_connection_t connection, xpc_handler_t handler) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
                          handler);
  Release(thr, pc, (uptr)connection);
  xpc_handler_t new_handler = ^(xpc_object_t object) {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler);
      Acquire(thr, pc, (uptr)connection);
    }
    handler(object);
  };
  REAL(xpc_connection_set_event_handler)(connection, new_handler);
}
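
// The XPC interceptors share a pattern: Release() on the connection when a
// handler or message is handed to XPC, and a wrapper block that Acquire()s
// the connection before invoking user code, so work done by the submitting
// thread is ordered before the handler's execution.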

TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection,
                 dispatch_block_t barrier) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier);
  Release(thr, pc, (uptr)connection);
  dispatch_block_t new_barrier = ^() {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier);
      Acquire(thr, pc, (uptr)connection);
    }
    barrier();
  };
  REAL(xpc_connection_send_barrier)(connection, new_barrier);
}

TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply,
                 xpc_connection_t connection, xpc_object_t message,
                 dispatch_queue_t replyq, xpc_handler_t handler) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection,
                          message, replyq, handler);
  Release(thr, pc, (uptr)connection);
  xpc_handler_t new_handler = ^(xpc_object_t object) {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply);
      Acquire(thr, pc, (uptr)connection);
    }
    handler(object);
  };
  REAL(xpc_connection_send_message_with_reply)
  (connection, message, replyq, new_handler);
}

TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection);
  Release(thr, pc, (uptr)connection);
  REAL(xpc_connection_cancel)(connection);
}

// On macOS, libc++ is always linked dynamically, so intercepting works the
// usual way.
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR

namespace {
struct fake_shared_weak_count {
  volatile a64 shared_owners;
  volatile a64 shared_weak_owners;
  virtual void _unused_0x0() = 0;
  virtual void _unused_0x8() = 0;
  virtual void on_zero_shared() = 0;
  virtual void _unused_0x18() = 0;
  virtual void on_zero_shared_weak() = 0;
};
}  // namespace
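
// fake_shared_weak_count mirrors the object layout of libc++'s
// std::__shared_weak_count (vptr, then the shared and weak owner counts); the
// _unused_0xNN methods appear to be named for their vtable byte offsets and
// only pad the vtable so that on_zero_shared() and on_zero_shared_weak()
// occupy the same slots as in libc++.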

// The following code adds libc++ interceptors for:
//     void __shared_weak_count::__release_shared() _NOEXCEPT;
//     bool __shared_count::__release_shared() _NOEXCEPT;
// Shared and weak pointers in C++ maintain reference counts via atomics in
// libc++.dylib, which are TSan-invisible, and this leads to false positives in
// destructor code. These interceptors re-implement the whole functions so that
// the mo_acq_rel semantics of the atomic decrement are visible.
//
// Unfortunately, the interceptors cannot simply Acquire/Release some sync
// object and call the original function, because it would have a race between
// the sync and the destruction of the object. Calling both under a lock will
// not work because the destructor can invoke this interceptor again (and even
// in a different thread, so recursive locks don't help).

STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
                   fake_shared_weak_count *o) {
  if (!flags()->shared_ptr_interceptor)
    return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o);

  SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
                          o);
  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
    Acquire(thr, pc, (uptr)&o->shared_owners);
    o->on_zero_shared();
    if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
        0) {
      Acquire(thr, pc, (uptr)&o->shared_weak_owners);
      o->on_zero_shared_weak();
    }
  }
}

STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
                   fake_shared_weak_count *o) {
  if (!flags()->shared_ptr_interceptor)
    return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);

  SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
    Acquire(thr, pc, (uptr)&o->shared_owners);
    o->on_zero_shared();
    return true;
  }
  return false;
}
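
// Sketch of the false positive these shared_ptr interceptors suppress
// (illustrative user code, not part of this file):
//   auto p = std::make_shared<int>(0);
//   std::thread t([q = p]() mutable { *q = 1; q.reset(); });
//   p.reset();  // whichever reset() runs last frees the object
//   t.join();
// The deciding refcount decrement happens inside libc++.dylib; without these
// interceptors TSan cannot see it and may report the write to *q as racing
// with the deallocation.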

namespace {
struct call_once_callback_args {
  void (*orig_func)(void *arg);
  void *orig_arg;
  void *flag;
};

void call_once_callback_wrapper(void *arg) {
  call_once_callback_args *new_args = (call_once_callback_args *)arg;
  new_args->orig_func(new_args->orig_arg);
  __tsan_release(new_args->flag);
}
}  // namespace

// This adds a libc++ interceptor for:
//     void __call_once(volatile unsigned long&, void*, void(*)(void*));
// C++11 call_once is implemented via an internal function __call_once which is
// inside libc++.dylib, and the atomic release store inside it is thus
// TSan-invisible. To avoid false positives, this interceptor wraps the callback
// function and performs an explicit Release after the user code has run.
STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
                   void *arg, void (*func)(void *arg)) {
  call_once_callback_args new_args = {func, arg, flag};
  REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args,
                                            call_once_callback_wrapper);
}
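
// User code that reaches this interceptor (sketch; load_config/use are
// illustrative):
//   std::once_flag flag;
//   int config;
//   std::call_once(flag, [&] { config = load_config(); });
//   use(config);
// The acquire-side check of the flag is inlined from the <mutex> header into
// instrumented user code; only the release store inside libc++.dylib is
// invisible, which the wrapper's __tsan_release on the flag compensates for.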

}  // namespace __tsan

#endif  // SANITIZER_MAC