//===-- tsan_interceptors_mac.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Mac-specific interceptors.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC

#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_interface_ann.h"
#include "sanitizer_common/sanitizer_addrhashmap.h"

#include <errno.h>
#include <libkern/OSAtomic.h>
#include <objc/objc-sync.h>
#include <sys/ucontext.h>

#if defined(__has_include) && __has_include(<os/lock.h>)
#include <os/lock.h>
#endif  // #if defined(__has_include) && __has_include(<os/lock.h>)

#if defined(__has_include) && __has_include(<xpc/xpc.h>)
#include <xpc/xpc.h>
#endif  // #if defined(__has_include) && __has_include(<xpc/xpc.h>)

typedef long long_t;

extern "C" {
int getcontext(ucontext_t *ucp) __attribute__((returns_twice));
int setcontext(const ucontext_t *ucp);
}

namespace __tsan {

// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
// actually aliases of each other, and we cannot have different interceptors for
// them, because they're actually the same function. Thus, we have to stay
// conservative and treat the non-barrier versions as mo_acq_rel.
static const morder kMacOrderBarrier = mo_acq_rel;
static const morder kMacOrderNonBarrier = mo_acq_rel;

#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                 \
    SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                 \
    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo);                \
  }

#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                        \
    SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                        \
    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x;                   \
  }

#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                             \
    SCOPED_TSAN_INTERCEPTOR(f, ptr);                                           \
    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1;                   \
  }

#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
                                     mo)                                    \
  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                          \
    SCOPED_TSAN_INTERCEPTOR(f, ptr);                                        \
    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1;                \
  }

#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m)               \
  m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,          \
    kMacOrderNonBarrier)                                                    \
  m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
    kMacOrderBarrier)                                                       \
  m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f,          \
    kMacOrderNonBarrier)                                                    \
  m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \
    kMacOrderBarrier)

#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig)             \
  m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,            \
    kMacOrderNonBarrier)                                                       \
  m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f,   \
    kMacOrderBarrier)                                                          \
  m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
         kMacOrderNonBarrier)                                                  \
  m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier,                           \
         __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)

OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
                                 OSATOMIC_INTERCEPTOR_PLUS_X)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
                                 OSATOMIC_INTERCEPTOR_PLUS_1)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub,
                                 OSATOMIC_INTERCEPTOR_MINUS_1)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X,
                              OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and,
                              OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
                              OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)

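// For illustration, the OSAtomicAdd instantiation above expands (hand-expanded
// sketch, not literal preprocessor output) to roughly:
//   TSAN_INTERCEPTOR(int32_t, OSAtomicAdd32, int32_t x, volatile int32_t *ptr) {
//     SCOPED_TSAN_INTERCEPTOR(OSAtomicAdd32, x, ptr);
//     return __tsan_atomic32_fetch_add((volatile a32 *)ptr, x,
//                                      kMacOrderNonBarrier) + x;
//   }
// i.e. the interceptor routes the update through TSan's atomic model and
// returns the new value, matching the OSAtomicAdd32 contract.
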
#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t)            \
  TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) {  \
    SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr);                \
    return tsan_atomic_f##_compare_exchange_strong(                       \
        (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value,  \
        kMacOrderNonBarrier, kMacOrderNonBarrier);                        \
  }                                                                       \
                                                                          \
  TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value,            \
                   t volatile *ptr) {                                     \
    SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr);       \
    return tsan_atomic_f##_compare_exchange_strong(                       \
        (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value,  \
        kMacOrderBarrier, kMacOrderNonBarrier);                           \
  }

OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
                          long_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64,
                          void *)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
                          int32_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
                          int64_t)

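// Note that old_value is taken by value, so the write-back that
// _compare_exchange_strong performs into its "expected" argument on failure
// only touches the interceptor's local copy; just the boolean success result
// escapes, matching the OSAtomicCompareAndSwap* contract.
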
#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo)               \
  TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) {      \
    SCOPED_TSAN_INTERCEPTOR(f, n, ptr);                            \
    volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3);   \
    char bit = 0x80u >> (n & 7);                                   \
    char mask = clear ? ~bit : bit;                                \
    char orig_byte = op((volatile a8 *)byte_ptr, mask, mo);        \
    return orig_byte & bit;                                        \
  }

#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear)                \
  OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier)  \
  OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)

OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
                            true)

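// The bit numbering above follows the OSAtomicTestAndSet/TestAndClear
// convention: bit n lives in byte (n >> 3) and corresponds to the mask
// (0x80 >> (n & 7)), i.e. bits are counted from the most significant bit of
// each byte. The interceptors model the whole operation as a single 8-bit
// fetch_or/fetch_and and return whether the bit was previously set.
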
TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset);
  __tsan_release(item);
  REAL(OSAtomicEnqueue)(list, item, offset);
}

TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
  void *item = REAL(OSAtomicDequeue)(list, offset);
  if (item) __tsan_acquire(item);
  return item;
}

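// The __tsan_release/__tsan_acquire pair above gives TSan a happens-before
// edge from the enqueueing thread to the dequeueing thread, keyed on the item
// pointer, so fields written into an item before OSAtomicEnqueue are not
// reported as racing with reads made after OSAtomicDequeue returns it.
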
// OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X.
#if !SANITIZER_IOS

TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset);
  __tsan_release(item);
  REAL(OSAtomicFifoEnqueue)(list, item, offset);
}

TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
  void *item = REAL(OSAtomicFifoDequeue)(list, offset);
  if (item) __tsan_acquire(item);
  return item;
}

#endif

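// The OSSpinLock and os_lock interceptors below fall back to the real
// functions when the current ThreadState is not yet initialized or is already
// dead, presumably because these locks can be taken by libsystem internals
// very early in a thread's life and very late in its teardown, when TSan has
// no per-thread state to record the synchronization against.
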
TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockLock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock);
  REAL(OSSpinLockLock)(lock);
  Acquire(thr, pc, (uptr)lock);
}

TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockTry)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock);
  bool result = REAL(OSSpinLockTry)(lock);
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}

TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockUnlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(OSSpinLockUnlock)(lock);
}

TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_lock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock);
  REAL(os_lock_lock)(lock);
  Acquire(thr, pc, (uptr)lock);
}

TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_trylock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock);
  bool result = REAL(os_lock_trylock)(lock);
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}

TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_unlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(os_lock_unlock)(lock);
}

#if defined(__has_include) && __has_include(<os/lock.h>)

TSAN_INTERCEPTOR(void, os_unfair_lock_lock, os_unfair_lock_t lock) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_lock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock, lock);
  REAL(os_unfair_lock_lock)(lock);
  Acquire(thr, pc, (uptr)lock);
}

TSAN_INTERCEPTOR(void, os_unfair_lock_lock_with_options, os_unfair_lock_t lock,
                 u32 options) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_lock_with_options)(lock, options);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock_with_options, lock, options);
  REAL(os_unfair_lock_lock_with_options)(lock, options);
  Acquire(thr, pc, (uptr)lock);
}

TSAN_INTERCEPTOR(bool, os_unfair_lock_trylock, os_unfair_lock_t lock) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_trylock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_trylock, lock);
  bool result = REAL(os_unfair_lock_trylock)(lock);
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}

TSAN_INTERCEPTOR(void, os_unfair_lock_unlock, os_unfair_lock_t lock) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_unlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_unlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(os_unfair_lock_unlock)(lock);
}

#endif  // #if defined(__has_include) && __has_include(<os/lock.h>)

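// The XPC interceptors below all follow the same pattern: Release on the
// connection when a handler, barrier or message is submitted, and Acquire on
// the connection inside the wrapped callback before the user code runs, so
// that writes made before handing work to the connection are visible (to
// TSan) in the thread that eventually runs the callback.
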
#if defined(__has_include) && __has_include(<xpc/xpc.h>)

TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
                 xpc_connection_t connection, xpc_handler_t handler) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
                          handler);
  Release(thr, pc, (uptr)connection);
  xpc_handler_t new_handler = ^(xpc_object_t object) {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler);
      Acquire(thr, pc, (uptr)connection);
    }
    handler(object);
  };
  REAL(xpc_connection_set_event_handler)(connection, new_handler);
}

TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection,
                 dispatch_block_t barrier) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier);
  Release(thr, pc, (uptr)connection);
  dispatch_block_t new_barrier = ^() {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier);
      Acquire(thr, pc, (uptr)connection);
    }
    barrier();
  };
  REAL(xpc_connection_send_barrier)(connection, new_barrier);
}

TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply,
                 xpc_connection_t connection, xpc_object_t message,
                 dispatch_queue_t replyq, xpc_handler_t handler) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection,
                          message, replyq, handler);
  Release(thr, pc, (uptr)connection);
  xpc_handler_t new_handler = ^(xpc_object_t object) {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply);
      Acquire(thr, pc, (uptr)connection);
    }
    handler(object);
  };
  REAL(xpc_connection_send_message_with_reply)
  (connection, message, replyq, new_handler);
}

TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection);
  Release(thr, pc, (uptr)connection);
  REAL(xpc_connection_cancel)(connection);
}

#endif  // #if defined(__has_include) && __has_include(<xpc/xpc.h>)

// Determines whether the Obj-C object pointer is a tagged pointer. Tagged
// pointers encode the object data directly in their pointer bits and do not
// have an associated memory allocation. The Obj-C runtime uses tagged pointers
// to transparently optimize small objects.
static bool IsTaggedObjCPointer(id obj) {
  const uptr kPossibleTaggedBits = 0x8000000000000001ull;
  return ((uptr)obj & kPossibleTaggedBits) != 0;
}

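// kPossibleTaggedBits covers both placements Apple has used for the tag bit:
// the least significant bit (x86_64) and the most significant bit (arm64).
// Small Foundation values (e.g. short NSString or NSNumber instances) may be
// tagged, so @synchronized on them would otherwise hand TSan a "pointer" that
// is not backed by any allocation.
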
// Returns an address which can be used to inform TSan about synchronization
// points (MutexLock/Unlock). The TSan infrastructure expects this to be a valid
// address in the process space. We do a small allocation here to obtain a
// stable address (the array backing the hash map can change). The memory is
// never freed (leaked), and allocation and locking are slow, but this code only
// runs for @synchronized with tagged pointers, which is very rare.
static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) {
  typedef AddrHashMap<uptr, 5> Map;
  static Map Addresses;
  Map::Handle h(&Addresses, addr);
  if (h.created()) {
    ThreadIgnoreBegin(thr, pc);
    *h = (uptr) user_alloc(thr, pc, /*size=*/1);
    ThreadIgnoreEnd(thr, pc);
  }
  return *h;
}

// Returns an address on which we can synchronize for a given Obj-C object
// pointer. For normal object pointers, this is just the address of the object
// in memory. Tagged pointers are not backed by an actual memory allocation, so
// we need to synthesize a valid address.
static uptr SyncAddressForObjCObject(id obj, ThreadState *thr, uptr pc) {
  if (IsTaggedObjCPointer(obj))
    return GetOrCreateSyncAddress((uptr)obj, thr, pc);
  return (uptr)obj;
}

TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) {
  SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj);
  if (!obj) return REAL(objc_sync_enter)(obj);
  uptr addr = SyncAddressForObjCObject(obj, thr, pc);
  MutexPreLock(thr, pc, addr, MutexFlagWriteReentrant);
  int result = REAL(objc_sync_enter)(obj);
  CHECK_EQ(result, OBJC_SYNC_SUCCESS);
  MutexPostLock(thr, pc, addr, MutexFlagWriteReentrant);
  return result;
}

TSAN_INTERCEPTOR(int, objc_sync_exit, id obj) {
  SCOPED_TSAN_INTERCEPTOR(objc_sync_exit, obj);
  if (!obj) return REAL(objc_sync_exit)(obj);
  uptr addr = SyncAddressForObjCObject(obj, thr, pc);
  MutexUnlock(thr, pc, addr);
  int result = REAL(objc_sync_exit)(obj);
  if (result != OBJC_SYNC_SUCCESS) MutexInvalidAccess(thr, pc, addr);
  return result;
}

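// For illustration, @synchronized blocks in (hypothetical) user code lower to
// exactly these runtime calls:
//   @synchronized (obj) {     // objc_sync_enter(obj)  -> MutexPre/PostLock
//     ...critical section...
//   }                         // objc_sync_exit(obj)   -> MutexUnlock
// so TSan models each synchronized object as a write-reentrant mutex keyed on
// its sync address.
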
TSAN_INTERCEPTOR(int, swapcontext, ucontext_t *oucp, const ucontext_t *ucp) {
  {
    SCOPED_INTERCEPTOR_RAW(swapcontext, oucp, ucp);
  }
  // Because of swapcontext() semantics we have no option but to copy its
  // implementation here.
  if (!oucp || !ucp) {
    errno = EINVAL;
    return -1;
  }
  ThreadState *thr = cur_thread();
  const int UCF_SWAPPED = 0x80000000;
  oucp->uc_onstack &= ~UCF_SWAPPED;
  thr->ignore_interceptors++;
  int ret = getcontext(oucp);
  if (!(oucp->uc_onstack & UCF_SWAPPED)) {
    thr->ignore_interceptors--;
    if (!ret) {
      oucp->uc_onstack |= UCF_SWAPPED;
      ret = setcontext(ucp);
    }
  }
  return ret;
}

// On macOS, libc++ is always linked dynamically, so intercepting works the
// usual way.
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR

namespace {
struct fake_shared_weak_count {
  volatile a64 shared_owners;
  volatile a64 shared_weak_owners;
  virtual void _unused_0x0() = 0;
  virtual void _unused_0x8() = 0;
  virtual void on_zero_shared() = 0;
  virtual void _unused_0x18() = 0;
  virtual void on_zero_shared_weak() = 0;
};
}  // namespace

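// fake_shared_weak_count mirrors the in-memory layout of libc++'s
// std::__shared_weak_count (a vptr followed by the two reference counts); the
// _unused_0xNN methods are placeholders for vtable slots the interceptors
// below never call, keeping on_zero_shared() and on_zero_shared_weak() at the
// expected offsets.
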
// The following code adds libc++ interceptors for:
//     void __shared_weak_count::__release_shared() _NOEXCEPT;
//     bool __shared_count::__release_shared() _NOEXCEPT;
// Shared and weak pointers in C++ maintain reference counts via atomics in
// libc++.dylib, which are TSan-invisible, and this leads to false positives in
// destructor code. These interceptors re-implement the whole functions so that
// the mo_acq_rel semantics of the atomic decrement are visible.
//
// Unfortunately, the interceptors cannot simply Acquire/Release some sync
// object and call the original function, because there would be a race between
// the sync and the destruction of the object. Calling both under a lock will
// not work because the destructor can invoke this interceptor again (and even
// in a different thread, so recursive locks don't help).

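// For example (hypothetical user code): two threads each drop their copy of
// the same std::shared_ptr<T>. The losing thread's reference-count decrement
// must synchronize with the winner, which runs ~T() and frees the control
// block. Without these interceptors that decrement happens inside
// libc++.dylib where TSan cannot see it, and the destructor may be reported
// as racing with the other thread's last use of the object.
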
STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
                   fake_shared_weak_count *o) {
  if (!flags()->shared_ptr_interceptor)
    return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o);

  SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
                          o);
  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
    Acquire(thr, pc, (uptr)&o->shared_owners);
    o->on_zero_shared();
    if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
        0) {
      Acquire(thr, pc, (uptr)&o->shared_weak_owners);
      o->on_zero_shared_weak();
    }
  }
}

STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
                   fake_shared_weak_count *o) {
  if (!flags()->shared_ptr_interceptor)
    return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);

  SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
    Acquire(thr, pc, (uptr)&o->shared_owners);
    o->on_zero_shared();
    return true;
  }
  return false;
}

namespace {
struct call_once_callback_args {
  void (*orig_func)(void *arg);
  void *orig_arg;
  void *flag;
};

void call_once_callback_wrapper(void *arg) {
  call_once_callback_args *new_args = (call_once_callback_args *)arg;
  new_args->orig_func(new_args->orig_arg);
  __tsan_release(new_args->flag);
}
}  // namespace

// This adds a libc++ interceptor for:
//     void __call_once(volatile unsigned long&, void*, void(*)(void*));
// C++11 call_once is implemented via an internal function __call_once which is
// inside libc++.dylib, and the atomic release store inside it is thus
// TSan-invisible. To avoid false positives, this interceptor wraps the callback
// function and performs an explicit Release after the user code has run.
STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
                   void *arg, void (*func)(void *arg)) {
  call_once_callback_args new_args = {func, arg, flag};
  REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args,
                                            call_once_callback_wrapper);
}

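// For example (hypothetical user code): one thread wins the race inside
// std::call_once(flag, init) and runs init(); every other caller must observe
// init()'s writes once call_once returns. The wrapper's __tsan_release(flag)
// publishes those writes right after the user callback finishes, standing in
// for the release store that libc++'s __call_once performs internally.
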
}  // namespace __tsan

#endif  // SANITIZER_MAC