1 //===-- tsan_platform_mac.cc ----------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
13 //===----------------------------------------------------------------------===//
15 #include "sanitizer_common/sanitizer_platform.h"
18 #include "sanitizer_common/sanitizer_atomic.h"
19 #include "sanitizer_common/sanitizer_common.h"
20 #include "sanitizer_common/sanitizer_libc.h"
21 #include "sanitizer_common/sanitizer_posix.h"
22 #include "sanitizer_common/sanitizer_procmaps.h"
23 #include "sanitizer_common/sanitizer_stackdepot.h"
24 #include "tsan_platform.h"
26 #include "tsan_flags.h"
28 #include <mach/mach.h>
36 #include <sys/syscall.h>
38 #include <sys/types.h>
39 #include <sys/resource.h>
48 static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
49 atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
50 void *val = (void *)atomic_load_relaxed(a);
51 atomic_signal_fence(memory_order_acquire); // Turns the previous load into
52 // acquire wrt signals.
53 if (UNLIKELY(val == nullptr)) {
54 val = (void *)internal_mmap(nullptr, size, PROT_READ | PROT_WRITE,
55 MAP_PRIVATE | MAP_ANON, -1, 0);
58 if (!atomic_compare_exchange_strong(a, (uintptr_t *)&cmp, (uintptr_t)val,
59 memory_order_acq_rel)) {
60 internal_munmap(val, size);
// On OS X, accessing TLVs via __thread or manually by using pthread_key_* is
// problematic, because there are several places where interceptors are called
// when TLVs are not accessible (early process startup, thread cleanup, ...).
// The following provides a "poor man's TLV" implementation, where we use the
// shadow memory of the pointer returned by pthread_self() to store a pointer to
// the ThreadState object. The main thread's ThreadState is stored separately
// in a static variable, because we need to access it even before the
// shadow memory is set up.
// pthread_self() of the main thread; 0 until InitializePlatform() records it.
static uptr main_thread_identity = 0;
// 64-byte-aligned raw storage for the main thread's ThreadState (it cannot
// live in the shadow-based fake TLS, which requires shadow to be mapped).
ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];
78 ThreadState *cur_thread() {
79 uptr thread_identity = (uptr)pthread_self();
80 if (thread_identity == main_thread_identity || main_thread_identity == 0) {
81 return (ThreadState *)&main_thread_state;
83 ThreadState **fake_tls = (ThreadState **)MemToShadow(thread_identity);
84 ThreadState *thr = (ThreadState *)SignalSafeGetOrAllocate(
85 (uptr *)fake_tls, sizeof(ThreadState));
89 // TODO(kuba.brecka): This is not async-signal-safe. In particular, we call
90 // munmap first and then clear `fake_tls`; if we receive a signal in between,
91 // handler will try to access the unmapped ThreadState.
92 void cur_thread_finalize() {
93 uptr thread_identity = (uptr)pthread_self();
94 if (thread_identity == main_thread_identity) {
95 // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
96 // exit the main thread. Let's keep the main thread's ThreadState.
99 ThreadState **fake_tls = (ThreadState **)MemToShadow(thread_identity);
100 internal_munmap(*fake_tls, sizeof(ThreadState));
// No-op on Mac (other platforms release shadow pages here).
void FlushShadowMemory() {
}
// Walks the VM regions overlapping [start, end) via vm_region_64 and returns,
// through *res and *dirty, the total resident and dirtied memory in bytes
// (page counts scaled by the cached page size).  Stops early if the kernel
// query fails.
static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
  vm_address_t address = start;
  vm_address_t end_address = end;
  uptr resident_pages = 0;
  uptr dirty_pages = 0;
  while (address < end_address) {
    vm_size_t vm_region_size;
    mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
    vm_region_extended_info_data_t vm_region_info;
    mach_port_t object_name;
    kern_return_t ret = vm_region_64(
        mach_task_self(), &address, &vm_region_size, VM_REGION_EXTENDED_INFO,
        (vm_region_info_t)&vm_region_info, &count, &object_name);
    if (ret != KERN_SUCCESS) break;

    resident_pages += vm_region_info.pages_resident;
    dirty_pages += vm_region_info.pages_dirtied;

    // vm_region_64 may round `address` down to the region start; advance past
    // the region it reported.
    address += vm_region_size;
  }
  *res = resident_pages * GetPageSizeCached();
  *dirty = dirty_pages * GetPageSizeCached();
}
132 void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
133 uptr shadow_res, shadow_dirty;
134 uptr meta_res, meta_dirty;
135 uptr trace_res, trace_dirty;
136 RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
137 RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
138 RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);
141 uptr low_res, low_dirty;
142 uptr high_res, high_dirty;
143 uptr heap_res, heap_dirty;
144 RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &low_res, &low_dirty);
145 RegionMemUsage(HiAppMemBeg(), HiAppMemEnd(), &high_res, &high_dirty);
146 RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
147 #else // !SANITIZER_GO
148 uptr app_res, app_dirty;
149 RegionMemUsage(AppMemBeg(), AppMemEnd(), &app_res, &app_dirty);
152 StackDepotStats *stacks = StackDepotGetStats();
153 internal_snprintf(buf, buf_size,
154 "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
155 "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
156 "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
158 "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
159 "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
160 "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
161 #else // !SANITIZER_GO
162 "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
164 "stacks: %ld unique IDs, %ld kB allocated\n"
165 "threads: %ld total, %ld live\n"
166 "------------------------------\n",
167 ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
168 MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
169 TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
171 LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
172 HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
173 HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
174 #else // !SANITIZER_GO
175 AppMemBeg(), AppMemEnd(), app_res / 1024, app_dirty / 1024,
177 stacks->n_uniq_ids, stacks->allocated / 1024,
// No Mac-specific shadow setup is needed beyond the common initialization.
void InitializeShadowMemoryPlatform() {}
184 // On OS X, GCD worker threads are created without a call to pthread_create. We
185 // need to properly register these threads with ThreadCreate and ThreadStart.
186 // These threads don't have a parent thread, as they are created "spuriously".
187 // We're using a libpthread API that notifies us about a newly created thread.
188 // The `thread == pthread_self()` check indicates this is actually a worker
189 // thread. If it's just a regular thread, this hook is called on the parent
191 typedef void (*pthread_introspection_hook_t)(unsigned int event,
192 pthread_t thread, void *addr,
194 extern "C" pthread_introspection_hook_t pthread_introspection_hook_install(
195 pthread_introspection_hook_t hook);
196 static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1;
197 static const uptr PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3;
198 static pthread_introspection_hook_t prev_pthread_introspection_hook;
199 static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
200 void *addr, size_t size) {
201 if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
202 if (thread == pthread_self()) {
203 // The current thread is a newly created GCD worker thread.
204 ThreadState *thr = cur_thread();
205 Processor *proc = ProcCreate();
207 ThreadState *parent_thread_state = nullptr; // No parent.
208 int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
210 ThreadStart(thr, tid, GetTid(), /*workerthread*/ true);
212 } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
213 if (thread == pthread_self()) {
214 ThreadState *thr = cur_thread();
216 DestroyThreadState();
221 if (prev_pthread_introspection_hook != nullptr)
222 prev_pthread_introspection_hook(event, thread, addr, size);
// Early platform initialization (runs before flags are parsed); nothing is
// required on Mac.
void InitializePlatformEarly() {
}
229 void InitializePlatform() {
230 DisableCoreDumperIfNecessary();
234 CHECK_EQ(main_thread_identity, 0);
235 main_thread_identity = (uptr)pthread_self();
237 prev_pthread_introspection_hook =
238 pthread_introspection_hook_install(&my_pthread_introspection_hook);
// Invokes fn(c, m, abstime) with `cleanup(arg)` registered as a pthread
// cancellation cleanup handler, and returns fn's result.  The handler is
// popped without executing on normal return (pthread_cleanup_pop(0)).
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void *abstime), void *c, void *m, void *abstime,
    void(*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are hardcore macros mess.
  // We can't intercept nor call them w/o including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(c, m, abstime);
  pthread_cleanup_pop(0);
  return res;
}
258 } // namespace __tsan
260 #endif // SANITIZER_MAC