//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
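
// A front-end can provide strong definitions of the two hooks above to
// observe heap events. A minimal sketch (the signatures are the real weak
// interface; the bodies are illustrative only):
//
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     // Called with the user pointer and size after each allocation.
//   }
//   extern "C" void __sanitizer_free_hook(void *ptr) {
//     // Called with the user pointer before each deallocation.
//   }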

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
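    // The trimming below keeps only the part of [p, p + size) whose meta
    // shadow covers whole OS pages: kPageSize bytes of user memory map to
    // exactly one page of meta shadow, and only whole pages can be returned
    // to the OS. (Illustrative numbers, assuming an 8-byte meta cell with
    // 4 bytes of meta per cell: kMetaRatio = 2, so with 4KB OS pages each
    // 8KB-aligned 8KB user chunk maps to one 4KB meta page.)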
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}
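
// allocator() and global_proc() return objects that are constructed lazily,
// via placement new into aligned static storage (see InitializeAllocator and
// InitializeAllocatorLate below), presumably to avoid running global
// constructors before the runtime is initialized.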

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc
  // at all.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}
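
// Note: the constructor takes gp->mtx when it wires the global Processor and
// the destructor releases it, so proc-less threads that fall back to the
// global Processor are serialized against each other.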

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return Allocator::FailureHandler::OnBadRequest();
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(p == 0))
    return 0;
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}
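
// The malloc-family interceptors funnel into user_alloc/user_free. A rough,
// illustrative sketch of the call shape (the real interceptors live in
// tsan_interceptors.cc and handle additional corner cases):
//
//   INTERCEPTOR(void*, malloc, uptr size) {
//     SCOPED_INTERCEPTOR_RAW(malloc, size);
//     void *p = user_alloc(thr, pc, size);
//     invoke_malloc_hook(p, size);
//     return p;
//   }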

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CheckForCallocOverflow(size, n))
    return Allocator::FailureHandler::OnBadRequest();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return p;
}
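
// CheckForCallocOverflow rejects requests where n * size wraps: e.g. on a
// 64-bit target, n = 1ull << 33 and size = 1ull << 31 multiply to 2^64,
// which truncates to 0 and would otherwise satisfy a huge logical request
// with a zero-sized block.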

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}
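
// MemoryRangeFreed poisons the shadow of the freed block, so a later access
// to this memory races with the free and is reported as a heap-use-after-free.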

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  void *p2 = user_alloc(thr, pc, sz);
  if (p2 == 0)
    return 0;
  if (p) {
    uptr oldsz = user_alloc_usable_size(p);
    internal_memcpy(p2, p, min(oldsz, sz));
    user_free(thr, pc, p);
  }
  return p2;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}
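
// The free/unmapped byte counters are not tracked by this allocator and
// return placeholder values; the estimated allocated size is simply the
// requested size.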
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}
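
// Presumably intended for embedders (e.g. thread-pool runtimes) to call when
// a thread is parked for a long time: it returns the thread's cached vector
// clocks and allocator caches to the shared pools.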
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"