//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}

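// Because the hooks above are weak, a program linked against TSan can observe
// allocations by providing its own strong definitions, for example
// (hypothetical client code):
//   void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     // record (ptr, size) somewhere async-signal-safe
//   }
//   void __sanitizer_free_hook(void *ptr) { /* record ptr */ }
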
namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    // Trim the range to page boundaries so that only whole pages are released.
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

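// GlobalProc holds a fallback Processor protected by a mutex. It is used by
// ScopedGlobalProcessor below for threads that no longer have (or never had)
// a proc of their own, e.g. during late thread destruction.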
struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  // freeing thread-specific data during late thread destruction
  // (__nptl_deallocate_tsd) and unmapping the thread's stack
  // (__interceptor_munmap).
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

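// Requests at or above this size (or alignment) are rejected: they either
// return null (if the allocator may return null) or produce a fatal report in
// user_alloc_internal below.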
static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;

void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, kMaxAllowedMallocSize, &stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

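// OnUserAlloc/OnUserFree update the metadata map and the shadow state for a
// heap block: an allocation is modeled as a write to the whole range (unless
// reads/writes are currently ignored), and a free marks the range as freed so
// that later accesses to it are reported as races.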
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

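// The hooks below are invoked from the malloc/free interceptors. They are
// skipped before the runtime is initialized and while interceptors are
// ignored (e.g. inside the runtime itself).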
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

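// internal_alloc/internal_free serve the runtime's own data structures and go
// through the internal allocator, not the user allocator; allocating while
// thr->nomalloc is set is a bug in the runtime and aborts via CHECK.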
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
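// Statistics and introspection entry points of the common sanitizer allocator
// interface (declared in the included sanitizer_allocator_interface.h).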
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

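// Can be called by the client (e.g. a fiber scheduler) when a thread becomes
// idle: it returns per-thread clock and allocator caches so that idle threads
// do not pin memory.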
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"