//===-- tsan_mman.cc ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"
// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) { (void)ptr; (void)size; }

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) { (void)ptr; }

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};
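
// Both the allocator and the global processor live in static placeholder
// buffers and are constructed explicitly (see InitializeAllocator and
// InitializeAllocatorLate below) rather than through C++ static initializers.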
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}
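
// ScopedGlobalProcessor temporarily wires the current thread to the shared
// global Processor when the thread does not have one of its own; the
// destructor undoes the wiring (see the comment in the constructor).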
ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}
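
// Reports a malloc/free call performed inside a signal handler when the
// report_signal_unsafe flag is set.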
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
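
// All user-facing allocation entry points below funnel into
// user_alloc_internal, which enforces the size limit, allocates from the
// per-processor cache and registers the new block with the metamap.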
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, kMaxAllowedMallocSize, &stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}
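
// OnUserAlloc/OnUserFree maintain the meta shadow for heap blocks and, unless
// reads/writes are being ignored, imitate a write to (or mark as freed) the
// whole range so that concurrent accesses to it are detected as races.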
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0) return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b) return 0;           // Not a valid pointer.
  if (b->siz == 0) return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}
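
// The invoke_* wrappers call both the weak __sanitizer_malloc_hook /
// __sanitizer_free_hook defined above and the hooks registered through
// sanitizer_common, but only once the runtime is initialized and the current
// thread is not ignoring interceptors.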
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
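// Implementation of the sanitizer allocator introspection interface
// (__sanitizer_get_*) on top of the TSan allocator.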
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}
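
// Lets the program notify TSan that the current thread is about to go idle;
// cached vector clocks and allocator caches are flushed back to the global
// state so the memory can be reused.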
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"