//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"
// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
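
// A front-end can override these hooks by providing strong definitions of
// the same symbols. A minimal sketch (hypothetical client code, not part of
// TSan):
//
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     // account for (ptr, size) here, e.g. bump a per-process counter
//   }
//
// With a strong definition present, the weak defaults above are not linked in.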

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    // Trim the head up to the next meta-page boundary.
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    // Trim the tail down to the previous meta-page boundary.
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    FlushUnneededShadowMemory((uptr)MemToMeta(p), size / kMetaRatio);
  }
};
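
// Worked example for the trimming above (assuming kMetaRatio == 2 and a 4K
// system page, so kPageSize == 0x2000): for p = 0x13000, size = 0x2a000 the
// head is rounded up to 0x14000 (diff = 0x1000, size -> 0x29000) and the
// tail 0x3d000 is rounded down to 0x3c000 (size -> 0x28000), so only the
// fully covered meta pages for [0x14000, 0x3c000) are flushed
// (0x28000 / 2 bytes of meta shadow).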

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init(common_flags()->allocator_may_return_null);
}

void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}
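
// This fires for patterns like the following (hypothetical user code):
//
//   void handler(int) { free(g_buf); }  // malloc/free are not
//   signal(SIGINT, handler);            // async-signal-safe
//
// i.e. any allocator call made while thr->in_signal_handler is set.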

void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return allocator()->ReturnNullOrDie();
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CallocShouldReturnNullDueToOverflow(size, n))
    return allocator()->ReturnNullOrDie();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return p;
}
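
// The overflow check matters because n * size wraps silently: on 64-bit,
// n = (1ull << 33) and size = (1ull << 31) multiply to 0 mod 2^64, so
// without the guard calloc would "succeed" with a zero-sized block.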

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently;
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      uptr oldsz = user_alloc_usable_size(p);
      internal_memcpy(p2, p, min(oldsz, sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}
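
// Note that the net effect matches C realloc semantics:
//   user_realloc(thr, pc, 0, sz) is a fresh allocation;
//   user_realloc(thr, pc, p, 0) frees p and returns 0;
//   otherwise the old contents are copied up to min(oldsz, sz).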

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  return b ? b->siz : 0;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}
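
// The nomalloc dance above exists because a failing CHECK itself allocates
// via internal_malloc() while building its report; nomalloc is cleared first
// so the assertion path does not trip the same guard recursively.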

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}
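
// The three functions above appear to be placeholders: TSan does not track
// free/unmapped byte counts, and the "estimated" size is just the request.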

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}
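
// These entry points implement the public interface declared in
// sanitizer_allocator_interface.h; e.g. a client could sample heap usage:
//
//   uptr live = __sanitizer_get_current_allocated_bytes();
//   int is_heap = __sanitizer_get_ownership(some_ptr);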

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
  ctx->metamap.OnThreadIdle(thr);
}
}  // extern "C"
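
// __tsan_on_thread_idle releases this thread's allocator caches back to the
// common pool, which is useful when a runtime parks worker threads for long
// periods. A hypothetical thread-pool worker would call it before blocking:
//
//   if (no_work_available())
//     __tsan_on_thread_idle();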