//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif

static Allocator allocator;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}
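
// RegisterAllocation fills in the chunk metadata (tag, stack trace id,
// requested size) first and only then flips the leading "allocated" byte
// with a relaxed atomic store; RegisterDeallocation clears that byte before
// the chunk is released. This way a concurrent leak scan never observes
// half-initialized metadata as a live chunk.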
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
}
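
// Allocate is the common backend for all lsan_* entry points: it enforces
// the per-platform size cap, clears primary-allocator memory itself when
// requested (the allocator's own clearing is slow), and records the
// allocation together with its stack trace in the chunk metadata.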
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize)
    return ReportAllocationSizeTooBig(size, stack);
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    allocator.Deallocate(GetAllocatorCache(), p);
    return ReportAllocationSizeTooBig(new_size, stack);
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}
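
// The lsan_* wrappers below implement the libc-facing contracts on top of
// Allocate/Deallocate: alignment and overflow checks, errno setting via
// SetErrnoOnNull, and the POSIX return-code convention for posix_memalign.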
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}
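
// Maps an arbitrary pointer to the beginning of the live chunk containing
// it, or returns 0. The leak scanner calls this for every candidate pointer
// it encounters while the allocator is locked, hence the "FastLocked"
// block-begin lookup.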
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}
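
// Marks the chunk containing p as ignored (see __lsan_ignore_object); such
// chunks are excluded from leak reports. Expects the allocator to be locked,
// as the "Locked" suffix indicates.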
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan

using namespace __lsan;

extern "C" {
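// Memory statistics for the public __sanitizer_* interface.
// AllocatorStatAllocated counts bytes currently handed out to the program;
// AllocatorStatMapped counts bytes the allocator has mapped from the OS.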
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
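
// __sanitizer_malloc_hook and __sanitizer_free_hook are weak interface
// symbols: user code may define them to observe every allocation and
// deallocation, for example:
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) { ... }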
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
} // extern "C"