//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif
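// Heap allocations are served by a CombinedAllocator: small requests go to
// the size-class-based PrimaryAllocator (configured per-platform in
// lsan_allocator.h), large ones to the mmap-based SecondaryAllocator. Every
// chunk carries a ChunkMetadata record, which is what the leak scanner
// inspects.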
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

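// Fills in the chunk's metadata: leak-check tag, allocation stack (interned
// in the stack depot) and requested size. The `allocated` flag occupies the
// first byte of ChunkMetadata and is set last, via a relaxed atomic store,
// so a chunk becomes visible to the scanner only once fully described; leak
// scanning runs with the world stopped, so relaxed ordering is sufficient.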
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

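// Note: the two-argument overload below does not call itself; the final
// statement resolves to the three-argument ReportAllocationSizeTooBig from
// sanitizer_allocator_report.h, which does not return.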
static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
}

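// Central allocation path: enforces the per-arch size cap, allocates from
// the combined allocator, zeroes the chunk if requested (only needed for
// primary chunks; secondary chunks come from freshly mmap'ed pages that the
// kernel has already zeroed), then registers the result for leak tracking.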
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize)
    return ReportAllocationSizeTooBig(size, stack);
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

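// calloc path: CheckForCallocOverflow rejects nmemb * size products that
// would overflow uptr before the multiplication is actually performed.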
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

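// Free hooks run first, while the chunk is still registered; only then is
// the metadata cleared and the memory returned to the allocator.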
void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

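// realloc is unregister + allocator-level reallocate + re-register, so a
// reallocated chunk gets a fresh allocation stack. An over-limit request
// deallocates the old chunk before reporting.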
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    allocator.Deallocate(GetAllocatorCache(), p);
    return ReportAllocationSizeTooBig(new_size, stack);
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

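// The lsan_* entry points below back the libc interceptors: they validate
// arguments the way the corresponding libc functions do and set errno on
// failure. A typical caller looks like the following sketch (the real
// interceptors live in lsan_interceptors.cc):
//   INTERCEPTOR(void*, malloc, uptr size) {
//     ENSURE_LSAN_INITED;
//     GET_STACK_TRACE_MALLOC;
//     return lsan_malloc(size, stack);
//   }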
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

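// The leak checker locks the allocator before stopping the world and
// unlocks it afterwards, so allocator state cannot change mid-scan.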
void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

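// Resolves an arbitrary address to the start of the live chunk it points
// into (0 if none). Interior pointers keep a chunk reachable; a pointer one
// past a zero-sized allocation is special-cased for operator new(0).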
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

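// In LSan the user pointer coincides with the chunk start: metadata is kept
// in a separate region rather than in a header preceding the user data.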
uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

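// Backs the __lsan_ignore_object() API; the "Locked" suffix indicates the
// caller is expected to hold the required lock. Typical user code:
//   void *p = malloc(16);
//   __lsan_ignore_object(p);  // p is never reported as a leak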
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan

using namespace __lsan;

extern "C" {
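// Allocator statistics exported through the sanitizer_common interface.
// Free and unmapped byte counts are not tracked by this allocator and are
// reported as zero below.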
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
} // extern "C"