//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator2_cache;
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 64UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
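
// A chunk moves through these states in one direction: Allocate() publishes
// CHUNK_ALLOCATED with a release store, the free path flips it to
// CHUNK_QUARANTINE via an atomic compare-exchange, and the chunk only becomes
// CHUNK_AVAILABLE again when QuarantineCallback::Recycle() returns it to the
// underlying allocator.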

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
             RZSize2Log(flags()->max_redzone));
}
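
// For example, with the default redzone flags a 100-byte request falls into
// the "<= 512 - 64" bucket above, so rz_log == 2 and the chunk gets a
// RZLog2Size(2) == 64-byte redzone; flags()->redzone and flags()->max_redzone
// only clamp this adaptive choice.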

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
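
// The smallest redzone RZSize2Log accepts is 16 bytes, so ChunkHeader must fit
// in exactly 16 bytes of the left redzone; ChunkBase's extra fields overlap
// the beginning of user memory (see rounded_size in Allocate()).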

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               allocator.GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return allocator.GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};

bool AsanChunkView::IsValid() {
  return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

StackTrace AsanChunkView::GetAllocStack() {
  return GetStackTraceFromId(chunk_->alloc_context_id);
}

StackTrace AsanChunkView::GetFreeStack() {
  return GetStackTraceFromId(chunk_->free_context_id);
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
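
// Freed chunks are not returned to the underlying allocator immediately.
// They sit in the quarantine (per-thread caches plus a global fallback cache
// protected by fallback_mutex) until it overflows, at which point
// QuarantineCallback::Recycle() below actually releases them. This delays
// address reuse and makes use-after-free bugs easier to catch.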

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of the deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

void ReInitializeAllocator() {
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (UNLIKELY(!asan_inited))
    AsanInitFromRtl();
  Flags &fl = *flags();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return AllocatorReturnNull();
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }

  if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
    // Heap poisoning is enabled, but the allocator provides an unpoisoned
    // chunk. This is possible if flags()->poison_heap was disabled for some
    // time, for example, due to flags()->start_disabled.
    // Anyway, poison the block before using it for anything else.
    uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
    PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
  }

  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (alloc_beg != chunk_beg) {
    CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
    reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
    reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  m->alloc_context_id = StackDepotPut(*stack);

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
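  // For example, with the usual 8-byte shadow granularity a 13-byte
  // allocation leaves its last shadow byte equal to 13 & 7 == 5, meaning
  // "only the first 5 bytes of this 8-byte granule are addressable".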
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
  }

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
#if CAN_SANITIZE_LEAKS
  m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                               : __lsan::kDirectlyLeaked;
#endif
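  // The release store below pairs with the acquire compare-exchange in
  // AtomicallySetQuarantineFlag(), so whichever thread later frees the chunk
  // observes fully initialized metadata.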
  // Must be the last mutation of metadata in this function.
  atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void ReportInvalidFree(void *ptr, u8 chunk_state,
                              BufferedStackTrace *stack) {
  if (chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else
    ReportFreeNotMalloced((uptr)ptr, stack);
}

static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
                                        BufferedStackTrace *stack) {
  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_acquire))
    ReportInvalidFree(ptr, old_chunk_state, stack);
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}

// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
                            AllocType alloc_type) {
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  m->free_context_id = StackDepotPut(*stack);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
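  // Use the current thread's quarantine cache when we have one; threads
  // without AsanThread state (e.g. very early or very late in their lifetime)
  // fall back to the global cache guarded by fallback_mutex.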
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
                       AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;

  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  if (delete_size && flags()->new_delete_type_mismatch &&
      delete_size != m->UsedSize()) {
    ReportNewDeleteSizeMismatch(p, delete_size, stack);
  }
  ASAN_FREE_HOOK(ptr);
  // Must mark the chunk as quarantined before any changes to its metadata.
  AtomicallySetQuarantineFlag(m, ptr, stack);
  QuarantineChunk(m, ptr, stack, alloc_type);
}

static void *Reallocate(void *old_ptr, uptr new_size,
                        BufferedStackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    u8 chunk_state = m->chunk_state;
    if (chunk_state != CHUNK_ALLOCATED)
      ReportInvalidFree(old_ptr, chunk_state, stack);
    CHECK_NE(REAL(memcpy), (void*)0);
    uptr memcpy_size = Min(new_size, m->UsedSize());
    // If realloc() races with free(), we may start copying freed memory.
    // However, we will report racy double-free later anyway.
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, 0, stack, FROM_MALLOC);
  }
  return new_ptr;
}

// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
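// Maps the block returned by the underlying allocator back to its AsanChunk:
// for secondary (mmap-backed) blocks the chunk address is stored in the
// allocator metadata, for memalign'ed blocks it follows the kAllocBegMagic
// marker, and otherwise the chunk header starts right at alloc_beg.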
static AsanChunk *GetAsanChunk(void *alloc_beg) {
  if (!alloc_beg) return 0;
  if (!allocator.FromPrimary(alloc_beg)) {
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (alloc_magic[0] == kAllocBegMagic)
    return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
  return reinterpret_cast<AsanChunk *>(alloc_beg);
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

// Allocator must be locked when this function is called.
static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
  void *alloc_beg =
      allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over a freed chunk and a freed chunk
  // over an available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, 0, stack, alloc_type);
}

void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type) {
  Deallocate(ptr, size, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return AllocatorReturnNull();
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator there is no need to
  // clear it, as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, 0, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}
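
// The force_lock/force_unlock pair must release the locks in the reverse
// order of acquisition: the allocator lock is taken first and released last.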
void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::allocator.ForceLock();
}

void UnlockAllocator() {
  __asan::allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::allocator;
  *end = *begin + sizeof(__asan::allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m =
      __asan::GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}
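
// A minimal usage sketch from instrumented user code (assuming the
// declarations from <sanitizer/allocator_interface.h> are available):
//   void *p = malloc(100);
//   assert(__sanitizer_get_ownership(p));             // p is owned by ASan.
//   assert(__sanitizer_get_allocated_size(p) == 100); // user-requested size.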

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif