//===-- asan_allocator.cc -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {
// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}
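
// Illustrative mapping implied by the two helpers above (just the 3-bit
// encoding written out):
//   rz_log:       0   1   2    3    4    5    6     7
//   redzone size: 16  32  64   128  256  512  1024  2048 bytes
// e.g. RZSize2Log(256) == 4 and RZLog2Size(4) == 256.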
static AsanAllocator &get_allocator();
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;
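
// Illustrative recap of the two layouts above: for a plain allocation the
// ChunkHeader occupies the last 16 bytes of the left redzone, so the header
// address is always user_beg - kChunkHeaderSize. When the left redzone is
// larger than the header (or the user pointer was shifted right for
// alignment), the first two uptr words at the block start hold kAllocBegMagic
// and the ChunkHeader address, which is how GetAsanChunk() finds the header
// from the block beginning.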
struct ChunkHeader {
  u32 chunk_state : 8;  // Must be first.
  u32 alloc_tid : 24;
  u32 free_tid : 24;
  u32 from_memalign : 1;
  u32 alloc_type : 2;
  u32 rz_log : 3;
  u32 lsan_tag : 2;
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size : 29;
  // align < 8 -> 0
  // else -> log2(min(align, 512)) - 2
  u32 user_requested_alignment_log : 3;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED = 2,
  CHUNK_QUARANTINE = 3
};
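
// Life cycle of a chunk, as implied by the states above (sketch):
//   CHUNK_AVAILABLE --Allocate()--> CHUNK_ALLOCATED
//   CHUNK_ALLOCATED --Deallocate()--> CHUNK_QUARANTINE
//   CHUNK_QUARANTINE --QuarantineCallback::Recycle()--> CHUNK_AVAILABLE
// The ALLOCATED -> QUARANTINE transition is a compare-exchange, so a racing
// second free() observes the QUARANTINE state and is reported as double-free.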
struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               get_allocator().GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return get_allocator().GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};
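
// Example of how the accessors above resolve (illustrative): for a plain
// malloc() chunk (from_memalign == 0) with rz_log == 2, AllocBeg() is just
// Beg() - 64, while a memalign'ed chunk asks the allocator for the block
// start via GetBlockBegin(). UsedSize() reads the bitfield for primary
// allocations and the secondary allocator's metadata for large ones.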
struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}
// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  atomic_uint8_t rss_limit_exceeded;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;
  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
  }

  bool RssLimitExceeded() {
    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
  }

  void SetRssLimitExceeded(bool limit_exceeded) {
    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
  }
  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
    uptr beg = ac->Beg();
    uptr end = ac->Beg() + ac->UsedSize(true);
    uptr chunk_end = chunk + allocated_size;
    if (chunk < beg && beg < end && end <= chunk_end &&
        ac->chunk_state == CHUNK_ALLOCATED) {
      // Looks like a valid AsanChunk in use, poison redzones only.
      PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
      uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
      FastPoisonShadowPartialRightRedzone(
          end_aligned_down, end - end_aligned_down,
          chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
    } else {
      // This is either not an AsanChunk or freed or quarantined AsanChunk.
      // In either case, poison everything.
      PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
    }
  }
  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocation's redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }
  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }
  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }
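
  // Worked example (illustrative, default redzone flags 16..2048): a request
  // of 100 bytes is > 128 - 32 but <= 512 - 64, so rz_log is 2 and the chunk
  // gets a 64-byte redzone; the final Min/Max clamp only changes the result
  // when flags()->redzone or flags()->max_redzone narrow that range.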
  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }
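
  // Round-trip example for the two helpers above (illustrative):
  // memalign(32, n) gives ComputeUserRequestedAlignmentLog(32) ==
  // log2(32) - 2 == 3, and ComputeUserAlignment(3) == 1 << 5 == 32.
  // Alignments below 8 are encoded as 0, which decodes back to 0,
  // i.e. "no special alignment was requested".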
  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over freed chunk and freed chunk
    // over available chunk.
    if (left_chunk->chunk_state != right_chunk->chunk_state) {
      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }
  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (RssLimitExceeded()) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool using_primary_allocator = true;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
      needed_size += rz_size;
      using_primary_allocator = false;
    }
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               (void*)size);
        return nullptr;
      }
      ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize,
                                 stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_redzone = alloc_beg + rz_size;
    uptr user_beg = beg_plus_redzone;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->rz_log = rz_log;
    u32 alloc_tid = t ? t->tid() : 0;
    m->alloc_tid = alloc_tid;
    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    m->free_tid = kInvalidTid;
    m->from_memalign = user_beg != beg_plus_redzone;
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    }
    if (using_primary_allocator) {
      CHECK(size);
      m->user_requested_size = size;
      CHECK(allocator.FromPrimary(allocated));
    } else {
      CHECK(!allocator.FromPrimary(allocated));
      m->user_requested_size = SizeClassMap::kMaxSize;
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
      meta[0] = size;
      meta[1] = chunk_beg;
    }
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }
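
  // Worked example for Allocate() above (illustrative; 64-bit, default flags,
  // SHADOW_GRANULARITY == 8): malloc(100) arrives with alignment 8.
  // ComputeRZLog(100) == 2, so rz_size == 64; rounded_size == 104 and
  // needed_size == 168, which the primary allocator serves. The user pointer
  // is alloc_beg + 64, the ChunkHeader starts 16 bytes below it, and the
  // shadow byte for the last partial granule encodes 100 % 8 == 4 addressable
  // bytes.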
  // Set quarantine flag if chunk is allocated, issue ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    return true;
  }
  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    CHECK_GE(m->alloc_tid, 0);
    if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
      CHECK_EQ(m->free_tid, kInvalidTid);
    AsanThread *t = GetCurrentThread();
    m->free_tid = t ? t->tid() : 0;
    m->free_context_id = StackDepotPut(*stack);

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header, it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }
  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    ASAN_FREE_HOOK(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    QuarantineChunk(m, ptr, stack);
  }
  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = m->chunk_state;
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }
  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }
  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }
  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg) return nullptr;
    if (!allocator.FromPrimary(alloc_beg)) {
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
      AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
      return m;
    }
    uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    if (alloc_magic[0] == kAllocBegMagic)
      return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    return reinterpret_cast<AsanChunk *>(alloc_beg);
  }

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }
  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    if (!m1) return AsanChunkView(m1);
    sptr offset = 0;
    if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }
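
  // Example of the left-search above (illustrative): for an overflow one byte
  // past chunk A, the faulting address typically lands in the left redzone of
  // the next chunk B. GetAsanChunkByAddr(addr) returns B, the backwards scan
  // (bounded by one page) finds A, and ChooseChunk() picks the better
  // candidate (allocated, or the closer one), so the report blames an
  // overflow of A rather than an underflow of B.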
  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }
    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};
static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}
bool AsanChunkView::IsValid() const {
  return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }

StackTrace AsanChunkView::GetAllocStack() const {
  return GetStackTraceFromId(GetAllocStackId());
}

StackTrace AsanChunkView::GetFreeStack() const {
  return GetStackTraceFromId(GetFreeStackId());
}
void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return asan_realloc(p, nmemb * size, stack);
}
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}
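
// Semantics sketch for the edge cases handled above: realloc(nullptr, n)
// behaves like malloc(n); realloc(p, 0) either frees p and returns nullptr
// (when allocator_frees_and_returns_null_on_realloc_zero is set) or is
// treated as realloc(p, 1) so a valid, distinct pointer is still returned.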
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}
uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  instance.ForceLock();
}

void asan_mz_force_unlock() {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
  instance.SetRssLimitExceeded(limit_exceeded);
}
}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}
uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}
bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}
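
// Usage sketch for the ownership/size queries (illustrative, from user code
// built with -fsanitize=address):
//   void *p = malloc(100);
//   __sanitizer_get_ownership(p);       // returns 1: p is an ASan heap block.
//   __sanitizer_get_allocated_size(p);  // returns 100, the requested size.
// Querying the size of a freed or foreign pointer produces a fatal report
// (see __sanitizer_get_allocated_size below), so check ownership first when
// the pointer's origin is uncertain.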
uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *ptr, uptr size) {
  (void)ptr; (void)size;
}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
#endif