//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"

#if HWASAN_WITH_INTERCEPTORS
DEFINE_REAL(void *, realloc, void *ptr, uptr size)
DEFINE_REAL(void, free, void *ptr)
#endif

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;
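
// Fallback tags used when a Thread object (and thus its random tag generator)
// is not available, e.g. very early in process startup or during thread
// teardown.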
static const tag_t kFallbackAllocTag = 0xBB;
static const tag_t kFallbackFreeTag = 0xBC;

// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->alloc_context_id && metadata_->requested_size;
}

// Aligns the 'addr' right to the granule boundary.
static uptr AlignRight(uptr addr, uptr requested_size) {
  uptr tail_size = requested_size % kShadowAlignment;
  if (!tail_size) return addr;
  return addr + kShadowAlignment - tail_size;
}

uptr HwasanChunkView::Beg() const {
  if (metadata_ && metadata_->right_aligned)
    return AlignRight(block_, metadata_->requested_size);
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->requested_size;
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->alloc_context_id;
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}
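
// Sets up the tagging state and the allocator itself, and fills tail_magic
// with random bytes that HwasanAllocate writes after the requested bytes of
// every allocation and that HwasanDeallocate can later verify.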
void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}

void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}
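
// Rounds the requested size up to the tagging granule (kShadowAlignment);
// zero-size allocations still occupy one full granule.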
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}
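
// Allocates a block of at least TaggedSize(orig_size) bytes, records the
// requested size and allocation stack in the metadata, writes the tail magic,
// and (if enabled) tags both the memory and the returned pointer.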
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    // No thread object yet: use the global fallback cache under a lock.
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = static_cast<u32>(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    // Fill the tail of the last granule with the random tail magic; the last
    // byte is reserved for the short granule tag.
    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
                    size - orig_size - 1);
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
  // retag to 0.
  if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
      if (full_granule_size != tag_size) {
        // The last granule is only partially used: tag it in shadow with the
        // number of used bytes (a "short granule") and stash the real tag in
        // the granule's last byte.
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}
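
// Returns true if the tag embedded in 'tagged_ptr' matches the memory tag of
// the granule it points into, honoring the short granule encoding where the
// real tag is stored in the granule's last byte.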
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}
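
// Validates the pointer/memory tags and the tail magic, optionally refills
// and retags the freed memory, and returns the block to the allocator (or to
// the fallback cache when no Thread object is available).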
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  if (!PointerAndMemoryTagsMatch(tagged_ptr))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));

  void *untagged_ptr = UntagPtr(tagged_ptr);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  uptr orig_size = meta->requested_size;
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  meta->requested_size = 0;
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     t ? t->GenerateRandomTag() : kFallbackFreeTag);
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}
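
// realloc is implemented as allocate + copy + free, so the resized block
// always comes back with a fresh tag.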
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));

  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old,
                    Min(new_size, static_cast<uptr>(meta->requested_size)));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

HwasanChunkView FindHeapChunkByAddress(uptr address) {
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}
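
// Returns the requested size of the chunk that 'tagged_ptr' points to, or 0
// if it does not point at the beginning of one.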
static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (b->right_aligned) {
    if (beg != reinterpret_cast<void *>(RoundDownTo(
                   reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
      return 0;
  } else {
    if (beg != untagged_ptr) return 0;
  }
  return b->requested_size;
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));

#if HWASAN_WITH_INTERCEPTORS
  // A tag of 0 means that this is a system allocator allocation, so we must
  // use the system allocator to realloc it.
  if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0)
    return REAL(realloc)(ptr, size);
#endif

  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
#if HWASAN_WITH_INTERCEPTORS
  // A tag of 0 means that this is a system allocator allocation, so we must
  // use the system allocator to free it.
  if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0)
    return REAL(free)(ptr);
#endif

  return HwasanDeallocate(stack, ptr);
}

} // namespace __hwasan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
#if HWASAN_WITH_INTERCEPTORS
  // Allocator tagging must be enabled for the system allocator fallback to work
  // correctly. This means that we can't disable it at runtime if it was enabled
  // at startup since that might result in our deallocations going to the system
  // allocator. If tagging was disabled at startup we avoid this problem by
  // disabling the fallback altogether.
  CHECK(flags()->disable_allocator_tagging);
#endif

  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}
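
// Free and unmapped byte counts are not tracked for this allocator; the next
// two interface functions return trivial placeholder values.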
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }