//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

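// Fallback tags, used when there is no current Thread to produce a random
// tag (e.g. on the early-startup allocation path).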
static const tag_t kFallbackAllocTag = 0xBB;
static const tag_t kFallbackFreeTag = 0xBC;

// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];

bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->alloc_context_id && metadata_->requested_size;
}

// Aligns the 'addr' right to the granule boundary.
static uptr AlignRight(uptr addr, uptr requested_size) {
  uptr tail_size = requested_size % kShadowAlignment;
  if (!tail_size) return addr;
  return addr + kShadowAlignment - tail_size;
}

uptr HwasanChunkView::Beg() const {
  if (metadata_ && metadata_->right_aligned)
    return AlignRight(block_, metadata_->requested_size);
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->requested_size;
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->alloc_context_id;
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  for (uptr i = 0; i < sizeof(tail_magic); i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}

void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}

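// Rounds the size up to a whole number of shadow granules; a zero-sized
// request is treated as one byte so it still occupies a granule.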
static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}

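// Allocates orig_size bytes. The size is rounded up to a whole number of
// granules, the unused tail of the last granule is filled with tail_magic,
// and, when tagging is enabled, both the memory's shadow and the returned
// pointer get a matching (random or zero) tag.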
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = static_cast<u32>(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (size != orig_size) {
    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
                    size - orig_size - 1);
  }

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
  // retag to 0.
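  // Short granules: when the requested size is not a multiple of
  // kShadowAlignment, the shadow of the last granule stores the number of
  // valid bytes in it rather than the tag, and the tag itself is stored in
  // the last byte of that granule.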
  if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
      tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
      uptr tag_size = orig_size ? orig_size : 1;
      uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
      user_ptr =
          (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
      if (full_granule_size != tag_size) {
        u8 *short_granule =
            reinterpret_cast<u8 *>(allocated) + full_granule_size;
        TagMemoryAligned((uptr)short_granule, kShadowAlignment,
                         tag_size % kShadowAlignment);
        short_granule[kShadowAlignment - 1] = tag;
      }
    } else {
      user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}

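// Returns true if the tag embedded in the pointer matches the tag stored in
// shadow for its address, treating short-granule encodings as a match.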
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}

static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  if (!PointerAndMemoryTagsMatch(tagged_ptr))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));

  void *untagged_ptr = UntagPtr(tagged_ptr);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  uptr orig_size = meta->requested_size;
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

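  // Check the tail magic: the allocation path filled the unused tail of the
  // last granule with tail_magic, so a mismatch here means the program wrote
  // past the requested size (but within the granule).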
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && orig_size &&
      tagged_size != orig_size) {
    uptr tail_size = tagged_size - orig_size - 1;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_magic);
  }

  meta->requested_size = 0;
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     t ? t->GenerateRandomTag() : kFallbackFreeTag);
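  // If we are on a HWASan thread, return the chunk to the thread-local cache
  // and record the free in the thread's heap_allocations ring buffer (used by
  // the report code to describe use-after-free); otherwise fall back to the
  // global cache.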
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}

static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));

  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old,
                    Min(new_size, static_cast<uptr>(meta->requested_size)));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}

static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

HwasanChunkView FindHeapChunkByAddress(uptr address) {
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

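// Returns the size that was originally requested for the chunk 'tagged_ptr'
// belongs to, or 0 if the pointer does not point at the start of a chunk;
// __sanitizer_get_ownership below is defined in terms of this.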
static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (b->right_aligned) {
    if (beg != reinterpret_cast<void *>(RoundDownTo(
            reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
      return 0;
  } else {
    if (beg != untagged_ptr) return 0;
  }
  return b->requested_size;
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return hwasan_realloc(ptr, nmemb * size, stack);
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *(void **)UntagPtr(memptr) = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }