//===-- hwasan_allocator.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"

#if HWASAN_WITH_INTERCEPTORS
DEFINE_REAL(void *, realloc, void *ptr, uptr size)
DEFINE_REAL(void, free, void *ptr)
#endif

namespace __hwasan {

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

static const tag_t kFallbackAllocTag = 0xBB;
static const tag_t kFallbackFreeTag = 0xBC;
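// These fallback tags are used only when no Thread object is available (e.g.
// an allocation made very early during process start-up), so no per-thread
// random tag generator exists yet.
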
enum RightAlignMode {
  kRightAlignNever,
  kRightAlignSometimes,
  kRightAlignAlways
};

// These two variables are initialized from flags()->malloc_align_right
// in HwasanAllocatorInit and are never changed afterwards.
static RightAlignMode right_align_mode = kRightAlignNever;
static bool right_align_8 = false;

// Initialized in HwasanAllocatorInit, and never changed afterwards.
static ALIGNED(16) u8 tail_magic[kShadowAlignment];
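// The granule bytes past a chunk's requested size are filled with this random
// pattern at allocation time (when right-alignment is off) and compared back
// in HwasanDeallocate: an overflow that stays within the last granule is
// invisible to the tag check, but it disturbs the tail magic.
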
bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->alloc_context_id && metadata_->requested_size;
}

// Aligns the 'addr' right to the granule boundary.
static uptr AlignRight(uptr addr, uptr requested_size) {
  uptr tail_size = requested_size % kShadowAlignment;
  if (!tail_size) return addr;
  if (right_align_8)
    return tail_size > 8 ? addr : addr + 8;
  return addr + kShadowAlignment - tail_size;
}
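// A worked example, assuming the usual 16-byte granules (kShadowAlignment ==
// 16): for a 13-byte allocation, AlignRight(addr, 13) returns addr + 3, so
// the last byte of the chunk lands on the last byte of its granule and a
// one-byte overflow immediately hits a differently-tagged granule. With
// right_align_8 the result stays 8-byte aligned: a tail of at most 8 bytes is
// shifted by 8, a longer tail leaves the address unchanged.
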
uptr HwasanChunkView::Beg() const {
  if (metadata_ && metadata_->right_aligned)
    return AlignRight(block_, metadata_->requested_size);
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->requested_size;
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->alloc_context_id;
}

uptr HwasanChunkView::ActualSize() const {
  return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}

bool HwasanChunkView::FromSmallHeap() const {
  return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}

void GetAllocatorStats(AllocatorStatCounters s) {
  allocator.GetStats(s);
}

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  switch (flags()->malloc_align_right) {
    case 0: break;
    case 1:
      right_align_mode = kRightAlignSometimes;
      right_align_8 = false;
      break;
    case 2:
      right_align_mode = kRightAlignAlways;
      right_align_8 = false;
      break;
    case 8:
      right_align_mode = kRightAlignSometimes;
      right_align_8 = true;
      break;
    case 9:
      right_align_mode = kRightAlignAlways;
      right_align_8 = true;
      break;
    default:
      Report("ERROR: unsupported value of malloc_align_right flag: %d\n",
             flags()->malloc_align_right);
      Die();
  }
  for (uptr i = 0; i < kShadowAlignment; i++)
    tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}
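// Summary of the malloc_align_right encoding handled above: 0 = never
// right-align, 1 = sometimes and 2 = always at byte granularity, 8 =
// sometimes and 9 = always while keeping the returned pointer 8-byte aligned.
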
void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
  allocator.SwallowCache(cache);
}

static uptr TaggedSize(uptr size) {
  if (!size) size = 1;
  uptr new_size = RoundUpTo(size, kShadowAlignment);
  CHECK_GE(new_size, size);
  return new_size;
}
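// E.g. with 16-byte granules, TaggedSize(13) == 16 and TaggedSize(0) == 16:
// memory tags are assigned per granule, so every chunk owns at least one full
// granule.
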
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = static_cast<u32>(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (!right_align_mode)
    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
                    size - orig_size);

  void *user_ptr = allocated;
  if (flags()->tag_in_malloc &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
    user_ptr = (void *)TagMemoryAligned(
        (uptr)user_ptr, size, t ? t->GenerateRandomTag() : kFallbackAllocTag);

  if ((orig_size % kShadowAlignment) && (alignment <= kShadowAlignment) &&
      right_align_mode) {
    uptr as_uptr = reinterpret_cast<uptr>(user_ptr);
    if (right_align_mode == kRightAlignAlways ||
        GetTagFromPointer(as_uptr) & 1) {  // use a tag bit as a random bit.
      user_ptr = reinterpret_cast<void *>(AlignRight(as_uptr, orig_size));
      meta->right_aligned = 1;
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}
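// Note that the whole granule-rounded block (TaggedSize(orig_size) bytes)
// receives a single tag; right-alignment only shifts the user pointer forward
// inside the block so that the chunk ends exactly at a granule boundary, and
// meta->right_aligned plus requested_size let Beg() recover the user address.
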
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
  CHECK(tagged_ptr);
  tag_t ptr_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
  tag_t mem_tag = *reinterpret_cast<tag_t *>(
      MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
  return ptr_tag == mem_tag;
}
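// This mirrors the check that compiler-inserted instrumentation performs on
// every memory access: the tag in the pointer's top byte must match the
// memory tag recorded in shadow for the granule being accessed.
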
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  if (!PointerAndMemoryTagsMatch(tagged_ptr))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));

  void *untagged_ptr = UntagPtr(tagged_ptr);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  uptr orig_size = meta->requested_size;
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && !right_align_mode && orig_size) {
    uptr tail_size = tagged_size - orig_size;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_size, tail_magic);
  }

  meta->requested_size = 0;
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (flags()->tag_in_free &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     t ? t->GenerateRandomTag() : kFallbackFreeTag);
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}
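// heap_allocations() is a per-thread ring buffer of recently freed chunks.
// It is what allows the reporting code to describe a use-after-free long
// after the chunk has been retagged and reused, by matching the faulting
// tagged pointer against a recorded {ptr, alloc/free stack id, size} entry.
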
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));

  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old,
                    Min(new_size, static_cast<uptr>(meta->requested_size)));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}
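// Unlike a plain realloc, this always moves the data into a freshly tagged
// chunk (even when shrinking), so stale pointers to the old chunk keep
// faulting after the reallocation.
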
static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

HwasanChunkView FindHeapChunkByAddress(uptr address) {
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

static uptr AllocationSize(const void *tagged_ptr) {
  const void *untagged_ptr = UntagPtr(tagged_ptr);
  if (!untagged_ptr) return 0;
  const void *beg = allocator.GetBlockBegin(untagged_ptr);
  Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
  if (b->right_aligned) {
    if (beg != reinterpret_cast<void *>(RoundDownTo(
            reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
      return 0;
  } else {
    if (beg != untagged_ptr) return 0;
  }
  return b->requested_size;
}
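// A return value of 0 doubles as the "not a live chunk owned by this
// allocator" signal; __sanitizer_get_ownership below relies on that.
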
void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));

#if HWASAN_WITH_INTERCEPTORS
  // A tag of 0 means that this is a system allocator allocation, so we must
  // use the system allocator to realloc it.
  if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0)
    return REAL(realloc)(ptr, size);
#endif

  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void hwasan_free(void *ptr, StackTrace *stack) {
#if HWASAN_WITH_INTERCEPTORS
  // A tag of 0 means that this is a system allocator allocation, so we must
  // use the system allocator to free it.
  if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0)
    return REAL(free)(ptr);
#endif

  return HwasanDeallocate(stack, ptr);
}

}  // namespace __hwasan
using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
#if HWASAN_WITH_INTERCEPTORS
  // Allocator tagging must be enabled for the system allocator fallback to work
  // correctly. This means that we can't disable it at runtime if it was enabled
  // at startup since that might result in our deallocations going to the system
  // allocator. If tagging was disabled at startup we avoid this problem by
  // disabling the fallback altogether.
  CHECK(flags()->disable_allocator_tagging);
#endif
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }