//===-- hwasan_allocator.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_poisoning.h"

namespace __hwasan {

enum {
  CHUNK_INVALID = 0,
  CHUNK_FREE = 1,
  CHUNK_ALLOCATED = 2
};

struct Metadata {
  u64 state : 2;
  u64 requested_size : 62;  // Size requested by the user.
  u32 alloc_context_id;
  u32 free_context_id;
};

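// Layout note: the two bitfields share a single u64, and together with the
// two 32-bit stack-depot ids the struct occupies 16 bytes, which is what
// kMetadataSize below reserves per chunk. One way to pin that invariant down
// would be sanitizer_common's COMPILER_CHECK:
//   COMPILER_CHECK(sizeof(Metadata) == 16);
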
bool HwasanChunkView::IsValid() const {
  return metadata_ && metadata_->state != CHUNK_INVALID;
}
bool HwasanChunkView::IsAllocated() const {
  return metadata_ && metadata_->state == CHUNK_ALLOCATED;
}
uptr HwasanChunkView::Beg() const {
  return block_;
}
uptr HwasanChunkView::End() const {
  return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
  return metadata_->requested_size;
}
u32 HwasanChunkView::GetAllocStackId() const {
  return metadata_->alloc_context_id;
}
u32 HwasanChunkView::GetFreeStackId() const {
  return metadata_->free_context_id;
}

struct HwasanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // It may later be returned to the user as the result of an mmap() call
    // or reused for another thread's stack. Make it accessible again with a
    // zero-tagged pointer.
    TagMemory(p, size, 0);
  }
};

#if !defined(__aarch64__) && !defined(__x86_64__)
#error Unsupported platform
#endif

static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2 GiB
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = __hwasan::kRegionSizeLog;
  typedef __hwasan::ByteMap ByteMap;
  typedef HwasanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<HwasanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                          SecondaryAllocator> Allocator;

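// The resulting Allocator is the usual sanitizer_common sandwich: the
// SizeClassAllocator32 primary serves small requests from size-class regions,
// the LargeMmapAllocator secondary mmap()s anything the primary cannot
// handle, and CombinedAllocator dispatches between the two, with per-thread
// AllocatorCaches keeping the common path cheap.
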
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;

static const tag_t kFallbackAllocTag = 0xBB;
static const tag_t kFallbackFreeTag = 0xBC;

void HwasanAllocatorInit() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
                       !flags()->disable_allocator_tagging);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
}

AllocatorCache *GetAllocatorCache(HwasanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void HwasanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
                            bool zeroise) {
  alignment = Max(alignment, kShadowAlignment);
  size = RoundUpTo(size, kShadowAlignment);

  if (size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, stack);
  }
  HwasanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->state = CHUNK_ALLOCATED;
  meta->requested_size = size;
  meta->alloc_context_id = StackDepotPut(*stack);
  if (zeroise)
    internal_memset(allocated, 0, size);

  void *user_ptr = allocated;
  if (flags()->tag_in_malloc &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
    user_ptr = (void *)TagMemoryAligned(
        (uptr)user_ptr, size, t ? t->GenerateRandomTag() : kFallbackAllocTag);

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}

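// To make the fast path concrete: with kShadowAlignment == 16, a malloc(40)
// is rounded up to 48 bytes, the shadow for those 48 bytes is stamped with a
// random tag (say 0xA7), and the returned pointer carries 0xA7 in its top
// byte, so only accesses through a matching pointer pass the tag check.
// (The 0xA7 value is illustrative; real tags come from GenerateRandomTag().)
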
void HwasanDeallocate(StackTrace *stack, void *user_ptr) {
  CHECK(user_ptr);
  HWASAN_FREE_HOOK(user_ptr);

  void *p = GetAddressFromPointer(user_ptr);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->state = CHUNK_FREE;
  meta->requested_size = 0;
  meta->free_context_id = StackDepotPut(*stack);
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  HwasanThread *t = GetCurrentThread();
  if (flags()->tag_in_free &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
    TagMemoryAligned((uptr)p, size,
                     t ? t->GenerateRandomTag() : kFallbackFreeTag);
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

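// Retagging on free is what turns a use-after-free into a detectable tag
// mismatch: a dangling pointer still carries the allocation-time tag while
// the shadow now holds a fresh one, so a later dereference faults with high
// probability rather than silently reading reused memory.
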
void *HwasanReallocate(StackTrace *stack, void *user_old_p, uptr new_size,
                       uptr alignment) {
  alignment = Max(alignment, kShadowAlignment);
  new_size = RoundUpTo(new_size, kShadowAlignment);

  void *old_p = GetAddressFromPointer(user_old_p);
  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    // FIXME: update stack trace for the allocation?
    meta->requested_size = new_size;
    if (!atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
      return user_old_p;
    if (flags()->retag_in_realloc) {
      HwasanThread *t = GetCurrentThread();
      return (void *)TagMemoryAligned(
          (uptr)old_p, new_size,
          t ? t->GenerateRandomTag() : kFallbackAllocTag);
    }
    if (new_size > old_size) {
      tag_t tag = GetTagFromPointer((uptr)user_old_p);
      TagMemoryAligned((uptr)old_p + old_size, new_size - old_size, tag);
    }
    return user_old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    internal_memcpy(new_p, old_p, memcpy_size);
    HwasanDeallocate(stack, old_p);
  }
  return new_p;
}

void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}

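// Example of what the overflow check guards: on a 64-bit target,
// calloc(1ULL << 62, 8) would wrap nmemb * size around to zero, so without
// CheckForCallocOverflow the call would "succeed" with a tiny allocation
// instead of failing (or reporting, per allocator_may_return_null).
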
HwasanChunkView FindHeapChunkByAddress(uptr address) {
  void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
  if (!block)
    return HwasanChunkView();
  Metadata *metadata =
      reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
  return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}

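// This lookup underpins error reporting: given a faulting address, the
// returned HwasanChunkView exposes Beg()/End() and the stack-depot ids from
// which the report code reprints the allocation and deallocation stacks.
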
static uptr AllocationSize(const void *user_ptr) {
  const void *p = GetAddressFromPointer(user_ptr);
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

void *hwasan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}

void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}

void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    HwasanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}

void *hwasan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(
      HwasanAllocate(stack, size, GetPageSizeCached(), false));
}

void *hwasan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}

void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}

int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = HwasanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by HwasanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

}  // namespace __hwasan

using namespace __hwasan;

void __hwasan_enable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}

void __hwasan_disable_allocator_tagging() {
  atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

// These statistics are not tracked for hwasan; return placeholder values.
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }