//===-- msan_allocator.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"

namespace __msan {

// Per-chunk metadata stored by the allocator; holds the size the user asked
// for, as opposed to the (possibly larger) size class the chunk came from.
struct Metadata {
  uptr requested_size;
};
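
// Map/unmap callback shared by the primary and secondary allocators. When a
// chunk of user memory is returned to the OS, its shadow (and, with origin
// tracking enabled, its origin pages) no longer carry useful state, so they
// are unpoisoned and released as well.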
struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};
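
// Platform-specific allocator parameters. x86-64 and PowerPC64 have enough
// address space for SizeClassAllocator64; MIPS64 and AArch64 use the
// region-based SizeClassAllocator32 with a two-level byte map instead.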
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
  typedef __msan::ByteMap ByteMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || \
    (SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING))
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
  typedef __msan::ByteMap ByteMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                          SecondaryAllocator> Allocator;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;
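
// Note: allocations from threads that do not yet have an MsanThread (and thus
// no thread-local cache), e.g. very early in thread setup, go through
// fallback_allocator_cache under fallback_mutex.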

void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
}

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}
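
// Common allocation path. Fresh chunks are either zeroed and unpoisoned
// (calloc) or poisoned as uninitialized; with origin tracking enabled, the
// allocation stack is recorded as the chunk's origin.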
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  MSAN_MALLOC_HOOK(allocated, size);
  return allocated;
}
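
// Common deallocation path. Freed memory is re-poisoned (with a TAG_DEALLOC
// origin if origin tracking is on) so that later reads of it are flagged as
// uses of uninitialized values.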
void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  MSAN_FREE_HOOK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}
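
// realloc: shrink or grow in place when the underlying chunk is already big
// enough, poisoning any newly exposed bytes; otherwise fall back to
// allocate + copy + free.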
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                     uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}
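
// calloc: reject nmemb * size overflow up front, then allocate zeroed (and
// therefore fully initialized) memory.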
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}
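
// Returns the user-requested size of a live chunk, or 0 if p is null or does
// not point to the beginning of a chunk we own.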
static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}
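
// Entry points called from the libc interceptors. These mirror the C
// semantics of the corresponding functions and set errno on failure, either
// via SetErrnoOnNull or the explicit assignments below.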

void *msan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

} // namespace __msan

using namespace __msan;
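
// Public __sanitizer_* allocator interface, backed by the combined allocator's
// statistics and metadata.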
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}
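
// The remaining interface functions are trivial forwards or stubs that return
// placeholder values.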
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }