//===-- msan_allocator.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//
15 #include "sanitizer_common/sanitizer_allocator.h"
16 #include "sanitizer_common/sanitizer_allocator_checks.h"
17 #include "sanitizer_common/sanitizer_allocator_interface.h"
18 #include "sanitizer_common/sanitizer_errno.h"
20 #include "msan_allocator.h"
21 #include "msan_origin.h"
22 #include "msan_thread.h"
23 #include "msan_poisoning.h"
struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};

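// Per-architecture allocator parameters. 64-bit targets with a large fixed
// address range use SizeClassAllocator64; the rest use SizeClassAllocator32
// with a two-level byte map.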
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
  typedef __msan::ByteMap ByteMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING)
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
  typedef __msan::ByteMap ByteMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                          SecondaryAllocator> Allocator;

static Allocator allocator;
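// Fallback cache (and its lock) for threads that have no MsanThread and
// therefore no thread-local allocator cache.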
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
}

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

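// Common allocation path. Picks the thread-local cache (or the fallback
// cache), records the requested size in the chunk metadata, and then either
// zeroes the memory (for calloc) or poisons it and attaches a heap origin.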
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
           (void *)size);
    return Allocator::FailureHandler::OnBadRequest();
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  MSAN_MALLOC_HOOK(allocated, size);
  return allocated;
}

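// Free path: re-poison the chunk so that later reads of freed memory are
// reported, then return it to the owning cache.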
void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  MSAN_FREE_HOOK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

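// Grow or shrink in place when the chunk is already big enough; otherwise
// allocate a new chunk and copy over the old contents together with their
// shadow and origin values.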
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                     uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

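// Returns the user-requested size if p is the start of a live heap chunk
// owned by this allocator, and 0 otherwise.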
static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

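// malloc-family entry points used by the interceptors. They forward to
// MsanAllocate/MsanDeallocate and set errno on failure.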
void *msan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
    return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
  return SetErrnoOnNull(MsanAllocate(stack, nmemb * size, sizeof(u64), true));
}

void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  // pvalloc(0) should allocate one page.
  size = size == 0 ? PageSize : RoundUpTo(size, PageSize);
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    return Allocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    return Allocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    Allocator::FailureHandler::OnBadRequest();
    return errno_EINVAL;
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

} // namespace __msan

using namespace __msan;

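// Implementation of the allocator statistics interface declared in
// sanitizer_allocator_interface.h.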
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

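// Free and unmapped byte counts are not tracked; return placeholder values.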
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }