//===-- msan_allocator.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"

namespace __msan {

// Per-chunk metadata, stored by the allocator next to each user chunk.
struct Metadata {
  uptr requested_size;
};
struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    FlushUnneededShadowMemory(MEM_TO_SHADOW(p), size);
    if (__msan_get_track_origins())
      FlushUnneededShadowMemory(MEM_TO_ORIGIN(p), size);
  }
};
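
// The unpoisoning above is what keeps later mappings correct: if the kernel
// reuses this address range for a fresh mmap(), MSan must see zero-filled,
// initialized memory there. A sketch of the hazard this avoids:
//
//   munmap(p, size);          // shadow of the old chunk was left poisoned
//   q = mmap(p, size, ...);   // kernel hands the same range back
//   x = *(int *)q;            // false use-of-uninitialized-value report
//
// FlushUnneededShadowMemory() additionally returns the physical pages backing
// the (now all-zero) shadow and origin ranges to the OS.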

#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;

typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, sizeof(Metadata),
                             SizeClassMap, kRegionSizeLog, ByteMap,
                             MsanMapUnmapCallback> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING)
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kAllocatorSize = 0x80000000000;  // 8T.
static const uptr kMetadataSize = sizeof(Metadata);
static const uptr kMaxAllowedMallocSize = 8UL << 30;

typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
                             DefaultSizeClassMap,
                             MsanMapUnmapCallback> PrimaryAllocator;
#elif defined(__powerpc64__)
static const uptr kAllocatorSpace = 0x300000000000;
static const uptr kAllocatorSize = 0x020000000000;  // 2T.
static const uptr kMetadataSize = sizeof(Metadata);
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G.

typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
                             DefaultSizeClassMap,
                             MsanMapUnmapCallback> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G.
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;

typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, sizeof(Metadata),
                             SizeClassMap, kRegionSizeLog, ByteMap,
                             MsanMapUnmapCallback> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                          SecondaryAllocator> Allocator;
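
// Small requests are served from per-size-class pools in the primary
// allocator; anything above its largest size class falls through to the
// mmap-based secondary. Roughly (exact thresholds depend on the platform's
// size class map):
//
//   allocator.Allocate(cache, 64, 8, false);       // primary, size-class pool
//   allocator.Allocate(cache, 1 << 24, 8, false);  // secondary, direct mmap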

// A single global allocator instance. The fallback cache, guarded by a
// mutex, serves code paths that run before the current MsanThread is set up.
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

void MsanAllocatorInit() {
  allocator.Init(common_flags()->allocator_may_return_null);
}

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

// Runs when a thread dies: drain its local cache back into the global
// allocator so cached chunks are not stranded.
void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: MemorySanitizer failed to allocate %zu bytes\n", size);
    return allocator.ReturnNullOrDie();
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment, false);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  MSAN_MALLOC_HOOK(allocated, size);
  return allocated;
}
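
// Net effect on the caller, in MSan terms (illustrative):
//
//   int *p = (int *)malloc(4);     // shadow poisoned, origin = alloc stack
//   if (*p) { }                    // -> use-of-uninitialized-value report
//   int *q = (int *)calloc(1, 4);  // zeroise: cleared and unpoisoned
//   if (*q) { }                    // OK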

void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  MSAN_FREE_HOOK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}
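
// With poison_in_free enabled this doubles as use-after-free detection: any
// read of the freed chunk trips on poisoned shadow, and with origin tracking
// the report points at the deallocation stack (TAG_DEALLOC above), e.g.:
//
//   free(p);
//   if (*p) { }  // -> report; origin says the value was created by a
//                //    heap deallocation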

void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return allocator.ReturnNullOrDie();
  return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
}
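
// The check above guards the nmemb * size product before it can wrap: e.g.
// calloc((uptr)1 << 40, (uptr)1 << 30) overflows 64-bit uptr to 64 bytes and
// must fail cleanly instead of returning a tiny chunk.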

void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                     uptr alignment, bool zeroise) {
  if (!old_p)
    return MsanAllocate(stack, new_size, alignment, zeroise);
  if (!new_size) {
    MsanDeallocate(stack, old_p);
    return nullptr;
  }
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here: the existing chunk is big enough.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (zeroise) {
        __msan_clear_and_unpoison((char *)old_p + old_size,
                                  new_size - old_size);
      } else if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
  if (new_p) {
    // Copy both the bytes and their shadow/origin state to the new chunk.
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}
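
// This is the entry point used by the public interceptors: malloc() in
// msan_interceptors.cc builds a stack trace and (modulo details) forwards as
//
//   return MsanReallocate(&stack, nullptr, size, sizeof(u64), false);
//
// which bottoms out in MsanAllocate() when old_p is null.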

static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  // Interior pointers are not considered owned: only an exact chunk
  // beginning has a queryable size.
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

} // namespace __msan

using namespace __msan;

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
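
// Querying the allocator from instrumented user code (illustrative):
//
//   void *p = malloc(100);
//   __sanitizer_get_ownership(p);              // 1
//   __sanitizer_get_allocated_size(p);         // 100
//   __sanitizer_get_ownership((char *)p + 1);  // 0: not a chunk beginning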