//===-- msan_allocator.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"

namespace __msan {

// Per-chunk metadata the allocator stores next to each allocation.
struct Metadata {
  uptr requested_size;
};
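
// The allocator calls OnMap/OnUnmap when it maps or unmaps user memory;
// OnUnmap unpoisons the range and returns the matching shadow (and, when
// origin tracking is on, origin) pages to the OS.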
struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};
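
// Pick a PrimaryAllocator layout per architecture: x86_64 and powerpc64 carve
// a fixed region out of the address space for SizeClassAllocator64, while
// mips64 and aarch64 (smaller or variable VMA sizes) use the region-based
// SizeClassAllocator32.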
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;

typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, sizeof(Metadata),
                             SizeClassMap, kRegionSizeLog, ByteMap,
                             MsanMapUnmapCallback> PrimaryAllocator;

#elif defined(__x86_64__)
#if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING)
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;  // 8G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;

#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;

typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, sizeof(Metadata),
                             SizeClassMap, kRegionSizeLog, ByteMap,
                             MsanMapUnmapCallback> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                          SecondaryAllocator> Allocator;
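
// One global allocator instance. fallback_allocator_cache, guarded by
// fallback_mutex, serves allocations made on threads that do not yet have an
// MsanThread object.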
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

void MsanAllocatorInit() {
  allocator.Init(
      common_flags()->allocator_may_return_null,
      common_flags()->allocator_release_to_os_interval_ms);
}
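
// The per-thread AllocatorCache lives in a raw byte buffer inside
// MsanThreadLocalMallocStorage; the CHECK_LE guards against the cache type
// outgrowing that buffer.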
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}
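
// Common allocation path: use the thread-local cache when an MsanThread
// exists (the fallback cache otherwise), record the requested size in the
// chunk metadata, then set up shadow and, if requested, origins for the chunk.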
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: MemorySanitizer failed to allocate %zu bytes\n", size);
    return allocator.ReturnNullOrDieOnBadRequest();
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment, false);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  MSAN_MALLOC_HOOK(allocated, size);
  return allocated;
}
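
// With poison_in_free, freed memory is re-poisoned and given a TAG_DEALLOC
// origin, so a later read of it is reported as a use of uninitialized memory
// whose origin chain points at this free site.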
void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  MSAN_FREE_HOOK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}
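
// calloc: reject nmemb * size multiplications that overflow before they reach
// the allocator; delegate to MsanReallocate with zeroise set so the chunk is
// zero-filled and marked initialized.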
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return allocator.ReturnNullOrDieOnBadRequest();
  return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
}
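
// realloc: resize in place when the chunk's actually allocated size already
// covers new_size; otherwise allocate a new chunk, copy Min(old, new) bytes
// along with their shadow and origins, and free the old chunk.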
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                     uptr alignment, bool zeroise) {
  if (!old_p)
    return MsanAllocate(stack, new_size, alignment, zeroise);
  if (!new_size) {
    MsanDeallocate(stack, old_p);
    return nullptr;
  }
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (zeroise) {
        __msan_clear_and_unpoison((char *)old_p + old_size,
                                  new_size - old_size);
      } else if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}
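
// Returns the requested size for a pointer that the allocator owns and that
// points at the start of a chunk; 0 for null, interior, or foreign pointers.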
static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

}  // namespace __msan

using namespace __msan;
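
// Public __sanitizer_* allocator introspection interface.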
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}
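
// Free/unmapped byte counts are not tracked for this allocator; these return
// placeholder values.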
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }