//===-- sanitizer_allocator.cc --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
#  if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
#  endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
#  include <stdlib.h>
#  define __libc_malloc malloc
#  if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p = nullptr;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
#  endif
#  define __libc_realloc realloc
#  define __libc_free free
# endif

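// In this configuration the Raw* helpers below simply forward to libc; the
// per-thread cache argument is accepted for interface compatibility but
// ignored.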
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // _aligned_malloc, but the allocated blocks can't be passed to free;
  // they need to be passed to _aligned_free. The InternalAlloc interface does
  // not account for such a requirement. Alignment does not seem to be used
  // anywhere in the runtime, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}

InternalAllocator *internal_allocator() {
  return 0;
}

#else  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

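// In the normal build the internal allocator lives in a static placeholder
// buffer and is constructed lazily; an atomic flag plus a spin mutex give
// double-checked initialization in internal_allocator() below.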
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(
          /* may_return_null */ false, kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

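// The Raw* helpers use the caller-supplied cache when one is given; otherwise
// they fall back to a single global cache guarded by
// internal_allocator_cache_mu.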
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment, false);
  }
  return internal_allocator()->Allocate(cache, size, alignment, false);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

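// Every block handed out by InternalAlloc is preceded by an 8-byte header
// stamped with kBlockMagic, i.e. [ u64 magic | user data... ]; the caller
// receives a pointer just past the magic word. InternalRealloc and
// InternalFree check the magic before touching the block, which catches
// pointers that were not obtained from InternalAlloc.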
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  if (size + sizeof(u64) < size)
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
  if (p == nullptr)
    return nullptr;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  if (addr == nullptr)
    return InternalAlloc(size, cache);
  if (size + sizeof(u64) < size)
    return nullptr;
  addr = (char*)addr - sizeof(u64);
  size = size + sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  void *p = RawInternalRealloc(addr, size, cache);
  if (p == nullptr)
    return nullptr;
  return (char*)p + sizeof(u64);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (CallocShouldReturnNullDueToOverflow(count, size))
    return internal_allocator()->ReturnNullOrDieOnBadRequest();
  void *p = InternalAlloc(count * size, cache);
  if (p) internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (addr == nullptr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}

// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;

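// LowLevelAllocator::Allocate bumps a cursor through pages obtained with
// MmapOrDie; nothing in this file ever unmaps them, so these allocations live
// for the rest of the process. The registered callback, if any, is notified
// about every new mapping.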
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

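// Returns true when n * size would overflow uptr, i.e. when a calloc-style
// request for n objects of the given size cannot be represented.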
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
  if (!size) return false;
  uptr max = (uptr)-1L;
  return (max / size) < n;
}

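// Set just before the process dies because the allocator refused to return
// null on out-of-memory, so the condition can be queried via IsReportingOOM().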
static atomic_uint8_t reporting_out_of_memory = {0};

bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }

void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
  if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  Die();
}

} // namespace __sanitizer