//===-- sanitizer_allocator.cc --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
#  if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
#  endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
#  include <stdlib.h>
#  define __libc_malloc malloc
#  if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
#  endif
#  define __libc_realloc realloc
#  define __libc_free free
# endif

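// In this configuration the Raw* helpers below simply forward to the libc
// entry points declared above; the cache parameter is unused.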
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // _aligned_malloc, but the allocated blocks can't be passed to free();
  // they need to be passed to _aligned_free. The InternalAlloc interface does
  // not account for such a requirement. Alignment does not seem to be used
  // anywhere in the runtime, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}

InternalAllocator *internal_allocator() {
  return nullptr;
}

#else  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

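// Lazy initialization with double-checked locking: the common path is a
// single acquire load, and the spin mutex is taken only until the first
// caller has run Init().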
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

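// Callers that do not supply their own InternalAllocatorCache fall back to a
// single process-wide cache guarded by a spin mutex.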
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

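// Every block returned by InternalAlloc is prefixed with a u64 magic value,
// and the user pointer points just past it:
//
//   [kBlockMagic][user data ...]
//
// InternalFree and InternalRealloc CHECK the magic, which catches frees of
// pointers that were not handed out by this allocator.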
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  if (size + sizeof(u64) < size)
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
  if (!p)
    return nullptr;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  if (!addr)
    return InternalAlloc(size, cache);
  if (size + sizeof(u64) < size)
    return nullptr;
  addr = (char*)addr - sizeof(u64);
  size = size + sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  void *p = RawInternalRealloc(addr, size, cache);
  if (!p)
    return nullptr;
  return (char*)p + sizeof(u64);
}

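// calloc-style allocation must reject count * size products that overflow;
// CheckForCallocOverflow performs that test before the multiplication below
// is trusted.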
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size)))
    return InternalAllocator::FailureHandler::OnBadRequest();
  void *p = InternalAlloc(count * size, cache);
  if (p) internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (!addr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}

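// Usage sketch for the InternalAlloc/InternalRealloc/InternalFree API
// (hypothetical caller, not part of this file):
//
//   static InternalAllocatorCache cache;  // zero-initialized; or pass
//                                         // nullptr to use the global cache
//   void *p = InternalAlloc(128, &cache);
//   p = InternalRealloc(p, 256, &cache);
//   InternalFree(p, &cache);
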
// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;

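// LowLevelAllocator is a simple bump allocator: requests are carved out of
// the current mmap-ed region, and a fresh region of at least one page is
// mapped when the remainder is too small. Memory is never unmapped.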
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, low_level_alloc_min_alignment);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

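// The minimum alignment can only be raised, never lowered: Max() keeps the
// strictest value requested so far.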
void SetLowLevelAllocateMinAlignment(uptr alignment) {
  CHECK(IsPowerOfTwo(alignment));
  low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

// Prints an error message and kills the program.
void NORETURN ReportAllocatorCannotReturnNull() {
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  Die();
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}

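// Failure-handler policies used by the allocators: ReturnNullOrDieOnFailure
// honors allocator_may_return_null, while DieOnFailure always terminates.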
void *ReturnNullOrDieOnFailure::OnBadRequest() {
  if (AllocatorMayReturnNull())
    return nullptr;
  ReportAllocatorCannotReturnNull();
}

void *ReturnNullOrDieOnFailure::OnOOM() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
  if (AllocatorMayReturnNull())
    return nullptr;
  ReportAllocatorCannotReturnNull();
}

void NORETURN *DieOnFailure::OnBadRequest() {
  ReportAllocatorCannotReturnNull();
}

void NORETURN *DieOnFailure::OnOOM() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
  ReportAllocatorCannotReturnNull();
}

} // namespace __sanitizer