//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
13 #ifndef SANITIZER_ALLOCATOR_H
14 #error This file must be included inside sanitizer_allocator.h
17 // This class can (de)allocate only large chunks of memory using mmap/unmap.
18 // The main purpose of this allocator is to cover large and rare allocation
19 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
20 template <class MapUnmapCallback = NoOpMapUnmapCallback>
21 class LargeMmapAllocator {
23 void InitLinkerInitialized(bool may_return_null) {
24 page_size_ = GetPageSizeCached();
25 atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
28 void Init(bool may_return_null) {
29 internal_memset(this, 0, sizeof(*this));
30 InitLinkerInitialized(may_return_null);
33 void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
34 CHECK(IsPowerOfTwo(alignment));
35 uptr map_size = RoundUpMapSize(size);
36 if (alignment > page_size_)
37 map_size += alignment;
39 if (map_size < size) return ReturnNullOrDieOnBadRequest();
40 uptr map_beg = reinterpret_cast<uptr>(
41 MmapOrDie(map_size, "LargeMmapAllocator"));
42 CHECK(IsAligned(map_beg, page_size_));
43 MapUnmapCallback().OnMap(map_beg, map_size);
44 uptr map_end = map_beg + map_size;
45 uptr res = map_beg + page_size_;
46 if (res & (alignment - 1)) // Align.
47 res += alignment - (res & (alignment - 1));
48 CHECK(IsAligned(res, alignment));
49 CHECK(IsAligned(res, page_size_));
50 CHECK_GE(res + size, map_beg);
51 CHECK_LE(res + size, map_end);
52 Header *h = GetHeader(res);
55 h->map_size = map_size;
56 uptr size_log = MostSignificantSetBitIndex(map_size);
57 CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
59 SpinMutexLock l(&mutex_);
60 uptr idx = n_chunks_++;
61 chunks_sorted_ = false;
62 CHECK_LT(idx, kMaxNumChunks);
66 stats.currently_allocated += map_size;
67 stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
68 stats.by_size_log[size_log]++;
69 stat->Add(AllocatorStatAllocated, map_size);
70 stat->Add(AllocatorStatMapped, map_size);
72 return reinterpret_cast<void*>(res);
75 bool MayReturnNull() const {
76 return atomic_load(&may_return_null_, memory_order_acquire);
79 void *ReturnNullOrDieOnBadRequest() {
80 if (MayReturnNull()) return nullptr;
81 ReportAllocatorCannotReturnNull(false);
84 void *ReturnNullOrDieOnOOM() {
85 if (MayReturnNull()) return nullptr;
86 ReportAllocatorCannotReturnNull(true);
89 void SetMayReturnNull(bool may_return_null) {
90 atomic_store(&may_return_null_, may_return_null, memory_order_release);
93 void Deallocate(AllocatorStats *stat, void *p) {
94 Header *h = GetHeader(p);
96 SpinMutexLock l(&mutex_);
97 uptr idx = h->chunk_idx;
98 CHECK_EQ(chunks_[idx], h);
99 CHECK_LT(idx, n_chunks_);
100 chunks_[idx] = chunks_[n_chunks_ - 1];
101 chunks_[idx]->chunk_idx = idx;
103 chunks_sorted_ = false;
105 stats.currently_allocated -= h->map_size;
106 stat->Sub(AllocatorStatAllocated, h->map_size);
107 stat->Sub(AllocatorStatMapped, h->map_size);
109 MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
110 UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
113 uptr TotalMemoryUsed() {
114 SpinMutexLock l(&mutex_);
116 for (uptr i = 0; i < n_chunks_; i++) {
117 Header *h = chunks_[i];
118 CHECK_EQ(h->chunk_idx, i);
119 res += RoundUpMapSize(h->size);
124 bool PointerIsMine(const void *p) {
125 return GetBlockBegin(p) != nullptr;
128 uptr GetActuallyAllocatedSize(void *p) {
129 return RoundUpTo(GetHeader(p)->size, page_size_);
132 // At least page_size_/2 metadata bytes is available.
133 void *GetMetaData(const void *p) {
134 // Too slow: CHECK_EQ(p, GetBlockBegin(p));
135 if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
136 Printf("%s: bad pointer %p\n", SanitizerToolName, p);
137 CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
139 return GetHeader(p) + 1;
142 void *GetBlockBegin(const void *ptr) {
143 uptr p = reinterpret_cast<uptr>(ptr);
144 SpinMutexLock l(&mutex_);
145 uptr nearest_chunk = 0;
146 // Cache-friendly linear search.
147 for (uptr i = 0; i < n_chunks_; i++) {
148 uptr ch = reinterpret_cast<uptr>(chunks_[i]);
149 if (p < ch) continue; // p is at left to this chunk, skip it.
150 if (p - ch < p - nearest_chunk)
155 Header *h = reinterpret_cast<Header *>(nearest_chunk);
156 CHECK_GE(nearest_chunk, h->map_beg);
157 CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
158 CHECK_LE(nearest_chunk, p);
159 if (h->map_beg + h->map_size <= p)
164 void EnsureSortedChunks() {
165 if (chunks_sorted_) return;
166 SortArray(reinterpret_cast<uptr*>(chunks_), n_chunks_);
167 for (uptr i = 0; i < n_chunks_; i++)
168 chunks_[i]->chunk_idx = i;
169 chunks_sorted_ = true;
172 // This function does the same as GetBlockBegin, but is much faster.
173 // Must be called with the allocator locked.
174 void *GetBlockBeginFastLocked(void *ptr) {
175 mutex_.CheckLocked();
176 uptr p = reinterpret_cast<uptr>(ptr);
178 if (!n) return nullptr;
179 EnsureSortedChunks();
180 auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
182 reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
183 if (p < min_mmap_ || p >= max_mmap_)
185 uptr beg = 0, end = n - 1;
186 // This loop is a log(n) lower_bound. It does not check for the exact match
187 // to avoid expensive cache-thrashing loads.
188 while (end - beg >= 2) {
189 uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
190 if (p < reinterpret_cast<uptr>(chunks_[mid]))
191 end = mid - 1; // We are not interested in chunks_[mid].
193 beg = mid; // chunks_[mid] may still be what we want.
197 CHECK_EQ(beg + 1, end);
198 // There are 2 chunks left, choose one.
199 if (p >= reinterpret_cast<uptr>(chunks_[end]))
203 Header *h = chunks_[beg];
204 if (h->map_beg + h->map_size <= p || p < h->map_beg)
210 Printf("Stats: LargeMmapAllocator: allocated %zd times, "
211 "remains %zd (%zd K) max %zd M; by size logs: ",
212 stats.n_allocs, stats.n_allocs - stats.n_frees,
213 stats.currently_allocated >> 10, stats.max_allocated >> 20);
214 for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
215 uptr c = stats.by_size_log[i];
217 Printf("%zd:%zd; ", i, c);
222 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
223 // introspection API.
232 // Iterate over all existing chunks.
233 // The allocator must be locked when calling this function.
234 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
235 EnsureSortedChunks(); // Avoid doing the sort while iterating.
236 for (uptr i = 0; i < n_chunks_; i++) {
238 callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
239 // Consistency check: verify that the array did not change.
240 CHECK_EQ(chunks_[i], t);
241 CHECK_EQ(chunks_[i]->chunk_idx, i);
246 static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
254 Header *GetHeader(uptr p) {
255 CHECK(IsAligned(p, page_size_));
256 return reinterpret_cast<Header*>(p - page_size_);
258 Header *GetHeader(const void *p) {
259 return GetHeader(reinterpret_cast<uptr>(p));
262 void *GetUser(Header *h) {
263 CHECK(IsAligned((uptr)h, page_size_));
264 return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
267 uptr RoundUpMapSize(uptr size) {
268 return RoundUpTo(size, page_size_) + page_size_;
272 Header *chunks_[kMaxNumChunks];
276 uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
278 atomic_uint8_t may_return_null_;