//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif
// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
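// Each chunk lives in its own mmap-ed region: the first page holds the Header
// and per-chunk metadata, and the user block starts right after it. When the
// requested alignment does not exceed the page size, the layout is roughly:
//   [map_beg | Header + metadata | user block (res) ............ map_end]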
template <class MapUnmapCallback = NoOpMapUnmapCallback,
          class FailureHandlerT = ReturnNullOrDieOnFailure>
class LargeMmapAllocator {
 public:
  typedef FailureHandlerT FailureHandler;
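  // A minimal usage sketch (the stats object, size and alignment values are
  // illustrative):
  //   LargeMmapAllocator<> a;
  //   a.Init();
  //   AllocatorStats stats;
  //   stats.Init();
  //   void *p = a.Allocate(&stats, 1 << 20, 8);
  //   CHECK(a.PointerIsMine(p));
  //   a.Deallocate(&stats, p);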
  void InitLinkerInitialized() {
    page_size_ = GetPageSizeCached();
  }

  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }
  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // map_size overflowed.
    if (map_size < size)
      return FailureHandler::OnBadRequest();
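    // The extra page added by RoundUpMapSize holds the Header; the extra
    // alignment bytes added above guarantee that an address aligned to
    // `alignment` (preceded by that header page) still fits in the mapping.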
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
    if (!map_beg)
      return FailureHandler::OnOOM();
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
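    // stats.by_size_log[i] counts allocations whose map_size falls in
    // [2^i, 2^(i+1)), i.e. whose most significant set bit is bit i.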
    {
      SpinMutexLock l(&mutex_);
      uptr idx = n_chunks_++;
      chunks_sorted_ = false;
      CHECK_LT(idx, kMaxNumChunks);
      h->chunk_idx = idx;
      chunks_[idx] = h;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }
  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
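      // O(1) removal: move the last chunk into the freed slot, fix up its
      // chunk_idx, then shrink the array.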
      chunks_[idx] = chunks_[n_chunks_ - 1];
      chunks_[idx]->chunk_idx = idx;
      n_chunks_--;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }
  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }
  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != nullptr;
  }
  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }
  // At least page_size_/2 metadata bytes are available.
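  // The metadata lives in the header page, right after the Header struct
  // itself, so roughly page_size_ - sizeof(Header) bytes are usable.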
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }
  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    Header *h = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h);
  }
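  // Sorts chunks_ by address so GetBlockBeginFastLocked can binary-search;
  // chunk_idx is re-assigned afterwards because Deallocate relies on it.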
  void EnsureSortedChunks() {
    if (chunks_sorted_) return;
    SortArray(reinterpret_cast<uptr*>(chunks_), n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++)
      chunks_[i]->chunk_idx = i;
    chunks_sorted_ = true;
  }
  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    EnsureSortedChunks();
    auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
    auto max_mmap_ =
        reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
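    // E.g. with four chunks at sorted addresses A0 < A1 < A2 < A3 and p in
    // [A2, A3), it narrows [beg, end] to [2, 3]; the code below then settles
    // on beg == 2.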
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks_[mid]))
        end = mid - 1;  // We are not interested in chunks_[mid].
      else
        beg = mid;  // chunks_[mid] may still be what we want.
    }
    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks_[end]))
        beg = end;
    }
    Header *h = chunks_[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h);
  }
  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }
  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() { mutex_.Lock(); }
  void ForceUnlock() { mutex_.Unlock(); }
  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    EnsureSortedChunks();  // Avoid doing the sort while iterating.
    for (uptr i = 0; i < n_chunks_; i++) {
      auto t = chunks_[i];
      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
      // Consistency check: verify that the array did not change.
      CHECK_EQ(chunks_[i], t);
      CHECK_EQ(chunks_[i]->chunk_idx, i);
    }
  }
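  // A usage sketch (the callback and names are illustrative); the callback
  // receives the user-block address of each chunk:
  //   static void CountChunk(uptr chunk, void *arg) {
  //     ++*reinterpret_cast<uptr *>(arg);
  //   }
  //   ...
  //   uptr n = 0;
  //   a.ForceLock();
  //   a.ForEachChunk(CountChunk, &n);
  //   a.ForceUnlock();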
 private:
  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }
  uptr page_size_;
  Header *chunks_[kMaxNumChunks];
  uptr n_chunks_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  SpinMutex mutex_;
};