//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
// allocated chunks. To be used in memory-constrained or not memory-hungry
// cases (currently, 32-bit targets and the internal allocator).
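// Rough footprint (illustrative arithmetic, not from the original comments):
// kMaxNumChunks (1 << 15) pointer-sized slots take 128 KiB on 32-bit and
// 256 KiB on 64-bit targets, which is why this variant is reserved for the
// smaller configurations.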
class LargeMmapAllocatorPtrArrayStatic {
 public:
  INLINE void *Init() { return &p_[0]; }
  INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
 private:
  static const int kMaxNumChunks = 1 << 15;
  uptr p_[kMaxNumChunks];
};

// Much less restricted LargeMmapAllocator chunks list (compared to
// PtrArrayStatic). Backed by an mmaped memory region and can hold up to 1M
// chunks. ReservedAddressRange is used instead of just MAP_NORESERVE to get
// the same behavior on Fuchsia, which does not support MAP_NORESERVE.
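// Sizing sketch (illustrative arithmetic, assuming an 8-byte uptr): the
// reservation is kMaxNumChunks * sizeof(uptr) = (1 << 20) * 8 = 8 MiB of
// address space, committed lazily by EnsureSpace() in blocks of
// kChunksBlockCount * sizeof(uptr) = (1 << 14) * 8 = 128 KiB.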
class LargeMmapAllocatorPtrArrayDynamic {
 public:
  INLINE void *Init() {
    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
                                 SecondaryAllocatorName);
    CHECK(p);
    return reinterpret_cast<void*>(p);
  }

  INLINE void EnsureSpace(uptr n) {
    CHECK_LT(n, kMaxNumChunks);
    DCHECK(n <= n_reserved_);
    if (UNLIKELY(n == n_reserved_)) {
      address_range_.MapOrDie(
          reinterpret_cast<uptr>(address_range_.base()) +
              n_reserved_ * sizeof(uptr),
          kChunksBlockCount * sizeof(uptr));
      n_reserved_ += kChunksBlockCount;
    }
  }

 private:
  static const int kMaxNumChunks = 1 << 20;
  static const int kChunksBlockCount = 1 << 14;
  ReservedAddressRange address_range_;
  uptr n_reserved_;
};

#if SANITIZER_WORDSIZE == 32
typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
#else
typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
#endif

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
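//
// A minimal usage sketch (illustrative only, not from the original file; it
// assumes the surrounding sanitizer runtime with AllocatorStats and CHECK,
// and real tools wire the stats plumbing differently):
//
//   LargeMmapAllocator<> secondary;
//   secondary.Init();
//   AllocatorStats stats;
//   stats.Init();
//   void *p = secondary.Allocate(&stats, /*size=*/1 << 20, /*alignment=*/64);
//   CHECK(secondary.PointerIsMine(p));
//   secondary.Deallocate(&stats, p);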
template <class MapUnmapCallback = NoOpMapUnmapCallback,
          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray>
class LargeMmapAllocator {
 public:
  void InitLinkerInitialized() {
    page_size_ = GetPageSizeCached();
    chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
  }

  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

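  // Layout sketch for one allocation (descriptive note, not part of the
  // original comments): the user pointer `res` is placed at least one page
  // past map_beg and then rounded up to `alignment`; its Header lives at the
  // start of the page immediately preceding `res`.
  //
  //   map_beg ... [Header | metadata ...][user data: size bytes][round-up] map_end
  //               ^ res - page_size_      ^ res (page- and alignment-aligned)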
  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size) {
      Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, map_size, alignment);
      return nullptr;
    }
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
    if (!map_beg)
      return nullptr;
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      ptr_array_.EnsureSpace(n_chunks_);
      uptr idx = n_chunks_++;
      chunks_[idx] = h;
      h->chunk_idx = idx;
      chunks_sorted_ = false;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

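  // Bookkeeping note (descriptive, not from the original comments): chunks_
  // is kept as an unsorted array of Header pointers. Deallocate() removes an
  // entry by moving the last element into its slot, and both paths clear
  // chunks_sorted_ so that EnsureSortedChunks() can re-sort lazily when a
  // sorted view is needed.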
  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[--n_chunks_];
      chunks_[idx]->chunk_idx = idx;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

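  // Where the page_size_/2 figure above comes from (illustrative reasoning):
  // the metadata region is the tail of the header page, i.e. everything
  // between the end of the Header struct (a few uptr fields) and the user
  // pointer one page later, which leaves well over half a page.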

  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    Header *h = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h);
  }

  void EnsureSortedChunks() {
    if (chunks_sorted_) return;
    Sort(reinterpret_cast<uptr *>(chunks_), n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++)
      chunks_[i]->chunk_idx = i;
    chunks_sorted_ = true;
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    EnsureSortedChunks();
    auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
    auto max_mmap_ =
        reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks_[mid]))
        end = mid - 1;  // We are not interested in chunks_[mid].
      else
        beg = mid;  // chunks_[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks_[end]))
        beg = end;
    }

    Header *h = chunks_[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h);
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    mutex_.Lock();
  }

  void ForceUnlock() {
    mutex_.Unlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    EnsureSortedChunks();  // Avoid doing the sort while iterating.
    for (uptr i = 0; i < n_chunks_; i++) {
      auto t = chunks_[i];
      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
      // Consistency check: verify that the array did not change.
      CHECK_EQ(chunks_[i], t);
      CHECK_EQ(chunks_[i]->chunk_idx, i);
    }
  }

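  // Illustrative (commented-out) use of ForEachChunk; CountChunksCb is a
  // hypothetical callback, and the ForceLock()/ForceUnlock() pairing assumes
  // the caller does not already hold the allocator lock:
  //
  //   static void CountChunksCb(uptr chunk, void *arg) {
  //     ++*reinterpret_cast<uptr *>(arg);
  //   }
  //   ...
  //   uptr n = 0;
  //   secondary.ForceLock();
  //   secondary.ForEachChunk(CountChunksCb, &n);
  //   secondary.ForceUnlock();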

 private:
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }
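
  // Illustrative arithmetic for RoundUpMapSize() above, assuming a 4 KiB
  // page: RoundUpMapSize(1) = RoundUpTo(1, 4096) + 4096 = 8192, i.e. even a
  // 1-byte request maps one page for user data plus one page holding the
  // Header and metadata.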

  uptr page_size_;
  Header **chunks_;
  PtrArrayT ptr_array_;
  uptr n_chunks_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  StaticSpinMutex mutex_;
};