//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;

// SizeClassAllocator64 -- allocator for 64-bit address space.
// The template parameter Params is a class containing the actual parameters.
//
// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
// Otherwise SpaceBeg=kSpaceBeg (fixed address).
// kSpaceSize is a power of two.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// FreeArray is an array of freed chunks (stored as 4-byte offsets).
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
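//
// For illustration only: a hypothetical Params class providing the members
// this allocator expects (the names and values below are examples, not
// definitions from this header):
//
//   struct ExampleAllocatorParams {
//     static const uptr kSpaceBeg = ~(uptr)0;  // SpaceBeg chosen by mmap.
//     static const uptr kSpaceSize = 0x40000000000ULL;  // 4T, power of two.
//     static const uptr kMetadataSize = 0;
//     typedef DefaultSizeClassMap SizeClassMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags =
//         SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
//   };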
struct SizeClassAllocator64FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
  };
};

template <class Params>
class SizeClassAllocator64 {
 public:
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const uptr kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks =
      Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;

  typedef SizeClassAllocator64<Params> ThisT;
  typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;

  // When we know the size class (the region base) we can represent a pointer
  // as a 4-byte integer (offset from the region start shifted right by 4).
  typedef u32 CompactPtrT;
  static const uptr kCompactPtrScale = 4;
  CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
    return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
  }
  uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
    return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
  }
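  // Worked example (illustrative values, not from this header): with
  // kCompactPtrScale == 4, a chunk at base + 0x30 is stored as the compact
  // value 0x3, and CompactPtrToPointer(base, 0x3) reconstructs base + 0x30.
  // The 4-bit scale assumes chunk offsets within a region are multiples of 16,
  // so no low bits are lost; it also extends the offset range reachable by a
  // 32-bit compact pointer to 2^36 bytes.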
  void Init(s32 release_to_os_interval_ms) {
    uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
    if (kUsingConstantSpaceBeg) {
      CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
                                             PrimaryAllocatorName, kSpaceBeg));
    } else {
      NonConstSpaceBeg = address_range.Init(TotalSpaceSize,
                                            PrimaryAllocatorName);
      CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
    }
    SetReleaseToOSIntervalMs(release_to_os_interval_ms);
    MapWithCallbackOrDie(SpaceEnd(), AdditionalSize());
    // Check that the RegionInfo array is aligned on the CacheLine size.
    DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
  }

  s32 ReleaseToOSIntervalMs() const {
    return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
                 memory_order_relaxed);
  }

  void ForceReleaseToOS() {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
      MaybeReleaseToOS(class_id, true /*force*/);
    }
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }
  NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
                                  const CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    BlockingMutexLock l(&region->mutex);
    uptr old_num_chunks = region->num_freed_chunks;
    uptr new_num_freed_chunks = old_num_chunks + n_chunks;
    // Failure to allocate free array space while releasing memory is non
    // recoverable.
    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
                                       new_num_freed_chunks))) {
      Report("FATAL: Internal error: %s's allocator exhausted the free list "
             "space for size class %zd (%zd bytes).\n", SanitizerToolName,
             class_id, ClassIdToSize(class_id));
      Die();
    }
    for (uptr i = 0; i < n_chunks; i++)
      free_array[old_num_chunks + i] = chunks[i];
    region->num_freed_chunks = new_num_freed_chunks;
    region->stats.n_freed += n_chunks;

    MaybeReleaseToOS(class_id, false /*force*/);
  }
  NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
                                 CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    BlockingMutexLock l(&region->mutex);
    if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
      if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
                                      n_chunks - region->num_freed_chunks)))
        return false;
      CHECK_GE(region->num_freed_chunks, n_chunks);
    }
    region->num_freed_chunks -= n_chunks;
    uptr base_idx = region->num_freed_chunks;
    for (uptr i = 0; i < n_chunks; i++)
      chunks[i] = free_array[base_idx + i];
    region->stats.n_allocated += n_chunks;
    return true;
  }
  bool PointerIsMine(const void *p) {
    uptr P = reinterpret_cast<uptr>(p);
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return P / kSpaceSize == kSpaceBeg / kSpaceSize;
    return P >= SpaceBeg() && P < SpaceEnd();
  }

  uptr GetRegionBegin(const void *p) {
    if (kUsingConstantSpaceBeg)
      return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
    uptr space_beg = SpaceBeg();
    return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
           space_beg;
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) const {
    return SpaceBeg() + kRegionSize * class_id;
  }

  uptr GetSizeClass(const void *p) {
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
    return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
           kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    if (!size) return nullptr;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = GetRegionBegin(p);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    if (class_id >= kNumClasses) return nullptr;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return nullptr;
  }
  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
                                    (1 + chunk_idx) * kMetadataSize);
  }
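  // Layout illustration (hypothetical kMetadataSize == 16): metadata grows
  // downwards from GetMetadataEnd(region_beg), i.e. from just below the
  // FreeArray. The MetaChunk for chunk_idx 0 occupies
  // [MetadataEnd - 16, MetadataEnd), chunk_idx 1 occupies
  // [MetadataEnd - 32, MetadataEnd - 16), and so on, matching the
  // "UserChunk1 ... <gap> MetaChunkN ... MetaChunk1 FreeArray" picture above.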
  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  void TestOnlyUnmap() {
    UnmapWithCallbackOrDie(SpaceBeg(), kSpaceSize + AdditionalSize());
  }

  static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
                                uptr stats_size) {
    for (uptr class_id = 0; class_id < stats_size; class_id++)
      if (stats[class_id] == start)
        stats[class_id] = rss;
  }

  void PrintStats(uptr class_id, uptr rss) {
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user == 0) return;
    uptr in_use = region->stats.n_allocated - region->stats.n_freed;
    uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
    Printf(
        "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
        "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
        "last released: %6zdK region: 0x%zx\n",
        region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
        region->mapped_user >> 10, region->stats.n_allocated,
        region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
        rss >> 10, region->rtoi.num_releases,
        region->rtoi.last_released_bytes >> 10,
        SpaceBeg() + kRegionSize * class_id);
  }

  void PrintStats() {
    uptr rss_stats[kNumClasses];
    for (uptr class_id = 0; class_id < kNumClasses; class_id++)
      rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
    GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);

    uptr total_mapped = 0;
    uptr total_rss = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      if (region->mapped_user != 0) {
        total_mapped += region->mapped_user;
        total_rss += rss_stats[class_id];
      }
      n_allocated += region->stats.n_allocated;
      n_freed += region->stats.n_freed;
    }

    Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
           "%zd allocations; remains %zd\n", total_mapped >> 20,
           total_rss >> 20, n_allocated, n_allocated - n_freed);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++)
      PrintStats(class_id, rss_stats[class_id]);
  }
  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = ClassIdToSize(class_id);
      uptr region_beg = SpaceBeg() + class_id * kRegionSize;
      for (uptr chunk = region_beg;
           chunk < region_beg + region->allocated_user;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
  // A packed array of counters. Each counter occupies 2^n bits, enough to store
  // counter's max_value. Ctor will try to allocate the required buffer via
  // mapper->MapPackedCounterArrayBuffer and the caller is expected to check
  // whether the initialization was successful by checking IsAllocated() result.
  // For performance reasons none of the accessors check the validity of the
  // arguments; it is assumed that index is always in the [0, n) range and the
  // value is not incremented past max_value.
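  // Worked example (illustrative numbers): with max_value == 3 each counter
  // needs 2 bits, so counter_size_bits == 2 and one u64 word packs
  // 64 / 2 == 32 counters (packing_ratio == 32, packing_ratio_log == 5).
  // Get(i) then reads word i >> 5 and extracts the 2 bits at offset
  // (i & 31) * 2.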
  template<class MemoryMapperT>
  class PackedCounterArray {
   public:
    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
        : n(num_counters), memory_mapper(mapper) {
      CHECK_GT(num_counters, 0);
      CHECK_GT(max_value, 0);
      constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
      // Rounding counter storage size up to the power of two allows for using
      // bit shifts calculating particular counter's index and offset.
      uptr counter_size_bits =
          RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
      CHECK_LE(counter_size_bits, kMaxCounterBits);
      counter_size_bits_log = Log2(counter_size_bits);
      counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);

      uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
      CHECK_GT(packing_ratio, 0);
      packing_ratio_log = Log2(packing_ratio);
      bit_offset_mask = packing_ratio - 1;

      buffer_size =
          (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
          sizeof(*buffer);
      buffer = reinterpret_cast<u64*>(
          memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
    }
    ~PackedCounterArray() {
      if (buffer) {
        memory_mapper->UnmapPackedCounterArrayBuffer(
            reinterpret_cast<uptr>(buffer), buffer_size);
      }
    }

    bool IsAllocated() const {
      return !!buffer;
    }

    u64 GetCount() const {
      return n;
    }

    uptr Get(uptr i) const {
      DCHECK_LT(i, n);
      uptr index = i >> packing_ratio_log;
      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
      return (buffer[index] >> bit_offset) & counter_mask;
    }

    void Inc(uptr i) const {
      DCHECK_LT(Get(i), counter_mask);
      uptr index = i >> packing_ratio_log;
      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
      buffer[index] += 1ULL << bit_offset;
    }

    void IncRange(uptr from, uptr to) const {
      DCHECK_LE(from, to);
      for (uptr i = from; i <= to; i++)
        Inc(i);
    }

   private:
    const u64 n;
    u64 counter_size_bits_log;
    u64 counter_mask;
    u64 packing_ratio_log;
    u64 bit_offset_mask;

    MemoryMapperT* const memory_mapper;
    u64 buffer_size;
    u64* buffer;
  };
  template<class MemoryMapperT>
  class FreePagesRangeTracker {
   public:
    explicit FreePagesRangeTracker(MemoryMapperT* mapper)
        : memory_mapper(mapper),
          page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
          in_the_range(false), current_page(0), current_range_start_page(0) {}

    void NextPage(bool freed) {
      if (freed) {
        if (!in_the_range) {
          current_range_start_page = current_page;
          in_the_range = true;
        }
      } else {
        CloseOpenedRange();
      }
      current_page++;
    }

    void Done() {
      CloseOpenedRange();
    }

   private:
    void CloseOpenedRange() {
      if (in_the_range) {
        memory_mapper->ReleasePageRangeToOS(
            current_range_start_page << page_size_scaled_log,
            current_page << page_size_scaled_log);
        in_the_range = false;
      }
    }

    MemoryMapperT* const memory_mapper;
    const uptr page_size_scaled_log;
    bool in_the_range;
    uptr current_page;
    uptr current_range_start_page;
  };
  // Iterates over the free_array to identify memory pages containing freed
  // chunks only and returns these pages back to OS.
  // allocated_pages_count is the total number of pages allocated for the
  // region.
  template<class MemoryMapperT>
  static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
                                    uptr free_array_count, uptr chunk_size,
                                    uptr allocated_pages_count,
                                    MemoryMapperT *memory_mapper) {
    const uptr page_size = GetPageSizeCached();

    // Figure out the number of chunks per page and whether we can take the
    // fast path (the number of chunks per page is the same for all pages).
    uptr full_pages_chunk_count_max;
    bool same_chunk_count_per_page;
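    // Illustrative cases assuming a 4096-byte page (numbers are examples, not
    // from this header):
    //   chunk_size == 1024: exactly 4 chunks per page, none cross a page
    //     boundary (fast path).
    //   chunk_size == 3072: 4096 % 3072 == 1024 and 3072 % 1024 == 0, so
    //     exactly 2 chunks touch every page even though chunks cross page
    //     boundaries (fast path).
    //   chunk_size == 3000: the number of chunks touching a page varies
    //     between 2 and 3, so it must be computed per page (slow path).
    //   chunk_size == 8192: each chunk covers exactly 2 pages and each page is
    //     touched by exactly 1 chunk (fast path).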
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Same number of chunks per page, no cross overs.
      full_pages_chunk_count_max = page_size / chunk_size;
      same_chunk_count_per_page = true;
    } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
        chunk_size % (page_size % chunk_size) == 0) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks, but all pages contain the same
      // number of chunks.
      full_pages_chunk_count_max = page_size / chunk_size + 1;
      same_chunk_count_per_page = true;
    } else if (chunk_size <= page_size) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks.
      full_pages_chunk_count_max = page_size / chunk_size + 2;
      same_chunk_count_per_page = false;
    } else if (chunk_size > page_size && chunk_size % page_size == 0) {
      // One chunk covers multiple pages, no cross overs.
      full_pages_chunk_count_max = 1;
      same_chunk_count_per_page = true;
    } else if (chunk_size > page_size) {
      // One chunk covers multiple pages, some chunks are crossing page
      // boundaries. Some pages contain one chunk, some contain two.
      full_pages_chunk_count_max = 2;
      same_chunk_count_per_page = false;
    } else {
      UNREACHABLE("All chunk_size/page_size ratios must be handled.");
    }

    PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
                                               full_pages_chunk_count_max,
                                               memory_mapper);
    if (!counters.IsAllocated())
      return;

    const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
    const uptr page_size_scaled = page_size >> kCompactPtrScale;
    const uptr page_size_scaled_log = Log2(page_size_scaled);

    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Each chunk affects one page only.
      for (uptr i = 0; i < free_array_count; i++)
        counters.Inc(free_array[i] >> page_size_scaled_log);
    } else {
      // In all other cases chunks might affect more than one page.
      for (uptr i = 0; i < free_array_count; i++) {
        counters.IncRange(
            free_array[i] >> page_size_scaled_log,
            (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
      }
    }

    // Iterate over pages detecting ranges of pages with chunk counters equal
    // to the expected number of chunks for the particular page.
    FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
    if (same_chunk_count_per_page) {
      // Fast path, every page has the same number of chunks affecting it.
      for (uptr i = 0; i < counters.GetCount(); i++)
        range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
    } else {
      // Slow path, go through the pages keeping count how many chunks affect
      // each page.
      const uptr pn =
          chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
      const uptr pnc = pn * chunk_size_scaled;
      // The idea is to increment the current page pointer by the first chunk
      // size, middle portion size (the portion of the page covered by chunks
      // except the first and the last one) and then the last chunk size, adding
      // up the number of chunks on the current page and checking on every step
      // whether the page boundary was crossed.
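      // Worked example (illustrative): with a 4096-byte page and 2560-byte
      // chunks (scaled by 16: page 256, chunk 160), pn == 1 and pnc == 160.
      // For page 0 the walk below counts the chunks at [0, 160) and
      // [160, 320), giving chunks_per_page == 2; for page 1 it counts the tail
      // of [160, 320), all of [320, 480) and the head of [480, 640), giving
      // chunks_per_page == 3.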
      uptr prev_page_boundary = 0;
      uptr current_boundary = 0;
      for (uptr i = 0; i < counters.GetCount(); i++) {
        uptr page_boundary = prev_page_boundary + page_size_scaled;
        uptr chunks_per_page = pn;
        if (current_boundary < page_boundary) {
          if (current_boundary > prev_page_boundary)
            chunks_per_page++;
          current_boundary += pnc;
          if (current_boundary < page_boundary) {
            chunks_per_page++;
            current_boundary += chunk_size_scaled;
          }
        }
        prev_page_boundary = page_boundary;
        range_tracker.NextPage(counters.Get(i) == chunks_per_page);
      }
    }
    range_tracker.Done();
  }
 private:
  friend class MemoryMapper;

  ReservedAddressRange address_range;

  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  // FreeArray is the array of freed chunks (stored as 4-byte offsets).
  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
  // elements, but in reality this will not happen. For simplicity we
  // dedicate 1/8 of the region's virtual space to FreeArray.
  static const uptr kFreeArraySize = kRegionSize / 8;
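  // Sizing illustration (hypothetical parameters, not from this header): with
  // kSpaceSize == 2^40 and kNumClassesRounded == 64, kRegionSize == 2^34, of
  // which kFreeArraySize == 2^31 bytes are reserved; at 4 bytes per CompactPtrT
  // that is room for 2^29 freed-chunk offsets per size class.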
  static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
  uptr NonConstSpaceBeg;
  uptr SpaceBeg() const {
    return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
  }
  uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // kRegionSize must be <= 2^36, see CompactPtrT.
  COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;
  // Call mmap for free array memory with at least this size.
  static const uptr kFreeArrayMapSize = 1 << 16;

  atomic_sint32_t release_to_os_interval_ms_;

  struct Stats {
    uptr n_allocated;
    uptr n_freed;
  };

  struct ReleaseToOsInfo {
    uptr n_freed_at_last_release;
    uptr num_releases;
    u64 last_release_at_ns;
    u64 last_released_bytes;
  };
  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
    BlockingMutex mutex;
    uptr num_freed_chunks;  // Number of elements in the freearray.
    uptr mapped_free_array;  // Bytes mapped for freearray.
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    u32 rand_state;  // Seed for random shuffle, used if kRandomShuffleChunks.
    bool exhausted;  // Whether region is out of space for new chunks.
    Stats stats;
    ReleaseToOsInfo rtoi;
  };
  COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);

  RegionInfo *GetRegionInfo(uptr class_id) const {
    DCHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
    return &regions[class_id];
  }

  uptr GetMetadataEnd(uptr region_beg) const {
    return region_beg + kRegionSize - kFreeArraySize;
  }
  uptr GetChunkIdx(uptr chunk, uptr size) const {
    if (!kUsingConstantSpaceBeg)
      chunk -= SpaceBeg();

    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32-bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }

  CompactPtrT *GetFreeArray(uptr region_beg) const {
    return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
  }
  bool MapWithCallback(uptr beg, uptr size) {
    uptr mapped = address_range.Map(beg, size);
    if (UNLIKELY(!mapped))
      return false;
    CHECK_EQ(beg, mapped);
    MapUnmapCallback().OnMap(beg, size);
    return true;
  }

  void MapWithCallbackOrDie(uptr beg, uptr size) {
    CHECK_EQ(beg, address_range.MapOrDie(beg, size));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallbackOrDie(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    address_range.Unmap(beg, size);
  }

  bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
                            uptr num_freed_chunks) {
    uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
    if (region->mapped_free_array < needed_space) {
      uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
      CHECK_LE(new_mapped_free_array, kFreeArraySize);
      uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
                             region->mapped_free_array;
      uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
      if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size)))
        return false;
      region->mapped_free_array = new_mapped_free_array;
    }
    return true;
  }
  // Check whether this size class is exhausted.
  bool IsRegionExhausted(RegionInfo *region, uptr class_id,
                         uptr additional_map_size) {
    if (LIKELY(region->mapped_user + region->mapped_meta +
               additional_map_size <= kRegionSize - kFreeArraySize))
      return false;
    if (!region->exhausted) {
      region->exhausted = true;
      Printf("%s: Out of memory. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
             kRegionSize >> 20, ClassIdToSize(class_id));
    }
    return true;
  }
  NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
                                  RegionInfo *region, uptr requested_count) {
    // region->mutex is held.
    const uptr region_beg = GetRegionBeginBySizeClass(class_id);
    const uptr size = ClassIdToSize(class_id);

    const uptr total_user_bytes =
        region->allocated_user + requested_count * size;
    // Map more space for chunks, if necessary.
    if (LIKELY(total_user_bytes > region->mapped_user)) {
      if (UNLIKELY(region->mapped_user == 0)) {
        if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
          // The random state is initialized from ASLR.
          region->rand_state = static_cast<u32>(region_beg >> 12);
        // Postpone the first release to OS attempt for ReleaseToOSIntervalMs,
        // preventing just allocated memory from being released sooner than
        // necessary and also preventing extraneous ReleaseMemoryPagesToOS calls
        // for short lived processes.
        // Do it only when the feature is turned on, to avoid a potentially
        // extraneous syscall.
        if (ReleaseToOSIntervalMs() >= 0)
          region->rtoi.last_release_at_ns = MonotonicNanoTime();
      }
      // Do the mmap for the user memory.
      const uptr user_map_size =
          RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
      if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
        return false;
      if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
                                    user_map_size)))
        return false;
      stat->Add(AllocatorStatMapped, user_map_size);
      region->mapped_user += user_map_size;
    }
    const uptr new_chunks_count =
        (region->mapped_user - region->allocated_user) / size;

    if (kMetadataSize) {
      // Calculate the required space for metadata.
      const uptr total_meta_bytes =
          region->allocated_meta + new_chunks_count * kMetadataSize;
      const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
          RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
      // Map more space for metadata, if necessary.
      if (meta_map_size) {
        if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
          return false;
        if (UNLIKELY(!MapWithCallback(
            GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
            meta_map_size)))
          return false;
        region->mapped_meta += meta_map_size;
      }
    }

    // If necessary, allocate more space for the free array and populate it
    // with newly allocated chunks.
    const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
      return false;
    CompactPtrT *free_array = GetFreeArray(region_beg);
    for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
         i++, chunk += size)
      free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
    if (kRandomShuffleChunks)
      RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
                    &region->rand_state);
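    // Note (explanatory, not from the original source): the free array is
    // consumed from its end (see GetFromAllocator), and the loop above stores
    // the lowest-address new chunk at the highest index, so without
    // kRandomShuffleChunks the freshly mapped chunks are handed out lowest
    // address first.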
    // All necessary memory is mapped and now it is safe to advance all
    // 'allocated_*' counters.
    region->num_freed_chunks += new_chunks_count;
    region->allocated_user += new_chunks_count * size;
    CHECK_LE(region->allocated_user, region->mapped_user);
    region->allocated_meta += new_chunks_count * kMetadataSize;
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    region->exhausted = false;

    // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
    // MaybeReleaseToOS from releasing just allocated pages or protect these
    // not yet used chunks some other way.

    return true;
  }
  class MemoryMapper {
   public:
    MemoryMapper(const ThisT& base_allocator, uptr class_id)
        : allocator(base_allocator),
          region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
          released_ranges_count(0),
          released_bytes(0) {
    }

    uptr GetReleasedRangesCount() const {
      return released_ranges_count;
    }

    uptr GetReleasedBytes() const {
      return released_bytes;
    }

    uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
      // TODO(alekseyshl): The idea to explore is to check if we have enough
      // space between num_freed_chunks*sizeof(CompactPtrT) and
      // mapped_free_array to fit buffer_size bytes and use that space instead
      // of mapping a temporary one.
      return reinterpret_cast<uptr>(
          MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters"));
    }

    void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
      UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size);
    }

    // Releases [from, to) range of pages back to OS.
    void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
      const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
      const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
      ReleaseMemoryPagesToOS(from_page, to_page);
      released_ranges_count++;
      released_bytes += to_page - from_page;
    }

   private:
    const ThisT& allocator;
    const uptr region_base;
    uptr released_ranges_count;
    uptr released_bytes;
  };
  // Attempts to release RAM occupied by freed chunks back to OS. The region is
  // expected to be locked.
  void MaybeReleaseToOS(uptr class_id, bool force) {
    RegionInfo *region = GetRegionInfo(class_id);
    const uptr chunk_size = ClassIdToSize(class_id);
    const uptr page_size = GetPageSizeCached();

    uptr n = region->num_freed_chunks;
    if (n * chunk_size < page_size)
      return;  // No chance to release anything.
    if ((region->stats.n_freed -
         region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
      return;  // Nothing new to release.
    }

    if (!force) {
      s32 interval_ms = ReleaseToOSIntervalMs();
      if (interval_ms < 0)
        return;

      if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
          MonotonicNanoTime()) {
        return;  // Memory was returned recently.
      }
    }
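    // Example (illustrative): with interval_ms == 5000, the check above skips
    // the release unless at least 5 seconds (5000 * 1000000 ns) have passed
    // since last_release_at_ns; force == true bypasses the interval check
    // entirely.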
    MemoryMapper memory_mapper(*this, class_id);

    ReleaseFreeMemoryToOS<MemoryMapper>(
        GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
        RoundUpTo(region->allocated_user, page_size) / page_size,
        &memory_mapper);

    if (memory_mapper.GetReleasedRangesCount() > 0) {
      region->rtoi.n_freed_at_last_release = region->stats.n_freed;
      region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
      region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
    }
    region->rtoi.last_release_at_ns = MonotonicNanoTime();
  }
};