//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"

namespace __sanitizer {

// Prints error message and kills the program.
void NORETURN ReportAllocatorCannotReturnNull();

// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
// Next 4 classes: 256 + i * 64 (i = 1 to 4).
// Next 4 classes: 512 + i * 128 (i = 1 to 4).
// ...
// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
//
// This structure of the size class map gives us:
// - Efficient table-free class-to-size and size-to-class functions.
// - Difference between two consecutive size classes is between 14% and 25%.
//
// This class also gives a hint to a thread-caching allocator about the number
// of chunks that need to be cached per-thread:
// - kMaxNumCached is the maximal number of chunks per size class.
// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
//
// Part of output of SizeClassMap::Print():
// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
//
// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
//
// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
//
// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
//
// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
//
// ...
//
// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
//
// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
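//
// Illustrative example: a request for 300 bytes falls between c16 (256) and
// c17 (320), so ClassID(300) == 17 and the allocator hands out a 320-byte
// chunk, an overhead of 20 bytes (under 7%).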

template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
class SizeClassMap {
  static const uptr kMinSizeLog = 4;
  static const uptr kMidSizeLog = kMinSizeLog + 4;
  static const uptr kMinSize = 1 << kMinSizeLog;
  static const uptr kMidSize = 1 << kMidSizeLog;
  static const uptr kMidClass = kMidSize / kMinSize;
  static const uptr S = 2;
  static const uptr M = (1 << S) - 1;

 public:
  static const uptr kMaxNumCached = kMaxNumCachedT;
  // We transfer chunks between central and thread-local free lists in batches.
  // For small size classes we allocate batches separately.
  // For large size classes we use one of the chunks to store the batch.
  struct TransferBatch {
    TransferBatch *next;
    uptr count;
    void *batch[kMaxNumCached];
  };

  static const uptr kMaxSize = 1UL << kMaxSizeLog;
  static const uptr kNumClasses =
      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
  COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
  static const uptr kNumClassesRounded =
      kNumClasses == 32  ? 32 :
      kNumClasses <= 64  ? 64 :
      kNumClasses <= 128 ? 128 : 256;
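  // For example, kMaxSizeLog = 17 yields
  // kNumClasses = 16 + (17 - 8) * 4 + 1 = 53 classes (c00 through c52,
  // consistent with the sample Print() output above), rounded up to 64.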

  static uptr Size(uptr class_id) {
    if (class_id <= kMidClass)
      return kMinSize * class_id;
    class_id -= kMidClass;
    uptr t = kMidSize << (class_id >> S);
    return t + (t >> S) * (class_id & M);
  }
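  // E.g. Size(17): class_id becomes 17 - 16 = 1, t = 256 << (1 >> 2) = 256,
  // and the result is 256 + (256 >> 2) * (1 & 3) = 320, matching row c17 of
  // the table above.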

  static uptr ClassID(uptr size) {
    if (size <= kMidSize)
      return (size + kMinSize - 1) >> kMinSizeLog;
    if (size > kMaxSize) return 0;
    uptr l = MostSignificantSetBitIndex(size);
    uptr hbits = (size >> (l - S)) & M;
    uptr lbits = size & ((1 << (l - S)) - 1);
    uptr l1 = l - kMidSizeLog;
    return kMidClass + (l1 << S) + hbits + (lbits > 0);
  }
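  // E.g. ClassID(300): l = 8, hbits = (300 >> 6) & 3 = 0,
  // lbits = 300 & 63 = 44, so the result is 16 + 0 + 0 + 1 = 17 -- the
  // inverse of Size(17) rounding 300 up to 320.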

  static uptr MaxCached(uptr class_id) {
    if (class_id == 0) return 0;
    uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
    return Max<uptr>(1, Min(kMaxNumCached, n));
  }
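  // E.g. with kMaxBytesCachedLog = 16 and kMaxNumCached = 256, a 320-byte
  // class caches min(256, 65536 / 320) = 204 chunks per thread, as in row
  // c17 of the table above.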

  static void Print() {
    uptr prev_s = 0;
    uptr total_cached = 0;
    for (uptr i = 0; i < kNumClasses; i++) {
      uptr s = Size(i);
      if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
        Printf("\n");
      uptr d = s - prev_s;
      uptr p = prev_s ? (d * 100 / prev_s) : 0;
      uptr l = s ? MostSignificantSetBitIndex(s) : 0;
      uptr cached = MaxCached(i) * s;
      Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
             "cached: %zd %zd; id %zd\n",
             i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
      total_cached += cached;
      prev_s = s;
    }
    Printf("Total cached: %zd\n", total_cached);
  }

  static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
    return Size(class_id) < sizeof(TransferBatch) -
        sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
  }
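  // That is, a TransferBatch is stored inside one of the chunks it transfers
  // only when the chunk is large enough to hold the batch header plus
  // MaxCached(class_id) pointers; smaller classes get their batches allocated
  // separately, from the size class that fits sizeof(TransferBatch).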

  static void Validate() {
    for (uptr c = 1; c < kNumClasses; c++) {
      // Printf("Validate: c%zd\n", c);
      uptr s = Size(c);
      CHECK_EQ(ClassID(s), c);
      if (c != kNumClasses - 1)
        CHECK_EQ(ClassID(s + 1), c + 1);
      CHECK_EQ(ClassID(s - 1), c);
      if (c)
        CHECK_GT(Size(c), Size(c-1));
    }
    CHECK_EQ(ClassID(kMaxSize + 1), 0);

    for (uptr s = 1; s <= kMaxSize; s++) {
      uptr c = ClassID(s);
      // Printf("s%zd => c%zd\n", s, c);
      CHECK_LT(c, kNumClasses);
      CHECK_GE(Size(c), s);
      if (c > 0)
        CHECK_LT(Size(c-1), s);
    }
  }
};

typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
typedef SizeClassMap<17, 64, 14> CompactSizeClassMap;
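
// A sanity-check sketch (hypothetical test code, not part of this header):
//   DefaultSizeClassMap::Validate();  // CHECKs that the map is consistent.
//   DefaultSizeClassMap::Print();     // Dumps the table shown above.
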
template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;

// Memory allocator statistics
enum AllocatorStat {
  AllocatorStatAllocated,
  AllocatorStatMapped,
  AllocatorStatCount
};

typedef uptr AllocatorStatCounters[AllocatorStatCount];

// Per-thread stats, live in per-thread cache.
class AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void InitLinkerInitialized() {}

  void Add(AllocatorStat i, uptr v) {
    v += atomic_load(&stats_[i], memory_order_relaxed);
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Sub(AllocatorStat i, uptr v) {
    v = atomic_load(&stats_[i], memory_order_relaxed) - v;
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Set(AllocatorStat i, uptr v) {
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  uptr Get(AllocatorStat i) const {
    return atomic_load(&stats_[i], memory_order_relaxed);
  }

 private:
  friend class AllocatorGlobalStats;
  AllocatorStats *next_;
  AllocatorStats *prev_;
  atomic_uintptr_t stats_[AllocatorStatCount];
};

// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
 public:
  void InitLinkerInitialized() {
    next_ = this;
    prev_ = this;
  }
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

  void Register(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->next_ = next_;
    s->prev_ = this;
    next_->prev_ = s;
    next_ = s;
  }

  void Unregister(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->prev_->next_ = s->next_;
    s->next_->prev_ = s->prev_;
    for (int i = 0; i < AllocatorStatCount; i++)
      Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
  }

  void Get(AllocatorStatCounters s) const {
    internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
    SpinMutexLock l(&mu_);
    const AllocatorStats *stats = this;
    for (;;) {
      for (int i = 0; i < AllocatorStatCount; i++)
        s[i] += stats->Get(AllocatorStat(i));
      stats = stats->next_;
      if (stats == this)
        break;
    }
    // All stats must be non-negative.
    for (int i = 0; i < AllocatorStatCount; i++)
      s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
  }

 private:
  mutable SpinMutex mu_;
};

// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const { }
};

// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);

// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
// Otherwise SpaceBeg=kSpaceBeg (fixed address).
// kSpaceSize is a power of two.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
    if (kUsingConstantSpaceBeg) {
      CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
          MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
    } else {
      NonConstSpaceBeg =
          reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
      CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
    }
    MapWithCallback(SpaceEnd(), AdditionalSize());
  }

  void MapWithCallback(uptr beg, uptr size) {
    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
        alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    Batch *b = region->free_list.Pop();
    if (!b)
      b = PopulateFreeList(stat, c, class_id, region);
    region->n_allocated += b->count;
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    RegionInfo *region = GetRegionInfo(class_id);
    CHECK_GT(b->count, 0);
    region->free_list.Push(b);
    region->n_freed += b->count;
  }

  bool PointerIsMine(const void *p) {
    uptr P = reinterpret_cast<uptr>(p);
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return P / kSpaceSize == kSpaceBeg / kSpaceSize;
    return P >= SpaceBeg() && P < SpaceEnd();
  }

  uptr GetSizeClass(const void *p) {
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
    return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
        kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    if (!size) return nullptr;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    if (class_id >= kNumClasses) return nullptr;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void *>(SpaceBeg() +
        (kRegionSize * (class_id + 1)) -
        (1 + chunk_idx) * kMetadataSize);
  }
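  // Metadata is laid out back-to-front from the region end, so chunk i owns
  // the kMetadataSize bytes ending at region_end - i * kMetadataSize
  // (mirroring the "MetaChunkN ... MetaChunk1" picture above).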

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
  }

  void PrintStats() {
    uptr total_mapped = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      total_mapped += region->mapped_user;
      n_allocated += region->n_allocated;
      n_freed += region->n_freed;
    }
    Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
           "remains %zd\n",
           total_mapped >> 20, n_allocated, n_allocated - n_freed);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      if (region->mapped_user == 0) continue;
      Printf("  %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
             class_id,
             SizeClassMap::Size(class_id),
             region->mapped_user >> 10,
             region->n_allocated,
             region->n_allocated - region->n_freed);
    }
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = SizeClassMap::Size(class_id);
      uptr region_beg = SpaceBeg() + class_id * kRegionSize;
      for (uptr chunk = region_beg;
           chunk < region_beg + region->allocated_user;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

 private:
  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;

  static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
  uptr NonConstSpaceBeg;
  uptr SpaceBeg() const {
    return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
  }
  uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 14;
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;

  struct RegionInfo {
    BlockingMutex mutex;
    LFStack<Batch> free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    uptr n_allocated, n_freed;  // Just stats.
  };
  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions =
        reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32-bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }
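  // E.g. a pointer 800 bytes into the region of a 320-byte class lands in
  // chunk 800 / 320 = 2, whose block begins at offset 640.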

  NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                                   uptr class_id, RegionInfo *region) {
    BlockingMutexLock l(&region->mutex);
    Batch *b = region->free_list.Pop();
    if (b)
      return b;
    uptr size = SizeClassMap::Size(class_id);
    uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + count * size;
    uptr region_beg = SpaceBeg() + kRegionSize * class_id;
    if (end_idx + size > region->mapped_user) {
      // Do the mmap for the user memory.
      uptr map_size = kUserMapSize;
      while (end_idx + size > region->mapped_user + map_size)
        map_size += kUserMapSize;
      CHECK_GE(region->mapped_user + map_size, end_idx);
      MapWithCallback(region_beg + region->mapped_user, map_size);
      stat->Add(AllocatorStatMapped, map_size);
      region->mapped_user += map_size;
    }
    uptr total_count = (region->mapped_user - beg_idx - size)
        / size / count * count;
    region->allocated_meta += total_count * kMetadataSize;
    if (region->allocated_meta > region->mapped_meta) {
      uptr map_size = kMetaMapSize;
      while (region->allocated_meta > region->mapped_meta + map_size)
        map_size += kMetaMapSize;
      // Do the mmap for the metadata.
      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
      MapWithCallback(region_beg + kRegionSize -
                      region->mapped_meta - map_size, map_size);
      region->mapped_meta += map_size;
    }
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    if (region->mapped_user + region->mapped_meta > kRegionSize) {
      Printf("%s: Out of memory. Dying. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
             kRegionSize / 1024 / 1024, size);
      Die();
    }
    for (;;) {
      if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
        b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
      else
        b = (Batch*)(region_beg + beg_idx);
      b->count = count;
      for (uptr i = 0; i < count; i++)
        b->batch[i] = (void*)(region_beg + beg_idx + i * size);
      region->allocated_user += count * size;
      CHECK_LE(region->allocated_user, region->mapped_user);
      beg_idx += count * size;
      if (beg_idx + count * size + size > region->mapped_user)
        break;
      CHECK_GT(b->count, 0);
      region->free_list.Push(b);
    }
    return b;
  }
};

// Maps integers in range [0, kSize) to u8 values.
template<u64 kSize>
class FlatByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map_, 0, sizeof(map_));
  }

  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize);
    CHECK_EQ(0U, map_[idx]);
    map_[idx] = val;
  }
  u8 operator[] (uptr idx) {
    CHECK_LT(idx, kSize);
    // FIXME: CHECK may be too expensive here.
    return map_[idx];
  }

 private:
  u8 map_[kSize];
};

// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
// It is implemented as a two-dimensional array: array of kSize1 pointers
// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map1_, 0, sizeof(map1_));
    mu_.Init();
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kSize1; i++) {
      u8 *p = Get(i);
      if (!p) continue;
      MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
      UnmapOrDie(p, kSize2);
    }
  }

  uptr size() const { return kSize1 * kSize2; }
  uptr size1() const { return kSize1; }
  uptr size2() const { return kSize2; }

  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize1 * kSize2);
    u8 *map2 = GetOrCreate(idx / kSize2);
    CHECK_EQ(0U, map2[idx % kSize2]);
    map2[idx % kSize2] = val;
  }

  u8 operator[] (uptr idx) const {
    CHECK_LT(idx, kSize1 * kSize2);
    u8 *map2 = Get(idx / kSize2);
    if (!map2) return 0;
    return map2[idx % kSize2];
  }

 private:
  u8 *Get(uptr idx) const {
    CHECK_LT(idx, kSize1);
    return reinterpret_cast<u8 *>(
        atomic_load(&map1_[idx], memory_order_acquire));
  }

  u8 *GetOrCreate(uptr idx) {
    u8 *res = Get(idx);
    if (!res) {
      SpinMutexLock l(&mu_);
      if (!(res = Get(idx))) {
        res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
        MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
        atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
                     memory_order_release);
      }
    }
    return res;
  }

  atomic_uintptr_t map1_[kSize1];
  StaticSpinMutex mu_;
};
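
// Usage sketch (hypothetical test code): a 1M-entry map whose 4K-byte
// second-level arrays are mmaped only when their range is first written:
//   TwoLevelByteMap<256, 1 << 12> m;
//   m.TestOnlyInit();
//   m.set(12345, 42);
//   CHECK_EQ(m[12345], 42);  // Reads of untouched entries return 0.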

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing the objects of this class should be
// cache-line aligned.
template <const uptr kSpaceBeg, const u64 kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          const uptr kRegionSizeLog,
          class ByteMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
        alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty())
      PopulateFreeList(stat, c, sci, class_id);
    CHECK(!sci->free_list.empty());
    Batch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    CHECK_GT(b->count, 0);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {
  }

  static uptr AdditionalSize() {
    return 0;
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<Batch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                                       "SizeClassAllocator32"));
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = SizeClassMap::Size(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = SizeClassMap::MaxCached(class_id);
    Batch *b = nullptr;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (!b) {
        if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
          b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
        else
          b = (Batch*)i;
        b->count = 0;
      }
      b->batch[b->count++] = (void*)i;
      if (b->count == max_count) {
        CHECK_GT(b->count, 0);
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    if (b) {
      CHECK_GT(b->count, 0);
      sci->free_list.push_back(b);
    }
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(allocator, class_id);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(allocator, class_id);
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  typedef typename SizeClassMap::TransferBatch Batch;
  struct PerClass {
    uptr count;
    uptr max_count;
    void *batch[2 * SizeClassMap::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCached(i);
    }
  }

  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
    CHECK_GT(b->count, 0);
    for (uptr i = 0; i < b->count; i++)
      c->batch[i] = b->batch[i];
    c->count = b->count;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
    else
      b = (Batch*)c->batch[0];
    uptr cnt = Min(c->max_count / 2, c->count);
    for (uptr i = 0; i < cnt; i++) {
      b->batch[i] = c->batch[i];
      c->batch[i] = c->batch[i + c->max_count / 2];
    }
    c->count -= cnt;
    b->count = cnt;
    CHECK_GT(b->count, 0);
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
 public:
  void InitLinkerInitialized(bool may_return_null) {
    page_size_ = GetPageSizeCached();
    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
  }

  void Init(bool may_return_null) {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized(may_return_null);
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size)
      return ReturnNullOrDie();
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      uptr idx = n_chunks_++;
      chunks_sorted_ = false;
      CHECK_LT(idx, kMaxNumChunks);
      h->chunk_idx = idx;
      chunks_[idx] = h;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  void *ReturnNullOrDie() {
    if (atomic_load(&may_return_null_, memory_order_acquire))
      return nullptr;
    ReportAllocatorCannotReturnNull();
  }

  void SetMayReturnNull(bool may_return_null) {
    atomic_store(&may_return_null_, may_return_null, memory_order_release);
  }

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[n_chunks_ - 1];
      chunks_[idx]->chunk_idx = idx;
      n_chunks_--;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes is available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
      if (p < ch) continue;  // p is at left to this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    Header *h = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    if (!chunks_sorted_) {
      // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
      SortArray(reinterpret_cast<uptr*>(chunks_), n);
      for (uptr i = 0; i < n; i++)
        chunks_[i]->chunk_idx = i;
      chunks_sorted_ = true;
      min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
      max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
          chunks_[n - 1]->map_size;
    }
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks_[mid]))
        end = mid - 1;  // We are not interested in chunks_[mid].
      else
        beg = mid;  // chunks_[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks_[end]))
        beg = end;
    }

    Header *h = chunks_[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h);
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    mutex_.Lock();
  }

  void ForceUnlock() {
    mutex_.Unlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr i = 0; i < n_chunks_; i++)
      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
  }

 private:
  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }
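  // The extra page holds the Header, so e.g. a 10000-byte request on a
  // 4K-page system maps RoundUpTo(10000, 4096) + 4096 = 16384 bytes and
  // returns the address one page past map_beg.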

  uptr page_size_;
  Header *chunks_[kMaxNumChunks];
  uptr n_chunks_;
  uptr min_mmap_, max_mmap_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  atomic_uint8_t may_return_null_;
  SpinMutex mutex_;
};

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void InitCommon(bool may_return_null) {
    primary_.Init();
    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
  }

  void InitLinkerInitialized(bool may_return_null) {
    secondary_.InitLinkerInitialized(may_return_null);
    stats_.InitLinkerInitialized();
    InitCommon(may_return_null);
  }

  void Init(bool may_return_null) {
    secondary_.Init(may_return_null);
    stats_.Init();
    InitCommon(may_return_null);
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false, bool check_rss_limit = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return ReturnNullOrDie();
    if (check_rss_limit && RssLimitIsExceeded())
      return ReturnNullOrDie();
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    bool from_primary = primary_.CanAllocate(size, alignment);
    if (from_primary)
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res && from_primary)
      internal_bzero_aligned16(res, RoundUpTo(size, 16));
    return res;
  }

  bool MayReturnNull() const {
    return atomic_load(&may_return_null_, memory_order_acquire);
  }

  void *ReturnNullOrDie() {
    if (MayReturnNull())
      return nullptr;
    ReportAllocatorCannotReturnNull();
  }

  void SetMayReturnNull(bool may_return_null) {
    secondary_.SetMayReturnNull(may_return_null);
    atomic_store(&may_return_null_, may_return_null, memory_order_release);
  }

  bool RssLimitIsExceeded() {
    return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
  }

  void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
    atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
                 memory_order_release);
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return nullptr;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(void *p) {
    return primary_.PointerIsMine(p);
  }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
  atomic_uint8_t may_return_null_;
  atomic_uint8_t rss_limit_is_exceeded_;
};
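
// Sketch of how a tool typically composes these templates (illustrative only;
// the address-space parameters here are hypothetical, each tool picks its
// own):
//   typedef SizeClassAllocator64<0x600000000000ULL /*kSpaceBeg*/,
//       0x40000000000ULL /*kSpaceSize*/, 16 /*kMetadataSize*/,
//       DefaultSizeClassMap> PrimaryAllocator;
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
//   typedef LargeMmapAllocator<> SecondaryAllocator;
//   typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
//       SecondaryAllocator> Allocator;
// Small requests then go through a per-thread AllocatorCache into the
// primary; anything the primary cannot handle falls back to mmap via the
// secondary.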

// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H