//===-- primary32.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_

#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "release.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
//
// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
// boundary, and keeps a bytemap of the mappable address space to track the
// size class they are associated with.
//
// Mapped regions are split into equally sized Blocks according to the size
// class they belong to, and the associated pointers are shuffled to prevent
// any predictable address pattern (the predictability increases with the
// block size).
//
// Regions for size class 0 are special and used to hold TransferBatches,
// which allow the transfer of arrays of pointers between the global size
// class freelist and the thread-specific freelist for said class.
//
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.
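//
// A minimal illustrative instantiation (assuming DefaultSizeClassMap from
// size_class_map.h; the actual template parameters come from the per-platform
// allocator configuration) could be:
//
//   typedef SizeClassAllocator32<DefaultSizeClassMap, 18U> Primary;
//   // 2^18 byte (256KB) regions, one bytemap entry per region.
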
template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
public:
  typedef SizeClassMapT SizeClassMap;
  // Regions should be large enough to hold the largest Block.
  COMPILER_CHECK((1UL << RegionSizeLog) >= SizeClassMap::MaxSize);
  typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef typename CacheT::TransferBatch TransferBatch;

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatch)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  void initLinkerInitialized(s32 ReleaseToOsInterval) {
    if (SCUDO_FUCHSIA)
      reportError("SizeClassAllocator32 is not supported on Fuchsia");

    PossibleRegions.initLinkerInitialized();
    MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.

    u32 Seed;
    if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
      Seed =
          static_cast<u32>(getMonotonicTime() ^
                           (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
    const uptr PageSize = getPageSizeCached();
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      Sci->RandState = getRandomU32(&Seed);
      // See comment in the 64-bit primary about releasing smaller size
      // classes.
      Sci->CanRelease = (ReleaseToOsInterval > 0) &&
                        (I != SizeClassMap::BatchClassId) &&
                        (getSizeByClassId(I) >= (PageSize / 32));
    }
    ReleaseToOsIntervalMs = ReleaseToOsInterval;
  }
  void init(s32 ReleaseToOsInterval) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(ReleaseToOsInterval);
  }

  void unmapTestOnly() {
    while (NumberOfStashedRegions > 0)
      unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
            RegionSize);
    // TODO(kostyak): unmap the TransferBatch regions as well.
    for (uptr I = 0; I < NumRegions; I++)
      if (PossibleRegions[I])
        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
    PossibleRegions.unmapTestOnly();
  }

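  // Pops a TransferBatch from the freelist of the given class, refilling the
  // freelist from a freshly mapped region if it is empty. Returns nullptr on
  // allocation failure.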
  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    TransferBatch *B = Sci->FreeList.front();
    if (B) {
      Sci->FreeList.pop_front();
    } else {
      B = populateFreeList(C, ClassId, Sci);
      if (UNLIKELY(!B))
        return nullptr;
    }
    DCHECK_GT(B->getCount(), 0);
    Sci->Stats.PoppedBlocks += B->getCount();
    return B;
  }

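  // Pushes a filled TransferBatch back onto the freelist of the given class,
  // and potentially attempts to release unused memory back to the OS.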
  void pushBatch(uptr ClassId, TransferBatch *B) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(B->getCount(), 0);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    Sci->FreeList.push_front(B);
    Sci->Stats.PushedBlocks += B->getCount();
    if (Sci->CanRelease)
      releaseToOSMaybe(Sci, ClassId);
  }

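  // disable() locks every per-class mutex; enable() unlocks them in reverse
  // order so that the unlock order mirrors the lock order.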
  void disable() {
    for (uptr I = 0; I < NumClasses; I++)
      getSizeClassInfo(I)->Mutex.lock();
  }

  void enable() {
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
      getSizeClassInfo(I)->Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) {
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
      if (PossibleRegions[I]) {
        const uptr BlockSize = getSizeByClassId(PossibleRegions[I]);
        const uptr From = I * RegionSize;
        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
        for (uptr Block = From; Block < To; Block += BlockSize)
          Callback(Block);
      }
  }

  void printStats() {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      TotalMapped += Sci->AllocatedUser;
      PoppedBlocks += Sci->Stats.PoppedBlocks;
      PushedBlocks += Sci->Stats.PushedBlocks;
    }
    Printf("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
           "remains %zu\n",
           TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
    for (uptr I = 0; I < NumClasses; I++)
      printStats(I, 0);
  }

  void releaseToOS() {
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      releaseToOSMaybe(Sci, I, /*Force=*/true);
    }
  }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr RegionSize = 1UL << RegionSizeLog;
  static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >> RegionSizeLog;
#if SCUDO_WORDSIZE == 32U
  typedef FlatByteMap<NumRegions> ByteMap;
#else
  typedef TwoLevelByteMap<(NumRegions >> 12), 1UL << 12> ByteMap;
#endif

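  // Per-class counters: PoppedBlocks counts blocks handed out to caches and
  // PushedBlocks counts blocks returned; their difference is the number of
  // blocks currently in use.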
  struct SizeClassStats {
    uptr PoppedBlocks;
    uptr PushedBlocks;
  };

  struct ReleaseToOsInfo {
    uptr PushedBlocksAtLastRelease;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
    HybridMutex Mutex;
    IntrusiveList<TransferBatch> FreeList;
    SizeClassStats Stats;
    bool CanRelease;
    u32 RandState;
    uptr AllocatedUser;
    ReleaseToOsInfo ReleaseInfo;
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0);

  uptr computeRegionId(uptr Mem) {
    const uptr Id = Mem >> RegionSizeLog;
    CHECK_LT(Id, NumRegions);
    return Id;
  }

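  // Maps twice the region size and trims the result to a RegionSize-aligned
  // region. If the mapping happened to be aligned already, the spare second
  // region is stashed for a later request rather than unmapped.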
  uptr allocateRegionSlow() {
    uptr MapSize = 2 * RegionSize;
    const uptr MapBase = reinterpret_cast<uptr>(
        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
    if (UNLIKELY(!MapBase))
      return 0;
    const uptr MapEnd = MapBase + MapSize;
    uptr Region = MapBase;
    if (isAligned(Region, RegionSize)) {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions < MaxStashedRegions)
        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
      else
        MapSize = RegionSize;
    } else {
      Region = roundUpTo(MapBase, RegionSize);
      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
      MapSize = RegionSize;
    }
    const uptr End = Region + MapSize;
    if (End != MapEnd)
      unmap(reinterpret_cast<void *>(End), MapEnd - End);
    return Region;
  }

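  // Returns a region for the given class, preferring a previously stashed one
  // over a new mapping, and records the class in the PossibleRegions bytemap
  // (regions of the special class 0 are deliberately left unrecorded).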
  uptr allocateRegion(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    uptr Region = 0;
    {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions > 0)
        Region = RegionsStash[--NumberOfStashedRegions];
    }
    if (!Region)
      Region = allocateRegionSlow();
    if (LIKELY(Region)) {
      if (ClassId) {
        const uptr RegionIndex = computeRegionId(Region);
        if (RegionIndex < MinRegionIndex)
          MinRegionIndex = RegionIndex;
        if (RegionIndex > MaxRegionIndex)
          MaxRegionIndex = RegionIndex;
        PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId));
      }
    }
    return Region;
  }

  SizeClassInfo *getSizeClassInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &SizeClassInfoArray[ClassId];
  }

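  // Shuffles an array of freshly carved block pointers (except for the batch
  // class, whose order does not matter) and appends them to the freelist as
  // full TransferBatches, leaving the last, possibly partial, batch in
  // *CurrentBatch.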
  bool populateBatches(CacheT *C, SizeClassInfo *Sci, uptr ClassId,
                       TransferBatch **CurrentBatch, u32 MaxCount,
                       void **PointersArray, u32 Count) {
    if (ClassId != SizeClassMap::BatchClassId)
      shuffle(PointersArray, Count, &Sci->RandState);
    TransferBatch *B = *CurrentBatch;
    for (uptr I = 0; I < Count; I++) {
      if (B && B->getCount() == MaxCount) {
        Sci->FreeList.push_back(B);
        B = nullptr;
      }
      if (!B) {
        B = C->createBatch(ClassId, PointersArray[I]);
        if (UNLIKELY(!B))
          return false;
        B->clear();
      }
      B->add(PointersArray[I]);
    }
    *CurrentBatch = B;
    return true;
  }

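  // Maps a new region for the class, carves it into blocks of Size bytes and
  // feeds them through populateBatches in chunks of ShuffleArraySize
  // pointers. The last batch is kept out of the freelist and returned to the
  // caller.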
  NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
                                           SizeClassInfo *Sci) {
    const uptr Region = allocateRegion(ClassId);
    if (UNLIKELY(!Region))
      return nullptr;
    C->getStats().add(StatMapped, RegionSize);
    const uptr Size = getSizeByClassId(ClassId);
    const u32 MaxCount = TransferBatch::getMaxCached(Size);
    DCHECK_GT(MaxCount, 0);
    const uptr NumberOfBlocks = RegionSize / Size;
    DCHECK_GT(NumberOfBlocks, 0);
    TransferBatch *B = nullptr;
    constexpr uptr ShuffleArraySize = 48;
    void *ShuffleArray[ShuffleArraySize];
    u32 Count = 0;
    const uptr AllocatedUser = NumberOfBlocks * Size;
    for (uptr I = Region; I < Region + AllocatedUser; I += Size) {
      ShuffleArray[Count++] = reinterpret_cast<void *>(I);
      if (Count == ShuffleArraySize) {
        if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount,
                                      ShuffleArray, Count)))
          return nullptr;
        Count = 0;
      }
    }
    if (Count) {
      if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount, ShuffleArray,
                                    Count)))
        return nullptr;
    }
    DCHECK(B);
    DCHECK_GT(B->getCount(), 0);
    Sci->AllocatedUser += AllocatedUser;
    if (Sci->CanRelease)
      Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
    return B;
  }

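  // Prints the per-class statistics line; Rss is currently always passed as 0
  // (see the TODO in the public printStats()).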
  void printStats(uptr ClassId, uptr Rss) {
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (Sci->AllocatedUser == 0)
      return;
    const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
    const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
    Printf("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu inuse: %6zu"
           " avail: %6zu rss: %6zuK\n",
           ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
           Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
           AvailableChunks, Rss >> 10);
  }

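  // Attempts to release unused pages back to the OS, bailing out early if too
  // few blocks are free, nothing was freed since the last release, or (when
  // not forced) the release interval has not elapsed yet.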
  NOINLINE void releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                 bool Force = false) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr PageSize = getPageSizeCached();

    CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
    const uptr N = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
    if (N * BlockSize < PageSize)
      return; // No chance to release anything.
    if ((Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
            BlockSize <
        PageSize) {
      return; // Nothing new to release.
    }

    if (!Force) {
      const s32 IntervalMs = ReleaseToOsIntervalMs;
      if (IntervalMs < 0)
        return;
      if (Sci->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
          getMonotonicTime()) {
        return; // Memory was returned recently.
      }
    }

    // TODO(kostyak): currently not ideal as we loop over all regions and
    // iterate multiple times over the same freelist if a ClassId spans
    // multiple regions. But it will have to do for now.
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
      if (PossibleRegions[I] == ClassId) {
        ReleaseRecorder Recorder(I * RegionSize);
        releaseFreeMemoryToOS(&Sci->FreeList, I * RegionSize,
                              RegionSize / PageSize, BlockSize, &Recorder);
        if (Recorder.getReleasedRangesCount() > 0) {
          Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
          Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
          Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
        }
      }
    }
    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
  }

  SizeClassInfo SizeClassInfoArray[NumClasses];

  ByteMap PossibleRegions;
  // Keep track of the lowest & highest regions allocated to avoid looping
  // through the whole NumRegions.
  uptr MinRegionIndex;
  uptr MaxRegionIndex;
  s32 ReleaseToOsIntervalMs;
  // Unless several threads request regions simultaneously from different size
  // classes, the stash rarely contains more than 1 entry.
  static constexpr uptr MaxStashedRegions = 4;
  HybridMutex RegionsStashMutex;
  uptr NumberOfStashedRegions;
  uptr RegionsStash[MaxStashedRegions];
};

} // namespace scudo

#endif // SCUDO_PRIMARY32_H_