//===-- primary32.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_

#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "release.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// SizeClassAllocator32 is an allocator for 32 or 64-bit address space.
//
// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
// boundary, and keeps a bytemap of the mappable address space to track the
// size class they are associated with.
//
// Mapped regions are split into equally sized Blocks according to the size
// class they belong to, and the associated pointers are shuffled to prevent
// any predictable address pattern (the predictability increases with the
// block size).
//
// Regions for size class 0 are special and used to hold TransferBatches,
// which allow the transfer of arrays of pointers from the global size class
// freelist to the thread-specific freelist for said class, and back.
//
// Memory used by this allocator is never unmapped, but can be partially
// reclaimed if the platform allows for it.
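//
// A minimal usage sketch (illustrative only: DefaultSizeClassMap and the
// region size log of 18 are assumed example parameters, not a prescribed
// configuration):
//
//   typedef SizeClassAllocator32<DefaultSizeClassMap, 18U> Primary;
//   static Primary Allocator;
//   Allocator.init(/*ReleaseToOsInterval=*/1000); // Reclaim every ~1s.
//   // Blocks are then obtained and returned through a per-thread
//   // SizeClassAllocatorLocalCache, which calls popBatch() & pushBatch().
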
template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
public:
  typedef SizeClassMapT SizeClassMap;
  // Regions should be large enough to hold the largest Block.
  COMPILER_CHECK((1UL << RegionSizeLog) >= SizeClassMap::MaxSize);
  typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef typename CacheT::TransferBatch TransferBatch;

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatch)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

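  // Seeds the per-class shuffling PRNG, and flags the size classes that are
  // allowed to be released back to the OS: release only makes sense for an
  // interval >= 0, never for the batch class, and only for classes whose
  // blocks are at least 1/32nd of a page.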
  void initLinkerInitialized(s32 ReleaseToOsInterval) {
    if (SCUDO_FUCHSIA)
      reportError("SizeClassAllocator32 is not supported on Fuchsia");

    PossibleRegions.initLinkerInitialized();
    MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.

    u32 Seed;
    if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
      Seed =
          static_cast<u32>(getMonotonicTime() ^
                           (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
    const uptr PageSize = getPageSizeCached();
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      Sci->RandState = getRandomU32(&Seed);
      // See comment in the 64-bit primary about releasing smaller size classes.
      Sci->CanRelease = (ReleaseToOsInterval >= 0) &&
                        (I != SizeClassMap::BatchClassId) &&
                        (getSizeByClassId(I) >= (PageSize / 32));
    }
    ReleaseToOsIntervalMs = ReleaseToOsInterval;
  }
  void init(s32 ReleaseToOsInterval) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(ReleaseToOsInterval);
  }

  void unmapTestOnly() {
    while (NumberOfStashedRegions > 0)
      unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
            RegionSize);
    // TODO(kostyak): unmap the TransferBatch regions as well.
    for (uptr I = 0; I < NumRegions; I++)
      if (PossibleRegions[I])
        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
    PossibleRegions.unmapTestOnly();
  }

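  // Pops a TransferBatch off the freelist of the given class, carving a new
  // Region into Blocks via populateFreeList() if the freelist is empty.
  // Returns nullptr if the refill itself fails (OOM).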
  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    TransferBatch *B = Sci->FreeList.front();
    if (B)
      Sci->FreeList.pop_front();
    else {
      B = populateFreeList(C, ClassId, Sci);
      if (UNLIKELY(!B))
        return nullptr;
    }
    DCHECK_GT(B->getCount(), 0);
    Sci->Stats.PoppedBlocks += B->getCount();
    return B;
  }

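  // Pushes a full TransferBatch onto the freelist of the given class, and
  // opportunistically releases unused pages to the OS for classes that allow
  // it.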
  void pushBatch(uptr ClassId, TransferBatch *B) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(B->getCount(), 0);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    Sci->FreeList.push_front(B);
    Sci->Stats.PushedBlocks += B->getCount();
    if (Sci->CanRelease)
      releaseToOSMaybe(Sci, ClassId);
  }

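  // Locks, respectively unlocks, all the per-class mutexes. This is meant to
  // keep the allocator state still while it is accessed externally (an
  // assumption based on similar allocators: typically around fork()).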
  void disable() {
    for (uptr I = 0; I < NumClasses; I++)
      getSizeClassInfo(I)->Mutex.lock();
  }

  void enable() {
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
      getSizeClassInfo(static_cast<uptr>(I))->Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) {
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
      if (PossibleRegions[I]) {
        const uptr BlockSize = getSizeByClassId(PossibleRegions[I]);
        const uptr From = I * RegionSize;
        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
        for (uptr Block = From; Block < To; Block += BlockSize)
          Callback(Block);
      }
  }

  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      TotalMapped += Sci->AllocatedUser;
      PoppedBlocks += Sci->Stats.PoppedBlocks;
      PushedBlocks += Sci->Stats.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
                "remains %zu\n",
                TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
    for (uptr I = 0; I < NumClasses; I++)
      getStats(Str, I, 0);
  }

  uptr releaseToOS() {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
    }
    return TotalReleasedBytes;
  }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr RegionSize = 1UL << RegionSizeLog;
  static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >> RegionSizeLog;
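  // A flat byte map can cover the whole 32-bit address space; on 64-bit
  // platforms, a two-level map keeps the footprint down by only committing
  // second-level arrays as regions get used.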
#if SCUDO_WORDSIZE == 32U
  typedef FlatByteMap<NumRegions> ByteMap;
#else
  typedef TwoLevelByteMap<(NumRegions >> 12), 1UL << 12> ByteMap;
#endif

  struct SizeClassStats {
    uptr PoppedBlocks;
    uptr PushedBlocks;
  };

  struct ReleaseToOsInfo {
    uptr PushedBlocksAtLastRelease;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
    HybridMutex Mutex;
    IntrusiveList<TransferBatch> FreeList;
    SizeClassStats Stats;
    bool CanRelease;
    u32 RandState;
    uptr AllocatedUser;
    ReleaseToOsInfo ReleaseInfo;
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0);

  uptr computeRegionId(uptr Mem) {
    const uptr Id = Mem >> RegionSizeLog;
    CHECK_LT(Id, NumRegions);
    return Id;
  }

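  // Maps twice the Region size so that a RegionSize-aligned Region can always
  // be carved out of the mapping. If the base of the mapping happens to be
  // aligned, the spare second Region is stashed for a future allocation; any
  // unaligned head and unused tail are unmapped.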
  uptr allocateRegionSlow() {
    uptr MapSize = 2 * RegionSize;
    const uptr MapBase = reinterpret_cast<uptr>(
        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
    if (UNLIKELY(!MapBase))
      return 0;
    const uptr MapEnd = MapBase + MapSize;
    uptr Region = MapBase;
    if (isAligned(Region, RegionSize)) {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions < MaxStashedRegions)
        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
      else
        MapSize = RegionSize;
    } else {
      Region = roundUpTo(MapBase, RegionSize);
      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
      MapSize = RegionSize;
    }
    const uptr End = Region + MapSize;
    if (End != MapEnd)
      unmap(reinterpret_cast<void *>(End), MapEnd - End);
    return Region;
  }

  uptr allocateRegion(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    uptr Region = 0;
    {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions > 0)
        Region = RegionsStash[--NumberOfStashedRegions];
    }
    if (!Region)
      Region = allocateRegionSlow();
    if (LIKELY(Region)) {
      if (ClassId) {
        const uptr RegionIndex = computeRegionId(Region);
        if (RegionIndex < MinRegionIndex)
          MinRegionIndex = RegionIndex;
        if (RegionIndex > MaxRegionIndex)
          MaxRegionIndex = RegionIndex;
        PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId));
      }
    }
    return Region;
  }

  SizeClassInfo *getSizeClassInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &SizeClassInfoArray[ClassId];
  }

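  // Fills TransferBatches from an array of Blocks (shuffled beforehand for
  // regular classes), pushing each batch onto the class freelist as it gets
  // full. The last, potentially incomplete, batch is handed back through
  // CurrentBatch. Returns false if a batch couldn't be created (OOM).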
  bool populateBatches(CacheT *C, SizeClassInfo *Sci, uptr ClassId,
                       TransferBatch **CurrentBatch, u32 MaxCount,
                       void **PointersArray, u32 Count) {
    if (ClassId != SizeClassMap::BatchClassId)
      shuffle(PointersArray, Count, &Sci->RandState);
    TransferBatch *B = *CurrentBatch;
    for (uptr I = 0; I < Count; I++) {
      if (B && B->getCount() == MaxCount) {
        Sci->FreeList.push_back(B);
        B = nullptr;
      }
      if (!B) {
        B = C->createBatch(ClassId, PointersArray[I]);
        if (UNLIKELY(!B))
          return false;
        B->clear();
      }
      B->add(PointersArray[I]);
    }
    *CurrentBatch = B;
    return true;
  }

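  // Maps a new Region for the given class, splits it into Blocks, shuffles
  // them in chunks of ShuffleArraySize, and links them into TransferBatches.
  // The last batch is kept out of the freelist and returned to the caller, so
  // that popBatch() can hand it out directly.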
  NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
                                           SizeClassInfo *Sci) {
    const uptr Region = allocateRegion(ClassId);
    if (UNLIKELY(!Region))
      return nullptr;
    C->getStats().add(StatMapped, RegionSize);
    const uptr Size = getSizeByClassId(ClassId);
    const u32 MaxCount = TransferBatch::getMaxCached(Size);
    DCHECK_GT(MaxCount, 0);
    const uptr NumberOfBlocks = RegionSize / Size;
    DCHECK_GT(NumberOfBlocks, 0);
    TransferBatch *B = nullptr;
    constexpr uptr ShuffleArraySize = 48;
    void *ShuffleArray[ShuffleArraySize];
    u32 Count = 0;
    const uptr AllocatedUser = NumberOfBlocks * Size;
    for (uptr I = Region; I < Region + AllocatedUser; I += Size) {
      ShuffleArray[Count++] = reinterpret_cast<void *>(I);
      if (Count == ShuffleArraySize) {
        if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount,
                                      ShuffleArray, Count)))
          return nullptr;
        Count = 0;
      }
    }
    if (Count) {
      if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount, ShuffleArray,
                                    Count)))
        return nullptr;
    }
    DCHECK_GT(B->getCount(), 0);

    C->getStats().add(StatFree, AllocatedUser);
    Sci->AllocatedUser += AllocatedUser;
    if (Sci->CanRelease)
      Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
    return B;
  }

  void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (Sci->AllocatedUser == 0)
      return;
    const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
    const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
    Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu avail: %6zu rss: %6zuK\n",
                ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
                Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
                AvailableChunks, Rss >> 10);
  }

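  // Attempts to release unused pages of the given class back to the OS. The
  // attempt is skipped if there is less than a page's worth of bytes in the
  // freelist, if less than a page's worth of blocks was pushed since the last
  // release, or (unless Force is set, as in releaseToOS()) if the configured
  // interval hasn't elapsed yet.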
  NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                 bool Force = false) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr PageSize = getPageSizeCached();

    CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
    const uptr BytesInFreeList =
        Sci->AllocatedUser -
        (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
    if (BytesInFreeList < PageSize)
      return 0; // No chance to release anything.
    if ((Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
            BlockSize <
        PageSize) {
      return 0; // Nothing new to release.
    }

    if (!Force) {
      const s32 IntervalMs = ReleaseToOsIntervalMs;
      if (IntervalMs < 0)
        return 0;
      if (Sci->ReleaseInfo.LastReleaseAtNs +
              static_cast<uptr>(IntervalMs) * 1000000ULL >
          getMonotonicTime()) {
        return 0; // Memory was returned recently.
      }
    }

    // TODO(kostyak): currently not ideal as we loop over all regions and
    // iterate multiple times over the same freelist if a ClassId spans
    // multiple regions. But it will have to do for now.
    uptr TotalReleasedBytes = 0;
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
      if (PossibleRegions[I] == ClassId) {
        ReleaseRecorder Recorder(I * RegionSize);
        releaseFreeMemoryToOS(&Sci->FreeList, I * RegionSize,
                              RegionSize / PageSize, BlockSize, &Recorder);
        if (Recorder.getReleasedRangesCount() > 0) {
          Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
          Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
          Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
          TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
        }
      }
    }
    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
    return TotalReleasedBytes;
  }

  SizeClassInfo SizeClassInfoArray[NumClasses];

  ByteMap PossibleRegions;
  // Keep track of the lowest & highest regions allocated to avoid looping
  // through the whole NumRegions.
  uptr MinRegionIndex;
  uptr MaxRegionIndex;
  s32 ReleaseToOsIntervalMs;
  // Unless several threads request regions simultaneously from different size
  // classes, the stash rarely contains more than 1 entry.
  static constexpr uptr MaxStashedRegions = 4;
  HybridMutex RegionsStashMutex;
  uptr NumberOfStashedRegions;
  uptr RegionsStash[MaxStashedRegions];
};

} // namespace scudo

#endif // SCUDO_PRIMARY32_H_