//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is
// less efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
//                                                             kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing the objects of this class should be
// cache-line aligned.
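//
// An illustrative sketch of a Params class (hypothetical; the concrete
// values and the choice of DefaultSizeClassMap, FlatByteMap and
// NoOpMapUnmapCallback are assumptions for the example, not requirements):
//
//   struct ExampleAP32 {
//     static const uptr kSpaceBeg = 0;
//     static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
//     static const uptr kMetadataSize = 16;
//     typedef DefaultSizeClassMap SizeClassMap;
//     static const uptr kRegionSizeLog = 20;
//     typedef FlatByteMap<(kSpaceSize >> kRegionSizeLog)> ByteMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;
//   };
//   typedef SizeClassAllocator32<ExampleAP32> ExamplePrimaryAllocator;
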
struct SizeClassAllocator32FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
    kUseSeparateSizeClassForBatch = 2,
  };
};

template <class Params>
class SizeClassAllocator32 {
 public:
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::ByteMap ByteMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks = Params::kFlags &
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;

  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
      count_ = count;
      CHECK_LE(count_, kMaxNumCached);
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      CHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr class_id) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
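  // The checks above hold because sizeof(TransferBatch) is
  // 2 * sizeof(uptr) (for `next` and `count_`) plus
  // kMaxNumCached * sizeof(void *) (for `batch_`); since kMaxNumCached is
  // kMaxNumCachedHint - 2, the total is exactly
  // kMaxNumCachedHint * sizeof(uptr). E.g. a hypothetical kMaxNumCachedHint
  // of 64 on a 32-bit target gives kBatchSize == 64 * 4 == 256, a power of 2.
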
  static uptr ClassIdToSize(uptr class_id) {
    return (class_id == SizeClassMap::kBatchClassID) ?
        kBatchSize : SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init(s32 release_to_os_interval_ms) {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in the 64-bit allocator.
  }

  void ForceReleaseToOS() {
    // Currently implemented in the 64-bit allocator only.
  }

  void *MapWithCallback(uptr size) {
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }
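  // Worked example for GetMetaData above (values assumed for illustration):
  // with 1 MiB regions (kRegionSizeLog == 20), kMetadataSize == 16 and a
  // 64-byte size class, the chunk at offset 128 is chunk n == 2, so its
  // metadata lives at beg + kRegionSize - 3 * 16. Metadata is laid out from
  // the region end growing downwards, mirroring the chunk order.
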
  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty() &&
        UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
      return nullptr;
    CHECK(!sci->free_list.empty());
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    CHECK_LT(class_id, kNumClasses);
    CHECK_GT(b->Count(), 0);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(b);
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    // Unlock in reverse order to avoid deadlock with ForceLock().
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }
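  // A minimal usage sketch for ForEachChunk above, assuming a caller-defined
  // CountChunks helper (hypothetical, not part of this file):
  //   static void CountChunks(uptr chunk, void *arg) {
  //     ++*reinterpret_cast<uptr *>(arg);
  //   }
  //   uptr n = 0;
  //   allocator.ForceLock();
  //   allocator.ForEachChunk(CountChunks, &n);
  //   allocator.ForceUnlock();
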
  void PrintStats() {
  }

  static uptr AdditionalSize() {
    return 0;
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    u32 rand_state;
    char padding[kCacheLineSize - 2 * sizeof(uptr) -
                 sizeof(IntrusiveList<TransferBatch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }
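  // Example for the two helpers above (with an assumed kRegionSizeLog of 20,
  // i.e. 1 MiB regions): mem == 0x12345678 yields region id 0x123 and region
  // begin 0x12300000. Both are pure bit operations, which works because every
  // region is kRegionSize-aligned by construction.
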
  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, "SizeClassAllocator32"));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
                       TransferBatch **current_batch, uptr max_count,
                       uptr *pointers_array, uptr count) {
    // If using a separate class for batches, we do not need to shuffle it.
    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
        class_id != SizeClassMap::kBatchClassID))
      RandomShuffle(pointers_array, count, &sci->rand_state);
    TransferBatch *b = *current_batch;
    for (uptr i = 0; i < count; i++) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)pointers_array[i]);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    *current_batch = b;
    return true;
  }

  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = ClassIdToSize(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    if (UNLIKELY(!reg))
      return false;
    if (kRandomShuffleChunks)
      if (UNLIKELY(sci->rand_state == 0))
        // The random state is initialized from ASLR (PIE) and time.
        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = TransferBatch::MaxCached(class_id);
    CHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    const uptr kShuffleArraySize = 48;
    uptr shuffle_array[kShuffleArraySize];
    uptr count = 0;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      shuffle_array[count++] = i;
      if (count == kShuffleArraySize) {
        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                      shuffle_array, count)))
          return false;
        count = 0;
      }
    }
    if (count) {
      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                    shuffle_array, count)))
        return false;
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};