//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is
// less efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region --
//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing the objects of this class should be
// cache-line aligned.
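//
// Params is expected to provide every member read below: kSpaceBeg,
// kSpaceSize, kMetadataSize, SizeClassMap, kRegionSizeLog, ByteMap,
// MapUnmapCallback and kFlags. As a rough sketch only -- the names and
// values here are made up, and the real configurations live in the
// tool-specific allocator headers -- a Params struct could look like:
//
//   struct ExampleAP32 {  // hypothetical
//     static const uptr kSpaceBeg = 0;
//     static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
//     static const uptr kMetadataSize = 16;
//     typedef CompactSizeClassMap SizeClassMap;
//     static const uptr kRegionSizeLog = 20;
//     typedef FlatByteMap<kSpaceSize >> kRegionSizeLog> ByteMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;
//   };
//   typedef SizeClassAllocator32<ExampleAP32> ExamplePrimary;
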
struct SizeClassAllocator32FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
  };
};

template <class Params>
class SizeClassAllocator32 {
 public:
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::ByteMap ByteMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks =
      Params::kFlags & SizeClassAllocator32FlagMasks::kRandomShuffleChunks;

  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
      count_ = count;
      CHECK_LE(count_, kMaxNumCached);
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      CHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr class_id) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(sizeof(TransferBatch) ==
                 SizeClassMap::kMaxNumCachedHint * sizeof(uptr));

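  // Note on the checks above: kMaxNumCached is kMaxNumCachedHint - 2, so the
  // batch_ array plus the two word-sized header fields (next and count_) add
  // up to exactly kMaxNumCachedHint words; the first check then pins the total
  // to a power of two, presumably so a TransferBatch packs cleanly into the
  // allocator's own size classes when batches are carved out of chunks.
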
  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init(s32 release_to_os_interval_ms) {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in the 64-bit allocator.
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

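  // Worked example with made-up numbers: if kRegionSizeLog is 20 (1 MiB
  // regions), kMetadataSize is 16 and the chunk size is 64, then for
  // p = beg + 128 we get n = 2 and meta = beg + (1 << 20) - 3 * 16. That is,
  // metadata grows down from the region end while user chunks grow up from
  // its start, matching the region picture at the top of this file.
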
  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty())
      PopulateFreeList(stat, c, sci, class_id);
    CHECK(!sci->free_list.empty());
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    CHECK_GT(b->Count(), 0);
    sci->free_list.push_front(b);
  }

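  // Freed batches are pushed to the front of the list, and AllocateBatch also
  // pops from the front, so the most recently returned batch is handed out
  // first -- LIFO order, presumably to keep recently touched memory warm.
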
  uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

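  // Note that a pointer inside [kSpaceBeg, kSpaceBeg + kSpaceSize) whose
  // region was never handed out by AllocateRegion still fails this check:
  // its possible_regions entry is 0, the "region not used" size class.
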
  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

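  // Example with made-up numbers: for a 64-byte size class, p = beg + 150
  // yields offset = 150 and n = 2, so the returned block begin is beg + 128.
  // The u32 casts force a 32-bit division, presumably because it is cheaper
  // than a 64-bit one when this code is compiled for a 64-bit target.
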
  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {
  }

  static uptr AdditionalSize() {
    return 0;
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) -
                 sizeof(IntrusiveList<TransferBatch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

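  // The explicit padding (its sizeof(uptr) term presumably covering the mutex
  // and its alignment) rounds each SizeClassInfo up to kCacheLineSize, which
  // the check above enforces, so per-class mutexes never share a cache line --
  // the false-sharing concern called out at the top of this file.
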
  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                      "SizeClassAllocator32"));
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = ClassIdToSize(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = TransferBatch::MaxCached(class_id);
    TransferBatch *b = nullptr;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)i);
        b->Clear();
      }
      b->Add((void*)i);
      if (b->Count() == max_count) {
        CHECK_GT(b->Count(), 0);
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
  }

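  // In short: one freshly mapped region is carved into n_chunks equal chunks,
  // which are threaded into TransferBatches of at most MaxCached(class_id)
  // elements each. Depending on how the cache's CreateBatch is implemented,
  // the batch header may live inside the first chunk it describes (note the
  // (TransferBatch*)i argument) or be allocated elsewhere.
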
  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};