//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {};
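
// A minimal usage sketch (illustrative only): one cache per thread, stored in
// TLS. PrimaryAllocator64, primary, and global_stats are hypothetical names.
//   static THREADLOCAL SizeClassAllocatorLocalCache<PrimaryAllocator64> cache;
//   cache.Init(&global_stats);
//   void *p = cache.Allocate(&primary, class_id);
//   cache.Deallocate(&primary, class_id, p);
//   cache.Destroy(&primary, &global_stats);  // At thread exit.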

// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;
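
  // Registers this cache's stats with the global stats object, if provided.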
  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    CompactPtrT chunk = c->chunks[--c->count];
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
  }
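
  // Returns the chunk to the local cache; if the cache is full, drains half
  // of it back to the allocator first.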
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }
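
  // Flushes all cached chunks back to the allocator, e.g. at thread exit.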
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;
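
  // Lazily computes per-class limits. The cache is POD and therefore starts
  // zero-initialized, so the first call fills in max_count for every class.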
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
      c->class_size = size;
    }
    DCHECK_NE(c->max_count, 0UL);
  }
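
  // Refills an empty per-class cache with up to max_count/2 chunks taken
  // from the allocator.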
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    const uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }
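
  // Returns the last `count` cached chunks of the given class back to the
  // allocator.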
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};
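
// Note: the 64-bit cache stores chunks as compact pointers relative to the
// region base of their size class. A sketch of the round trip it relies on
// (illustrative only):
//   uptr base = allocator->GetRegionBeginBySizeClass(class_id);
//   CompactPtrT c = allocator->PointerToCompactPtr(base, (uptr)p);
//   void *p2 = (void *)allocator->CompactPtrToPointer(base, c);  // p2 == p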

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }
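
  // Drains the cache and unregisters the stats, typically as the owning
  // thread shuts down.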
  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id);
    c->batch[c->count++] = p;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }
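
  // Flushes all cached chunks back to the allocator, e.g. at thread exit.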
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;
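
  // Lazily computes per-class limits and batch placement. For example, with
  // kUseSeparateSizeClassForBatch a batch for an ordinary class comes from
  // kBatchClassID, while a batch for kBatchClassID itself is stored inline in
  // the cached chunks (batch_class_id == 0).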
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      const uptr max_cached = TransferBatch::MaxCached(size);
      c->max_count = 2 * max_cached;
      c->class_size = size;
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (size <
            TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
                batch_class_id : 0;
      }
    }
    DCHECK_NE(c->max_count, 0UL);
  }
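
  // Grabs a whole TransferBatch from the allocator, copies its chunks into
  // the local array, then destroys the batch.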
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }
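
  // Packs up to max_count/2 cached chunks into a TransferBatch and hands it
  // back to the allocator.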
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
                      uptr class_id) {
    const uptr count = Min(c->max_count / 2, c->count);
    const uptr first_idx_to_drain = c->count - count;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b)) {
      Report("FATAL: Internal error: %s's allocator failed to allocate a "
             "transfer batch.\n", SanitizerToolName);
      Die();
    }
    b->SetFromArray(&c->batch[first_idx_to_drain], count);
    c->count -= count;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};