//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {
};
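
// Illustrative sketch (not part of this header): the intended per-thread
// lifecycle, where PrimaryAllocator and AllocatorCache stand for whatever
// instantiation the embedding tool defines:
//
//   static THREADLOCAL AllocatorCache cache;  // POD, so safe to place in TLS.
//   cache.Init(&global_stats);                 // Once per thread.
//   void *p = cache.Allocate(&allocator, class_id);
//   cache.Deallocate(&allocator, class_id, p);
//   cache.Destroy(&allocator, &global_stats);  // Drains on thread exit.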

// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      // Refill from the shared allocator; fail the allocation on OOM.
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
    }
    stats_.Add(AllocatorStatAllocated, c->class_size);
    CHECK_GT(c->count, 0);
    CompactPtrT chunk = c->chunks[--c->count];
    void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
    return res;
  }

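  // Note: the cache stores free chunks in compact form (CompactPtrT, typically
  // a 32-bit offset within the class region rather than a full 64-bit
  // pointer), which keeps the per-class arrays small; the exact encoding is
  // defined by the SizeClassAllocator64 instantiation.
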
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    PerClass *c = &per_class_[class_id];
    stats_.Sub(AllocatorStatAllocated, c->class_size);
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
  }

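  // The half-capacity drain above gives the cache hysteresis. For example
  // (numbers illustrative only): with max_count == 16, a full cache drains 8
  // chunks back to the shared allocator, so the next 8 frees stay
  // thread-local instead of every free round-tripping to the central state.
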
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i, c->count);
    }
  }

  // private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    // max_count is never zero once initialized, so this check is cheap.
    if (LIKELY(per_class_[1].max_count))
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
      c->class_size = Allocator::ClassIdToSize(i);
    }
  }

  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache();
    uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }

  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    InitCache();
    CHECK_GE(c->count, count);
    uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

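  // Two placement strategies for TransferBatch, selected per class via
  // batch_class_id (precomputed in InitCache below): if batch_class_id is 0,
  // the batch header lives inside one of the cached chunks itself (the chunk
  // is big enough to hold it), so CreateBatch simply reuses the
  // caller-supplied pointer b; otherwise the batch is a separate allocation
  // from batch_class_id and must be freed via DestroyBatch.
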
  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(allocator, class_id)))
        return nullptr;
    }
    stats_.Add(AllocatorStatAllocated, c->class_size);
    void *res = c->batch[--c->count];
    // Prefetch the chunk that the next Allocate for this class will return.
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    PerClass *c = &per_class_[class_id];
    stats_.Sub(AllocatorStatAllocated, c->class_size);
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(allocator, i);
    }
  }

  // private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;
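
  // For instance, with the separate size class a linear overflow out of a
  // user chunk can only corrupt neighboring user chunks, never the free-list
  // metadata held in TransferBatch objects, since those live in the dedicated
  // kBatchClassID region.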

  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (LIKELY(per_class_[1].max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      uptr max_cached = TransferBatch::MaxCached(i);
      c->max_count = 2 * max_cached;
      c->class_size = Allocator::ClassIdToSize(i);
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (c->class_size <
            TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
                batch_class_id : 0;
      }
    }
  }
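
  // Worked example for the non-separate case (sizes illustrative only): if a
  // TransferBatch caching max_cached chunks needs, say, 200 bytes, then a
  // 16-byte size class has class_size < 200 and stores its batches in the
  // class that fits sizeof(TransferBatch), while a 512-byte class can hold
  // the batch inside a chunk and its batch_class_id stays 0.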

  NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    uptr cnt = Min(c->max_count / 2, c->count);
    uptr first_idx_to_drain = c->count - cnt;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b))
      DieOnFailure::OnOOM();
    b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                    &c->batch[first_idx_to_drain], cnt);
    c->count -= cnt;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};
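
// Note on the 32-bit Drain path above: when batch_class_id is 0, CreateBatch
// returns its third argument, i.e. the TransferBatch header is built in place
// inside the first chunk being drained, so releasing memory needs no extra
// allocation. Only classes whose chunks are too small to hold a batch allocate
// a separate batch object, and therefore only they can hit the OOM path.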