//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
13 #ifndef SANITIZER_ALLOCATOR_H
14 #error This file must be included inside sanitizer_allocator.h
// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
// Thin POD adapter: forwards to the cache type chosen by the allocator
// (SizeClassAllocator64LocalCache or SizeClassAllocator32LocalCache below).
// The closing brace of this struct was missing, so no following declaration
// could parse.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {
};
25 // Cache used by SizeClassAllocator64.
26 template <class SizeClassAllocator>
27 struct SizeClassAllocator64LocalCache {
28 typedef SizeClassAllocator Allocator;
29 static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
30 typedef typename Allocator::SizeClassMapT SizeClassMap;
31 typedef typename Allocator::CompactPtrT CompactPtrT;
33 void Init(AllocatorGlobalStats *s) {
39 void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
42 s->Unregister(&stats_);
45 void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
46 CHECK_NE(class_id, 0UL);
47 CHECK_LT(class_id, kNumClasses);
48 PerClass *c = &per_class_[class_id];
49 if (UNLIKELY(c->count == 0)) {
50 if (UNLIKELY(!Refill(c, allocator, class_id)))
53 stats_.Add(AllocatorStatAllocated, c->class_size);
54 CHECK_GT(c->count, 0);
55 CompactPtrT chunk = c->chunks[--c->count];
56 void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
57 allocator->GetRegionBeginBySizeClass(class_id), chunk));
61 void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
62 CHECK_NE(class_id, 0UL);
63 CHECK_LT(class_id, kNumClasses);
64 // If the first allocator call on a new thread is a deallocation, then
65 // max_count will be zero, leading to check failure.
67 PerClass *c = &per_class_[class_id];
68 stats_.Sub(AllocatorStatAllocated, c->class_size);
69 CHECK_NE(c->max_count, 0UL);
70 if (UNLIKELY(c->count == c->max_count))
71 Drain(c, allocator, class_id, c->max_count / 2);
72 CompactPtrT chunk = allocator->PointerToCompactPtr(
73 allocator->GetRegionBeginBySizeClass(class_id),
74 reinterpret_cast<uptr>(p));
75 c->chunks[c->count++] = chunk;
78 void Drain(SizeClassAllocator *allocator) {
79 for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
80 PerClass *c = &per_class_[class_id];
82 Drain(c, allocator, class_id, c->count);
91 CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
93 PerClass per_class_[kNumClasses];
94 AllocatorStats stats_;
97 if (per_class_[1].max_count)
99 for (uptr i = 0; i < kNumClasses; i++) {
100 PerClass *c = &per_class_[i];
101 c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
102 c->class_size = Allocator::ClassIdToSize(i);
106 NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
109 uptr num_requested_chunks = c->max_count / 2;
110 if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
111 num_requested_chunks)))
113 c->count = num_requested_chunks;
117 NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
120 CHECK_GE(c->count, count);
121 uptr first_idx_to_drain = c->count - count;
123 allocator->ReturnToAllocator(&stats_, class_id,
124 &c->chunks[first_idx_to_drain], count);
128 // Cache used by SizeClassAllocator32.
129 template <class SizeClassAllocator>
130 struct SizeClassAllocator32LocalCache {
131 typedef SizeClassAllocator Allocator;
132 typedef typename Allocator::TransferBatch TransferBatch;
133 static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
135 void Init(AllocatorGlobalStats *s) {
138 s->Register(&stats_);
141 void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
144 s->Unregister(&stats_);
147 void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
148 CHECK_NE(class_id, 0UL);
149 CHECK_LT(class_id, kNumClasses);
150 PerClass *c = &per_class_[class_id];
151 if (UNLIKELY(c->count == 0)) {
152 if (UNLIKELY(!Refill(allocator, class_id)))
155 stats_.Add(AllocatorStatAllocated, c->class_size);
156 void *res = c->batch[--c->count];
157 PREFETCH(c->batch[c->count - 1]);
161 void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
162 CHECK_NE(class_id, 0UL);
163 CHECK_LT(class_id, kNumClasses);
164 // If the first allocator call on a new thread is a deallocation, then
165 // max_count will be zero, leading to check failure.
167 PerClass *c = &per_class_[class_id];
168 stats_.Sub(AllocatorStatAllocated, c->class_size);
169 CHECK_NE(c->max_count, 0UL);
170 if (UNLIKELY(c->count == c->max_count))
171 Drain(allocator, class_id);
172 c->batch[c->count++] = p;
175 void Drain(SizeClassAllocator *allocator) {
176 for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
177 PerClass *c = &per_class_[class_id];
179 Drain(allocator, class_id);
184 typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
189 uptr class_id_for_transfer_batch;
190 void *batch[2 * TransferBatch::kMaxNumCached];
192 PerClass per_class_[kNumClasses];
193 AllocatorStats stats_;
196 if (per_class_[1].max_count)
198 // TransferBatch class is declared in SizeClassAllocator.
199 uptr class_id_for_transfer_batch =
200 SizeClassMap::ClassID(sizeof(TransferBatch));
201 for (uptr i = 0; i < kNumClasses; i++) {
202 PerClass *c = &per_class_[i];
203 uptr max_cached = TransferBatch::MaxCached(i);
204 c->max_count = 2 * max_cached;
205 c->class_size = Allocator::ClassIdToSize(i);
206 // We transfer chunks between central and thread-local free lists in
207 // batches. For small size classes we allocate batches separately. For
208 // large size classes we may use one of the chunks to store the batch.
209 // sizeof(TransferBatch) must be a power of 2 for more efficient
211 c->class_id_for_transfer_batch = (c->class_size <
212 TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
213 class_id_for_transfer_batch : 0;
217 // Returns a TransferBatch suitable for class_id.
218 // For small size classes allocates the batch from the allocator.
219 // For large size classes simply returns b.
220 TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
222 if (uptr batch_class_id = per_class_[class_id].class_id_for_transfer_batch)
223 return (TransferBatch*)Allocate(allocator, batch_class_id);
227 // Destroys TransferBatch b.
228 // For small size classes deallocates b to the allocator.
229 // Does notthing for large size classes.
230 void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
232 if (uptr batch_class_id = per_class_[class_id].class_id_for_transfer_batch)
233 Deallocate(allocator, batch_class_id, b);
236 NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
238 PerClass *c = &per_class_[class_id];
239 TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
242 CHECK_GT(b->Count(), 0);
243 b->CopyToArray(c->batch);
244 c->count = b->Count();
245 DestroyBatch(class_id, allocator, b);
249 NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
251 PerClass *c = &per_class_[class_id];
252 uptr cnt = Min(c->max_count / 2, c->count);
253 uptr first_idx_to_drain = c->count - cnt;
254 TransferBatch *b = CreateBatch(
255 class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
256 // Failure to allocate a batch while releasing memory is non recoverable.
257 // TODO(alekseys): Figure out how to do it without allocating a new batch.
259 DieOnFailure::OnOOM();
260 b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
261 &c->batch[first_idx_to_drain], cnt);
263 allocator->DeallocateBatch(&stats_, class_id, b);