//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine caches a specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue exceeds a specified
// threshold, the oldest memory is recycled.
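//
// A rough sketch of the flow (illustrative only):
//   free(p) -> Quarantine::Put()    // into the calling thread's cache
//           -> Quarantine::Drain()  // full caches move to the global queue
//           -> Recycle()            // oldest batches return to the allocator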
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Callback> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;   // Total size of the chunks in this batch, including the batch
               // header itself.
  uptr count;  // Number of pointers stored in batch[].
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.
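
// For reference: on a typical 64-bit target the batch is 3 * 8 header bytes
// plus 1021 * 8 bytes of pointers, i.e. exactly 8192 bytes, so kSize = 1021
// lets a batch fill the whole 8Kb bound checked above.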

// The callback interface is:
//   void Callback::Recycle(Node *ptr);
//   void *Callback::Allocate(uptr size);
//   void Callback::Deallocate(void *ptr);
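//
// A minimal sketch of a conforming callback (hypothetical, for illustration
// only; real tools such as ASan supply their own implementation):
//
//   struct ExampleCallback {
//     void Recycle(Node *ptr);     // Return a quarantined chunk to the
//                                  // underlying allocator.
//     void *Allocate(uptr size);   // Backs QuarantineBatch storage.
//     void Deallocate(void *ptr);  // Releases QuarantineBatch storage.
//   };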
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    atomic_store(&max_size_, size, memory_order_release);
    atomic_store(&min_size_, size / 10 * 9,
                 memory_order_release);  // 90% of max size.
    max_cache_size_ = cache_size;
  }
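
  // Illustrative example (values are hypothetical, not mandated by this
  // header): Init(1 << 26, 1 << 20) bounds the global quarantine at 64Mb
  // and each per-thread cache at 1Mb; Recycle() then trims the global
  // queue back to 90% of the 64Mb.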

  uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  // Mutable data, padded to reduce false sharing with the read-only fields.
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    uptr min_size = atomic_load(&min_size_, memory_order_acquire);
    {
      SpinMutexLock l(&cache_mutex_);
      // Pop the oldest batches until the global cache shrinks to min_size.
      while (cache_.Size() > min_size) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    // Recycle the collected batches without holding any lock.
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      // Prefetch the first entries, then keep staying kPrefetch entries ahead
      // of the recycling loop to hide cache misses on quarantined pointers.
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
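
// A sketch of typical usage (illustrative; ExampleCallback, Chunk and the
// names below are hypothetical, not part of this header):
//
//   typedef Quarantine<ExampleCallback, Chunk> ChunkQuarantine;
//   static ChunkQuarantine quarantine(LINKER_INITIALIZED);
//   static THREADLOCAL ChunkQuarantine::Cache cache(LINKER_INITIALIZED);
//
//   void QuarantineChunk(Chunk *c, uptr size) {
//     // Park the chunk instead of freeing it; Put() drains the thread cache
//     // and recycles the oldest memory once the configured limits are hit.
//     quarantine.Put(&cache, ExampleCallback(), c, size);
//   }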

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

  // size_ is written only by the owning thread (or under cache_mutex_ for the
  // global cache), so a plain load+store suffices; the relaxed atomics only
  // make concurrent Size() readers well-defined.
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_QUARANTINE_H