//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches some specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
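//
// A sketch of the flow through the classes below (an illustrative summary,
// not part of the original interface documentation):
//
//   Put() -> per-thread QuarantineCache -> Drain() -> global FIFO of batches
//     -> Recycle()/DoRecycle() -> Callback::Recycle() on the oldest nodes.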
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Callback> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;   // Total size of the nodes in this batch, in bytes.
  uptr count;  // Number of pointers stored in batch[].
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.

// The callback interface is:
//   void Callback::Recycle(Node *ptr);
//   void *Callback::Allocate(uptr size);
//   void Callback::Deallocate(void *ptr);
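//
// A minimal sketch of a conforming callback (illustrative only: MyNode,
// ReturnToAllocator, InternalAlloc and InternalFree are assumptions made for
// this example, not part of this header). Callbacks are passed by value, so
// they should be cheap to copy:
//
//   struct MyCallback {
//     void Recycle(MyNode *ptr) { ReturnToAllocator(ptr); }
//     void *Allocate(uptr size) { return InternalAlloc(size); }
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };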
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    max_size_ = size;
    min_size_ = size / 10 * 9;  // 90% of max size.
    max_cache_size_ = cache_size;
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data. The pads keep the read-only fields, the mutexes and the
  // global cache on separate cache lines to avoid false sharing.
  char pad0_[kCacheLineSize];
  uptr max_size_;
  uptr min_size_;  // 90% of max_size_.
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size_) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        // Prefetch ahead, but never past the last stored element.
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

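// Usage sketch (illustrative; the sizes are arbitrary example values and
// MyCallback/MyNode are the hypothetical types from the callback sketch
// above; in practice each thread owns its own Cache):
//
//   static Quarantine<MyCallback, MyNode> quarantine(LINKER_INITIALIZED);
//   Quarantine<MyCallback, MyNode>::Cache cache;  // One per thread.
//   quarantine.Init(1 << 20, 1 << 16);  // 1Mb total, 64Kb per-thread cache.
//   quarantine.Put(&cache, MyCallback(), node, size);  // Drains when full.
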
// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return 0;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

  // Each cache has a single writer (its owning thread, or a thread holding
  // cache_mutex_ for the global cache), so a plain load+store is enough; the
  // relaxed atomics only keep concurrent Size() readers well-defined.
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_QUARANTINE_H