1 //===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // Concurrent uptr->T hashmap.
12 //===----------------------------------------------------------------------===//
14 #ifndef SANITIZER_ADDRHASHMAP_H
15 #define SANITIZER_ADDRHASHMAP_H
17 #include "sanitizer_common.h"
18 #include "sanitizer_mutex.h"
19 #include "sanitizer_atomic.h"
20 #include "sanitizer_allocator_internal.h"
22 namespace __sanitizer {
24 // Concurrent uptr->T hashmap.
25 // T must be a POD type, kSize is preferably a prime but can be any number.
28 // typedef AddrHashMap<uptr, 11> Map;
31 // Map::Handle h(&m, addr);
32 // use h.operator->() to access the data
33 // if h.created() then the element was just created, and the current thread
34 // has exclusive access to it
35 // otherwise the current thread has only read access to the data
38 // Map::Handle h(&m, addr, true);
39 // this will remove the data from the map in Handle dtor
40 // the current thread has exclusive access to the data
41 // if !h.exists() then the element never existed
43 template<typename T, uptr kSize>
// NOTE(review): this chunk is elided — the `class AddrHashMap` header and the
// Cell/AddBucket/Bucket struct headers are not visible here; the comments
// below describe only what the visible lines establish.
// Per-cell address slot. Atomic because readers probe cells lock-free;
// release() publishes a new element by storing its address with
// memory_order_release (see release() below), and 0 denotes an empty cell
// (see the CHECK_EQ against 0 in acquire()).
47 atomic_uintptr_t addr;
// Overflow ("add") storage: trailing variable-length array of cells for
// elements that do not fit into the bucket's embedded cells.
54 Cell cells[1]; // variable len
// Number of cells embedded directly in every bucket.
57 static const uptr kBucketSize = 3;
62 Cell cells[kBucketSize];
// Handle is a scoped accessor (see the usage comment at the top of the
// file): construction acquires the element, destruction releases it.
70 Handle(AddrHashMap<T, kSize> *map, uptr addr);
// remove == true: the element is removed from the map in the Handle dtor.
71 Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
// create == false: pure lookup, a missing element is not created.
72 Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
77 const T &operator*() const;
// The map implements the acquire()/release() protocol and needs access to
// the Handle's private state (map_, bucket_, cell_, addidx_, ...).
82 friend AddrHashMap<T, kSize>;
83 AddrHashMap<T, kSize> *map_;
// Find-or-create on Handle construction / publish-or-remove on Handle
// destruction; definitions follow below.
97 void acquire(Handle *h);
98 void release(Handle *h);
// Maps an address to a bucket index; body not visible in this chunk.
99 uptr calcHash(uptr addr);
102 template<typename T, uptr kSize>
// Find-or-create accessor: no removal, creation allowed (per the usage
// comment at the top of the file). Body elided from this chunk —
// presumably forwards to acquire(); confirm against the full file.
103 AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
111 template<typename T, uptr kSize>
// Overload with explicit `remove`: when true, the element is removed from
// the map in the Handle dtor (body elided from this chunk).
112 AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
121 template<typename T, uptr kSize>
// Full-control overload: `remove` deletes the element in the dtor,
// `create == false` makes this a pure lookup (body elided from this chunk).
122 AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
123 bool remove, bool create) {
131 template<typename T, uptr kSize>
// Releases the element acquired at construction — publishes a newly
// created cell or performs the pending removal (body elided; see
// release() below for the actual protocol).
132 AddrHashMap<T, kSize>::Handle::~Handle() {
136 template <typename T, uptr kSize>
// Mutable access to the element's payload (body elided from this chunk).
// Per the file-top comment, write access is only safe when created()
// is true or the handle was opened for removal.
137 T *AddrHashMap<T, kSize>::Handle::operator->() {
141 template <typename T, uptr kSize>
// Read-only access to the element's payload (body elided from this chunk).
142 const T &AddrHashMap<T, kSize>::Handle::operator*() const {
146 template <typename T, uptr kSize>
// Mutable reference to the element's payload (body elided from this chunk).
147 T &AddrHashMap<T, kSize>::Handle::operator*() {
151 template<typename T, uptr kSize>
// True when this Handle created the element; per the usage comment at the
// top of the file the creator then has exclusive access to it
// (body elided from this chunk).
152 bool AddrHashMap<T, kSize>::Handle::created() const {
156 template<typename T, uptr kSize>
// True when the element is present in the map; per the file-top comment,
// !exists() after a remove-handle means the element never existed.
// Presumably cell_ is left null by acquire() on a failed lookup — the
// relevant acquire() lines are elided here; confirm against the full file.
157 bool AddrHashMap<T, kSize>::Handle::exists() const {
158 return cell_ != nullptr;
161 template<typename T, uptr kSize>
162 AddrHashMap<T, kSize>::AddrHashMap() {
// One flat table of kSize buckets. MmapOrDie aborts the process on
// allocation failure, so table_ is never null. The cell-empty invariant is
// addr == 0 (see the CHECK_EQ in acquire()), which presumably relies on the
// fresh mapping being zero-initialized — confirm MmapOrDie's contract.
163 table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
166 template<typename T, uptr kSize>
// Finds or creates the cell for h->addr_ and records it in the handle.
// Structure (several lines, including the mutex operations and all return
// paths, are elided from this chunk): 1) lock-free probe of the embedded
// cells, 2) probe of the overflow "add" array under a read lock, 3)
// re-check and insert under the write lock.
167 void AddrHashMap<T, kSize>::acquire(Handle *h) {
168 uptr addr = h->addr_;
169 uptr hash = calcHash(addr);
170 Bucket *b = &table_[hash];
177 // If we want to remove the element, we need exclusive access to the bucket,
178 // so skip the lock-free phase.
183 // First try to find an existing element w/o read mutex.
185 // Check the embed cells.
186 for (uptr i = 0; i < kBucketSize; i++) {
187 Cell *c = &b->cells[i];
// acquire-load pairs with the release-store in release(): once the address
// is visible, the payload written by the creator is visible too.
188 uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
195 // Check the add cells with read lock.
196 if (atomic_load(&b->add, memory_order_relaxed)) {
// Relaxed re-load is safe here: b->add only transitions under the bucket
// mutex, and this block runs holding the read lock (lock lines elided).
198 AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
199 for (uptr i = 0; i < add->size; i++) {
200 Cell *c = &add->cells[i];
201 uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
// (Match handling elided.)
212 // Re-check existence under write lock.
// Another thread may have inserted the element between the lock-free probe
// and taking the write lock.
215 for (uptr i = 0; i < kBucketSize; i++) {
216 Cell *c = &b->cells[i];
217 uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
229 AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
231 for (uptr i = 0; i < add->size; i++) {
232 Cell *c = &add->cells[i];
233 uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
246 // The element does not exist, no need to create it if we want to remove.
247 if (h->remove_ || !h->create_) {
252 // Now try to create it under the mutex.
254 // See if we have a free embed cell.
255 for (uptr i = 0; i < kBucketSize; i++) {
256 Cell *c = &b->cells[i];
257 uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
264 // Store in the add cells.
266 // Allocate a new add array.
267 const uptr kInitSize = 64;
268 add = (AddBucket*)InternalAlloc(kInitSize);
269 internal_memset(add, 0, kInitSize);
// "+ 1" accounts for the one cell already counted inside sizeof(*add)
// (AddBucket declares `Cell cells[1]` as a variable-length tail).
270 add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
272 atomic_store(&b->add, (uptr)add, memory_order_relaxed);
274 if (add->size == add->cap) {
275 // Grow existing add array.
276 uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
277 uptr newsize = oldsize * 2;
278 AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
279 internal_memset(add1, 0, newsize);
280 add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
281 add1->size = add->size;
282 internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
// NOTE(review): the InternalFree of the old array is among the elided
// lines — confirm the original releases it after the swap.
284 atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
288 uptr i = add->size++;
289 Cell *c = &add->cells[i];
// A free add cell must still carry the empty marker (0).
290 CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
295 template<typename T, uptr kSize>
// Counterpart of acquire(), run from the Handle dtor: publishes a newly
// created element, or removes it and compacts the bucket. Several lines
// (the created_/branch headers, unlock calls and the tail of the function)
// are elided from this chunk.
296 void AddrHashMap<T, kSize>::release(Handle *h) {
299 Bucket *b = h->bucket_;
301 uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
303 // Denote completion of insertion.
305 // After the following store, the element becomes available
306 // for lock-free reads.
// release-store pairs with the acquire-load in acquire()'s lock-free probe.
307 atomic_store(&c->addr, h->addr_, memory_order_release);
309 } else if (h->remove_) {
310 // Denote that the cell is empty now.
311 CHECK_EQ(addr1, h->addr_);
312 atomic_store(&c->addr, 0, memory_order_release);
313 // See if we need to compact the bucket.
314 AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
// addidx_ == -1U marks a cell in the embedded array (vs. the add array).
315 if (h->addidx_ == -1U) {
316 // Removed from embed array, move an add element into the freed cell.
317 if (add && add->size != 0) {
318 uptr last = --add->size;
319 Cell *c1 = &add->cells[last];
// (Payload copy elided.) Publish the moved address in the embedded cell,
// then clear the vacated add cell.
321 uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
322 atomic_store(&c->addr, addr1, memory_order_release);
323 atomic_store(&c1->addr, 0, memory_order_release);
326 // Removed from add array, compact it.
// Presumably the last add element is moved into the freed slot first —
// those lines are elided; confirm against the full file.
327 uptr last = --add->size;
328 Cell *c1 = &add->cells[last];
331 atomic_store(&c1->addr, 0, memory_order_relaxed);
334 if (add && add->size == 0) {
335 // FIXME(dvyukov): free add?
// Neither-created-nor-removed path: the cell must still hold our address.
339 CHECK_EQ(addr1, h->addr_);
340 if (h->addidx_ != -1U)
345 template<typename T, uptr kSize>
// Maps an address to a bucket index in [0, kSize); body elided from this
// chunk, so the actual mixing function cannot be documented here.
346 uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
352 } // namespace __sanitizer
354 #endif // SANITIZER_ADDRHASHMAP_H