2 * util/storage/lruhash.c - hashtable, hash function, LRU keeping.
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
6 * This software is open source.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 * This file contains a hashtable with LRU keeping of entries.
44 #include "util/storage/lruhash.h"
45 #include "util/fptr_wlist.h"
/** Initialize the locks of every bin in the bin array.
 * NOTE(review): the loop variable declaration and the function's braces
 * are not visible in this view — confirm against the full file. */
48 bin_init(struct lruhash_bin* array, size_t size)
51 #ifdef THREADS_DISABLED
54 for(i=0; i<size; i++) {
55 lock_quick_init(&array[i].lock);
/* register the bin's memory region with the lock checker */
56 lock_protect(&array[i].lock, &array[i],
57 sizeof(struct lruhash_bin));
/** Create a new LRU hash table.
 * start_size: initial number of bins; must be a power of 2, since the
 *   mask is computed as start_size-1 below.
 * maxmem: maximum memory the table may use before reclaiming entries.
 * sizefunc/compfunc/delkeyfunc/deldatafunc: user callbacks for entry
 *   sizing, key comparison, and key/data deletion.
 * arg: user callback argument (presumably stored in cb_arg; the
 *   assignment line is not visible here — confirm in full file).
 * Returns the new table, or NULL on allocation failure (failure
 * branches partially elided in this view). */
62 lruhash_create(size_t start_size, size_t maxmem,
63 lruhash_sizefunc_type sizefunc, lruhash_compfunc_type compfunc,
64 lruhash_delkeyfunc_type delkeyfunc,
65 lruhash_deldatafunc_type deldatafunc, void* arg)
67 struct lruhash* table = (struct lruhash*)calloc(1,
68 sizeof(struct lruhash));
71 lock_quick_init(&table->lock);
72 table->sizefunc = sizefunc;
73 table->compfunc = compfunc;
74 table->delkeyfunc = delkeyfunc;
75 table->deldatafunc = deldatafunc;
77 table->size = start_size;
/* size_mask maps hash -> bin index; relies on start_size being 2^k */
78 table->size_mask = (int)(start_size-1);
79 table->lru_start = NULL;
80 table->lru_end = NULL;
82 table->space_used = 0;
83 table->space_max = maxmem;
84 table->array = calloc(table->size, sizeof(struct lruhash_bin));
/* cleanup path if the bin array allocation failed (guard elided) */
86 lock_quick_destroy(&table->lock);
90 bin_init(table->array, table->size);
/* register table and bin array with the lock checker */
91 lock_protect(&table->lock, table, sizeof(*table));
92 lock_protect(&table->lock, table->array,
93 table->size*sizeof(struct lruhash_bin));
/** Delete a bin during table destruction: destroy its lock and free
 * every entry on its overflow list via the user delete callbacks.
 * Assumes no other thread uses the table any more (no locks taken).
 * NOTE(review): the walk loop header and the line saving p->data into
 * a local 'd' are elided in this view. */
98 bin_delete(struct lruhash* table, struct lruhash_bin* bin)
100 struct lruhash_entry* p, *np;
104 lock_quick_destroy(&bin->lock);
105 p = bin->overflow_list;
106 bin->overflow_list = NULL;
/* save next pointer before the entry's key/data are destroyed */
108 np = p->overflow_next;
110 (*table->delkeyfunc)(p->key, table->cb_arg);
111 (*table->deldatafunc)(d, table->cb_arg);
/** Redistribute all entries from the old bin array into the grown bin
 * array 'newa' using the wider mask 'newmask'.
 * Caller must hold the table lock. The LRU chain is untouched; only
 * the per-bin overflow lists are relinked. */
117 bin_split(struct lruhash* table, struct lruhash_bin* newa,
121 struct lruhash_entry *p, *np;
122 struct lruhash_bin* newbin;
123 /* move entries to new table. Notice that since hash x is mapped to
124 * bin x & mask, and new mask uses one more bit, so all entries in
125 * one bin will go into the old bin or bin | newbit */
126 #ifndef THREADS_DISABLED
/* newbit is the single bit added by the wider mask */
127 int newbit = newmask - table->size_mask;
129 /* so, really, this task could also be threaded, per bin. */
130 /* LRU list is not changed */
131 for(i=0; i<table->size; i++)
133 lock_quick_lock(&table->array[i].lock);
134 p = table->array[i].overflow_list;
135 /* lock both destination bins */
136 lock_quick_lock(&newa[i].lock);
137 lock_quick_lock(&newa[newbit|i].lock);
/* walk the old overflow list; np saved since p is relinked */
139 np = p->overflow_next;
140 /* link into correct new bin */
141 newbin = &newa[p->hash & newmask];
142 p->overflow_next = newbin->overflow_list;
143 newbin->overflow_list = p;
146 lock_quick_unlock(&newa[i].lock);
147 lock_quick_unlock(&newa[newbit|i].lock);
148 lock_quick_unlock(&table->array[i].lock);
/** Delete the whole hash table, all its bins and entries.
 * Only called when no other thread can use the table any more.
 * NOTE(review): NULL-table guard, fptr whitelist checks, and the final
 * free() calls are elided in this view — confirm in full file. */
153 lruhash_delete(struct lruhash* table)
158 /* delete lock on hashtable to force check its OK */
159 lock_quick_destroy(&table->lock);
160 for(i=0; i<table->size; i++)
161 bin_delete(table, &table->array[i]);
/** Unlink 'entry' from the bin's singly-linked overflow list.
 * Caller must hold the bin lock. Uses a pointer-to-pointer walk so the
 * head and interior cases are handled uniformly.
 * NOTE(review): the loop header and the match test around line 173 are
 * elided; the visible lines show unlink (*prevp = next) and advance. */
167 bin_overflow_remove(struct lruhash_bin* bin, struct lruhash_entry* entry)
169 struct lruhash_entry* p = bin->overflow_list;
170 struct lruhash_entry** prevp = &bin->overflow_list;
/* found: splice the entry out of the list */
173 *prevp = p->overflow_next;
/* not this one: advance prevp and p */
176 prevp = &p->overflow_next;
177 p = p->overflow_next;
/** Reclaim entries from the LRU end of the table until space_used is
 * under space_max (or only one entry remains). Reclaimed entries are
 * unlinked from their bin and LRU chain, marked deleted, and pushed
 * onto *list (via overflow_next) for the caller to free outside the
 * table lock. Caller must hold the table lock. */
182 reclaim_space(struct lruhash* table, struct lruhash_entry** list)
184 struct lruhash_entry* d;
185 struct lruhash_bin* bin;
187 /* does not delete MRU entry, so table will not be empty. */
188 while(table->num > 1 && table->space_used > table->space_max) {
189 /* notice that since we hold the hashtable lock, nobody
190 can change the lru chain. So it cannot be deleted underneath
191 us. We still need the hashbin and entry write lock to make
192 sure we flush all users away from the entry.
193 which is unlikely, since it is LRU, if someone got a rdlock
194 it would be moved to front, but to be sure. */
196 /* specialised, delete from end of double linked list,
197 and we know num>1, so there is a previous lru entry. */
198 log_assert(d && d->lru_prev);
/* NOTE(review): 'd = table->lru_end;' is elided in this view */
199 table->lru_end = d->lru_prev;
200 d->lru_prev->lru_next = NULL;
201 /* schedule entry for deletion */
202 bin = &table->array[d->hash & table->size_mask];
204 lock_quick_lock(&bin->lock);
205 bin_overflow_remove(bin, d);
/* push onto the caller's reclaim list */
206 d->overflow_next = *list;
/* wrlock flushes out any readers before the entry is handed over */
208 lock_rw_wrlock(&d->lock);
209 table->space_used -= table->sizefunc(d->key, d->data);
210 if(table->markdelfunc)
211 (*table->markdelfunc)(d->key);
212 lock_rw_unlock(&d->lock);
213 lock_quick_unlock(&bin->lock);
/** Search a bin's overflow list for an entry with the given hash and
 * key. Compares the cheap hash value first, then the user compfunc
 * (0 means equal). Caller must hold the bin lock.
 * Returns the entry or (presumably) NULL when not found — the return
 * statements are elided in this view. */
217 struct lruhash_entry*
218 bin_find_entry(struct lruhash* table,
219 struct lruhash_bin* bin, hashvalue_type hash, void* key)
221 struct lruhash_entry* p = bin->overflow_list;
223 if(p->hash == hash && table->compfunc(p->key, key) == 0)
225 p = p->overflow_next;
/** Double the number of bins in the table. Caller must hold the table
 * lock. On overflow of size_t or allocation failure the table simply
 * keeps its current (slower, more collision-prone) size. */
231 table_grow(struct lruhash* table)
233 struct lruhash_bin* newa;
/* refuse to grow past the largest power-of-two that fits in size_t */
236 if(table->size_mask == (int)(((size_t)-1)>>1)) {
237 log_err("hash array malloc: size_t too small");
240 /* try to allocate new array, if not fail */
241 newa = calloc(table->size*2, sizeof(struct lruhash_bin));
243 log_err("hash grow: malloc failed");
244 /* continue with smaller array. Though its slower. */
247 bin_init(newa, table->size*2);
/* widen the mask by one bit */
248 newmask = (table->size_mask << 1) | 1;
249 bin_split(table, newa, newmask);
250 /* delete the old bins */
251 lock_unprotect(&table->lock, table->array);
252 for(i=0; i<table->size; i++) {
253 lock_quick_destroy(&table->array[i].lock);
/* NOTE(review): free of old array and 'table->array = newa;
 * table->size *= 2;' lines are elided in this view */
258 table->size_mask = newmask;
260 lock_protect(&table->lock, table->array,
261 table->size*sizeof(struct lruhash_bin));
/** Put an (unlinked) entry at the front (MRU position) of the LRU
 * double-linked list. Caller must hold the table lock. */
266 lru_front(struct lruhash* table, struct lruhash_entry* entry)
268 entry->lru_prev = NULL;
269 entry->lru_next = table->lru_start;
/* empty list: entry is also the LRU end */
270 if(!table->lru_start)
271 table->lru_end = entry;
272 else table->lru_start->lru_prev = entry;
273 table->lru_start = entry;
/** Unlink an entry from the LRU double-linked list, fixing up the
 * list head/tail when the entry was first/last. Caller must hold the
 * table lock. (The if-conditions around the visible lines are elided
 * in this view.) */
277 lru_remove(struct lruhash* table, struct lruhash_entry* entry)
280 entry->lru_prev->lru_next = entry->lru_next;
281 else table->lru_start = entry->lru_next;
283 entry->lru_next->lru_prev = entry->lru_prev;
284 else table->lru_end = entry->lru_prev;
/** Move an entry to the front (MRU position) of the LRU list.
 * No-op when the entry is already most-recently-used.
 * Caller must hold the table lock. */
288 lru_touch(struct lruhash* table, struct lruhash_entry* entry)
290 log_assert(table && entry);
291 if(entry == table->lru_start)
292 return; /* nothing to do */
293 /* remove from current lru position */
294 lru_remove(table, entry);
/* add to front */
296 lru_front(table, entry);
/** Insert a new entry, or update the data of an existing entry with
 * the same hash+key. Takes ownership of entry/data; on overwrite the
 * presented key and the old data are deleted via the callbacks.
 * May reclaim LRU entries and grow the table; actual freeing of
 * reclaimed entries happens after all locks are released. */
300 lruhash_insert(struct lruhash* table, hashvalue_type hash,
301 struct lruhash_entry* entry, void* data, void* cb_arg)
303 struct lruhash_bin* bin;
304 struct lruhash_entry* found, *reclaimlist=NULL;
/* verify the callback pointers are whitelisted (defense in depth) */
306 fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
307 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
308 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
309 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
310 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
311 need_size = table->sizefunc(entry->key, data);
312 if(cb_arg == NULL) cb_arg = table->cb_arg;
/* find bin, under table lock then bin lock */
315 lock_quick_lock(&table->lock);
316 bin = &table->array[hash & table->size_mask];
317 lock_quick_lock(&bin->lock);
319 /* see if entry exists already */
320 if(!(found=bin_find_entry(table, bin, hash, entry->key))) {
321 /* if not: add to bin */
322 entry->overflow_next = bin->overflow_list;
323 bin->overflow_list = entry;
324 lru_front(table, entry);
326 table->space_used += need_size;
328 /* if so: update data - needs a writelock */
329 table->space_used += need_size -
330 (*table->sizefunc)(found->key, found->data);
/* the duplicate presented key is deleted; found keeps its own key */
331 (*table->delkeyfunc)(entry->key, cb_arg);
332 lru_touch(table, found);
333 lock_rw_wrlock(&found->lock);
334 (*table->deldatafunc)(found->data, cb_arg);
/* NOTE(review): 'found->data = data;' is elided in this view */
336 lock_rw_unlock(&found->lock);
338 lock_quick_unlock(&bin->lock);
339 if(table->space_used > table->space_max)
340 reclaim_space(table, &reclaimlist);
341 if(table->num >= table->size)
343 lock_quick_unlock(&table->lock);
345 /* finish reclaim if any (outside of critical region) */
347 struct lruhash_entry* n = reclaimlist->overflow_next;
348 void* d = reclaimlist->data;
349 (*table->delkeyfunc)(reclaimlist->key, cb_arg);
350 (*table->deldatafunc)(d, cb_arg);
/** Look up an entry by hash and key, touch it to the MRU position,
 * and return it locked (write lock when wr is nonzero, else read
 * lock), or NULL when not found. The caller must unlock the returned
 * entry. Note the lock order: the entry lock is acquired while the
 * bin lock is still held, then the bin lock is released. */
355 struct lruhash_entry*
356 lruhash_lookup(struct lruhash* table, hashvalue_type hash, void* key, int wr)
358 struct lruhash_entry* entry;
359 struct lruhash_bin* bin;
360 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
362 lock_quick_lock(&table->lock);
363 bin = &table->array[hash & table->size_mask];
364 lock_quick_lock(&bin->lock);
365 if((entry=bin_find_entry(table, bin, hash, key)))
366 lru_touch(table, entry);
/* table lock can be dropped; bin lock still protects the entry */
367 lock_quick_unlock(&table->lock);
370 if(wr) { lock_rw_wrlock(&entry->lock); }
371 else { lock_rw_rdlock(&entry->lock); }
373 lock_quick_unlock(&bin->lock);
/** Remove the entry with the given hash and key from the table and
 * delete it via the user callbacks. No-op when the key is absent.
 * The entry is write-locked before deletion to flush out any readers,
 * and markdelfunc (when set) lets concurrent users detect deletion. */
378 lruhash_remove(struct lruhash* table, hashvalue_type hash, void* key)
380 struct lruhash_entry* entry;
381 struct lruhash_bin* bin;
383 fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
384 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
385 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
386 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
387 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
389 lock_quick_lock(&table->lock);
390 bin = &table->array[hash & table->size_mask];
391 lock_quick_lock(&bin->lock);
392 if((entry=bin_find_entry(table, bin, hash, key))) {
393 bin_overflow_remove(bin, entry);
394 lru_remove(table, entry);
/* not found: release locks and return (the else/return lines around
 * here are elided in this view) */
396 lock_quick_unlock(&table->lock);
397 lock_quick_unlock(&bin->lock);
401 table->space_used -= (*table->sizefunc)(entry->key, entry->data);
402 lock_quick_unlock(&table->lock);
/* wrlock flushes all users away from the entry before delete */
403 lock_rw_wrlock(&entry->lock);
404 if(table->markdelfunc)
405 (*table->markdelfunc)(entry->key);
406 lock_rw_unlock(&entry->lock);
407 lock_quick_unlock(&bin->lock);
/* delete key and data outside of the locks; 'd = entry->data' local
 * save appears elided in this view */
410 (*table->delkeyfunc)(entry->key, table->cb_arg);
411 (*table->deldatafunc)(d, table->cb_arg);
414 /** clear bin, respecting locks, does not do space, LRU */
416 bin_clear(struct lruhash* table, struct lruhash_bin* bin)
418 struct lruhash_entry* p, *np;
420 lock_quick_lock(&bin->lock);
421 p = bin->overflow_list;
/* walk the overflow list, deleting every entry; loop header elided */
423 lock_rw_wrlock(&p->lock);
424 np = p->overflow_next;
/* mark deleted so concurrent holders of old references can tell */
426 if(table->markdelfunc)
427 (*table->markdelfunc)(p->key);
428 lock_rw_unlock(&p->lock);
429 (*table->delkeyfunc)(p->key, table->cb_arg);
430 (*table->deldatafunc)(d, table->cb_arg);
433 bin->overflow_list = NULL;
434 lock_quick_unlock(&bin->lock);
/** Remove and delete all entries from the table, leaving the table
 * itself usable and empty (space_used reset, LRU chain cleared).
 * NOTE(review): NULL-table guard and 'table->num = 0;' appear elided
 * in this view — confirm in full file. */
438 lruhash_clear(struct lruhash* table)
443 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
444 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
445 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
447 lock_quick_lock(&table->lock);
448 for(i=0; i<table->size; i++) {
449 bin_clear(table, &table->array[i]);
451 table->lru_start = NULL;
452 table->lru_end = NULL;
454 table->space_used = 0;
455 lock_quick_unlock(&table->lock);
/** Log usage statistics for the table, prefixed with 'id'.
 * When 'extended' is nonzero, also walks every bin (under its lock)
 * to log per-bin chain lengths and the min/avg/max fill. */
459 lruhash_status(struct lruhash* table, const char* id, int extended)
461 lock_quick_lock(&table->lock);
462 log_info("%s: %u entries, memory %u / %u",
463 id, (unsigned)table->num, (unsigned)table->space_used,
464 (unsigned)table->space_max);
465 log_info("  itemsize %u, array %u, mask %d",
466 (unsigned)(table->num? table->space_used/table->num : 0),
467 (unsigned)table->size, table->size_mask);
/* min starts above any possible count; max below any possible count */
470 int min=(int)table->size*2, max=-2;
471 for(i=0; i<table->size; i++) {
473 struct lruhash_entry *en;
474 lock_quick_lock(&table->array[i].lock);
475 en = table->array[i].overflow_list;
/* count chain length ('here' counter lines partially elided) */
478 en = en->overflow_next;
480 lock_quick_unlock(&table->array[i].lock);
482 log_info("bin[%d] %d", (int)i, here);
483 if(here > max) max = here;
484 if(here < min) min = here;
486 log_info("  bin min %d, avg %.2lf, max %d", min,
487 (double)table->num/(double)table->size, max);
489 lock_quick_unlock(&table->lock);
/** Return the total memory in use by the table: the struct itself,
 * the user-accounted entry space, the bin array, and lock overhead.
 * Under USE_THREAD_DEBUG each bin lock is measured individually;
 * otherwise one lock's size is multiplied by the bin count. */
493 lruhash_get_mem(struct lruhash* table)
496 lock_quick_lock(&table->lock);
497 s = sizeof(struct lruhash) + table->space_used;
498 #ifdef USE_THREAD_DEBUG
499 if(table->size != 0) {
501 for(i=0; i<table->size; i++)
502 s += sizeof(struct lruhash_bin) +
503 lock_get_mem(&table->array[i].lock);
505 #else /* no THREAD_DEBUG */
/* all bin locks have the same size; avoid the per-bin walk */
507 s += (table->size)*(sizeof(struct lruhash_bin) +
508 lock_get_mem(&table->array[0].lock));
510 lock_quick_unlock(&table->lock);
511 s += lock_get_mem(&table->lock);
/** Set (or clear, with NULL) the mark-deleted callback, under the
 * table lock so concurrent operations see a consistent value. */
516 lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_type md)
518 lock_quick_lock(&table->lock);
519 table->markdelfunc = md;
520 lock_quick_unlock(&table->lock);
/** Apply 'func(entry, arg)' to every entry in the table. Each entry
 * is visited under its own rwlock — write lock when wr is nonzero,
 * read lock otherwise — while the table and bin locks are held, so
 * 'func' must not call back into the hash table (deadlock). */
524 lruhash_traverse(struct lruhash* h, int wr,
525 void (*func)(struct lruhash_entry*, void*), void* arg)
528 struct lruhash_entry* e;
530 lock_quick_lock(&h->lock);
531 for(i=0; i<h->size; i++) {
532 lock_quick_lock(&h->array[i].lock);
533 for(e = h->array[i].overflow_list; e; e = e->overflow_next) {
/* lock entry per caller's choice, call func, unlock
 * (the if(wr)/else and func(e, arg) lines are partially elided) */
535 lock_rw_wrlock(&e->lock);
537 lock_rw_rdlock(&e->lock);
540 lock_rw_unlock(&e->lock);
542 lock_quick_unlock(&h->array[i].lock);
544 lock_quick_unlock(&h->lock);
548 * Demote: the opposite of touch, move an entry to the bottom
/* (LRU end) of the list, making it the first reclaim candidate;
 * caller must hold the table lock */
553 lru_demote(struct lruhash* table, struct lruhash_entry* entry)
555 log_assert(table && entry);
556 if (entry == table->lru_end)
557 return; /* nothing to do */
558 /* remove from current lru position */
559 lru_remove(table, entry);
/* append at the end of the list */
561 entry->lru_next = NULL;
562 entry->lru_prev = table->lru_end;
/* list was empty after removal: entry becomes the start too */
564 if (table->lru_end == NULL)
566 table->lru_start = entry;
570 table->lru_end->lru_next = entry;
572 table->lru_end = entry;
/** Insert the entry, or — unlike lruhash_insert — when an entry with
 * the same hash+key already exists, KEEP the existing data and return
 * the existing entry. In both cases the returned entry is write-locked
 * for the caller. May reclaim LRU space and grow the table; reclaimed
 * entries are freed after all locks are released. */
575 struct lruhash_entry*
576 lruhash_insert_or_retrieve(struct lruhash* table, hashvalue_type hash,
577 struct lruhash_entry* entry, void* data, void* cb_arg)
579 struct lruhash_bin* bin;
580 struct lruhash_entry* found, *reclaimlist = NULL;
/* verify the callback pointers are whitelisted (defense in depth) */
582 fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
583 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
584 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
585 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
586 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
587 need_size = table->sizefunc(entry->key, data);
588 if (cb_arg == NULL) cb_arg = table->cb_arg;
/* find bin, under table lock then bin lock */
591 lock_quick_lock(&table->lock);
592 bin = &table->array[hash & table->size_mask];
593 lock_quick_lock(&bin->lock);
595 /* see if entry exists already */
596 if ((found = bin_find_entry(table, bin, hash, entry->key)) != NULL) {
597 /* if so: keep the existing data - acquire a writelock */
598 lock_rw_wrlock(&found->lock);
602 /* if not: add to bin */
603 entry->overflow_next = bin->overflow_list;
604 bin->overflow_list = entry;
605 lru_front(table, entry);
607 table->space_used += need_size;
608 /* return the entry that was presented, and lock it */
/* NOTE(review): 'found = entry;' appears elided here in this view */
610 lock_rw_wrlock(&found->lock);
612 lock_quick_unlock(&bin->lock);
613 if (table->space_used > table->space_max)
614 reclaim_space(table, &reclaimlist);
615 if (table->num >= table->size)
617 lock_quick_unlock(&table->lock);
619 /* finish reclaim if any (outside of critical region) */
620 while (reclaimlist) {
621 struct lruhash_entry* n = reclaimlist->overflow_next;
622 void* d = reclaimlist->data;
623 (*table->delkeyfunc)(reclaimlist->key, cb_arg);
624 (*table->deldatafunc)(d, cb_arg);
628 /* return the entry that was selected */