/*
 * Copyright 2016 Jakub Klama <jceel@FreeBSD.org>
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted providing that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/queue.h>
#include "lib9p_impl.h"
#include "hashtable.h"
/* Iterator stepping helper; defined below, after the basic table ops. */
static struct ht_item *ht_iter_advance(struct ht_iter *, struct ht_item *);
41 ht_init(struct ht *h, ssize_t size)
45 memset(h, 0, sizeof(struct ht));
46 h->ht_nentries = size;
47 h->ht_entries = l9p_calloc((size_t)size, sizeof(struct ht_entry));
48 pthread_rwlock_init(&h->ht_rwlock, NULL);
50 for (i = 0; i < size; i++)
51 TAILQ_INIT(&h->ht_entries[i].hte_items);
55 ht_destroy(struct ht *h)
58 struct ht_item *item, *tmp;
61 for (i = 0; i < h->ht_nentries; i++) {
62 he = &h->ht_entries[i];
63 TAILQ_FOREACH_SAFE(item, &he->hte_items, hti_link, tmp) {
68 pthread_rwlock_destroy(&h->ht_rwlock);
74 ht_find(struct ht *h, uint32_t hash)
79 result = ht_find_locked(h, hash);
85 ht_find_locked(struct ht *h, uint32_t hash)
87 struct ht_entry *entry;
90 entry = &h->ht_entries[hash % h->ht_nentries];
92 TAILQ_FOREACH(item, &entry->hte_items, hti_link) {
93 if (item->hti_hash == hash)
94 return (item->hti_data);
101 ht_add(struct ht *h, uint32_t hash, void *value)
103 struct ht_entry *entry;
104 struct ht_item *item;
107 entry = &h->ht_entries[hash % h->ht_nentries];
109 TAILQ_FOREACH(item, &entry->hte_items, hti_link) {
110 if (item->hti_hash == hash) {
117 item = l9p_calloc(1, sizeof(struct ht_item));
118 item->hti_hash = hash;
119 item->hti_data = value;
120 TAILQ_INSERT_TAIL(&entry->hte_items, item, hti_link);
127 ht_remove(struct ht *h, uint32_t hash)
132 result = ht_remove_locked(h, hash);
138 ht_remove_locked(struct ht *h, uint32_t hash)
140 struct ht_entry *entry;
141 struct ht_item *item, *tmp;
142 ssize_t slot = hash % h->ht_nentries;
144 entry = &h->ht_entries[slot];
146 TAILQ_FOREACH_SAFE(item, &entry->hte_items, hti_link, tmp) {
147 if (item->hti_hash == hash) {
148 TAILQ_REMOVE(&entry->hte_items, item, hti_link);
159 * Inner workings for advancing the iterator.
161 * If we have a current item, that tells us how to find the
162 * next item. If not, we get the first item from the next
163 * slot (well, the next slot with an item); in any case, we
164 * record the new slot and return the next item.
166 * For bootstrapping, iter->htit_slot can be -1 to start
167 * searching at slot 0.
169 * Caller must hold a lock on the table.
171 static struct ht_item *
172 ht_iter_advance(struct ht_iter *iter, struct ht_item *cur)
174 struct ht_item *next;
178 h = iter->htit_parent;
183 next = TAILQ_NEXT(cur, hti_link);
186 slot = iter->htit_slot;
187 while (++slot < h->ht_nentries) {
188 next = TAILQ_FIRST(&h->ht_entries[slot].hte_items);
192 iter->htit_slot = slot;
198 * Remove the current item - there must be one, or this is an
199 * error. This (necessarily) pre-locates the next item, so callers
200 * must not use it on an actively-changing table.
203 ht_remove_at_iter(struct ht_iter *iter)
205 struct ht_item *item;
209 assert(iter != NULL);
211 if ((item = iter->htit_curr) == NULL) {
216 /* remove the item from the table, saving the NEXT one */
217 h = iter->htit_parent;
219 slot = iter->htit_slot;
220 iter->htit_next = ht_iter_advance(iter, item);
221 TAILQ_REMOVE(&h->ht_entries[slot].hte_items, item, hti_link);
224 /* mark us as no longer on an item, then free it */
225 iter->htit_curr = NULL;
232 * Initialize iterator. Subsequent ht_next calls will find the
233 * first item, then the next, and so on. Callers should in general
234 * not use this on actively-changing tables, though we do our best
235 * to make it semi-sensible.
238 ht_iter(struct ht *h, struct ht_iter *iter)
241 iter->htit_parent = h;
242 iter->htit_curr = NULL;
243 iter->htit_next = NULL;
244 iter->htit_slot = -1; /* which will increment to 0 */
248 * Return the next item, which is the first item if we have not
249 * yet been called on this iterator, or the next item if we have.
252 ht_next(struct ht_iter *iter)
254 struct ht_item *item;
257 if ((item = iter->htit_next) == NULL) {
258 /* no pre-loaded next; find next from current */
259 h = iter->htit_parent;
261 item = ht_iter_advance(iter, iter->htit_curr);
264 iter->htit_next = NULL;
265 iter->htit_curr = item;
266 return (item == NULL ? NULL : item->hti_data);