/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/stdarg.h>

#include <linux/bitmap.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/err.h>

#define	MAX_IDR_LEVEL	((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
#define	MAX_IDR_FREE	(MAX_IDR_LEVEL * 2)

struct linux_idr_cache {
	spinlock_t lock;
	struct idr_layer *head;
	unsigned count;
};

DPCPU_DEFINE_STATIC(struct linux_idr_cache, linux_idr_cache);

/*
 * IDR Implementation.
 *
 * This is quick and dirty and not as re-entrant as the Linux version;
 * however, it should be fairly fast.  It is basically a radix tree with
 * a built-in bitmap for allocation.
 */
static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");

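/*
 * Illustrative sketch of the consumer-facing API (not compiled as part
 * of this file; "obj" stands for some hypothetical driver object): a
 * consumer maps small integer IDs to pointers roughly as follows:
 *
 *	struct idr map;
 *	int id;
 *
 *	idr_init(&map);
 *	id = idr_alloc(&map, obj, 0, 0, GFP_KERNEL);
 *	if (id >= 0)
 *		MPASS(idr_find(&map, id) == obj);
 *	idr_remove(&map, id);
 *	idr_destroy(&map);
 */
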
static struct idr_layer *
idr_preload_dequeue_locked(struct linux_idr_cache *lic)
{
	struct idr_layer *retval;

	/* check if wrong thread is trying to dequeue */
	if (mtx_owned(&lic->lock.m) == 0)
		return (NULL);

	retval = lic->head;
	if (likely(retval != NULL)) {
		lic->head = retval->ary[0];
		lic->count--;
		retval->ary[0] = NULL;
	}
	return (retval);
}

static void
idr_preload_init(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		spin_lock_init(&lic->lock);
	}
}
SYSINIT(idr_preload_init, SI_SUB_CPU, SI_ORDER_ANY, idr_preload_init, NULL);

static void
idr_preload_uninit(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct idr_layer *cacheval;
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		while (1) {
			spin_lock(&lic->lock);
			cacheval = idr_preload_dequeue_locked(lic);
			spin_unlock(&lic->lock);

			if (cacheval == NULL)
				break;
			free(cacheval, M_IDR);
		}
		spin_lock_destroy(&lic->lock);
	}
}
SYSUNINIT(idr_preload_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, idr_preload_uninit, NULL);

void
idr_preload(gfp_t gfp_mask)
{
	struct linux_idr_cache *lic;
	struct idr_layer *cacheval;

	sched_pin();

	lic = &DPCPU_GET(linux_idr_cache);

	/* fill up per-CPU cache */
	spin_lock(&lic->lock);
	while (lic->count < MAX_IDR_FREE) {
		spin_unlock(&lic->lock);
		cacheval = malloc(sizeof(*cacheval), M_IDR, M_ZERO | gfp_mask);
		spin_lock(&lic->lock);
		if (cacheval == NULL)
			break;
		cacheval->ary[0] = lic->head;
		lic->head = cacheval;
		lic->count++;
	}
}

void
idr_preload_end(void)
{
	struct linux_idr_cache *lic;

	lic = &DPCPU_GET(linux_idr_cache);
	spin_unlock(&lic->lock);
	sched_unpin();
}

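/*
 * Sketch of the intended calling pattern ("some_idr", "obj" and
 * "obj_lock" are hypothetical): idr_preload() pins the thread and
 * pre-fills the per-CPU layer cache so that a subsequent allocation
 * made under a non-sleepable lock can fall back on it:
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&obj_lock);
 *	id = idr_alloc(&some_idr, obj, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&obj_lock);
 *	idr_preload_end();
 */
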
static inline int
idr_max(struct idr *idr)
{
	return (1 << (idr->layers * IDR_BITS)) - 1;
}

static inline int
idr_pos(int id, int layer)
{
	return (id >> (IDR_BITS * layer)) & IDR_MASK;
}

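/*
 * Worked example (assuming IDR_BITS == 5, i.e. IDR_SIZE == 32, as in
 * the LinuxKPI idr.h): id 1000 decomposes as
 *
 *	idr_pos(1000, 1) = (1000 >> 5) & 31 = 31
 *	idr_pos(1000, 0) =  1000       & 31 =  8
 *
 * so a two-layer tree reaches it via top->ary[31]->ary[8], since
 * 31 * 32 + 8 == 1000.
 */
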
void
idr_init(struct idr *idr)
{
	bzero(idr, sizeof(*idr));
	mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
}

/* Only frees cached pages. */
void
idr_destroy(struct idr *idr)
{
	struct idr_layer *il, *iln;

	idr_remove_all(idr);
	mtx_lock(&idr->lock);
	for (il = idr->free; il != NULL; il = iln) {
		iln = il->ary[0];
		free(il, M_IDR);
	}
	mtx_unlock(&idr->lock);
	mtx_destroy(&idr->lock);
}

static void
idr_remove_layer(struct idr_layer *il, int layer)
{
	int i;

	if (il == NULL)
		return;
	if (layer == 0) {
		free(il, M_IDR);
		return;
	}
	for (i = 0; i < IDR_SIZE; i++)
		if (il->ary[i])
			idr_remove_layer(il->ary[i], layer - 1);
	free(il, M_IDR);
}

void
idr_remove_all(struct idr *idr)
{

	mtx_lock(&idr->lock);
	idr_remove_layer(idr->top, idr->layers - 1);
	idr->top = NULL;
	idr->layers = 0;
	mtx_unlock(&idr->lock);
}

static void *
idr_remove_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;
	int layer;
	int idx;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	/*
	 * Walk down the tree to this item, setting bitmaps along the way,
	 * as we know at least one item will be free along this path.
	 */
	while (layer && il) {
		idx = idr_pos(id, layer);
		il->bitmap |= 1 << idx;
		il = il->ary[idx];
		layer--;
	}
	idx = id & IDR_MASK;
	/*
	 * At this point we've set free space bitmaps up the whole tree.
	 * We could make this non-fatal and unwind, but Linux dumps a stack
	 * and a warning, so I don't think it's necessary.
	 */
	if (il == NULL || (il->bitmap & (1 << idx)) != 0)
		panic("idr_remove: Item %d not allocated (%p, %p)\n",
		    id, idr, il);
	res = il->ary[idx];
	il->ary[idx] = NULL;
	il->bitmap |= 1 << idx;

	return (res);
}

void *
idr_remove(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_remove_locked(idr, id);
	mtx_unlock(&idr->lock);

	return (res);
}

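/*
 * Note on the bitmap convention used above: a set bit means the slot is
 * free.  Removal therefore walks root-to-leaf re-setting the bit for the
 * chosen index at every layer, since freeing the leaf slot guarantees at
 * least one free id below each node on that path.
 */
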
static inline struct idr_layer *
idr_find_layer_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	int layer;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	while (layer && il) {
		il = il->ary[idr_pos(id, layer)];
		layer--;
	}
	return (il);
}

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_layer *il;
	void *res;
	int idx;

	mtx_lock(&idr->lock);
	il = idr_find_layer_locked(idr, id);
	idx = id & IDR_MASK;

	/* Replace still returns an error if the item was not allocated. */
	if (il == NULL || (il->bitmap & (1 << idx))) {
		res = ERR_PTR(-ENOENT);
	} else {
		res = il->ary[idx];
		il->ary[idx] = ptr;
	}
	mtx_unlock(&idr->lock);
	return (res);
}

static inline void *
idr_find_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;

	mtx_assert(&idr->lock, MA_OWNED);
	il = idr_find_layer_locked(idr, id);
	if (il != NULL)
		res = il->ary[id & IDR_MASK];
	else
		res = NULL;
	return (res);
}

void *
idr_find(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_find_locked(idr, id);
	mtx_unlock(&idr->lock);
	return (res);
}

void *
idr_get_next(struct idr *idr, int *nextidp)
{
	void *res = NULL;
	int id = *nextidp;

	mtx_lock(&idr->lock);
	for (; id <= idr_max(idr); id++) {
		res = idr_find_locked(idr, id);
		if (res != NULL)
			break;
	}
	*nextidp = id;
	mtx_unlock(&idr->lock);
	return (res);
}

int
idr_pre_get(struct idr *idr, gfp_t gfp_mask)
{
	struct idr_layer *il, *iln;
	struct idr_layer *head;
	int need;

	mtx_lock(&idr->lock);
	for (;;) {
		need = idr->layers + 1;
		for (il = idr->free; il != NULL; il = il->ary[0])
			need--;
		mtx_unlock(&idr->lock);
		if (need <= 0)
			break;
		for (head = NULL; need; need--) {
			iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
			if (iln == NULL)
				break;
			bitmap_fill(&iln->bitmap, IDR_SIZE);
			if (head != NULL) {
				il->ary[0] = iln;
				il = iln;
			} else
				head = il = iln;
		}
		if (head == NULL)
			return (0);
		mtx_lock(&idr->lock);
		il->ary[0] = idr->free;
		idr->free = head;
	}
	return (1);
}

static struct idr_layer *
idr_free_list_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idp->free) != NULL) {
		idp->free = il->ary[0];
		il->ary[0] = NULL;
	}
	return (il);
}

static inline struct idr_layer *
idr_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idr_free_list_get(idp)) != NULL) {
		MPASS(il->bitmap != 0);
	} else if ((il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT)) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else if ((il = idr_preload_dequeue_locked(&DPCPU_GET(linux_idr_cache))) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else {
		return (NULL);
	}
	return (il);
}

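/*
 * Note: idr_get() above tries three sources in order: the idr's own
 * free list stocked by idr_pre_get(), a direct M_NOWAIT allocation,
 * and finally the per-CPU cache filled by idr_preload(), so it can
 * make progress even when called with non-sleepable locks held.
 */
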
/*
 * Could be implemented as get_new_above(idr, ptr, 0, idp) but written
 * first for simplicity's sake.
 */
static int
idr_get_new_locked(struct idr *idr, void *ptr, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Expand the tree until there is free space.
	 */
	if (idr->top == NULL || idr->top->bitmap == 0) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		idx = ffsl(il->bitmap);
		if (idx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		idx--;
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new(struct idr *idr, void *ptr, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_locked(idr, ptr, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

static int
idr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx, sidx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Compute the layers required to support starting_id and the mask
	 * at the top layer.
	 */
restart:
	idx = starting_id;
	layer = 0;
	while (idx & ~IDR_MASK) {
		layer++;
		idx >>= IDR_BITS;
	}
	if (layer == MAX_LEVEL + 1) {
		error = -ENOSPC;
		goto out;
	}
	/*
	 * Expand the tree until there is free space at or beyond starting_id.
	 */
	while (idr->layers <= layer ||
	    idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top && idr->top->bitmap == 0)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		sidx = idr_pos(starting_id, layer);
		/* Returns an index numbered from 0, or IDR_SIZE if none exists. */
		idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
		if (idx == IDR_SIZE && sidx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		/*
		 * We may have walked a path where there was a free bit but
		 * it was lower than what we wanted.  Restart the search with
		 * a larger starting id.  id contains the progress we made so
		 * far.  Search the leaf one above this level.  This may
		 * restart as many as MAX_LEVEL times but that is expected
		 * to be rare.
		 */
		if (idx == IDR_SIZE) {
			starting_id = id + (1 << ((layer + 1) * IDR_BITS));
			goto restart;
		}
		if (idx != sidx)
			starting_id = 0;	/* Search the whole subtree. */
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

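/*
 * Worked example of the restart arithmetic above (assuming
 * IDR_BITS == 5): with starting_id == 40 the walk picks slot 1 at
 * layer 1 (id == 32 so far), then looks in that leaf for a free bit at
 * or above idr_pos(40, 0) == 8.  If there is none, idx == IDR_SIZE and
 * the search restarts at
 *
 *	starting_id = 32 + (1 << ((0 + 1) * 5)) = 64,
 *
 * i.e. at the first id covered by the next leaf, recomputing the path
 * from the root.
 */
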
int
idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_above_locked(idr, ptr, starting_id, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

int
ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id));
}

static int
idr_alloc_locked(struct idr *idr, void *ptr, int start, int end)
{
	int max = end > 0 ? end - 1 : INT_MAX;
	int error;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	if (unlikely(start < 0))
		return (-EINVAL);
	if (unlikely(max < start))
		return (-ENOMEM);

	if (start == 0)
		error = idr_get_new_locked(idr, ptr, &id);
	else
		error = idr_get_new_above_locked(idr, ptr, start, &id);

	if (unlikely(error < 0))
		return (error);
	if (unlikely(id > max)) {
		idr_remove_locked(idr, id);
		return (-ENOSPC);
	}
	return (id);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, start, end);
	mtx_unlock(&idr->lock);
	return (retval);
}

int
idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end);
	if (unlikely(retval == -ENOSPC))
		retval = idr_alloc_locked(idr, ptr, start, end);
	if (likely(retval >= 0))
		idr->next_cyclic_id = retval + 1;
	mtx_unlock(&idr->lock);
	return (retval);
}

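/*
 * Unlike plain idr_alloc(), idr_alloc_cyclic() resumes searching at the
 * id after the last allocation, which delays id reuse.  Sketch with
 * hypothetical values: with start == 0 and end == 0, three calls might
 * return 0, 1 and 2; removing id 0 and allocating again returns 3, not
 * 0, until the search wraps back around via the -ENOSPC fallback.
 */
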
static int
idr_for_each_layer(struct idr_layer *il, int offset, int layer,
    int (*f)(int id, void *p, void *data), void *data)
{
	int i, err;

	if (il == NULL)
		return (0);
	if (layer == 0) {
		for (i = 0; i < IDR_SIZE; i++) {
			if (il->ary[i] == NULL)
				continue;
			err = f(i + offset, il->ary[i], data);
			if (err)
				return (err);
		}
		return (0);
	}
	for (i = 0; i < IDR_SIZE; i++) {
		if (il->ary[i] == NULL)
			continue;
		err = idr_for_each_layer(il->ary[i],
		    (i + offset) * IDR_SIZE, layer - 1, f, data);
		if (err)
			return (err);
	}
	return (0);
}

/* NOTE: It is not allowed to modify the IDR tree while it is being iterated */
int
idr_for_each(struct idr *idp, int (*f)(int id, void *p, void *data), void *data)
{
	return (idr_for_each_layer(idp->top, 0, idp->layers - 1, f, data));
}

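/*
 * Sketch of a typical iteration ("dump_one" and "some_idr" are
 * hypothetical): the callback returns non-zero to stop early, matching
 * idr_for_each_layer() above.
 *
 *	static int
 *	dump_one(int id, void *p, void *data)
 *	{
 *		printf("id %d -> %p\n", id, p);
 *		return (0);
 *	}
 *
 *	error = idr_for_each(&some_idr, dump_one, NULL);
 */
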
static int
idr_has_entry(int id, void *p, void *data)
{
	return (1);
}

bool
idr_is_empty(struct idr *idp)
{
	return (idr_for_each(idp, idr_has_entry, NULL) == 0);
}

int
ida_pre_get(struct ida *ida, gfp_t flags)
{
	if (idr_pre_get(&ida->idr, flags) == 0)
		return (0);

	if (ida->free_bitmap == NULL) {
		ida->free_bitmap =
		    malloc(sizeof(struct ida_bitmap), M_IDR, flags);
	}
	return (ida->free_bitmap != NULL);
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t flags)
{
	unsigned int max;
	int ret, id;

	MPASS((int)start >= 0);
	MPASS((int)end >= 0);

	if (end == 0)
		max = 0x80000000;
	else {
		MPASS(end > start);
		max = end - 1;
	}
again:
	if (!ida_pre_get(ida, flags))
		return (-ENOMEM);

	if ((ret = ida_get_new_above(ida, start, &id)) == 0) {
		if (id > (int)max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	if (__predict_false(ret == -EAGAIN))
		goto again;

	return (ret);
}

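/*
 * Sketch of a typical use ("some_ida" is hypothetical): allocate the
 * lowest free id in [0, 100) and release it again.  ida_simple_get()
 * returns the id on success or a negative errno:
 *
 *	id = ida_simple_get(&some_ida, 0, 100, GFP_KERNEL);
 *	if (id >= 0)
 *		ida_simple_remove(&some_ida, id);
 */
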
void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_remove(struct ida *ida, int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	free(ida->free_bitmap, M_IDR);
}