 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/malloc.h>
33 #include <sys/kernel.h>
34 #include <sys/sysctl.h>
36 #include <sys/mutex.h>
38 #include <machine/stdarg.h>
40 #include <linux/bitops.h>
41 #include <linux/kobject.h>
42 #include <linux/slab.h>
43 #include <linux/idr.h>
44 #include <linux/err.h>
 * This is quick and dirty and not as re-entrant as the Linux version;
 * however, it should be fairly fast.  It is basically a radix tree with
 * a builtin bitmap for allocation.
/* Private malloc(9) tag for all IDR layer and free-list allocations. */
53 static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");
/*
 * Largest id representable at the tree's current depth: each layer
 * resolves IDR_BITS bits of the id.  NOTE(review): assumes
 * layers * IDR_BITS stays below the width of int (bounded elsewhere
 * by the MAX_LEVEL checks) — otherwise the shift would be UB.
 */
56 idr_max(struct idr *idr)
58 return (1 << (idr->layers * IDR_BITS)) - 1;
/* Slot index of `id' within the node `layer' levels above the leaves. */
62 idr_pos(int id, int layer)
64 return (id >> (IDR_BITS * layer)) & IDR_MASK;
/*
 * Initialize an idr: empty tree, empty free-layer cache, and a private
 * mutex that serializes all subsequent tree operations.
 */
68 idr_init(struct idr *idr)
70 bzero(idr, sizeof(*idr));
71 mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
74 /* Only frees cached pages. */
76 idr_destroy(struct idr *idr)
78 struct idr_layer *il, *iln;
/*
 * Walk the free-layer cache releasing each entry; layers still linked
 * into the live tree are NOT freed here (use idr_remove_all() first).
 * NOTE(review): the loop body (free() call) and the matching mtx_lock()
 * are missing from this view of the file — confirm against the full source.
 */
82 for (il = idr->free; il != NULL; il = iln) {
86 mtx_unlock(&idr->lock);
/*
 * Recursively free an entire subtree.  `layer' counts down toward the
 * leaves; interior nodes descend into every slot before being released.
 * NOTE(review): the leaf-level termination/free lines are not visible
 * in this view.
 */
90 idr_remove_layer(struct idr_layer *il, int layer)
100 for (i = 0; i < IDR_SIZE; i++)
102 idr_remove_layer(il->ary[i], layer - 1);
/*
 * Drop every allocated id at once: free the whole radix tree under the
 * idr lock, leaving the idr empty (the free-layer cache is untouched).
 */
106 idr_remove_all(struct idr *idr)
109 mtx_lock(&idr->lock);
110 idr_remove_layer(idr->top, idr->layers - 1);
113 mtx_unlock(&idr->lock);
/*
 * Release a single id.  Walks root-to-leaf, re-setting the free bit in
 * every node on the path (each ancestor now has at least one free slot
 * below it), then verifies the leaf slot really was allocated before
 * marking it free.  In this tree a SET bitmap bit means "slot free"
 * (fresh layers are bitmap_fill'ed; allocation clears the bit).
 */
117 idr_remove(struct idr *idr, int id)
119 struct idr_layer *il;
124 mtx_lock(&idr->lock);
126 layer = idr->layers - 1;
/* Out-of-range or empty tree: nothing to remove. */
127 if (il == NULL || id > idr_max(idr)) {
128 mtx_unlock(&idr->lock);
132 * Walk down the tree to this item setting bitmaps along the way
133 * as we know at least one item will be free along this path.
135 while (layer && il) {
136 idx = idr_pos(id, layer);
137 il->bitmap |= 1 << idx;
143 * At this point we've set free space bitmaps up the whole tree.
144 * We could make this non-fatal and unwind but linux dumps a stack
145 * and a warning so I don't think it's necessary.
/* A set leaf bit here means the slot was already free: double remove. */
147 if (il == NULL || (il->bitmap & (1 << idx)) != 0)
148 panic("idr_remove: Item %d not allocated (%p, %p)\n",
/* Mark the leaf slot free again. */
151 il->bitmap |= 1 << idx;
152 mtx_unlock(&idr->lock);
157 idr_replace(struct idr *idr, void *ptr, int id)
159 struct idr_layer *il;
164 res = ERR_PTR(-EINVAL);
166 mtx_lock(&idr->lock);
168 layer = idr->layers - 1;
169 if (il == NULL || id > idr_max(idr))
171 while (layer && il) {
172 il = il->ary[idr_pos(id, layer)];
177 * Replace still returns an error if the item was not allocated.
179 if (il != NULL && (il->bitmap & (1 << idx)) != 0) {
184 mtx_unlock(&idr->lock);
/*
 * Look up the pointer stored at `id'.  Returns NULL when the id is out
 * of range or unpopulated.  NOTE(review): the allocated-bit guard that
 * normally precedes the final array load is not visible in this view —
 * confirm against the full source.
 */
189 idr_find(struct idr *idr, int id)
191 struct idr_layer *il;
197 mtx_lock(&idr->lock);
199 layer = idr->layers - 1;
200 if (il == NULL || id > idr_max(idr))
202 while (layer && il) {
203 il = il->ary[idr_pos(id, layer)];
/* At the leaf: fetch the consumer pointer for this id. */
207 res = il->ary[id & IDR_MASK];
209 mtx_unlock(&idr->lock);
/*
 * Pre-allocate enough layers on the idr's free-layer cache that a
 * subsequent idr_get_new*() cannot fail for lack of memory: one layer
 * per existing level plus one for possible tree growth (layers + 1).
 * Existing cached layers are counted first and only the deficit is
 * allocated, outside the lock, using the caller's gfp_mask.
 */
214 idr_pre_get(struct idr *idr, gfp_t gfp_mask)
216 struct idr_layer *il, *iln;
217 struct idr_layer *head;
220 mtx_lock(&idr->lock);
222 need = idr->layers + 1;
/* Count layers already cached; free-list links use ary[0]. */
223 for (il = idr->free; il != NULL; il = il->ary[0])
225 mtx_unlock(&idr->lock);
228 for (head = NULL; need; need--) {
229 iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
/* Fresh layer: every slot starts out free (set bit == free). */
232 bitmap_fill(&iln->bitmap, IDR_SIZE);
/* Splice the newly built chain onto the shared free list. */
241 mtx_lock(&idr->lock);
242 il->ary[0] = idr->free;
/*
 * Pop one layer off the idr's free-layer cache, falling back to an
 * M_NOWAIT allocation when the cache is empty (callers are expected to
 * have used idr_pre_get() so the fallback is rarely needed).
 * NOTE(review): the NULL check between malloc() and bitmap_fill() is
 * not visible in this view — confirm against the full source.
 */
248 static inline struct idr_layer *
249 idr_get(struct idr *idr)
251 struct idr_layer *il;
/* Unlink the head of the cache; free-list links use ary[0]. */
255 idr->free = il->ary[0];
259 il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT);
260 bitmap_fill(&il->bitmap, IDR_SIZE);
265 * Could be implemented as get_new_above(idr, ptr, 0, idp) but written
266 * first for simplicity sake.
/*
 * Allocate the lowest free id and bind it to `ptr'; the id is returned
 * through *idp.  Returns 0 on success (error path lines are not all
 * visible in this view).
 */
269 idr_get_new(struct idr *idr, void *ptr, int *idp)
271 struct idr_layer *stack[MAX_LEVEL];
272 struct idr_layer *il;
279 mtx_lock(&idr->lock);
281 * Expand the tree until there is free space.
/* A root bitmap of 0 means every slot below it is in use: grow a level. */
283 if (idr->top == NULL || idr->top->bitmap == 0) {
284 if (idr->layers == MAX_LEVEL + 1) {
/* The new root adopts the old tree as its first child. */
291 il->ary[0] = idr->top;
300 * Walk the tree following free bitmaps, record our path.
302 for (layer = idr->layers - 1;; layer--) {
/*
 * ffsl() yields the 1-based position of the lowest set (== free) bit,
 * or 0 when none exists — which would mean a corrupt bitmap here.
 */
304 idx = ffsl(il->bitmap);
306 panic("idr_get_new: Invalid leaf state (%p, %p)\n",
/* Accumulate this layer's contribution to the final id. */
309 id |= idx << (layer * IDR_BITS);
/* Descend, creating missing interior nodes from the layer cache. */
312 if (il->ary[idx] == NULL) {
313 il->ary[idx] = idr_get(idr);
314 if (il->ary[idx] == NULL)
320 * Allocate the leaf to the consumer.
/* Clearing the bit marks the leaf slot in use. */
322 il->bitmap &= ~(1 << idx);
326 * Clear bitmaps potentially up to the root.
/*
 * Each ancestor (saved on `stack') whose subtree just became completely
 * full has its free bit for this path cleared as well.
 */
328 while (il->bitmap == 0 && ++layer < idr->layers) {
330 il->bitmap &= ~(1 << idr_pos(id, layer));
334 mtx_unlock(&idr->lock);
/* Sanity check: the id we handed out must resolve back to ptr. */
336 if (error == 0 && idr_find(idr, id) != ptr) {
337 panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
/*
 * Allocate the lowest free id that is >= starting_id and bind it to
 * `ptr'; the id is returned through *idp.  NOTE(review): the tail of
 * this function (panic arguments, *idp store, return) runs past the
 * end of this view.
 */
345 idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
347 struct idr_layer *stack[MAX_LEVEL];
348 struct idr_layer *il;
355 mtx_lock(&idr->lock);
357 * Compute the layers required to support starting_id and the mask
/* Count how many IDR_BITS-wide digits starting_id occupies. */
363 while (idx & ~IDR_MASK) {
367 if (layer == MAX_LEVEL + 1) {
372 * Expand the tree until there is free space at or beyond starting_id.
374 while (idr->layers <= layer ||
375 idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
376 if (idr->layers == MAX_LEVEL + 1) {
/* The new root adopts the old tree as its first child. */
383 il->ary[0] = idr->top;
384 if (idr->top && idr->top->bitmap == 0)
392 * Walk the tree following free bitmaps, record our path.
394 for (layer = idr->layers - 1;; layer--) {
396 sidx = idr_pos(starting_id, layer);
397 /* Returns index numbered from 0 or size if none exists. */
398 idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
399 if (idx == IDR_SIZE && sidx == 0)
400 panic("idr_get_new: Invalid leaf state (%p, %p)\n",
403 * We may have walked a path where there was a free bit but
404 * it was lower than what we wanted. Restart the search with
405 * a larger starting id. id contains the progress we made so
406 * far. Search the leaf one above this level. This may
407 * restart as many as MAX_LEVEL times but that is expected
410 if (idx == IDR_SIZE) {
/*
 * BUG(review): operator precedence — "layer+1 * IDR_BITS" parses as
 * layer + IDR_BITS, NOT (layer + 1) * IDR_BITS, so the restart id is
 * far too small for upper layers and the retry loop can revisit the
 * same exhausted subtree.  Intended: 1 << ((layer + 1) * IDR_BITS).
 */
411 starting_id = id + (1 << (layer+1 * IDR_BITS));
415 starting_id = 0; /* Search the whole subtree. */
/* Accumulate this layer's contribution to the final id. */
416 id |= idx << (layer * IDR_BITS);
/* Descend, creating missing interior nodes from the layer cache. */
419 if (il->ary[idx] == NULL) {
420 il->ary[idx] = idr_get(idr);
421 if (il->ary[idx] == NULL)
427 * Allocate the leaf to the consumer.
/* Clearing the bit marks the leaf slot in use. */
429 il->bitmap &= ~(1 << idx);
433 * Clear bitmaps potentially up to the root.
/* Propagate "subtree full" up the recorded path. */
435 while (il->bitmap == 0 && ++layer < idr->layers) {
437 il->bitmap &= ~(1 << idr_pos(id, layer));
441 mtx_unlock(&idr->lock);
/* Sanity check: the id we handed out must resolve back to ptr. */
443 if (error == 0 && idr_find(idr, id) != ptr) {
444 panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",