2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice unmodified, this list of conditions, and the following
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/malloc.h>
33 #include <sys/kernel.h>
34 #include <sys/sysctl.h>
36 #include <sys/mutex.h>
38 #include <machine/stdarg.h>
40 #include <linux/bitops.h>
41 #include <linux/kobject.h>
42 #include <linux/slab.h>
43 #include <linux/idr.h>
44 #include <linux/err.h>
49 * This is quick and dirty and not as re-entrant as the Linux version;
50 * however, it should be fairly fast. It is basically a radix tree with
51 * a built-in bitmap for allocation.
/* malloc(9) type under which all IDR layer structures are allocated. */
53 static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");
/*
 * Largest id the tree can currently address: each layer resolves
 * IDR_BITS bits of the id.
 */
56 idr_max(struct idr *idr)
58 return (1 << (idr->layers * IDR_BITS)) - 1;
/* Extract the IDR_BITS-wide slot index for 'id' at tree depth 'layer'. */
62 idr_pos(int id, int layer)
64 return (id >> (IDR_BITS * layer)) & IDR_MASK;
/* Initialize an idr: zero every field, then set up its mutex. */
68 idr_init(struct idr *idr)
70 bzero(idr, sizeof(*idr));
71 mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
74 /* Only frees cached pages. */
76 idr_destroy(struct idr *idr)
78 struct idr_layer *il, *iln;
/*
 * Walk the free list of cached layers; 'iln' presumably captures the
 * next link before each node is freed -- the free/next lines are not
 * visible here (NOTE(review): confirm against the full file).
 */
82 for (il = idr->free; il != NULL; il = iln) {
86 mtx_unlock(&idr->lock);
/*
 * Recursively free a subtree of layers.  Recursion descends through
 * each child slot with 'layer - 1'; layer 0 entries hold consumer
 * pointers, so the elided guard presumably stops before freeing them
 * (NOTE(review): confirm against the full file).
 */
90 idr_remove_layer(struct idr_layer *il, int layer)
100 for (i = 0; i < IDR_SIZE; i++)
102 idr_remove_layer(il->ary[i], layer - 1);
/* Drop every allocated id by freeing the whole tree under the idr lock. */
106 idr_remove_all(struct idr *idr)
109 mtx_lock(&idr->lock);
110 idr_remove_layer(idr->top, idr->layers - 1);
113 mtx_unlock(&idr->lock);
/*
 * Free the slot holding 'id'.  Bitmap convention throughout this file:
 * a SET bit means the slot is free, a CLEAR bit means it is allocated.
 * Panics (where Linux would WARN) if the id was never allocated.
 */
117 idr_remove(struct idr *idr, int id)
119 struct idr_layer *il;
124 mtx_lock(&idr->lock);
126 layer = idr->layers - 1;
/* Ignore ids beyond the tree's current capacity or an empty tree. */
127 if (il == NULL || id > idr_max(idr)) {
128 mtx_unlock(&idr->lock);
132 * Walk down the tree to this item setting bitmaps along the way
133 * as we know at least one item will be free along this path.
135 while (layer && il) {
136 idx = idr_pos(id, layer);
/* Mark this interior slot as having free space somewhere below. */
137 il->bitmap |= 1 << idx;
143 * At this point we've set free space bitmaps up the whole tree.
144 * We could make this non-fatal and unwind but linux dumps a stack
145 * and a warning so I don't think it's necessary.
/* A set leaf bit means the slot is already free, i.e. not allocated. */
147 if (il == NULL || (il->bitmap & (1 << idx)) != 0)
148 panic("idr_remove: Item %d not allocated (%p, %p)\n",
/* Mark the leaf slot free again. */
151 il->bitmap |= 1 << idx;
152 mtx_unlock(&idr->lock);
157 idr_replace(struct idr *idr, void *ptr, int id)
159 struct idr_layer *il;
164 res = ERR_PTR(-EINVAL);
166 mtx_lock(&idr->lock);
168 layer = idr->layers - 1;
169 if (il == NULL || id > idr_max(idr))
171 while (layer && il) {
172 il = il->ary[idr_pos(id, layer)];
177 * Replace still returns an error if the item was not allocated.
179 if (il != NULL && (il->bitmap & (1 << idx)) != 0) {
184 mtx_unlock(&idr->lock);
/*
 * Look up the pointer stored at 'id' with the idr lock already held.
 * Returns NULL when the id is out of range or the path is unpopulated.
 */
189 idr_find_locked(struct idr *idr, int id)
191 struct idr_layer *il;
195 mtx_assert(&idr->lock, MA_OWNED);
200 layer = idr->layers - 1;
201 if (il == NULL || id > idr_max(idr))
/* Descend one layer per IDR_BITS of the id. */
203 while (layer && il) {
204 il = il->ary[idr_pos(id, layer)];
/* Leaf layer: the low IDR_BITS of the id index the consumer pointer. */
208 res = il->ary[id & IDR_MASK];
/* Locking wrapper around idr_find_locked(). */
213 idr_find(struct idr *idr, int id)
217 mtx_lock(&idr->lock);
218 res = idr_find_locked(idr, id);
219 mtx_unlock(&idr->lock);
/*
 * Pre-allocate layers onto the idr free list so that a subsequent
 * allocation cannot fail for lack of memory.  'gfp_mask' selects the
 * malloc(9) wait behaviour for the allocations.
 */
224 idr_pre_get(struct idr *idr, gfp_t gfp_mask)
226 struct idr_layer *il, *iln;
227 struct idr_layer *head;
230 mtx_lock(&idr->lock);
/* Worst case need: one layer per existing level plus one for growth. */
232 need = idr->layers + 1;
/* Count layers already cached; the free list links through ary[0]. */
233 for (il = idr->free; il != NULL; il = il->ary[0])
235 mtx_unlock(&idr->lock);
/* Allocate the shortfall outside the lock, building a private chain. */
238 for (head = NULL; need; need--) {
239 iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
/* Fresh layers start with every slot marked free. */
242 bitmap_fill(&iln->bitmap, IDR_SIZE);
251 mtx_lock(&idr->lock);
/* Splice the private chain onto the shared free list. */
252 il->ary[0] = idr->free;
258 static inline struct idr_layer *
/*
 * Get a layer: pop one from the idr free list if available, otherwise
 * fall back to a fresh M_NOWAIT allocation (which may return NULL).
 */
259 idr_get(struct idr *idr)
261 struct idr_layer *il;
/* Free-list entries are linked through ary[0]. */
265 idr->free = il->ary[0];
269 il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT);
/* New layers start with every slot marked free. */
270 bitmap_fill(&il->bitmap, IDR_SIZE);
275 * Could be implemented as get_new_above(idr, ptr, 0, idp) but written
276 * first for simplicity sake.
/*
 * Allocate the lowest free id, store 'ptr' at it, and return the id
 * through '*idp'.  Returns 0 on success or a negative errno.
 */
279 idr_get_new(struct idr *idr, void *ptr, int *idp)
281 struct idr_layer *stack[MAX_LEVEL];
282 struct idr_layer *il;
289 mtx_lock(&idr->lock);
291 * Expand the tree until there is free space.
/* bitmap == 0 means the current root has no free slots anywhere. */
293 if (idr->top == NULL || idr->top->bitmap == 0) {
294 if (idr->layers == MAX_LEVEL + 1) {
/* Grow by one level: the old tree becomes child 0 of the new root. */
301 il->ary[0] = idr->top;
310 * Walk the tree following free bitmaps, record our path.
/* ffsl() returns the 1-based index of the lowest set (free) bit. */
312 for (layer = idr->layers - 1;; layer--) {
314 idx = ffsl(il->bitmap);
316 panic("idr_get_new: Invalid leaf state (%p, %p)\n",
319 id |= idx << (layer * IDR_BITS);
/* Allocate intermediate layers on demand while descending. */
322 if (il->ary[idx] == NULL) {
323 il->ary[idx] = idr_get(idr);
324 if (il->ary[idx] == NULL)
330 * Allocate the leaf to the consumer.
/* Clearing the bit marks the slot as in use. */
332 il->bitmap &= ~(1 << idx);
336 * Clear bitmaps potentially up to the root.
/* Propagate "subtree full" upward while ancestors have no free slots. */
338 while (il->bitmap == 0 && ++layer < idr->layers) {
340 il->bitmap &= ~(1 << idr_pos(id, layer));
/* Sanity check: the id we just allocated must resolve back to 'ptr'. */
345 if (error == 0 && idr_find_locked(idr, id) != ptr) {
346 panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
350 mtx_unlock(&idr->lock);
/*
 * Like idr_get_new(), but allocate the lowest free id that is
 * >= starting_id.  Returns 0 on success or a negative errno.
 */
355 idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
357 struct idr_layer *stack[MAX_LEVEL];
358 struct idr_layer *il;
365 mtx_lock(&idr->lock);
367 * Compute the layers required to support starting_id and the mask
/* Strip IDR_BITS per iteration to count the layers starting_id needs. */
373 while (idx & ~IDR_MASK) {
377 if (layer == MAX_LEVEL + 1) {
382 * Expand the tree until there is free space at or beyond starting_id.
384 while (idr->layers <= layer ||
385 idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
386 if (idr->layers == MAX_LEVEL + 1) {
/* Grow by one level: the old tree becomes child 0 of the new root. */
393 il->ary[0] = idr->top;
/* Preserve the old root's "completely full" state in the new root. */
394 if (idr->top && idr->top->bitmap == 0)
402 * Walk the tree following free bitmaps, record our path.
404 for (layer = idr->layers - 1;; layer--) {
/* Lowest slot at this layer that can still satisfy starting_id. */
406 sidx = idr_pos(starting_id, layer);
407 /* Returns index numbered from 0 or size if none exists. */
408 idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
/* No free bit from slot 0 at the root: tree invariant violated. */
409 if (idx == IDR_SIZE && sidx == 0)
410 panic("idr_get_new: Invalid leaf state (%p, %p)\n",
413 * We may have walked a path where there was a free bit but
414 * it was lower than what we wanted. Restart the search with
415 * a larger starting id. id contains the progress we made so
416 * far. Search the leaf one above this level. This may
417 * restart as many as MAX_LEVEL times but that is expected
420 if (idx == IDR_SIZE) {
421 starting_id = id + (1 << ((layer + 1) * IDR_BITS));
425 starting_id = 0; /* Search the whole subtree. */
426 id |= idx << (layer * IDR_BITS);
/* Allocate intermediate layers on demand while descending. */
429 if (il->ary[idx] == NULL) {
430 il->ary[idx] = idr_get(idr);
431 if (il->ary[idx] == NULL)
437 * Allocate the leaf to the consumer.
/* Clearing the bit marks the slot as in use. */
439 il->bitmap &= ~(1 << idx);
443 * Clear bitmaps potentially up to the root.
/* Propagate "subtree full" upward while ancestors have no free slots. */
445 while (il->bitmap == 0 && ++layer < idr->layers) {
447 il->bitmap &= ~(1 << idr_pos(id, layer));
/* Sanity check: the id we just allocated must resolve back to 'ptr'. */
452 if (error == 0 && idr_find_locked(idr, id) != ptr) {
453 panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",
457 mtx_unlock(&idr->lock);