/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/stdarg.h>

#include <linux/bitops.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/err.h>
48 * This is quick and dirty and not as re-entrant as the linux version
49 * however it should be fairly fast. It is basically a radix tree with
50 * a builtin bitmap for allocation.
52 MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");
55 idr_max(struct idr *idr)
57 return (1 << (idr->layers * IDR_BITS)) - 1;
61 idr_pos(int id, int layer)
63 return (id >> (IDR_BITS * layer)) & IDR_MASK;
67 idr_init(struct idr *idr)
69 bzero(idr, sizeof(*idr));
70 mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
73 /* Only frees cached pages. */
75 idr_destroy(struct idr *idr)
77 struct idr_layer *il, *iln;
80 for (il = idr->free; il != NULL; il = iln) {
84 mtx_unlock(&idr->lock);
88 idr_remove_layer(struct idr_layer *il, int layer)
98 for (i = 0; i < IDR_SIZE; i++)
100 idr_remove_layer(il->ary[i], layer - 1);
104 idr_remove_all(struct idr *idr)
107 mtx_lock(&idr->lock);
108 idr_remove_layer(idr->top, idr->layers - 1);
111 mtx_unlock(&idr->lock);
115 idr_remove(struct idr *idr, int id)
117 struct idr_layer *il;
122 mtx_lock(&idr->lock);
124 layer = idr->layers - 1;
125 if (il == NULL || id > idr_max(idr)) {
126 mtx_unlock(&idr->lock);
130 * Walk down the tree to this item setting bitmaps along the way
131 * as we know at least one item will be free along this path.
133 while (layer && il) {
134 idx = idr_pos(id, layer);
135 il->bitmap |= 1 << idx;
141 * At this point we've set free space bitmaps up the whole tree.
142 * We could make this non-fatal and unwind but linux dumps a stack
143 * and a warning so I don't think it's necessary.
145 if (il == NULL || (il->bitmap & (1 << idx)) != 0)
146 panic("idr_remove: Item %d not allocated (%p, %p)\n",
149 il->bitmap |= 1 << idx;
150 mtx_unlock(&idr->lock);
155 idr_replace(struct idr *idr, void *ptr, int id)
157 struct idr_layer *il;
162 res = ERR_PTR(-EINVAL);
164 mtx_lock(&idr->lock);
166 layer = idr->layers - 1;
167 if (il == NULL || id > idr_max(idr))
169 while (layer && il) {
170 il = il->ary[idr_pos(id, layer)];
175 * Replace still returns an error if the item was not allocated.
177 if (il != NULL && (il->bitmap & (1 << idx)) != 0) {
182 mtx_unlock(&idr->lock);
187 idr_find(struct idr *idr, int id)
189 struct idr_layer *il;
195 mtx_lock(&idr->lock);
197 layer = idr->layers - 1;
198 if (il == NULL || id > idr_max(idr))
200 while (layer && il) {
201 il = il->ary[idr_pos(id, layer)];
205 res = il->ary[id & IDR_MASK];
207 mtx_unlock(&idr->lock);
212 idr_pre_get(struct idr *idr, gfp_t gfp_mask)
214 struct idr_layer *il, *iln;
215 struct idr_layer *head;
218 mtx_lock(&idr->lock);
220 need = idr->layers + 1;
221 for (il = idr->free; il != NULL; il = il->ary[0])
223 mtx_unlock(&idr->lock);
226 for (head = NULL; need; need--) {
227 iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
230 bitmap_fill(&iln->bitmap, IDR_SIZE);
239 mtx_lock(&idr->lock);
240 il->ary[0] = idr->free;
246 static inline struct idr_layer *
247 idr_get(struct idr *idr)
249 struct idr_layer *il;
253 idr->free = il->ary[0];
257 il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT);
258 bitmap_fill(&il->bitmap, IDR_SIZE);
263 * Could be implemented as get_new_above(idr, ptr, 0, idp) but written
264 * first for simplicity sake.
267 idr_get_new(struct idr *idr, void *ptr, int *idp)
269 struct idr_layer *stack[MAX_LEVEL];
270 struct idr_layer *il;
277 mtx_lock(&idr->lock);
279 * Expand the tree until there is free space.
281 if (idr->top == NULL || idr->top->bitmap == 0) {
282 if (idr->layers == MAX_LEVEL + 1) {
289 il->ary[0] = idr->top;
298 * Walk the tree following free bitmaps, record our path.
300 for (layer = idr->layers - 1;; layer--) {
302 idx = ffsl(il->bitmap);
304 panic("idr_get_new: Invalid leaf state (%p, %p)\n",
307 id |= idx << (layer * IDR_BITS);
310 if (il->ary[idx] == NULL) {
311 il->ary[idx] = idr_get(idr);
312 if (il->ary[idx] == NULL)
318 * Allocate the leaf to the consumer.
320 il->bitmap &= ~(1 << idx);
324 * Clear bitmaps potentially up to the root.
326 while (il->bitmap == 0 && ++layer < idr->layers) {
328 il->bitmap &= ~(1 << idr_pos(id, layer));
332 mtx_unlock(&idr->lock);
334 if (error == 0 && idr_find(idr, id) != ptr) {
335 panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
343 idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
345 struct idr_layer *stack[MAX_LEVEL];
346 struct idr_layer *il;
353 mtx_lock(&idr->lock);
355 * Compute the layers required to support starting_id and the mask
361 while (idx & ~IDR_MASK) {
365 if (layer == MAX_LEVEL + 1) {
370 * Expand the tree until there is free space at or beyond starting_id.
372 while (idr->layers <= layer ||
373 idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
374 if (idr->layers == MAX_LEVEL + 1) {
381 il->ary[0] = idr->top;
382 if (idr->top && idr->top->bitmap == 0)
390 * Walk the tree following free bitmaps, record our path.
392 for (layer = idr->layers - 1;; layer--) {
394 sidx = idr_pos(starting_id, layer);
395 /* Returns index numbered from 0 or size if none exists. */
396 idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
397 if (idx == IDR_SIZE && sidx == 0)
398 panic("idr_get_new: Invalid leaf state (%p, %p)\n",
401 * We may have walked a path where there was a free bit but
402 * it was lower than what we wanted. Restart the search with
403 * a larger starting id. id contains the progress we made so
404 * far. Search the leaf one above this level. This may
405 * restart as many as MAX_LEVEL times but that is expected
408 if (idx == IDR_SIZE) {
409 starting_id = id + (1 << (layer+1 * IDR_BITS));
413 starting_id = 0; /* Search the whole subtree. */
414 id |= idx << (layer * IDR_BITS);
417 if (il->ary[idx] == NULL) {
418 il->ary[idx] = idr_get(idr);
419 if (il->ary[idx] == NULL)
425 * Allocate the leaf to the consumer.
427 il->bitmap &= ~(1 << idx);
431 * Clear bitmaps potentially up to the root.
433 while (il->bitmap == 0 && ++layer < idr->layers) {
435 il->bitmap &= ~(1 << idr_pos(id, layer));
439 mtx_unlock(&idr->lock);
441 if (error == 0 && idr_find(idr, id) != ptr) {
442 panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",