/*
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Path-compressed radix trie implementation.
 * The following code is not generalized into a general purpose library
 * because there are way too many parameters embedded that should really
 * be decided by the library consumers.  At the same time, consumers
 * of this code must achieve the highest possible performance.
 *
 * The implementation takes into account the following rationale:
 * - Size of the nodes should be as small as possible but still big enough
 *   to avoid a large maximum depth for the trie.  This is a balance
 *   between the necessity to not wire too much physical memory for the nodes
 *   and the necessity to avoid too much cache pollution during the trie
 *   operations.
 * - There is not a huge bias toward the number of lookup operations over
 *   the number of insert and remove operations.  This basically implies
 *   that optimizations supposedly helping one operation but hurting the
 *   other must be carefully evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define VM_RADIX_WIDTH  4
#else
#define VM_RADIX_WIDTH  3
#endif

#define VM_RADIX_COUNT  (1 << VM_RADIX_WIDTH)
#define VM_RADIX_MASK   (VM_RADIX_COUNT - 1)
#define VM_RADIX_LIMIT                                                  \
        (howmany((sizeof(vm_pindex_t) * NBBY), VM_RADIX_WIDTH) - 1)
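
/*
 * Worked example (illustrative): on LP64 vm_pindex_t is 64 bits wide, so
 * with VM_RADIX_WIDTH 4 a key holds howmany(64, 4) = 16 levels and
 * VM_RADIX_LIMIT evaluates to 15.  Levels are numbered from 0, the most
 * significant nibble, down to VM_RADIX_LIMIT, the least significant one.
 */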

/* Flag bits stored in node pointers. */
#define VM_RADIX_ISLEAF 0x1
#define VM_RADIX_FLAGS  0x1
#define VM_RADIX_PAD    VM_RADIX_FLAGS

/* Returns one unit associated with the specified level. */
#define VM_RADIX_UNITLEVEL(lev)                                         \
        ((vm_pindex_t)1 << ((VM_RADIX_LIMIT - (lev)) * VM_RADIX_WIDTH))
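
/*
 * Illustrative values, assuming the LP64 parameters above (width 4,
 * limit 15): VM_RADIX_UNITLEVEL(15) == 1, so adjacent leaves at the
 * deepest level differ by one page index, while VM_RADIX_UNITLEVEL(14) ==
 * 16, the stride covered by one slot of a level 14 node.
 */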

struct vm_radix_node {
        void            *rn_child[VM_RADIX_COUNT];      /* Child nodes. */
        vm_pindex_t      rn_owner;                      /* Owner of record. */
        uint16_t         rn_count;                      /* Valid children. */
        uint16_t         rn_clev;                       /* Current level. */
};
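
/*
 * Design note: a slot in rn_child holds either a pointer to another
 * vm_radix_node or, when VM_RADIX_ISLEAF is set in its low bit, an encoded
 * vm_page_t.  Bit 0 of node addresses stays clear because the zone below
 * is created with VM_RADIX_PAD as its alignment mask; see vm_radix_isleaf()
 * and vm_radix_node_page() for the corresponding decoding.
 */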

static uma_zone_t vm_radix_node_zone;

/*
 * Allocate a radix node.  Pre-allocation should ensure that the request
 * will always be satisfied.
 */
static __inline struct vm_radix_node *
vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
{
        struct vm_radix_node *rnode;

        rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT);

        /*
         * The required number of nodes should already be pre-allocated
         * by vm_radix_prealloc().  However, UMA can hold a few nodes
         * in per-CPU buckets, which will not be accessible by the
         * current CPU.  Thus, the allocation could return NULL when
         * the pre-allocated pool is close to exhaustion.  Anyway,
         * in practice this should never occur because a new node
         * is not always required for insert.  Thus, the pre-allocated
         * pool should have some extra pages that prevent this from
         * becoming a problem.
         */
        if (rnode == NULL)
                panic("%s: uma_zalloc() returned NULL for a new node",
                    __func__);
        rnode->rn_owner = owner;
        rnode->rn_count = count;
        rnode->rn_clev = clevel;
        return (rnode);
}

/*
 * Free a radix node.
 */
static __inline void
vm_radix_node_put(struct vm_radix_node *rnode)
{

        uma_zfree(vm_radix_node_zone, rnode);
}

/*
 * Return the position in the array for a given level.
 */
static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{

        return ((index >> ((VM_RADIX_LIMIT - level) * VM_RADIX_WIDTH)) &
            VM_RADIX_MASK);
}

/* Trims the key after the specified level. */
static __inline vm_pindex_t
vm_radix_trimkey(vm_pindex_t index, uint16_t level)
{
        vm_pindex_t ret;

        ret = index;
        if (level < VM_RADIX_LIMIT) {
                ret >>= (VM_RADIX_LIMIT - level) * VM_RADIX_WIDTH;
                ret <<= (VM_RADIX_LIMIT - level) * VM_RADIX_WIDTH;
        }
        return (ret);
}
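
/*
 * Worked example (illustrative, LP64 parameters): for index 0x1234,
 * vm_radix_slot(0x1234, 13) extracts ((0x1234 >> 8) & 0xf) == 0x2, and
 * vm_radix_trimkey(0x1234, 13) zeroes the nibbles deeper than level 13,
 * yielding 0x1200.
 */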

/*
 * Get the root node for a radix tree.
 */
static __inline struct vm_radix_node *
vm_radix_getroot(struct vm_radix *rtree)
{

        return ((struct vm_radix_node *)(rtree->rt_root & ~VM_RADIX_FLAGS));
}

/*
 * Set the root node for a radix tree.
 */
static __inline void
vm_radix_setroot(struct vm_radix *rtree, struct vm_radix_node *rnode)
{

        rtree->rt_root = (uintptr_t)rnode;
}

/*
 * Returns TRUE if the specified radix node is a leaf and FALSE otherwise.
 */
static __inline boolean_t
vm_radix_isleaf(struct vm_radix_node *rnode)
{

        return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0);
}

/*
 * Returns the associated page extracted from rnode if available,
 * and NULL otherwise.
 */
static __inline vm_page_t
vm_radix_node_page(struct vm_radix_node *rnode)
{

        return ((((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0) ?
            (vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS) : NULL);
}
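
/*
 * The two helpers above implement the tagged-pointer scheme: a walk can
 * tell an internal node from a leaf with a single bit test and, for a
 * leaf, recover the page by masking VM_RADIX_FLAGS off, as the encoding
 * in vm_radix_addpage() below is (uintptr_t)page | VM_RADIX_ISLEAF.
 */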

/*
 * Adds the page as a child of the provided node.
 */
static __inline void
vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
    vm_page_t page)
{
        int slot;

        slot = vm_radix_slot(index, clev);
        rnode->rn_child[slot] = (void *)((uintptr_t)page | VM_RADIX_ISLEAF);
}

/*
 * Returns the level where two keys differ.
 * It cannot accept 2 equal keys.
 */
static __inline uint16_t
vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
{
        uint16_t clev;

        KASSERT(index1 != index2, ("%s: passing the same key value %jx",
            __func__, (uintmax_t)index1));

        index1 ^= index2;
        for (clev = 0; clev <= VM_RADIX_LIMIT; clev++)
                if (vm_radix_slot(index1, clev))
                        return (clev);
        panic("%s: cannot reach this point", __func__);
        return (0);
}
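
/*
 * Example (illustrative, LP64 parameters): for keys 0x12345 and 0x12385
 * the XOR is 0xc0, whose most significant non-zero nibble is the second
 * one from the bottom, so the scan from the top returns level 14.
 */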

/*
 * Returns TRUE if it can be determined that key does not belong to the
 * specified rnode.  Otherwise, returns FALSE.
 */
static __inline boolean_t
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
{

        if (rnode->rn_clev > 0) {
                idx = vm_radix_trimkey(idx, rnode->rn_clev - 1);
                idx -= rnode->rn_owner;
                if (idx != 0)
                        return (TRUE);
        }
        return (FALSE);
}
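
/*
 * Note that the check above only compares the search key, trimmed to the
 * level just above this node, against the node's owner: a mismatch proves
 * the key cannot be in this subtree, while a FALSE return is merely a
 * "maybe", which is why lookups still compare the leaf's pindex.
 */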

/*
 * Adjusts the idx key to the first upper level available, based on a valid
 * initial level and map of available levels.
 * Returns a value bigger than 0 to signal that there are no valid levels
 * available.
 */
static __inline int
vm_radix_addlev(vm_pindex_t *idx, boolean_t *levels, uint16_t ilev)
{
        vm_pindex_t wrapidx;

        for (; levels[ilev] == FALSE ||
            vm_radix_slot(*idx, ilev) == (VM_RADIX_COUNT - 1); ilev--)
                if (ilev == 0)
                        break;
        KASSERT(ilev > 0 || levels[0],
            ("%s: levels back-scanning problem", __func__));
        if (ilev == 0 && vm_radix_slot(*idx, ilev) == (VM_RADIX_COUNT - 1))
                return (1);
        wrapidx = *idx;
        *idx = vm_radix_trimkey(*idx, ilev);
        *idx += VM_RADIX_UNITLEVEL(ilev);
        return (*idx < wrapidx);
}

/*
 * Adjusts the idx key to the first lower level available, based on a valid
 * initial level and map of available levels.
 * Returns a value bigger than 0 to signal that there are no valid levels
 * available.
 */
static __inline int
vm_radix_declev(vm_pindex_t *idx, boolean_t *levels, uint16_t ilev)
{
        vm_pindex_t wrapidx;

        for (; levels[ilev] == FALSE ||
            vm_radix_slot(*idx, ilev) == 0; ilev--)
                if (ilev == 0)
                        break;
        KASSERT(ilev > 0 || levels[0],
            ("%s: levels back-scanning problem", __func__));
        if (ilev == 0 && vm_radix_slot(*idx, ilev) == 0)
                return (1);
        wrapidx = *idx;
        *idx = vm_radix_trimkey(*idx, ilev);
        *idx |= VM_RADIX_UNITLEVEL(ilev) - 1;
        *idx -= VM_RADIX_UNITLEVEL(ilev);
        return (*idx > wrapidx);
}
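
/*
 * Example for the two helpers above (illustrative, LP64 parameters):
 * given *idx == 0xff at ilev 14, the slot at level 14 is already the
 * maximum, so vm_radix_addlev() backs up to level 13 (assuming that level
 * was recorded), trims the key there and adds VM_RADIX_UNITLEVEL(13),
 * yielding 0x100.  vm_radix_declev() is the mirror image for descending
 * scans.
 */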

/*
 * Internal helper for vm_radix_reclaim_allnodes().
 * This function is recursive.
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
        int slot;

        KASSERT(rnode->rn_count <= VM_RADIX_COUNT,
            ("vm_radix_reclaim_allnodes_int: bad count in rnode %p", rnode));
        for (slot = 0; rnode->rn_count != 0; slot++) {
                if (rnode->rn_child[slot] == NULL)
                        continue;
                if (!vm_radix_isleaf(rnode->rn_child[slot]))
                        vm_radix_reclaim_allnodes_int(rnode->rn_child[slot]);
                rnode->rn_child[slot] = NULL;
                rnode->rn_count--;
        }
        vm_radix_node_put(rnode);
}

/*
 * Radix node zone destructor.
 */
#ifdef INVARIANTS
static void
vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused)
{
        struct vm_radix_node *rnode;
        int slot;

        rnode = mem;
        KASSERT(rnode->rn_count == 0,
            ("vm_radix_node_put: rnode %p has %d children", rnode,
            rnode->rn_count));
        for (slot = 0; slot < VM_RADIX_COUNT; slot++)
                KASSERT(rnode->rn_child[slot] == NULL,
                    ("vm_radix_node_put: rnode %p has a child", rnode));
}
#endif

/*
 * Radix node zone initializer.
 */
static int
vm_radix_node_zone_init(void *mem, int size __unused, int flags __unused)
{
        struct vm_radix_node *rnode;

        rnode = mem;
        memset(rnode->rn_child, 0, sizeof(rnode->rn_child));
        return (0);
}

/*
 * Pre-allocate intermediate nodes from the UMA slab zone.
 */
static void
vm_radix_prealloc(void *arg __unused)
{

        if (!uma_zone_reserve_kva(vm_radix_node_zone, cnt.v_page_count))
                panic("%s: unable to create new zone", __func__);
        uma_prealloc(vm_radix_node_zone, cnt.v_page_count);
}
SYSINIT(vm_radix_prealloc, SI_SUB_KMEM, SI_ORDER_SECOND, vm_radix_prealloc,
    NULL);

/*
 * Initialize the UMA slab zone.
 * Until vm_radix_prealloc() is called, the zone will be served by the
 * UMA boot-time pre-allocated pool of pages.
 */
void
vm_radix_init(void)
{

        vm_radix_node_zone = uma_zcreate("RADIX NODE",
            sizeof(struct vm_radix_node), NULL,
#ifdef INVARIANTS
            vm_radix_node_zone_dtor,
#else
            NULL,
#endif
            vm_radix_node_zone_init, NULL, VM_RADIX_PAD, UMA_ZONE_VM |
            UMA_ZONE_NOFREE);
}

/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 */
void
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
        vm_pindex_t index, newind;
        struct vm_radix_node *rnode, *tmp, *tmp2;
        vm_page_t m;
        int slot;
        uint16_t clev;

        index = page->pindex;

        /*
         * The owner of record for root is not really important because it
         * will never be used.
         */
        rnode = vm_radix_getroot(rtree);
        if (rnode == NULL) {
                rnode = vm_radix_node_get(0, 1, 0);
                vm_radix_setroot(rtree, rnode);
                vm_radix_addpage(rnode, index, 0, page);
                return;
        }
        do {
                slot = vm_radix_slot(index, rnode->rn_clev);
                m = vm_radix_node_page(rnode->rn_child[slot]);
                if (m != NULL) {
                        if (m->pindex == index)
                                panic("%s: key %jx is already present",
                                    __func__, (uintmax_t)index);
                        clev = vm_radix_keydiff(m->pindex, index);
                        tmp = vm_radix_node_get(vm_radix_trimkey(index,
                            clev - 1), 2, clev);
                        rnode->rn_child[slot] = tmp;
                        vm_radix_addpage(tmp, index, clev, page);
                        vm_radix_addpage(tmp, m->pindex, clev, m);
                        return;
                }
                if (rnode->rn_child[slot] == NULL) {
                        rnode->rn_count++;
                        vm_radix_addpage(rnode, index, rnode->rn_clev, page);
                        return;
                }
                rnode = rnode->rn_child[slot];
        } while (!vm_radix_keybarr(rnode, index));

        /*
         * Scan the trie from the top and find the parent to insert
         * the new object.
         */
        newind = rnode->rn_owner;
        clev = vm_radix_keydiff(newind, index);
        slot = VM_RADIX_COUNT;
        for (rnode = vm_radix_getroot(rtree); ; rnode = tmp) {
                KASSERT(rnode != NULL, ("%s: edge cannot be NULL in the scan",
                    __func__));
                KASSERT(clev >= rnode->rn_clev,
                    ("%s: unexpected trie depth: clev: %d, rnode->rn_clev: %d",
                    __func__, clev, rnode->rn_clev));
                slot = vm_radix_slot(index, rnode->rn_clev);
                tmp = rnode->rn_child[slot];
                KASSERT(tmp != NULL && !vm_radix_isleaf(tmp),
                    ("%s: unexpected lookup interruption", __func__));
                if (tmp->rn_clev > clev)
                        break;
        }
        KASSERT(rnode != NULL && tmp != NULL && slot < VM_RADIX_COUNT,
            ("%s: invalid scan parameters rnode: %p, tmp: %p, slot: %d",
            __func__, (void *)rnode, (void *)tmp, slot));

        /*
         * A new node is needed because the right insertion level has been
         * reached.  Set up the new intermediate node and add the 2 children:
         * the new object and the older edge.
         */
        tmp2 = vm_radix_node_get(vm_radix_trimkey(index, clev - 1), 2,
            clev);
        rnode->rn_child[slot] = tmp2;
        vm_radix_addpage(tmp2, index, clev, page);
        slot = vm_radix_slot(newind, clev);
        tmp2->rn_child[slot] = tmp;
}
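
/*
 * Usage sketch (hypothetical caller; "object" and its "rtree" field are
 * illustrative, not taken from this file).  The caller sets the page's
 * pindex and serializes trie updates through the owning object's lock:
 *
 *      m->pindex = idx;
 *      vm_radix_insert(&object->rtree, m);
 */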

/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{
        struct vm_radix_node *rnode;
        vm_page_t m;
        int slot;

        rnode = vm_radix_getroot(rtree);
        while (rnode != NULL) {
                if (vm_radix_keybarr(rnode, index))
                        return (NULL);
                slot = vm_radix_slot(index, rnode->rn_clev);
                rnode = rnode->rn_child[slot];
                m = vm_radix_node_page(rnode);
                if (m != NULL) {
                        if (m->pindex == index)
                                return (m);
                        else
                                return (NULL);
                }
        }
        return (NULL);
}
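
/*
 * For instance (hypothetical caller, illustrative names), a residency
 * check reduces to:
 *
 *      m = vm_radix_lookup(&object->rtree, pindex);
 *      if (m != NULL)
 *              ... the page is resident ...
 */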

/*
 * Look up the nearest entry at a position bigger than or equal to index.
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
        vm_pindex_t inc;
        vm_page_t m;
        struct vm_radix_node *rnode;
        int slot;
        uint16_t difflev;
        boolean_t maplevels[VM_RADIX_LIMIT + 1];
#ifdef INVARIANTS
        int loops = 0;
#endif

restart:
        KASSERT(++loops < 1000, ("%s: too many loops", __func__));
        for (difflev = 0; difflev < (VM_RADIX_LIMIT + 1); difflev++)
                maplevels[difflev] = FALSE;
        rnode = vm_radix_getroot(rtree);
        while (rnode != NULL) {
                maplevels[rnode->rn_clev] = TRUE;

                /*
                 * If the keys differ before the current bisection node,
                 * then the search key might roll back to the earliest
                 * available bisection node or to the smallest key
                 * in the current domain (if the owner is bigger than the
                 * search key).
                 * The maplevels array records whether any node has been
                 * seen at a given level.  This aids the search for a valid
                 * bisection node.
                 */
                if (vm_radix_keybarr(rnode, index)) {
                        difflev = vm_radix_keydiff(index, rnode->rn_owner);
                        if (index > rnode->rn_owner) {
                                if (vm_radix_addlev(&index, maplevels,
                                    difflev) > 0)
                                        break;
                        } else
                                index = vm_radix_trimkey(rnode->rn_owner,
                                    difflev);
                        goto restart;
                }
                slot = vm_radix_slot(index, rnode->rn_clev);
                m = vm_radix_node_page(rnode->rn_child[slot]);
                if (m != NULL && m->pindex >= index)
                        return (m);
                if (rnode->rn_child[slot] != NULL && m == NULL) {
                        rnode = rnode->rn_child[slot];
                        continue;
                }

                /*
                 * Look for an available edge or page within the current
                 * bisection node.
                 */
                if (slot < (VM_RADIX_COUNT - 1)) {
                        inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
                        index = vm_radix_trimkey(index, rnode->rn_clev);
                        index += inc;
                        slot++;
                        for (;; index += inc, slot++) {
                                m = vm_radix_node_page(rnode->rn_child[slot]);
                                if (m != NULL && m->pindex >= index)
                                        return (m);
                                if ((rnode->rn_child[slot] != NULL &&
                                    m == NULL) || slot == (VM_RADIX_COUNT - 1))
                                        break;
                        }
                }

                /*
                 * If a valid page or edge bigger than the search slot is
                 * found in the traversal, skip to the next higher-level key.
                 */
                if (slot == (VM_RADIX_COUNT - 1) &&
                    (rnode->rn_child[slot] == NULL || m != NULL)) {
                        if (rnode->rn_clev == 0 || vm_radix_addlev(&index,
                            maplevels, rnode->rn_clev - 1) > 0)
                                break;
                        goto restart;
                }
                rnode = rnode->rn_child[slot];
        }
        return (NULL);
}
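
/*
 * A typical use of the GE lookup is a forward scan (sketch with
 * illustrative names):
 *
 *      for (m = vm_radix_lookup_ge(rtree, start); m != NULL;
 *          m = vm_radix_lookup_ge(rtree, m->pindex + 1))
 *              ... visit m ...
 */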

/*
 * Look up the nearest entry at a position less than or equal to index.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
        vm_pindex_t inc;
        vm_page_t m;
        struct vm_radix_node *rnode;
        int slot;
        uint16_t difflev;
        boolean_t maplevels[VM_RADIX_LIMIT + 1];
#ifdef INVARIANTS
        int loops = 0;
#endif

restart:
        KASSERT(++loops < 1000, ("%s: too many loops", __func__));
        for (difflev = 0; difflev < (VM_RADIX_LIMIT + 1); difflev++)
                maplevels[difflev] = FALSE;
        rnode = vm_radix_getroot(rtree);
        while (rnode != NULL) {
                maplevels[rnode->rn_clev] = TRUE;

                /*
                 * If the keys differ before the current bisection node,
                 * then the search key might roll back to the earliest
                 * available bisection node or to the largest key
                 * in the current domain (if the owner is smaller than the
                 * search key).
                 * The maplevels array records whether any node has been
                 * seen at a given level.  This aids the search for a valid
                 * bisection node.
                 */
                if (vm_radix_keybarr(rnode, index)) {
                        difflev = vm_radix_keydiff(index, rnode->rn_owner);
                        if (index > rnode->rn_owner) {
                                index = vm_radix_trimkey(rnode->rn_owner,
                                    difflev);
                                index |= VM_RADIX_UNITLEVEL(difflev) - 1;
                        } else if (vm_radix_declev(&index, maplevels,
                            difflev) > 0)
                                break;
                        goto restart;
                }
                slot = vm_radix_slot(index, rnode->rn_clev);
                m = vm_radix_node_page(rnode->rn_child[slot]);
                if (m != NULL && m->pindex <= index)
                        return (m);
                if (rnode->rn_child[slot] != NULL && m == NULL) {
                        rnode = rnode->rn_child[slot];
                        continue;
                }

                /*
                 * Look for an available edge or page within the current
                 * bisection node.
                 */
                if (slot > 0) {
                        inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
                        index = vm_radix_trimkey(index, rnode->rn_clev);
                        index |= inc - 1;
                        index -= inc;
                        slot--;
                        for (;; index -= inc, slot--) {
                                m = vm_radix_node_page(rnode->rn_child[slot]);
                                if (m != NULL && m->pindex <= index)
                                        return (m);
                                if ((rnode->rn_child[slot] != NULL &&
                                    m == NULL) || slot == 0)
                                        break;
                        }
                }

                /*
                 * If a valid page or edge smaller than the search slot is
                 * found in the traversal, skip to the next higher-level key.
                 */
                if (slot == 0 && (rnode->rn_child[slot] == NULL || m != NULL)) {
                        if (rnode->rn_clev == 0 || vm_radix_declev(&index,
                            maplevels, rnode->rn_clev - 1) > 0)
                                break;
                        goto restart;
                }
                rnode = rnode->rn_child[slot];
        }
        return (NULL);
}

/*
 * Remove the specified index from the tree.
 * Panics if the key is not present.
 */
void
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
        struct vm_radix_node *rnode, *parent;
        vm_page_t m;
        int i, slot;

        parent = NULL;
        rnode = vm_radix_getroot(rtree);
        for (;;) {
                if (rnode == NULL)
                        panic("vm_radix_remove: impossible to locate the key");
                slot = vm_radix_slot(index, rnode->rn_clev);
                m = vm_radix_node_page(rnode->rn_child[slot]);
                if (m != NULL && m->pindex == index) {
                        rnode->rn_child[slot] = NULL;
                        rnode->rn_count--;
                        if (rnode->rn_count > 1)
                                break;
                        if (parent == NULL) {
                                if (rnode->rn_count == 0) {
                                        vm_radix_node_put(rnode);
                                        vm_radix_setroot(rtree, NULL);
                                }
                                break;
                        }
                        for (i = 0; i < VM_RADIX_COUNT; i++)
                                if (rnode->rn_child[i] != NULL)
                                        break;
                        KASSERT(i != VM_RADIX_COUNT,
                            ("%s: invalid node configuration", __func__));
                        slot = vm_radix_slot(index, parent->rn_clev);
                        KASSERT(parent->rn_child[slot] == rnode,
                            ("%s: invalid child value", __func__));
                        parent->rn_child[slot] = rnode->rn_child[i];
                        rnode->rn_count--;
                        rnode->rn_child[i] = NULL;
                        vm_radix_node_put(rnode);
                        break;
                }
                if (m != NULL && m->pindex != index)
                        panic("%s: invalid key found", __func__);
                parent = rnode;
                rnode = rnode->rn_child[slot];
        }
}

/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive, but there is a tight control on it as the
 * maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
        struct vm_radix_node *root;

        root = vm_radix_getroot(rtree);
        if (root == NULL)
                return;
        vm_radix_setroot(rtree, NULL);
        vm_radix_reclaim_allnodes_int(root);
}
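
/*
 * This is meant for tearing down a whole collection at once instead of
 * removing keys one by one (sketch with illustrative names):
 *
 *      vm_radix_reclaim_allnodes(&object->rtree);
 *
 * Only the internal nodes return to the UMA zone; the pages referenced by
 * the leaves are left untouched.
 */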

#ifdef DDB
/*
 * Show details about the given radix node.
 */
DB_SHOW_COMMAND(radixnode, db_show_radixnode)
{
        struct vm_radix_node *rnode;
        int i;

        if (!have_addr)
                return;
        rnode = (struct vm_radix_node *)addr;
        db_printf("radixnode %p, owner %jx, children count %u, level %u:\n",
            (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_count,
            rnode->rn_clev);
        for (i = 0; i < VM_RADIX_COUNT; i++)
                if (rnode->rn_child[i] != NULL)
                        db_printf("slot: %d, val: %p, page: %p, clev: %d\n",
                            i, (void *)rnode->rn_child[i],
                            (void *)vm_radix_node_page(rnode->rn_child[i]),
                            rnode->rn_clev);
}
#endif /* DDB */