/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Path-compressed radix trie implementation.
 * The following code is not generalized into a general-purpose library
 * because there are way too many embedded parameters that should really
 * be decided by the library consumers.  At the same time, consumers
 * of this code must achieve the highest possible performance.
 *
 * The implementation takes into account the following rationale:
 * - The size of the nodes should be as small as possible but still big
 *   enough to avoid a large maximum depth for the trie.  This is a balance
 *   between the necessity to not wire too much physical memory for the nodes
 *   and the necessity to avoid too much cache pollution during the trie
 *   operations.
 * - There is not a huge bias toward the number of lookup operations over
 *   the number of insert and remove operations.  This basically implies
 *   that optimizations supposedly helping one operation but hurting the
 *   other should be carefully evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define	VM_RADIX_WIDTH	4
#else
#define	VM_RADIX_WIDTH	3
#endif
#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
#define	VM_RADIX_LIMIT							\
	(howmany(sizeof(vm_pindex_t) * NBBY, VM_RADIX_WIDTH) - 1)
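
/*
 * For example, on a 64-bit platform (VM_RADIX_WIDTH == 4 and a 64-bit
 * vm_pindex_t), VM_RADIX_COUNT == 16 children per node, VM_RADIX_MASK ==
 * 0xf and VM_RADIX_LIMIT == howmany(64, 4) - 1 == 15, so the levels are
 * numbered 0 through 15 starting from the least significant nibble of
 * the key.
 */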
/* Flag bits stored in node pointers. */
#define	VM_RADIX_ISLEAF	0x1
#define	VM_RADIX_FLAGS	0x1
#define	VM_RADIX_PAD	VM_RADIX_FLAGS
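
/*
 * Child slots and the root hold tagged pointers: a leaf is stored as
 * (uintptr_t)page | VM_RADIX_ISLEAF, while an internal node pointer has
 * bit 0 clear.  This works because both vm_page structures and radix
 * nodes are sufficiently aligned (the zone below is created with
 * VM_RADIX_PAD alignment), so bit 0 of a valid pointer is always free.
 */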
/* Returns one unit associated with specified level. */
#define	VM_RADIX_UNITLEVEL(lev)						\
	((vm_pindex_t)1 << ((lev) * VM_RADIX_WIDTH))
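
/*
 * For example, with VM_RADIX_WIDTH == 4, VM_RADIX_UNITLEVEL(0) == 0x1,
 * VM_RADIX_UNITLEVEL(1) == 0x10 and VM_RADIX_UNITLEVEL(2) == 0x100:
 * the key distance between two adjacent slots at the given level.
 */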
struct vm_radix_node {
	vm_pindex_t	 rn_owner;			/* Owner of record. */
	uint16_t	 rn_count;			/* Valid children. */
	uint16_t	 rn_clev;			/* Current level. */
	void		*rn_child[VM_RADIX_COUNT];	/* Child nodes. */
};

static uma_zone_t vm_radix_node_zone;
/*
 * Allocate a radix node.
 */
static __inline struct vm_radix_node *
vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
{
	struct vm_radix_node *rnode;

	rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT);
	if (rnode == NULL)
		return (NULL);
	rnode->rn_owner = owner;
	rnode->rn_count = count;
	rnode->rn_clev = clevel;
	return (rnode);
}
/*
 * Free radix node.
 */
static __inline void
vm_radix_node_put(struct vm_radix_node *rnode)
{

	uma_zfree(vm_radix_node_zone, rnode);
}
/*
 * Return the position in the array for a given level.
 */
static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{

	return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
}
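
/*
 * For example, with VM_RADIX_WIDTH == 4, vm_radix_slot(0x1234, 1)
 * extracts the second nibble of the key: (0x1234 >> 4) & 0xf == 0x3.
 */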
/* Trims the key after the specified level. */
static __inline vm_pindex_t
vm_radix_trimkey(vm_pindex_t index, uint16_t level)
{
	vm_pindex_t ret;

	ret = index;
	if (level > 0) {
		ret >>= level * VM_RADIX_WIDTH;
		ret <<= level * VM_RADIX_WIDTH;
	}
	return (ret);
}
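
/*
 * For example, with VM_RADIX_WIDTH == 4, vm_radix_trimkey(0x1234, 2)
 * clears the two low-order slots of the key and yields 0x1200.
 */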
/*
 * Get the root node for a radix tree.
 */
static __inline struct vm_radix_node *
vm_radix_getroot(struct vm_radix *rtree)
{

	return ((struct vm_radix_node *)rtree->rt_root);
}
/*
 * Set the root node for a radix tree.
 */
static __inline void
vm_radix_setroot(struct vm_radix *rtree, struct vm_radix_node *rnode)
{

	rtree->rt_root = (uintptr_t)rnode;
}
/*
 * Returns TRUE if the specified radix node is a leaf and FALSE otherwise.
 */
static __inline boolean_t
vm_radix_isleaf(struct vm_radix_node *rnode)
{

	return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0);
}
/*
 * Returns the associated page extracted from rnode.
 */
static __inline vm_page_t
vm_radix_topage(struct vm_radix_node *rnode)
{

	return ((vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS));
}
/*
 * Adds the page as a child of the provided node.
 */
static __inline void
vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
    vm_page_t page)
{
	int slot;

	slot = vm_radix_slot(index, clev);
	rnode->rn_child[slot] = (void *)((uintptr_t)page | VM_RADIX_ISLEAF);
}
/*
 * Returns the slot where two keys differ.
 * It cannot accept two equal keys.
 */
static __inline uint16_t
vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
{
	uint16_t clev;

	KASSERT(index1 != index2, ("%s: passing the same key value %jx",
	    __func__, (uintmax_t)index1));

	index1 ^= index2;
	for (clev = VM_RADIX_LIMIT;; clev--)
		if (vm_radix_slot(index1, clev) != 0)
			return (clev);
}
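
/*
 * For example, with VM_RADIX_WIDTH == 4, vm_radix_keydiff(0x1234, 0x1634)
 * XORs the keys into 0x0400, whose only non-zero slot is at level 2, so
 * level 2 is where the two keys first diverge.
 */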
/*
 * Returns TRUE if it can be determined that key does not belong to the
 * specified rnode.  Otherwise, returns FALSE.
 */
static __inline boolean_t
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
{

	if (rnode->rn_clev < VM_RADIX_LIMIT) {
		idx = vm_radix_trimkey(idx, rnode->rn_clev + 1);
		return (idx != rnode->rn_owner);
	}
	return (FALSE);
}
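
/*
 * In other words, a key misses a node when its bits above the node's
 * level disagree with the path recorded in rn_owner.  For example, a node
 * with rn_owner == 0x1200 and rn_clev == 1 covers keys 0x1200-0x12ff
 * (assuming VM_RADIX_WIDTH == 4); for idx == 0x1334 the key is trimmed to
 * 0x1300 and TRUE is returned because 0x1300 != 0x1200.
 */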
/*
 * Internal helper for vm_radix_reclaim_allnodes().
 * This function is recursive.
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
	int slot;

	KASSERT(rnode->rn_count <= VM_RADIX_COUNT,
	    ("vm_radix_reclaim_allnodes_int: bad count in rnode %p", rnode));
	for (slot = 0; rnode->rn_count != 0; slot++) {
		if (rnode->rn_child[slot] == NULL)
			continue;
		if (!vm_radix_isleaf(rnode->rn_child[slot]))
			vm_radix_reclaim_allnodes_int(rnode->rn_child[slot]);
		rnode->rn_child[slot] = NULL;
		rnode->rn_count--;
	}
	vm_radix_node_put(rnode);
}
#ifdef INVARIANTS
/*
 * Radix node zone destructor.
 */
static void
vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused)
{
	struct vm_radix_node *rnode;
	int slot;

	rnode = mem;
	KASSERT(rnode->rn_count == 0,
	    ("vm_radix_node_put: rnode %p has %d children", rnode,
	    rnode->rn_count));
	for (slot = 0; slot < VM_RADIX_COUNT; slot++)
		KASSERT(rnode->rn_child[slot] == NULL,
		    ("vm_radix_node_put: rnode %p has a child", rnode));
}
#endif
static int
vm_radix_node_zone_init(void *mem, int size __unused, int flags __unused)
{
	struct vm_radix_node *rnode;

	rnode = mem;
	bzero(rnode, sizeof(*rnode));
	return (0);
}
#ifndef UMA_MD_SMALL_ALLOC
void vm_radix_reserve_kva(void);
/*
 * Reserve the KVA necessary to satisfy the node allocation.
 * This is mandatory on architectures that do not support a direct mapping,
 * as they would otherwise have to carve into the kernel maps for every
 * node allocation, resulting in deadlocks for consumers already working
 * with kernel maps.
 */
void
vm_radix_reserve_kva(void)
{

	/*
	 * Calculate the number of reserved nodes, discounting the pages that
	 * are needed to store them.
	 */
	if (!uma_zone_reserve_kva(vm_radix_node_zone,
	    ((vm_paddr_t)vm_cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
	    sizeof(struct vm_radix_node))))
		panic("%s: unable to reserve KVA", __func__);
}
#endif
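
/*
 * A sketch of the derivation for the formula above: let n be the number
 * of reserved nodes and s = sizeof(struct vm_radix_node).  One node may
 * be needed for every physical page, except that the pages used to store
 * the nodes themselves need none, so n = v_page_count - n * s / PAGE_SIZE.
 * Solving for n gives n = v_page_count * PAGE_SIZE / (PAGE_SIZE + s),
 * which is the count passed to uma_zone_reserve_kva().
 */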
/*
 * Initialize the UMA slab zone.
 */
void
vm_radix_zinit(void)
{

	vm_radix_node_zone = uma_zcreate("RADIX NODE",
	    sizeof(struct vm_radix_node), NULL,
#ifdef INVARIANTS
	    vm_radix_node_zone_dtor,
#else
	    NULL,
#endif
	    vm_radix_node_zone_init, NULL, VM_RADIX_PAD, UMA_ZONE_VM);
}
/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 */
int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	vm_pindex_t index, newind;
	void **parentp;
	struct vm_radix_node *rnode, *tmp;
	vm_page_t m;
	int slot;
	uint16_t clev;

	index = page->pindex;

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL) {
		rtree->rt_root = (uintptr_t)page | VM_RADIX_ISLEAF;
		return (0);
	}
	parentp = (void **)&rtree->rt_root;
	for (;;) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			clev = vm_radix_keydiff(m->pindex, index);
			tmp = vm_radix_node_get(vm_radix_trimkey(index,
			    clev + 1), 2, clev);
			if (tmp == NULL)
				return (ENOMEM);
			*parentp = tmp;
			vm_radix_addpage(tmp, index, clev, page);
			vm_radix_addpage(tmp, m->pindex, clev, m);
			return (0);
		} else if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (rnode->rn_child[slot] == NULL) {
			rnode->rn_count++;
			vm_radix_addpage(rnode, index, rnode->rn_clev, page);
			return (0);
		}
		parentp = &rnode->rn_child[slot];
		rnode = rnode->rn_child[slot];
	}

	/*
	 * A new node is needed because the right insertion level is reached.
	 * Set up the new intermediate node and add the two children: the
	 * new object and the older edge.
	 */
	newind = rnode->rn_owner;
	clev = vm_radix_keydiff(newind, index);
	tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1), 2, clev);
	if (tmp == NULL)
		return (ENOMEM);
	*parentp = tmp;
	vm_radix_addpage(tmp, index, clev, page);
	slot = vm_radix_slot(newind, clev);
	tmp->rn_child[slot] = rnode;
	return (0);
}
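
/*
 * Usage sketch (hypothetical caller, with locking and failure policy
 * elided): since nodes are allocated with M_NOWAIT, insertion can fail
 * with ENOMEM, and a caller that must succeed would typically retry
 * after sleeping in vm_radix_wait():
 *
 *	while (vm_radix_insert(&object->rtree, m) != 0)
 *		vm_radix_wait();
 */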
/*
 * Returns TRUE if the specified radix tree contains a single leaf and FALSE
 * otherwise.
 */
boolean_t
vm_radix_is_singleton(struct vm_radix *rtree)
{
	struct vm_radix_node *rnode;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (FALSE);
	return (vm_radix_isleaf(rnode));
}
/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	int slot;

	rnode = vm_radix_getroot(rtree);
	while (rnode != NULL) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				return (m);
			else
				break;
		} else if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		rnode = rnode->rn_child[slot];
	}
	return (NULL);
}
/*
 * Look up the nearest entry at a position greater than or equal to index.
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex >= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node or to the smallest key
		 * in the current node (if the owner is greater than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_ge: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == (VM_RADIX_COUNT - 1));

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is less than VM_RADIX_COUNT - 1.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
				index += VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else
				index = rnode->rn_owner;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_ge: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex >= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot < (VM_RADIX_COUNT - 1)) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			do {
				index += inc;
				slot++;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex >= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot < (VM_RADIX_COUNT - 1));
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_ge: child is radix node"));

		/*
		 * If a page or edge greater than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_ge: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_ge: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}
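
/*
 * A behavioral example: in a trie holding pages at indices 0x10 and 0x30,
 * vm_radix_lookup_ge(rtree, 0x11) skips past the 0x10 leaf, finds no edge
 * covering 0x11-0x2f and returns the page at index 0x30, while
 * vm_radix_lookup_ge(rtree, 0x31) exhausts the stack and returns NULL.
 */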
/*
 * Look up the nearest entry at a position less than or equal to index.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex <= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node or to the largest key
		 * in the current node (if the owner is smaller than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
				index = rnode->rn_owner + VM_RADIX_COUNT *
				    VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_le: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == 0);

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is greater than 0.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
			}
			index--;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_le: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = rnode->rn_child[slot];
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex <= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot > 0) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index |= inc - 1;
			do {
				index -= inc;
				slot--;
				child = rnode->rn_child[slot];
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex <= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot > 0);
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_le: child is radix node"));

		/*
		 * If a page or edge smaller than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_le: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_le: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}
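
/*
 * Symmetrically to the example after vm_radix_lookup_ge(): in a trie
 * holding pages at indices 0x10 and 0x30, vm_radix_lookup_le(rtree, 0x2f)
 * returns the page at index 0x10, while vm_radix_lookup_le(rtree, 0xf)
 * returns NULL.
 */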
/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent;
	vm_page_t m;
	int i, slot;

	rnode = vm_radix_getroot(rtree);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			return (NULL);
		vm_radix_setroot(rtree, NULL);
		return (m);
	}
	parent = NULL;
	for (;;) {
		if (rnode == NULL)
			return (NULL);
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (vm_radix_isleaf(rnode->rn_child[slot])) {
			m = vm_radix_topage(rnode->rn_child[slot]);
			if (m->pindex != index)
				return (NULL);
			rnode->rn_child[slot] = NULL;
			rnode->rn_count--;
			if (rnode->rn_count > 1)
				return (m);
			for (i = 0; i < VM_RADIX_COUNT; i++)
				if (rnode->rn_child[i] != NULL)
					break;
			KASSERT(i != VM_RADIX_COUNT,
			    ("%s: invalid node configuration", __func__));
			if (parent == NULL)
				vm_radix_setroot(rtree, rnode->rn_child[i]);
			else {
				slot = vm_radix_slot(index, parent->rn_clev);
				KASSERT(parent->rn_child[slot] == rnode,
				    ("%s: invalid child value", __func__));
				parent->rn_child[slot] = rnode->rn_child[i];
			}
			rnode->rn_count--;
			rnode->rn_child[i] = NULL;
			vm_radix_node_put(rnode);
			return (m);
		}
		parent = rnode;
		rnode = rnode->rn_child[slot];
	}
}
/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive, but there is a tight control on it as the
 * maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_getroot(rtree);
	if (root == NULL)
		return;
	vm_radix_setroot(rtree, NULL);
	if (!vm_radix_isleaf(root))
		vm_radix_reclaim_allnodes_int(root);
}
/*
 * Replace an existing page in the trie with another one.
 * Panics if there is not an old page in the trie at the new page's index.
 */
vm_page_t
vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	vm_pindex_t index;
	int slot;

	index = newpage->pindex;
	rnode = vm_radix_getroot(rtree);
	if (rnode == NULL)
		panic("%s: replacing page on an empty trie", __func__);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			panic("%s: original replacing root key not found",
			    __func__);
		rtree->rt_root = (uintptr_t)newpage | VM_RADIX_ISLEAF;
		return (m);
	}
	for (;;) {
		slot = vm_radix_slot(index, rnode->rn_clev);
		if (vm_radix_isleaf(rnode->rn_child[slot])) {
			m = vm_radix_topage(rnode->rn_child[slot]);
			if (m->pindex == index) {
				rnode->rn_child[slot] =
				    (void *)((uintptr_t)newpage |
				    VM_RADIX_ISLEAF);
				return (m);
			} else
				break;
		} else if (rnode->rn_child[slot] == NULL ||
		    vm_radix_keybarr(rnode->rn_child[slot], index))
			break;
		rnode = rnode->rn_child[slot];
	}
	panic("%s: original replacing page not found", __func__);
}
void
vm_radix_wait(void)
{

	uma_zwait(vm_radix_node_zone);
}
#ifdef DDB
/*
 * Show details about the given radix node.
 */
DB_SHOW_COMMAND(radixnode, db_show_radixnode)
{
	struct vm_radix_node *rnode;
	int i;

	if (!have_addr)
		return;
	rnode = (struct vm_radix_node *)addr;
	db_printf("radixnode %p, owner %jx, children count %u, level %u:\n",
	    (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_count,
	    rnode->rn_clev);
	for (i = 0; i < VM_RADIX_COUNT; i++)
		if (rnode->rn_child[i] != NULL)
			db_printf("slot: %d, val: %p, page: %p, clev: %d\n",
			    i, (void *)rnode->rn_child[i],
			    vm_radix_isleaf(rnode->rn_child[i]) ?
			    vm_radix_topage(rnode->rn_child[i]) : NULL,
			    rnode->rn_clev);
}
#endif /* DDB */