/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 EMC Corp.
 * Copyright (c) 2011 Jeffrey Roberson <jeff@freebsd.org>
 * Copyright (c) 2008 Mayur Shardul <mayur.shardul@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Path-compressed radix trie implementation.
 * The following code is not generalized into a general purpose library
 * because there are way too many parameters embedded that should really
 * be decided by the library consumers.  At the same time, consumers
 * of this code must achieve the highest possible performance.
 *
 * The implementation takes into account the following rationale:
 * - Size of the nodes should be as small as possible but still big enough
 *   to avoid a large maximum depth for the trie.  This is a balance
 *   between the necessity to not wire too much physical memory for the nodes
 *   and the necessity to avoid too much cache pollution during the trie
 *   operations.
 * - There is not a huge bias toward the number of lookup operations over
 *   the number of insert and remove operations.  This basically implies
 *   that optimizations supposedly helping one operation but hurting the
 *   other must be carefully evaluated.
 * - On average not many nodes are expected to be fully populated, hence
 *   level compression may just complicate things.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>
#include <sys/smr.h>
#include <sys/smr_types.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
/*
 * These widths should allow the pointers to a node's children to fit within
 * a single cache line.  The extra levels from a narrow width should not be
 * a problem thanks to path compression.
 */
#ifdef __LP64__
#define	VM_RADIX_WIDTH	4
#else
#define	VM_RADIX_WIDTH	3
#endif
#define	VM_RADIX_COUNT	(1 << VM_RADIX_WIDTH)
#define	VM_RADIX_MASK	(VM_RADIX_COUNT - 1)
#define	VM_RADIX_LIMIT							\
	(howmany(sizeof(vm_pindex_t) * NBBY, VM_RADIX_WIDTH) - 1)
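/*
 * For example, with a 64-bit vm_pindex_t and VM_RADIX_WIDTH of 4, each node
 * has VM_RADIX_COUNT = 16 children, VM_RADIX_MASK = 0xf, and
 * VM_RADIX_LIMIT = 64 / 4 - 1 = 15 is the highest possible node level.
 */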
/* Flag bits stored in node pointers. */
#define	VM_RADIX_ISLEAF	0x1
#define	VM_RADIX_FLAGS	0x1
#define	VM_RADIX_PAD	VM_RADIX_FLAGS
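/*
 * A leaf is a vm_page_t pointer with VM_RADIX_ISLEAF set in its low bit;
 * pointer alignment guarantees that the bit is otherwise clear, and
 * vm_radix_topage() masks it off again to recover the page.
 */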
/* Returns one unit associated with specified level. */
#define	VM_RADIX_UNITLEVEL(lev)						\
	((vm_pindex_t)1 << ((lev) * VM_RADIX_WIDTH))
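/*
 * For example, with VM_RADIX_WIDTH of 4, VM_RADIX_UNITLEVEL(2) is
 * (vm_pindex_t)1 << 8 == 0x100, the key distance between adjacent slots
 * of a level-2 node.
 */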
enum vm_radix_access { SMR, LOCKED, UNSERIALIZED };
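/*
 * SMR is used by lockless readers within an smr_enter() section, LOCKED by
 * callers serialized with an external lock, and UNSERIALIZED when the
 * caller has exclusive access to the node and needs no memory ordering.
 */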
struct vm_radix_node;
typedef SMR_POINTER(struct vm_radix_node *) smrnode_t;
struct vm_radix_node {
	vm_pindex_t	rn_owner;			/* Owner of record. */
	uint16_t	rn_count;			/* Valid children. */
	uint8_t		rn_clev;			/* Current level. */
	int8_t		rn_last;			/* Zero last ptr. */
	smrnode_t	rn_child[VM_RADIX_COUNT];	/* Child nodes. */
};
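/*
 * rn_owner holds the key prefix shared by every key below the node, with
 * the slots at and below rn_clev cleared; vm_radix_keybarr() compares a
 * search key against it to detect path-compression mismatches.
 */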
static uma_zone_t vm_radix_node_zone;
static smr_t vm_radix_smr;

static void vm_radix_node_store(smrnode_t *p, struct vm_radix_node *v,
    enum vm_radix_access access);
/*
 * Allocate a radix node.
 */
static struct vm_radix_node *
vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
{
	struct vm_radix_node *rnode;

	rnode = uma_zalloc_smr(vm_radix_node_zone, M_NOWAIT);
	if (rnode == NULL)
		return (NULL);

	/*
	 * We want to clear the last child pointer after the final section
	 * has exited so lookup can not return false negatives.  It is done
	 * here because it will be cache-cold in the dtor callback.
	 */
	if (rnode->rn_last != 0) {
		vm_radix_node_store(&rnode->rn_child[rnode->rn_last - 1],
		    NULL, UNSERIALIZED);
		rnode->rn_last = 0;
	}
	rnode->rn_owner = owner;
	rnode->rn_count = count;
	rnode->rn_clev = clevel;
	return (rnode);
}
/*
 * Free radix node.
 */
static __inline void
vm_radix_node_put(struct vm_radix_node *rnode, int8_t last)
{
#ifdef INVARIANTS
	int slot;

	KASSERT(rnode->rn_count == 0,
	    ("vm_radix_node_put: rnode %p has %d children", rnode,
	    rnode->rn_count));
	for (slot = 0; slot < VM_RADIX_COUNT; slot++) {
		if (slot == last)
			continue;
		KASSERT(smr_unserialized_load(&rnode->rn_child[slot], true) ==
		    NULL, ("vm_radix_node_put: rnode %p has a child", rnode));
	}
#endif
	/* Off by one so a freshly zero'd node is not assigned to. */
	rnode->rn_last = last + 1;
	uma_zfree_smr(vm_radix_node_zone, rnode);
}
/*
 * Return the position in the array for a given level.
 */
static __inline int
vm_radix_slot(vm_pindex_t index, uint16_t level)
{
	return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
}
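/*
 * For example, with VM_RADIX_WIDTH of 4, vm_radix_slot(0x1234, 1) is
 * (0x1234 >> 4) & 0xf == 0x3.
 */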
/* Trims the key after the specified level. */
static __inline vm_pindex_t
vm_radix_trimkey(vm_pindex_t index, uint16_t level)
{
	vm_pindex_t ret;

	ret = index;
	if (level > 0) {
		ret >>= level * VM_RADIX_WIDTH;
		ret <<= level * VM_RADIX_WIDTH;
	}
	return (ret);
}
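/*
 * For example, with VM_RADIX_WIDTH of 4, vm_radix_trimkey(0x1234, 1)
 * clears the lowest slot and returns 0x1230.
 */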
/*
 * Fetch a node pointer from a slot in another node.
 */
static __inline struct vm_radix_node *
vm_radix_node_load(smrnode_t *p, enum vm_radix_access access)
{

	switch (access) {
	case UNSERIALIZED:
		return (smr_unserialized_load(p, true));
	case LOCKED:
		return (smr_serialized_load(p, true));
	case SMR:
		return (smr_entered_load(p, vm_radix_smr));
	}
	__assert_unreachable();
}
static __inline void
vm_radix_node_store(smrnode_t *p, struct vm_radix_node *v,
    enum vm_radix_access access)
{

	switch (access) {
	case UNSERIALIZED:
		smr_unserialized_store(p, v, true);
		break;
	case LOCKED:
		smr_serialized_store(p, v, true);
		break;
	case SMR:
		panic("vm_radix_node_store: Not supported in smr section.");
	}
}
/*
 * Get the root node for a radix tree.
 */
static __inline struct vm_radix_node *
vm_radix_root_load(struct vm_radix *rtree, enum vm_radix_access access)
{

	return (vm_radix_node_load((smrnode_t *)&rtree->rt_root, access));
}
/*
 * Set the root node for a radix tree.
 */
static __inline void
vm_radix_root_store(struct vm_radix *rtree, struct vm_radix_node *rnode,
    enum vm_radix_access access)
{

	vm_radix_node_store((smrnode_t *)&rtree->rt_root, rnode, access);
}
/*
 * Returns TRUE if the specified radix node is a leaf and FALSE otherwise.
 */
static __inline boolean_t
vm_radix_isleaf(struct vm_radix_node *rnode)
{

	return (((uintptr_t)rnode & VM_RADIX_ISLEAF) != 0);
}
/*
 * Returns the associated page extracted from rnode.
 */
static __inline vm_page_t
vm_radix_topage(struct vm_radix_node *rnode)
{

	return ((vm_page_t)((uintptr_t)rnode & ~VM_RADIX_FLAGS));
}
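/*
 * The leaf tag round-trips: vm_radix_addpage() below stores
 * (uintptr_t)page | VM_RADIX_ISLEAF, and this function recovers the
 * original page pointer by masking VM_RADIX_FLAGS back off.
 */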
/*
 * Adds the page as a child of the provided node.
 */
static __inline void
vm_radix_addpage(struct vm_radix_node *rnode, vm_pindex_t index, uint16_t clev,
    vm_page_t page, enum vm_radix_access access)
{
	int slot;

	slot = vm_radix_slot(index, clev);
	vm_radix_node_store(&rnode->rn_child[slot],
	    (struct vm_radix_node *)((uintptr_t)page | VM_RADIX_ISLEAF), access);
}
/*
 * Returns the slot where two keys differ.
 * It cannot accept two equal keys.
 */
static __inline uint16_t
vm_radix_keydiff(vm_pindex_t index1, vm_pindex_t index2)
{
	uint16_t clev;

	KASSERT(index1 != index2, ("%s: passing the same key value %jx",
	    __func__, (uintmax_t)index1));

	index1 ^= index2;
	for (clev = VM_RADIX_LIMIT;; clev--)
		if (vm_radix_slot(index1, clev) != 0)
			return (clev);
}
/*
 * Returns TRUE if it can be determined that key does not belong to the
 * specified rnode.  Otherwise, returns FALSE.
 */
static __inline boolean_t
vm_radix_keybarr(struct vm_radix_node *rnode, vm_pindex_t idx)
{

	if (rnode->rn_clev < VM_RADIX_LIMIT) {
		idx = vm_radix_trimkey(idx, rnode->rn_clev + 1);
		return (idx != rnode->rn_owner);
	}
	return (FALSE);
}
/*
 * Internal helper for vm_radix_reclaim_allnodes().
 * This function is recursive.
 */
static void
vm_radix_reclaim_allnodes_int(struct vm_radix_node *rnode)
{
	struct vm_radix_node *child;
	int slot;

	KASSERT(rnode->rn_count <= VM_RADIX_COUNT,
	    ("vm_radix_reclaim_allnodes_int: bad count in rnode %p", rnode));
	for (slot = 0; rnode->rn_count != 0; slot++) {
		child = vm_radix_node_load(&rnode->rn_child[slot],
		    UNSERIALIZED);
		if (child == NULL)
			continue;
		if (!vm_radix_isleaf(child))
			vm_radix_reclaim_allnodes_int(child);
		vm_radix_node_store(&rnode->rn_child[slot], NULL,
		    UNSERIALIZED);
		rnode->rn_count--;
	}
	vm_radix_node_put(rnode, -1);
}
#ifndef UMA_MD_SMALL_ALLOC
void vm_radix_reserve_kva(void);
/*
 * Reserve the KVA necessary to satisfy the node allocation.
 * This is mandatory on architectures without direct mapping, as they
 * would otherwise need to carve into the kernel maps for every node
 * allocation, resulting in deadlocks for consumers already working with
 * kernel maps.
 */
void
vm_radix_reserve_kva(void)
{

	/*
	 * Calculate the number of reserved nodes, discounting the pages that
	 * are needed to store them.
	 */
	if (!uma_zone_reserve_kva(vm_radix_node_zone,
	    ((vm_paddr_t)vm_cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
	    sizeof(struct vm_radix_node))))
		panic("%s: unable to reserve KVA", __func__);
}
#endif
/*
 * Initialize the UMA slab zone.
 */
void
vm_radix_zinit(void)
{

	vm_radix_node_zone = uma_zcreate("RADIX NODE",
	    sizeof(struct vm_radix_node), NULL, NULL, NULL, NULL,
	    VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_SMR | UMA_ZONE_ZINIT);
	vm_radix_smr = uma_zone_get_smr(vm_radix_node_zone);
}
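/*
 * UMA_ZONE_SMR pairs the zone with an SMR sequence: uma_zfree_smr() defers
 * reuse of a node until readers that entered before the free have left
 * their sections, so unlocked lookups never dereference a recycled node.
 */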
/*
 * Inserts the key-value pair into the trie.
 * Panics if the key already exists.
 */
int
vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
{
	vm_pindex_t index, newind;
	struct vm_radix_node *rnode, *tmp;
	smrnode_t *parentp;
	vm_page_t m;
	int slot;
	uint16_t clev;

	index = page->pindex;

	/*
	 * The owner of record for root is not really important because it
	 * will never be used.
	 */
	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL) {
		rtree->rt_root = (uintptr_t)page | VM_RADIX_ISLEAF;
		return (0);
	}
	parentp = (smrnode_t *)&rtree->rt_root;
	for (;;) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				panic("%s: key %jx is already present",
				    __func__, (uintmax_t)index);
			clev = vm_radix_keydiff(m->pindex, index);
			tmp = vm_radix_node_get(vm_radix_trimkey(index,
			    clev + 1), 2, clev);
			if (tmp == NULL)
				return (ENOMEM);
			/* These writes are not yet visible due to ordering. */
			vm_radix_addpage(tmp, index, clev, page, UNSERIALIZED);
			vm_radix_addpage(tmp, m->pindex, clev, m, UNSERIALIZED);
			/* Synchronize to make leaf visible. */
			vm_radix_node_store(parentp, tmp, LOCKED);
			return (0);
		} else if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		parentp = &rnode->rn_child[slot];
		tmp = vm_radix_node_load(parentp, LOCKED);
		if (tmp == NULL) {
			rnode->rn_count++;
			vm_radix_addpage(rnode, index, rnode->rn_clev, page,
			    LOCKED);
			return (0);
		}
		rnode = tmp;
	}

	/*
	 * A new node is needed because the right insertion level is reached.
	 * Set up the new intermediate node and add the two children: the
	 * new object and the older edge.
	 */
	newind = rnode->rn_owner;
	clev = vm_radix_keydiff(newind, index);
	tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1), 2, clev);
	if (tmp == NULL)
		return (ENOMEM);
	slot = vm_radix_slot(newind, clev);
	/* These writes are not yet visible due to ordering. */
	vm_radix_addpage(tmp, index, clev, page, UNSERIALIZED);
	vm_radix_node_store(&tmp->rn_child[slot], rnode, UNSERIALIZED);
	/* Serializing write to make the above visible. */
	vm_radix_node_store(parentp, tmp, LOCKED);

	return (0);
}
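/*
 * Illustrative consumer sketch (hypothetical, not part of this file):
 * insertion is done under the object lock and must cope with M_NOWAIT
 * allocation failure, for example:
 *
 *	VM_OBJECT_ASSERT_WLOCKED(object);
 *	while (vm_radix_insert(&object->rtree, m) != 0) {
 *		VM_OBJECT_WUNLOCK(object);
 *		vm_radix_wait();
 *		VM_OBJECT_WLOCK(object);
 *	}
 */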
/*
 * Returns TRUE if the specified radix tree contains a single leaf and FALSE
 * otherwise.
 */
boolean_t
vm_radix_is_singleton(struct vm_radix *rtree)
{
	struct vm_radix_node *rnode;

	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL)
		return (FALSE);
	return (vm_radix_isleaf(rnode));
}
/*
 * Returns the value stored at the index.  If the index is not present,
 * NULL is returned.
 */
static __always_inline vm_page_t
_vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index,
    enum vm_radix_access access)
{
	struct vm_radix_node *rnode;
	vm_page_t m;
	int slot;

	rnode = vm_radix_root_load(rtree, access);
	while (rnode != NULL) {
		if (vm_radix_isleaf(rnode)) {
			m = vm_radix_topage(rnode);
			if (m->pindex == index)
				return (m);
			break;
		}
		if (vm_radix_keybarr(rnode, index))
			break;
		slot = vm_radix_slot(index, rnode->rn_clev);
		rnode = vm_radix_node_load(&rnode->rn_child[slot], access);
	}
	return (NULL);
}
/*
 * Returns the value stored at the index assuming there is an external lock.
 *
 * If the index is not present, NULL is returned.
 */
vm_page_t
vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index)
{

	return (_vm_radix_lookup(rtree, index, LOCKED));
}
/*
 * Returns the value stored at the index without requiring an external lock.
 *
 * If the index is not present, NULL is returned.
 */
vm_page_t
vm_radix_lookup_unlocked(struct vm_radix *rtree, vm_pindex_t index)
{
	vm_page_t m;

	smr_enter(vm_radix_smr);
	m = _vm_radix_lookup(rtree, index, SMR);
	smr_exit(vm_radix_smr);

	return (m);
}
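/*
 * A page found by an SMR lookup is only known to have been a member of the
 * trie at some instant during the SMR section; the caller must revalidate
 * the page (for example by busying it and rechecking its identity) before
 * relying on the result.
 */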
/*
 * Look up the nearest entry at a position greater than or equal to index.
 */
vm_page_t
vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex >= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node or to the smallest key
		 * in the current node (if the owner is greater than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_ge: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == (VM_RADIX_COUNT - 1));

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is less than VM_RADIX_COUNT - 1.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
				index += VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else
				index = rnode->rn_owner;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_ge: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex >= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot < (VM_RADIX_COUNT - 1)) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index = vm_radix_trimkey(index, rnode->rn_clev);
			do {
				index += inc;
				slot++;
				child = vm_radix_node_load(&rnode->rn_child[slot],
				    LOCKED);
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex >= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot < (VM_RADIX_COUNT - 1));
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_ge: child is radix node"));

		/*
		 * If a page or edge greater than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_ge: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_ge: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}
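/*
 * For example, with pages at indices 4 and 10, vm_radix_lookup_ge(rtree, 5)
 * returns the page at index 10, and vm_radix_lookup_ge(rtree, 11) returns
 * NULL.
 */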
/*
 * Look up the nearest entry at a position less than or equal to index.
 */
vm_page_t
vm_radix_lookup_le(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *stack[VM_RADIX_LIMIT];
	vm_pindex_t inc;
	vm_page_t m;
	struct vm_radix_node *child, *rnode;
#ifdef INVARIANTS
	int loops = 0;
#endif
	int slot, tos;

	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL)
		return (NULL);
	else if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex <= index)
			return (m);
		else
			return (NULL);
	}
	tos = 0;
	for (;;) {
		/*
		 * If the keys differ before the current bisection node,
		 * then the search key might roll back to the earliest
		 * available bisection node or to the largest key
		 * in the current node (if the owner is smaller than the
		 * search key).
		 */
		if (vm_radix_keybarr(rnode, index)) {
			if (index > rnode->rn_owner) {
				index = rnode->rn_owner + VM_RADIX_COUNT *
				    VM_RADIX_UNITLEVEL(rnode->rn_clev);
			} else {
ascend:
				KASSERT(++loops < 1000,
				    ("vm_radix_lookup_le: too many loops"));

				/*
				 * Pop nodes from the stack until either the
				 * stack is empty or a node that could have a
				 * matching descendant is found.
				 */
				do {
					if (tos == 0)
						return (NULL);
					rnode = stack[--tos];
				} while (vm_radix_slot(index,
				    rnode->rn_clev) == 0);

				/*
				 * The following computation cannot overflow
				 * because index's slot at the current level
				 * is greater than 0.
				 */
				index = vm_radix_trimkey(index,
				    rnode->rn_clev);
			}
			index--;
			KASSERT(!vm_radix_keybarr(rnode, index),
			    ("vm_radix_lookup_le: keybarr failed"));
		}
		slot = vm_radix_slot(index, rnode->rn_clev);
		child = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(child)) {
			m = vm_radix_topage(child);
			if (m->pindex <= index)
				return (m);
		} else if (child != NULL)
			goto descend;

		/*
		 * Look for an available edge or page within the current
		 * bisection node.
		 */
		if (slot > 0) {
			inc = VM_RADIX_UNITLEVEL(rnode->rn_clev);
			index |= inc - 1;
			do {
				index -= inc;
				slot--;
				child = vm_radix_node_load(&rnode->rn_child[slot],
				    LOCKED);
				if (vm_radix_isleaf(child)) {
					m = vm_radix_topage(child);
					if (m->pindex <= index)
						return (m);
				} else if (child != NULL)
					goto descend;
			} while (slot > 0);
		}
		KASSERT(child == NULL || vm_radix_isleaf(child),
		    ("vm_radix_lookup_le: child is radix node"));

		/*
		 * If a page or edge smaller than the search slot is not found
		 * in the current node, ascend to the next higher-level node.
		 */
		goto ascend;
descend:
		KASSERT(rnode->rn_clev > 0,
		    ("vm_radix_lookup_le: pushing leaf's parent"));
		KASSERT(tos < VM_RADIX_LIMIT,
		    ("vm_radix_lookup_le: stack overflow"));
		stack[tos++] = rnode;
		rnode = child;
	}
}
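/*
 * Symmetrically, with pages at indices 4 and 10, vm_radix_lookup_le(rtree, 5)
 * returns the page at index 4, and vm_radix_lookup_le(rtree, 3) returns NULL.
 */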
/*
 * Remove the specified index from the trie, and return the value stored at
 * that index.  If the index is not present, return NULL.
 */
vm_page_t
vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
{
	struct vm_radix_node *rnode, *parent, *tmp;
	vm_page_t m;
	int i, slot;

	rnode = vm_radix_root_load(rtree, LOCKED);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			return (NULL);
		vm_radix_root_store(rtree, NULL, LOCKED);
		return (m);
	}
	parent = NULL;
	for (;;) {
		if (rnode == NULL)
			return (NULL);
		slot = vm_radix_slot(index, rnode->rn_clev);
		tmp = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(tmp)) {
			m = vm_radix_topage(tmp);
			if (m->pindex != index)
				return (NULL);
			vm_radix_node_store(&rnode->rn_child[slot], NULL,
			    LOCKED);
			rnode->rn_count--;
			if (rnode->rn_count > 1)
				return (m);
			for (i = 0; i < VM_RADIX_COUNT; i++)
				if (vm_radix_node_load(&rnode->rn_child[i],
				    LOCKED) != NULL)
					break;
			KASSERT(i != VM_RADIX_COUNT,
			    ("%s: invalid node configuration", __func__));
			tmp = vm_radix_node_load(&rnode->rn_child[i], LOCKED);
			if (parent == NULL)
				vm_radix_root_store(rtree, tmp, LOCKED);
			else {
				slot = vm_radix_slot(index, parent->rn_clev);
				KASSERT(vm_radix_node_load(
				    &parent->rn_child[slot], LOCKED) == rnode,
				    ("%s: invalid child value", __func__));
				vm_radix_node_store(&parent->rn_child[slot],
				    tmp, LOCKED);
			}
			/*
			 * The child is still valid and we can not zero the
			 * pointer until all smr references are gone.
			 */
			rnode->rn_count--;
			vm_radix_node_put(rnode, i);
			return (m);
		}
		parent = rnode;
		rnode = tmp;
	}
}
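/*
 * Removal collapses single-child nodes: once only one child remains, the
 * parent's slot (or the root) is pointed directly at that child and the
 * emptied node is freed, preserving the invariant that every internal node
 * has at least two children.
 */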
/*
 * Remove and free all the nodes from the radix tree.
 * This function is recursive, but recursion is tightly bounded because the
 * maximum depth of the tree is fixed.
 */
void
vm_radix_reclaim_allnodes(struct vm_radix *rtree)
{
	struct vm_radix_node *root;

	root = vm_radix_root_load(rtree, LOCKED);
	if (root == NULL)
		return;
	vm_radix_root_store(rtree, NULL, UNSERIALIZED);
	if (!vm_radix_isleaf(root))
		vm_radix_reclaim_allnodes_int(root);
}
/*
 * Replace an existing page in the trie with another one.
 * Panics if there is not an old page in the trie at the new page's index.
 */
vm_page_t
vm_radix_replace(struct vm_radix *rtree, vm_page_t newpage)
{
	struct vm_radix_node *rnode, *tmp;
	vm_page_t m;
	vm_pindex_t index;
	int slot;

	index = newpage->pindex;
	rnode = vm_radix_root_load(rtree, LOCKED);
	if (rnode == NULL)
		panic("%s: replacing page on an empty trie", __func__);
	if (vm_radix_isleaf(rnode)) {
		m = vm_radix_topage(rnode);
		if (m->pindex != index)
			panic("%s: original replacing root key not found",
			    __func__);
		rtree->rt_root = (uintptr_t)newpage | VM_RADIX_ISLEAF;
		return (m);
	}
	for (;;) {
		slot = vm_radix_slot(index, rnode->rn_clev);
		tmp = vm_radix_node_load(&rnode->rn_child[slot], LOCKED);
		if (vm_radix_isleaf(tmp)) {
			m = vm_radix_topage(tmp);
			if (m->pindex == index) {
				vm_radix_node_store(&rnode->rn_child[slot],
				    (struct vm_radix_node *)((uintptr_t)newpage |
				    VM_RADIX_ISLEAF), LOCKED);
				return (m);
			} else
				break;
		} else if (tmp == NULL || vm_radix_keybarr(tmp, index))
			break;
		rnode = tmp;
	}
	panic("%s: original replacing page not found", __func__);
}
/*
 * Wait for a radix node to become available for allocation.
 */
void
vm_radix_wait(void)
{
	uma_zwait(vm_radix_node_zone);
}
#ifdef DDB
/*
 * Show details about the given radix node.
 */
DB_SHOW_COMMAND(radixnode, db_show_radixnode)
{
	struct vm_radix_node *rnode, *tmp;
	int i;

	if (!have_addr)
		return;
	rnode = (struct vm_radix_node *)addr;
	db_printf("radixnode %p, owner %jx, children count %u, level %u:\n",
	    (void *)rnode, (uintmax_t)rnode->rn_owner, rnode->rn_count,
	    rnode->rn_clev);
	for (i = 0; i < VM_RADIX_COUNT; i++) {
		tmp = vm_radix_node_load(&rnode->rn_child[i], UNSERIALIZED);
		if (tmp != NULL)
			db_printf("slot: %d, val: %p, page: %p, clev: %d\n",
			    i, (void *)tmp,
			    vm_radix_isleaf(tmp) ? vm_radix_topage(tmp) : NULL,
			    rnode->rn_clev);
	}
}
#endif /* DDB */