/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_reserv.h>

struct vm_phys_seg {
        vm_paddr_t      start;
        vm_paddr_t      end;
        vm_page_t       first_page;
        struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER];
};

static struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];

static int vm_phys_nsegs;

static struct vm_freelist
    vm_phys_free_queues[VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;
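
/*
 * Together these arrays form a segmented buddy allocator: each
 * (free list, pool) pair has one queue per order, and a queue of
 * order "oind" holds runs of 1 << oind physically contiguous,
 * naturally aligned pages.  For example, assuming 4 KB pages, order 0
 * holds single pages and order 9 holds 2 MB runs.
 */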

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_freelist *fl;
        char *cbuf;
        const int cbufsize = vm_nfreelists*(VM_NFREEORDER + 1)*81;
        int error, flind, oind, pind;

        cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
        sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
        for (flind = 0; flind < vm_nfreelists; flind++) {
                sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
                    "\n  ORDER (SIZE)  |  NUMBER"
                    "\n              ", flind);
                for (pind = 0; pind < VM_NFREEPOOL; pind++)
                        sbuf_printf(&sbuf, "  |  POOL %d", pind);
                sbuf_printf(&sbuf, "\n--            ");
                for (pind = 0; pind < VM_NFREEPOOL; pind++)
                        sbuf_printf(&sbuf, "-- --      ");
                sbuf_printf(&sbuf, "--\n");
                for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                        sbuf_printf(&sbuf, "  %2.2d (%6.6dK)", oind,
                            1 << (PAGE_SHIFT - 10 + oind));
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = vm_phys_free_queues[flind][pind];
                                sbuf_printf(&sbuf, "  |  %6.6d",
                                    fl[oind].lcnt);
                        }
                        sbuf_printf(&sbuf, "\n");
                }
        }
        sbuf_finish(&sbuf);
        error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
        sbuf_delete(&sbuf);
        free(cbuf, M_TEMP);
        return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_phys_seg *seg;
        char *cbuf;
        const int cbufsize = VM_PHYSSEG_MAX*(VM_NFREEORDER + 1)*81;
        int error, segind;

        cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
        sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
                seg = &vm_phys_segs[segind];
                sbuf_printf(&sbuf, "start:     %#jx\n",
                    (uintmax_t)seg->start);
                sbuf_printf(&sbuf, "end:       %#jx\n",
                    (uintmax_t)seg->end);
                sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
        }
        sbuf_finish(&sbuf);
        error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
        sbuf_delete(&sbuf);
        free(cbuf, M_TEMP);
        return (error);
}

/*
 * Create a physical memory segment.
 */
static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
{
        struct vm_phys_seg *seg;
#ifdef VM_PHYSSEG_SPARSE
        long pages;
        int segind;

        pages = 0;
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                pages += atop(seg->end - seg->start);
        }
#endif
        KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
            ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
        seg = &vm_phys_segs[vm_phys_nsegs++];
        seg->start = start;
        seg->end = end;
#ifdef VM_PHYSSEG_SPARSE
        seg->first_page = &vm_page_array[pages];
#else
        seg->first_page = PHYS_TO_VM_PAGE(start);
#endif
        seg->free_queues = &vm_phys_free_queues[flind];
}

/*
 * Initialize the physical memory allocator.
 */
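
/*
 * Each pair of entries in phys_avail[] describes one range of usable
 * physical memory.  Where a range straddles a free list cutoff (the
 * 16 MB ISA DMA limit or VM_HIGHMEM_ADDRESS), it is split into two
 * segments so that every segment maps to exactly one free list.
 */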
void
vm_phys_init(void)
{
        struct vm_freelist *fl;
        int flind, i, oind, pind;

        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef VM_FREELIST_ISADMA
                if (phys_avail[i] < 16777216) {
                        if (phys_avail[i + 1] > 16777216) {
                                vm_phys_create_seg(phys_avail[i], 16777216,
                                    VM_FREELIST_ISADMA);
                                vm_phys_create_seg(16777216, phys_avail[i + 1],
                                    VM_FREELIST_DEFAULT);
                        } else {
                                vm_phys_create_seg(phys_avail[i],
                                    phys_avail[i + 1], VM_FREELIST_ISADMA);
                        }
                        if (VM_FREELIST_ISADMA >= vm_nfreelists)
                                vm_nfreelists = VM_FREELIST_ISADMA + 1;
                } else
#endif
#ifdef VM_FREELIST_HIGHMEM
                if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) {
                        if (phys_avail[i] < VM_HIGHMEM_ADDRESS) {
                                vm_phys_create_seg(phys_avail[i],
                                    VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT);
                                vm_phys_create_seg(VM_HIGHMEM_ADDRESS,
                                    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
                        } else {
                                vm_phys_create_seg(phys_avail[i],
                                    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
                        }
                        if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
                                vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
                } else
#endif
                vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],
                    VM_FREELIST_DEFAULT);
        }
        for (flind = 0; flind < vm_nfreelists; flind++) {
                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                        fl = vm_phys_free_queues[flind][pind];
                        for (oind = 0; oind < VM_NFREEORDER; oind++)
                                TAILQ_INIT(&fl[oind].pl);
                }
        }
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
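
/*
 * Splitting repeatedly halves the block: each iteration returns the
 * upper half (the buddy) to the free queue of the next lower order.
 * For example, splitting an order-2 block down to order 0 frees an
 * order-1 buddy and then an order-0 buddy, leaving the first page for
 * the caller.
 */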
static void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
        vm_page_t m_buddy;

        while (oind > order) {
                oind--;
                m_buddy = &m[1 << oind];
                KASSERT(m_buddy->order == VM_NFREEORDER,
                    ("vm_phys_split_pages: page %p has unexpected order %d",
                    m_buddy, m_buddy->order));
                m_buddy->order = oind;
                TAILQ_INSERT_HEAD(&fl[oind].pl, m_buddy, pageq);
                fl[oind].lcnt++;
        }
}

/*
 * Initialize a physical page and add it to the free lists.
 */
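
/*
 * A sketch of the expected usage (an assumption, not taken from this
 * file): the startup code hands each usable page to this function
 * exactly once, after which the page belongs to the buddy queues and
 * may coalesce with its neighbors.
 */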
void
vm_phys_add_page(vm_paddr_t pa)
{
        vm_page_t m;

        cnt.v_page_count++;
        m = vm_phys_paddr_to_vm_page(pa);
        m->phys_addr = pa;
        m->segind = vm_phys_paddr_to_segind(pa);
        m->flags = PG_FREE;
        KASSERT(m->order == VM_NFREEORDER,
            ("vm_phys_add_page: page %p has unexpected order %d",
            m, m->order));
        m->pool = VM_FREEPOOL_DEFAULT;
        pmap_page_init(m);
        mtx_lock(&vm_page_queue_free_mtx);
        cnt.v_free_count++;
        vm_phys_free_pages(m, 0);
        mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
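
/*
 * The search below is two-phase within each free list: first the
 * requested pool is scanned for the smallest free block of at least
 * "order"; only if that fails is a block stolen from another pool,
 * retagged with vm_phys_set_pool(), and split down to "order".
 */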
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
        struct vm_freelist *fl;
        struct vm_freelist *alt;
        int flind, oind, pind;
        vm_page_t m;

        KASSERT(pool < VM_NFREEPOOL,
            ("vm_phys_alloc_pages: pool %d is out of range", pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_alloc_pages: order %d is out of range", order));
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        for (flind = 0; flind < vm_nfreelists; flind++) {
                fl = vm_phys_free_queues[flind][pool];
                for (oind = order; oind < VM_NFREEORDER; oind++) {
                        m = TAILQ_FIRST(&fl[oind].pl);
                        if (m != NULL) {
                                TAILQ_REMOVE(&fl[oind].pl, m, pageq);
                                fl[oind].lcnt--;
                                m->order = VM_NFREEORDER;
                                vm_phys_split_pages(m, oind, fl, order);
                                return (m);
                        }
                }

                /*
                 * The given pool was empty.  Find the largest
                 * contiguous, power-of-two-sized set of pages in any
                 * pool.  Transfer these pages to the given pool, and
                 * use them to satisfy the allocation.
                 */
                for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                alt = vm_phys_free_queues[flind][pind];
                                m = TAILQ_FIRST(&alt[oind].pl);
                                if (m != NULL) {
                                        TAILQ_REMOVE(&alt[oind].pl, m, pageq);
                                        alt[oind].lcnt--;
                                        m->order = VM_NFREEORDER;
                                        vm_phys_set_pool(pool, m, oind);
                                        vm_phys_split_pages(m, oind, fl, order);
                                        return (m);
                                }
                        }
                }
        }
        return (NULL);
}

/*
 * Allocate physical memory from phys_avail[].
 */
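
/*
 * This runs before the buddy queues are initialized, so it simply
 * carves the request from the start of the first phys_avail[] range
 * that is large enough, shrinking that range in place.
 */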
vm_paddr_t
vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment)
{
        vm_paddr_t pa;
        int i;

        size = round_page(size);
        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                if (phys_avail[i + 1] - phys_avail[i] < size)
                        continue;
                pa = phys_avail[i];
                phys_avail[i] += size;
                return (pa);
        }
        panic("vm_phys_bootstrap_alloc");
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_seg *seg;
        int segind;

        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                if (pa >= seg->start && pa < seg->end)
                        return (&seg->first_page[atop(pa - seg->start)]);
        }
        panic("vm_phys_paddr_to_vm_page: paddr %#jx is not in any segment",
            (uintmax_t)pa);
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
        struct vm_phys_seg *seg;
        int segind;

        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                if (pa >= seg->start && pa < seg->end)
                        return (segind);
        }
        panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
            (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
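
/*
 * Coalescing exploits the fact that a block's buddy differs from it in
 * exactly one physical address bit.  For example, assuming 4 KB pages,
 * an order-0 page at 0x5000 has its buddy at 0x5000 ^ 0x1000 = 0x4000;
 * if that buddy is free at the same order, the pair merges into an
 * order-1 block at 0x4000 and the test repeats at the next order.
 */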
void
vm_phys_free_pages(vm_page_t m, int order)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa, pa_buddy;
        vm_page_t m_buddy;

        KASSERT(m->order == VM_NFREEORDER,
            ("vm_phys_free_pages: page %p has unexpected order %d",
            m, m->order));
        KASSERT(m->pool < VM_NFREEPOOL,
            ("vm_phys_free_pages: page %p has unexpected pool %d",
            m, m->pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_free_pages: order %d is out of range", order));
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        pa = VM_PAGE_TO_PHYS(m);
        seg = &vm_phys_segs[m->segind];
        while (order < VM_NFREEORDER - 1) {
                pa_buddy = pa ^ (1 << (PAGE_SHIFT + order));
                if (pa_buddy < seg->start ||
                    pa_buddy >= seg->end)
                        break;
                m_buddy = &seg->first_page[atop(pa_buddy - seg->start)];
                if (m_buddy->order != order)
                        break;
                fl = (*seg->free_queues)[m_buddy->pool];
                TAILQ_REMOVE(&fl[m_buddy->order].pl, m_buddy, pageq);
                fl[m_buddy->order].lcnt--;
                m_buddy->order = VM_NFREEORDER;
                if (m_buddy->pool != m->pool)
                        vm_phys_set_pool(m->pool, m_buddy, order);
                order++;
                pa &= ~((1 << (PAGE_SHIFT + order)) - 1);
                m = &seg->first_page[atop(pa - seg->start)];
        }
        m->order = order;
        fl = (*seg->free_queues)[m->pool];
        TAILQ_INSERT_TAIL(&fl[order].pl, m, pageq);
        fl[order].lcnt++;
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
        vm_page_t m_tmp;

        for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
                m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
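
/*
 * The upward search mirrors coalescing: masking successively more low
 * address bits of "m" yields the head of each enclosing candidate
 * block.  For example, assuming 4 KB pages, the page at 0x7000 is
 * covered by the order-1 block at 0x6000 and the order-2 block at
 * 0x4000; the loop stops at the first head with a valid (free) order.
 */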
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa, pa_half;
        vm_page_t m_set, m_tmp;
        int order;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

        /*
         * First, find the contiguous, power of two-sized set of free
         * physical pages containing the given physical page "m" and
         * assign it to "m_set".
         */
        seg = &vm_phys_segs[m->segind];
        for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
            order < VM_NFREEORDER - 1; ) {
                order++;
                pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
                if (pa >= seg->start)
                        m_set = &seg->first_page[atop(pa - seg->start)];
                else
                        return (FALSE);
        }
        if (m_set->order < order)
                return (FALSE);
        if (m_set->order == VM_NFREEORDER)
                return (FALSE);
        KASSERT(m_set->order < VM_NFREEORDER,
            ("vm_phys_unfree_page: page %p has unexpected order %d",
            m_set, m_set->order));

        /*
         * Next, remove "m_set" from the free lists.  Finally, extract
         * "m" from "m_set" using an iterative algorithm: While "m_set"
         * is larger than a page, shrink "m_set" by returning the half
         * of "m_set" that does not contain "m" to the free lists.
         */
        fl = (*seg->free_queues)[m_set->pool];
        order = m_set->order;
        TAILQ_REMOVE(&fl[order].pl, m_set, pageq);
        fl[order].lcnt--;
        m_set->order = VM_NFREEORDER;
        while (order > 0) {
                order--;
                pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
                if (m->phys_addr < pa_half)
                        m_tmp = &seg->first_page[atop(pa_half - seg->start)];
                else {
                        m_tmp = m_set;
                        m_set = &seg->first_page[atop(pa_half - seg->start)];
                }
                m_tmp->order = order;
                TAILQ_INSERT_HEAD(&fl[order].pl, m_tmp, pageq);
                fl[order].lcnt++;
        }
        KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
        return (TRUE);
}

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
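
/*
 * The scan position (free list, pool, and order) lives in static
 * variables so successive calls resume where the previous call left
 * off, spreading the zeroing work across idle periods instead of
 * rescanning from the beginning each time.
 */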
boolean_t
vm_phys_zero_pages_idle(void)
{
        static struct vm_freelist *fl = vm_phys_free_queues[0][0];
        static int flind, oind, pind;
        vm_page_t m, m_tmp;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        for (;;) {
                TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, pageq) {
                        for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
                                if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
                                        vm_phys_unfree_page(m_tmp);
                                        cnt.v_free_count--;
                                        mtx_unlock(&vm_page_queue_free_mtx);
                                        pmap_zero_page_idle(m_tmp);
                                        m_tmp->flags |= PG_ZERO;
                                        mtx_lock(&vm_page_queue_free_mtx);
                                        cnt.v_free_count++;
                                        vm_phys_free_pages(m_tmp, 0);
                                        vm_page_zero_count++;
                                        cnt_prezero++;
                                        return (TRUE);
                                }
                        }
                }
                oind++;
                if (oind == VM_NFREEORDER) {
                        oind = 0;
                        pind++;
                        if (pind == VM_NFREEPOOL) {
                                pind = 0;
                                flind++;
                                if (flind == vm_nfreelists)
                                        flind = 0;
                        }
                        fl = vm_phys_free_queues[flind][pind];
                }
        }
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, unsigned long boundary)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_object_t m_object;
        vm_paddr_t pa, pa_last, size;
        vm_page_t m, m_ret;
        int flind, i, oind, order, pind;

        size = npages << PAGE_SHIFT;
        KASSERT(size != 0,
            ("vm_phys_alloc_contig: size must not be 0"));
        KASSERT((alignment & (alignment - 1)) == 0,
            ("vm_phys_alloc_contig: alignment must be a power of 2"));
        KASSERT((boundary & (boundary - 1)) == 0,
            ("vm_phys_alloc_contig: boundary must be a power of 2"));
        /* Compute the queue that is the best fit for npages. */
        for (order = 0; (1 << order) < npages; order++);
        mtx_lock(&vm_page_queue_free_mtx);
#if VM_NRESERVLEVEL > 0
retry:
#endif
        for (flind = 0; flind < vm_nfreelists; flind++) {
                for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
                    oind++) {
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = vm_phys_free_queues[flind][pind];
                                TAILQ_FOREACH(m_ret, &fl[oind].pl, pageq) {
                                        /*
                                         * A free list may contain physical pages
                                         * from one or more segments.
                                         */
                                        seg = &vm_phys_segs[m_ret->segind];
                                        if (seg->start > high ||
                                            low >= seg->end)
                                                continue;

                                        /*
                                         * Is the size of this allocation request
                                         * larger than the largest block size?
                                         */
                                        if (order >= VM_NFREEORDER) {
                                                /*
                                                 * Determine if a sufficient number
                                                 * of subsequent blocks to satisfy
                                                 * the allocation request are free.
                                                 */
                                                pa = VM_PAGE_TO_PHYS(m_ret);
                                                pa_last = pa + size;
                                                for (;;) {
                                                        pa += 1 << (PAGE_SHIFT +
                                                            VM_NFREEORDER - 1);
                                                        if (pa >= pa_last)
                                                                break;
                                                        if (pa < seg->start ||
                                                            pa >= seg->end)
                                                                break;
                                                        m = &seg->first_page[
                                                            atop(pa - seg->start)];
                                                        if (m->order !=
                                                            VM_NFREEORDER - 1)
                                                                break;
                                                }
                                                /* If not, continue to the next block. */
                                                if (pa < pa_last)
                                                        continue;
                                        }

                                        /*
                                         * Determine if the blocks are within the
                                         * given range, satisfy the given alignment,
                                         * and do not cross the given boundary.
                                         */
                                        pa = VM_PAGE_TO_PHYS(m_ret);
                                        if (pa >= low &&
                                            pa + size <= high &&
                                            (pa & (alignment - 1)) == 0 &&
                                            ((pa ^ (pa + size - 1)) &
                                            ~(boundary - 1)) == 0)
                                                goto done;
                                }
                        }
                }
        }
#if VM_NRESERVLEVEL > 0
        if (vm_reserv_reclaim_contig(size, low, high, alignment, boundary))
                goto retry;
#endif
        mtx_unlock(&vm_page_queue_free_mtx);
        return (NULL);
done:
        for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
                fl = (*seg->free_queues)[m->pool];
                TAILQ_REMOVE(&fl[m->order].pl, m, pageq);
                fl[m->order].lcnt--;
                m->order = VM_NFREEORDER;
        }
        if (m_ret->pool != VM_FREEPOOL_DEFAULT)
                vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
        fl = (*seg->free_queues)[m_ret->pool];
        vm_phys_split_pages(m_ret, oind, fl, order);
        for (i = 0; i < npages; i++) {
                m = &m_ret[i];
                KASSERT(m->queue == PQ_NONE,
                    ("vm_phys_alloc_contig: page %p has unexpected queue %d",
                    m, m->queue));
                m_object = m->object;
                if ((m->flags & PG_CACHED) != 0)
                        vm_page_cache_remove(m);
                else {
                        KASSERT(VM_PAGE_IS_FREE(m),
                            ("vm_phys_alloc_contig: page %p is not free", m));
                        cnt.v_free_count--;
                }
                m->valid = VM_PAGE_BITS_ALL;
                if (m->flags & PG_ZERO)
                        vm_page_zero_count--;
                /* Don't clear the PG_ZERO flag; we'll need it later. */
                m->flags = PG_UNMANAGED | (m->flags & PG_ZERO);
                m->oflags = 0;
                KASSERT(m->dirty == 0,
                    ("vm_phys_alloc_contig: page %p was dirty", m));
                m->wire_count = 0;
                m->busy = 0;
                if (m_object != NULL &&
                    m_object->type == OBJT_VNODE &&
                    m_object->cache == NULL) {
                        mtx_unlock(&vm_page_queue_free_mtx);
                        vdrop(m_object->handle);
                        mtx_lock(&vm_page_queue_free_mtx);
                }
        }
        for (; i < roundup2(npages, 1 << imin(oind, order)); i++) {
                m = &m_ret[i];
                KASSERT(m->order == VM_NFREEORDER,
                    ("vm_phys_alloc_contig: page %p has unexpected order %d",
                    m, m->order));
                vm_phys_free_pages(m, 0);
        }
        mtx_unlock(&vm_page_queue_free_mtx);
        return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
        struct vm_freelist *fl;
        int flind, oind, pind;

        for (flind = 0; flind < vm_nfreelists; flind++) {
                db_printf("FREE LIST %d:\n"
                    "\n  ORDER (SIZE)  |  NUMBER"
                    "\n              ", flind);
                for (pind = 0; pind < VM_NFREEPOOL; pind++)
                        db_printf("  |  POOL %d", pind);
                db_printf("\n--            ");
                for (pind = 0; pind < VM_NFREEPOOL; pind++)
                        db_printf("-- --      ");
                db_printf("--\n");
                for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                        db_printf("  %2.2d (%6.6dK)", oind,
                            1 << (PAGE_SHIFT - 10 + oind));
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = vm_phys_free_queues[flind][pind];
                                db_printf("  |  %6.6d", fl[oind].lcnt);
                        }
                        db_printf("\n");
                }
                db_printf("\n");
        }
}
#endif