/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *      Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#if MAXMEMDOM > 1
#include <sys/proc.h>
#endif
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

struct mem_affinity *mem_affinity;

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
        RB_ENTRY(vm_phys_fictitious_seg) node;
        /* Memory region data */
        vm_paddr_t      start;
        vm_paddr_t      end;
        vm_page_t       first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define VM_ISADMA_BOUNDARY      16777216
#endif
#ifdef VM_FREELIST_DMA32
#define VM_DMA32_BOUNDARY       ((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

        KASSERT(range->start != 0 && range->end != 0,
            ("Invalid range passed on search for vm_fictitious page"));
        if (p->start >= range->end)
                return (1);
        if (p->start < range->start)
                return (-1);

        return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

        /* Check if this is a search for a page */
        if (p1->end == 0)
                return (vm_phys_fictitious_in_range(p1, p2));

        KASSERT(p2->end != 0,
    ("Invalid range passed as second parameter to vm fictitious comparison"));

        /* Searching to add a new range */
        if (p1->end <= p2->start)
                return (-1);
        if (p1->start >= p2->end)
                return (1);

        panic("Trying to add overlapping vm fictitious ranges:\n"
            "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
            (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}
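
/*
 * Illustrative lookup sketch (not part of the build): a point query
 * encodes "find the range containing this address" by setting "end" to
 * zero in a temporary key, which routes the comparator above through
 * vm_phys_fictitious_in_range():
 *
 *      struct vm_phys_fictitious_seg tmp, *seg;
 *
 *      tmp.start = pa;         (the page address to locate)
 *      tmp.end = 0;            (marks the key as a point query)
 *      seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
 *
 * vm_phys_fictitious_to_vm_page() below uses exactly this pattern.
 */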

static __inline int
vm_rr_selectdomain(void)
{
#if MAXMEMDOM > 1
        struct thread *td;

        td = curthread;

        td->td_dom_rr_idx++;
        td->td_dom_rr_idx %= vm_ndomains;
        return (td->td_dom_rr_idx);
#else
        return (0);
#endif
}

boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
        struct vm_phys_seg *s;
        int idx;

        while ((idx = ffsl(mask)) != 0) {
                idx--;  /* ffsl counts from 1 */
                mask &= ~(1UL << idx);
                s = &vm_phys_segs[idx];
                if (low < s->end && high > s->start)
                        return (TRUE);
        }
        return (FALSE);
}
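
/*
 * Worked example for the loop above: with mask = 0x5 (segments 0 and 2),
 * ffsl() first returns 1, so segment 0 is checked and bit 0 is cleared;
 * ffsl() then returns 3, so segment 2 is checked; finally ffsl() returns
 * 0 and the loop terminates.  Each set bit is visited exactly once.
 */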

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_freelist *fl;
        int dom, error, flind, oind, pind;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
        for (dom = 0; dom < vm_ndomains; dom++) {
                sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
                            "\n  ORDER (SIZE)  |  NUMBER"
                            "\n              ", flind);
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                sbuf_printf(&sbuf, "  |  POOL %d", pind);
                        sbuf_printf(&sbuf, "\n--            ");
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                sbuf_printf(&sbuf, "-- --      ");
                        sbuf_printf(&sbuf, "--\n");
                        for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                                sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
                                    1 << (PAGE_SHIFT - 10 + oind));
                                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                        fl = vm_phys_free_queues[dom][flind][pind];
                                        sbuf_printf(&sbuf, "  |  %6d",
                                            fl[oind].lcnt);
                                }
                                sbuf_printf(&sbuf, "\n");
                        }
                }
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_phys_seg *seg;
        int error, segind;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
                seg = &vm_phys_segs[segind];
                sbuf_printf(&sbuf, "start:     %#jx\n",
                    (uintmax_t)seg->start);
                sbuf_printf(&sbuf, "end:       %#jx\n",
                    (uintmax_t)seg->end);
                sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
                sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

        m->order = order;
        if (tail)
                TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
        else
                TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
        fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

        TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
        fl[order].lcnt--;
        m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
        struct vm_phys_seg *seg;

        KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
            ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
        KASSERT(domain < vm_ndomains,
            ("vm_phys_create_seg: invalid domain provided"));
        seg = &vm_phys_segs[vm_phys_nsegs++];
        while (seg > vm_phys_segs && (seg - 1)->start >= end) {
                *seg = *(seg - 1);
                seg--;
        }
        seg->start = start;
        seg->end = end;
        seg->domain = domain;
}
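
/*
 * The while loop above is one step of an insertion sort: it keeps
 * vm_phys_segs[] ordered by start address.  For example, with existing
 * segments starting at 0x0 and 0x40000000, inserting a segment whose end
 * is at or below 0x40000000 shifts the second entry up one slot and
 * places the new segment in the gap.
 */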

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
        int i;

        if (mem_affinity == NULL) {
                _vm_phys_create_seg(start, end, 0);
                return;
        }

        for (i = 0;; i++) {
                if (mem_affinity[i].end == 0)
                        panic("Reached end of affinity info");
                if (mem_affinity[i].end <= start)
                        continue;
                if (mem_affinity[i].start > start)
                        panic("No affinity info for start %jx",
                            (uintmax_t)start);
                if (mem_affinity[i].end >= end) {
                        _vm_phys_create_seg(start, end,
                            mem_affinity[i].domain);
                        break;
                }
                _vm_phys_create_seg(start, mem_affinity[i].end,
                    mem_affinity[i].domain);
                start = mem_affinity[i].end;
        }
}
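
/*
 * Worked example (hypothetical affinity table): with
 * mem_affinity = { [0, 2G) in domain 0, [2G, 4G) in domain 1 }, a call
 * vm_phys_create_seg(1G, 3G) creates two segments: [1G, 2G) in domain 0
 * and [2G, 3G) in domain 1.  The loop peels off one affinity region per
 * iteration until the remainder fits entirely within a single region.
 */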

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
        vm_paddr_t paddr;

        KASSERT((start & PAGE_MASK) == 0,
            ("vm_phys_define_seg: start is not page aligned"));
        KASSERT((end & PAGE_MASK) == 0,
            ("vm_phys_define_seg: end is not page aligned"));

        /*
         * Split the physical memory segment if it spans two or more free
         * list boundaries.
         */
        paddr = start;
#ifdef  VM_FREELIST_ISADMA
        if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
                vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
                paddr = VM_ISADMA_BOUNDARY;
        }
#endif
#ifdef  VM_FREELIST_LOWMEM
        if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
                vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
                paddr = VM_LOWMEM_BOUNDARY;
        }
#endif
#ifdef  VM_FREELIST_DMA32
        if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
                vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
                paddr = VM_DMA32_BOUNDARY;
        }
#endif
        vm_phys_create_seg(paddr, end);
}
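
/*
 * Worked example: on a hypothetical platform defining VM_LOWMEM_BOUNDARY
 * as 16M and VM_DMA32_BOUNDARY as 4G (the value defined above),
 * vm_phys_add_seg(8M, 5G) creates three segments: [8M, 16M), [16M, 4G),
 * and [4G, 5G).  Because the boundaries are handled in ascending order
 * (enforced by the CTASSERTs above), each resulting segment lies
 * entirely within one free list's range.
 */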

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        u_long npages;
        int dom, flind, freelist, oind, pind, segind;

        /*
         * Compute the number of free lists, and generate the mapping from the
         * manifest constants VM_FREELIST_* to the free list indices.
         *
         * Initially, the entries of vm_freelist_to_flind[] are set to either
         * 0 or 1 to indicate which free lists should be created.
         */
        npages = 0;
        for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
                seg = &vm_phys_segs[segind];
#ifdef  VM_FREELIST_ISADMA
                if (seg->end <= VM_ISADMA_BOUNDARY)
                        vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
                else
#endif
#ifdef  VM_FREELIST_LOWMEM
                if (seg->end <= VM_LOWMEM_BOUNDARY)
                        vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
                else
#endif
#ifdef  VM_FREELIST_DMA32
                if (
#ifdef  VM_DMA32_NPAGES_THRESHOLD
                    /*
                     * Create the DMA32 free list only if the amount of
                     * physical memory above physical address 4G exceeds the
                     * given threshold.
                     */
                    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
                    seg->end <= VM_DMA32_BOUNDARY)
                        vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
                else
#endif
                {
                        npages += atop(seg->end - seg->start);
                        vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
                }
        }
        /* Change each entry into a running total of the free lists. */
        for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
                vm_freelist_to_flind[freelist] +=
                    vm_freelist_to_flind[freelist - 1];
        }
        vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
        KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
        /* Change each entry into a free list index. */
        for (freelist = 0; freelist < VM_NFREELIST; freelist++)
                vm_freelist_to_flind[freelist]--;
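
        /*
         * Illustration of the mapping just computed (hypothetical
         * configuration): suppose VM_NFREELIST is 3 and only the lists
         * at indices 0 and 2 are populated.  The indicator flags
         * { 1, 0, 1 } become running totals { 1, 1, 2 }, so vm_nfreelists
         * is 2, and after the decrement the table reads { 0, 0, 1 }: the
         * two existing lists receive the compact indices 0 and 1, and the
         * absent list's entry is never used because no segment was
         * assigned to it.
         */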

        /*
         * Initialize the first_page and free_queues fields of each physical
         * memory segment.
         */
#ifdef VM_PHYSSEG_SPARSE
        npages = 0;
#endif
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
                seg->first_page = &vm_page_array[npages];
                npages += atop(seg->end - seg->start);
#else
                seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef  VM_FREELIST_ISADMA
                if (seg->end <= VM_ISADMA_BOUNDARY) {
                        flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: ISADMA flind < 0"));
                } else
#endif
#ifdef  VM_FREELIST_LOWMEM
                if (seg->end <= VM_LOWMEM_BOUNDARY) {
                        flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: LOWMEM flind < 0"));
                } else
#endif
#ifdef  VM_FREELIST_DMA32
                if (seg->end <= VM_DMA32_BOUNDARY) {
                        flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: DMA32 flind < 0"));
                } else
#endif
                {
                        flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: DEFAULT flind < 0"));
                }
                seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
        }

        /*
         * Initialize the free queues.
         */
        for (dom = 0; dom < vm_ndomains; dom++) {
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = vm_phys_free_queues[dom][flind][pind];
                                for (oind = 0; oind < VM_NFREEORDER; oind++)
                                        TAILQ_INIT(&fl[oind].pl);
                        }
                }
        }

        rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
        vm_page_t m_buddy;

        while (oind > order) {
                oind--;
                m_buddy = &m[1 << oind];
                KASSERT(m_buddy->order == VM_NFREEORDER,
                    ("vm_phys_split_pages: page %p has unexpected order %d",
                    m_buddy, m_buddy->order));
                vm_freelist_add(fl, m_buddy, oind, 0);
        }
}
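
/*
 * Worked example: splitting an order-3 chunk (eight pages, m[0..7]) down
 * to order 0 frees the upper buddy at each step: m[4] at order 2, then
 * m[2] at order 1, then m[1] at order 0.  The caller keeps m[0], and the
 * other seven pages remain on free lists as three power-of-two chunks
 * (orders 2, 1, and 0).
 */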

/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
        vm_page_t m;
        struct vm_domain *vmd;

        vm_cnt.v_page_count++;
        m = vm_phys_paddr_to_vm_page(pa);
        m->phys_addr = pa;
        m->queue = PQ_NONE;
        m->segind = vm_phys_paddr_to_segind(pa);
        vmd = vm_phys_domain(m);
        vmd->vmd_page_count++;
        vmd->vmd_segs |= 1UL << m->segind;
        KASSERT(m->order == VM_NFREEORDER,
            ("vm_phys_add_page: page %p has unexpected order %d",
            m, m->order));
        m->pool = VM_FREEPOOL_DEFAULT;
        pmap_page_init(m);
        mtx_lock(&vm_page_queue_free_mtx);
        vm_phys_freecnt_adj(m, 1);
        vm_phys_free_pages(m, 0);
        mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
        vm_page_t m;
        int dom, domain, flind;

        KASSERT(pool < VM_NFREEPOOL,
            ("vm_phys_alloc_pages: pool %d is out of range", pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_alloc_pages: order %d is out of range", order));

        for (dom = 0; dom < vm_ndomains; dom++) {
                domain = vm_rr_selectdomain();
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        m = vm_phys_alloc_domain_pages(domain, flind, pool,
                            order);
                        if (m != NULL)
                                return (m);
                }
        }
        return (NULL);
}
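
/*
 * Illustrative caller sketch (follows the locking protocol required
 * above; not a definitive in-tree consumer):
 *
 *      vm_page_t m;
 *
 *      mtx_lock(&vm_page_queue_free_mtx);
 *      m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, 0);
 *      mtx_unlock(&vm_page_queue_free_mtx);
 *      if (m == NULL)
 *              ...     (every domain and free list was empty)
 *
 * Note that a single page is order 0; order n yields 2^n contiguous
 * pages.
 */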

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int freelist, int pool, int order)
{
        vm_page_t m;
        int dom, domain;

        KASSERT(freelist < VM_NFREELIST,
            ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
            freelist));
        KASSERT(pool < VM_NFREEPOOL,
            ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
        for (dom = 0; dom < vm_ndomains; dom++) {
                domain = vm_rr_selectdomain();
                m = vm_phys_alloc_domain_pages(domain,
                    vm_freelist_to_flind[freelist], pool, order);
                if (m != NULL)
                        return (m);
        }
        return (NULL);
}

static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
        struct vm_freelist *fl;
        struct vm_freelist *alt;
        int oind, pind;
        vm_page_t m;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        fl = &vm_phys_free_queues[domain][flind][pool][0];
        for (oind = order; oind < VM_NFREEORDER; oind++) {
                m = TAILQ_FIRST(&fl[oind].pl);
                if (m != NULL) {
                        vm_freelist_rem(fl, m, oind);
                        vm_phys_split_pages(m, oind, fl, order);
                        return (m);
                }
        }

        /*
         * The given pool was empty.  Find the largest
         * contiguous, power-of-two-sized set of pages in any
         * pool.  Transfer these pages to the given pool, and
         * use them to satisfy the allocation.
         */
        for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                        alt = &vm_phys_free_queues[domain][flind][pind][0];
                        m = TAILQ_FIRST(&alt[oind].pl);
                        if (m != NULL) {
                                vm_freelist_rem(alt, m, oind);
                                vm_phys_set_pool(pool, m, oind);
                                vm_phys_split_pages(m, oind, fl, order);
                                return (m);
                        }
                }
        }
        return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_seg *seg;
        int segind;

        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                if (pa >= seg->start && pa < seg->end)
                        return (&seg->first_page[atop(pa - seg->start)]);
        }
        return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_fictitious_seg tmp, *seg;
        vm_page_t m;

        m = NULL;
        tmp.start = pa;
        tmp.end = 0;

        rw_rlock(&vm_phys_fictitious_reg_lock);
        seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
        rw_runlock(&vm_phys_fictitious_reg_lock);
        if (seg == NULL)
                return (NULL);

        m = &seg->first_page[atop(pa - seg->start)];
        KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

        return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
        long i;

        for (i = 0; i < page_count; i++) {
                vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
                range[i].oflags &= ~VPO_UNMANAGED;
                range[i].busy_lock = VPB_UNBUSIED;
        }
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
        struct vm_phys_fictitious_seg *seg;
        vm_page_t fp;
        long page_count;
#ifdef VM_PHYSSEG_DENSE
        long pi, pe;
        long dpage_count;
#endif

        KASSERT(start < end,
            ("Start of segment isn't less than end (start: %jx end: %jx)",
            (uintmax_t)start, (uintmax_t)end));

        page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
        pi = atop(start);
        pe = atop(end);
        if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
                fp = &vm_page_array[pi - first_page];
                if ((pe - first_page) > vm_page_array_size) {
                        /*
                         * We have a segment that starts inside
                         * of vm_page_array, but ends outside of it.
                         *
                         * Use vm_page_array pages for those that are
                         * inside of the vm_page_array range, and
                         * allocate the remaining ones.
                         */
                        dpage_count = vm_page_array_size - (pi - first_page);
                        vm_phys_fictitious_init_range(fp, start, dpage_count,
                            memattr);
                        page_count -= dpage_count;
                        start += ptoa(dpage_count);
                        goto alloc;
                }
                /*
                 * We can allocate the full range from vm_page_array,
                 * so there's no need to register the range in the tree.
                 */
                vm_phys_fictitious_init_range(fp, start, page_count, memattr);
                return (0);
        } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
                /*
                 * We have a segment that ends inside of vm_page_array,
                 * but starts outside of it.
                 */
                fp = &vm_page_array[0];
                dpage_count = pe - first_page;
                vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
                    memattr);
                end -= ptoa(dpage_count);
                page_count -= dpage_count;
                goto alloc;
        } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
                /*
                 * Trying to register a fictitious range that expands before
                 * and after vm_page_array.
                 */
                return (EINVAL);
        } else {
alloc:
#endif
                fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
                    M_WAITOK | M_ZERO);
#ifdef VM_PHYSSEG_DENSE
        }
#endif
        vm_phys_fictitious_init_range(fp, start, page_count, memattr);

        seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
        seg->start = start;
        seg->end = end;
        seg->first_page = fp;

        rw_wlock(&vm_phys_fictitious_reg_lock);
        RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
        rw_wunlock(&vm_phys_fictitious_reg_lock);

        return (0);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
        struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
        long pi, pe;
#endif

        KASSERT(start < end,
            ("Start of segment isn't less than end (start: %jx end: %jx)",
            (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
        pi = atop(start);
        pe = atop(end);
        if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
                if ((pe - first_page) <= vm_page_array_size) {
                        /*
                         * This segment was allocated using vm_page_array
                         * only, there's nothing to do since those pages
                         * were never added to the tree.
                         */
                        return;
                }
                /*
                 * We have a segment that starts inside
                 * of vm_page_array, but ends outside of it.
                 *
                 * Calculate how many pages were added to the
                 * tree and free them.
                 */
                start = ptoa(first_page + vm_page_array_size);
        } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
                /*
                 * We have a segment that ends inside of vm_page_array,
                 * but starts outside of it.
                 */
                end = ptoa(first_page);
        } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
                /* Since it's not possible to register such a range, panic. */
                panic(
                    "Unregistering not registered fictitious range [%#jx:%#jx]",
                    (uintmax_t)start, (uintmax_t)end);
        }
#endif
        tmp.start = start;
        tmp.end = 0;

        rw_wlock(&vm_phys_fictitious_reg_lock);
        seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
        if (seg == NULL || seg->start != start || seg->end != end) {
                rw_wunlock(&vm_phys_fictitious_reg_lock);
                panic(
                    "Unregistering not registered fictitious range [%#jx:%#jx]",
                    (uintmax_t)start, (uintmax_t)end);
        }
        RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
        rw_wunlock(&vm_phys_fictitious_reg_lock);
        free(seg->first_page, M_FICT_PAGES);
        free(seg, M_FICT_PAGES);
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
        struct vm_phys_seg *seg;
        int segind;

        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                if (pa >= seg->start && pa < seg->end)
                        return (segind);
        }
        panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
            (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa;
        vm_page_t m_buddy;

        KASSERT(m->order == VM_NFREEORDER,
            ("vm_phys_free_pages: page %p has unexpected order %d",
            m, m->order));
        KASSERT(m->pool < VM_NFREEPOOL,
            ("vm_phys_free_pages: page %p has unexpected pool %d",
            m, m->pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_free_pages: order %d is out of range", order));
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        seg = &vm_phys_segs[m->segind];
        if (order < VM_NFREEORDER - 1) {
                pa = VM_PAGE_TO_PHYS(m);
                do {
                        pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
                        if (pa < seg->start || pa >= seg->end)
                                break;
                        m_buddy = &seg->first_page[atop(pa - seg->start)];
                        if (m_buddy->order != order)
                                break;
                        fl = (*seg->free_queues)[m_buddy->pool];
                        vm_freelist_rem(fl, m_buddy, order);
                        if (m_buddy->pool != m->pool)
                                vm_phys_set_pool(m->pool, m_buddy, order);
                        order++;
                        pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
                        m = &seg->first_page[atop(pa - seg->start)];
                } while (order < VM_NFREEORDER - 1);
        }
        fl = (*seg->free_queues)[m->pool];
        vm_freelist_add(fl, m, order, 1);
}
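
/*
 * Worked example of the buddy computation above (PAGE_SHIFT = 12):
 * freeing an order-0 page at 0x3000 gives buddy pa = 0x3000 ^ 0x1000 =
 * 0x2000.  If that page is free at order 0, the pair merges into an
 * order-1 chunk at 0x2000 (pa &= ~0x1fff), whose own buddy is
 * 0x2000 ^ 0x2000 = 0x0000, and coalescing continues while each buddy
 * is in range, free, and of matching order.
 */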

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
        u_int n;
        int order;

        /*
         * Avoid unnecessary coalescing by freeing the pages in the largest
         * possible power-of-two-sized subsets.
         */
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        for (;; npages -= n) {
                /*
                 * Unsigned "min" is used here so that "order" is assigned
                 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
                 * or the low-order bits of its physical address are zero
                 * because the size of a physical address exceeds the size of
                 * a long.
                 */
                order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
                    VM_NFREEORDER - 1);
                n = 1 << order;
                if (npages < n)
                        break;
                vm_phys_free_pages(m, order);
                m += n;
        }
        /* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
        for (; npages > 0; npages -= n) {
                order = flsl(npages) - 1;
                n = 1 << order;
                vm_phys_free_pages(m, order);
                m += n;
        }
}
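
/*
 * Worked example (PAGE_SHIFT = 12): freeing 13 pages starting at
 * physical address 0x6000 (page frame 6).  The first loop frees an
 * order-1 chunk at frame 6 (alignment allows only 2 pages) and an
 * order-3 chunk at frame 8, leaving 3 pages at frame 16; frame 16 could
 * host an order-4 chunk, but only 3 pages remain, so the loop exits.
 * The second loop then frees an order-1 chunk and an order-0 page.
 */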

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
        vm_page_t m_tmp;

        for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
                m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa, pa_half;
        vm_page_t m_set, m_tmp;
        int order;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

        /*
         * First, find the contiguous, power of two-sized set of free
         * physical pages containing the given physical page "m" and
         * assign it to "m_set".
         */
        seg = &vm_phys_segs[m->segind];
        for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
            order < VM_NFREEORDER - 1; ) {
                order++;
                pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
                if (pa >= seg->start)
                        m_set = &seg->first_page[atop(pa - seg->start)];
                else
                        return (FALSE);
        }
        if (m_set->order < order)
                return (FALSE);
        if (m_set->order == VM_NFREEORDER)
                return (FALSE);
        KASSERT(m_set->order < VM_NFREEORDER,
            ("vm_phys_unfree_page: page %p has unexpected order %d",
            m_set, m_set->order));

        /*
         * Next, remove "m_set" from the free lists.  Finally, extract
         * "m" from "m_set" using an iterative algorithm: While "m_set"
         * is larger than a page, shrink "m_set" by returning the half
         * of "m_set" that does not contain "m" to the free lists.
         */
        fl = (*seg->free_queues)[m_set->pool];
        order = m_set->order;
        vm_freelist_rem(fl, m_set, order);
        while (order > 0) {
                order--;
                pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
                if (m->phys_addr < pa_half)
                        m_tmp = &seg->first_page[atop(pa_half - seg->start)];
                else {
                        m_tmp = m_set;
                        m_set = &seg->first_page[atop(pa_half - seg->start)];
                }
                vm_freelist_add(fl, m_tmp, order, 0);
        }
        KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
        return (TRUE);
}
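
/*
 * Worked example (PAGE_SHIFT = 12): extracting the page at 0x5000 from a
 * free order-2 chunk at 0x4000.  The chunk is removed, then halved
 * twice: at order 1, pa_half = 0x4000 ^ 0x2000 = 0x6000 and 0x5000 <
 * 0x6000, so [0x6000, 0x8000) is returned to the free lists; at order 0,
 * pa_half = 0x4000 ^ 0x1000 = 0x5000 and 0x5000 >= 0x5000, so the page
 * at 0x4000 is returned instead, leaving exactly the page at 0x5000
 * removed.
 */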

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
        static struct vm_freelist *fl;
        static int flind, oind, pind;
        vm_page_t m, m_tmp;
        int domain;

        domain = vm_rr_selectdomain();
        fl = vm_phys_free_queues[domain][0][0];
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        for (;;) {
                TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
                        for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
                                if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
                                        vm_phys_unfree_page(m_tmp);
                                        vm_phys_freecnt_adj(m, -1);
                                        mtx_unlock(&vm_page_queue_free_mtx);
                                        pmap_zero_page_idle(m_tmp);
                                        m_tmp->flags |= PG_ZERO;
                                        mtx_lock(&vm_page_queue_free_mtx);
                                        vm_phys_freecnt_adj(m, 1);
                                        vm_phys_free_pages(m_tmp, 0);
                                        vm_page_zero_count++;
                                        cnt_prezero++;
                                        return (TRUE);
                                }
                        }
                }
                oind++;
                if (oind == VM_NFREEORDER) {
                        oind = 0;
                        pind++;
                        if (pind == VM_NFREEPOOL) {
                                pind = 0;
                                flind++;
                                if (flind == vm_nfreelists)
                                        flind = 0;
                        }
                        fl = vm_phys_free_queues[domain][flind][pind];
                }
        }
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa, pa_last, size;
        vm_page_t m, m_ret;
        u_long npages_end;
        int dom, domain, flind, oind, order, pind;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        size = npages << PAGE_SHIFT;
        KASSERT(size != 0,
            ("vm_phys_alloc_contig: size must not be 0"));
        KASSERT((alignment & (alignment - 1)) == 0,
            ("vm_phys_alloc_contig: alignment must be a power of 2"));
        KASSERT((boundary & (boundary - 1)) == 0,
            ("vm_phys_alloc_contig: boundary must be a power of 2"));
        /* Compute the queue that is the best fit for npages. */
        for (order = 0; (1 << order) < npages; order++);
        dom = 0;
restartdom:
        domain = vm_rr_selectdomain();
        for (flind = 0; flind < vm_nfreelists; flind++) {
                for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = &vm_phys_free_queues[domain][flind][pind][0];
                                TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
                                        /*
                                         * A free list may contain physical pages
                                         * from one or more segments.
                                         */
                                        seg = &vm_phys_segs[m_ret->segind];
                                        if (seg->start > high ||
                                            low >= seg->end)
                                                continue;

                                        /*
                                         * Is the size of this allocation request
                                         * larger than the largest block size?
                                         */
                                        if (order >= VM_NFREEORDER) {
                                                /*
                                                 * Determine if a sufficient number
                                                 * of subsequent blocks to satisfy
                                                 * the allocation request are free.
                                                 */
                                                pa = VM_PAGE_TO_PHYS(m_ret);
                                                pa_last = pa + size;
                                                for (;;) {
                                                        pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
                                                        if (pa >= pa_last)
                                                                break;
                                                        if (pa < seg->start ||
                                                            pa >= seg->end)
                                                                break;
                                                        m = &seg->first_page[atop(pa - seg->start)];
                                                        if (m->order != VM_NFREEORDER - 1)
                                                                break;
                                                }
                                                /* If not, continue to the next block. */
                                                if (pa < pa_last)
                                                        continue;
                                        }

                                        /*
                                         * Determine if the blocks are within the given range,
                                         * satisfy the given alignment, and do not cross the
                                         * given boundary.
                                         */
                                        pa = VM_PAGE_TO_PHYS(m_ret);
                                        if (pa >= low &&
                                            pa + size <= high &&
                                            (pa & (alignment - 1)) == 0 &&
                                            ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
                                                goto done;
                                }
                        }
                }
        }
        if (++dom < vm_ndomains)
                goto restartdom;
        return (NULL);
done:
        for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
                fl = (*seg->free_queues)[m->pool];
                vm_freelist_rem(fl, m, m->order);
        }
        if (m_ret->pool != VM_FREEPOOL_DEFAULT)
                vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
        fl = (*seg->free_queues)[m_ret->pool];
        vm_phys_split_pages(m_ret, oind, fl, order);
        /* Return excess pages to the free lists. */
        npages_end = roundup2(npages, 1 << imin(oind, order));
        if (npages < npages_end)
                vm_phys_free_contig(&m_ret[npages], npages_end - npages);
        return (m_ret);
}
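
/*
 * Worked example of the boundary test above: XORing the first and last
 * byte addresses of the candidate run leaves set bits only where they
 * differ, and masking with ~(boundary - 1) asks whether they differ in
 * any bit at or above log2(boundary).  With boundary = 0x10000 (64K),
 * pa = 0x8000 and size = 0x4000: (0x8000 ^ 0xbfff) & ~0xffff = 0, so
 * [0x8000, 0xc000) does not cross a 64K boundary, whereas a run
 * [0xc000, 0x14000) yields (0xc000 ^ 0x13fff) & ~0xffff = 0x10000 != 0
 * and is rejected.
 */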

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
        struct vm_freelist *fl;
        int flind, oind, pind, dom;

        for (dom = 0; dom < vm_ndomains; dom++) {
                db_printf("DOMAIN: %d\n", dom);
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        db_printf("FREE LIST %d:\n"
                            "\n  ORDER (SIZE)  |  NUMBER"
                            "\n              ", flind);
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                db_printf("  |  POOL %d", pind);
                        db_printf("\n--            ");
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                db_printf("-- --      ");
                        db_printf("--\n");
                        for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                                db_printf("  %2.2d (%6.6dK)", oind,
                                    1 << (PAGE_SHIFT - 10 + oind));
                                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                        fl = vm_phys_free_queues[dom][flind][pind];
                                        db_printf("  |  %6.6d", fl[oind].lcnt);
                                }
                                db_printf("\n");
                        }
                        db_printf("\n");
                }
                db_printf("\n");
        }
}
#endif