/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_reserv.h>

/*
 * VM_FREELIST_DEFAULT is split into VM_NDOMAIN lists, one for each
 * domain.  These extra lists are stored at the end of the regular
 * free lists starting with VM_NFREELIST.
 */
#define VM_RAW_NFREELIST        (VM_NFREELIST + VM_NDOMAIN - 1)

struct vm_freelist {
        struct pglist pl;
        int lcnt;
};

struct vm_phys_seg {
        vm_paddr_t      start;
        vm_paddr_t      end;
        vm_page_t       first_page;
        int             domain;
        struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER];
};

struct mem_affinity *mem_affinity;

static struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];

static int vm_phys_nsegs;

static struct vm_freelist
    vm_phys_free_queues[VM_RAW_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
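
/*
 * For each memory domain, an ordering of the free lists: each entry
 * points into vm_phys_free_queues[], arranged so that a domain's own
 * memory is searched before that of other domains.
 */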
static struct vm_freelist
(*vm_phys_lookup_lists[VM_NDOMAIN][VM_RAW_NFREELIST])[VM_NFREEPOOL][VM_NFREEORDER];

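/*
 * The number of free lists currently in use.  This grows beyond
 * VM_FREELIST_DEFAULT + 1 as the ISADMA, HIGHMEM, and per-domain
 * lists are populated during initialization.
 */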
static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#if VM_NDOMAIN > 1
static int sysctl_vm_phys_lookup_lists(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_lookup_lists, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_lookup_lists, "A", "Phys Lookup Lists");
#endif

static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind,
    int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_freelist *fl;
        char *cbuf;
        const int cbufsize = vm_nfreelists*(VM_NFREEORDER + 1)*81;
        int error, flind, oind, pind;

        cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
        sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
        for (flind = 0; flind < vm_nfreelists; flind++) {
                sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
                    "\n  ORDER (SIZE)  |  NUMBER"
                    "\n              ", flind);
                for (pind = 0; pind < VM_NFREEPOOL; pind++)
                        sbuf_printf(&sbuf, "  |  POOL %d", pind);
                sbuf_printf(&sbuf, "\n--            ");
                for (pind = 0; pind < VM_NFREEPOOL; pind++)
                        sbuf_printf(&sbuf, "-- --      ");
                sbuf_printf(&sbuf, "--\n");
                for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                        sbuf_printf(&sbuf, "  %2.2d (%6.6dK)", oind,
                            1 << (PAGE_SHIFT - 10 + oind));
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = vm_phys_free_queues[flind][pind];
                                sbuf_printf(&sbuf, "  |  %6.6d", fl[oind].lcnt);
                        }
                        sbuf_printf(&sbuf, "\n");
                }
        }
        sbuf_finish(&sbuf);
        error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
        sbuf_delete(&sbuf);
        free(cbuf, M_TEMP);
        return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_phys_seg *seg;
        char *cbuf;
        const int cbufsize = VM_PHYSSEG_MAX*(VM_NFREEORDER + 1)*81;
        int error, segind;

        cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
        sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
                seg = &vm_phys_segs[segind];
                sbuf_printf(&sbuf, "start:     %#jx\n",
                    (uintmax_t)seg->start);
                sbuf_printf(&sbuf, "end:       %#jx\n",
                    (uintmax_t)seg->end);
                sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
                sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
        }
        sbuf_finish(&sbuf);
        error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
        sbuf_delete(&sbuf);
        free(cbuf, M_TEMP);
        return (error);
}

#if VM_NDOMAIN > 1
/*
 * Outputs the set of free list lookup lists.
 */
static int
sysctl_vm_phys_lookup_lists(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        char *cbuf;
        const int cbufsize = (vm_nfreelists + 1) * VM_NDOMAIN * 81;
        int domain, error, flind, ndomains;

        ndomains = vm_nfreelists - VM_NFREELIST + 1;
        cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
        sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
        for (domain = 0; domain < ndomains; domain++) {
                sbuf_printf(&sbuf, "\nDOMAIN %d:\n\n", domain);
                for (flind = 0; flind < vm_nfreelists; flind++)
                        sbuf_printf(&sbuf, "  [%d]:\t%p\n", flind,
                            vm_phys_lookup_lists[domain][flind]);
        }
        sbuf_finish(&sbuf);
        error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
        sbuf_delete(&sbuf);
        free(cbuf, M_TEMP);
        return (error);
}
#endif

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain)
{
        struct vm_phys_seg *seg;
#ifdef VM_PHYSSEG_SPARSE
        long pages;
        int segind;

        pages = 0;
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                pages += atop(seg->end - seg->start);
        }
#endif
        KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
            ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
        seg = &vm_phys_segs[vm_phys_nsegs++];
        seg->start = start;
        seg->end = end;
        seg->domain = domain;
#ifdef VM_PHYSSEG_SPARSE
        seg->first_page = &vm_page_array[pages];
#else
        seg->first_page = PHYS_TO_VM_PAGE(start);
#endif
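        /*
         * A segment in a domain other than 0 has its default free list
         * redirected to a per-domain list stored after the fixed lists.
         */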
#if VM_NDOMAIN > 1
        if (flind == VM_FREELIST_DEFAULT && domain != 0) {
                flind = VM_NFREELIST + (domain - 1);
                if (flind >= vm_nfreelists)
                        vm_nfreelists = flind + 1;
        }
#endif
        seg->free_queues = &vm_phys_free_queues[flind];
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
{
        int i;

        if (mem_affinity == NULL) {
                _vm_phys_create_seg(start, end, flind, 0);
                return;
        }

        for (i = 0;; i++) {
                if (mem_affinity[i].end == 0)
                        panic("Reached end of affinity info");
                if (mem_affinity[i].end <= start)
                        continue;
                if (mem_affinity[i].start > start)
                        panic("No affinity info for start %jx",
                            (uintmax_t)start);
                if (mem_affinity[i].end >= end) {
                        _vm_phys_create_seg(start, end, flind,
                            mem_affinity[i].domain);
                        break;
                }
                _vm_phys_create_seg(start, mem_affinity[i].end, flind,
                    mem_affinity[i].domain);
                start = mem_affinity[i].end;
        }
}

/*
 * Initialize the physical memory allocator.
 */
void
vm_phys_init(void)
{
        struct vm_freelist *fl;
        int flind, i, oind, pind;
#if VM_NDOMAIN > 1
        int ndomains, j;
#endif

        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef  VM_FREELIST_ISADMA
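                /* 16777216 == 16 MB, the ISA DMA address limit. */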
                if (phys_avail[i] < 16777216) {
                        if (phys_avail[i + 1] > 16777216) {
                                vm_phys_create_seg(phys_avail[i], 16777216,
                                    VM_FREELIST_ISADMA);
                                vm_phys_create_seg(16777216, phys_avail[i + 1],
                                    VM_FREELIST_DEFAULT);
                        } else {
                                vm_phys_create_seg(phys_avail[i],
                                    phys_avail[i + 1], VM_FREELIST_ISADMA);
                        }
                        if (VM_FREELIST_ISADMA >= vm_nfreelists)
                                vm_nfreelists = VM_FREELIST_ISADMA + 1;
                } else
#endif
#ifdef  VM_FREELIST_HIGHMEM
                if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) {
                        if (phys_avail[i] < VM_HIGHMEM_ADDRESS) {
                                vm_phys_create_seg(phys_avail[i],
                                    VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT);
                                vm_phys_create_seg(VM_HIGHMEM_ADDRESS,
                                    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
                        } else {
                                vm_phys_create_seg(phys_avail[i],
                                    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
                        }
                        if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
                                vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
                } else
#endif
                vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],
                    VM_FREELIST_DEFAULT);
        }
        for (flind = 0; flind < vm_nfreelists; flind++) {
                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                        fl = vm_phys_free_queues[flind][pind];
                        for (oind = 0; oind < VM_NFREEORDER; oind++)
                                TAILQ_INIT(&fl[oind].pl);
                }
        }
#if VM_NDOMAIN > 1
        /*
         * Build a free list lookup list for each domain.  All of the
         * memory domain lists are inserted at the VM_FREELIST_DEFAULT
         * index in a round-robin order starting with the current
         * domain.
         */
        ndomains = vm_nfreelists - VM_NFREELIST + 1;
        for (flind = 0; flind < VM_FREELIST_DEFAULT; flind++)
                for (i = 0; i < ndomains; i++)
                        vm_phys_lookup_lists[i][flind] =
                            &vm_phys_free_queues[flind];
        for (i = 0; i < ndomains; i++)
                for (j = 0; j < ndomains; j++) {
                        flind = (i + j) % ndomains;
                        if (flind == 0)
                                flind = VM_FREELIST_DEFAULT;
                        else
                                flind += VM_NFREELIST - 1;
                        vm_phys_lookup_lists[i][VM_FREELIST_DEFAULT + j] =
                            &vm_phys_free_queues[flind];
                }
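        /*
         * For example, with two domains, domain 0's lookup list orders
         * the domain lists {0, 1} at the default slots, while domain
         * 1's orders them {1, 0}, so local memory is always tried
         * first.
         */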
        for (flind = VM_FREELIST_DEFAULT + 1; flind < VM_NFREELIST;
             flind++)
                for (i = 0; i < ndomains; i++)
                        vm_phys_lookup_lists[i][flind + ndomains - 1] =
                            &vm_phys_free_queues[flind];
#else
        for (flind = 0; flind < vm_nfreelists; flind++)
                vm_phys_lookup_lists[0][flind] = &vm_phys_free_queues[flind];
#endif
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
        vm_page_t m_buddy;

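        /*
         * Repeatedly halve the block: at each step the upper half
         * becomes the buddy at order "oind" and is placed on its free
         * list, while the lower half containing "m" is halved again
         * until the block reaches the requested order.
         */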
        while (oind > order) {
                oind--;
                m_buddy = &m[1 << oind];
                KASSERT(m_buddy->order == VM_NFREEORDER,
                    ("vm_phys_split_pages: page %p has unexpected order %d",
                    m_buddy, m_buddy->order));
                m_buddy->order = oind;
                TAILQ_INSERT_HEAD(&fl[oind].pl, m_buddy, pageq);
                fl[oind].lcnt++;
        }
}

/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
        vm_page_t m;

        cnt.v_page_count++;
        m = vm_phys_paddr_to_vm_page(pa);
        m->phys_addr = pa;
        m->segind = vm_phys_paddr_to_segind(pa);
        m->flags = PG_FREE;
        KASSERT(m->order == VM_NFREEORDER,
            ("vm_phys_add_page: page %p has unexpected order %d",
            m, m->order));
        m->pool = VM_FREEPOOL_DEFAULT;
        pmap_page_init(m);
        mtx_lock(&vm_page_queue_free_mtx);
        cnt.v_free_count++;
        vm_phys_free_pages(m, 0);
        mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
        vm_page_t m;
        int flind;

        for (flind = 0; flind < vm_nfreelists; flind++) {
                m = vm_phys_alloc_freelist_pages(flind, pool, order);
                if (m != NULL)
                        return (m);
        }
        return (NULL);
}

/*
 * Find and dequeue a free page on the given free list, with the
 * specified pool and order.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int flind, int pool, int order)
{
        struct vm_freelist *fl;
        struct vm_freelist *alt;
        int domain, oind, pind;
        vm_page_t m;

        KASSERT(flind < VM_NFREELIST,
            ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind));
        KASSERT(pool < VM_NFREEPOOL,
            ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

#if VM_NDOMAIN > 1
        domain = PCPU_GET(domain);
#else
        domain = 0;
#endif
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        fl = (*vm_phys_lookup_lists[domain][flind])[pool];
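        /*
         * First search the requested pool, starting at the requested
         * order; a larger block found there is split down to size by
         * vm_phys_split_pages().
         */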
        for (oind = order; oind < VM_NFREEORDER; oind++) {
                m = TAILQ_FIRST(&fl[oind].pl);
                if (m != NULL) {
                        TAILQ_REMOVE(&fl[oind].pl, m, pageq);
                        fl[oind].lcnt--;
                        m->order = VM_NFREEORDER;
                        vm_phys_split_pages(m, oind, fl, order);
                        return (m);
                }
        }

        /*
         * The given pool was empty.  Find the largest
         * contiguous, power-of-two-sized set of pages in any
         * pool.  Transfer these pages to the given pool, and
         * use them to satisfy the allocation.
         */
        for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                        alt = (*vm_phys_lookup_lists[domain][flind])[pind];
                        m = TAILQ_FIRST(&alt[oind].pl);
                        if (m != NULL) {
                                TAILQ_REMOVE(&alt[oind].pl, m, pageq);
                                alt[oind].lcnt--;
                                m->order = VM_NFREEORDER;
                                vm_phys_set_pool(pool, m, oind);
                                vm_phys_split_pages(m, oind, fl, order);
                                return (m);
                        }
                }
        }
        return (NULL);
}

/*
 * Allocate physical memory from phys_avail[].
 */
vm_paddr_t
vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment)
{
        vm_paddr_t pa;
        int i;

        size = round_page(size);
        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                if (phys_avail[i + 1] - phys_avail[i] < size)
                        continue;
                pa = phys_avail[i];
                phys_avail[i] += size;
                return (pa);
        }
        panic("vm_phys_bootstrap_alloc");
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_seg *seg;
        int segind;

        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                if (pa >= seg->start && pa < seg->end)
                        return (&seg->first_page[atop(pa - seg->start)]);
        }
        return (NULL);
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
        struct vm_phys_seg *seg;
        int segind;

        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                if (pa >= seg->start && pa < seg->end)
                        return (segind);
        }
        panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
            (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa, pa_buddy;
        vm_page_t m_buddy;

        KASSERT(m->order == VM_NFREEORDER,
            ("vm_phys_free_pages: page %p has unexpected order %d",
            m, m->order));
        KASSERT(m->pool < VM_NFREEPOOL,
            ("vm_phys_free_pages: page %p has unexpected pool %d",
            m, m->pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_free_pages: order %d is out of range", order));
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        pa = VM_PAGE_TO_PHYS(m);
        seg = &vm_phys_segs[m->segind];
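        /*
         * Coalesce upward while possible: a block's buddy at order
         * "order" differs from it only in bit (PAGE_SHIFT + order) of
         * its physical address, so XOR locates it.  Stop when the
         * buddy lies outside the segment or is not free at the same
         * order.
         */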
        while (order < VM_NFREEORDER - 1) {
                pa_buddy = pa ^ (1 << (PAGE_SHIFT + order));
                if (pa_buddy < seg->start ||
                    pa_buddy >= seg->end)
                        break;
                m_buddy = &seg->first_page[atop(pa_buddy - seg->start)];
                if (m_buddy->order != order)
                        break;
                fl = (*seg->free_queues)[m_buddy->pool];
                TAILQ_REMOVE(&fl[m_buddy->order].pl, m_buddy, pageq);
                fl[m_buddy->order].lcnt--;
                m_buddy->order = VM_NFREEORDER;
                if (m_buddy->pool != m->pool)
                        vm_phys_set_pool(m->pool, m_buddy, order);
                order++;
                pa &= ~((1 << (PAGE_SHIFT + order)) - 1);
                m = &seg->first_page[atop(pa - seg->start)];
        }
        m->order = order;
        fl = (*seg->free_queues)[m->pool];
        TAILQ_INSERT_TAIL(&fl[order].pl, m, pageq);
        fl[order].lcnt++;
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
        vm_page_t m_tmp;

        for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
                m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa, pa_half;
        vm_page_t m_set, m_tmp;
        int order;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

        /*
         * First, find the contiguous, power of two-sized set of free
         * physical pages containing the given physical page "m" and
         * assign it to "m_set".
         */
        seg = &vm_phys_segs[m->segind];
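        /*
         * At each candidate order, mask "m"'s physical address down to
         * that order's alignment; only the page at that address can
         * head a free block of that order containing "m".
         */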
        for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
            order < VM_NFREEORDER - 1; ) {
                order++;
                pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
                if (pa >= seg->start)
                        m_set = &seg->first_page[atop(pa - seg->start)];
                else
                        return (FALSE);
        }
        if (m_set->order < order)
                return (FALSE);
        if (m_set->order == VM_NFREEORDER)
                return (FALSE);
        KASSERT(m_set->order < VM_NFREEORDER,
            ("vm_phys_unfree_page: page %p has unexpected order %d",
            m_set, m_set->order));

        /*
         * Next, remove "m_set" from the free lists.  Finally, extract
         * "m" from "m_set" using an iterative algorithm: While "m_set"
         * is larger than a page, shrink "m_set" by returning the half
         * of "m_set" that does not contain "m" to the free lists.
         */
        fl = (*seg->free_queues)[m_set->pool];
        order = m_set->order;
        TAILQ_REMOVE(&fl[order].pl, m_set, pageq);
        fl[order].lcnt--;
        m_set->order = VM_NFREEORDER;
        while (order > 0) {
                order--;
                pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
                if (m->phys_addr < pa_half)
                        m_tmp = &seg->first_page[atop(pa_half - seg->start)];
                else {
                        m_tmp = m_set;
                        m_set = &seg->first_page[atop(pa_half - seg->start)];
                }
                m_tmp->order = order;
                TAILQ_INSERT_HEAD(&fl[order].pl, m_tmp, pageq);
                fl[order].lcnt++;
        }
        KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
        return (TRUE);
}

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
        static struct vm_freelist *fl = vm_phys_free_queues[0][0];
        static int flind, oind, pind;
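        /*
         * The static cursor (fl, flind, pind, oind) persists across
         * calls, so scanning continues from the most recently visited
         * queue rather than restarting at the first queue.
         */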
        vm_page_t m, m_tmp;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        for (;;) {
                TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, pageq) {
                        for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
                                if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
                                        vm_phys_unfree_page(m_tmp);
                                        cnt.v_free_count--;
                                        mtx_unlock(&vm_page_queue_free_mtx);
                                        pmap_zero_page_idle(m_tmp);
                                        m_tmp->flags |= PG_ZERO;
                                        mtx_lock(&vm_page_queue_free_mtx);
                                        cnt.v_free_count++;
                                        vm_phys_free_pages(m_tmp, 0);
                                        vm_page_zero_count++;
                                        cnt_prezero++;
                                        return (TRUE);
                                }
                        }
                }
                oind++;
                if (oind == VM_NFREEORDER) {
                        oind = 0;
                        pind++;
                        if (pind == VM_NFREEPOOL) {
                                pind = 0;
                                flind++;
                                if (flind == vm_nfreelists)
                                        flind = 0;
                        }
                        fl = vm_phys_free_queues[flind][pind];
                }
        }
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, unsigned long boundary)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        struct vnode *vp;
        vm_paddr_t pa, pa_last, size;
        vm_page_t deferred_vdrop_list, m, m_ret;
        int domain, flind, i, oind, order, pind;

#if VM_NDOMAIN > 1
        domain = PCPU_GET(domain);
#else
        domain = 0;
#endif
        size = npages << PAGE_SHIFT;
        KASSERT(size != 0,
            ("vm_phys_alloc_contig: size must not be 0"));
        KASSERT((alignment & (alignment - 1)) == 0,
            ("vm_phys_alloc_contig: alignment must be a power of 2"));
        KASSERT((boundary & (boundary - 1)) == 0,
            ("vm_phys_alloc_contig: boundary must be a power of 2"));
        deferred_vdrop_list = NULL;
        /* Compute the queue that is the best fit for npages. */
        for (order = 0; (1 << order) < npages; order++);
        mtx_lock(&vm_page_queue_free_mtx);
#if VM_NRESERVLEVEL > 0
retry:
#endif
        for (flind = 0; flind < vm_nfreelists; flind++) {
                for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = (*vm_phys_lookup_lists[domain][flind])
                                    [pind];
                                TAILQ_FOREACH(m_ret, &fl[oind].pl, pageq) {
                                        /*
                                         * A free list may contain physical pages
                                         * from one or more segments.
                                         */
                                        seg = &vm_phys_segs[m_ret->segind];
                                        if (seg->start > high ||
                                            low >= seg->end)
                                                continue;

                                        /*
                                         * Is the size of this allocation request
                                         * larger than the largest block size?
                                         */
                                        if (order >= VM_NFREEORDER) {
                                                /*
                                                 * Determine if a sufficient number
                                                 * of subsequent blocks to satisfy
                                                 * the allocation request are free.
                                                 */
                                                pa = VM_PAGE_TO_PHYS(m_ret);
                                                pa_last = pa + size;
                                                for (;;) {
                                                        pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
                                                        if (pa >= pa_last)
                                                                break;
                                                        if (pa < seg->start ||
                                                            pa >= seg->end)
                                                                break;
                                                        m = &seg->first_page[atop(pa - seg->start)];
                                                        if (m->order != VM_NFREEORDER - 1)
                                                                break;
                                                }
                                                /* If not, continue to the next block. */
                                                if (pa < pa_last)
                                                        continue;
                                        }

                                        /*
                                         * Determine if the blocks are within the given range,
                                         * satisfy the given alignment, and do not cross the
                                         * given boundary.
                                         */
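                                        /*
                                         * The boundary test XORs the first and last
                                         * byte addresses and masks off the bits below
                                         * the boundary; a nonzero result means the run
                                         * crosses a boundary-aligned address.  A zero
                                         * "boundary" makes the mask zero, so the test
                                         * always passes.
                                         */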
                                        pa = VM_PAGE_TO_PHYS(m_ret);
                                        if (pa >= low &&
                                            pa + size <= high &&
                                            (pa & (alignment - 1)) == 0 &&
                                            ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
                                                goto done;
                                }
                        }
                }
        }
#if VM_NRESERVLEVEL > 0
        if (vm_reserv_reclaim_contig(size, low, high, alignment, boundary))
                goto retry;
#endif
        mtx_unlock(&vm_page_queue_free_mtx);
        return (NULL);
done:
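        /*
         * Remove each power-of-two sub-block of the chosen run from its
         * free list; any pages beyond "npages" are returned to the free
         * lists below.
         */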
        for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
                fl = (*seg->free_queues)[m->pool];
                TAILQ_REMOVE(&fl[m->order].pl, m, pageq);
                fl[m->order].lcnt--;
                m->order = VM_NFREEORDER;
        }
        if (m_ret->pool != VM_FREEPOOL_DEFAULT)
                vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
        fl = (*seg->free_queues)[m_ret->pool];
        vm_phys_split_pages(m_ret, oind, fl, order);
        for (i = 0; i < npages; i++) {
                m = &m_ret[i];
                vp = vm_page_alloc_init(m);
                if (vp != NULL) {
                        /*
                         * Enqueue the vnode for deferred vdrop().
                         *
                         * Unmanaged pages don't use "pageq", so it
                         * can be safely abused to construct a short-
                         * lived queue of vnodes.
                         */
                        m->pageq.tqe_prev = (void *)vp;
                        m->pageq.tqe_next = deferred_vdrop_list;
                        deferred_vdrop_list = m;
                }
        }
        for (; i < roundup2(npages, 1 << imin(oind, order)); i++) {
                m = &m_ret[i];
                KASSERT(m->order == VM_NFREEORDER,
                    ("vm_phys_alloc_contig: page %p has unexpected order %d",
                    m, m->order));
                vm_phys_free_pages(m, 0);
        }
        mtx_unlock(&vm_page_queue_free_mtx);
        while (deferred_vdrop_list != NULL) {
                vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
                deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
        }
        return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
        struct vm_freelist *fl;
        int flind, oind, pind;

        for (flind = 0; flind < vm_nfreelists; flind++) {
                db_printf("FREE LIST %d:\n"
                    "\n  ORDER (SIZE)  |  NUMBER"
                    "\n              ", flind);
                for (pind = 0; pind < VM_NFREEPOOL; pind++)
                        db_printf("  |  POOL %d", pind);
                db_printf("\n--            ");
                for (pind = 0; pind < VM_NFREEPOOL; pind++)
                        db_printf("-- --      ");
                db_printf("--\n");
                for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                        db_printf("  %2.2d (%6.6dK)", oind,
                            1 << (PAGE_SHIFT - 10 + oind));
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = vm_phys_free_queues[flind][pind];
                                db_printf("  |  %6.6d", fl[oind].lcnt);
                        }
                        db_printf("\n");
                }
                db_printf("\n");
        }
}
#endif
901 #endif