/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_reserv.h>
/*
 * VM_FREELIST_DEFAULT is split into VM_NDOMAIN lists, one for each
 * domain.  These extra lists are stored at the end of the regular
 * free lists starting with VM_NFREELIST.
 */
#define	VM_RAW_NFREELIST	(VM_NFREELIST + VM_NDOMAIN - 1)
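/*
 * For example (values illustrative only), with VM_NFREELIST = 3 and
 * VM_NDOMAIN = 2, the raw table holds VM_RAW_NFREELIST = 4 lists:
 * indices 0-2 are the regular free lists, and index 3 (VM_NFREELIST)
 * holds the VM_FREELIST_DEFAULT pages belonging to domain 1.
 */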
struct vm_phys_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
	int		domain;
	struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER];
};

struct mem_affinity *mem_affinity;

static struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];

static int vm_phys_nsegs;
#define	VM_PHYS_FICTITIOUS_NSEGS	8
static struct vm_phys_fictitious_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");
static struct vm_freelist
    vm_phys_free_queues[VM_RAW_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
static struct vm_freelist
(*vm_phys_lookup_lists[VM_NDOMAIN][VM_RAW_NFREELIST])[VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;
static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#if VM_NDOMAIN > 1
static int sysctl_vm_phys_lookup_lists(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_lookup_lists, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_lookup_lists, "A", "Phys Lookup Lists");
#endif
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind,
    int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);
/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (flind = 0; flind < vm_nfreelists; flind++) {
		sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
		    "\n  ORDER (SIZE)  |  NUMBER"
		    "\n              ", flind);
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			sbuf_printf(&sbuf, "  |  POOL %d", pind);
		sbuf_printf(&sbuf, "\n--            ");
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			sbuf_printf(&sbuf, "-- --      ");
		sbuf_printf(&sbuf, "--\n");
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
			    1 << (PAGE_SHIFT - 10 + oind));
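			/*
			 * The size column above is printed in kilobytes:
			 * an order-oind block spans 2^oind pages of
			 * 2^PAGE_SHIFT bytes each, which is
			 * 2^(PAGE_SHIFT - 10 + oind) KB.
			 */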
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[flind][pind];
				sbuf_printf(&sbuf, "  |  %6d", fl[oind].lcnt);
			}
			sbuf_printf(&sbuf, "\n");
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#if VM_NDOMAIN > 1
/*
 * Outputs the set of free list lookup lists.
 */
static int
sysctl_vm_phys_lookup_lists(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int domain, error, flind, ndomains;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	ndomains = vm_nfreelists - VM_NFREELIST + 1;
	for (domain = 0; domain < ndomains; domain++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n\n", domain);
		for (flind = 0; flind < vm_nfreelists; flind++)
			sbuf_printf(&sbuf, "  [%d]:\t%p\n", flind,
			    vm_phys_lookup_lists[domain][flind]);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif
/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain)
{
	struct vm_phys_seg *seg;
#ifdef VM_PHYSSEG_SPARSE
	long pages;
	int segind;

	pages = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		pages += atop(seg->end - seg->start);
	}
#endif
	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
#ifdef VM_PHYSSEG_SPARSE
	seg->first_page = &vm_page_array[pages];
#else
	seg->first_page = PHYS_TO_VM_PAGE(start);
#endif
#if VM_NDOMAIN > 1
	if (flind == VM_FREELIST_DEFAULT && domain != 0) {
		flind = VM_NFREELIST + (domain - 1);
		if (flind >= vm_nfreelists)
			vm_nfreelists = flind + 1;
	}
#endif
	seg->free_queues = &vm_phys_free_queues[flind];
}
static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
{
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, flind, 0);
		return;
	}
	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end, flind,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end, flind,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
}
/*
 * Initialize the physical memory allocator.
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	int flind, i, oind, pind;
#if VM_NDOMAIN > 1
	int ndomains, j;
#endif

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef	VM_FREELIST_ISADMA
		if (phys_avail[i] < 16777216) {
			if (phys_avail[i + 1] > 16777216) {
				vm_phys_create_seg(phys_avail[i], 16777216,
				    VM_FREELIST_ISADMA);
				vm_phys_create_seg(16777216, phys_avail[i + 1],
				    VM_FREELIST_DEFAULT);
			} else
				vm_phys_create_seg(phys_avail[i],
				    phys_avail[i + 1], VM_FREELIST_ISADMA);
			if (VM_FREELIST_ISADMA >= vm_nfreelists)
				vm_nfreelists = VM_FREELIST_ISADMA + 1;
		} else
#endif
#ifdef	VM_FREELIST_HIGHMEM
		if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) {
			if (phys_avail[i] < VM_HIGHMEM_ADDRESS) {
				vm_phys_create_seg(phys_avail[i],
				    VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT);
				vm_phys_create_seg(VM_HIGHMEM_ADDRESS,
				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
			} else
				vm_phys_create_seg(phys_avail[i],
				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
			if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
				vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
		} else
#endif
		vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],
		    VM_FREELIST_DEFAULT);
	}
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = vm_phys_free_queues[flind][pind];
			for (oind = 0; oind < VM_NFREEORDER; oind++)
				TAILQ_INIT(&fl[oind].pl);
		}
	}
#if VM_NDOMAIN > 1
	/*
	 * Build a free list lookup list for each domain.  All of the
	 * memory domain lists are inserted at the VM_FREELIST_DEFAULT
	 * index in a round-robin order starting with the current
	 * domain.
	 */
	ndomains = vm_nfreelists - VM_NFREELIST + 1;
	for (flind = 0; flind < VM_FREELIST_DEFAULT; flind++)
		for (i = 0; i < ndomains; i++)
			vm_phys_lookup_lists[i][flind] =
			    &vm_phys_free_queues[flind];
	for (i = 0; i < ndomains; i++)
		for (j = 0; j < ndomains; j++) {
			flind = (i + j) % ndomains;
			if (flind == 0)
				flind = VM_FREELIST_DEFAULT;
			else
				flind += VM_NFREELIST - 1;
			vm_phys_lookup_lists[i][VM_FREELIST_DEFAULT + j] =
			    &vm_phys_free_queues[flind];
		}
	for (flind = VM_FREELIST_DEFAULT + 1; flind < VM_NFREELIST;
	     flind++)
		for (i = 0; i < ndomains; i++)
			vm_phys_lookup_lists[i][flind + ndomains - 1] =
			    &vm_phys_free_queues[flind];
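	/*
	 * Worked example (values illustrative): with ndomains = 2 and
	 * VM_NFREELIST = 2, domain 0's lookup list visits the default
	 * queues as { default(dom0), default(dom1) } while domain 1's
	 * visits { default(dom1), default(dom0) }, so each CPU prefers
	 * pages from its own domain before falling back to the other.
	 */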
#else
	for (flind = 0; flind < vm_nfreelists; flind++)
		vm_phys_lookup_lists[0][flind] = &vm_phys_free_queues[flind];
#endif

	mtx_init(&vm_phys_fictitious_reg_mtx, "vmfctr", NULL, MTX_DEF);
}
/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		m_buddy->order = oind;
		TAILQ_INSERT_HEAD(&fl[oind].pl, m_buddy, pageq);
		fl[oind].lcnt++;
	}
}
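/*
 * For example, carving an order-0 page out of an order-3 block makes
 * the loop above return the upper halves as free buddies of orders 2,
 * 1, and 0: eight pages become one allocated page plus free runs of
 * four, two, and one pages.
 */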
/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
	vm_page_t m;

	cnt.v_page_count++;
	m = vm_phys_paddr_to_vm_page(pa);
	m->phys_addr = pa;
	m->queue = PQ_NONE;
	m->segind = vm_phys_paddr_to_segind(pa);
	m->flags = PG_FREE;
	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_add_page: page %p has unexpected order %d",
	    m, m->order));
	m->pool = VM_FREEPOOL_DEFAULT;
	pmap_page_init(m);
	mtx_lock(&vm_page_queue_free_mtx);
	cnt.v_free_count++;
	vm_phys_free_pages(m, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}
/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int flind;

	for (flind = 0; flind < vm_nfreelists; flind++) {
		m = vm_phys_alloc_freelist_pages(flind, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}
/*
 * Find and dequeue a free page on the given free list, with the
 * specified pool and order.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int domain, oind, pind;
	vm_page_t m;

	KASSERT(flind < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

#if VM_NDOMAIN > 1
	domain = PCPU_GET(domain);
#else
	domain = 0;
#endif
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = (*vm_phys_lookup_lists[domain][flind])[pool];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			TAILQ_REMOVE(&fl[oind].pl, m, pageq);
			fl[oind].lcnt--;
			m->order = VM_NFREEORDER;
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = (*vm_phys_lookup_lists[domain][flind])[pind];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				TAILQ_REMOVE(&alt[oind].pl, m, pageq);
				alt[oind].lcnt--;
				m->order = VM_NFREEORDER;
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}
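/*
 * Design note: the pool-transfer fallback above searches from the
 * largest order downward, so a cross-pool steal takes the biggest
 * available block and splits it within the destination pool, rather
 * than nibbling small blocks away from the other pools.
 */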
/*
 * Allocate physical memory from phys_avail[].
 */
vm_paddr_t
vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment)
{
	vm_paddr_t pa;
	int i;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (phys_avail[i + 1] - phys_avail[i] < size)
			continue;
		pa = phys_avail[i];
		phys_avail[i] += size;
		return (pa);
	}
	panic("vm_phys_bootstrap_alloc");
}
/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}
vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
	boolean_t malloced;
#endif

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	if (pi >= first_page && atop(end) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		malloced = FALSE;
	} else
#endif
	{
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK | M_ZERO);
#ifdef VM_PHYSSEG_DENSE
		malloced = TRUE;
#endif
	}
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		pmap_page_init(&fp[i]);
		fp[i].oflags &= ~(VPO_BUSY | VPO_UNMANAGED);
	}
	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
	if (malloced)
#endif
		free(fp, M_FICT_PAGES);
	return (EBUSY);
}
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
#endif

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
#endif

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
			if (pi < first_page || atop(end) >= vm_page_array_size)
#endif
				free(fp, M_FICT_PAGES);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0, ("Unregistering not registered fictitious range"));
}
/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}
/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_buddy;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	pa = VM_PAGE_TO_PHYS(m);
	seg = &vm_phys_segs[m->segind];
	while (order < VM_NFREEORDER - 1) {
		pa_buddy = pa ^ (1 << (PAGE_SHIFT + order));
		if (pa_buddy < seg->start ||
		    pa_buddy >= seg->end)
			break;
		m_buddy = &seg->first_page[atop(pa_buddy - seg->start)];
		if (m_buddy->order != order)
			break;
		fl = (*seg->free_queues)[m_buddy->pool];
		TAILQ_REMOVE(&fl[m_buddy->order].pl, m_buddy, pageq);
		fl[m_buddy->order].lcnt--;
		m_buddy->order = VM_NFREEORDER;
		if (m_buddy->pool != m->pool)
			vm_phys_set_pool(m->pool, m_buddy, order);
		order++;
		pa &= ~((1 << (PAGE_SHIFT + order)) - 1);
		m = &seg->first_page[atop(pa - seg->start)];
	}
	m->order = order;
	fl = (*seg->free_queues)[m->pool];
	TAILQ_INSERT_TAIL(&fl[order].pl, m, pageq);
	fl[order].lcnt++;
}
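/*
 * Worked example of the buddy arithmetic above, assuming 4KB pages
 * (PAGE_SHIFT = 12): freeing the order-0 page at pa 0x5000 computes
 * pa_buddy = 0x5000 ^ 0x1000 = 0x4000.  If that page is also a free
 * order-0 block, the pair merges into the order-1 block at
 * 0x5000 & ~0x1fff = 0x4000, and coalescing retries at order 1.
 */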
/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}
/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	TAILQ_REMOVE(&fl[order].pl, m_set, pageq);
	fl[order].lcnt--;
	m_set->order = VM_NFREEORDER;
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		m_tmp->order = order;
		TAILQ_INSERT_HEAD(&fl[order].pl, m_tmp, pageq);
		fl[order].lcnt++;
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}
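/*
 * For example (PAGE_SHIFT = 12), extracting page 0x3000 from a free
 * order-2 block at 0x0 (pages 0x0-0x3fff) returns 0x0-0x1fff to the
 * free lists at order 1, then 0x2000 at order 0, leaving only page
 * 0x3000 removed from the free lists.
 */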
/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
	static struct vm_freelist *fl = vm_phys_free_queues[0][0];
	static int flind, oind, pind;
	vm_page_t m, m_tmp;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;;) {
		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, pageq) {
			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
					vm_phys_unfree_page(m_tmp);
					cnt.v_free_count--;
					mtx_unlock(&vm_page_queue_free_mtx);
					pmap_zero_page_idle(m_tmp);
					m_tmp->flags |= PG_ZERO;
					mtx_lock(&vm_page_queue_free_mtx);
					cnt.v_free_count++;
					vm_phys_free_pages(m_tmp, 0);
					vm_page_zero_count++;
					cnt_prezero++;
					return (TRUE);
				}
			}
		}
		oind++;
		if (oind == VM_NFREEORDER) {
			oind = 0;
			pind++;
			if (pind == VM_NFREEPOOL) {
				pind = 0;
				flind++;
				if (flind == vm_nfreelists)
					flind = 0;
			}
			fl = vm_phys_free_queues[flind][pind];
		}
	}
}
/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, unsigned long boundary)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	struct vnode *vp;
	vm_paddr_t pa, pa_last, size;
	vm_page_t deferred_vdrop_list, m, m_ret;
	int domain, flind, i, oind, order, pind;

#if VM_NDOMAIN > 1
	domain = PCPU_GET(domain);
#else
	domain = 0;
#endif
	size = npages << PAGE_SHIFT;
	KASSERT(size != 0,
	    ("vm_phys_alloc_contig: size must not be 0"));
	KASSERT((alignment & (alignment - 1)) == 0,
	    ("vm_phys_alloc_contig: alignment must be a power of 2"));
	KASSERT((boundary & (boundary - 1)) == 0,
	    ("vm_phys_alloc_contig: boundary must be a power of 2"));
	deferred_vdrop_list = NULL;
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
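	/*
	 * E.g., npages = 3 exits the loop above with order = 2: the
	 * request is satisfied from an order-2 (4-page) block and the
	 * surplus page is returned to the free lists further below.
	 */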
	mtx_lock(&vm_page_queue_free_mtx);
#if VM_NRESERVLEVEL > 0
retry:
#endif
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = (*vm_phys_lookup_lists[domain][flind])
				    [pind];
				TAILQ_FOREACH(m_ret, &fl[oind].pl, pageq) {
					/*
					 * A free list may contain physical pages
					 * from one or more segments.
					 */
					seg = &vm_phys_segs[m_ret->segind];
					if (seg->start > high ||
					    low >= seg->end)
						continue;

					/*
					 * Is the size of this allocation request
					 * larger than the largest block size?
					 */
					if (order >= VM_NFREEORDER) {
						/*
						 * Determine if a sufficient number
						 * of subsequent blocks to satisfy
						 * the allocation request are free.
						 */
						pa = VM_PAGE_TO_PHYS(m_ret);
						pa_last = pa + size;
						for (;;) {
							pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
							if (pa >= pa_last)
								break;
							if (pa < seg->start ||
							    pa >= seg->end)
								break;
							m = &seg->first_page[atop(pa - seg->start)];
							if (m->order != VM_NFREEORDER - 1)
								break;
						}
						/* If not, continue to the next block. */
						if (pa < pa_last)
							continue;
					}

					/*
					 * Determine if the blocks are within the given range,
					 * satisfy the given alignment, and do not cross the
					 * given boundary.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					if (pa >= low &&
					    pa + size <= high &&
					    (pa & (alignment - 1)) == 0 &&
					    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
						goto done;
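					/*
					 * The boundary test above holds exactly
					 * when "pa" and "pa + size - 1" agree in
					 * every bit at or above log2(boundary),
					 * i.e. the first and last byte of the run
					 * lie in the same boundary-sized window.
					 * A zero boundary makes the mask 0, so
					 * the test passes trivially.
					 */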
				}
			}
		}
	}
#if VM_NRESERVLEVEL > 0
	if (vm_reserv_reclaim_contig(size, low, high, alignment, boundary))
		goto retry;
#endif
	mtx_unlock(&vm_page_queue_free_mtx);
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		TAILQ_REMOVE(&fl[m->order].pl, m, pageq);
		fl[m->order].lcnt--;
		m->order = VM_NFREEORDER;
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	for (i = 0; i < npages; i++) {
		m = &m_ret[i];
		vp = vm_page_alloc_init(m);
		if (vp != NULL) {
			/*
			 * Enqueue the vnode for deferred vdrop().
			 *
			 * Unmanaged pages don't use "pageq", so it
			 * can be safely abused to construct a short-
			 * lived queue of vnodes.
			 */
			m->pageq.tqe_prev = (void *)vp;
			m->pageq.tqe_next = deferred_vdrop_list;
			deferred_vdrop_list = m;
		}
	}
	for (; i < roundup2(npages, 1 << imin(oind, order)); i++) {
		m = &m_ret[i];
		KASSERT(m->order == VM_NFREEORDER,
		    ("vm_phys_alloc_contig: page %p has unexpected order %d",
		    m, m->order));
		vm_phys_free_pages(m, 0);
	}
	mtx_unlock(&vm_page_queue_free_mtx);
	while (deferred_vdrop_list != NULL) {
		vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
		deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
	}
	return (m_ret);
}
#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind;

	for (flind = 0; flind < vm_nfreelists; flind++) {
		db_printf("FREE LIST %d:\n"
		    "\n  ORDER (SIZE)  |  NUMBER"
		    "\n              ", flind);
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			db_printf("  |  POOL %d", pind);
		db_printf("\n--            ");
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			db_printf("-- --      ");
		db_printf("--\n");
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			db_printf("  %2.2d (%6.6dK)", oind,
			    1 << (PAGE_SHIFT - 10 + oind));
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[flind][pind];
				db_printf("  |  %6.6d", fl[oind].lcnt);
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif