/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

struct mem_affinity *mem_affinity;

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

#define	VM_PHYS_FICTITIOUS_NSEGS	8
static struct vm_phys_fictitious_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind,
    int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Select the next memory domain for the calling thread in round-robin order.
 */
static __inline int
vm_rr_selectdomain(void)
{
	struct thread *td;

	td = curthread;
	td->td_dom_rr_idx++;
	td->td_dom_rr_idx %= vm_ndomains;
	return (td->td_dom_rr_idx);
}

/*
 * Return TRUE if the given segment mask contains a segment that overlaps
 * the physical address range [low, high); otherwise, return FALSE.
 */
boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
	struct vm_phys_seg *s;
	int idx;

	while ((idx = ffsl(mask)) != 0) {
		idx--;	/* ffsl counts from 1 */
		mask &= ~(1UL << idx);
		s = &vm_phys_segs[idx];
		if (low < s->end && high > s->start)
			return (TRUE);
	}
	return (FALSE);
}

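/*
 * Illustrative sketch, not compiled into the kernel (hence the #if 0
 * guard): the test above treats segments as half-open intervals
 * [start, end), so two ranges overlap iff each begins before the other
 * ends.  A minimal userland demonstration with hypothetical addresses:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

struct seg { uint64_t start, end; };

/* Two half-open ranges overlap iff each begins before the other ends. */
static int
overlaps(struct seg s, uint64_t low, uint64_t high)
{
	return (low < s.end && high > s.start);
}

int
main(void)
{
	struct seg s = { 0x100000, 0x200000 };	/* hypothetical segment */

	printf("%d\n", overlaps(s, 0x1f0000, 0x210000));	/* 1: overlaps */
	printf("%d\n", overlaps(s, 0x200000, 0x300000));	/* 0: merely abuts */
	return (0);
}
#endif
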
/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	/* Keep the segment array sorted by starting address. */
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
	seg->free_queues = &vm_phys_free_queues[domain][flind];
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
{
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, flind, 0);
		return;
	}
	/* Split the range along the boundaries of the affinity table. */
	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end, flind,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end, flind,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_add_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_add_seg: end is not page aligned"));
#ifdef	VM_FREELIST_ISADMA
	if (start < 16777216) {		/* 16 MB ISA DMA limit */
		if (end > 16777216) {
			vm_phys_create_seg(start, 16777216,
			    VM_FREELIST_ISADMA);
			vm_phys_create_seg(16777216, end, VM_FREELIST_DEFAULT);
		} else
			vm_phys_create_seg(start, end, VM_FREELIST_ISADMA);
		if (VM_FREELIST_ISADMA >= vm_nfreelists)
			vm_nfreelists = VM_FREELIST_ISADMA + 1;
	} else
#endif
#ifdef	VM_FREELIST_HIGHMEM
	if (end > VM_HIGHMEM_ADDRESS) {
		if (start < VM_HIGHMEM_ADDRESS) {
			vm_phys_create_seg(start, VM_HIGHMEM_ADDRESS,
			    VM_FREELIST_DEFAULT);
			vm_phys_create_seg(VM_HIGHMEM_ADDRESS, end,
			    VM_FREELIST_HIGHMEM);
		} else
			vm_phys_create_seg(start, end, VM_FREELIST_HIGHMEM);
		if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
			vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
	} else
#endif
	vm_phys_create_seg(start, end, VM_FREELIST_DEFAULT);
}

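/*
 * Illustrative sketch, not compiled into the kernel: a range that
 * straddles the 16 MB ISA DMA limit is split into two segments, one per
 * free list, exactly as the code above does.  The range values below
 * are hypothetical:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	ISA_DMA_LIMIT	16777216	/* 16 MB */

int
main(void)
{
	uint64_t start = 0x800000, end = 0x2000000;	/* 8 MB .. 32 MB */

	if (start < ISA_DMA_LIMIT && end > ISA_DMA_LIMIT) {
		printf("ISADMA segment:  [%#jx, %#jx)\n",
		    (uintmax_t)start, (uintmax_t)ISA_DMA_LIMIT);
		printf("DEFAULT segment: [%#jx, %#jx)\n",
		    (uintmax_t)ISA_DMA_LIMIT, (uintmax_t)end);
	}
	return (0);
}
#endif
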
/*
 * Initialize the physical memory allocator.
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
#ifdef VM_PHYSSEG_SPARSE
	long pages;
#endif
	int dom, flind, oind, pind, segind;

#ifdef VM_PHYSSEG_SPARSE
	pages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[pages];
		pages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
	}
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}
	mtx_init(&vm_phys_fictitious_reg_mtx, "vmfctr", NULL, MTX_DEF);
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		/* Return the upper half of the block at each smaller order. */
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}

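/*
 * Illustrative sketch, not compiled into the kernel: splitting an
 * order-3 block (8 pages) to satisfy an order-1 request (2 pages)
 * frees the upper halves at orders 2 and 1, mirroring the loop above.
 * Page indices stand in for vm_page pointers; nothing kernel-specific
 * is assumed:
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int oind = 3, order = 1, base = 0;

	while (oind > order) {
		oind--;
		/* The buddy is the upper half of the current block. */
		printf("free pages [%d, %d) at order %d\n",
		    base + (1 << oind), base + (2 << oind), oind);
	}
	printf("allocate pages [%d, %d) at order %d\n",
	    base, base + (1 << order), order);
	return (0);
}
#endif
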
/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
	vm_page_t m;
	struct vm_domain *vmd;

	m = vm_phys_paddr_to_vm_page(pa);
	m->phys_addr = pa;
	m->segind = vm_phys_paddr_to_segind(pa);
	vmd = vm_phys_domain(m);
	vmd->vmd_page_count++;
	vmd->vmd_segs |= 1UL << m->segind;
	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_add_page: page %p has unexpected order %d",
	    m, m->order));
	m->pool = VM_FREEPOOL_DEFAULT;
	mtx_lock(&vm_page_queue_free_mtx);
	vm_phys_freecnt_adj(m, 1);
	vm_phys_free_pages(m, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int dom, domain, flind;

	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_pages: order %d is out of range", order));
	for (dom = 0; dom < vm_ndomains; dom++) {
		domain = vm_rr_selectdomain();
		for (flind = 0; flind < vm_nfreelists; flind++) {
			m = vm_phys_alloc_domain_pages(domain, flind, pool,
			    order);
			if (m != NULL)
				return (m);
		}
	}
	return (NULL);
}

/*
 * Find and dequeue a free page on the given free list, with the
 * specified pool and order.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int flind, int pool, int order)
{
	vm_page_t m;
	int dom, domain;

	KASSERT(flind < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
	for (dom = 0; dom < vm_ndomains; dom++) {
		domain = vm_rr_selectdomain();
		m = vm_phys_alloc_domain_pages(domain, flind, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int oind, pind;
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

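/*
 * Illustrative sketch, not compiled into the kernel: within a segment,
 * the vm_page for a physical address is found by indexing first_page
 * with the page offset atop(pa - start).  The segment start and address
 * below are hypothetical; 4 KB pages are assumed:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	PAGE_SHIFT	12		/* assumes 4 KB pages */
#define	atop(x)		((x) >> PAGE_SHIFT)

int
main(void)
{
	uint64_t seg_start = 0x100000;	/* hypothetical segment start */
	uint64_t pa = 0x104000;

	/* Page 4 of the segment: (0x104000 - 0x100000) >> 12 == 4. */
	printf("index into first_page[]: %ju\n",
	    (uintmax_t)atop(pa - seg_start));
	return (0);
}
#endif
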
vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
	boolean_t malloced;
#endif

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	if (pi >= first_page && pi < vm_page_array_size + first_page) {
		if (atop(end) >= vm_page_array_size + first_page)
			return (EINVAL);
		fp = &vm_page_array[pi - first_page];
		malloced = FALSE;
	} else
#endif
	{
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK | M_ZERO);
#ifdef VM_PHYSSEG_DENSE
		malloced = TRUE;
#endif
	}
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		fp[i].oflags &= ~VPO_UNMANAGED;
		fp[i].busy_lock = VPB_UNBUSIED;
	}
	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
	if (malloced)
#endif
		free(fp, M_FICT_PAGES);
	return (EBUSY);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
#endif

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
#endif

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
			if (pi < first_page || atop(end) >= vm_page_array_size)
#endif
				free(fp, M_FICT_PAGES);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0, ("Unregistering not registered fictitious range"));
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			/* Toggling the buddy bit locates the buddy block. */
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

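/*
 * Illustrative sketch, not compiled into the kernel: the buddy of a
 * 2^order-page block is found by toggling bit (PAGE_SHIFT + order) of
 * its physical address, and the merged block starts at the address with
 * that bit (and all lower bits) cleared.  Assumes 4 KB pages and a
 * hypothetical address:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	PAGE_SHIFT	12

int
main(void)
{
	uint64_t pa = 0x5000;	/* one free page at 20 KB */
	int order = 0;

	/* XOR locates the buddy: 0x5000 <-> 0x4000 at order 0. */
	printf("buddy:  %#jx\n",
	    (uintmax_t)(pa ^ ((uint64_t)1 << (PAGE_SHIFT + order))));
	/* Masking yields the start of the coalesced order-1 block. */
	order++;
	printf("merged: %#jx\n",
	    (uintmax_t)(pa & ~(((uint64_t)1 << (PAGE_SHIFT + order)) - 1)));
	return (0);
}
#endif
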
/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}

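/*
 * Illustrative sketch, not compiled into the kernel: the two loops
 * above decompose an arbitrary run of pages into power-of-two blocks,
 * first constrained by address alignment, then by the residual count.
 * For 13 pages starting at page frame 4 the blocks are 4 + 8 + 1.
 * Assumes ffsl()/flsl() from <strings.h>, as on FreeBSD:
 */
#if 0
#include <stdio.h>
#include <strings.h>	/* ffsl(), flsl() */

int
main(void)
{
	long pfn = 4;
	unsigned long npages = 13;
	int n, order;

	for (;; npages -= n, pfn += n) {
		/* Largest order allowed by the current alignment. */
		order = ffsl(pfn) - 1;
		n = 1 << order;
		if (npages < (unsigned long)n)
			break;
		printf("free %d pages at pfn %ld (order %d)\n", n, pfn, order);
	}
	for (; npages > 0; npages -= n, pfn += n) {
		/* Largest order that fits in the residual count. */
		order = flsl(npages) - 1;
		n = 1 << order;
		printf("free %d pages at pfn %ld (order %d)\n", n, pfn, order);
	}
	return (0);
}
#endif
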
/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^
		    ((vm_paddr_t)1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

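/*
 * Illustrative sketch, not compiled into the kernel: extracting one
 * page from a free order-2 block returns the non-containing half to
 * the free lists at each step.  Extracting page 1 from pages [0, 4)
 * frees [2, 4) at order 1 and [0, 1) at order 0, mirroring the loop
 * above.  Page indices stand in for physical addresses:
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int target = 1;		/* page to extract */
	int set = 0;		/* first page of the containing block */
	int order = 2;		/* the block holds 1 << 2 == 4 pages */
	int half;

	while (order > 0) {
		order--;
		half = set + (1 << order);	/* first page of upper half */
		if (target < half) {
			printf("free [%d, %d) at order %d\n",
			    half, half + (1 << order), order);
		} else {
			printf("free [%d, %d) at order %d\n",
			    set, half, order);
			set = half;
		}
	}
	printf("extracted page %d\n", set);
	return (0);
}
#endif
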
/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
	static struct vm_freelist *fl;
	static int flind, oind, pind;
	vm_page_t m, m_tmp;
	int domain;

	domain = vm_rr_selectdomain();
	fl = vm_phys_free_queues[domain][0][0];
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
		for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
			if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
				vm_phys_unfree_page(m_tmp);
				vm_phys_freecnt_adj(m, -1);
				mtx_unlock(&vm_page_queue_free_mtx);
				pmap_zero_page_idle(m_tmp);
				m_tmp->flags |= PG_ZERO;
				mtx_lock(&vm_page_queue_free_mtx);
				vm_phys_freecnt_adj(m, 1);
				vm_phys_free_pages(m_tmp, 0);
				vm_page_zero_count++;
				cnt_prezero++;
				return (TRUE);
			}
		}
	}
	/* Advance the persistent cursor to the next queue. */
	oind++;
	if (oind == VM_NFREEORDER) {
		oind = 0;
		pind++;
		if (pind == VM_NFREEPOOL) {
			pind = 0;
			flind++;
			if (flind == vm_nfreelists)
				flind = 0;
		}
		fl = vm_phys_free_queues[domain][flind][pind];
	}
	return (FALSE);
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_last, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int dom, domain, flind, oind, order, pind;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	size = npages << PAGE_SHIFT;
	KASSERT(size != 0,
	    ("vm_phys_alloc_contig: size must not be 0"));
	KASSERT((alignment & (alignment - 1)) == 0,
	    ("vm_phys_alloc_contig: alignment must be a power of 2"));
	KASSERT((boundary & (boundary - 1)) == 0,
	    ("vm_phys_alloc_contig: boundary must be a power of 2"));
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	dom = 0;
restartdom:
	domain = vm_rr_selectdomain();
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = &vm_phys_free_queues[domain][flind][pind][0];
				TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
					/*
					 * A free list may contain physical pages
					 * from one or more segments.
					 */
					seg = &vm_phys_segs[m_ret->segind];
					if (seg->start > high ||
					    low >= seg->end)
						continue;

					/*
					 * Is the size of this allocation request
					 * larger than the largest block size?
					 */
					if (order >= VM_NFREEORDER) {
						/*
						 * Determine if a sufficient number
						 * of subsequent blocks to satisfy
						 * the allocation request are free.
						 */
						pa = VM_PAGE_TO_PHYS(m_ret);
						pa_last = pa + size;
						for (;;) {
							pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
							if (pa >= pa_last)
								break;
							if (pa < seg->start ||
							    pa >= seg->end)
								break;
							m = &seg->first_page[atop(pa - seg->start)];
							if (m->order != VM_NFREEORDER - 1)
								break;
						}
						/* If not, continue to the next block. */
						if (pa < pa_last)
							continue;
					}

					/*
					 * Determine if the blocks are within the given range,
					 * satisfy the given alignment, and do not cross the
					 * given boundary.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					if (pa >= low &&
					    pa + size <= high &&
					    (pa & (alignment - 1)) == 0 &&
					    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
						goto done;
				}
			}
		}
	}
	if (++dom < vm_ndomains)
		goto restartdom;
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

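/*
 * Illustrative sketch, not compiled into the kernel: the final test
 * above accepts a candidate block only if it is aligned and does not
 * cross a boundary.  The XOR trick works because pa and pa + size - 1
 * differ above the boundary mask exactly when the run crosses a
 * multiple of "boundary".  The sizes and addresses are hypothetical:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t size = 3 << 12;	/* three 4 KB pages */
	uint64_t alignment = 1 << 13;	/* 8 KB */
	uint64_t boundary = 1 << 16;	/* 64 KB */
	uint64_t pa;

	/* 0xe000 is rejected: [0xe000, 0x11000) crosses 0x10000. */
	for (pa = 0xc000; pa <= 0x10000; pa += 0x2000)
		printf("pa %#jx: %s\n", (uintmax_t)pa,
		    ((pa & (alignment - 1)) == 0 &&
		    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0) ?
		    "ok" : "rejected");
	return (0);
}
#endif
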
#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif