/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#if MAXMEMDOM > 1
#include <sys/proc.h>
#endif
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

struct mem_affinity *mem_affinity;

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

#define VM_PHYS_FICTITIOUS_NSEGS	8
static struct vm_phys_fictitious_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
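
/*
 * The queues are indexed as [domain][free list][pool][order].  An entry at
 * order "oind" links buddy blocks of 2^oind contiguous pages, that is,
 * PAGE_SIZE << oind bytes; the sysctl handler below reports each order's
 * block size as "1 << (PAGE_SHIFT - 10 + oind)" kilobytes.
 */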

static int vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define	VM_ISADMA_BOUNDARY	16777216
#endif
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Select a memory domain for an allocation in round-robin fashion.
 */
static __inline int
vm_rr_selectdomain(void)
{
#if MAXMEMDOM > 1
	struct thread *td;

	td = curthread;

	td->td_dom_rr_idx++;
	td->td_dom_rr_idx %= vm_ndomains;
	return (td->td_dom_rr_idx);
#else
	return (0);
#endif
}

/*
 * Return TRUE if the given segment mask contains a segment that intersects
 * the physical address range [low, high); otherwise, return FALSE.
 */
boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
	struct vm_phys_seg *s;
	int idx;

	while ((idx = ffsl(mask)) != 0) {
		idx--;	/* ffsl counts from 1 */
		mask &= ~(1UL << idx);
		s = &vm_phys_segs[idx];
		if (low < s->end && high > s->start)
			return (TRUE);
	}
	return (FALSE);
}
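
/*
 * For example, a mask of 0x5 names segments 0 and 2; the loop above visits
 * each set bit in turn (ffsl() returns a 1-based bit index, hence the
 * decrement) and reports an intersection as soon as one of the named
 * segments overlaps [low, high).
 */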

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	/* Keep vm_phys_segs[] sorted by increasing start address. */
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
}
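
/*
 * For example (hypothetical affinity table): with mem_affinity entries
 * { 0x0 - 0x80000000, domain 0 } and { 0x80000000 - 0x200000000, domain 1 },
 * the call vm_phys_create_seg(0x40000000, 0x100000000) creates two segments:
 * [0x40000000, 0x80000000) in domain 0 and [0x80000000, 0x100000000) in
 * domain 1.
 */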

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef VM_FREELIST_ISADMA
	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
		paddr = VM_ISADMA_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}
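
/*
 * For example, on a configuration with VM_FREELIST_DMA32 and the 4 GB
 * VM_DMA32_BOUNDARY defined above, a segment spanning [0x80000000,
 * 0x140000000) is split here into [0x80000000, 0x100000000) and
 * [0x100000000, 0x140000000), so that no segment straddles a free list
 * boundary.
 */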

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
		else
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef VM_FREELIST_DMA32
		if (
#ifdef VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
			KASSERT(flind >= 0,
			    ("vm_phys_init: ISADMA flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	mtx_init(&vm_phys_fictitious_reg_mtx, "vmfctr", NULL, MTX_DEF);
}
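
/*
 * Worked example of the flind mapping computed above (hypothetical
 * configuration): suppose VM_FREELIST_DEFAULT and VM_FREELIST_LOWMEM are
 * populated but VM_FREELIST_DMA32 is compiled in and unpopulated.  The 0/1
 * creation flags become running totals and are then decremented, so each
 * created list receives a distinct index starting at 0 (VM_FREELIST_DEFAULT,
 * which the CTASSERT above pins to 0, always maps to flind 0), while an
 * uncreated list's stale entry is never consulted because no segment
 * selects it.
 */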

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}
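
/*
 * For example, splitting an order-3 block (8 pages) down to order 0 frees
 * the upper halves as buddies of orders 2, 1, and 0: pages [4-7] go to the
 * order-2 list, pages [2-3] to order 1, and page [1] to order 0, leaving
 * page [0] as the order-0 allocation.
 */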

/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
	vm_page_t m;
	struct vm_domain *vmd;

	cnt.v_page_count++;
	m = vm_phys_paddr_to_vm_page(pa);
	m->phys_addr = pa;
	m->queue = PQ_NONE;
	m->segind = vm_phys_paddr_to_segind(pa);
	vmd = vm_phys_domain(m);
	vmd->vmd_page_count++;
	vmd->vmd_segs |= 1UL << m->segind;
	m->flags = PG_FREE;
	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_add_page: page %p has unexpected order %d",
	    m, m->order));
	m->pool = VM_FREEPOOL_DEFAULT;
	pmap_page_init(m);
	mtx_lock(&vm_page_queue_free_mtx);
	vm_phys_freecnt_adj(m, 1);
	vm_phys_free_pages(m, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int dom, domain, flind;

	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_pages: order %d is out of range", order));

	for (dom = 0; dom < vm_ndomains; dom++) {
		domain = vm_rr_selectdomain();
		for (flind = 0; flind < vm_nfreelists; flind++) {
			m = vm_phys_alloc_domain_pages(domain, flind, pool,
			    order);
			if (m != NULL)
				return (m);
		}
	}
	return (NULL);
}
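
/*
 * Usage sketch (illustrative, not taken from a caller in this file): with
 * the free queue lock held, a single page from the default pool would be
 * obtained by
 *
 *	mtx_lock(&vm_page_queue_free_mtx);
 *	m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, 0);
 *	mtx_unlock(&vm_page_queue_free_mtx);
 *
 * A NULL return means that no free list in any domain could satisfy the
 * request.
 */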

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int freelist, int pool, int order)
{
	vm_page_t m;
	int dom, domain;

	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
	for (dom = 0; dom < vm_ndomains; dom++) {
		domain = vm_rr_selectdomain();
		m = vm_phys_alloc_domain_pages(domain,
		    vm_freelist_to_flind[freelist], pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	vm_page_t m;
	int oind, pind;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
	boolean_t malloced;
#endif

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	if (pi >= first_page && pi < vm_page_array_size + first_page) {
		if (atop(end) >= vm_page_array_size + first_page)
			return (EINVAL);
		fp = &vm_page_array[pi - first_page];
		malloced = FALSE;
	} else
#endif
	{
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK | M_ZERO);
#ifdef VM_PHYSSEG_DENSE
		malloced = TRUE;
#endif
	}
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		fp[i].oflags &= ~VPO_UNMANAGED;
		fp[i].busy_lock = VPB_UNBUSIED;
	}
	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
	if (malloced)
#endif
		free(fp, M_FICT_PAGES);
	return (EBUSY);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
#endif

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
#endif

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
			if (pi < first_page || atop(end) >= vm_page_array_size)
#endif
				free(fp, M_FICT_PAGES);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0, ("Unregistering not registered fictitious range"));
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}
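
/*
 * Example of the buddy arithmetic above, assuming 4 KB pages: freeing an
 * order-0 page at 0x3000 yields buddy address 0x3000 ^ 0x1000 = 0x2000.
 * If that page is free at order 0, the pair merges; the base becomes
 * 0x3000 & ~0x1fff = 0x2000 at order 1, whose buddy in turn is
 * 0x2000 ^ 0x2000 = 0x0000, and so on until a buddy is missing, busy, or
 * the maximum order is reached.
 */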

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}
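
/*
 * Example, assuming 4 KB pages: freeing npages = 5 starting at physical
 * address 0x6000 (page frame 6) proceeds as 2 + 2 + 1.  The first loop is
 * limited by alignment: ffsl(6) - 1 = 1, so an order-1 block is freed at
 * frame 6; at frame 8 an order-3 block would satisfy the alignment but
 * exceeds the 3 remaining pages.  The second loop is limited by size:
 * flsl(3) - 1 = 1 frees an order-1 block at frame 8, and the final page is
 * freed at order 0.
 */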

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}
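
/*
 * Example of the shrinking loop above, assuming 4 KB pages: if "m" is the
 * page at 0xa000 inside a free order-2 block "m_set" at 0x8000, the first
 * pass computes pa_half = 0x8000 ^ 0x2000 = 0xa000; "m" is not below that,
 * so the low half at 0x8000 is returned to the order-1 list and "m_set"
 * becomes 0xa000.  The next pass computes pa_half = 0xa000 ^ 0x1000 =
 * 0xb000 and returns that half at order 0, leaving "m_set" == "m".
 */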

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
	static struct vm_freelist *fl;
	static int flind, oind, pind;
	vm_page_t m, m_tmp;
	int domain;

	domain = vm_rr_selectdomain();
	fl = vm_phys_free_queues[domain][0][0];
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;;) {
		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
					vm_phys_unfree_page(m_tmp);
					vm_phys_freecnt_adj(m, -1);
					mtx_unlock(&vm_page_queue_free_mtx);
					pmap_zero_page_idle(m_tmp);
					m_tmp->flags |= PG_ZERO;
					mtx_lock(&vm_page_queue_free_mtx);
					vm_phys_freecnt_adj(m, 1);
					vm_phys_free_pages(m_tmp, 0);
					vm_page_zero_count++;
					cnt_prezero++;
					return (TRUE);
				}
			}
		}
		oind++;
		if (oind == VM_NFREEORDER) {
			oind = 0;
			pind++;
			if (pind == VM_NFREEPOOL) {
				pind = 0;
				flind++;
				if (flind == vm_nfreelists)
					flind = 0;
			}
			fl = vm_phys_free_queues[domain][flind][pind];
		}
	}
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_last, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int dom, domain, flind, oind, order, pind;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	size = npages << PAGE_SHIFT;
	KASSERT(size != 0,
	    ("vm_phys_alloc_contig: size must not be 0"));
	KASSERT((alignment & (alignment - 1)) == 0,
	    ("vm_phys_alloc_contig: alignment must be a power of 2"));
	KASSERT((boundary & (boundary - 1)) == 0,
	    ("vm_phys_alloc_contig: boundary must be a power of 2"));
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	dom = 0;
restartdom:
	domain = vm_rr_selectdomain();
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
		    oind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = &vm_phys_free_queues[domain][flind][pind][0];
				TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
					/*
					 * A free list may contain physical pages
					 * from one or more segments.
					 */
					seg = &vm_phys_segs[m_ret->segind];
					if (seg->start > high ||
					    low >= seg->end)
						continue;

					/*
					 * Is the size of this allocation request
					 * larger than the largest block size?
					 */
					if (order >= VM_NFREEORDER) {
						/*
						 * Determine if a sufficient number
						 * of subsequent blocks to satisfy
						 * the allocation request are free.
						 */
						pa = VM_PAGE_TO_PHYS(m_ret);
						pa_last = pa + size;
						for (;;) {
							pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
							if (pa >= pa_last)
								break;
							if (pa < seg->start ||
							    pa >= seg->end)
								break;
							m = &seg->first_page[atop(pa - seg->start)];
							if (m->order != VM_NFREEORDER - 1)
								break;
						}
						/* If not, continue to the next block. */
						if (pa < pa_last)
							continue;
					}

					/*
					 * Determine if the blocks are within the given range,
					 * satisfy the given alignment, and do not cross the
					 * given boundary.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					if (pa >= low &&
					    pa + size <= high &&
					    (pa & (alignment - 1)) == 0 &&
					    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
						goto done;
				}
			}
		}
	}
	if (++dom < vm_ndomains)
		goto restartdom;
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}
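
/*
 * The boundary test above exploits the fact that "pa ^ (pa + size - 1)"
 * has a bit set at or above log2(boundary) exactly when the first and last
 * bytes of the run lie in different boundary-sized windows.  For example,
 * with a 64 KB boundary, a 16 KB run at 0x1c000 passes (it ends at 0x1ffff,
 * inside the same 64 KB window), while one at 0x1e000 fails because it
 * crosses 0x20000.  A boundary of 0 makes "~(boundary - 1)" zero, so the
 * test always passes.
 */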

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif