/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");
#ifdef NUMA
struct mem_affinity __read_mostly *mem_affinity;
int __read_mostly *mem_locality;
#endif

int __read_mostly vm_ndomains = 1;
domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);

struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
int __read_mostly vm_phys_nsegs;
static struct vm_phys_seg vm_phys_early_segs[8];
static int vm_phys_early_nsegs;
struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(&vm_phys_fictitious_tree);
struct vm_phys_fictitious_seg {
        RB_ENTRY(vm_phys_fictitious_seg) node;
        /* Memory region data */
        vm_paddr_t      start;
        vm_paddr_t      end;
        vm_page_t       first_page;
};
RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock_padalign vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");
static struct vm_freelist __aligned(CACHE_LINE_SIZE)
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL]
    [VM_NFREEORDER_MAX];

static int __read_mostly vm_nfreelists;
/*
 * These "avail lists" are globals used to communicate boot-time physical
 * memory layout to other parts of the kernel.  Each physically contiguous
 * region of memory is defined by a start address at an even index and an
 * end address at the following odd index.  Each list is terminated by a
 * pair of zero entries.
 *
 * dump_avail tells the dump code what regions to include in a crash dump, and
 * phys_avail is all of the remaining physical memory that is available for
 * the vm system.
 *
 * Initially dump_avail and phys_avail are identical.  Boot time memory
 * allocations remove extents from phys_avail that may still be included
 * in dumps.
 */
vm_paddr_t phys_avail[PHYS_AVAIL_COUNT];
vm_paddr_t dump_avail[PHYS_AVAIL_COUNT];
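
/*
 * Illustrative sketch (not part of the allocator): walking the phys_avail
 * pairs.  The helper name below is hypothetical; it only assumes the layout
 * documented above, where entry i is a start, entry i + 1 is an end, and a
 * pair of zero entries terminates the list.
 */
static inline vm_paddr_t __unused
vm_phys_example_avail_total(void)
{
        vm_paddr_t total;
        int i;

        total = 0;
        for (i = 0; phys_avail[i + 1] != 0; i += 2)
                total += phys_avail[i + 1] - phys_avail[i];
        return (total);
}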
/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];
CTASSERT(VM_FREELIST_DEFAULT == 0);
#ifdef VM_FREELIST_DMA32
#define VM_DMA32_BOUNDARY       ((vm_paddr_t)1 << 32)
#endif
/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif
static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_free, "A",
    "Phys Free Info");
static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_segs, "A",
    "Phys Seg Info");
#ifdef NUMA
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_locality, "A",
    "Phys Locality Info");
#endif
SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order, int tail);
/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

        KASSERT(range->start != 0 && range->end != 0,
            ("Invalid range passed on search for vm_fictitious page"));
        if (p->start >= range->end)
                return (1);
        if (p->start < range->start)
                return (-1);

        return (0);
}
static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

        /* Check if this is a search for a page */
        if (p1->end == 0)
                return (vm_phys_fictitious_in_range(p1, p2));

        KASSERT(p2->end != 0,
            ("Invalid range passed as second parameter to vm fictitious comparison"));

        /* Searching to add a new range */
        if (p1->end <= p2->start)
                return (-1);
        if (p1->start >= p2->end)
                return (1);

        panic("Trying to add overlapping vm fictitious ranges:\n"
            "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
            (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}
int
vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
{
#ifdef NUMA
        domainset_t mask;
        int i;

        if (vm_ndomains == 1 || mem_affinity == NULL)
                return (0);

        DOMAINSET_ZERO(&mask);
        /*
         * Check for any memory that overlaps low, high.
         */
        for (i = 0; mem_affinity[i].end != 0; i++)
                if (mem_affinity[i].start <= high &&
                    mem_affinity[i].end >= low)
                        DOMAINSET_SET(mem_affinity[i].domain, &mask);
        if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
                return (prefer);
        if (DOMAINSET_EMPTY(&mask))
                panic("vm_phys_domain_match: Impossible constraint");
        return (DOMAINSET_FFS(&mask) - 1);
#else
        return (0);
#endif
}
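
/*
 * Example (illustrative): a caller that wants memory below 4G, preferably
 * from domain 0, might do
 *
 *      dom = vm_phys_domain_match(0, 0, (vm_paddr_t)1 << 32);
 *
 * Domain 0 is returned if it has memory overlapping [0, 4G); otherwise the
 * lowest-numbered overlapping domain is chosen, and only an empty candidate
 * set panics.
 */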
/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_freelist *fl;
        int dom, error, flind, oind, pind;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
        for (dom = 0; dom < vm_ndomains; dom++) {
                sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
                            "\n  ORDER (SIZE)  |  NUMBER"
                            "\n              ", flind);
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                sbuf_printf(&sbuf, "  |  POOL %d", pind);
                        sbuf_printf(&sbuf, "\n--            ");
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                sbuf_printf(&sbuf, "-- --      ");
                        sbuf_printf(&sbuf, "--\n");
                        for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                                sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
                                    1 << (PAGE_SHIFT - 10 + oind));
                                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                        fl = vm_phys_free_queues[dom][flind][pind];
                                        sbuf_printf(&sbuf, "  |  %6d",
                                            fl[oind].lcnt);
                                }
                                sbuf_printf(&sbuf, "\n");
                        }
                }
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}
/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_phys_seg *seg;
        int error, segind;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
                seg = &vm_phys_segs[segind];
                sbuf_printf(&sbuf, "start:     %#jx\n",
                    (uintmax_t)seg->start);
                sbuf_printf(&sbuf, "end:       %#jx\n",
                    (uintmax_t)seg->end);
                sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
                sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}
/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef NUMA
        if (mem_locality == NULL)
                return (-1);
        if (f >= vm_ndomains || t >= vm_ndomains)
                return (-1);
        return (mem_locality[f * vm_ndomains + t]);
#else
        return (-1);
#endif
}
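
/*
 * Example (illustrative): mem_locality is a flat vm_ndomains x vm_ndomains
 * matrix, so on a two-domain system:
 *
 *      mem_locality[0]         locality from domain 0 to domain 0
 *      mem_locality[1]         locality from domain 0 to domain 1
 *      mem_locality[2]         locality from domain 1 to domain 0
 *      mem_locality[3]         locality from domain 1 to domain 1
 *
 * vm_phys_mem_affinity(1, 0) thus reads index 1 * vm_ndomains + 0 = 2.
 */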
#ifdef NUMA
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        int error, i, j;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

        sbuf_printf(&sbuf, "\n");

        for (i = 0; i < vm_ndomains; i++) {
                sbuf_printf(&sbuf, "%d: ", i);
                for (j = 0; j < vm_ndomains; j++) {
                        sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
                }
                sbuf_printf(&sbuf, "\n");
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}
#endif
static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

        m->order = order;
        if (tail)
                TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
        else
                TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
        fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

        TAILQ_REMOVE(&fl[order].pl, m, listq);
        fl[order].lcnt--;
        m->order = VM_NFREEORDER;
}
/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
        struct vm_phys_seg *seg;

        KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
            ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
        KASSERT(domain >= 0 && domain < vm_ndomains,
            ("vm_phys_create_seg: invalid domain provided"));
        seg = &vm_phys_segs[vm_phys_nsegs++];
        while (seg > vm_phys_segs && (seg - 1)->start >= end) {
                *seg = *(seg - 1);
                seg--;
        }
        seg->start = start;
        seg->end = end;
        seg->domain = domain;
}
static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef NUMA
        int i;

        if (mem_affinity == NULL) {
                _vm_phys_create_seg(start, end, 0);
                return;
        }

        for (i = 0;; i++) {
                if (mem_affinity[i].end == 0)
                        panic("Reached end of affinity info");
                if (mem_affinity[i].end <= start)
                        continue;
                if (mem_affinity[i].start > start)
                        panic("No affinity info for start %jx",
                            (uintmax_t)start);
                if (mem_affinity[i].end >= end) {
                        _vm_phys_create_seg(start, end,
                            mem_affinity[i].domain);
                        break;
                }
                _vm_phys_create_seg(start, mem_affinity[i].end,
                    mem_affinity[i].domain);
                start = mem_affinity[i].end;
        }
#else
        _vm_phys_create_seg(start, end, 0);
#endif
}
/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
        vm_paddr_t paddr;

        KASSERT((start & PAGE_MASK) == 0,
            ("vm_phys_define_seg: start is not page aligned"));
        KASSERT((end & PAGE_MASK) == 0,
            ("vm_phys_define_seg: end is not page aligned"));

        /*
         * Split the physical memory segment if it spans two or more free
         * list boundaries.
         */
        paddr = start;
#ifdef VM_FREELIST_LOWMEM
        if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
                vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
                paddr = VM_LOWMEM_BOUNDARY;
        }
#endif
#ifdef VM_FREELIST_DMA32
        if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
                vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
                paddr = VM_DMA32_BOUNDARY;
        }
#endif
        vm_phys_create_seg(paddr, end);
}
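
/*
 * Example (illustrative): with VM_FREELIST_DMA32 defined, a segment
 * [3G, 5G) is recorded as two segments, [3G, 4G) and [4G, 5G), because it
 * spans VM_DMA32_BOUNDARY; a segment spanning VM_LOWMEM_BOUNDARY is
 * likewise clipped at that boundary first, so no recorded segment ever
 * straddles a free list boundary.
 */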
/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
#if defined(VM_DMA32_NPAGES_THRESHOLD) || defined(VM_PHYSSEG_SPARSE)
        u_long npages;
#endif
        int dom, flind, freelist, oind, pind, segind;

        /*
         * Compute the number of free lists, and generate the mapping from the
         * manifest constants VM_FREELIST_* to the free list indices.
         *
         * Initially, the entries of vm_freelist_to_flind[] are set to either
         * 0 or 1 to indicate which free lists should be created.
         */
#ifdef VM_DMA32_NPAGES_THRESHOLD
        npages = 0;
#endif
        for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
                seg = &vm_phys_segs[segind];
#ifdef VM_FREELIST_LOWMEM
                if (seg->end <= VM_LOWMEM_BOUNDARY)
                        vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
                else
#endif
#ifdef VM_FREELIST_DMA32
                if (
#ifdef VM_DMA32_NPAGES_THRESHOLD
                    /*
                     * Create the DMA32 free list only if the amount of
                     * physical memory above physical address 4G exceeds the
                     * given threshold.
                     */
                    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
                    seg->end <= VM_DMA32_BOUNDARY)
                        vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
                else
#endif
                {
#ifdef VM_DMA32_NPAGES_THRESHOLD
                        npages += atop(seg->end - seg->start);
#endif
                        vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
                }
        }
        /* Change each entry into a running total of the free lists. */
        for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
                vm_freelist_to_flind[freelist] +=
                    vm_freelist_to_flind[freelist - 1];
        }
        vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
        KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
        /* Change each entry into a free list index. */
        for (freelist = 0; freelist < VM_NFREELIST; freelist++)
                vm_freelist_to_flind[freelist]--;

        /*
         * Initialize the first_page and free_queues fields of each physical
         * memory segment.
         */
#ifdef VM_PHYSSEG_SPARSE
        npages = 0;
#endif
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
                seg->first_page = &vm_page_array[npages];
                npages += atop(seg->end - seg->start);
#else
                seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef VM_FREELIST_LOWMEM
                if (seg->end <= VM_LOWMEM_BOUNDARY) {
                        flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: LOWMEM flind < 0"));
                } else
#endif
#ifdef VM_FREELIST_DMA32
                if (seg->end <= VM_DMA32_BOUNDARY) {
                        flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: DMA32 flind < 0"));
                } else
#endif
                {
                        flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: DEFAULT flind < 0"));
                }
                seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
        }

        /*
         * Coalesce physical memory segments that are contiguous and share the
         * same per-domain free queues.
         */
        prev_seg = vm_phys_segs;
        seg = &vm_phys_segs[1];
        end_seg = &vm_phys_segs[vm_phys_nsegs];
        while (seg < end_seg) {
                if (prev_seg->end == seg->start &&
                    prev_seg->free_queues == seg->free_queues) {
                        prev_seg->end = seg->end;
                        KASSERT(prev_seg->domain == seg->domain,
                            ("vm_phys_init: free queues cannot span domains"));
                        vm_phys_nsegs--;
                        end_seg--;
                        for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
                                *tmp_seg = *(tmp_seg + 1);
                } else {
                        prev_seg = seg;
                        seg++;
                }
        }

        /*
         * Initialize the free queues.
         */
        for (dom = 0; dom < vm_ndomains; dom++) {
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = vm_phys_free_queues[dom][flind][pind];
                                for (oind = 0; oind < VM_NFREEORDER; oind++)
                                        TAILQ_INIT(&fl[oind].pl);
                        }
                }
        }

        rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}
/*
 * Register info about the NUMA topology of the system.
 *
 * Invoked by platform-dependent code prior to vm_phys_init().
 */
void
vm_phys_register_domains(int ndomains, struct mem_affinity *affinity,
    int *locality)
{
#ifdef NUMA
        int d, i;

        /*
         * For now the only override value that we support is 1, which
         * effectively disables NUMA-awareness in the allocators.
         */
        d = 0;
        TUNABLE_INT_FETCH("vm.numa.disabled", &d);
        if (d)
                ndomains = 1;

        if (ndomains > 1) {
                vm_ndomains = ndomains;
                mem_affinity = affinity;
                mem_locality = locality;
        }

        for (i = 0; i < vm_ndomains; i++)
                DOMAINSET_SET(i, &all_domains);
#else
        (void)ndomains;
        (void)affinity;
        (void)locality;
#endif
}
/*
 * Split a contiguous, power of two-sized set of physical pages.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the order [order, oind) queues
 * are known to be empty.  The objective being to reduce the likelihood of
 * long-term fragmentation by promoting contemporaneous allocation and
 * (hopefully) deallocation.
 */
static void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
    int tail)
{
        vm_page_t m_buddy;

        while (oind > order) {
                oind--;
                m_buddy = &m[1 << oind];
                KASSERT(m_buddy->order == VM_NFREEORDER,
                    ("vm_phys_split_pages: page %p has unexpected order %d",
                    m_buddy, m_buddy->order));
                vm_freelist_add(fl, m_buddy, oind, tail);
        }
}
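
/*
 * Worked example (illustrative): splitting an order-3 block at m to satisfy
 * an order-0 request.  The loop frees the upper halves m[4..7] (order 2),
 * m[2..3] (order 1), and m[1] (order 0), leaving the order-0 page m[0] for
 * the caller.
 */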
/*
 * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
 * and sized set to the specified free list.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the lower-order queues are
 * known to be empty.  The objective being to reduce the likelihood of long-
 * term fragmentation by promoting contemporaneous allocation and (hopefully)
 * deallocation.
 *
 * The physical page m's buddy must not be free.
 */
static void
vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
{
        u_int n;
        int order;

        KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
        KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
            ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
            ("vm_phys_enq_range: page %p and npages %u are misaligned",
            m, npages));
        do {
                KASSERT(m->order == VM_NFREEORDER,
                    ("vm_phys_enq_range: page %p has unexpected order %d",
                    m, m->order));
                order = ffs(npages) - 1;
                KASSERT(order < VM_NFREEORDER,
                    ("vm_phys_enq_range: order %d is out of range", order));
                vm_freelist_add(fl, m, order, tail);
                n = 1 << order;
                m += n;
                npages -= n;
        } while (npages > 0);
}
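
/*
 * Worked example (illustrative): enqueueing npages = 6.  ffs(6) - 1 = 1, so
 * an order-1 block is freed first; then ffs(4) - 1 = 2 frees the remaining
 * order-2 block.  Each iteration peels off the lowest set bit of npages,
 * which is why the range's end must be suitably aligned.
 */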
/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
static void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
        vm_page_t m_tmp;

        for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
                m_tmp->pool = pool;
}
/*
 * Tries to allocate the specified number of pages from the specified pool
 * within the specified domain.  Returns the actual number of allocated pages
 * and a pointer to each page through the array ma[].
 *
 * The returned pages may not be physically contiguous.  However, in contrast
 * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
 * calling this function once to allocate the desired number of pages will
 * avoid wasted time in vm_phys_split_pages().
 *
 * The free page queues for the specified domain must be locked.
 */
int
vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
{
        struct vm_freelist *alt, *fl;
        vm_page_t m;
        int avail, end, flind, freelist, i, need, oind, pind;

        KASSERT(domain >= 0 && domain < vm_ndomains,
            ("vm_phys_alloc_npages: domain %d is out of range", domain));
        KASSERT(pool < VM_NFREEPOOL,
            ("vm_phys_alloc_npages: pool %d is out of range", pool));
        KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
            ("vm_phys_alloc_npages: npages %d is out of range", npages));
        vm_domain_free_assert_locked(VM_DOMAIN(domain));
        i = 0;
        for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
                flind = vm_freelist_to_flind[freelist];
                if (flind < 0)
                        continue;
                fl = vm_phys_free_queues[domain][flind][pool];
                for (oind = 0; oind < VM_NFREEORDER; oind++) {
                        while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
                                vm_freelist_rem(fl, m, oind);
                                avail = 1 << oind;
                                need = imin(npages - i, avail);
                                for (end = i + need; i < end;)
                                        ma[i++] = m++;
                                if (need < avail) {
                                        /*
                                         * Return excess pages to fl.  Its
                                         * order [0, oind) queues are empty.
                                         */
                                        vm_phys_enq_range(m, avail - need, fl,
                                            1);
                                        return (npages);
                                } else if (i == npages)
                                        return (npages);
                        }
                }
                for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                alt = vm_phys_free_queues[domain][flind][pind];
                                while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
                                    NULL) {
                                        vm_freelist_rem(alt, m, oind);
                                        vm_phys_set_pool(pool, m, oind);
                                        avail = 1 << oind;
                                        need = imin(npages - i, avail);
                                        for (end = i + need; i < end;)
                                                ma[i++] = m++;
                                        if (need < avail) {
                                                /*
                                                 * Return excess pages to fl.
                                                 * Its order [0, oind) queues
                                                 * are empty.
                                                 */
                                                vm_phys_enq_range(m, avail -
                                                    need, fl, 1);
                                                return (npages);
                                        } else if (i == npages)
                                                return (npages);
                                }
                        }
                }
        }
        return (i);
}
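
/*
 * Example (illustrative sketch, hypothetical locals): filling a small batch
 * of pages from the default pool of domain 0.
 *
 *      vm_page_t ma[4];
 *      int got;
 *
 *      vm_domain_free_lock(VM_DOMAIN(0));
 *      got = vm_phys_alloc_npages(0, VM_FREEPOOL_DEFAULT, 4, ma);
 *      vm_domain_free_unlock(VM_DOMAIN(0));
 *
 * "got" may be less than 4 when the domain's free queues are nearly empty,
 * and the returned pages need not be physically contiguous.
 */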
/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int domain, int pool, int order)
{
        vm_page_t m;
        int freelist;

        for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
                m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
                if (m != NULL)
                        return (m);
        }
        return (NULL);
}
/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
{
        struct vm_freelist *alt, *fl;
        vm_page_t m;
        int oind, pind, flind;

        KASSERT(domain >= 0 && domain < vm_ndomains,
            ("vm_phys_alloc_freelist_pages: domain %d is out of range",
            domain));
        KASSERT(freelist < VM_NFREELIST,
            ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
            freelist));
        KASSERT(pool < VM_NFREEPOOL,
            ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

        flind = vm_freelist_to_flind[freelist];
        /* Check if freelist is present */
        if (flind < 0)
                return (NULL);

        vm_domain_free_assert_locked(VM_DOMAIN(domain));
        fl = &vm_phys_free_queues[domain][flind][pool][0];
        for (oind = order; oind < VM_NFREEORDER; oind++) {
                m = TAILQ_FIRST(&fl[oind].pl);
                if (m != NULL) {
                        vm_freelist_rem(fl, m, oind);
                        /* The order [order, oind) queues are empty. */
                        vm_phys_split_pages(m, oind, fl, order, 1);
                        return (m);
                }
        }

        /*
         * The given pool was empty.  Find the largest
         * contiguous, power-of-two-sized set of pages in any
         * pool.  Transfer these pages to the given pool, and
         * use them to satisfy the allocation.
         */
        for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                        alt = &vm_phys_free_queues[domain][flind][pind][0];
                        m = TAILQ_FIRST(&alt[oind].pl);
                        if (m != NULL) {
                                vm_freelist_rem(alt, m, oind);
                                vm_phys_set_pool(pool, m, oind);
                                /* The order [order, oind) queues are empty. */
                                vm_phys_split_pages(m, oind, fl, order, 1);
                                return (m);
                        }
                }
        }
        return (NULL);
}
/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_seg *seg;

        if ((seg = vm_phys_paddr_to_seg(pa)) != NULL)
                return (&seg->first_page[atop(pa - seg->start)]);
        return (NULL);
}
vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_fictitious_seg tmp, *seg;
        vm_page_t m;

        m = NULL;
        tmp.start = pa;
        tmp.end = 0;

        rw_rlock(&vm_phys_fictitious_reg_lock);
        seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
        rw_runlock(&vm_phys_fictitious_reg_lock);
        if (seg == NULL)
                return (NULL);

        m = &seg->first_page[atop(pa - seg->start)];
        KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

        return (m);
}
static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
        long i;

        bzero(range, page_count * sizeof(*range));
        for (i = 0; i < page_count; i++) {
                vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
                range[i].oflags &= ~VPO_UNMANAGED;
                range[i].busy_lock = VPB_UNBUSIED;
        }
}
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
        struct vm_phys_fictitious_seg *seg;
        vm_page_t fp;
        long page_count;
#ifdef VM_PHYSSEG_DENSE
        long pi, pe;
        long dpage_count;
#endif

        KASSERT(start < end,
            ("Start of segment isn't less than end (start: %jx end: %jx)",
            (uintmax_t)start, (uintmax_t)end));

        page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
        pi = atop(start);
        pe = atop(end);
        if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
                fp = &vm_page_array[pi - first_page];
                if ((pe - first_page) > vm_page_array_size) {
                        /*
                         * We have a segment that starts inside
                         * of vm_page_array, but ends outside of it.
                         *
                         * Use vm_page_array pages for those that are
                         * inside of the vm_page_array range, and
                         * allocate the remaining ones.
                         */
                        dpage_count = vm_page_array_size - (pi - first_page);
                        vm_phys_fictitious_init_range(fp, start, dpage_count,
                            memattr);
                        page_count -= dpage_count;
                        start += ptoa(dpage_count);
                        goto alloc;
                }
                /*
                 * We can allocate the full range from vm_page_array,
                 * so there's no need to register the range in the tree.
                 */
                vm_phys_fictitious_init_range(fp, start, page_count, memattr);
                return (0);
        } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
                /*
                 * We have a segment that ends inside of vm_page_array,
                 * but starts outside of it.
                 */
                fp = &vm_page_array[0];
                dpage_count = pe - first_page;
                vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
                    memattr);
                end -= ptoa(dpage_count);
                page_count -= dpage_count;
        } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
                /*
                 * Trying to register a fictitious range that expands before
                 * and after vm_page_array.
                 */
                return (EINVAL);
        }
alloc:
#endif

        fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
            M_WAITOK);
        vm_phys_fictitious_init_range(fp, start, page_count, memattr);

        seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
        seg->start = start;
        seg->end = end;
        seg->first_page = fp;

        rw_wlock(&vm_phys_fictitious_reg_lock);
        RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
        rw_wunlock(&vm_phys_fictitious_reg_lock);

        return (0);
}
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
        struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
        long pi, pe;
#endif

        KASSERT(start < end,
            ("Start of segment isn't less than end (start: %jx end: %jx)",
            (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
        pi = atop(start);
        pe = atop(end);
        if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
                if ((pe - first_page) <= vm_page_array_size) {
                        /*
                         * This segment was allocated using vm_page_array
                         * only, there's nothing to do since those pages
                         * were never added to the tree.
                         */
                        return;
                }
                /*
                 * We have a segment that starts inside
                 * of vm_page_array, but ends outside of it.
                 *
                 * Calculate how many pages were added to the
                 * tree and free them.
                 */
                start = ptoa(first_page + vm_page_array_size);
        } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
                /*
                 * We have a segment that ends inside of vm_page_array,
                 * but starts outside of it.
                 */
                end = ptoa(first_page);
        } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
                /* Since it's not possible to register such a range, panic. */
                panic(
                    "Unregistering not registered fictitious range [%#jx:%#jx]",
                    (uintmax_t)start, (uintmax_t)end);
        }
#endif

        tmp.start = start;
        tmp.end = 0;

        rw_wlock(&vm_phys_fictitious_reg_lock);
        seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
        if (seg->start != start || seg->end != end) {
                rw_wunlock(&vm_phys_fictitious_reg_lock);
                panic(
                    "Unregistering not registered fictitious range [%#jx:%#jx]",
                    (uintmax_t)start, (uintmax_t)end);
        }
        RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
        rw_wunlock(&vm_phys_fictitious_reg_lock);
        free(seg->first_page, M_FICT_PAGES);
        free(seg, M_FICT_PAGES);
}
/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa;
        vm_page_t m_buddy;

        KASSERT(m->order == VM_NFREEORDER,
            ("vm_phys_free_pages: page %p has unexpected order %d",
            m, m->order));
        KASSERT(m->pool < VM_NFREEPOOL,
            ("vm_phys_free_pages: page %p has unexpected pool %d",
            m, m->pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_free_pages: order %d is out of range", order));
        seg = &vm_phys_segs[m->segind];
        vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
        if (order < VM_NFREEORDER - 1) {
                pa = VM_PAGE_TO_PHYS(m);
                do {
                        pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
                        if (pa < seg->start || pa >= seg->end)
                                break;
                        m_buddy = &seg->first_page[atop(pa - seg->start)];
                        if (m_buddy->order != order)
                                break;
                        fl = (*seg->free_queues)[m_buddy->pool];
                        vm_freelist_rem(fl, m_buddy, order);
                        if (m_buddy->pool != m->pool)
                                vm_phys_set_pool(m->pool, m_buddy, order);
                        order++;
                        pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
                        m = &seg->first_page[atop(pa - seg->start)];
                } while (order < VM_NFREEORDER - 1);
        }
        fl = (*seg->free_queues)[m->pool];
        vm_freelist_add(fl, m, order, 1);
}
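
/*
 * Worked example (illustrative, PAGE_SHIFT = 12): freeing an order-0 page
 * at 0x5000.  Its buddy is 0x5000 ^ 0x1000 = 0x4000; if that page is a free
 * order-0 block, the two merge into an order-1 block at 0x4000, whose own
 * buddy is 0x4000 ^ 0x2000 = 0x6000, and so on until a buddy is not free or
 * lies outside the segment.
 */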
/*
 * Return the largest possible order of a set of pages starting at m.
 */
static int
max_order(vm_page_t m)
{

        /*
         * Unsigned "min" is used here so that "order" is assigned
         * "VM_NFREEORDER - 1" when "m"'s physical address is zero
         * or the low-order bits of its physical address are zero
         * because the size of a physical address exceeds the size of
         * a long.
         */
        return (min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
            VM_NFREEORDER - 1));
}
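
/*
 * Worked example (illustrative, PAGE_SHIFT = 12): a page at 0x6000 has page
 * frame number 6 (binary 110), so ffsl(6) - 1 = 1 and the page can begin at
 * most an order-1 block.  A physical address of zero gives ffsl(0) - 1 = -1,
 * which the unsigned "min" clamps to VM_NFREEORDER - 1.
 */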
/*
 * Free a contiguous, arbitrarily sized set of physical pages, without
 * merging across set boundaries.
 *
 * The free page queues must be locked.
 */
void
vm_phys_enqueue_contig(vm_page_t m, u_long npages)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_page_t m_end;
        int order;

        /*
         * Avoid unnecessary coalescing by freeing the pages in the largest
         * possible power-of-two-sized subsets.
         */
        vm_domain_free_assert_locked(vm_pagequeue_domain(m));
        seg = &vm_phys_segs[m->segind];
        fl = (*seg->free_queues)[m->pool];
        m_end = m + npages;
        /* Free blocks of increasing size. */
        while ((order = max_order(m)) < VM_NFREEORDER - 1 &&
            m + (1 << order) <= m_end) {
                KASSERT(seg == &vm_phys_segs[m->segind],
                    ("%s: page range [%p,%p) spans multiple segments",
                    __func__, m_end - npages, m));
                vm_freelist_add(fl, m, order, 1);
                m += 1 << order;
        }
        /* Free blocks of maximum size. */
        while (m + (1 << order) <= m_end) {
                KASSERT(seg == &vm_phys_segs[m->segind],
                    ("%s: page range [%p,%p) spans multiple segments",
                    __func__, m_end - npages, m));
                vm_freelist_add(fl, m, order, 1);
                m += 1 << order;
        }
        /* Free blocks of diminishing size. */
        while (m < m_end) {
                KASSERT(seg == &vm_phys_segs[m->segind],
                    ("%s: page range [%p,%p) spans multiple segments",
                    __func__, m_end - npages, m));
                order = flsl(m_end - m) - 1;
                vm_freelist_add(fl, m, order, 1);
                m += 1 << order;
        }
}
/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
        int order_start, order_end;
        vm_page_t m_start, m_end;

        vm_domain_free_assert_locked(vm_pagequeue_domain(m));

        m_start = m;
        order_start = max_order(m_start);
        if (order_start < VM_NFREEORDER - 1)
                m_start += 1 << order_start;
        m_end = m + npages;
        order_end = max_order(m_end);
        if (order_end < VM_NFREEORDER - 1)
                m_end -= 1 << order_end;
        /*
         * Avoid unnecessary coalescing by freeing the pages at the start and
         * end of the range last.
         */
        if (m_start < m_end)
                vm_phys_enqueue_contig(m_start, m_end - m_start);
        if (order_start < VM_NFREEORDER - 1)
                vm_phys_free_pages(m, order_start);
        if (order_end < VM_NFREEORDER - 1)
                vm_phys_free_pages(m_end, order_end);
}
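
/*
 * Worked example (illustrative): freeing 7 pages starting at page frame 4.
 * max_order() yields order 2 at pfn 4 and order 0 at pfn 11, so the interior
 * [8, 10) goes through vm_phys_enqueue_contig() while the order-2 block at
 * pfn 4 and the single page at pfn 10 are freed last, where
 * vm_phys_free_pages() may coalesce them with free neighbors outside the
 * range.
 */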
/*
 * Identify the first address range within segment segind or greater
 * that matches the domain, lies within the low/high range, and has
 * enough pages.  Return -1 if there is none.
 */
int
vm_phys_find_range(vm_page_t bounds[], int segind, int domain,
    u_long npages, vm_paddr_t low, vm_paddr_t high)
{
        vm_paddr_t pa_end, pa_start;
        struct vm_phys_seg *end_seg, *seg;

        KASSERT(npages > 0, ("npages is zero"));
        KASSERT(domain >= 0 && domain < vm_ndomains, ("domain out of range"));
        end_seg = &vm_phys_segs[vm_phys_nsegs];
        for (seg = &vm_phys_segs[segind]; seg < end_seg; seg++) {
                if (seg->domain != domain)
                        continue;
                if (seg->start >= high)
                        return (-1);
                pa_start = MAX(low, seg->start);
                pa_end = MIN(high, seg->end);
                if (pa_end - pa_start < ptoa(npages))
                        continue;
                bounds[0] = &seg->first_page[atop(pa_start - seg->start)];
                bounds[1] = &seg->first_page[atop(pa_end - seg->start)];
                return (seg - vm_phys_segs);
        }
        return (-1);
}
/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return true.  Otherwise, return
 * false, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
bool
vm_phys_unfree_page(vm_page_t m)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa, pa_half;
        vm_page_t m_set, m_tmp;
        int order;

        /*
         * First, find the contiguous, power of two-sized set of free
         * physical pages containing the given physical page "m" and
         * assign it to "m_set".
         */
        seg = &vm_phys_segs[m->segind];
        vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
        for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
            order < VM_NFREEORDER - 1; ) {
                order++;
                pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
                if (pa >= seg->start)
                        m_set = &seg->first_page[atop(pa - seg->start)];
                else
                        return (false);
        }
        if (m_set->order < order)
                return (false);
        if (m_set->order == VM_NFREEORDER)
                return (false);
        KASSERT(m_set->order < VM_NFREEORDER,
            ("vm_phys_unfree_page: page %p has unexpected order %d",
            m_set, m_set->order));

        /*
         * Next, remove "m_set" from the free lists.  Finally, extract
         * "m" from "m_set" using an iterative algorithm: While "m_set"
         * is larger than a page, shrink "m_set" by returning the half
         * of "m_set" that does not contain "m" to the free lists.
         */
        fl = (*seg->free_queues)[m_set->pool];
        order = m_set->order;
        vm_freelist_rem(fl, m_set, order);
        while (order > 0) {
                order--;
                pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
                if (m->phys_addr < pa_half)
                        m_tmp = &seg->first_page[atop(pa_half - seg->start)];
                else {
                        m_tmp = m_set;
                        m_set = &seg->first_page[atop(pa_half - seg->start)];
                }
                vm_freelist_add(fl, m_tmp, order, 0);
        }
        KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
        return (true);
}
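
/*
 * Worked example (illustrative): unfreeing page 5 from a free order-3 block
 * at pfn 0.  The halves not containing page 5 are returned in turn: [0, 4)
 * at order 2, [6, 8) at order 1, and pfn 4 at order 0, leaving exactly page
 * 5 removed from the free lists.
 */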
/*
 * Find a run of contiguous physical pages from the specified page list.
 */
static vm_page_t
vm_phys_find_freelist_contig(struct vm_freelist *fl, int oind, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
        struct vm_phys_seg *seg;
        vm_paddr_t frag, lbound, pa, page_size, pa_end, pa_pre, size;
        vm_page_t m, m_listed, m_ret;
        int order;

        KASSERT(npages > 0, ("npages is 0"));
        KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
        KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
        /* Search for a run satisfying the specified conditions. */
        page_size = PAGE_SIZE;
        size = npages << PAGE_SHIFT;
        frag = (npages & ~(~0UL << oind)) << PAGE_SHIFT;
        TAILQ_FOREACH(m_listed, &fl[oind].pl, listq) {
                /*
                 * Determine if the address range starting at pa is
                 * too low.
                 */
                pa = VM_PAGE_TO_PHYS(m_listed);
                if (pa < low)
                        continue;

                /*
                 * If this is not the first free oind-block in this range, bail
                 * out.  We have seen the first free block already, or will see
                 * it before failing to find an appropriate range.
                 */
                seg = &vm_phys_segs[m_listed->segind];
                lbound = low > seg->start ? low : seg->start;
                pa_pre = pa - (page_size << oind);
                m = &seg->first_page[atop(pa_pre - seg->start)];
                if (pa != 0 && pa_pre >= lbound && m->order == oind)
                        continue;

                if (!vm_addr_align_ok(pa, alignment))
                        /* Advance to satisfy alignment condition. */
                        pa = roundup2(pa, alignment);
                else if (frag != 0 && lbound + frag <= pa) {
                        /*
                         * Back up to the first aligned free block in this
                         * range, without moving below lbound.
                         */
                        pa_end = pa;
                        for (order = oind - 1; order >= 0; order--) {
                                pa_pre = pa_end - (page_size << order);
                                if (!vm_addr_align_ok(pa_pre, alignment))
                                        break;
                                m = &seg->first_page[atop(pa_pre - seg->start)];
                                if (pa_pre >= lbound && m->order == order)
                                        pa_end = pa_pre;
                        }
                        /*
                         * If the extra small blocks are enough to complete the
                         * fragment, use them.  Otherwise, look to allocate the
                         * fragment at the other end.
                         */
                        if (pa_end + frag <= pa)
                                pa = pa_end;
                }

                /* Advance as necessary to satisfy boundary conditions. */
                if (!vm_addr_bound_ok(pa, size, boundary))
                        pa = roundup2(pa + 1, boundary);
                pa_end = pa + size;

                /*
                 * Determine if the address range is valid (without overflow in
                 * pa_end calculation), and fits within the segment.
                 */
                if (pa_end < pa || seg->end < pa_end)
                        continue;

                m_ret = &seg->first_page[atop(pa - seg->start)];

                /*
                 * Determine whether there are enough free oind-blocks here to
                 * satisfy the allocation request.
                 */
                pa = VM_PAGE_TO_PHYS(m_listed);
                do {
                        pa += page_size << oind;
                        if (pa >= pa_end)
                                return (m_ret);
                        m = &seg->first_page[atop(pa - seg->start)];
                } while (oind == m->order);

                /*
                 * Determine if an additional series of free blocks of
                 * diminishing size can help to satisfy the allocation request.
                 */
                while (m->order < oind &&
                    pa + 2 * (page_size << m->order) > pa_end) {
                        pa += page_size << m->order;
                        if (pa >= pa_end)
                                return (m_ret);
                        m = &seg->first_page[atop(pa - seg->start)];
                }
        }
        return (NULL);
}
/*
 * Find a run of contiguous physical pages from the specified free list
 * table.
 */
static vm_page_t
vm_phys_find_queues_contig(
    struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX],
    u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
        struct vm_freelist *fl;
        vm_page_t m_ret;
        vm_paddr_t pa, pa_end, size;
        int oind, order, pind;

        KASSERT(npages > 0, ("npages is 0"));
        KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
        KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
        /* Compute the queue that is the best fit for npages. */
        order = flsl(npages - 1);
        /* Search for a large enough free block. */
        size = npages << PAGE_SHIFT;
        for (oind = order; oind < VM_NFREEORDER; oind++) {
                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                        fl = (*queues)[pind];
                        TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
                                /*
                                 * Determine if the address range starting at pa
                                 * is within the given range, satisfies the
                                 * given alignment, and does not cross the given
                                 * boundary.
                                 */
                                pa = VM_PAGE_TO_PHYS(m_ret);
                                pa_end = pa + size;
                                if (low <= pa && pa_end <= high &&
                                    vm_addr_ok(pa, size, alignment, boundary))
                                        return (m_ret);
                        }
                }
        }
        if (order < VM_NFREEORDER)
                return (NULL);
        /* Search for a long-enough sequence of small blocks. */
        oind = VM_NFREEORDER - 1;
        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                fl = (*queues)[pind];
                m_ret = vm_phys_find_freelist_contig(fl, oind, npages,
                    low, high, alignment, boundary);
                if (m_ret != NULL)
                        return (m_ret);
        }
        return (NULL);
}
/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
        vm_paddr_t pa_end, pa_start;
        struct vm_freelist *fl;
        vm_page_t m, m_run;
        struct vm_phys_seg *seg;
        struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
        int oind, segind;

        KASSERT(npages > 0, ("npages is 0"));
        KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
        KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
        vm_domain_free_assert_locked(VM_DOMAIN(domain));
        if (low >= high)
                return (NULL);
        queues = NULL;
        m_run = NULL;
        for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
                seg = &vm_phys_segs[segind];
                if (seg->start >= high || seg->domain != domain)
                        continue;
                if (low >= seg->end)
                        break;
                if (low <= seg->start)
                        pa_start = seg->start;
                else
                        pa_start = low;
                if (high < seg->end)
                        pa_end = high;
                else
                        pa_end = seg->end;
                if (pa_end - pa_start < ptoa(npages))
                        continue;
                /*
                 * If a previous segment led to a search using
                 * the same free lists as would this segment, then
                 * we've actually already searched within this
                 * segment too.  So skip it.
                 */
                if (seg->free_queues == queues)
                        continue;
                queues = seg->free_queues;
                m_run = vm_phys_find_queues_contig(queues, npages,
                    low, high, alignment, boundary);
                if (m_run != NULL)
                        break;
        }
        if (m_run == NULL)
                return (NULL);

        /* Allocate pages from the page-range found. */
        for (m = m_run; m < &m_run[npages]; m = &m[1 << oind]) {
                fl = (*queues)[m->pool];
                oind = m->order;
                vm_freelist_rem(fl, m, oind);
                if (m->pool != VM_FREEPOOL_DEFAULT)
                        vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
        }
        /* Return excess pages to the free lists. */
        if (&m_run[npages] < m) {
                fl = (*queues)[VM_FREEPOOL_DEFAULT];
                vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
        }
        return (m_run);
}
/*
 * Return the index of the first unused slot which may be the terminating
 * entry.
 */
static int
vm_phys_avail_count(void)
{
        int i;

        for (i = 0; phys_avail[i + 1]; i += 2)
                continue;
        if (i > PHYS_AVAIL_ENTRIES)
                panic("Improperly terminated phys_avail %d entries", i);

        return (i);
}
/*
 * Assert that a phys_avail entry is valid.
 */
static void
vm_phys_avail_check(int i)
{
        if (phys_avail[i] & PAGE_MASK)
                panic("Unaligned phys_avail[%d]: %#jx", i,
                    (intmax_t)phys_avail[i]);
        if (phys_avail[i + 1] & PAGE_MASK)
                panic("Unaligned phys_avail[%d + 1]: %#jx", i,
                    (intmax_t)phys_avail[i + 1]);
        if (phys_avail[i + 1] < phys_avail[i])
                panic("phys_avail[%d]: end %#jx < start %#jx", i,
                    (intmax_t)phys_avail[i + 1], (intmax_t)phys_avail[i]);
}
/*
 * Return the index of an overlapping phys_avail entry or -1.
 */
#ifdef NUMA
static int
vm_phys_avail_find(vm_paddr_t pa)
{
        int i;

        for (i = 0; phys_avail[i + 1]; i += 2)
                if (phys_avail[i] <= pa && phys_avail[i + 1] > pa)
                        return (i);
        return (-1);
}
#endif
/*
 * Return the index of the largest entry.
 */
int
vm_phys_avail_largest(void)
{
        vm_paddr_t sz, largesz;
        int largest;
        int i;

        largest = 0;
        largesz = 0;
        for (i = 0; phys_avail[i + 1]; i += 2) {
                sz = vm_phys_avail_size(i);
                if (sz > largesz) {
                        largesz = sz;
                        largest = i;
                }
        }

        return (largest);
}

vm_paddr_t
vm_phys_avail_size(int i)
{

        return (phys_avail[i + 1] - phys_avail[i]);
}
/*
 * Split an entry at the address 'pa'.  Return zero on success or errno.
 */
static int
vm_phys_avail_split(vm_paddr_t pa, int i)
{
        int cnt;

        vm_phys_avail_check(i);
        if (pa <= phys_avail[i] || pa >= phys_avail[i + 1])
                panic("vm_phys_avail_split: invalid address");
        cnt = vm_phys_avail_count();
        if (cnt >= PHYS_AVAIL_ENTRIES)
                return (ENOSPC);
        memmove(&phys_avail[i + 2], &phys_avail[i],
            (cnt - i) * sizeof(phys_avail[0]));
        phys_avail[i + 1] = pa;
        phys_avail[i + 2] = pa;
        vm_phys_avail_check(i);
        vm_phys_avail_check(i + 2);

        return (0);
}
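
/*
 * Example (illustrative): splitting the entry {0x10000, 0x40000} at
 * pa = 0x20000 shifts the subsequent entries up by one pair and leaves
 * {0x10000, 0x20000} followed by {0x20000, 0x40000}; boot-time code can
 * then trim either half independently.
 */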
/*
 * Check if a given physical address can be included as part of a crash dump.
 */
bool
vm_phys_is_dumpable(vm_paddr_t pa)
{
        vm_page_t m;
        int i;

        if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
                return ((m->flags & PG_NODUMP) == 0);

        for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
                if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
                        return (true);
        }
        return (false);
}
void
vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end)
{
        struct vm_phys_seg *seg;

        if (vm_phys_early_nsegs == -1)
                panic("%s: called after initialization", __func__);
        if (vm_phys_early_nsegs == nitems(vm_phys_early_segs))
                panic("%s: ran out of early segments", __func__);

        seg = &vm_phys_early_segs[vm_phys_early_nsegs++];
        seg->start = start;
        seg->end = end;
}
/*
 * This routine allocates NUMA node specific memory before the page
 * allocator is bootstrapped.
 */
vm_paddr_t
vm_phys_early_alloc(int domain, size_t alloc_size)
{
#ifdef NUMA
        int mem_index;
#endif
        int i, biggestone;
        vm_paddr_t pa, mem_start, mem_end, size, biggestsize, align;

        KASSERT(domain == -1 || (domain >= 0 && domain < vm_ndomains),
            ("%s: invalid domain index %d", __func__, domain));

        /*
         * Search the mem_affinity array for the biggest address
         * range in the desired domain.  This is used to constrain
         * the phys_avail selection below.
         */
        biggestsize = 0;
        mem_start = 0;
        mem_end = -1;
#ifdef NUMA
        mem_index = 0;
        if (mem_affinity != NULL) {
                for (i = 0;; i++) {
                        size = mem_affinity[i].end - mem_affinity[i].start;
                        if (size == 0)
                                break;
                        if (domain != -1 && mem_affinity[i].domain != domain)
                                continue;
                        if (size > biggestsize) {
                                mem_index = i;
                                biggestsize = size;
                        }
                }
                mem_start = mem_affinity[mem_index].start;
                mem_end = mem_affinity[mem_index].end;
        }
#endif

        /*
         * Now find the biggest physical segment within the desired
         * range.
         */
        biggestsize = 0;
        biggestone = 0;
        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                /* skip regions that are out of range */
                if (phys_avail[i + 1] - alloc_size < mem_start ||
                    phys_avail[i + 1] > mem_end)
                        continue;
                size = vm_phys_avail_size(i);
                if (size > biggestsize) {
                        biggestone = i;
                        biggestsize = size;
                }
        }
        alloc_size = round_page(alloc_size);

        /*
         * Grab single pages from the front to reduce fragmentation.
         */
        if (alloc_size == PAGE_SIZE) {
                pa = phys_avail[biggestone];
                phys_avail[biggestone] += PAGE_SIZE;
                vm_phys_avail_check(biggestone);
                return (pa);
        }

        /*
         * Naturally align large allocations.
         */
        align = phys_avail[biggestone + 1] & (alloc_size - 1);
        if (alloc_size + align > biggestsize)
                panic("cannot find a large enough size\n");
        if (align != 0 &&
            vm_phys_avail_split(phys_avail[biggestone + 1] - align,
            biggestone) != 0)
                /* Wasting memory. */
                phys_avail[biggestone + 1] -= align;

        phys_avail[biggestone + 1] -= alloc_size;
        vm_phys_avail_check(biggestone);
        pa = phys_avail[biggestone + 1];
        return (pa);
}
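
/*
 * Worked example (illustrative): carving a 2MB allocation from an entry
 * ending at 0x7fe00000.  The end is already 2MB-aligned, so align is 0 and
 * the entry's end simply moves down to 0x7fc00000, which is returned.  Were
 * the end 0x7fe01000 instead, the 4KB tail would first be split off (or
 * discarded) so that the allocation itself stays naturally aligned.
 */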
void
vm_phys_early_startup(void)
{
        struct vm_phys_seg *seg;
        int i;

        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                phys_avail[i] = round_page(phys_avail[i]);
                phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
        }

        for (i = 0; i < vm_phys_early_nsegs; i++) {
                seg = &vm_phys_early_segs[i];
                vm_phys_add_seg(seg->start, seg->end);
        }
        vm_phys_early_nsegs = -1;

#ifdef NUMA
        /* Force phys_avail to be split by domain. */
        if (mem_affinity != NULL) {
                int idx;

                for (i = 0; mem_affinity[i].end != 0; i++) {
                        idx = vm_phys_avail_find(mem_affinity[i].start);
                        if (idx != -1 &&
                            phys_avail[idx] != mem_affinity[i].start)
                                vm_phys_avail_split(mem_affinity[i].start, idx);
                        idx = vm_phys_avail_find(mem_affinity[i].end);
                        if (idx != -1 &&
                            phys_avail[idx] != mem_affinity[i].end)
                                vm_phys_avail_split(mem_affinity[i].end, idx);
                }
        }
#endif
}
#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND_FLAGS(freepages, db_show_freepages, DB_CMD_MEMSAFE)
{
        struct vm_freelist *fl;
        int flind, oind, pind, dom;

        for (dom = 0; dom < vm_ndomains; dom++) {
                db_printf("DOMAIN: %d\n", dom);
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        db_printf("FREE LIST %d:\n"
                            "\n  ORDER (SIZE)  |  NUMBER"
                            "\n              ", flind);
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                db_printf("  |  POOL %d", pind);
                        db_printf("\n--            ");
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                db_printf("-- --      ");
                        db_printf("--\n");
                        for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                                db_printf("  %2.2d (%6.6dK)", oind,
                                    1 << (PAGE_SHIFT - 10 + oind));
                                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                        fl = vm_phys_free_queues[dom][flind][pind];
                                        db_printf("  |  %6.6d", fl[oind].lcnt);
                                }
                                db_printf("\n");
                        }
                        db_printf("\n");
                }
                db_printf("\n");
        }
}
#endif