/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

#ifdef NUMA
struct mem_affinity __read_mostly *mem_affinity;
int __read_mostly *mem_locality;
#endif

int __read_mostly vm_ndomains = 1;
domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);

struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
int __read_mostly vm_phys_nsegs;
static struct vm_phys_seg vm_phys_early_segs[8];
static int vm_phys_early_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(&vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock_padalign vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist __aligned(CACHE_LINE_SIZE)
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL]
    [VM_NFREEORDER_MAX];

static int __read_mostly vm_nfreelists;

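/*
 * Illustration of the free queue indexing (not from the original source):
 * vm_phys_free_queues[dom][flind][pool][order] is the free list holding
 * 2^order page blocks of the given pool, on free list flind, in domain dom.
 * For example, assuming a 4 KB page size, fl = vm_phys_free_queues[0][0][pool]
 * makes fl[9] the queue of free 2 MB (order 9) blocks for that pool in
 * domain 0.
 */
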
/*
 * These "avail lists" are globals used to communicate boot-time physical
 * memory layout to other parts of the kernel.  Each physically contiguous
 * region of memory is defined by a start address at an even index and an
 * end address at the following odd index.  Each list is terminated by a
 * pair of zero entries.
 *
 * dump_avail tells the dump code what regions to include in a crash dump, and
 * phys_avail is all of the remaining physical memory that is available for
 * the vm system.
 *
 * Initially dump_avail and phys_avail are identical.  Boot time memory
 * allocations remove extents from phys_avail that may still be included
 * in dumps.
 */
vm_paddr_t phys_avail[PHYS_AVAIL_COUNT];
vm_paddr_t dump_avail[PHYS_AVAIL_COUNT];

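/*
 * For illustration (hypothetical addresses, not from the original source):
 * a machine with two usable regions [0x1000, 0x9f000) and
 * [0x100000, 0xbfff0000) would be described by
 *
 *	phys_avail[] = { 0x1000, 0x9f000, 0x100000, 0xbfff0000, 0, 0 };
 *
 * with dump_avail[] initially holding the same pairs.
 */
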
/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_free, "A",
    "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_segs, "A",
    "Phys Seg Info");

#ifdef NUMA
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_phys_locality, "A",
    "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order, int tail);

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
	    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}

int
vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
{
#ifdef NUMA
	domainset_t mask;
	int i;

	if (vm_ndomains == 1 || mem_affinity == NULL)
		return (0);

	DOMAINSET_ZERO(&mask);
	/*
	 * Check for any memory that overlaps low, high.
	 */
	for (i = 0; mem_affinity[i].end != 0; i++)
		if (mem_affinity[i].start <= high &&
		    mem_affinity[i].end >= low)
			DOMAINSET_SET(mem_affinity[i].domain, &mask);
	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
		return (prefer);
	if (DOMAINSET_EMPTY(&mask))
		panic("vm_phys_domain_match: Impossible constraint");
	return (DOMAINSET_FFS(&mask) - 1);
#else
	return (0);
#endif
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef NUMA
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef NUMA
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, listq);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef NUMA
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef	VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef	VM_FREELIST_DMA32
		if (
#ifdef	VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Coalesce physical memory segments that are contiguous and share the
	 * same per-domain free queues.
	 */
	prev_seg = vm_phys_segs;
	seg = &vm_phys_segs[1];
	end_seg = &vm_phys_segs[vm_phys_nsegs];
	while (seg < end_seg) {
		if (prev_seg->end == seg->start &&
		    prev_seg->free_queues == seg->free_queues) {
			prev_seg->end = seg->end;
			KASSERT(prev_seg->domain == seg->domain,
			    ("vm_phys_init: free queues cannot span domains"));
			vm_phys_nsegs--;
			end_seg--;
			for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
				*tmp_seg = *(tmp_seg + 1);
		} else {
			prev_seg = seg;
			seg++;
		}
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Register info about the NUMA topology of the system.
 *
 * Invoked by platform-dependent code prior to vm_phys_init().
 */
void
vm_phys_register_domains(int ndomains, struct mem_affinity *affinity,
    int *locality)
{
#ifdef NUMA
	int d, i;

	/*
	 * For now the only override value that we support is 1, which
	 * effectively disables NUMA-awareness in the allocators.
	 */
	d = 0;
	TUNABLE_INT_FETCH("vm.numa.disabled", &d);
	if (d)
		ndomains = 1;

	if (ndomains > 1) {
		vm_ndomains = ndomains;
		mem_affinity = affinity;
		mem_locality = locality;
	}

	for (i = 0; i < vm_ndomains; i++)
		DOMAINSET_SET(i, &all_domains);
#endif
}

int
_vm_phys_domain(vm_paddr_t pa)
{
#ifdef NUMA
	int i;

	if (vm_ndomains == 1)
		return (0);
	for (i = 0; mem_affinity[i].end != 0; i++)
		if (mem_affinity[i].start <= pa &&
		    mem_affinity[i].end >= pa)
			return (mem_affinity[i].domain);
	return (-1);
#else
	return (0);
#endif
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the order [order, oind) queues
 * are known to be empty.  The objective being to reduce the likelihood of
 * long-term fragmentation by promoting contemporaneous allocation and
 * (hopefully) deallocation.
 */
static void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
    int tail)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, tail);
	}
}

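/*
 * Worked example (not from the original source): splitting an order-3 block
 * "m" (8 pages) to satisfy an order-1 request frees m[4] as an order-2 block
 * and m[2] as an order-1 block, leaving the order-1 block [m, m + 2) for the
 * caller.
 */
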
/*
 * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
 * and sized set to the specified free list.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the lower-order queues are
 * known to be empty.  The objective being to reduce the likelihood of long-
 * term fragmentation by promoting contemporaneous allocation and (hopefully)
 * deallocation.
 *
 * The physical page m's buddy must not be free.
 */
static void
vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
{
	u_int n;
	int order;

	KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
	KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
	    ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
	    ("vm_phys_enq_range: page %p and npages %u are misaligned",
	    m, npages));
	do {
		KASSERT(m->order == VM_NFREEORDER,
		    ("vm_phys_enq_range: page %p has unexpected order %d",
		    m, m->order));
		order = ffs(npages) - 1;
		KASSERT(order < VM_NFREEORDER,
		    ("vm_phys_enq_range: order %d is out of range", order));
		vm_freelist_add(fl, m, order, tail);
		n = 1 << order;
		m += n;
		npages -= n;
	} while (npages > 0);
}

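/*
 * Worked example (not from the original source): for npages = 11 the loop
 * enqueues blocks of increasing size, one per set bit of npages: 1 page at
 * order 0, then 2 pages at order 1, then 8 pages at order 3, so that the
 * range ends on the required power-of-two boundary.
 */
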
/*
 * Tries to allocate the specified number of pages from the specified pool
 * within the specified domain.  Returns the actual number of allocated pages
 * and a pointer to each page through the array ma[].
 *
 * The returned pages may not be physically contiguous.  However, in contrast
 * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
 * calling this function once to allocate the desired number of pages will
 * avoid wasted time in vm_phys_split_pages().
 *
 * The free page queues for the specified domain must be locked.
 */
int
vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int avail, end, flind, freelist, i, need, oind, pind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	i = 0;
	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		flind = vm_freelist_to_flind[freelist];
		if (flind < 0)
			continue;
		fl = vm_phys_free_queues[domain][flind][pool];
		for (oind = 0; oind < VM_NFREEORDER; oind++) {
			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
				vm_freelist_rem(fl, m, oind);
				avail = 1 << oind;
				need = imin(npages - i, avail);
				for (end = i + need; i < end;)
					ma[i++] = m++;
				if (need < avail) {
					/*
					 * Return excess pages to fl.  Its
					 * order [0, oind) queues are empty.
					 */
					vm_phys_enq_range(m, avail - need, fl,
					    1);
					return (npages);
				} else if (i == npages)
					return (npages);
			}
		}
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				alt = vm_phys_free_queues[domain][flind][pind];
				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
				    NULL) {
					vm_freelist_rem(alt, m, oind);
					vm_phys_set_pool(pool, m, oind);
					avail = 1 << oind;
					need = imin(npages - i, avail);
					for (end = i + need; i < end;)
						ma[i++] = m++;
					if (need < avail) {
						/*
						 * Return excess pages to fl.
						 * Its order [0, oind) queues
						 * are empty.
						 */
						vm_phys_enq_range(m, avail -
						    need, fl, 1);
						return (npages);
					} else if (i == npages)
						return (npages);
				}
			}
		}
	}
	return (i);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int domain, int pool, int order)
{
	vm_page_t m;
	int freelist;

	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int oind, pind, flind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
	    domain));
	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	flind = vm_freelist_to_flind[freelist];
	/* Check if freelist is present */
	if (flind < 0)
		return (NULL);

	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			/* The order [order, oind) queues are empty. */
			vm_phys_split_pages(m, oind, fl, order, 1);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				/* The order [order, oind) queues are empty. */
				vm_phys_split_pages(m, oind, fl, order, 1);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	bzero(range, page_count * sizeof(*range));
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that expands before
		 * and after vm_page_array.
		 */
		return (EINVAL);
	}
alloc:
#endif

	fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
	    M_WAITOK);
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif

	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	seg = &vm_phys_segs[m->segind];
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

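/*
 * Worked example (hypothetical addresses, not from the original source):
 * assuming PAGE_SHIFT == 12, freeing the order-0 page at 0x5000 computes its
 * buddy as 0x5000 ^ 0x1000 == 0x4000.  If that page is also a free order-0
 * block, the pair coalesces into an order-1 block at 0x4000, and the search
 * repeats at the next order.
 */
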
/*
 * Return the largest possible order of a set of pages starting at m.
 */
static int
max_order(vm_page_t m)
{

	/*
	 * Unsigned "min" is used here so that "order" is assigned
	 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
	 * or the low-order bits of its physical address are zero
	 * because the size of a physical address exceeds the size of
	 * a long.
	 */
	return (min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
	    VM_NFREEORDER - 1));
}

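/*
 * Worked example (hypothetical address, not from the original source):
 * assuming PAGE_SHIFT == 12, a page at 0x254000 has frame number 0x254, so
 * ffsl(0x254) - 1 == 2 and max_order() returns 2: the address is aligned to
 * an order-2 (16 KB) block but not to an order-3 (32 KB) block.
 */
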
/*
 * Free a contiguous, arbitrarily sized set of physical pages, without
 * merging across set boundaries.
 *
 * The free page queues must be locked.
 */
void
vm_phys_enqueue_contig(vm_page_t m, u_long npages)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_page_t m_end;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
	seg = &vm_phys_segs[m->segind];
	fl = (*seg->free_queues)[m->pool];
	m_end = m + npages;
	/* Free blocks of increasing size. */
	while ((order = max_order(m)) < VM_NFREEORDER - 1 &&
	    m + (1 << order) <= m_end) {
		KASSERT(seg == &vm_phys_segs[m->segind],
		    ("%s: page range [%p,%p) spans multiple segments",
		    __func__, m_end - npages, m));
		vm_freelist_add(fl, m, order, 1);
		m += 1 << order;
	}
	/* Free blocks of maximum size. */
	while (m + (1 << order) <= m_end) {
		KASSERT(seg == &vm_phys_segs[m->segind],
		    ("%s: page range [%p,%p) spans multiple segments",
		    __func__, m_end - npages, m));
		vm_freelist_add(fl, m, order, 1);
		m += 1 << order;
	}
	/* Free blocks of diminishing size. */
	while (m < m_end) {
		KASSERT(seg == &vm_phys_segs[m->segind],
		    ("%s: page range [%p,%p) spans multiple segments",
		    __func__, m_end - npages, m));
		order = flsl(m_end - m) - 1;
		vm_freelist_add(fl, m, order, 1);
		m += 1 << order;
	}
}

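/*
 * Worked example (hypothetical frame numbers, not from the original source):
 * freeing 13 pages starting at page frame 6 enqueues blocks of increasing
 * size (2 pages at frame 6, 8 pages at frame 8) and then blocks of
 * diminishing size (2 pages at frame 16, 1 page at frame 18), so that each
 * block is naturally aligned for its order.
 */
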
/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	int order_start, order_end;
	vm_page_t m_start, m_end;

	vm_domain_free_assert_locked(vm_pagequeue_domain(m));

	m_start = m;
	order_start = max_order(m_start);
	if (order_start < VM_NFREEORDER - 1)
		m_start += 1 << order_start;
	m_end = m + npages;
	order_end = max_order(m_end);
	if (order_end < VM_NFREEORDER - 1)
		m_end -= 1 << order_end;
	/*
	 * Avoid unnecessary coalescing by freeing the pages at the start and
	 * end of the range last.
	 */
	if (m_start < m_end)
		vm_phys_enqueue_contig(m_start, m_end - m_start);
	if (order_start < VM_NFREEORDER - 1)
		vm_phys_free_pages(m, order_start);
	if (order_end < VM_NFREEORDER - 1)
		vm_phys_free_pages(m_end, order_end);
}

/*
 * Scan physical memory between the specified addresses "low" and "high" for a
 * run of contiguous physical pages that satisfy the specified conditions, and
 * return the lowest page in the run.  The specified "alignment" determines
 * the alignment of the lowest physical page in the run.  If the specified
 * "boundary" is non-zero, then the run of physical pages cannot span a
 * physical address that is a multiple of "boundary".
 *
 * "npages" must be greater than zero.  Both "alignment" and "boundary" must
 * be a power of two.
 */
vm_page_t
vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, int options)
{
	vm_paddr_t pa_end;
	vm_page_t m_end, m_run, m_start;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (low >= high)
		return (NULL);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (seg->domain != domain)
			continue;
		if (seg->start >= high)
			break;
		if (low >= seg->end)
			continue;
		if (low <= seg->start)
			m_start = seg->first_page;
		else
			m_start = &seg->first_page[atop(low - seg->start)];
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
			continue;
		m_end = &seg->first_page[atop(pa_end - seg->start)];
		m_run = vm_page_scan_contig(npages, m_start, m_end,
		    alignment, boundary, options);
		if (m_run != NULL)
			return (m_run);
	}
	return (NULL);
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

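/*
 * Worked example (hypothetical frame numbers, not from the original source):
 * extracting page frame 2 from a free order-3 block at frame 0 removes the
 * order-3 block from its free list and returns frames 4-7 as an order-2
 * block, frames 0-1 as an order-1 block, and frame 3 as an order-0 block,
 * leaving only frame 2 unfree.
 */
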
/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa_end, pa_start;
	vm_page_t m_run;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	if (low >= high)
		return (NULL);
	m_run = NULL;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high || seg->domain != domain)
			continue;
		if (low >= seg->end)
			break;
		if (low <= seg->start)
			pa_start = seg->start;
		else
			pa_start = low;
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - pa_start < ptoa(npages))
			continue;
		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
		    alignment, boundary);
		if (m_run != NULL)
			break;
	}
	return (m_run);
}

/*
 * Allocate a run of contiguous physical pages from the free list for the
 * specified segment.
 */
static vm_page_t
vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	vm_paddr_t pa, pa_end, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int oind, order, pind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	/* Compute the queue that is the best fit for npages. */
	order = flsl(npages - 1);
	/* Search for a run satisfying the specified conditions. */
	size = npages << PAGE_SHIFT;
	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
	    oind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = (*seg->free_queues)[pind];
			TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
				/*
				 * Is the size of this allocation request
				 * larger than the largest block size?
				 */
				if (order >= VM_NFREEORDER) {
					/*
					 * Determine if a sufficient number of
					 * subsequent blocks to satisfy the
					 * allocation request are free.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					pa_end = pa + size;
					if (pa_end < pa)
						continue;
					for (;;) {
						pa += 1 << (PAGE_SHIFT +
						    VM_NFREEORDER - 1);
						if (pa >= pa_end ||
						    pa < seg->start ||
						    pa >= seg->end)
							break;
						m = &seg->first_page[atop(pa -
						    seg->start)];
						if (m->order != VM_NFREEORDER -
						    1)
							break;
					}
					/* If not, go to the next block. */
					if (pa < pa_end)
						continue;
				}

				/*
				 * Determine if the blocks are within the
				 * given range, satisfy the given alignment,
				 * and do not cross the given boundary.
				 */
				pa = VM_PAGE_TO_PHYS(m_ret);
				pa_end = pa + size;
				if (pa >= low && pa_end <= high &&
				    (pa & (alignment - 1)) == 0 &&
				    rounddown2(pa ^ (pa_end - 1), boundary) == 0)
					goto done;
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, oind);
		if (m->pool != VM_FREEPOOL_DEFAULT)
			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
	}
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << oind);
	if (npages < npages_end) {
		fl = (*seg->free_queues)[VM_FREEPOOL_DEFAULT];
		vm_phys_enq_range(&m_ret[npages], npages_end - npages, fl, 0);
	}
	return (m_ret);
}

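/*
 * Note on the boundary test above (not from the original source):
 * rounddown2(pa ^ (pa_end - 1), boundary) == 0 holds exactly when the first
 * and last byte of the candidate run agree in every address bit at or above
 * log2(boundary), i.e. the run lies within a single boundary-aligned window.
 * For example, with boundary 0x10000, a run [0x24000, 0x2c000) passes
 * (0x24000 ^ 0x2bfff == 0xffff) while [0x2c000, 0x34000) fails
 * (0x2c000 ^ 0x33fff == 0x1ffff) because it crosses 0x30000.
 */
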
/*
 * Return the index of the first unused slot which may be the terminating
 * entry.
 */
static int
vm_phys_avail_count(void)
{
	int i;

	for (i = 0; phys_avail[i + 1]; i += 2)
		continue;
	if (i > PHYS_AVAIL_ENTRIES)
		panic("Improperly terminated phys_avail %d entries", i);

	return (i);
}

/*
 * Assert that a phys_avail entry is valid.
 */
static void
vm_phys_avail_check(int i)
{
	if (phys_avail[i] & PAGE_MASK)
		panic("Unaligned phys_avail[%d]: %#jx", i,
		    (intmax_t)phys_avail[i]);
	if (phys_avail[i + 1] & PAGE_MASK)
		panic("Unaligned phys_avail[%d + 1]: %#jx", i,
		    (intmax_t)phys_avail[i + 1]);
	if (phys_avail[i + 1] < phys_avail[i])
		panic("phys_avail[%d] start %#jx < end %#jx", i,
		    (intmax_t)phys_avail[i], (intmax_t)phys_avail[i + 1]);
}

/*
 * Return the index of an overlapping phys_avail entry or -1.
 */
#ifdef NUMA
static int
vm_phys_avail_find(vm_paddr_t pa)
{
	int i;

	for (i = 0; phys_avail[i + 1]; i += 2)
		if (phys_avail[i] <= pa && phys_avail[i + 1] > pa)
			return (i);
	return (-1);
}
#endif

/*
 * Return the index of the largest entry.
 */
int
vm_phys_avail_largest(void)
{
	vm_paddr_t sz, largesz;
	int largest;
	int i;

	largest = 0;
	largesz = 0;
	for (i = 0; phys_avail[i + 1]; i += 2) {
		sz = vm_phys_avail_size(i);
		if (sz > largesz) {
			largesz = sz;
			largest = i;
		}
	}

	return (largest);
}

vm_paddr_t
vm_phys_avail_size(int i)
{

	return (phys_avail[i + 1] - phys_avail[i]);
}

/*
 * Split an entry at the address 'pa'.  Return zero on success or errno.
 */
static int
vm_phys_avail_split(vm_paddr_t pa, int i)
{
	int cnt;

	vm_phys_avail_check(i);
	if (pa <= phys_avail[i] || pa >= phys_avail[i + 1])
		panic("vm_phys_avail_split: invalid address");
	cnt = vm_phys_avail_count();
	if (cnt >= PHYS_AVAIL_ENTRIES)
		return (ENOSPC);
	memmove(&phys_avail[i + 2], &phys_avail[i],
	    (cnt - i) * sizeof(phys_avail[0]));
	phys_avail[i + 1] = pa;
	phys_avail[i + 2] = pa;
	vm_phys_avail_check(i);
	vm_phys_avail_check(i + 2);

	return (0);
}

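/*
 * Worked example (hypothetical addresses, not from the original source):
 * splitting entry i covering [0x100000, 0x800000) at pa = 0x400000 shifts the
 * following entries up by two slots and leaves [0x100000, 0x400000) at index
 * i and [0x400000, 0x800000) at index i + 2.
 */
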
void
vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_seg *seg;

	if (vm_phys_early_nsegs == -1)
		panic("%s: called after initialization", __func__);
	if (vm_phys_early_nsegs == nitems(vm_phys_early_segs))
		panic("%s: ran out of early segments", __func__);

	seg = &vm_phys_early_segs[vm_phys_early_nsegs++];
	seg->start = start;
	seg->end = end;
}

/*
 * This routine allocates NUMA node specific memory before the page
 * allocator is bootstrapped.
 */
vm_paddr_t
vm_phys_early_alloc(int domain, size_t alloc_size)
{
	int i, mem_index, biggestone;
	vm_paddr_t pa, mem_start, mem_end, size, biggestsize, align;

	KASSERT(domain == -1 || (domain >= 0 && domain < vm_ndomains),
	    ("%s: invalid domain index %d", __func__, domain));

	/*
	 * Search the mem_affinity array for the biggest address
	 * range in the desired domain.  This is used to constrain
	 * the phys_avail selection below.
	 */
	biggestsize = 0;
	mem_index = 0;
	mem_start = 0;
	mem_end = -1;
#ifdef NUMA
	if (mem_affinity != NULL) {
		for (i = 0;; i++) {
			size = mem_affinity[i].end - mem_affinity[i].start;
			if (size == 0)
				break;
			if (domain != -1 && mem_affinity[i].domain != domain)
				continue;
			if (size > biggestsize) {
				mem_index = i;
				biggestsize = size;
			}
		}
		mem_start = mem_affinity[mem_index].start;
		mem_end = mem_affinity[mem_index].end;
	}
#endif

	/*
	 * Now find the biggest physical segment within the desired
	 * free list/domain.
	 */
	biggestsize = 0;
	biggestone = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		/* skip regions that are out of range */
		if (phys_avail[i + 1] - alloc_size < mem_start ||
		    phys_avail[i + 1] > mem_end)
			continue;
		size = vm_phys_avail_size(i);
		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
	}
	alloc_size = round_page(alloc_size);

	/*
	 * Grab single pages from the front to reduce fragmentation.
	 */
	if (alloc_size == PAGE_SIZE) {
		pa = phys_avail[biggestone];
		phys_avail[biggestone] += PAGE_SIZE;
		vm_phys_avail_check(biggestone);
		return (pa);
	}

	/*
	 * Naturally align large allocations.
	 */
	align = phys_avail[biggestone + 1] & (alloc_size - 1);
	if (alloc_size + align > biggestsize)
		panic("cannot find a large enough size\n");
	if (align != 0 &&
	    vm_phys_avail_split(phys_avail[biggestone + 1] - align,
	    biggestone) != 0)
		/* Wasting memory. */
		phys_avail[biggestone + 1] -= align;

	phys_avail[biggestone + 1] -= alloc_size;
	vm_phys_avail_check(biggestone);
	pa = phys_avail[biggestone + 1];
	return (pa);
}

void
vm_phys_early_startup(void)
{
	struct vm_phys_seg *seg;
	int i;

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; i < vm_phys_early_nsegs; i++) {
		seg = &vm_phys_early_segs[i];
		vm_phys_add_seg(seg->start, seg->end);
	}
	vm_phys_early_nsegs = -1;

#ifdef NUMA
	/* Force phys_avail to be split by domain. */
	if (mem_affinity != NULL) {
		int idx;

		for (i = 0; mem_affinity[i].end != 0; i++) {
			idx = vm_phys_avail_find(mem_affinity[i].start);
			if (idx != -1 &&
			    phys_avail[idx] != mem_affinity[i].start)
				vm_phys_avail_split(mem_affinity[i].start, idx);
			idx = vm_phys_avail_find(mem_affinity[i].end);
			if (idx != -1 &&
			    phys_avail[idx] != mem_affinity[i].end)
				vm_phys_avail_split(mem_affinity[i].end, idx);
		}
	}
#endif
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif