/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

#ifdef NUMA
struct mem_affinity __read_mostly *mem_affinity;
int __read_mostly *mem_locality;
#endif

int __read_mostly vm_ndomains = 1;
domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);

struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
int __read_mostly vm_phys_nsegs;
struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock_padalign vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");
static struct vm_freelist __aligned(CACHE_LINE_SIZE)
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL]
    [VM_NFREEORDER_MAX];
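/*
 * Each (domain, free list, pool) triple selects an array of buddy
 * queues, one per order: queue "oind" links blocks of 2^oind
 * contiguous pages.
 */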
static int __read_mostly vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif
static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#ifdef NUMA
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");
static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order, int tail);
/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
	    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}
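/*
 * Note that a lookup key whose "end" is zero is treated as a point
 * query by the comparator above, which is how RB_FIND() locates the
 * segment containing a single physical address.
 */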
int
vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
{
#ifdef NUMA
	domainset_t mask;
	int i;

	if (vm_ndomains == 1 || mem_affinity == NULL)
		return (0);

	DOMAINSET_ZERO(&mask);
	/*
	 * Check for any memory that overlaps low, high.
	 */
	for (i = 0; mem_affinity[i].end != 0; i++)
		if (mem_affinity[i].start <= high &&
		    mem_affinity[i].end >= low)
			DOMAINSET_SET(mem_affinity[i].domain, &mask);
	if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
		return (prefer);
	if (DOMAINSET_EMPTY(&mask))
		panic("vm_phys_domain_match: Impossible constraint");
	return (DOMAINSET_FFS(&mask) - 1);
#else
	return (0);
#endif
}
/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf,"\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{
#ifdef NUMA
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef NUMA
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif
static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, listq);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}
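/*
 * The while loop above shifts existing entries upward so that
 * vm_phys_segs[] remains sorted by starting address; vm_phys_init()
 * relies on this ordering when it coalesces adjacent segments.
 */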
static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef NUMA
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}
/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef	VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}
/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef	VM_FREELIST_DMA32
		if (
#ifdef	VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;
	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}
	/*
	 * Coalesce physical memory segments that are contiguous and share the
	 * same per-domain free queues.
	 */
	prev_seg = vm_phys_segs;
	seg = &vm_phys_segs[1];
	end_seg = &vm_phys_segs[vm_phys_nsegs];
	while (seg < end_seg) {
		if (prev_seg->end == seg->start &&
		    prev_seg->free_queues == seg->free_queues) {
			prev_seg->end = seg->end;
			KASSERT(prev_seg->domain == seg->domain,
			    ("vm_phys_init: free queues cannot span domains"));
			vm_phys_nsegs--;
			end_seg--;
			for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
				*tmp_seg = *(tmp_seg + 1);
		} else {
			prev_seg = seg;
			seg++;
		}
	}
	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}
/*
 * Register info about the NUMA topology of the system.
 *
 * Invoked by platform-dependent code prior to vm_phys_init().
 */
void
vm_phys_register_domains(int ndomains, struct mem_affinity *affinity,
    int *locality)
{
#ifdef NUMA
	int d, i;

	/*
	 * For now the only override value that we support is 1, which
	 * effectively disables NUMA-awareness in the allocators.
	 */
	d = 0;
	TUNABLE_INT_FETCH("vm.numa.disabled", &d);
	if (d)
		ndomains = 1;

	if (ndomains > 1) {
		vm_ndomains = ndomains;
		mem_affinity = affinity;
		mem_locality = locality;
	}

	for (i = 0; i < vm_ndomains; i++)
		DOMAINSET_SET(i, &all_domains);
#else
	(void)ndomains;
	(void)affinity;
	(void)locality;
#endif
}
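/*
 * For example (hypothetical), a platform layer that has parsed a
 * two-domain SRAT might call
 * vm_phys_register_domains(2, affinity_table, locality_table) from its
 * early startup code, before vm_phys_init() runs.
 */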
int
_vm_phys_domain(vm_paddr_t pa)
{
#ifdef NUMA
	int i;

	if (vm_ndomains == 1 || mem_affinity == NULL)
		return (0);

	/*
	 * Check for any memory that overlaps.
	 */
	for (i = 0; mem_affinity[i].end != 0; i++)
		if (mem_affinity[i].start <= pa &&
		    mem_affinity[i].end >= pa)
			return (mem_affinity[i].domain);
#endif
	return (0);
}
/*
 * Split a contiguous, power of two-sized set of physical pages.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the order [order, oind) queues
 * are known to be empty.  The objective being to reduce the likelihood of
 * long-term fragmentation by promoting contemporaneous allocation and
 * (hopefully) deallocation.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
    int tail)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, tail);
	}
}
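/*
 * For example, carving an order 2 (4-page) allocation out of a free
 * order 5 (32-page) block frees buddies of orders 4, 3, and 2, leaving
 * the lowest 4 pages for the caller.
 */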
/*
 * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
 * and sized set to the specified free list.
 *
 * When this function is called by a page allocation function, the caller
 * should request insertion at the head unless the lower-order queues are
 * known to be empty.  The objective being to reduce the likelihood of long-
 * term fragmentation by promoting contemporaneous allocation and (hopefully)
 * deallocation.
 *
 * The physical page m's buddy must not be free.
 */
static void
vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
{
	u_int n;
	int order;

	KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
	KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
	    ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
	    ("vm_phys_enq_range: page %p and npages %u are misaligned",
	    m, npages));
	do {
		KASSERT(m->order == VM_NFREEORDER,
		    ("vm_phys_enq_range: page %p has unexpected order %d",
		    m, m->order));
		order = ffs(npages) - 1;
		KASSERT(order < VM_NFREEORDER,
		    ("vm_phys_enq_range: order %d is out of range", order));
		vm_freelist_add(fl, m, order, tail);
		n = 1 << order;
		m += n;
		npages -= n;
	} while (npages > 0);
}
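/*
 * For example, a suitably aligned run of 5 pages is queued as an
 * order 0 block followed by an order 2 block: ffs(5) - 1 selects
 * order 0 first, and the remaining 4 pages form a single order 2
 * block.
 */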
/*
 * Tries to allocate the specified number of pages from the specified pool
 * within the specified domain.  Returns the actual number of allocated pages
 * and a pointer to each page through the array ma[].
 *
 * The returned pages may not be physically contiguous.  However, in contrast
 * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
 * calling this function once to allocate the desired number of pages will
 * avoid wasted time in vm_phys_split_pages().
 *
 * The free page queues for the specified domain must be locked.
 */
int
vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int avail, end, flind, freelist, i, need, oind, pind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_npages: pool %d is out of range", pool));
	KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
	    ("vm_phys_alloc_npages: npages %d is out of range", npages));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	i = 0;
	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		flind = vm_freelist_to_flind[freelist];
		if (flind < 0)
			continue;
		fl = vm_phys_free_queues[domain][flind][pool];
		for (oind = 0; oind < VM_NFREEORDER; oind++) {
			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
				vm_freelist_rem(fl, m, oind);
				avail = 1 << oind;
				need = imin(npages - i, avail);
				for (end = i + need; i < end;)
					ma[i++] = m++;
				if (need < avail) {
					/*
					 * Return excess pages to fl.  Its
					 * order [0, oind) queues are empty.
					 */
					vm_phys_enq_range(m, avail - need, fl,
					    1);
					return (npages);
				} else if (i == npages)
					return (npages);
			}
		}
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				alt = vm_phys_free_queues[domain][flind][pind];
				while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
				    NULL) {
					vm_freelist_rem(alt, m, oind);
					vm_phys_set_pool(pool, m, oind);
					avail = 1 << oind;
					need = imin(npages - i, avail);
					for (end = i + need; i < end;)
						ma[i++] = m++;
					if (need < avail) {
						/*
						 * Return excess pages to fl.
						 * Its order [0, oind) queues
						 * are empty.
						 */
						vm_phys_enq_range(m, avail -
						    need, fl, 1);
						return (npages);
					} else if (i == npages)
						return (npages);
				}
			}
		}
	}
	return (i);
}
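/*
 * Note that the return value above is the count of pages actually
 * allocated, which falls short of "npages" only when the domain's free
 * lists are exhausted; callers must be prepared for a short return.
 */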
/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int domain, int pool, int order)
{
	vm_page_t m;
	int freelist;

	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}
/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
{
	struct vm_freelist *alt, *fl;
	vm_page_t m;
	int oind, pind, flind;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
	    domain));
	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	flind = vm_freelist_to_flind[freelist];
	/* Check if freelist is present */
	if (flind < 0)
		return (NULL);

	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			/* The order [order, oind) queues are empty. */
			vm_phys_split_pages(m, oind, fl, order, 1);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				/* The order [order, oind) queues are empty. */
				vm_phys_split_pages(m, oind, fl, order, 1);
				return (m);
			}
		}
	}
	return (NULL);
}
/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	bzero(range, page_count * sizeof(*range));
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that expands before
		 * and after vm_page_array.
		 */
		return (EINVAL);
	} else {
alloc:
#endif
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK);
#ifdef VM_PHYSSEG_DENSE
	}
#endif
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif

	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}
/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	seg = &vm_phys_segs[m->segind];
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}
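/*
 * The buddy of a 2^order-page block lives at the address obtained by
 * flipping a single bit: pa ^ (1 << (PAGE_SHIFT + order)).  For
 * example, with 4KB pages the order 0 buddy of 0x2000 is 0x3000, and
 * after merging, the order 1 buddy of 0x2000 is 0x0000.
 */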
/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		if (npages < (n = 1 << order))
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}
/*
 * Scan physical memory between the specified addresses "low" and "high" for a
 * run of contiguous physical pages that satisfy the specified conditions, and
 * return the lowest page in the run.  The specified "alignment" determines
 * the alignment of the lowest physical page in the run.  If the specified
 * "boundary" is non-zero, then the run of physical pages cannot span a
 * physical address that is a multiple of "boundary".
 *
 * "npages" must be greater than zero.  Both "alignment" and "boundary" must
 * be a power of two.
 */
vm_page_t
vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, int options)
{
	vm_paddr_t pa_end;
	vm_page_t m_end, m_run, m_start;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (low >= high)
		return (NULL);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (seg->domain != domain)
			continue;
		if (seg->start >= high)
			break;
		if (low >= seg->end)
			continue;
		if (low <= seg->start)
			m_start = seg->first_page;
		else
			m_start = &seg->first_page[atop(low - seg->start)];
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
			continue;
		m_end = &seg->first_page[atop(pa_end - seg->start)];
		m_run = vm_page_scan_contig(npages, m_start, m_end,
		    alignment, boundary, options);
		if (m_run != NULL)
			return (m_run);
	}
	return (NULL);
}
/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}
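/*
 * For example, unfreeing a single page found inside a free order 3
 * block returns halves of orders 2, 1, and 0 to the free lists,
 * leaving only the requested page removed.
 */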
/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa_end, pa_start;
	vm_page_t m_run;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	vm_domain_free_assert_locked(VM_DOMAIN(domain));
	if (low >= high)
		return (NULL);
	m_run = NULL;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high || seg->domain != domain)
			continue;
		if (low >= seg->end)
			break;
		if (low <= seg->start)
			pa_start = seg->start;
		else
			pa_start = low;
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - pa_start < ptoa(npages))
			continue;
		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
		    alignment, boundary);
		if (m_run != NULL)
			break;
	}
	return (m_run);
}
/*
 * Allocate a run of contiguous physical pages from the free list for the
 * specified segment.
 */
static vm_page_t
vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	vm_paddr_t pa, pa_end, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int oind, order, pind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
	/* Compute the queue that is the best fit for npages. */
	order = flsl(npages - 1);
	/* Search for a run satisfying the specified conditions. */
	size = npages << PAGE_SHIFT;
	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
	    oind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = (*seg->free_queues)[pind];
			TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
				/*
				 * Is the size of this allocation request
				 * larger than the largest block size?
				 */
				if (order >= VM_NFREEORDER) {
					/*
					 * Determine if a sufficient number of
					 * subsequent blocks to satisfy the
					 * allocation request are free.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					pa_end = pa + size;
					if (pa_end < pa)
						continue;
					for (;;) {
						pa += 1 << (PAGE_SHIFT +
						    VM_NFREEORDER - 1);
						if (pa >= pa_end ||
						    pa < seg->start ||
						    pa >= seg->end)
							break;
						m = &seg->first_page[atop(pa -
						    seg->start)];
						if (m->order != VM_NFREEORDER -
						    1)
							break;
					}
					/* If not, go to the next block. */
					if (pa < pa_end)
						continue;
				}

				/*
				 * Determine if the blocks are within the
				 * given range, satisfy the given alignment,
				 * and do not cross the given boundary.
				 */
				pa = VM_PAGE_TO_PHYS(m_ret);
				pa_end = pa + size;
				if (pa >= low && pa_end <= high &&
				    (pa & (alignment - 1)) == 0 &&
				    rounddown2(pa ^ (pa_end - 1), boundary) == 0)
					goto done;
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, oind);
		if (m->pool != VM_FREEPOOL_DEFAULT)
			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
	}
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << oind);
	if (npages < npages_end) {
		fl = (*seg->free_queues)[VM_FREEPOOL_DEFAULT];
		vm_phys_enq_range(&m_ret[npages], npages_end - npages, fl, 0);
	}
	return (m_ret);
}
#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif