/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

struct mem_affinity *mem_affinity;
int *mem_locality;

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;
struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
        RB_ENTRY(vm_phys_fictitious_seg) node;
        /* Memory region data */
        vm_paddr_t      start;
        vm_paddr_t      end;
        vm_page_t       first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);
static struct rwlock vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);
#ifdef VM_FREELIST_ISADMA
#define VM_ISADMA_BOUNDARY      16777216
#endif
#ifdef VM_FREELIST_DMA32
#define VM_DMA32_BOUNDARY       ((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif
static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");
static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);
/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

        KASSERT(range->start != 0 && range->end != 0,
            ("Invalid range passed on search for vm_fictitious page"));
        if (p->start >= range->end)
                return (1);
        if (p->start < range->start)
                return (-1);

        return (0);
}
static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

        /* Check if this is a search for a page */
        if (p1->end == 0)
                return (vm_phys_fictitious_in_range(p1, p2));

        KASSERT(p2->end != 0,
            ("Invalid range passed as second parameter to vm fictitious comparison"));

        /* Searching to add a new range */
        if (p1->end <= p2->start)
                return (-1);
        if (p1->start >= p2->end)
                return (1);

        panic("Trying to add overlapping vm fictitious ranges:\n"
            "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
            (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}
/*
 * Return the index of the first memory domain containing memory that
 * overlaps the range [low, high], preferring "prefer" when it qualifies.
 */
int
vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
{
        domainset_t mask;
        int i;

        if (vm_ndomains == 1 || mem_affinity == NULL)
                return (0);

        DOMAINSET_ZERO(&mask);
        /*
         * Check for any memory that overlaps low, high.
         */
        for (i = 0; mem_affinity[i].end != 0; i++)
                if (mem_affinity[i].start <= high &&
                    mem_affinity[i].end >= low)
                        DOMAINSET_SET(mem_affinity[i].domain, &mask);
        if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
                return (prefer);
        if (DOMAINSET_EMPTY(&mask))
                panic("vm_phys_domain_match: Impossible constraint");
        return (DOMAINSET_FFS(&mask) - 1);
}
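/*
 * Example (an illustrative sketch of the intended semantics, not output
 * from a real machine): on a two-domain system where only domain 1 has
 * memory below 4 GB, vm_phys_domain_match(0, 0, 0xffffffff) returns 1,
 * because the preferred domain 0 has no pages overlapping the range and
 * domain 1 is the first domain set in the resulting mask.
 */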
/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_freelist *fl;
        int dom, error, flind, oind, pind;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
        for (dom = 0; dom < vm_ndomains; dom++) {
                sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
                            "\n  ORDER (SIZE)  |  NUMBER"
                            "\n              ", flind);
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                sbuf_printf(&sbuf, "  |  POOL %d", pind);
                        sbuf_printf(&sbuf, "\n--            ");
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                sbuf_printf(&sbuf, "-- --      ");
                        sbuf_printf(&sbuf, "--\n");
                        for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                                sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
                                    1 << (PAGE_SHIFT - 10 + oind));
                                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                        fl = vm_phys_free_queues[dom][flind][pind];
                                        sbuf_printf(&sbuf, "  |  %6d",
                                            fl[oind].lcnt);
                                }
                                sbuf_printf(&sbuf, "\n");
                        }
                }
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}
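/*
 * The handler above backs the read-only OID registered earlier as
 * vm.phys_free, so the per-domain, per-order free list table can be
 * inspected from userland with:
 *
 *	$ sysctl vm.phys_free
 */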
/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_phys_seg *seg;
        int error, segind;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
                seg = &vm_phys_segs[segind];
                sbuf_printf(&sbuf, "start:     %#jx\n",
                    (uintmax_t)seg->start);
                sbuf_printf(&sbuf, "end:       %#jx\n",
                    (uintmax_t)seg->end);
                sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
                sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}
/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

        if (mem_locality == NULL)
                return (-1);
        if (f >= vm_ndomains || t >= vm_ndomains)
                return (-1);
        return (mem_locality[f * vm_ndomains + t]);
}
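/*
 * Example (illustrative; the concrete values are an assumption, not
 * guaranteed by this interface): with ACPI SLIT-style distances, where
 * 10 conventionally means "local" and larger values mean "more remote",
 * vm_phys_mem_affinity(0, 0) might return 10 and
 * vm_phys_mem_affinity(0, 1) might return 20 on a two-socket system.
 */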
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        int error, i, j;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

        sbuf_printf(&sbuf, "\n");

        for (i = 0; i < vm_ndomains; i++) {
                sbuf_printf(&sbuf, "%d: ", i);
                for (j = 0; j < vm_ndomains; j++) {
                        sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
                }
                sbuf_printf(&sbuf, "\n");
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}
static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

        m->order = order;
        if (tail)
                TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
        else
                TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
        fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

        TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
        fl[order].lcnt--;
        m->order = VM_NFREEORDER;
}
/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
        struct vm_phys_seg *seg;

        KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
            ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
        KASSERT(domain >= 0 && domain < vm_ndomains,
            ("vm_phys_create_seg: invalid domain provided"));
        seg = &vm_phys_segs[vm_phys_nsegs++];
        while (seg > vm_phys_segs && (seg - 1)->start >= end) {
                *seg = *(seg - 1);
                seg--;
        }
        seg->start = start;
        seg->end = end;
        seg->domain = domain;
}
static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
        int i;

        if (mem_affinity == NULL) {
                _vm_phys_create_seg(start, end, 0);
                return;
        }

        for (i = 0;; i++) {
                if (mem_affinity[i].end == 0)
                        panic("Reached end of affinity info");
                if (mem_affinity[i].end <= start)
                        continue;
                if (mem_affinity[i].start > start)
                        panic("No affinity info for start %jx",
                            (uintmax_t)start);
                if (mem_affinity[i].end >= end) {
                        _vm_phys_create_seg(start, end,
                            mem_affinity[i].domain);
                        break;
                }
                _vm_phys_create_seg(start, mem_affinity[i].end,
                    mem_affinity[i].domain);
                start = mem_affinity[i].end;
        }
}
/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
        vm_paddr_t paddr;

        KASSERT((start & PAGE_MASK) == 0,
            ("vm_phys_define_seg: start is not page aligned"));
        KASSERT((end & PAGE_MASK) == 0,
            ("vm_phys_define_seg: end is not page aligned"));

        /*
         * Split the physical memory segment if it spans two or more free
         * list boundaries.
         */
        paddr = start;
#ifdef VM_FREELIST_ISADMA
        if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
                vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
                paddr = VM_ISADMA_BOUNDARY;
        }
#endif
#ifdef VM_FREELIST_LOWMEM
        if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
                vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
                paddr = VM_LOWMEM_BOUNDARY;
        }
#endif
#ifdef VM_FREELIST_DMA32
        if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
                vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
                paddr = VM_DMA32_BOUNDARY;
        }
#endif
        vm_phys_create_seg(paddr, end);
}
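/*
 * Worked example (illustrative, assuming VM_FREELIST_DMA32 is defined
 * and VM_DMA32_BOUNDARY is 4 GB): registering RAM from 3 GB to 5 GB,
 *
 *	vm_phys_add_seg(0xc0000000, 0x140000000);
 *
 * creates two segments, [3 GB, 4 GB) and [4 GB, 5 GB), so that each
 * segment lies entirely on one side of the free list boundary.
 */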
/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        u_long npages;
        int dom, flind, freelist, oind, pind, segind;

        /*
         * Compute the number of free lists, and generate the mapping from the
         * manifest constants VM_FREELIST_* to the free list indices.
         *
         * Initially, the entries of vm_freelist_to_flind[] are set to either
         * 0 or 1 to indicate which free lists should be created.
         */
        npages = 0;
        for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
                seg = &vm_phys_segs[segind];
#ifdef VM_FREELIST_ISADMA
                if (seg->end <= VM_ISADMA_BOUNDARY)
                        vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
                else
#endif
#ifdef VM_FREELIST_LOWMEM
                if (seg->end <= VM_LOWMEM_BOUNDARY)
                        vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
                else
#endif
#ifdef VM_FREELIST_DMA32
                if (
#ifdef VM_DMA32_NPAGES_THRESHOLD
                    /*
                     * Create the DMA32 free list only if the amount of
                     * physical memory above physical address 4G exceeds the
                     * given threshold.
                     */
                    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
                    seg->end <= VM_DMA32_BOUNDARY)
                        vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
                else
#endif
                {
                        npages += atop(seg->end - seg->start);
                        vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
                }
        }
        /* Change each entry into a running total of the free lists. */
        for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
                vm_freelist_to_flind[freelist] +=
                    vm_freelist_to_flind[freelist - 1];
        }
        vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
        KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
        /* Change each entry into a free list index. */
        for (freelist = 0; freelist < VM_NFREELIST; freelist++)
                vm_freelist_to_flind[freelist]--;

        /*
         * Initialize the first_page and free_queues fields of each physical
         * memory segment.
         */
#ifdef VM_PHYSSEG_SPARSE
        npages = 0;
#endif
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
                seg->first_page = &vm_page_array[npages];
                npages += atop(seg->end - seg->start);
#else
                seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef VM_FREELIST_ISADMA
                if (seg->end <= VM_ISADMA_BOUNDARY) {
                        flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: ISADMA flind < 0"));
                } else
#endif
#ifdef VM_FREELIST_LOWMEM
                if (seg->end <= VM_LOWMEM_BOUNDARY) {
                        flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: LOWMEM flind < 0"));
                } else
#endif
#ifdef VM_FREELIST_DMA32
                if (seg->end <= VM_DMA32_BOUNDARY) {
                        flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: DMA32 flind < 0"));
                } else
#endif
                {
                        flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
                        KASSERT(flind >= 0,
                            ("vm_phys_init: DEFAULT flind < 0"));
                }
                seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
        }

        /*
         * Initialize the free queues.
         */
        for (dom = 0; dom < vm_ndomains; dom++) {
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = vm_phys_free_queues[dom][flind][pind];
                                for (oind = 0; oind < VM_NFREEORDER; oind++)
                                        TAILQ_INIT(&fl[oind].pl);
                        }
                }
        }

        rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}
/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
        vm_page_t m_buddy;

        while (oind > order) {
                oind--;
                m_buddy = &m[1 << oind];
                KASSERT(m_buddy->order == VM_NFREEORDER,
                    ("vm_phys_split_pages: page %p has unexpected order %d",
                    m_buddy, m_buddy->order));
                vm_freelist_add(fl, m_buddy, oind, 0);
        }
}
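/*
 * Worked example (illustrative): splitting an order-3 block (eight
 * pages) to satisfy an order-0 request leaves the first page for the
 * caller and frees the buddies in descending order: pages [4..7] as an
 * order-2 block, pages [2..3] as an order-1 block, and page [1] as an
 * order-0 block.
 */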
/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int domain, int pool, int order)
{
        vm_page_t m;
        int freelist;

        for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
                m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
                if (m != NULL)
                        return (m);
        }
        return (NULL);
}
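/*
 * Usage sketch (illustrative only, not taken from a real caller): a
 * caller in the VM system wanting a single page from the default pool
 * of domain 0 would hold the free queue lock across the call:
 *
 *	vm_page_t m;
 *
 *	mtx_lock(&vm_page_queue_free_mtx);
 *	m = vm_phys_alloc_pages(0, VM_FREEPOOL_DEFAULT, 0);
 *	mtx_unlock(&vm_page_queue_free_mtx);
 *	if (m == NULL)
 *		... fall back or retry ...
 */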
/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
{
        struct vm_freelist *alt, *fl;
        vm_page_t m;
        int oind, pind, flind;

        KASSERT(domain >= 0 && domain < vm_ndomains,
            ("vm_phys_alloc_freelist_pages: domain %d is out of range",
            domain));
        KASSERT(freelist < VM_NFREELIST,
            ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
            freelist));
        KASSERT(pool < VM_NFREEPOOL,
            ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

        flind = vm_freelist_to_flind[freelist];
        /* Check if freelist is present */
        if (flind < 0)
                return (NULL);

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        fl = &vm_phys_free_queues[domain][flind][pool][0];
        for (oind = order; oind < VM_NFREEORDER; oind++) {
                m = TAILQ_FIRST(&fl[oind].pl);
                if (m != NULL) {
                        vm_freelist_rem(fl, m, oind);
                        vm_phys_split_pages(m, oind, fl, order);
                        return (m);
                }
        }

        /*
         * The given pool was empty.  Find the largest
         * contiguous, power-of-two-sized set of pages in any
         * pool.  Transfer these pages to the given pool, and
         * use them to satisfy the allocation.
         */
        for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                        alt = &vm_phys_free_queues[domain][flind][pind][0];
                        m = TAILQ_FIRST(&alt[oind].pl);
                        if (m != NULL) {
                                vm_freelist_rem(alt, m, oind);
                                vm_phys_set_pool(pool, m, oind);
                                vm_phys_split_pages(m, oind, fl, order);
                                return (m);
                        }
                }
        }
        return (NULL);
}
/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_seg *seg;
        int segind;

        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                if (pa >= seg->start && pa < seg->end)
                        return (&seg->first_page[atop(pa - seg->start)]);
        }
        return (NULL);
}
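/*
 * Example (illustrative): for an address three pages into a segment,
 * pa - seg->start is 3 * PAGE_SIZE, atop() converts that byte offset
 * into the page index 3, and the result is &seg->first_page[3].
 */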
vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_fictitious_seg tmp, *seg;
        vm_page_t m;

        m = NULL;
        tmp.start = pa;
        tmp.end = 0;

        rw_rlock(&vm_phys_fictitious_reg_lock);
        seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
        rw_runlock(&vm_phys_fictitious_reg_lock);
        if (seg == NULL)
                return (NULL);

        m = &seg->first_page[atop(pa - seg->start)];
        KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

        return (m);
}
static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
        long i;

        bzero(range, page_count * sizeof(*range));
        for (i = 0; i < page_count; i++) {
                vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
                range[i].oflags &= ~VPO_UNMANAGED;
                range[i].busy_lock = VPB_UNBUSIED;
        }
}
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
        struct vm_phys_fictitious_seg *seg;
        vm_page_t fp;
        long page_count;
#ifdef VM_PHYSSEG_DENSE
        long pi, pe;
        long dpage_count;
#endif

        KASSERT(start < end,
            ("Start of segment isn't less than end (start: %jx end: %jx)",
            (uintmax_t)start, (uintmax_t)end));

        page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
        pi = atop(start);
        pe = atop(end);
        if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
                fp = &vm_page_array[pi - first_page];
                if ((pe - first_page) > vm_page_array_size) {
                        /*
                         * We have a segment that starts inside
                         * of vm_page_array, but ends outside of it.
                         *
                         * Use vm_page_array pages for those that are
                         * inside of the vm_page_array range, and
                         * allocate the remaining ones.
                         */
                        dpage_count = vm_page_array_size - (pi - first_page);
                        vm_phys_fictitious_init_range(fp, start, dpage_count,
                            memattr);
                        page_count -= dpage_count;
                        start += ptoa(dpage_count);
                        goto alloc;
                }
                /*
                 * We can allocate the full range from vm_page_array,
                 * so there's no need to register the range in the tree.
                 */
                vm_phys_fictitious_init_range(fp, start, page_count, memattr);
                return (0);
        } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
                /*
                 * We have a segment that ends inside of vm_page_array,
                 * but starts outside of it.
                 */
                fp = &vm_page_array[0];
                dpage_count = pe - first_page;
                vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
                    memattr);
                end -= ptoa(dpage_count);
                page_count -= dpage_count;
        } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
                /*
                 * Trying to register a fictitious range that expands before
                 * and after vm_page_array.
                 */
                return (EINVAL);
        }
alloc:
#endif

        fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
            M_WAITOK);
        vm_phys_fictitious_init_range(fp, start, page_count, memattr);

        seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
        seg->start = start;
        seg->end = end;
        seg->first_page = fp;

        rw_wlock(&vm_phys_fictitious_reg_lock);
        RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
        rw_wunlock(&vm_phys_fictitious_reg_lock);

        return (0);
}
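/*
 * Usage sketch (illustrative; "sc_bar_pa" and "sc_bar_len" are
 * hypothetical driver softc fields, not names from this file): a
 * device driver exposing a memory-mapped BAR as fictitious pages might
 * register and later unregister the range as follows:
 *
 *	error = vm_phys_fictitious_reg_range(sc->sc_bar_pa,
 *	    sc->sc_bar_pa + sc->sc_bar_len, VM_MEMATTR_UNCACHEABLE);
 *	if (error != 0)
 *		return (error);
 *	...
 *	vm_phys_fictitious_unreg_range(sc->sc_bar_pa,
 *	    sc->sc_bar_pa + sc->sc_bar_len);
 */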
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
        struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
        long pi, pe;
#endif

        KASSERT(start < end,
            ("Start of segment isn't less than end (start: %jx end: %jx)",
            (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
        pi = atop(start);
        pe = atop(end);
        if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
                if ((pe - first_page) <= vm_page_array_size) {
                        /*
                         * This segment was allocated using vm_page_array
                         * only, there's nothing to do since those pages
                         * were never added to the tree.
                         */
                        return;
                }
                /*
                 * We have a segment that starts inside
                 * of vm_page_array, but ends outside of it.
                 *
                 * Calculate how many pages were added to the
                 * tree and free them.
                 */
                start = ptoa(first_page + vm_page_array_size);
        } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
                /*
                 * We have a segment that ends inside of vm_page_array,
                 * but starts outside of it.
                 */
                end = ptoa(first_page);
        } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
                /* Since it's not possible to register such a range, panic. */
                panic(
                    "Unregistering not registered fictitious range [%#jx:%#jx]",
                    (uintmax_t)start, (uintmax_t)end);
        }
#endif

        tmp.start = start;
        tmp.end = 0;

        rw_wlock(&vm_phys_fictitious_reg_lock);
        seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
        if (seg->start != start || seg->end != end) {
                rw_wunlock(&vm_phys_fictitious_reg_lock);
                panic(
                    "Unregistering not registered fictitious range [%#jx:%#jx]",
                    (uintmax_t)start, (uintmax_t)end);
        }
        RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
        rw_wunlock(&vm_phys_fictitious_reg_lock);
        free(seg->first_page, M_FICT_PAGES);
        free(seg, M_FICT_PAGES);
}
/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa;
        vm_page_t m_buddy;

        KASSERT(m->order == VM_NFREEORDER,
            ("vm_phys_free_pages: page %p has unexpected order %d",
            m, m->order));
        KASSERT(m->pool < VM_NFREEPOOL,
            ("vm_phys_free_pages: page %p has unexpected pool %d",
            m, m->pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_free_pages: order %d is out of range", order));
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        seg = &vm_phys_segs[m->segind];
        if (order < VM_NFREEORDER - 1) {
                pa = VM_PAGE_TO_PHYS(m);
                do {
                        pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
                        if (pa < seg->start || pa >= seg->end)
                                break;
                        m_buddy = &seg->first_page[atop(pa - seg->start)];
                        if (m_buddy->order != order)
                                break;
                        fl = (*seg->free_queues)[m_buddy->pool];
                        vm_freelist_rem(fl, m_buddy, order);
                        if (m_buddy->pool != m->pool)
                                vm_phys_set_pool(m->pool, m_buddy, order);
                        order++;
                        pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
                        m = &seg->first_page[atop(pa - seg->start)];
                } while (order < VM_NFREEORDER - 1);
        }
        fl = (*seg->free_queues)[m->pool];
        vm_freelist_add(fl, m, order, 1);
}
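/*
 * Worked example (illustrative, 4 KB pages): freeing the order-0 page
 * at physical address 0x2000 computes its buddy address as
 * 0x2000 ^ 0x1000 = 0x3000.  If the page at 0x3000 is also a free
 * order-0 page, the pair is merged into an order-1 block at 0x2000 and
 * the search repeats at the next order; otherwise the page is simply
 * placed at the tail of the order-0 free list.
 */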
/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
        u_int n;
        int order;

        /*
         * Avoid unnecessary coalescing by freeing the pages in the largest
         * possible power-of-two-sized subsets.
         */
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        for (;; npages -= n) {
                /*
                 * Unsigned "min" is used here so that "order" is assigned
                 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
                 * or the low-order bits of its physical address are zero
                 * because the size of a physical address exceeds the size of
                 * a long.
                 */
                order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
                    VM_NFREEORDER - 1);
                n = 1 << order;
                if (npages < n)
                        break;
                vm_phys_free_pages(m, order);
                m += n;
        }
        /* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
        for (; npages > 0; npages -= n) {
                order = flsl(npages) - 1;
                n = 1 << order;
                vm_phys_free_pages(m, order);
                m += n;
        }
}
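/*
 * Worked example (illustrative, 4 KB pages): freeing 13 pages whose
 * first page frame number is 4 proceeds as follows.  The first loop is
 * limited by alignment: it frees an order-2 block (pfn 4-7) and then
 * an order-3 block (pfn 8-15), leaving one page, whose alignment now
 * permits a block larger than the remainder.  The second loop is
 * limited by size and frees the last page as an order-0 block.
 */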
/*
 * Scan physical memory between the specified addresses "low" and "high" for a
 * run of contiguous physical pages that satisfy the specified conditions, and
 * return the lowest page in the run.  The specified "alignment" determines
 * the alignment of the lowest physical page in the run.  If the specified
 * "boundary" is non-zero, then the run of physical pages cannot span a
 * physical address that is a multiple of "boundary".
 *
 * "npages" must be greater than zero.  Both "alignment" and "boundary" must
 * be a power of two.
 */
vm_page_t
vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, int options)
{
        vm_paddr_t pa_end;
        vm_page_t m_end, m_run, m_start;
        struct vm_phys_seg *seg;
        int segind;

        KASSERT(npages > 0, ("npages is 0"));
        KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
        KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
        if (low >= high)
                return (NULL);
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                if (seg->domain != domain)
                        continue;
                if (seg->start >= high)
                        break;
                if (low >= seg->end)
                        continue;
                if (low <= seg->start)
                        m_start = seg->first_page;
                else
                        m_start = &seg->first_page[atop(low - seg->start)];
                if (high < seg->end)
                        pa_end = high;
                else
                        pa_end = seg->end;
                if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
                        continue;
                m_end = &seg->first_page[atop(pa_end - seg->start)];
                m_run = vm_page_scan_contig(npages, m_start, m_end,
                    alignment, boundary, options);
                if (m_run != NULL)
                        return (m_run);
        }
        return (NULL);
}
/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
        vm_page_t m_tmp;

        for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
                m_tmp->pool = pool;
}
/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa, pa_half;
        vm_page_t m_set, m_tmp;
        int order;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

        /*
         * First, find the contiguous, power of two-sized set of free
         * physical pages containing the given physical page "m" and
         * assign it to "m_set".
         */
        seg = &vm_phys_segs[m->segind];
        for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
            order < VM_NFREEORDER - 1; ) {
                order++;
                pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
                if (pa >= seg->start)
                        m_set = &seg->first_page[atop(pa - seg->start)];
                else
                        return (FALSE);
        }
        if (m_set->order < order)
                return (FALSE);
        if (m_set->order == VM_NFREEORDER)
                return (FALSE);
        KASSERT(m_set->order < VM_NFREEORDER,
            ("vm_phys_unfree_page: page %p has unexpected order %d",
            m_set, m_set->order));

        /*
         * Next, remove "m_set" from the free lists.  Finally, extract
         * "m" from "m_set" using an iterative algorithm: While "m_set"
         * is larger than a page, shrink "m_set" by returning the half
         * of "m_set" that does not contain "m" to the free lists.
         */
        fl = (*seg->free_queues)[m_set->pool];
        order = m_set->order;
        vm_freelist_rem(fl, m_set, order);
        while (order > 0) {
                order--;
                pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
                if (m->phys_addr < pa_half)
                        m_tmp = &seg->first_page[atop(pa_half - seg->start)];
                else {
                        m_tmp = m_set;
                        m_set = &seg->first_page[atop(pa_half - seg->start)];
                }
                vm_freelist_add(fl, m_tmp, order, 0);
        }
        KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
        return (TRUE);
}
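/*
 * Worked example (illustrative): if "m" is the third page of a free
 * order-2 block, the whole block is removed from its free list, the
 * half not containing "m" (the first two pages) is returned as an
 * order-1 block, and the remaining order-1 half is split again,
 * returning "m"'s order-0 buddy, until "m" alone is left in hand.
 */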
/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
        vm_paddr_t pa_end, pa_start;
        vm_page_t m_run;
        struct vm_phys_seg *seg;
        int segind;

        KASSERT(npages > 0, ("npages is 0"));
        KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
        KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        if (low >= high)
                return (NULL);
        m_run = NULL;
        for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
                seg = &vm_phys_segs[segind];
                if (seg->start >= high || seg->domain != domain)
                        continue;
                if (low >= seg->end)
                        break;
                if (low <= seg->start)
                        pa_start = seg->start;
                else
                        pa_start = low;
                if (high < seg->end)
                        pa_end = high;
                else
                        pa_end = seg->end;
                if (pa_end - pa_start < ptoa(npages))
                        continue;
                m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
                    alignment, boundary);
                if (m_run != NULL)
                        break;
        }
        return (m_run);
}
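/*
 * Usage sketch (illustrative only, not taken from a real caller):
 * allocating 16 physically contiguous pages from domain 0, below 4 GB,
 * aligned to 64 KB, with no boundary restriction (boundary 0 disables
 * the boundary check):
 *
 *	vm_page_t m;
 *
 *	mtx_lock(&vm_page_queue_free_mtx);
 *	m = vm_phys_alloc_contig(0, 16, 0, (vm_paddr_t)1 << 32,
 *	    65536, 0);
 *	mtx_unlock(&vm_page_queue_free_mtx);
 */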
/*
 * Allocate a run of contiguous physical pages from the free list for the
 * specified segment.
 */
static vm_page_t
vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
        struct vm_freelist *fl;
        vm_paddr_t pa, pa_end, size;
        vm_page_t m, m_ret;
        u_long npages_end;
        int oind, order, pind;

        KASSERT(npages > 0, ("npages is 0"));
        KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
        KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        /* Compute the queue that is the best fit for npages. */
        for (order = 0; (1 << order) < npages; order++);
        /* Search for a run satisfying the specified conditions. */
        size = npages << PAGE_SHIFT;
        for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
            oind++) {
                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                        fl = (*seg->free_queues)[pind];
                        TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
                                /*
                                 * Is the size of this allocation request
                                 * larger than the largest block size?
                                 */
                                if (order >= VM_NFREEORDER) {
                                        /*
                                         * Determine if a sufficient number of
                                         * subsequent blocks to satisfy the
                                         * allocation request are free.
                                         */
                                        pa = VM_PAGE_TO_PHYS(m_ret);
                                        pa_end = pa + size;
                                        for (;;) {
                                                pa += 1 << (PAGE_SHIFT +
                                                    VM_NFREEORDER - 1);
                                                if (pa >= pa_end ||
                                                    pa < seg->start ||
                                                    pa >= seg->end)
                                                        break;
                                                m = &seg->first_page[atop(pa -
                                                    seg->start)];
                                                if (m->order != VM_NFREEORDER -
                                                    1)
                                                        break;
                                        }
                                        /* If not, go to the next block. */
                                        if (pa < pa_end)
                                                continue;
                                }

                                /*
                                 * Determine if the blocks are within the
                                 * given range, satisfy the given alignment,
                                 * and do not cross the given boundary.
                                 */
                                pa = VM_PAGE_TO_PHYS(m_ret);
                                pa_end = pa + size;
                                if (pa >= low && pa_end <= high &&
                                    (pa & (alignment - 1)) == 0 &&
                                    rounddown2(pa ^ (pa_end - 1), boundary) == 0)
                                        goto done;
                        }
                }
        }
        return (NULL);
done:
        for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
                fl = (*seg->free_queues)[m->pool];
                vm_freelist_rem(fl, m, m->order);
        }
        if (m_ret->pool != VM_FREEPOOL_DEFAULT)
                vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
        fl = (*seg->free_queues)[m_ret->pool];
        vm_phys_split_pages(m_ret, oind, fl, order);
        /* Return excess pages to the free lists. */
        npages_end = roundup2(npages, 1 << imin(oind, order));
        if (npages < npages_end)
                vm_phys_free_contig(&m_ret[npages], npages_end - npages);
        return (m_ret);
}
#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
        struct vm_freelist *fl;
        int flind, oind, pind, dom;

        for (dom = 0; dom < vm_ndomains; dom++) {
                db_printf("DOMAIN: %d\n", dom);
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        db_printf("FREE LIST %d:\n"
                            "\n  ORDER (SIZE)  |  NUMBER"
                            "\n              ", flind);
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                db_printf("  |  POOL %d", pind);
                        db_printf("\n--            ");
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                db_printf("-- --      ");
                        db_printf("--\n");
                        for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                                db_printf("  %2.2d (%6.6dK)", oind,
                                    1 << (PAGE_SHIFT - 10 + oind));
                                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                        fl = vm_phys_free_queues[dom][flind][pind];
                                        db_printf("  |  %6.6d", fl[oind].lcnt);