]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/vm/vm_phys.c
Merge llvm-project release/16.x llvmorg-16.0.1-0-gcd89023f7979
[FreeBSD/FreeBSD.git] / sys / vm / vm_phys.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2002-2006 Rice University
5  * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
6  * All rights reserved.
7  *
8  * This software was developed for the FreeBSD Project by Alan L. Cox,
9  * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
24  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
30  * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 /*
35  *      Physical memory system implementation
36  *
37  * Any external functions defined by this module are only to be used by the
38  * virtual memory system.
39  */
40
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43
44 #include "opt_ddb.h"
45 #include "opt_vm.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/domainset.h>
50 #include <sys/lock.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/queue.h>
56 #include <sys/rwlock.h>
57 #include <sys/sbuf.h>
58 #include <sys/sysctl.h>
59 #include <sys/tree.h>
60 #include <sys/vmmeter.h>
61
62 #include <ddb/ddb.h>
63
64 #include <vm/vm.h>
65 #include <vm/vm_extern.h>
66 #include <vm/vm_param.h>
67 #include <vm/vm_kern.h>
68 #include <vm/vm_object.h>
69 #include <vm/vm_page.h>
70 #include <vm/vm_phys.h>
71 #include <vm/vm_pagequeue.h>
72
73 _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
74     "Too many physsegs.");
75
76 #ifdef NUMA
77 struct mem_affinity __read_mostly *mem_affinity;
78 int __read_mostly *mem_locality;
79 #endif
80
81 int __read_mostly vm_ndomains = 1;
82 domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);
83
84 struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
85 int __read_mostly vm_phys_nsegs;
86 static struct vm_phys_seg vm_phys_early_segs[8];
87 static int vm_phys_early_nsegs;
88
89 struct vm_phys_fictitious_seg;
90 static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
91     struct vm_phys_fictitious_seg *);
92
93 RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
94     RB_INITIALIZER(&vm_phys_fictitious_tree);
95
96 struct vm_phys_fictitious_seg {
97         RB_ENTRY(vm_phys_fictitious_seg) node;
98         /* Memory region data */
99         vm_paddr_t      start;
100         vm_paddr_t      end;
101         vm_page_t       first_page;
102 };
103
104 RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
105     vm_phys_fictitious_cmp);
106
107 static struct rwlock_padalign vm_phys_fictitious_reg_lock;
108 MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");
109
110 static struct vm_freelist __aligned(CACHE_LINE_SIZE)
111     vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL]
112     [VM_NFREEORDER_MAX];
113
114 static int __read_mostly vm_nfreelists;
115
116 /*
117  * These "avail lists" are globals used to communicate boot-time physical
118  * memory layout to other parts of the kernel.  Each physically contiguous
119  * region of memory is defined by a start address at an even index and an
120  * end address at the following odd index.  Each list is terminated by a
121  * pair of zero entries.
122  *
123  * dump_avail tells the dump code what regions to include in a crash dump, and
124  * phys_avail is all of the remaining physical memory that is available for
125  * the vm system.
126  *
127  * Initially dump_avail and phys_avail are identical.  Boot time memory
128  * allocations remove extents from phys_avail that may still be included
129  * in dumps.
130  */
131 vm_paddr_t phys_avail[PHYS_AVAIL_COUNT];
132 vm_paddr_t dump_avail[PHYS_AVAIL_COUNT];
133
134 /*
135  * Provides the mapping from VM_FREELIST_* to free list indices (flind).
136  */
137 static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];
138
139 CTASSERT(VM_FREELIST_DEFAULT == 0);
140
141 #ifdef VM_FREELIST_DMA32
142 #define VM_DMA32_BOUNDARY       ((vm_paddr_t)1 << 32)
143 #endif
144
145 /*
146  * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
147  * the ordering of the free list boundaries.
148  */
149 #if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
150 CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
151 #endif
152
153 static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
154 SYSCTL_OID(_vm, OID_AUTO, phys_free,
155     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
156     sysctl_vm_phys_free, "A",
157     "Phys Free Info");
158
159 static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
160 SYSCTL_OID(_vm, OID_AUTO, phys_segs,
161     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
162     sysctl_vm_phys_segs, "A",
163     "Phys Seg Info");
164
165 #ifdef NUMA
166 static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
167 SYSCTL_OID(_vm, OID_AUTO, phys_locality,
168     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
169     sysctl_vm_phys_locality, "A",
170     "Phys Locality Info");
171 #endif
172
173 SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
174     &vm_ndomains, 0, "Number of physical memory domains available.");
175
176 static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
177 static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
178 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
179     int order, int tail);
180
181 /*
182  * Red-black tree helpers for vm fictitious range management.
183  */
184 static inline int
185 vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
186     struct vm_phys_fictitious_seg *range)
187 {
188
189         KASSERT(range->start != 0 && range->end != 0,
190             ("Invalid range passed on search for vm_fictitious page"));
191         if (p->start >= range->end)
192                 return (1);
193         if (p->start < range->start)
194                 return (-1);
195
196         return (0);
197 }
198
199 static int
200 vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
201     struct vm_phys_fictitious_seg *p2)
202 {
203
204         /* Check if this is a search for a page */
205         if (p1->end == 0)
206                 return (vm_phys_fictitious_in_range(p1, p2));
207
208         KASSERT(p2->end != 0,
209     ("Invalid range passed as second parameter to vm fictitious comparison"));
210
211         /* Searching to add a new range */
212         if (p1->end <= p2->start)
213                 return (-1);
214         if (p1->start >= p2->end)
215                 return (1);
216
217         panic("Trying to add overlapping vm fictitious ranges:\n"
218             "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
219             (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
220 }
221
222 int
223 vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
224 {
225 #ifdef NUMA
226         domainset_t mask;
227         int i;
228
229         if (vm_ndomains == 1 || mem_affinity == NULL)
230                 return (0);
231
232         DOMAINSET_ZERO(&mask);
233         /*
234          * Check for any memory that overlaps low, high.
235          */
236         for (i = 0; mem_affinity[i].end != 0; i++)
237                 if (mem_affinity[i].start <= high &&
238                     mem_affinity[i].end >= low)
239                         DOMAINSET_SET(mem_affinity[i].domain, &mask);
240         if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
241                 return (prefer);
242         if (DOMAINSET_EMPTY(&mask))
243                 panic("vm_phys_domain_match:  Impossible constraint");
244         return (DOMAINSET_FFS(&mask) - 1);
245 #else
246         return (0);
247 #endif
248 }
249
250 /*
251  * Outputs the state of the physical memory allocator, specifically,
252  * the amount of physical memory in each free list.
253  */
254 static int
255 sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
256 {
257         struct sbuf sbuf;
258         struct vm_freelist *fl;
259         int dom, error, flind, oind, pind;
260
261         error = sysctl_wire_old_buffer(req, 0);
262         if (error != 0)
263                 return (error);
264         sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
265         for (dom = 0; dom < vm_ndomains; dom++) {
266                 sbuf_printf(&sbuf,"\nDOMAIN %d:\n", dom);
267                 for (flind = 0; flind < vm_nfreelists; flind++) {
268                         sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
269                             "\n  ORDER (SIZE)  |  NUMBER"
270                             "\n              ", flind);
271                         for (pind = 0; pind < VM_NFREEPOOL; pind++)
272                                 sbuf_printf(&sbuf, "  |  POOL %d", pind);
273                         sbuf_printf(&sbuf, "\n--            ");
274                         for (pind = 0; pind < VM_NFREEPOOL; pind++)
275                                 sbuf_printf(&sbuf, "-- --      ");
276                         sbuf_printf(&sbuf, "--\n");
277                         for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
278                                 sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
279                                     1 << (PAGE_SHIFT - 10 + oind));
280                                 for (pind = 0; pind < VM_NFREEPOOL; pind++) {
281                                 fl = vm_phys_free_queues[dom][flind][pind];
282                                         sbuf_printf(&sbuf, "  |  %6d",
283                                             fl[oind].lcnt);
284                                 }
285                                 sbuf_printf(&sbuf, "\n");
286                         }
287                 }
288         }
289         error = sbuf_finish(&sbuf);
290         sbuf_delete(&sbuf);
291         return (error);
292 }
293
294 /*
295  * Outputs the set of physical memory segments.
296  */
297 static int
298 sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
299 {
300         struct sbuf sbuf;
301         struct vm_phys_seg *seg;
302         int error, segind;
303
304         error = sysctl_wire_old_buffer(req, 0);
305         if (error != 0)
306                 return (error);
307         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
308         for (segind = 0; segind < vm_phys_nsegs; segind++) {
309                 sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
310                 seg = &vm_phys_segs[segind];
311                 sbuf_printf(&sbuf, "start:     %#jx\n",
312                     (uintmax_t)seg->start);
313                 sbuf_printf(&sbuf, "end:       %#jx\n",
314                     (uintmax_t)seg->end);
315                 sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
316                 sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
317         }
318         error = sbuf_finish(&sbuf);
319         sbuf_delete(&sbuf);
320         return (error);
321 }
322
323 /*
324  * Return affinity, or -1 if there's no affinity information.
325  */
326 int
327 vm_phys_mem_affinity(int f, int t)
328 {
329
330 #ifdef NUMA
331         if (mem_locality == NULL)
332                 return (-1);
333         if (f >= vm_ndomains || t >= vm_ndomains)
334                 return (-1);
335         return (mem_locality[f * vm_ndomains + t]);
336 #else
337         return (-1);
338 #endif
339 }
340
341 #ifdef NUMA
342 /*
343  * Outputs the VM locality table.
344  */
345 static int
346 sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
347 {
348         struct sbuf sbuf;
349         int error, i, j;
350
351         error = sysctl_wire_old_buffer(req, 0);
352         if (error != 0)
353                 return (error);
354         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
355
356         sbuf_printf(&sbuf, "\n");
357
358         for (i = 0; i < vm_ndomains; i++) {
359                 sbuf_printf(&sbuf, "%d: ", i);
360                 for (j = 0; j < vm_ndomains; j++) {
361                         sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
362                 }
363                 sbuf_printf(&sbuf, "\n");
364         }
365         error = sbuf_finish(&sbuf);
366         sbuf_delete(&sbuf);
367         return (error);
368 }
369 #endif
370
371 static void
372 vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
373 {
374
375         m->order = order;
376         if (tail)
377                 TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
378         else
379                 TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
380         fl[order].lcnt++;
381 }
382
383 static void
384 vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
385 {
386
387         TAILQ_REMOVE(&fl[order].pl, m, listq);
388         fl[order].lcnt--;
389         m->order = VM_NFREEORDER;
390 }
391
392 /*
393  * Create a physical memory segment.
394  */
395 static void
396 _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
397 {
398         struct vm_phys_seg *seg;
399
400         KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
401             ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
402         KASSERT(domain >= 0 && domain < vm_ndomains,
403             ("vm_phys_create_seg: invalid domain provided"));
404         seg = &vm_phys_segs[vm_phys_nsegs++];
405         while (seg > vm_phys_segs && (seg - 1)->start >= end) {
406                 *seg = *(seg - 1);
407                 seg--;
408         }
409         seg->start = start;
410         seg->end = end;
411         seg->domain = domain;
412 }
413
414 static void
415 vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
416 {
417 #ifdef NUMA
418         int i;
419
420         if (mem_affinity == NULL) {
421                 _vm_phys_create_seg(start, end, 0);
422                 return;
423         }
424
425         for (i = 0;; i++) {
426                 if (mem_affinity[i].end == 0)
427                         panic("Reached end of affinity info");
428                 if (mem_affinity[i].end <= start)
429                         continue;
430                 if (mem_affinity[i].start > start)
431                         panic("No affinity info for start %jx",
432                             (uintmax_t)start);
433                 if (mem_affinity[i].end >= end) {
434                         _vm_phys_create_seg(start, end,
435                             mem_affinity[i].domain);
436                         break;
437                 }
438                 _vm_phys_create_seg(start, mem_affinity[i].end,
439                     mem_affinity[i].domain);
440                 start = mem_affinity[i].end;
441         }
442 #else
443         _vm_phys_create_seg(start, end, 0);
444 #endif
445 }
446
447 /*
448  * Add a physical memory segment.
449  */
450 void
451 vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
452 {
453         vm_paddr_t paddr;
454
455         KASSERT((start & PAGE_MASK) == 0,
456             ("vm_phys_define_seg: start is not page aligned"));
457         KASSERT((end & PAGE_MASK) == 0,
458             ("vm_phys_define_seg: end is not page aligned"));
459
460         /*
461          * Split the physical memory segment if it spans two or more free
462          * list boundaries.
463          */
464         paddr = start;
465 #ifdef  VM_FREELIST_LOWMEM
466         if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
467                 vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
468                 paddr = VM_LOWMEM_BOUNDARY;
469         }
470 #endif
471 #ifdef  VM_FREELIST_DMA32
472         if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
473                 vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
474                 paddr = VM_DMA32_BOUNDARY;
475         }
476 #endif
477         vm_phys_create_seg(paddr, end);
478 }
479
480 /*
481  * Initialize the physical memory allocator.
482  *
483  * Requires that vm_page_array is initialized!
484  */
485 void
486 vm_phys_init(void)
487 {
488         struct vm_freelist *fl;
489         struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
490 #if defined(VM_DMA32_NPAGES_THRESHOLD) || defined(VM_PHYSSEG_SPARSE)
491         u_long npages;
492 #endif
493         int dom, flind, freelist, oind, pind, segind;
494
495         /*
496          * Compute the number of free lists, and generate the mapping from the
497          * manifest constants VM_FREELIST_* to the free list indices.
498          *
499          * Initially, the entries of vm_freelist_to_flind[] are set to either
500          * 0 or 1 to indicate which free lists should be created.
501          */
502 #ifdef  VM_DMA32_NPAGES_THRESHOLD
503         npages = 0;
504 #endif
505         for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
506                 seg = &vm_phys_segs[segind];
507 #ifdef  VM_FREELIST_LOWMEM
508                 if (seg->end <= VM_LOWMEM_BOUNDARY)
509                         vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
510                 else
511 #endif
512 #ifdef  VM_FREELIST_DMA32
513                 if (
514 #ifdef  VM_DMA32_NPAGES_THRESHOLD
515                     /*
516                      * Create the DMA32 free list only if the amount of
517                      * physical memory above physical address 4G exceeds the
518                      * given threshold.
519                      */
520                     npages > VM_DMA32_NPAGES_THRESHOLD &&
521 #endif
522                     seg->end <= VM_DMA32_BOUNDARY)
523                         vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
524                 else
525 #endif
526                 {
527 #ifdef  VM_DMA32_NPAGES_THRESHOLD
528                         npages += atop(seg->end - seg->start);
529 #endif
530                         vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
531                 }
532         }
533         /* Change each entry into a running total of the free lists. */
534         for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
535                 vm_freelist_to_flind[freelist] +=
536                     vm_freelist_to_flind[freelist - 1];
537         }
538         vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
539         KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
540         /* Change each entry into a free list index. */
541         for (freelist = 0; freelist < VM_NFREELIST; freelist++)
542                 vm_freelist_to_flind[freelist]--;
543
544         /*
545          * Initialize the first_page and free_queues fields of each physical
546          * memory segment.
547          */
548 #ifdef VM_PHYSSEG_SPARSE
549         npages = 0;
550 #endif
551         for (segind = 0; segind < vm_phys_nsegs; segind++) {
552                 seg = &vm_phys_segs[segind];
553 #ifdef VM_PHYSSEG_SPARSE
554                 seg->first_page = &vm_page_array[npages];
555                 npages += atop(seg->end - seg->start);
556 #else
557                 seg->first_page = PHYS_TO_VM_PAGE(seg->start);
558 #endif
559 #ifdef  VM_FREELIST_LOWMEM
560                 if (seg->end <= VM_LOWMEM_BOUNDARY) {
561                         flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
562                         KASSERT(flind >= 0,
563                             ("vm_phys_init: LOWMEM flind < 0"));
564                 } else
565 #endif
566 #ifdef  VM_FREELIST_DMA32
567                 if (seg->end <= VM_DMA32_BOUNDARY) {
568                         flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
569                         KASSERT(flind >= 0,
570                             ("vm_phys_init: DMA32 flind < 0"));
571                 } else
572 #endif
573                 {
574                         flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
575                         KASSERT(flind >= 0,
576                             ("vm_phys_init: DEFAULT flind < 0"));
577                 }
578                 seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
579         }
580
581         /*
582          * Coalesce physical memory segments that are contiguous and share the
583          * same per-domain free queues.
584          */
585         prev_seg = vm_phys_segs;
586         seg = &vm_phys_segs[1];
587         end_seg = &vm_phys_segs[vm_phys_nsegs];
588         while (seg < end_seg) {
589                 if (prev_seg->end == seg->start &&
590                     prev_seg->free_queues == seg->free_queues) {
591                         prev_seg->end = seg->end;
592                         KASSERT(prev_seg->domain == seg->domain,
593                             ("vm_phys_init: free queues cannot span domains"));
594                         vm_phys_nsegs--;
595                         end_seg--;
596                         for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
597                                 *tmp_seg = *(tmp_seg + 1);
598                 } else {
599                         prev_seg = seg;
600                         seg++;
601                 }
602         }
603
604         /*
605          * Initialize the free queues.
606          */
607         for (dom = 0; dom < vm_ndomains; dom++) {
608                 for (flind = 0; flind < vm_nfreelists; flind++) {
609                         for (pind = 0; pind < VM_NFREEPOOL; pind++) {
610                                 fl = vm_phys_free_queues[dom][flind][pind];
611                                 for (oind = 0; oind < VM_NFREEORDER; oind++)
612                                         TAILQ_INIT(&fl[oind].pl);
613                         }
614                 }
615         }
616
617         rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
618 }
619
620 /*
621  * Register info about the NUMA topology of the system.
622  *
623  * Invoked by platform-dependent code prior to vm_phys_init().
624  */
625 void
626 vm_phys_register_domains(int ndomains, struct mem_affinity *affinity,
627     int *locality)
628 {
629 #ifdef NUMA
630         int d, i;
631
632         /*
633          * For now the only override value that we support is 1, which
634          * effectively disables NUMA-awareness in the allocators.
635          */
636         d = 0;
637         TUNABLE_INT_FETCH("vm.numa.disabled", &d);
638         if (d)
639                 ndomains = 1;
640
641         if (ndomains > 1) {
642                 vm_ndomains = ndomains;
643                 mem_affinity = affinity;
644                 mem_locality = locality;
645         }
646
647         for (i = 0; i < vm_ndomains; i++)
648                 DOMAINSET_SET(i, &all_domains);
649 #else
650         (void)ndomains;
651         (void)affinity;
652         (void)locality;
653 #endif
654 }
655
656 /*
657  * Split a contiguous, power of two-sized set of physical pages.
658  *
659  * When this function is called by a page allocation function, the caller
660  * should request insertion at the head unless the order [order, oind) queues
661  * are known to be empty.  The objective being to reduce the likelihood of
662  * long-term fragmentation by promoting contemporaneous allocation and
663  * (hopefully) deallocation.
664  */
665 static __inline void
666 vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
667     int tail)
668 {
669         vm_page_t m_buddy;
670
671         while (oind > order) {
672                 oind--;
673                 m_buddy = &m[1 << oind];
674                 KASSERT(m_buddy->order == VM_NFREEORDER,
675                     ("vm_phys_split_pages: page %p has unexpected order %d",
676                     m_buddy, m_buddy->order));
677                 vm_freelist_add(fl, m_buddy, oind, tail);
678         }
679 }
680
681 /*
682  * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
683  * and sized set to the specified free list.
684  *
685  * When this function is called by a page allocation function, the caller
686  * should request insertion at the head unless the lower-order queues are
687  * known to be empty.  The objective being to reduce the likelihood of long-
688  * term fragmentation by promoting contemporaneous allocation and (hopefully)
689  * deallocation.
690  *
691  * The physical page m's buddy must not be free.
692  */
693 static void
694 vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
695 {
696         u_int n;
697         int order;
698
699         KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
700         KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
701             ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
702             ("vm_phys_enq_range: page %p and npages %u are misaligned",
703             m, npages));
704         do {
705                 KASSERT(m->order == VM_NFREEORDER,
706                     ("vm_phys_enq_range: page %p has unexpected order %d",
707                     m, m->order));
708                 order = ffs(npages) - 1;
709                 KASSERT(order < VM_NFREEORDER,
710                     ("vm_phys_enq_range: order %d is out of range", order));
711                 vm_freelist_add(fl, m, order, tail);
712                 n = 1 << order;
713                 m += n;
714                 npages -= n;
715         } while (npages > 0);
716 }
717
718 /*
719  * Set the pool for a contiguous, power of two-sized set of physical pages. 
720  */
721 static void
722 vm_phys_set_pool(int pool, vm_page_t m, int order)
723 {
724         vm_page_t m_tmp;
725
726         for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
727                 m_tmp->pool = pool;
728 }
729
730 /*
731  * Tries to allocate the specified number of pages from the specified pool
732  * within the specified domain.  Returns the actual number of allocated pages
733  * and a pointer to each page through the array ma[].
734  *
735  * The returned pages may not be physically contiguous.  However, in contrast
736  * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
737  * calling this function once to allocate the desired number of pages will
738  * avoid wasted time in vm_phys_split_pages().
739  *
740  * The free page queues for the specified domain must be locked.
741  */
742 int
743 vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
744 {
745         struct vm_freelist *alt, *fl;
746         vm_page_t m;
747         int avail, end, flind, freelist, i, need, oind, pind;
748
749         KASSERT(domain >= 0 && domain < vm_ndomains,
750             ("vm_phys_alloc_npages: domain %d is out of range", domain));
751         KASSERT(pool < VM_NFREEPOOL,
752             ("vm_phys_alloc_npages: pool %d is out of range", pool));
753         KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
754             ("vm_phys_alloc_npages: npages %d is out of range", npages));
755         vm_domain_free_assert_locked(VM_DOMAIN(domain));
756         i = 0;
757         for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
758                 flind = vm_freelist_to_flind[freelist];
759                 if (flind < 0)
760                         continue;
761                 fl = vm_phys_free_queues[domain][flind][pool];
762                 for (oind = 0; oind < VM_NFREEORDER; oind++) {
763                         while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
764                                 vm_freelist_rem(fl, m, oind);
765                                 avail = 1 << oind;
766                                 need = imin(npages - i, avail);
767                                 for (end = i + need; i < end;)
768                                         ma[i++] = m++;
769                                 if (need < avail) {
770                                         /*
771                                          * Return excess pages to fl.  Its
772                                          * order [0, oind) queues are empty.
773                                          */
774                                         vm_phys_enq_range(m, avail - need, fl,
775                                             1);
776                                         return (npages);
777                                 } else if (i == npages)
778                                         return (npages);
779                         }
780                 }
781                 for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
782                         for (pind = 0; pind < VM_NFREEPOOL; pind++) {
783                                 alt = vm_phys_free_queues[domain][flind][pind];
784                                 while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
785                                     NULL) {
786                                         vm_freelist_rem(alt, m, oind);
787                                         vm_phys_set_pool(pool, m, oind);
788                                         avail = 1 << oind;
789                                         need = imin(npages - i, avail);
790                                         for (end = i + need; i < end;)
791                                                 ma[i++] = m++;
792                                         if (need < avail) {
793                                                 /*
794                                                  * Return excess pages to fl.
795                                                  * Its order [0, oind) queues
796                                                  * are empty.
797                                                  */
798                                                 vm_phys_enq_range(m, avail -
799                                                     need, fl, 1);
800                                                 return (npages);
801                                         } else if (i == npages)
802                                                 return (npages);
803                                 }
804                         }
805                 }
806         }
807         return (i);
808 }
809
810 /*
811  * Allocate a contiguous, power of two-sized set of physical pages
812  * from the free lists.
813  *
814  * The free page queues must be locked.
815  */
816 vm_page_t
817 vm_phys_alloc_pages(int domain, int pool, int order)
818 {
819         vm_page_t m;
820         int freelist;
821
822         for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
823                 m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
824                 if (m != NULL)
825                         return (m);
826         }
827         return (NULL);
828 }
829
830 /*
831  * Allocate a contiguous, power of two-sized set of physical pages from the
832  * specified free list.  The free list must be specified using one of the
833  * manifest constants VM_FREELIST_*.
834  *
835  * The free page queues must be locked.
836  */
837 vm_page_t
838 vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
839 {
840         struct vm_freelist *alt, *fl;
841         vm_page_t m;
842         int oind, pind, flind;
843
844         KASSERT(domain >= 0 && domain < vm_ndomains,
845             ("vm_phys_alloc_freelist_pages: domain %d is out of range",
846             domain));
847         KASSERT(freelist < VM_NFREELIST,
848             ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
849             freelist));
850         KASSERT(pool < VM_NFREEPOOL,
851             ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
852         KASSERT(order < VM_NFREEORDER,
853             ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
854
855         flind = vm_freelist_to_flind[freelist];
856         /* Check if freelist is present */
857         if (flind < 0)
858                 return (NULL);
859
860         vm_domain_free_assert_locked(VM_DOMAIN(domain));
861         fl = &vm_phys_free_queues[domain][flind][pool][0];
862         for (oind = order; oind < VM_NFREEORDER; oind++) {
863                 m = TAILQ_FIRST(&fl[oind].pl);
864                 if (m != NULL) {
865                         vm_freelist_rem(fl, m, oind);
866                         /* The order [order, oind) queues are empty. */
867                         vm_phys_split_pages(m, oind, fl, order, 1);
868                         return (m);
869                 }
870         }
871
872         /*
873          * The given pool was empty.  Find the largest
874          * contiguous, power-of-two-sized set of pages in any
875          * pool.  Transfer these pages to the given pool, and
876          * use them to satisfy the allocation.
877          */
878         for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
879                 for (pind = 0; pind < VM_NFREEPOOL; pind++) {
880                         alt = &vm_phys_free_queues[domain][flind][pind][0];
881                         m = TAILQ_FIRST(&alt[oind].pl);
882                         if (m != NULL) {
883                                 vm_freelist_rem(alt, m, oind);
884                                 vm_phys_set_pool(pool, m, oind);
885                                 /* The order [order, oind) queues are empty. */
886                                 vm_phys_split_pages(m, oind, fl, order, 1);
887                                 return (m);
888                         }
889                 }
890         }
891         return (NULL);
892 }
893
894 /*
895  * Find the vm_page corresponding to the given physical address.
896  */
897 vm_page_t
898 vm_phys_paddr_to_vm_page(vm_paddr_t pa)
899 {
900         struct vm_phys_seg *seg;
901
902         if ((seg = vm_phys_paddr_to_seg(pa)) != NULL)
903                 return (&seg->first_page[atop(pa - seg->start)]);
904         return (NULL);
905 }
906
907 vm_page_t
908 vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
909 {
910         struct vm_phys_fictitious_seg tmp, *seg;
911         vm_page_t m;
912
913         m = NULL;
914         tmp.start = pa;
915         tmp.end = 0;
916
917         rw_rlock(&vm_phys_fictitious_reg_lock);
918         seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
919         rw_runlock(&vm_phys_fictitious_reg_lock);
920         if (seg == NULL)
921                 return (NULL);
922
923         m = &seg->first_page[atop(pa - seg->start)];
924         KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));
925
926         return (m);
927 }
928
929 static inline void
930 vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
931     long page_count, vm_memattr_t memattr)
932 {
933         long i;
934
935         bzero(range, page_count * sizeof(*range));
936         for (i = 0; i < page_count; i++) {
937                 vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
938                 range[i].oflags &= ~VPO_UNMANAGED;
939                 range[i].busy_lock = VPB_UNBUSIED;
940         }
941 }
942
943 int
944 vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
945     vm_memattr_t memattr)
946 {
947         struct vm_phys_fictitious_seg *seg;
948         vm_page_t fp;
949         long page_count;
950 #ifdef VM_PHYSSEG_DENSE
951         long pi, pe;
952         long dpage_count;
953 #endif
954
955         KASSERT(start < end,
956             ("Start of segment isn't less than end (start: %jx end: %jx)",
957             (uintmax_t)start, (uintmax_t)end));
958
959         page_count = (end - start) / PAGE_SIZE;
960
961 #ifdef VM_PHYSSEG_DENSE
962         pi = atop(start);
963         pe = atop(end);
964         if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
965                 fp = &vm_page_array[pi - first_page];
966                 if ((pe - first_page) > vm_page_array_size) {
967                         /*
968                          * We have a segment that starts inside
969                          * of vm_page_array, but ends outside of it.
970                          *
971                          * Use vm_page_array pages for those that are
972                          * inside of the vm_page_array range, and
973                          * allocate the remaining ones.
974                          */
975                         dpage_count = vm_page_array_size - (pi - first_page);
976                         vm_phys_fictitious_init_range(fp, start, dpage_count,
977                             memattr);
978                         page_count -= dpage_count;
979                         start += ptoa(dpage_count);
980                         goto alloc;
981                 }
982                 /*
983                  * We can allocate the full range from vm_page_array,
984                  * so there's no need to register the range in the tree.
985                  */
986                 vm_phys_fictitious_init_range(fp, start, page_count, memattr);
987                 return (0);
988         } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
989                 /*
990                  * We have a segment that ends inside of vm_page_array,
991                  * but starts outside of it.
992                  */
993                 fp = &vm_page_array[0];
994                 dpage_count = pe - first_page;
995                 vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
996                     memattr);
997                 end -= ptoa(dpage_count);
998                 page_count -= dpage_count;
999                 goto alloc;
1000         } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
1001                 /*
1002                  * Trying to register a fictitious range that expands before
1003                  * and after vm_page_array.
1004                  */
1005                 return (EINVAL);
1006         } else {
1007 alloc:
1008 #endif
1009                 fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
1010                     M_WAITOK);
1011 #ifdef VM_PHYSSEG_DENSE
1012         }
1013 #endif
1014         vm_phys_fictitious_init_range(fp, start, page_count, memattr);
1015
1016         seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
1017         seg->start = start;
1018         seg->end = end;
1019         seg->first_page = fp;
1020
1021         rw_wlock(&vm_phys_fictitious_reg_lock);
1022         RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
1023         rw_wunlock(&vm_phys_fictitious_reg_lock);
1024
1025         return (0);
1026 }
1027
1028 void
1029 vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
1030 {
1031         struct vm_phys_fictitious_seg *seg, tmp;
1032 #ifdef VM_PHYSSEG_DENSE
1033         long pi, pe;
1034 #endif
1035
1036         KASSERT(start < end,
1037             ("Start of segment isn't less than end (start: %jx end: %jx)",
1038             (uintmax_t)start, (uintmax_t)end));
1039
1040 #ifdef VM_PHYSSEG_DENSE
1041         pi = atop(start);
1042         pe = atop(end);
1043         if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
1044                 if ((pe - first_page) <= vm_page_array_size) {
1045                         /*
1046                          * This segment was allocated using vm_page_array
1047                          * only, there's nothing to do since those pages
1048                          * were never added to the tree.
1049                          */
1050                         return;
1051                 }
1052                 /*
1053                  * We have a segment that starts inside
1054                  * of vm_page_array, but ends outside of it.
1055                  *
1056                  * Calculate how many pages were added to the
1057                  * tree and free them.
1058                  */
1059                 start = ptoa(first_page + vm_page_array_size);
1060         } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
1061                 /*
1062                  * We have a segment that ends inside of vm_page_array,
1063                  * but starts outside of it.
1064                  */
1065                 end = ptoa(first_page);
1066         } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
1067                 /* Since it's not possible to register such a range, panic. */
1068                 panic(
1069                     "Unregistering not registered fictitious range [%#jx:%#jx]",
1070                     (uintmax_t)start, (uintmax_t)end);
1071         }
1072 #endif
1073         tmp.start = start;
1074         tmp.end = 0;
1075
1076         rw_wlock(&vm_phys_fictitious_reg_lock);
1077         seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
1078         if (seg->start != start || seg->end != end) {
1079                 rw_wunlock(&vm_phys_fictitious_reg_lock);
1080                 panic(
1081                     "Unregistering not registered fictitious range [%#jx:%#jx]",
1082                     (uintmax_t)start, (uintmax_t)end);
1083         }
1084         RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
1085         rw_wunlock(&vm_phys_fictitious_reg_lock);
1086         free(seg->first_page, M_FICT_PAGES);
1087         free(seg, M_FICT_PAGES);
1088 }
1089
1090 /*
1091  * Free a contiguous, power of two-sized set of physical pages.
1092  *
1093  * The free page queues must be locked.
1094  */
1095 void
1096 vm_phys_free_pages(vm_page_t m, int order)
1097 {
1098         struct vm_freelist *fl;
1099         struct vm_phys_seg *seg;
1100         vm_paddr_t pa;
1101         vm_page_t m_buddy;
1102
1103         KASSERT(m->order == VM_NFREEORDER,
1104             ("vm_phys_free_pages: page %p has unexpected order %d",
1105             m, m->order));
1106         KASSERT(m->pool < VM_NFREEPOOL,
1107             ("vm_phys_free_pages: page %p has unexpected pool %d",
1108             m, m->pool));
1109         KASSERT(order < VM_NFREEORDER,
1110             ("vm_phys_free_pages: order %d is out of range", order));
1111         seg = &vm_phys_segs[m->segind];
1112         vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
1113         if (order < VM_NFREEORDER - 1) {
1114                 pa = VM_PAGE_TO_PHYS(m);
1115                 do {
1116                         pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
1117                         if (pa < seg->start || pa >= seg->end)
1118                                 break;
1119                         m_buddy = &seg->first_page[atop(pa - seg->start)];
1120                         if (m_buddy->order != order)
1121                                 break;
1122                         fl = (*seg->free_queues)[m_buddy->pool];
1123                         vm_freelist_rem(fl, m_buddy, order);
1124                         if (m_buddy->pool != m->pool)
1125                                 vm_phys_set_pool(m->pool, m_buddy, order);
1126                         order++;
1127                         pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
1128                         m = &seg->first_page[atop(pa - seg->start)];
1129                 } while (order < VM_NFREEORDER - 1);
1130         }
1131         fl = (*seg->free_queues)[m->pool];
1132         vm_freelist_add(fl, m, order, 1);
1133 }
1134
1135 /*
1136  * Return the largest possible order of a set of pages starting at m.
1137  */
1138 static int
1139 max_order(vm_page_t m)
1140 {
1141
1142         /*
1143          * Unsigned "min" is used here so that "order" is assigned
1144          * "VM_NFREEORDER - 1" when "m"'s physical address is zero
1145          * or the low-order bits of its physical address are zero
1146          * because the size of a physical address exceeds the size of
1147          * a long.
1148          */
1149         return (min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
1150             VM_NFREEORDER - 1));
1151 }
1152
1153 /*
1154  * Free a contiguous, arbitrarily sized set of physical pages, without
1155  * merging across set boundaries.
1156  *
1157  * The free page queues must be locked.
1158  */
1159 void
1160 vm_phys_enqueue_contig(vm_page_t m, u_long npages)
1161 {
1162         struct vm_freelist *fl;
1163         struct vm_phys_seg *seg;
1164         vm_page_t m_end;
1165         int order;
1166
1167         /*
1168          * Avoid unnecessary coalescing by freeing the pages in the largest
1169          * possible power-of-two-sized subsets.
1170          */
1171         vm_domain_free_assert_locked(vm_pagequeue_domain(m));
1172         seg = &vm_phys_segs[m->segind];
1173         fl = (*seg->free_queues)[m->pool];
1174         m_end = m + npages;
1175         /* Free blocks of increasing size. */
1176         while ((order = max_order(m)) < VM_NFREEORDER - 1 &&
1177             m + (1 << order) <= m_end) {
1178                 KASSERT(seg == &vm_phys_segs[m->segind],
1179                     ("%s: page range [%p,%p) spans multiple segments",
1180                     __func__, m_end - npages, m));
1181                 vm_freelist_add(fl, m, order, 1);
1182                 m += 1 << order;
1183         }
1184         /* Free blocks of maximum size. */
1185         while (m + (1 << order) <= m_end) {
1186                 KASSERT(seg == &vm_phys_segs[m->segind],
1187                     ("%s: page range [%p,%p) spans multiple segments",
1188                     __func__, m_end - npages, m));
1189                 vm_freelist_add(fl, m, order, 1);
1190                 m += 1 << order;
1191         }
1192         /* Free blocks of diminishing size. */
1193         while (m < m_end) {
1194                 KASSERT(seg == &vm_phys_segs[m->segind],
1195                     ("%s: page range [%p,%p) spans multiple segments",
1196                     __func__, m_end - npages, m));
1197                 order = flsl(m_end - m) - 1;
1198                 vm_freelist_add(fl, m, order, 1);
1199                 m += 1 << order;
1200         }
1201 }
1202
1203 /*
1204  * Free a contiguous, arbitrarily sized set of physical pages.
1205  *
1206  * The free page queues must be locked.
1207  */
1208 void
1209 vm_phys_free_contig(vm_page_t m, u_long npages)
1210 {
1211         int order_start, order_end;
1212         vm_page_t m_start, m_end;
1213
1214         vm_domain_free_assert_locked(vm_pagequeue_domain(m));
1215
1216         m_start = m;
1217         order_start = max_order(m_start);
1218         if (order_start < VM_NFREEORDER - 1)
1219                 m_start += 1 << order_start;
1220         m_end = m + npages;
1221         order_end = max_order(m_end);
1222         if (order_end < VM_NFREEORDER - 1)
1223                 m_end -= 1 << order_end;
1224         /*
1225          * Avoid unnecessary coalescing by freeing the pages at the start and
1226          * end of the range last.
1227          */
1228         if (m_start < m_end)
1229                 vm_phys_enqueue_contig(m_start, m_end - m_start);
1230         if (order_start < VM_NFREEORDER - 1)
1231                 vm_phys_free_pages(m, order_start);
1232         if (order_end < VM_NFREEORDER - 1)
1233                 vm_phys_free_pages(m_end, order_end);
1234 }
1235
1236 /*
1237  * Identify the first address range within segment segind or greater
1238  * that matches the domain, lies within the low/high range, and has
1239  * enough pages.  Return -1 if there is none.
1240  */
1241 int
1242 vm_phys_find_range(vm_page_t bounds[], int segind, int domain,
1243     u_long npages, vm_paddr_t low, vm_paddr_t high)
1244 {
1245         vm_paddr_t pa_end, pa_start;
1246         struct vm_phys_seg *end_seg, *seg;
1247
1248         KASSERT(npages > 0, ("npages is zero"));
1249         KASSERT(domain >= 0 && domain < vm_ndomains, ("domain out of range"));
1250         end_seg = &vm_phys_segs[vm_phys_nsegs];
1251         for (seg = &vm_phys_segs[segind]; seg < end_seg; seg++) {
1252                 if (seg->domain != domain)
1253                         continue;
1254                 if (seg->start >= high)
1255                         return (-1);
1256                 pa_start = MAX(low, seg->start);
1257                 pa_end = MIN(high, seg->end);
1258                 if (pa_end - pa_start < ptoa(npages))
1259                         continue;
1260                 bounds[0] = &seg->first_page[atop(pa_start - seg->start)];
1261                 bounds[1] = &seg->first_page[atop(pa_end - seg->start)];
1262                 return (seg - vm_phys_segs);
1263         }
1264         return (-1);
1265 }
1266
1267 /*
1268  * Search for the given physical page "m" in the free lists.  If the search
1269  * succeeds, remove "m" from the free lists and return true.  Otherwise, return
1270  * false, indicating that "m" is not in the free lists.
1271  *
1272  * The free page queues must be locked.
1273  */
1274 bool
1275 vm_phys_unfree_page(vm_page_t m)
1276 {
1277         struct vm_freelist *fl;
1278         struct vm_phys_seg *seg;
1279         vm_paddr_t pa, pa_half;
1280         vm_page_t m_set, m_tmp;
1281         int order;
1282
1283         /*
1284          * First, find the contiguous, power of two-sized set of free
1285          * physical pages containing the given physical page "m" and
1286          * assign it to "m_set".
1287          */
1288         seg = &vm_phys_segs[m->segind];
1289         vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
1290         for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
1291             order < VM_NFREEORDER - 1; ) {
1292                 order++;
1293                 pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
1294                 if (pa >= seg->start)
1295                         m_set = &seg->first_page[atop(pa - seg->start)];
1296                 else
1297                         return (false);
1298         }
1299         if (m_set->order < order)
1300                 return (false);
1301         if (m_set->order == VM_NFREEORDER)
1302                 return (false);
1303         KASSERT(m_set->order < VM_NFREEORDER,
1304             ("vm_phys_unfree_page: page %p has unexpected order %d",
1305             m_set, m_set->order));
1306
1307         /*
1308          * Next, remove "m_set" from the free lists.  Finally, extract
1309          * "m" from "m_set" using an iterative algorithm: While "m_set"
1310          * is larger than a page, shrink "m_set" by returning the half
1311          * of "m_set" that does not contain "m" to the free lists.
1312          */
1313         fl = (*seg->free_queues)[m_set->pool];
1314         order = m_set->order;
1315         vm_freelist_rem(fl, m_set, order);
1316         while (order > 0) {
1317                 order--;
1318                 pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
1319                 if (m->phys_addr < pa_half)
1320                         m_tmp = &seg->first_page[atop(pa_half - seg->start)];
1321                 else {
1322                         m_tmp = m_set;
1323                         m_set = &seg->first_page[atop(pa_half - seg->start)];
1324                 }
1325                 vm_freelist_add(fl, m_tmp, order, 0);
1326         }
1327         KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
1328         return (true);
1329 }
1330
1331 /*
1332  * Find a run of contiguous physical pages from the specified page list.
1333  */
1334 static vm_page_t
1335 vm_phys_find_freelist_contig(struct vm_freelist *fl, int oind, u_long npages,
1336     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
1337 {
1338         struct vm_phys_seg *seg;
1339         vm_paddr_t frag, lbound, pa, page_size, pa_end, pa_pre, size;
1340         vm_page_t m, m_listed, m_ret;
1341         int order;
1342
1343         KASSERT(npages > 0, ("npages is 0"));
1344         KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1345         KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1346         /* Search for a run satisfying the specified conditions. */
1347         page_size = PAGE_SIZE;
1348         size = npages << PAGE_SHIFT;
1349         frag = (npages & ~(~0UL << oind)) << PAGE_SHIFT;
1350         TAILQ_FOREACH(m_listed, &fl[oind].pl, listq) {
1351                 /*
1352                  * Determine if the address range starting at pa is
1353                  * too low.
1354                  */
1355                 pa = VM_PAGE_TO_PHYS(m_listed);
1356                 if (pa < low)
1357                         continue;
1358
1359                 /*
1360                  * If this is not the first free oind-block in this range, bail
1361                  * out. We have seen the first free block already, or will see
1362                  * it before failing to find an appropriate range.
1363                  */
1364                 seg = &vm_phys_segs[m_listed->segind];
1365                 lbound = low > seg->start ? low : seg->start;
1366                 pa_pre = pa - (page_size << oind);
1367                 m = &seg->first_page[atop(pa_pre - seg->start)];
1368                 if (pa != 0 && pa_pre >= lbound && m->order == oind)
1369                         continue;
1370
1371                 if (!vm_addr_align_ok(pa, alignment))
1372                         /* Advance to satisfy alignment condition. */
1373                         pa = roundup2(pa, alignment);
1374                 else if (frag != 0 && lbound + frag <= pa) {
1375                         /*
1376                          * Back up to the first aligned free block in this
1377                          * range, without moving below lbound.
1378                          */
1379                         pa_end = pa;
1380                         for (order = oind - 1; order >= 0; order--) {
1381                                 pa_pre = pa_end - (page_size << order);
1382                                 if (!vm_addr_align_ok(pa_pre, alignment))
1383                                         break;
1384                                 m = &seg->first_page[atop(pa_pre - seg->start)];
1385                                 if (pa_pre >= lbound && m->order == order)
1386                                         pa_end = pa_pre;
1387                         }
1388                         /*
1389                          * If the extra small blocks are enough to complete the
1390                          * fragment, use them.  Otherwise, look to allocate the
1391                          * fragment at the other end.
1392                          */
1393                         if (pa_end + frag <= pa)
1394                                 pa = pa_end;
1395                 }
1396
1397                 /* Advance as necessary to satisfy boundary conditions. */
1398                 if (!vm_addr_bound_ok(pa, size, boundary))
1399                         pa = roundup2(pa + 1, boundary);
1400                 pa_end = pa + size;
1401
1402                 /*
1403                  * Determine if the address range is valid (without overflow in
1404                  * pa_end calculation), and fits within the segment.
1405                  */
1406                 if (pa_end < pa || seg->end < pa_end)
1407                         continue;
1408
1409                 m_ret = &seg->first_page[atop(pa - seg->start)];
1410
1411                 /*
1412                  * Determine whether there are enough free oind-blocks here to
1413                  * satisfy the allocation request.
1414                  */
1415                 pa = VM_PAGE_TO_PHYS(m_listed);
1416                 do {
1417                         pa += page_size << oind;
1418                         if (pa >= pa_end)
1419                                 return (m_ret);
1420                         m = &seg->first_page[atop(pa - seg->start)];
1421                 } while (oind == m->order);
1422
1423                 /*
1424                  * Determine if an additional series of free blocks of
1425                  * diminishing size can help to satisfy the allocation request.
1426                  */
1427                 while (m->order < oind &&
1428                     pa + 2 * (page_size << m->order) > pa_end) {
1429                         pa += page_size << m->order;
1430                         if (pa >= pa_end)
1431                                 return (m_ret);
1432                         m = &seg->first_page[atop(pa - seg->start)];
1433                 }
1434         }
1435         return (NULL);
1436 }
1437
1438 /*
1439  * Find a run of contiguous physical pages from the specified free list
1440  * table.
1441  */
1442 static vm_page_t
1443 vm_phys_find_queues_contig(
1444     struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX],
1445     u_long npages, vm_paddr_t low, vm_paddr_t high,
1446     u_long alignment, vm_paddr_t boundary)
1447 {
1448         struct vm_freelist *fl;
1449         vm_page_t m_ret;
1450         vm_paddr_t pa, pa_end, size;
1451         int oind, order, pind;
1452
1453         KASSERT(npages > 0, ("npages is 0"));
1454         KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1455         KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1456         /* Compute the queue that is the best fit for npages. */
1457         order = flsl(npages - 1);
1458         /* Search for a large enough free block. */
1459         size = npages << PAGE_SHIFT;
1460         for (oind = order; oind < VM_NFREEORDER; oind++) {
1461                 for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1462                         fl = (*queues)[pind];
1463                         TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
1464                                 /*
1465                                  * Determine if the address range starting at pa
1466                                  * is within the given range, satisfies the
1467                                  * given alignment, and does not cross the given
1468                                  * boundary.
1469                                  */
1470                                 pa = VM_PAGE_TO_PHYS(m_ret);
1471                                 pa_end = pa + size;
1472                                 if (low <= pa && pa_end <= high &&
1473                                     vm_addr_ok(pa, size, alignment, boundary))
1474                                         return (m_ret);
1475                         }
1476                 }
1477         }
1478         if (order < VM_NFREEORDER)
1479                 return (NULL);
1480         /* Search for a long-enough sequence of small blocks. */
1481         oind = VM_NFREEORDER - 1;
1482         for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1483                 fl = (*queues)[pind];
1484                 m_ret = vm_phys_find_freelist_contig(fl, oind, npages,
1485                     low, high, alignment, boundary);
1486                 if (m_ret != NULL)
1487                         return (m_ret);
1488         }
1489         return (NULL);
1490 }
1491
1492 /*
1493  * Allocate a contiguous set of physical pages of the given size
1494  * "npages" from the free lists.  All of the physical pages must be at
1495  * or above the given physical address "low" and below the given
1496  * physical address "high".  The given value "alignment" determines the
1497  * alignment of the first physical page in the set.  If the given value
1498  * "boundary" is non-zero, then the set of physical pages cannot cross
1499  * any physical address boundary that is a multiple of that value.  Both
1500  * "alignment" and "boundary" must be a power of two.
1501  */
1502 vm_page_t
1503 vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
1504     u_long alignment, vm_paddr_t boundary)
1505 {
1506         vm_paddr_t pa_end, pa_start;
1507         struct vm_freelist *fl;
1508         vm_page_t m, m_run;
1509         struct vm_phys_seg *seg;
1510         struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
1511         int oind, segind;
1512
1513         KASSERT(npages > 0, ("npages is 0"));
1514         KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1515         KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1516         vm_domain_free_assert_locked(VM_DOMAIN(domain));
1517         if (low >= high)
1518                 return (NULL);
1519         queues = NULL;
1520         m_run = NULL;
1521         for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
1522                 seg = &vm_phys_segs[segind];
1523                 if (seg->start >= high || seg->domain != domain)
1524                         continue;
1525                 if (low >= seg->end)
1526                         break;
1527                 if (low <= seg->start)
1528                         pa_start = seg->start;
1529                 else
1530                         pa_start = low;
1531                 if (high < seg->end)
1532                         pa_end = high;
1533                 else
1534                         pa_end = seg->end;
1535                 if (pa_end - pa_start < ptoa(npages))
1536                         continue;
1537                 /*
1538                  * If a previous segment led to a search using
1539                  * the same free lists as would this segment, then
1540                  * we've actually already searched within this
1541                  * too.  So skip it.
1542                  */
1543                 if (seg->free_queues == queues)
1544                         continue;
1545                 queues = seg->free_queues;
1546                 m_run = vm_phys_find_queues_contig(queues, npages,
1547                     low, high, alignment, boundary);
1548                 if (m_run != NULL)
1549                         break;
1550         }
1551         if (m_run == NULL)
1552                 return (NULL);
1553
1554         /* Allocate pages from the page-range found. */
1555         for (m = m_run; m < &m_run[npages]; m = &m[1 << oind]) {
1556                 fl = (*queues)[m->pool];
1557                 oind = m->order;
1558                 vm_freelist_rem(fl, m, oind);
1559                 if (m->pool != VM_FREEPOOL_DEFAULT)
1560                         vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
1561         }
1562         /* Return excess pages to the free lists. */
1563         if (&m_run[npages] < m) {
1564                 fl = (*queues)[VM_FREEPOOL_DEFAULT];
1565                 vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
1566         }
1567         return (m_run);
1568 }
1569
1570 /*
1571  * Return the index of the first unused slot which may be the terminating
1572  * entry.
1573  */
1574 static int
1575 vm_phys_avail_count(void)
1576 {
1577         int i;
1578
1579         for (i = 0; phys_avail[i + 1]; i += 2)
1580                 continue;
1581         if (i > PHYS_AVAIL_ENTRIES)
1582                 panic("Improperly terminated phys_avail %d entries", i);
1583
1584         return (i);
1585 }
1586
1587 /*
1588  * Assert that a phys_avail entry is valid.
1589  */
1590 static void
1591 vm_phys_avail_check(int i)
1592 {
1593         if (phys_avail[i] & PAGE_MASK)
1594                 panic("Unaligned phys_avail[%d]: %#jx", i,
1595                     (intmax_t)phys_avail[i]);
1596         if (phys_avail[i+1] & PAGE_MASK)
1597                 panic("Unaligned phys_avail[%d + 1]: %#jx", i,
1598                     (intmax_t)phys_avail[i]);
1599         if (phys_avail[i + 1] < phys_avail[i])
1600                 panic("phys_avail[%d] start %#jx < end %#jx", i,
1601                     (intmax_t)phys_avail[i], (intmax_t)phys_avail[i+1]);
1602 }
1603
1604 /*
1605  * Return the index of an overlapping phys_avail entry or -1.
1606  */
1607 #ifdef NUMA
1608 static int
1609 vm_phys_avail_find(vm_paddr_t pa)
1610 {
1611         int i;
1612
1613         for (i = 0; phys_avail[i + 1]; i += 2)
1614                 if (phys_avail[i] <= pa && phys_avail[i + 1] > pa)
1615                         return (i);
1616         return (-1);
1617 }
1618 #endif
1619
1620 /*
1621  * Return the index of the largest entry.
1622  */
1623 int
1624 vm_phys_avail_largest(void)
1625 {
1626         vm_paddr_t sz, largesz;
1627         int largest;
1628         int i;
1629
1630         largest = 0;
1631         largesz = 0;
1632         for (i = 0; phys_avail[i + 1]; i += 2) {
1633                 sz = vm_phys_avail_size(i);
1634                 if (sz > largesz) {
1635                         largesz = sz;
1636                         largest = i;
1637                 }
1638         }
1639
1640         return (largest);
1641 }
1642
1643 vm_paddr_t
1644 vm_phys_avail_size(int i)
1645 {
1646
1647         return (phys_avail[i + 1] - phys_avail[i]);
1648 }
1649
1650 /*
1651  * Split an entry at the address 'pa'.  Return zero on success or errno.
1652  */
1653 static int
1654 vm_phys_avail_split(vm_paddr_t pa, int i)
1655 {
1656         int cnt;
1657
1658         vm_phys_avail_check(i);
1659         if (pa <= phys_avail[i] || pa >= phys_avail[i + 1])
1660                 panic("vm_phys_avail_split: invalid address");
1661         cnt = vm_phys_avail_count();
1662         if (cnt >= PHYS_AVAIL_ENTRIES)
1663                 return (ENOSPC);
1664         memmove(&phys_avail[i + 2], &phys_avail[i],
1665             (cnt - i) * sizeof(phys_avail[0]));
1666         phys_avail[i + 1] = pa;
1667         phys_avail[i + 2] = pa;
1668         vm_phys_avail_check(i);
1669         vm_phys_avail_check(i+2);
1670
1671         return (0);
1672 }
1673
1674 /*
1675  * Check if a given physical address can be included as part of a crash dump.
1676  */
1677 bool
1678 vm_phys_is_dumpable(vm_paddr_t pa)
1679 {
1680         vm_page_t m;
1681         int i;
1682
1683         if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
1684                 return ((m->flags & PG_NODUMP) == 0);
1685
1686         for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
1687                 if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
1688                         return (true);
1689         }
1690         return (false);
1691 }
1692
1693 void
1694 vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end)
1695 {
1696         struct vm_phys_seg *seg;
1697
1698         if (vm_phys_early_nsegs == -1)
1699                 panic("%s: called after initialization", __func__);
1700         if (vm_phys_early_nsegs == nitems(vm_phys_early_segs))
1701                 panic("%s: ran out of early segments", __func__);
1702
1703         seg = &vm_phys_early_segs[vm_phys_early_nsegs++];
1704         seg->start = start;
1705         seg->end = end;
1706 }
1707
1708 /*
1709  * This routine allocates NUMA node specific memory before the page
1710  * allocator is bootstrapped.
1711  */
1712 vm_paddr_t
1713 vm_phys_early_alloc(int domain, size_t alloc_size)
1714 {
1715 #ifdef NUMA
1716         int mem_index;
1717 #endif
1718         int i, biggestone;
1719         vm_paddr_t pa, mem_start, mem_end, size, biggestsize, align;
1720
1721         KASSERT(domain == -1 || (domain >= 0 && domain < vm_ndomains),
1722             ("%s: invalid domain index %d", __func__, domain));
1723
1724         /*
1725          * Search the mem_affinity array for the biggest address
1726          * range in the desired domain.  This is used to constrain
1727          * the phys_avail selection below.
1728          */
1729         biggestsize = 0;
1730         mem_start = 0;
1731         mem_end = -1;
1732 #ifdef NUMA
1733         mem_index = 0;
1734         if (mem_affinity != NULL) {
1735                 for (i = 0;; i++) {
1736                         size = mem_affinity[i].end - mem_affinity[i].start;
1737                         if (size == 0)
1738                                 break;
1739                         if (domain != -1 && mem_affinity[i].domain != domain)
1740                                 continue;
1741                         if (size > biggestsize) {
1742                                 mem_index = i;
1743                                 biggestsize = size;
1744                         }
1745                 }
1746                 mem_start = mem_affinity[mem_index].start;
1747                 mem_end = mem_affinity[mem_index].end;
1748         }
1749 #endif
1750
1751         /*
1752          * Now find biggest physical segment in within the desired
1753          * numa domain.
1754          */
1755         biggestsize = 0;
1756         biggestone = 0;
1757         for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1758                 /* skip regions that are out of range */
1759                 if (phys_avail[i+1] - alloc_size < mem_start ||
1760                     phys_avail[i+1] > mem_end)
1761                         continue;
1762                 size = vm_phys_avail_size(i);
1763                 if (size > biggestsize) {
1764                         biggestone = i;
1765                         biggestsize = size;
1766                 }
1767         }
1768         alloc_size = round_page(alloc_size);
1769
1770         /*
1771          * Grab single pages from the front to reduce fragmentation.
1772          */
1773         if (alloc_size == PAGE_SIZE) {
1774                 pa = phys_avail[biggestone];
1775                 phys_avail[biggestone] += PAGE_SIZE;
1776                 vm_phys_avail_check(biggestone);
1777                 return (pa);
1778         }
1779
1780         /*
1781          * Naturally align large allocations.
1782          */
1783         align = phys_avail[biggestone + 1] & (alloc_size - 1);
1784         if (alloc_size + align > biggestsize)
1785                 panic("cannot find a large enough size\n");
1786         if (align != 0 &&
1787             vm_phys_avail_split(phys_avail[biggestone + 1] - align,
1788             biggestone) != 0)
1789                 /* Wasting memory. */
1790                 phys_avail[biggestone + 1] -= align;
1791
1792         phys_avail[biggestone + 1] -= alloc_size;
1793         vm_phys_avail_check(biggestone);
1794         pa = phys_avail[biggestone + 1];
1795         return (pa);
1796 }
1797
1798 void
1799 vm_phys_early_startup(void)
1800 {
1801         struct vm_phys_seg *seg;
1802         int i;
1803
1804         for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1805                 phys_avail[i] = round_page(phys_avail[i]);
1806                 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
1807         }
1808
1809         for (i = 0; i < vm_phys_early_nsegs; i++) {
1810                 seg = &vm_phys_early_segs[i];
1811                 vm_phys_add_seg(seg->start, seg->end);
1812         }
1813         vm_phys_early_nsegs = -1;
1814
1815 #ifdef NUMA
1816         /* Force phys_avail to be split by domain. */
1817         if (mem_affinity != NULL) {
1818                 int idx;
1819
1820                 for (i = 0; mem_affinity[i].end != 0; i++) {
1821                         idx = vm_phys_avail_find(mem_affinity[i].start);
1822                         if (idx != -1 &&
1823                             phys_avail[idx] != mem_affinity[i].start)
1824                                 vm_phys_avail_split(mem_affinity[i].start, idx);
1825                         idx = vm_phys_avail_find(mem_affinity[i].end);
1826                         if (idx != -1 &&
1827                             phys_avail[idx] != mem_affinity[i].end)
1828                                 vm_phys_avail_split(mem_affinity[i].end, idx);
1829                 }
1830         }
1831 #endif
1832 }
1833
1834 #ifdef DDB
1835 /*
1836  * Show the number of physical pages in each of the free lists.
1837  */
1838 DB_SHOW_COMMAND_FLAGS(freepages, db_show_freepages, DB_CMD_MEMSAFE)
1839 {
1840         struct vm_freelist *fl;
1841         int flind, oind, pind, dom;
1842
1843         for (dom = 0; dom < vm_ndomains; dom++) {
1844                 db_printf("DOMAIN: %d\n", dom);
1845                 for (flind = 0; flind < vm_nfreelists; flind++) {
1846                         db_printf("FREE LIST %d:\n"
1847                             "\n  ORDER (SIZE)  |  NUMBER"
1848                             "\n              ", flind);
1849                         for (pind = 0; pind < VM_NFREEPOOL; pind++)
1850                                 db_printf("  |  POOL %d", pind);
1851                         db_printf("\n--            ");
1852                         for (pind = 0; pind < VM_NFREEPOOL; pind++)
1853                                 db_printf("-- --      ");
1854                         db_printf("--\n");
1855                         for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
1856                                 db_printf("  %2.2d (%6.6dK)", oind,
1857                                     1 << (PAGE_SHIFT - 10 + oind));
1858                                 for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1859                                 fl = vm_phys_free_queues[dom][flind][pind];
1860                                         db_printf("  |  %6.6d", fl[oind].lcnt);
1861                                 }
1862                                 db_printf("\n");
1863                         }
1864                         db_printf("\n");
1865                 }
1866                 db_printf("\n");
1867         }
1868 }
1869 #endif