sys/vm/vm_phys.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2002-2006 Rice University
5  * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
6  * All rights reserved.
7  *
8  * This software was developed for the FreeBSD Project by Alan L. Cox,
9  * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
24  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
30  * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 /*
35  *      Physical memory system implementation
36  *
37  * Any external functions defined by this module are only to be used by the
38  * virtual memory system.
39  */
40
41 #include <sys/cdefs.h>
42 #include "opt_ddb.h"
43 #include "opt_vm.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/domainset.h>
48 #include <sys/lock.h>
49 #include <sys/kernel.h>
50 #include <sys/malloc.h>
51 #include <sys/mutex.h>
52 #include <sys/proc.h>
53 #include <sys/queue.h>
54 #include <sys/rwlock.h>
55 #include <sys/sbuf.h>
56 #include <sys/sysctl.h>
57 #include <sys/tree.h>
58 #include <sys/vmmeter.h>
59
60 #include <ddb/ddb.h>
61
62 #include <vm/vm.h>
63 #include <vm/vm_extern.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_object.h>
67 #include <vm/vm_page.h>
68 #include <vm/vm_phys.h>
69 #include <vm/vm_pagequeue.h>
70
71 _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
72     "Too many physsegs.");
73
74 #ifdef NUMA
75 struct mem_affinity __read_mostly *mem_affinity;
76 int __read_mostly *mem_locality;
77
78 static int numa_disabled;
79 static SYSCTL_NODE(_vm, OID_AUTO, numa, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
80     "NUMA options");
81 SYSCTL_INT(_vm_numa, OID_AUTO, disabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
82     &numa_disabled, 0, "NUMA-awareness in the allocators is disabled");
83 #endif
84
85 int __read_mostly vm_ndomains = 1;
86 domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);
87
88 struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
89 int __read_mostly vm_phys_nsegs;
90 static struct vm_phys_seg vm_phys_early_segs[8];
91 static int vm_phys_early_nsegs;
92
93 struct vm_phys_fictitious_seg;
94 static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
95     struct vm_phys_fictitious_seg *);
96
97 RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
98     RB_INITIALIZER(&vm_phys_fictitious_tree);
99
100 struct vm_phys_fictitious_seg {
101         RB_ENTRY(vm_phys_fictitious_seg) node;
102         /* Memory region data */
103         vm_paddr_t      start;
104         vm_paddr_t      end;
105         vm_page_t       first_page;
106 };
107
108 RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
109     vm_phys_fictitious_cmp);
110
111 static struct rwlock_padalign vm_phys_fictitious_reg_lock;
112 MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");
113
114 static struct vm_freelist __aligned(CACHE_LINE_SIZE)
115     vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL]
116     [VM_NFREEORDER_MAX];
117
118 static int __read_mostly vm_nfreelists;
119
120 /*
121  * These "avail lists" are globals used to communicate boot-time physical
122  * memory layout to other parts of the kernel.  Each physically contiguous
123  * region of memory is defined by a start address at an even index and an
124  * end address at the following odd index.  Each list is terminated by a
125  * pair of zero entries.
126  *
127  * dump_avail tells the dump code what regions to include in a crash dump, and
128  * phys_avail is all of the remaining physical memory that is available for
129  * the vm system.
130  *
131  * Initially dump_avail and phys_avail are identical.  Boot time memory
132  * allocations remove extents from phys_avail that may still be included
133  * in dumps.
134  */
135 vm_paddr_t phys_avail[PHYS_AVAIL_COUNT];
136 vm_paddr_t dump_avail[PHYS_AVAIL_COUNT];
137
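/*
 * Illustrative sketch (not part of the original file): one way a consumer
 * of the layout described above could walk phys_avail[].  Entries come in
 * (start, end) pairs at indices (2i, 2i + 1) and the list ends with a pair
 * of zero entries; the helper name is hypothetical.
 */
#if 0
static vm_paddr_t
example_phys_avail_total(void)
{
        vm_paddr_t total;
        int i;

        total = 0;
        for (i = 0; phys_avail[i + 1] != 0; i += 2)
                total += phys_avail[i + 1] - phys_avail[i];
        return (total);
}
#endif
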
138 /*
139  * Provides the mapping from VM_FREELIST_* to free list indices (flind).
140  */
141 static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];
142
143 CTASSERT(VM_FREELIST_DEFAULT == 0);
144
145 #ifdef VM_FREELIST_DMA32
146 #define VM_DMA32_BOUNDARY       ((vm_paddr_t)1 << 32)
147 #endif
148
149 /*
150  * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
151  * the ordering of the free list boundaries.
152  */
153 #if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
154 CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
155 #endif
156
157 static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
158 SYSCTL_OID(_vm, OID_AUTO, phys_free,
159     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
160     sysctl_vm_phys_free, "A",
161     "Phys Free Info");
162
163 static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
164 SYSCTL_OID(_vm, OID_AUTO, phys_segs,
165     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
166     sysctl_vm_phys_segs, "A",
167     "Phys Seg Info");
168
169 #ifdef NUMA
170 static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
171 SYSCTL_OID(_vm, OID_AUTO, phys_locality,
172     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
173     sysctl_vm_phys_locality, "A",
174     "Phys Locality Info");
175 #endif
176
177 SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
178     &vm_ndomains, 0, "Number of physical memory domains available.");
179
180 static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
181     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
182     vm_paddr_t boundary);
183 static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
184 static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
185 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
186     int order, int tail);
187
188 /*
189  * Red-black tree helpers for vm fictitious range management.
190  */
191 static inline int
192 vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
193     struct vm_phys_fictitious_seg *range)
194 {
195
196         KASSERT(range->start != 0 && range->end != 0,
197             ("Invalid range passed on search for vm_fictitious page"));
198         if (p->start >= range->end)
199                 return (1);
200         if (p->start < range->start)
201                 return (-1);
202
203         return (0);
204 }
205
206 static int
207 vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
208     struct vm_phys_fictitious_seg *p2)
209 {
210
211         /* Check if this is a search for a page */
212         if (p1->end == 0)
213                 return (vm_phys_fictitious_in_range(p1, p2));
214
215         KASSERT(p2->end != 0,
216     ("Invalid range passed as second parameter to vm fictitious comparison"));
217
218         /* Searching to add a new range */
219         if (p1->end <= p2->start)
220                 return (-1);
221         if (p1->start >= p2->end)
222                 return (1);
223
224         panic("Trying to add overlapping vm fictitious ranges:\n"
225             "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
226             (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
227 }
228
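/*
 * Illustrative sketch (not part of the original file): because the
 * comparator above treats a key with a zero "end" as a point query, a
 * lookup for the registered segment containing a physical address can be
 * written as below.  This mirrors vm_phys_fictitious_to_vm_page() further
 * down; the real lookup also holds vm_phys_fictitious_reg_lock, and the
 * function name here is hypothetical.
 */
#if 0
static struct vm_phys_fictitious_seg *
example_lookup_fict_seg(vm_paddr_t pa)
{
        struct vm_phys_fictitious_seg key;

        key.start = pa;
        key.end = 0;            /* A zero "end" marks a point query. */
        return (RB_FIND(fict_tree, &vm_phys_fictitious_tree, &key));
}
#endif
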
229 int
230 vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
231 {
232 #ifdef NUMA
233         domainset_t mask;
234         int i;
235
236         if (vm_ndomains == 1 || mem_affinity == NULL)
237                 return (0);
238
239         DOMAINSET_ZERO(&mask);
240         /*
241          * Check for any memory that overlaps low, high.
242          */
243         for (i = 0; mem_affinity[i].end != 0; i++)
244                 if (mem_affinity[i].start <= high &&
245                     mem_affinity[i].end >= low)
246                         DOMAINSET_SET(mem_affinity[i].domain, &mask);
247         if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
248                 return (prefer);
249         if (DOMAINSET_EMPTY(&mask))
250                 panic("vm_phys_domain_match:  Impossible constraint");
251         return (DOMAINSET_FFS(&mask) - 1);
252 #else
253         return (0);
254 #endif
255 }
256
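/*
 * Illustrative sketch (not part of the original file): a caller that must
 * allocate below 4GB but would prefer domain 0 could choose a domain as
 * below; the wrapper name is hypothetical.
 */
#if 0
static int
example_pick_domain_below_4g(void)
{

        /* Prefer domain 0; otherwise take any domain with memory in range. */
        return (vm_phys_domain_match(0, 0, ((vm_paddr_t)1 << 32) - 1));
}
#endif
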
257 /*
258  * Outputs the state of the physical memory allocator, specifically,
259  * the amount of physical memory in each free list.
260  */
261 static int
262 sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
263 {
264         struct sbuf sbuf;
265         struct vm_freelist *fl;
266         int dom, error, flind, oind, pind;
267
268         error = sysctl_wire_old_buffer(req, 0);
269         if (error != 0)
270                 return (error);
271         sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
272         for (dom = 0; dom < vm_ndomains; dom++) {
273                 sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
274                 for (flind = 0; flind < vm_nfreelists; flind++) {
275                         sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
276                             "\n  ORDER (SIZE)  |  NUMBER"
277                             "\n              ", flind);
278                         for (pind = 0; pind < VM_NFREEPOOL; pind++)
279                                 sbuf_printf(&sbuf, "  |  POOL %d", pind);
280                         sbuf_printf(&sbuf, "\n--            ");
281                         for (pind = 0; pind < VM_NFREEPOOL; pind++)
282                                 sbuf_printf(&sbuf, "-- --      ");
283                         sbuf_printf(&sbuf, "--\n");
284                         for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
285                                 sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
286                                     1 << (PAGE_SHIFT - 10 + oind));
287                                 for (pind = 0; pind < VM_NFREEPOOL; pind++) {
288                                         fl = vm_phys_free_queues[dom][flind][pind];
289                                         sbuf_printf(&sbuf, "  |  %6d",
290                                             fl[oind].lcnt);
291                                 }
292                                 sbuf_printf(&sbuf, "\n");
293                         }
294                 }
295         }
296         error = sbuf_finish(&sbuf);
297         sbuf_delete(&sbuf);
298         return (error);
299 }
300
301 /*
302  * Outputs the set of physical memory segments.
303  */
304 static int
305 sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
306 {
307         struct sbuf sbuf;
308         struct vm_phys_seg *seg;
309         int error, segind;
310
311         error = sysctl_wire_old_buffer(req, 0);
312         if (error != 0)
313                 return (error);
314         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
315         for (segind = 0; segind < vm_phys_nsegs; segind++) {
316                 sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
317                 seg = &vm_phys_segs[segind];
318                 sbuf_printf(&sbuf, "start:     %#jx\n",
319                     (uintmax_t)seg->start);
320                 sbuf_printf(&sbuf, "end:       %#jx\n",
321                     (uintmax_t)seg->end);
322                 sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
323                 sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
324         }
325         error = sbuf_finish(&sbuf);
326         sbuf_delete(&sbuf);
327         return (error);
328 }
329
330 /*
331  * Return affinity, or -1 if there's no affinity information.
332  */
333 int
334 vm_phys_mem_affinity(int f, int t)
335 {
336
337 #ifdef NUMA
338         if (mem_locality == NULL)
339                 return (-1);
340         if (f >= vm_ndomains || t >= vm_ndomains)
341                 return (-1);
342         return (mem_locality[f * vm_ndomains + t]);
343 #else
344         return (-1);
345 #endif
346 }
347
348 #ifdef NUMA
349 /*
350  * Outputs the VM locality table.
351  */
352 static int
353 sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
354 {
355         struct sbuf sbuf;
356         int error, i, j;
357
358         error = sysctl_wire_old_buffer(req, 0);
359         if (error != 0)
360                 return (error);
361         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
362
363         sbuf_printf(&sbuf, "\n");
364
365         for (i = 0; i < vm_ndomains; i++) {
366                 sbuf_printf(&sbuf, "%d: ", i);
367                 for (j = 0; j < vm_ndomains; j++) {
368                         sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
369                 }
370                 sbuf_printf(&sbuf, "\n");
371         }
372         error = sbuf_finish(&sbuf);
373         sbuf_delete(&sbuf);
374         return (error);
375 }
376 #endif
377
378 static void
379 vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
380 {
381
382         m->order = order;
383         if (tail)
384                 TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
385         else
386                 TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
387         fl[order].lcnt++;
388 }
389
390 static void
391 vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
392 {
393
394         TAILQ_REMOVE(&fl[order].pl, m, listq);
395         fl[order].lcnt--;
396         m->order = VM_NFREEORDER;
397 }
398
399 /*
400  * Create a physical memory segment.
401  */
402 static void
403 _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
404 {
405         struct vm_phys_seg *seg;
406
407         KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
408             ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
409         KASSERT(domain >= 0 && domain < vm_ndomains,
410             ("vm_phys_create_seg: invalid domain provided"));
411         seg = &vm_phys_segs[vm_phys_nsegs++];
412         while (seg > vm_phys_segs && (seg - 1)->start >= end) {
413                 *seg = *(seg - 1);
414                 seg--;
415         }
416         seg->start = start;
417         seg->end = end;
418         seg->domain = domain;
419 }
420
421 static void
422 vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
423 {
424 #ifdef NUMA
425         int i;
426
427         if (mem_affinity == NULL) {
428                 _vm_phys_create_seg(start, end, 0);
429                 return;
430         }
431
432         for (i = 0;; i++) {
433                 if (mem_affinity[i].end == 0)
434                         panic("Reached end of affinity info");
435                 if (mem_affinity[i].end <= start)
436                         continue;
437                 if (mem_affinity[i].start > start)
438                         panic("No affinity info for start %jx",
439                             (uintmax_t)start);
440                 if (mem_affinity[i].end >= end) {
441                         _vm_phys_create_seg(start, end,
442                             mem_affinity[i].domain);
443                         break;
444                 }
445                 _vm_phys_create_seg(start, mem_affinity[i].end,
446                     mem_affinity[i].domain);
447                 start = mem_affinity[i].end;
448         }
449 #else
450         _vm_phys_create_seg(start, end, 0);
451 #endif
452 }
453
454 /*
455  * Add a physical memory segment.
456  */
457 void
458 vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
459 {
460         vm_paddr_t paddr;
461
462         KASSERT((start & PAGE_MASK) == 0,
463             ("vm_phys_define_seg: start is not page aligned"));
464         KASSERT((end & PAGE_MASK) == 0,
465             ("vm_phys_define_seg: end is not page aligned"));
466
467         /*
468          * Split the physical memory segment if it spans two or more free
469          * list boundaries.
470          */
471         paddr = start;
472 #ifdef  VM_FREELIST_LOWMEM
473         if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
474                 vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
475                 paddr = VM_LOWMEM_BOUNDARY;
476         }
477 #endif
478 #ifdef  VM_FREELIST_DMA32
479         if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
480                 vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
481                 paddr = VM_DMA32_BOUNDARY;
482         }
483 #endif
484         vm_phys_create_seg(paddr, end);
485 }
486
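/*
 * Illustrative example (not part of the original file): on a configuration
 * that defines both VM_FREELIST_LOWMEM and VM_FREELIST_DMA32, a single call
 * such as
 *
 *      vm_phys_add_seg((vm_paddr_t)1 << 20, (vm_paddr_t)5 << 30);
 *
 * (arbitrary addresses, 1MB to 5GB) is recorded as three segments:
 * [1MB, VM_LOWMEM_BOUNDARY), [VM_LOWMEM_BOUNDARY, VM_DMA32_BOUNDARY) and
 * [VM_DMA32_BOUNDARY, 5GB), so that vm_phys_init() can later place each
 * piece on the matching free list.
 */
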
487 /*
488  * Initialize the physical memory allocator.
489  *
490  * Requires that vm_page_array is initialized!
491  */
492 void
493 vm_phys_init(void)
494 {
495         struct vm_freelist *fl;
496         struct vm_phys_seg *end_seg, *prev_seg, *seg, *tmp_seg;
497         u_long npages;
498         int dom, flind, freelist, oind, pind, segind;
499
500         /*
501          * Compute the number of free lists, and generate the mapping from the
502          * manifest constants VM_FREELIST_* to the free list indices.
503          *
504          * Initially, the entries of vm_freelist_to_flind[] are set to either
505          * 0 or 1 to indicate which free lists should be created.
506          */
507         npages = 0;
508         for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
509                 seg = &vm_phys_segs[segind];
510 #ifdef  VM_FREELIST_LOWMEM
511                 if (seg->end <= VM_LOWMEM_BOUNDARY)
512                         vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
513                 else
514 #endif
515 #ifdef  VM_FREELIST_DMA32
516                 if (
517 #ifdef  VM_DMA32_NPAGES_THRESHOLD
518                     /*
519                      * Create the DMA32 free list only if the amount of
520                      * physical memory above physical address 4G exceeds the
521                      * given threshold.
522                      */
523                     npages > VM_DMA32_NPAGES_THRESHOLD &&
524 #endif
525                     seg->end <= VM_DMA32_BOUNDARY)
526                         vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
527                 else
528 #endif
529                 {
530                         npages += atop(seg->end - seg->start);
531                         vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
532                 }
533         }
534         /* Change each entry into a running total of the free lists. */
535         for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
536                 vm_freelist_to_flind[freelist] +=
537                     vm_freelist_to_flind[freelist - 1];
538         }
539         vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
540         KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
541         /* Change each entry into a free list index. */
542         for (freelist = 0; freelist < VM_NFREELIST; freelist++)
543                 vm_freelist_to_flind[freelist]--;
544
545         /*
546          * Initialize the first_page and free_queues fields of each physical
547          * memory segment.
548          */
549 #ifdef VM_PHYSSEG_SPARSE
550         npages = 0;
551 #endif
552         for (segind = 0; segind < vm_phys_nsegs; segind++) {
553                 seg = &vm_phys_segs[segind];
554 #ifdef VM_PHYSSEG_SPARSE
555                 seg->first_page = &vm_page_array[npages];
556                 npages += atop(seg->end - seg->start);
557 #else
558                 seg->first_page = PHYS_TO_VM_PAGE(seg->start);
559 #endif
560 #ifdef  VM_FREELIST_LOWMEM
561                 if (seg->end <= VM_LOWMEM_BOUNDARY) {
562                         flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
563                         KASSERT(flind >= 0,
564                             ("vm_phys_init: LOWMEM flind < 0"));
565                 } else
566 #endif
567 #ifdef  VM_FREELIST_DMA32
568                 if (seg->end <= VM_DMA32_BOUNDARY) {
569                         flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
570                         KASSERT(flind >= 0,
571                             ("vm_phys_init: DMA32 flind < 0"));
572                 } else
573 #endif
574                 {
575                         flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
576                         KASSERT(flind >= 0,
577                             ("vm_phys_init: DEFAULT flind < 0"));
578                 }
579                 seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
580         }
581
582         /*
583          * Coalesce physical memory segments that are contiguous and share the
584          * same per-domain free queues.
585          */
586         prev_seg = vm_phys_segs;
587         seg = &vm_phys_segs[1];
588         end_seg = &vm_phys_segs[vm_phys_nsegs];
589         while (seg < end_seg) {
590                 if (prev_seg->end == seg->start &&
591                     prev_seg->free_queues == seg->free_queues) {
592                         prev_seg->end = seg->end;
593                         KASSERT(prev_seg->domain == seg->domain,
594                             ("vm_phys_init: free queues cannot span domains"));
595                         vm_phys_nsegs--;
596                         end_seg--;
597                         for (tmp_seg = seg; tmp_seg < end_seg; tmp_seg++)
598                                 *tmp_seg = *(tmp_seg + 1);
599                 } else {
600                         prev_seg = seg;
601                         seg++;
602                 }
603         }
604
605         /*
606          * Initialize the free queues.
607          */
608         for (dom = 0; dom < vm_ndomains; dom++) {
609                 for (flind = 0; flind < vm_nfreelists; flind++) {
610                         for (pind = 0; pind < VM_NFREEPOOL; pind++) {
611                                 fl = vm_phys_free_queues[dom][flind][pind];
612                                 for (oind = 0; oind < VM_NFREEORDER; oind++)
613                                         TAILQ_INIT(&fl[oind].pl);
614                         }
615                 }
616         }
617
618         rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
619 }
620
621 /*
622  * Register info about the NUMA topology of the system.
623  *
624  * Invoked by platform-dependent code prior to vm_phys_init().
625  */
626 void
627 vm_phys_register_domains(int ndomains, struct mem_affinity *affinity,
628     int *locality)
629 {
630 #ifdef NUMA
631         int i;
632
633         /*
634          * For now the only override value that we support is 1, which
635          * effectively disables NUMA-awareness in the allocators.
636          */
637         TUNABLE_INT_FETCH("vm.numa.disabled", &numa_disabled);
638         if (numa_disabled)
639                 ndomains = 1;
640
641         if (ndomains > 1) {
642                 vm_ndomains = ndomains;
643                 mem_affinity = affinity;
644                 mem_locality = locality;
645         }
646
647         for (i = 0; i < vm_ndomains; i++)
648                 DOMAINSET_SET(i, &all_domains);
649 #else
650         (void)ndomains;
651         (void)affinity;
652         (void)locality;
653 #endif
654 }
655
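/*
 * Illustrative sketch (not part of the original file): machine-dependent
 * startup code on a two-domain system might describe its memory map and
 * locality matrix as below before calling vm_phys_register_domains().
 * Real tables are built from firmware data (e.g. ACPI SRAT/SLIT); every
 * value and name here is invented.
 */
#if 0
static struct mem_affinity example_affinity[] = {
        { .start = 0x000000000, .end = 0x080000000, .domain = 0 },
        { .start = 0x080000000, .end = 0x100000000, .domain = 1 },
        { .start = 0, .end = 0, .domain = 0 }   /* terminator */
};
static int example_locality[2 * 2] = {
        10, 21,         /* distances from domain 0 to domains 0, 1 */
        21, 10          /* distances from domain 1 to domains 0, 1 */
};

static void
example_register_numa(void)
{

        vm_phys_register_domains(2, example_affinity, example_locality);
}
#endif
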
656 /*
657  * Split a contiguous, power of two-sized set of physical pages.
658  *
659  * When this function is called by a page allocation function, the caller
660  * should request insertion at the head unless the order [order, oind) queues
661  * are known to be empty.  The objective is to reduce the likelihood of
662  * long-term fragmentation by promoting contemporaneous allocation and
663  * (hopefully) deallocation.
664  */
665 static __inline void
666 vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
667     int tail)
668 {
669         vm_page_t m_buddy;
670
671         while (oind > order) {
672                 oind--;
673                 m_buddy = &m[1 << oind];
674                 KASSERT(m_buddy->order == VM_NFREEORDER,
675                     ("vm_phys_split_pages: page %p has unexpected order %d",
676                     m_buddy, m_buddy->order));
677                 vm_freelist_add(fl, m_buddy, oind, tail);
678         }
679 }
680
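/*
 * Illustrative worked example (not part of the original file): splitting an
 * order-4 block (16 pages) down to an order-1 request releases the upper
 * buddies back to "fl" and leaves the caller holding [m, m + 2):
 *
 *      oind = 4, order = 1
 *      iteration 1: &m[8] freed as an order-3 block
 *      iteration 2: &m[4] freed as an order-2 block
 *      iteration 3: &m[2] freed as an order-1 block
 */
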
681 /*
682  * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
683  * and sized set to the specified free list.
684  *
685  * When this function is called by a page allocation function, the caller
686  * should request insertion at the head unless the lower-order queues are
687  * known to be empty.  The objective is to reduce the likelihood of long-
688  * term fragmentation by promoting contemporaneous allocation and (hopefully)
689  * deallocation.
690  *
691  * The physical page m's buddy must not be free.
692  */
693 static void
694 vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
695 {
696         u_int n;
697         int order;
698
699         KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
700         KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
701             ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
702             ("vm_phys_enq_range: page %p and npages %u are misaligned",
703             m, npages));
704         do {
705                 KASSERT(m->order == VM_NFREEORDER,
706                     ("vm_phys_enq_range: page %p has unexpected order %d",
707                     m, m->order));
708                 order = ffs(npages) - 1;
709                 KASSERT(order < VM_NFREEORDER,
710                     ("vm_phys_enq_range: order %d is out of range", order));
711                 vm_freelist_add(fl, m, order, tail);
712                 n = 1 << order;
713                 m += n;
714                 npages -= n;
715         } while (npages > 0);
716 }
717
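/*
 * Illustrative worked example (not part of the original file): for
 * npages = 7 the loop above peels off blocks sized by the lowest set bit of
 * the remaining count, queueing
 *
 *      [m, m + 1)      at order 0
 *      [m + 1, m + 3)  at order 1
 *      [m + 3, m + 7)  at order 2
 *
 * which is why the end of the range must be aligned to the largest of these
 * block sizes, as the KASSERT above verifies.
 */
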
718 /*
719  * Set the pool for a contiguous, power of two-sized set of physical pages.
720  */
721 static void
722 vm_phys_set_pool(int pool, vm_page_t m, int order)
723 {
724         vm_page_t m_tmp;
725
726         for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
727                 m_tmp->pool = pool;
728 }
729
730 /*
731  * Tries to allocate the specified number of pages from the specified pool
732  * within the specified domain.  Returns the actual number of allocated pages
733  * and a pointer to each page through the array ma[].
734  *
735  * The returned pages may not be physically contiguous.  However, in contrast
736  * to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
737  * calling this function once to allocate the desired number of pages will
738  * avoid wasted time in vm_phys_split_pages().
739  *
740  * The free page queues for the specified domain must be locked.
741  */
742 int
743 vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
744 {
745         struct vm_freelist *alt, *fl;
746         vm_page_t m;
747         int avail, end, flind, freelist, i, need, oind, pind;
748
749         KASSERT(domain >= 0 && domain < vm_ndomains,
750             ("vm_phys_alloc_npages: domain %d is out of range", domain));
751         KASSERT(pool < VM_NFREEPOOL,
752             ("vm_phys_alloc_npages: pool %d is out of range", pool));
753         KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
754             ("vm_phys_alloc_npages: npages %d is out of range", npages));
755         vm_domain_free_assert_locked(VM_DOMAIN(domain));
756         i = 0;
757         for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
758                 flind = vm_freelist_to_flind[freelist];
759                 if (flind < 0)
760                         continue;
761                 fl = vm_phys_free_queues[domain][flind][pool];
762                 for (oind = 0; oind < VM_NFREEORDER; oind++) {
763                         while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
764                                 vm_freelist_rem(fl, m, oind);
765                                 avail = 1 << oind;
766                                 need = imin(npages - i, avail);
767                                 for (end = i + need; i < end;)
768                                         ma[i++] = m++;
769                                 if (need < avail) {
770                                         /*
771                                          * Return excess pages to fl.  Its
772                                          * order [0, oind) queues are empty.
773                                          */
774                                         vm_phys_enq_range(m, avail - need, fl,
775                                             1);
776                                         return (npages);
777                                 } else if (i == npages)
778                                         return (npages);
779                         }
780                 }
781                 for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
782                         for (pind = 0; pind < VM_NFREEPOOL; pind++) {
783                                 alt = vm_phys_free_queues[domain][flind][pind];
784                                 while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
785                                     NULL) {
786                                         vm_freelist_rem(alt, m, oind);
787                                         vm_phys_set_pool(pool, m, oind);
788                                         avail = 1 << oind;
789                                         need = imin(npages - i, avail);
790                                         for (end = i + need; i < end;)
791                                                 ma[i++] = m++;
792                                         if (need < avail) {
793                                                 /*
794                                                  * Return excess pages to fl.
795                                                  * Its order [0, oind) queues
796                                                  * are empty.
797                                                  */
798                                                 vm_phys_enq_range(m, avail -
799                                                     need, fl, 1);
800                                                 return (npages);
801                                         } else if (i == npages)
802                                                 return (npages);
803                                 }
804                         }
805                 }
806         }
807         return (i);
808 }
809
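/*
 * Illustrative sketch (not part of the original file): a caller batching an
 * allocation of up to 16 default-pool pages from domain 0.  It assumes the
 * vm_domain_free_lock()/vm_domain_free_unlock() helpers from
 * vm_pagequeue.h; real callers also reserve the pages against the domain's
 * free-page counts first, and the function name is hypothetical.
 */
#if 0
static int
example_alloc_batch(vm_page_t ma[16])
{
        struct vm_domain *vmd;
        int got;

        vmd = VM_DOMAIN(0);
        vm_domain_free_lock(vmd);
        got = vm_phys_alloc_npages(0, VM_FREEPOOL_DEFAULT, 16, ma);
        vm_domain_free_unlock(vmd);

        /* Fewer than 16 pages may be returned; the caller must cope. */
        return (got);
}
#endif
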
810 /*
811  * Allocate a contiguous, power of two-sized set of physical pages
812  * from the free lists.
813  *
814  * The free page queues must be locked.
815  */
816 vm_page_t
817 vm_phys_alloc_pages(int domain, int pool, int order)
818 {
819         vm_page_t m;
820         int freelist;
821
822         for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
823                 m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
824                 if (m != NULL)
825                         return (m);
826         }
827         return (NULL);
828 }
829
830 /*
831  * Allocate a contiguous, power of two-sized set of physical pages from the
832  * specified free list.  The free list must be specified using one of the
833  * manifest constants VM_FREELIST_*.
834  *
835  * The free page queues must be locked.
836  */
837 vm_page_t
838 vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
839 {
840         struct vm_freelist *alt, *fl;
841         vm_page_t m;
842         int oind, pind, flind;
843
844         KASSERT(domain >= 0 && domain < vm_ndomains,
845             ("vm_phys_alloc_freelist_pages: domain %d is out of range",
846             domain));
847         KASSERT(freelist < VM_NFREELIST,
848             ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
849             freelist));
850         KASSERT(pool < VM_NFREEPOOL,
851             ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
852         KASSERT(order < VM_NFREEORDER,
853             ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
854
855         flind = vm_freelist_to_flind[freelist];
856         /* Check if freelist is present */
857         if (flind < 0)
858                 return (NULL);
859
860         vm_domain_free_assert_locked(VM_DOMAIN(domain));
861         fl = &vm_phys_free_queues[domain][flind][pool][0];
862         for (oind = order; oind < VM_NFREEORDER; oind++) {
863                 m = TAILQ_FIRST(&fl[oind].pl);
864                 if (m != NULL) {
865                         vm_freelist_rem(fl, m, oind);
866                         /* The order [order, oind) queues are empty. */
867                         vm_phys_split_pages(m, oind, fl, order, 1);
868                         return (m);
869                 }
870         }
871
872         /*
873          * The given pool was empty.  Find the largest
874          * contiguous, power-of-two-sized set of pages in any
875          * pool.  Transfer these pages to the given pool, and
876          * use them to satisfy the allocation.
877          */
878         for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
879                 for (pind = 0; pind < VM_NFREEPOOL; pind++) {
880                         alt = &vm_phys_free_queues[domain][flind][pind][0];
881                         m = TAILQ_FIRST(&alt[oind].pl);
882                         if (m != NULL) {
883                                 vm_freelist_rem(alt, m, oind);
884                                 vm_phys_set_pool(pool, m, oind);
885                                 /* The order [order, oind) queues are empty. */
886                                 vm_phys_split_pages(m, oind, fl, order, 1);
887                                 return (m);
888                         }
889                 }
890         }
891         return (NULL);
892 }
893
894 /*
895  * Find the vm_page corresponding to the given physical address.
896  */
897 vm_page_t
898 vm_phys_paddr_to_vm_page(vm_paddr_t pa)
899 {
900         struct vm_phys_seg *seg;
901         int segind;
902
903         for (segind = 0; segind < vm_phys_nsegs; segind++) {
904                 seg = &vm_phys_segs[segind];
905                 if (pa >= seg->start && pa < seg->end)
906                         return (&seg->first_page[atop(pa - seg->start)]);
907         }
908         return (NULL);
909 }
910
911 vm_page_t
912 vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
913 {
914         struct vm_phys_fictitious_seg tmp, *seg;
915         vm_page_t m;
916
917         m = NULL;
918         tmp.start = pa;
919         tmp.end = 0;
920
921         rw_rlock(&vm_phys_fictitious_reg_lock);
922         seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
923         rw_runlock(&vm_phys_fictitious_reg_lock);
924         if (seg == NULL)
925                 return (NULL);
926
927         m = &seg->first_page[atop(pa - seg->start)];
928         KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));
929
930         return (m);
931 }
932
933 static inline void
934 vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
935     long page_count, vm_memattr_t memattr)
936 {
937         long i;
938
939         bzero(range, page_count * sizeof(*range));
940         for (i = 0; i < page_count; i++) {
941                 vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
942                 range[i].oflags &= ~VPO_UNMANAGED;
943                 range[i].busy_lock = VPB_UNBUSIED;
944         }
945 }
946
947 int
948 vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
949     vm_memattr_t memattr)
950 {
951         struct vm_phys_fictitious_seg *seg;
952         vm_page_t fp;
953         long page_count;
954 #ifdef VM_PHYSSEG_DENSE
955         long pi, pe;
956         long dpage_count;
957 #endif
958
959         KASSERT(start < end,
960             ("Start of segment isn't less than end (start: %jx end: %jx)",
961             (uintmax_t)start, (uintmax_t)end));
962
963         page_count = (end - start) / PAGE_SIZE;
964
965 #ifdef VM_PHYSSEG_DENSE
966         pi = atop(start);
967         pe = atop(end);
968         if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
969                 fp = &vm_page_array[pi - first_page];
970                 if ((pe - first_page) > vm_page_array_size) {
971                         /*
972                          * We have a segment that starts inside
973                          * of vm_page_array, but ends outside of it.
974                          *
975                          * Use vm_page_array pages for those that are
976                          * inside of the vm_page_array range, and
977                          * allocate the remaining ones.
978                          */
979                         dpage_count = vm_page_array_size - (pi - first_page);
980                         vm_phys_fictitious_init_range(fp, start, dpage_count,
981                             memattr);
982                         page_count -= dpage_count;
983                         start += ptoa(dpage_count);
984                         goto alloc;
985                 }
986                 /*
987                  * We can allocate the full range from vm_page_array,
988                  * so there's no need to register the range in the tree.
989                  */
990                 vm_phys_fictitious_init_range(fp, start, page_count, memattr);
991                 return (0);
992         } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
993                 /*
994                  * We have a segment that ends inside of vm_page_array,
995                  * but starts outside of it.
996                  */
997                 fp = &vm_page_array[0];
998                 dpage_count = pe - first_page;
999                 vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
1000                     memattr);
1001                 end -= ptoa(dpage_count);
1002                 page_count -= dpage_count;
1003                 goto alloc;
1004         } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
1005                 /*
1006                  * Trying to register a fictitious range that extends before
1007                  * and after vm_page_array.
1008                  */
1009                 return (EINVAL);
1010         } else {
1011 alloc:
1012 #endif
1013                 fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
1014                     M_WAITOK);
1015 #ifdef VM_PHYSSEG_DENSE
1016         }
1017 #endif
1018         vm_phys_fictitious_init_range(fp, start, page_count, memattr);
1019
1020         seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
1021         seg->start = start;
1022         seg->end = end;
1023         seg->first_page = fp;
1024
1025         rw_wlock(&vm_phys_fictitious_reg_lock);
1026         RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
1027         rw_wunlock(&vm_phys_fictitious_reg_lock);
1028
1029         return (0);
1030 }
1031
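/*
 * Illustrative sketch (not part of the original file): a driver exposing a
 * memory aperture could register fictitious pages for it and later resolve
 * individual addresses with vm_phys_fictitious_to_vm_page().  The
 * addresses, the write-combining attribute and the function name are all
 * example choices.
 */
#if 0
static void
example_register_aperture(void)
{
        vm_paddr_t aper_base, aper_end;
        vm_page_t m;

        aper_base = 0xd0000000;
        aper_end = aper_base + 256 * 1024 * 1024;
        if (vm_phys_fictitious_reg_range(aper_base, aper_end,
            VM_MEMATTR_WRITE_COMBINING) != 0)
                return;
        /* Resolve one address inside the aperture to its fake vm_page. */
        m = vm_phys_fictitious_to_vm_page(aper_base + PAGE_SIZE);
        (void)m;
        vm_phys_fictitious_unreg_range(aper_base, aper_end);
}
#endif
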
1032 void
1033 vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
1034 {
1035         struct vm_phys_fictitious_seg *seg, tmp;
1036 #ifdef VM_PHYSSEG_DENSE
1037         long pi, pe;
1038 #endif
1039
1040         KASSERT(start < end,
1041             ("Start of segment isn't less than end (start: %jx end: %jx)",
1042             (uintmax_t)start, (uintmax_t)end));
1043
1044 #ifdef VM_PHYSSEG_DENSE
1045         pi = atop(start);
1046         pe = atop(end);
1047         if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
1048                 if ((pe - first_page) <= vm_page_array_size) {
1049                         /*
1050                          * This segment was allocated using vm_page_array
1051                          * only, there's nothing to do since those pages
1052                          * were never added to the tree.
1053                          */
1054                         return;
1055                 }
1056                 /*
1057                  * We have a segment that starts inside
1058                  * of vm_page_array, but ends outside of it.
1059                  *
1060                  * Calculate how many pages were added to the
1061                  * tree and free them.
1062                  */
1063                 start = ptoa(first_page + vm_page_array_size);
1064         } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
1065                 /*
1066                  * We have a segment that ends inside of vm_page_array,
1067                  * but starts outside of it.
1068                  */
1069                 end = ptoa(first_page);
1070         } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
1071                 /* Since it's not possible to register such a range, panic. */
1072                 panic(
1073                     "Unregistering not registered fictitious range [%#jx:%#jx]",
1074                     (uintmax_t)start, (uintmax_t)end);
1075         }
1076 #endif
1077         tmp.start = start;
1078         tmp.end = 0;
1079
1080         rw_wlock(&vm_phys_fictitious_reg_lock);
1081         seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
1082         if (seg == NULL || seg->start != start || seg->end != end) {
1083                 rw_wunlock(&vm_phys_fictitious_reg_lock);
1084                 panic(
1085                     "Unregistering not registered fictitious range [%#jx:%#jx]",
1086                     (uintmax_t)start, (uintmax_t)end);
1087         }
1088         RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
1089         rw_wunlock(&vm_phys_fictitious_reg_lock);
1090         free(seg->first_page, M_FICT_PAGES);
1091         free(seg, M_FICT_PAGES);
1092 }
1093
1094 /*
1095  * Free a contiguous, power of two-sized set of physical pages.
1096  *
1097  * The free page queues must be locked.
1098  */
1099 void
1100 vm_phys_free_pages(vm_page_t m, int order)
1101 {
1102         struct vm_freelist *fl;
1103         struct vm_phys_seg *seg;
1104         vm_paddr_t pa;
1105         vm_page_t m_buddy;
1106
1107         KASSERT(m->order == VM_NFREEORDER,
1108             ("vm_phys_free_pages: page %p has unexpected order %d",
1109             m, m->order));
1110         KASSERT(m->pool < VM_NFREEPOOL,
1111             ("vm_phys_free_pages: page %p has unexpected pool %d",
1112             m, m->pool));
1113         KASSERT(order < VM_NFREEORDER,
1114             ("vm_phys_free_pages: order %d is out of range", order));
1115         seg = &vm_phys_segs[m->segind];
1116         vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
1117         if (order < VM_NFREEORDER - 1) {
1118                 pa = VM_PAGE_TO_PHYS(m);
1119                 do {
1120                         pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
1121                         if (pa < seg->start || pa >= seg->end)
1122                                 break;
1123                         m_buddy = &seg->first_page[atop(pa - seg->start)];
1124                         if (m_buddy->order != order)
1125                                 break;
1126                         fl = (*seg->free_queues)[m_buddy->pool];
1127                         vm_freelist_rem(fl, m_buddy, order);
1128                         if (m_buddy->pool != m->pool)
1129                                 vm_phys_set_pool(m->pool, m_buddy, order);
1130                         order++;
1131                         pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
1132                         m = &seg->first_page[atop(pa - seg->start)];
1133                 } while (order < VM_NFREEORDER - 1);
1134         }
1135         fl = (*seg->free_queues)[m->pool];
1136         vm_freelist_add(fl, m, order, 1);
1137 }
1138
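/*
 * Illustrative worked example (not part of the original file): with 4KB
 * pages, freeing the order-0 page at physical address 0x5000 while its
 * buddy at 0x4000 is already free merges them into an order-1 block at
 * 0x4000; if the order-1 block at 0x6000 is free as well, the result is an
 * order-2 block at 0x4000, and so on until a buddy is missing or
 * VM_NFREEORDER - 1 is reached.  The XOR in the loop above is what flips
 * between buddy addresses at each order.
 */
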
1139 /*
1140  * Return the largest possible order of a set of pages starting at m.
1141  */
1142 static int
1143 max_order(vm_page_t m)
1144 {
1145
1146         /*
1147          * Unsigned "min" is used here so that "order" is assigned
1148          * "VM_NFREEORDER - 1" when "m"'s physical address is zero
1149          * or the low-order bits of its physical address are zero
1150          * because the size of a physical address exceeds the size of
1151          * a long.
1152          */
1153         return (min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
1154             VM_NFREEORDER - 1));
1155 }
1156
1157 /*
1158  * Free a contiguous, arbitrarily sized set of physical pages, without
1159  * merging across set boundaries.
1160  *
1161  * The free page queues must be locked.
1162  */
1163 void
1164 vm_phys_enqueue_contig(vm_page_t m, u_long npages)
1165 {
1166         struct vm_freelist *fl;
1167         struct vm_phys_seg *seg;
1168         vm_page_t m_end;
1169         int order;
1170
1171         /*
1172          * Avoid unnecessary coalescing by freeing the pages in the largest
1173          * possible power-of-two-sized subsets.
1174          */
1175         vm_domain_free_assert_locked(vm_pagequeue_domain(m));
1176         seg = &vm_phys_segs[m->segind];
1177         fl = (*seg->free_queues)[m->pool];
1178         m_end = m + npages;
1179         /* Free blocks of increasing size. */
1180         while ((order = max_order(m)) < VM_NFREEORDER - 1 &&
1181             m + (1 << order) <= m_end) {
1182                 KASSERT(seg == &vm_phys_segs[m->segind],
1183                     ("%s: page range [%p,%p) spans multiple segments",
1184                     __func__, m_end - npages, m));
1185                 vm_freelist_add(fl, m, order, 1);
1186                 m += 1 << order;
1187         }
1188         /* Free blocks of maximum size. */
1189         while (m + (1 << order) <= m_end) {
1190                 KASSERT(seg == &vm_phys_segs[m->segind],
1191                     ("%s: page range [%p,%p) spans multiple segments",
1192                     __func__, m_end - npages, m));
1193                 vm_freelist_add(fl, m, order, 1);
1194                 m += 1 << order;
1195         }
1196         /* Free blocks of diminishing size. */
1197         while (m < m_end) {
1198                 KASSERT(seg == &vm_phys_segs[m->segind],
1199                     ("%s: page range [%p,%p) spans multiple segments",
1200                     __func__, m_end - npages, m));
1201                 order = flsl(m_end - m) - 1;
1202                 vm_freelist_add(fl, m, order, 1);
1203                 m += 1 << order;
1204         }
1205 }
1206
1207 /*
1208  * Free a contiguous, arbitrarily sized set of physical pages.
1209  *
1210  * The free page queues must be locked.
1211  */
1212 void
1213 vm_phys_free_contig(vm_page_t m, u_long npages)
1214 {
1215         int order_start, order_end;
1216         vm_page_t m_start, m_end;
1217
1218         vm_domain_free_assert_locked(vm_pagequeue_domain(m));
1219
1220         m_start = m;
1221         order_start = max_order(m_start);
1222         if (order_start < VM_NFREEORDER - 1)
1223                 m_start += 1 << order_start;
1224         m_end = m + npages;
1225         order_end = max_order(m_end);
1226         if (order_end < VM_NFREEORDER - 1)
1227                 m_end -= 1 << order_end;
1228         /*
1229          * Avoid unnecessary coalescing by freeing the pages at the start and
1230          * end of the range last.
1231          */
1232         if (m_start < m_end)
1233                 vm_phys_enqueue_contig(m_start, m_end - m_start);
1234         if (order_start < VM_NFREEORDER - 1)
1235                 vm_phys_free_pages(m, order_start);
1236         if (order_end < VM_NFREEORDER - 1)
1237                 vm_phys_free_pages(m_end, order_end);
1238 }
1239
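/*
 * Illustrative worked example (not part of the original file): freeing 300
 * pages starting at page frame 10 (arbitrary numbers) is carved up as
 *
 *      head:   frames [10, 12)    vm_phys_free_pages(m, 1)
 *      middle: frames [12, 308)   vm_phys_enqueue_contig()
 *      tail:   frames [308, 310)  vm_phys_free_pages(m_end, 1)
 *
 * because max_order() of both frame 10 and frame 310 is 1.  Only the head
 * and tail can have free buddies outside the range, so they go through
 * vm_phys_free_pages(), which coalesces, while the middle is queued without
 * merging.
 */
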
1240 /*
1241  * Scan physical memory between the specified addresses "low" and "high" for a
1242  * run of contiguous physical pages that satisfy the specified conditions, and
1243  * return the lowest page in the run.  The specified "alignment" determines
1244  * the alignment of the lowest physical page in the run.  If the specified
1245  * "boundary" is non-zero, then the run of physical pages cannot span a
1246  * physical address that is a multiple of "boundary".
1247  *
1248  * "npages" must be greater than zero.  Both "alignment" and "boundary" must
1249  * be a power of two.
1250  */
1251 vm_page_t
1252 vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
1253     u_long alignment, vm_paddr_t boundary, int options)
1254 {
1255         vm_paddr_t pa_end;
1256         vm_page_t m_end, m_run, m_start;
1257         struct vm_phys_seg *seg;
1258         int segind;
1259
1260         KASSERT(npages > 0, ("npages is 0"));
1261         KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1262         KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1263         if (low >= high)
1264                 return (NULL);
1265         for (segind = 0; segind < vm_phys_nsegs; segind++) {
1266                 seg = &vm_phys_segs[segind];
1267                 if (seg->domain != domain)
1268                         continue;
1269                 if (seg->start >= high)
1270                         break;
1271                 if (low >= seg->end)
1272                         continue;
1273                 if (low <= seg->start)
1274                         m_start = seg->first_page;
1275                 else
1276                         m_start = &seg->first_page[atop(low - seg->start)];
1277                 if (high < seg->end)
1278                         pa_end = high;
1279                 else
1280                         pa_end = seg->end;
1281                 if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
1282                         continue;
1283                 m_end = &seg->first_page[atop(pa_end - seg->start)];
1284                 m_run = vm_page_scan_contig(npages, m_start, m_end,
1285                     alignment, boundary, options);
1286                 if (m_run != NULL)
1287                         return (m_run);
1288         }
1289         return (NULL);
1290 }
1291
1292 /*
1293  * Search for the given physical page "m" in the free lists.  If the search
1294  * succeeds, remove "m" from the free lists and return true.  Otherwise, return
1295  * false, indicating that "m" is not in the free lists.
1296  *
1297  * The free page queues must be locked.
1298  */
1299 bool
1300 vm_phys_unfree_page(vm_page_t m)
1301 {
1302         struct vm_freelist *fl;
1303         struct vm_phys_seg *seg;
1304         vm_paddr_t pa, pa_half;
1305         vm_page_t m_set, m_tmp;
1306         int order;
1307
1308         /*
1309          * First, find the contiguous, power of two-sized set of free
1310          * physical pages containing the given physical page "m" and
1311          * assign it to "m_set".
1312          */
1313         seg = &vm_phys_segs[m->segind];
1314         vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
1315         for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
1316             order < VM_NFREEORDER - 1; ) {
1317                 order++;
1318                 pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
1319                 if (pa >= seg->start)
1320                         m_set = &seg->first_page[atop(pa - seg->start)];
1321                 else
1322                         return (false);
1323         }
1324         if (m_set->order < order)
1325                 return (false);
1326         if (m_set->order == VM_NFREEORDER)
1327                 return (false);
1328         KASSERT(m_set->order < VM_NFREEORDER,
1329             ("vm_phys_unfree_page: page %p has unexpected order %d",
1330             m_set, m_set->order));
1331
1332         /*
1333          * Next, remove "m_set" from the free lists.  Finally, extract
1334          * "m" from "m_set" using an iterative algorithm: While "m_set"
1335          * is larger than a page, shrink "m_set" by returning the half
1336          * of "m_set" that does not contain "m" to the free lists.
1337          */
1338         fl = (*seg->free_queues)[m_set->pool];
1339         order = m_set->order;
1340         vm_freelist_rem(fl, m_set, order);
1341         while (order > 0) {
1342                 order--;
1343                 pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
1344                 if (m->phys_addr < pa_half)
1345                         m_tmp = &seg->first_page[atop(pa_half - seg->start)];
1346                 else {
1347                         m_tmp = m_set;
1348                         m_set = &seg->first_page[atop(pa_half - seg->start)];
1349                 }
1350                 vm_freelist_add(fl, m_tmp, order, 0);
1351         }
1352         KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
1353         return (true);
1354 }
1355
1356 /*
1357  * Allocate a contiguous set of physical pages of the given size
1358  * "npages" from the free lists.  All of the physical pages must be at
1359  * or above the given physical address "low" and below the given
1360  * physical address "high".  The given value "alignment" determines the
1361  * alignment of the first physical page in the set.  If the given value
1362  * "boundary" is non-zero, then the set of physical pages cannot cross
1363  * any physical address boundary that is a multiple of that value.  Both
1364  * "alignment" and "boundary" must be a power of two.
1365  */
1366 vm_page_t
1367 vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
1368     u_long alignment, vm_paddr_t boundary)
1369 {
1370         vm_paddr_t pa_end, pa_start;
1371         vm_page_t m_run;
1372         struct vm_phys_seg *seg;
1373         int segind;
1374
1375         KASSERT(npages > 0, ("npages is 0"));
1376         KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1377         KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1378         vm_domain_free_assert_locked(VM_DOMAIN(domain));
1379         if (low >= high)
1380                 return (NULL);
1381         m_run = NULL;
1382         for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
1383                 seg = &vm_phys_segs[segind];
1384                 if (seg->start >= high || seg->domain != domain)
1385                         continue;
1386                 if (low >= seg->end)
1387                         break;
1388                 if (low <= seg->start)
1389                         pa_start = seg->start;
1390                 else
1391                         pa_start = low;
1392                 if (high < seg->end)
1393                         pa_end = high;
1394                 else
1395                         pa_end = seg->end;
1396                 if (pa_end - pa_start < ptoa(npages))
1397                         continue;
1398                 m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
1399                     alignment, boundary);
1400                 if (m_run != NULL)
1401                         break;
1402         }
1403         return (m_run);
1404 }
1405
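/*
 * Illustrative sketch (not part of the original file): a low-level caller
 * taking 16 contiguous pages below 4GB, aligned to 64KB and crossing no 1MB
 * boundary, from domain 0.  It assumes the vm_domain_free_lock()/unlock()
 * helpers from vm_pagequeue.h; most kernel code should instead use
 * vm_page_alloc_contig() or contigmalloc(), which also handle free-page
 * accounting and page initialization.  The function name is hypothetical.
 */
#if 0
static vm_page_t
example_alloc_contig_low(void)
{
        struct vm_domain *vmd;
        vm_page_t m_run;

        vmd = VM_DOMAIN(0);
        vm_domain_free_lock(vmd);
        m_run = vm_phys_alloc_contig(0, 16, 0, (vm_paddr_t)1 << 32,
            64 * 1024, 1024 * 1024);
        vm_domain_free_unlock(vmd);
        return (m_run);
}
#endif
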
1406 /*
1407  * Allocate a run of contiguous physical pages from the free list for the
1408  * specified segment.
1409  */
1410 static vm_page_t
1411 vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
1412     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
1413 {
1414         struct vm_freelist *fl;
1415         vm_paddr_t pa, pa_end, size;
1416         vm_page_t m, m_ret;
1417         u_long npages_end;
1418         int oind, order, pind;
1419
1420         KASSERT(npages > 0, ("npages is 0"));
1421         KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
1422         KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
1423         vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
1424         /* Compute the queue that is the best fit for npages. */
1425         order = flsl(npages - 1);
1426         /* Search for a run satisfying the specified conditions. */
1427         size = npages << PAGE_SHIFT;
1428         for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
1429             oind++) {
1430                 for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1431                         fl = (*seg->free_queues)[pind];
1432                         TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
1433                                 /*
1434                                  * Is the size of this allocation request
1435                                  * larger than the largest block size?
1436                                  */
1437                                 if (order >= VM_NFREEORDER) {
1438                                         /*
1439                                          * Determine if a sufficient number of
1440                                          * subsequent blocks to satisfy the
1441                                          * allocation request are free.
1442                                          */
1443                                         pa = VM_PAGE_TO_PHYS(m_ret);
1444                                         pa_end = pa + size;
1445                                         if (pa_end < pa)
1446                                                 continue;
1447                                         for (;;) {
1448                                                 pa += 1 << (PAGE_SHIFT +
1449                                                     VM_NFREEORDER - 1);
1450                                                 if (pa >= pa_end ||
1451                                                     pa < seg->start ||
1452                                                     pa >= seg->end)
1453                                                         break;
1454                                                 m = &seg->first_page[atop(pa -
1455                                                     seg->start)];
1456                                                 if (m->order != VM_NFREEORDER -
1457                                                     1)
1458                                                         break;
1459                                         }
1460                                         /* If not, go to the next block. */
1461                                         if (pa < pa_end)
1462                                                 continue;
1463                                 }
1464
1465                                 /*
1466                                  * Determine if the blocks are within the
1467                                  * given range, satisfy the given alignment,
1468                                  * and do not cross the given boundary.
1469                                  */
1470                                 pa = VM_PAGE_TO_PHYS(m_ret);
1471                                 pa_end = pa + size;
1472                                 if (pa >= low && pa_end <= high &&
1473                                     vm_addr_ok(pa, size, alignment, boundary))
1474                                         goto done;
1475                         }
1476                 }
1477         }
1478         return (NULL);
1479 done:
1480         for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
1481                 fl = (*seg->free_queues)[m->pool];
1482                 vm_freelist_rem(fl, m, oind);
1483                 if (m->pool != VM_FREEPOOL_DEFAULT)
1484                         vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
1485         }
1486         /* Return excess pages to the free lists. */
1487         npages_end = roundup2(npages, 1 << oind);
1488         if (npages < npages_end) {
1489                 fl = (*seg->free_queues)[VM_FREEPOOL_DEFAULT];
1490                 vm_phys_enq_range(&m_ret[npages], npages_end - npages, fl, 0);
1491         }
1492         return (m_ret);
1493 }
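
/*
 * Illustrative sketch, not part of this file: the flsl()-based order
 * computation above rounds a request up to the smallest buddy order
 * that can hold it, and the excess pages of the chosen block are
 * handed back through vm_phys_enq_range().  With made-up numbers:
 */
#if 0
        u_long npages = 24;                     /* requested run length */
        int order = flsl(npages - 1);           /* flsl(23) == 5 */
        u_long run = 1UL << order;              /* a 32-page order-5 block */
        u_long excess = roundup2(npages, run) - npages; /* 8 pages returned */
#endif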
1494
1495 /*
1496  * Return the index of the first unused slot, which may be the terminating
1497  * entry.
1498  */
1499 static int
1500 vm_phys_avail_count(void)
1501 {
1502         int i;
1503
1504         for (i = 0; phys_avail[i + 1]; i += 2)
1505                 continue;
1506         if (i > PHYS_AVAIL_ENTRIES)
1507                 panic("Improperly terminated phys_avail %d entries", i);
1508
1509         return (i);
1510 }
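
/*
 * Illustrative sketch, not part of this file: phys_avail[] stores
 * (start, end) pairs of usable physical memory and is terminated by a
 * pair of zeroes, which is why the loop above advances two slots at a
 * time.  A machine with two usable ranges might, for example, hold:
 */
#if 0
        vm_paddr_t example_avail[] = {
                0x0000000000001000, 0x000000000009f000, /* low memory */
                0x0000000000100000, 0x00000000bfe00000, /* main memory */
                0, 0                                    /* terminator */
        };
        /* With this layout in phys_avail[], vm_phys_avail_count() returns 4. */
#endif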
1511
1512 /*
1513  * Assert that a phys_avail entry is valid.
1514  */
1515 static void
1516 vm_phys_avail_check(int i)
1517 {
1518         if (phys_avail[i] & PAGE_MASK)
1519                 panic("Unaligned phys_avail[%d]: %#jx", i,
1520                     (intmax_t)phys_avail[i]);
1521         if (phys_avail[i + 1] & PAGE_MASK)
1522                 panic("Unaligned phys_avail[%d + 1]: %#jx", i,
1523                     (intmax_t)phys_avail[i + 1]);
1524         if (phys_avail[i + 1] < phys_avail[i])
1525                 panic("phys_avail[%d] end %#jx < start %#jx", i,
1526                     (intmax_t)phys_avail[i + 1], (intmax_t)phys_avail[i]);
1527 }
1528
1529 /*
1530  * Return the index of an overlapping phys_avail entry or -1.
1531  */
1532 #ifdef NUMA
1533 static int
1534 vm_phys_avail_find(vm_paddr_t pa)
1535 {
1536         int i;
1537
1538         for (i = 0; phys_avail[i + 1]; i += 2)
1539                 if (phys_avail[i] <= pa && phys_avail[i + 1] > pa)
1540                         return (i);
1541         return (-1);
1542 }
1543 #endif
1544
1545 /*
1546  * Return the index of the largest entry.
1547  */
1548 int
1549 vm_phys_avail_largest(void)
1550 {
1551         vm_paddr_t sz, largesz;
1552         int largest;
1553         int i;
1554
1555         largest = 0;
1556         largesz = 0;
1557         for (i = 0; phys_avail[i + 1]; i += 2) {
1558                 sz = vm_phys_avail_size(i);
1559                 if (sz > largesz) {
1560                         largesz = sz;
1561                         largest = i;
1562                 }
1563         }
1564
1565         return (largest);
1566 }
1567
1568 vm_paddr_t
1569 vm_phys_avail_size(int i)
1570 {
1571
1572         return (phys_avail[i + 1] - phys_avail[i]);
1573 }
1574
1575 /*
1576  * Split an entry at the address 'pa'.  Return zero on success or errno.
1577  */
1578 static int
1579 vm_phys_avail_split(vm_paddr_t pa, int i)
1580 {
1581         int cnt;
1582
1583         vm_phys_avail_check(i);
1584         if (pa <= phys_avail[i] || pa >= phys_avail[i + 1])
1585                 panic("vm_phys_avail_split: invalid address");
1586         cnt = vm_phys_avail_count();
1587         if (cnt >= PHYS_AVAIL_ENTRIES)
1588                 return (ENOSPC);
1589         memmove(&phys_avail[i + 2], &phys_avail[i],
1590             (cnt - i) * sizeof(phys_avail[0]));
1591         phys_avail[i + 1] = pa;
1592         phys_avail[i + 2] = pa;
1593         vm_phys_avail_check(i);
1594         vm_phys_avail_check(i + 2);
1595
1596         return (0);
1597 }
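
/*
 * Illustrative sketch, not part of this file: splitting the "main
 * memory" entry from the example above at the 1 GB mark shifts the
 * following pairs up by one and leaves two adjoining entries that
 * share the split address:
 */
#if 0
        int error;

        /* Before: phys_avail[2], phys_avail[3] == { 0x00100000, 0xbfe00000 } */
        error = vm_phys_avail_split((vm_paddr_t)1 << 30, 2);
        /*
         * After, on success (error == 0):
         *      phys_avail[2], phys_avail[3] == { 0x00100000, 0x40000000 }
         *      phys_avail[4], phys_avail[5] == { 0x40000000, 0xbfe00000 }
         * ENOSPC is returned instead when no free pair remains.
         */
#endif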
1598
1599 /*
1600  * Check if a given physical address can be included as part of a crash dump.
1601  */
1602 bool
1603 vm_phys_is_dumpable(vm_paddr_t pa)
1604 {
1605         vm_page_t m;
1606         int i;
1607
1608         if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
1609                 return ((m->flags & PG_NODUMP) == 0);
1610
1611         for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
1612                 if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
1613                         return (true);
1614         }
1615         return (false);
1616 }
1617
1618 void
1619 vm_phys_early_add_seg(vm_paddr_t start, vm_paddr_t end)
1620 {
1621         struct vm_phys_seg *seg;
1622
1623         if (vm_phys_early_nsegs == -1)
1624                 panic("%s: called after initialization", __func__);
1625         if (vm_phys_early_nsegs == nitems(vm_phys_early_segs))
1626                 panic("%s: ran out of early segments", __func__);
1627
1628         seg = &vm_phys_early_segs[vm_phys_early_nsegs++];
1629         seg->start = start;
1630         seg->end = end;
1631 }
1632
1633 /*
1634  * This routine allocates NUMA node-specific memory before the page
1635  * allocator is bootstrapped.
1636  */
1637 vm_paddr_t
1638 vm_phys_early_alloc(int domain, size_t alloc_size)
1639 {
1640         int i, mem_index, biggestone;
1641         vm_paddr_t pa, mem_start, mem_end, size, biggestsize, align;
1642
1643         KASSERT(domain == -1 || (domain >= 0 && domain < vm_ndomains),
1644             ("%s: invalid domain index %d", __func__, domain));
1645
1646         /*
1647          * Search the mem_affinity array for the biggest address
1648          * range in the desired domain.  This is used to constrain
1649          * the phys_avail selection below.
1650          */
1651         biggestsize = 0;
1652         mem_index = 0;
1653         mem_start = 0;
1654         mem_end = -1;
1655 #ifdef NUMA
1656         if (mem_affinity != NULL) {
1657                 for (i = 0;; i++) {
1658                         size = mem_affinity[i].end - mem_affinity[i].start;
1659                         if (size == 0)
1660                                 break;
1661                         if (domain != -1 && mem_affinity[i].domain != domain)
1662                                 continue;
1663                         if (size > biggestsize) {
1664                                 mem_index = i;
1665                                 biggestsize = size;
1666                         }
1667                 }
1668                 mem_start = mem_affinity[mem_index].start;
1669                 mem_end = mem_affinity[mem_index].end;
1670         }
1671 #endif
1672
1673         /*
1674          * Now find the biggest physical segment within the desired
1675          * NUMA domain.
1676          */
1677         biggestsize = 0;
1678         biggestone = 0;
1679         for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1680                 /* skip regions that are out of range */
1681                 if (phys_avail[i + 1] - alloc_size < mem_start ||
1682                     phys_avail[i + 1] > mem_end)
1683                         continue;
1684                 size = vm_phys_avail_size(i);
1685                 if (size > biggestsize) {
1686                         biggestone = i;
1687                         biggestsize = size;
1688                 }
1689         }
1690         alloc_size = round_page(alloc_size);
1691
1692         /*
1693          * Grab single pages from the front to reduce fragmentation.
1694          */
1695         if (alloc_size == PAGE_SIZE) {
1696                 pa = phys_avail[biggestone];
1697                 phys_avail[biggestone] += PAGE_SIZE;
1698                 vm_phys_avail_check(biggestone);
1699                 return (pa);
1700         }
1701
1702         /*
1703          * Naturally align large allocations.
1704          */
1705         align = phys_avail[biggestone + 1] & (alloc_size - 1);
1706         if (alloc_size + align > biggestsize)
1707                 panic("cannot find a large enough phys_avail entry");
1708         if (align != 0 &&
1709             vm_phys_avail_split(phys_avail[biggestone + 1] - align,
1710             biggestone) != 0)
1711                 /* The split failed; waste the unaligned tail. */
1712                 phys_avail[biggestone + 1] -= align;
1713
1714         phys_avail[biggestone + 1] -= alloc_size;
1715         vm_phys_avail_check(biggestone);
1716         pa = phys_avail[biggestone + 1];
1717         return (pa);
1718 }
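
/*
 * Illustrative sketch, not part of this file, with made-up values: the
 * natural-alignment step above trims the chosen phys_avail entry so
 * that the allocation carved from its end is aligned to its own size.
 */
#if 0
        vm_paddr_t end = 0xbfe56000;            /* end of the chosen entry */
        vm_paddr_t req = 0x200000;              /* 2 MB, already page-rounded */
        vm_paddr_t align = end & (req - 1);     /* 0x56000 of misalignment */
        vm_paddr_t pa = (end - align) - req;    /* 0xbfc00000, 2 MB aligned */
#endif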
1719
1720 void
1721 vm_phys_early_startup(void)
1722 {
1723         struct vm_phys_seg *seg;
1724         int i;
1725
1726         for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1727                 phys_avail[i] = round_page(phys_avail[i]);
1728                 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
1729         }
1730
1731         for (i = 0; i < vm_phys_early_nsegs; i++) {
1732                 seg = &vm_phys_early_segs[i];
1733                 vm_phys_add_seg(seg->start, seg->end);
1734         }
1735         vm_phys_early_nsegs = -1;
1736
1737 #ifdef NUMA
1738         /* Force phys_avail to be split by domain. */
1739         if (mem_affinity != NULL) {
1740                 int idx;
1741
1742                 for (i = 0; mem_affinity[i].end != 0; i++) {
1743                         idx = vm_phys_avail_find(mem_affinity[i].start);
1744                         if (idx != -1 &&
1745                             phys_avail[idx] != mem_affinity[i].start)
1746                                 vm_phys_avail_split(mem_affinity[i].start, idx);
1747                         idx = vm_phys_avail_find(mem_affinity[i].end);
1748                         if (idx != -1 &&
1749                             phys_avail[idx] != mem_affinity[i].end)
1750                                 vm_phys_avail_split(mem_affinity[i].end, idx);
1751                 }
1752         }
1753 #endif
1754 }
1755
1756 #ifdef DDB
1757 /*
1758  * Show the number of physical pages in each of the free lists.
1759  */
1760 DB_SHOW_COMMAND(freepages, db_show_freepages)
1761 {
1762         struct vm_freelist *fl;
1763         int flind, oind, pind, dom;
1764
1765         for (dom = 0; dom < vm_ndomains; dom++) {
1766                 db_printf("DOMAIN: %d\n", dom);
1767                 for (flind = 0; flind < vm_nfreelists; flind++) {
1768                         db_printf("FREE LIST %d:\n"
1769                             "\n  ORDER (SIZE)  |  NUMBER"
1770                             "\n              ", flind);
1771                         for (pind = 0; pind < VM_NFREEPOOL; pind++)
1772                                 db_printf("  |  POOL %d", pind);
1773                         db_printf("\n--            ");
1774                         for (pind = 0; pind < VM_NFREEPOOL; pind++)
1775                                 db_printf("-- --      ");
1776                         db_printf("--\n");
1777                         for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
1778                                 db_printf("  %2.2d (%6.6dK)", oind,
1779                                     1 << (PAGE_SHIFT - 10 + oind));
1780                                 for (pind = 0; pind < VM_NFREEPOOL; pind++) {
1781                                 fl = vm_phys_free_queues[dom][flind][pind];
1782                                         db_printf("  |  %6.6d", fl[oind].lcnt);
1783                                 }
1784                                 db_printf("\n");
1785                         }
1786                         db_printf("\n");
1787                 }
1788                 db_printf("\n");
1789         }
1790 }
1791 #endif