/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *      Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#if MAXMEMDOM > 1
#include <sys/proc.h>
#endif
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

struct mem_affinity *mem_affinity;

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
        RB_ENTRY(vm_phys_fictitious_seg) node;
        /* Memory region data */
        vm_paddr_t      start;
        vm_paddr_t      end;
        vm_page_t       first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind,
    int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

        KASSERT(range->start != 0 && range->end != 0,
            ("Invalid range passed on search for vm_fictitious page"));
        if (p->start >= range->end)
                return (1);
        if (p->start < range->start)
                return (-1);

        return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

        /* Check if this is a search for a page */
        if (p1->end == 0)
                return (vm_phys_fictitious_in_range(p1, p2));

        KASSERT(p2->end != 0,
    ("Invalid range passed as second parameter to vm fictitious comparison"));

        /* Searching to add a new range */
        if (p1->end <= p2->start)
                return (-1);
        if (p1->start >= p2->end)
                return (1);

        panic("Trying to add overlapping vm fictitious ranges:\n"
            "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
            (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}
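
/*
 * Note: the comparison routine above doubles as an ordering predicate and a
 * lookup predicate.  A key with both endpoints set orders non-overlapping
 * ranges for insertion; a key whose "end" is zero is a point probe, for
 * which vm_phys_fictitious_in_range() reports whether the probe's "start"
 * falls inside [range->start, range->end).  This is what lets RB_FIND()
 * resolve a single physical address to the registered segment containing it.
 */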

static __inline int
vm_rr_selectdomain(void)
{
#if MAXMEMDOM > 1
        struct thread *td;

        td = curthread;

        td->td_dom_rr_idx++;
        td->td_dom_rr_idx %= vm_ndomains;
        return (td->td_dom_rr_idx);
#else
        return (0);
#endif
}

boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
        struct vm_phys_seg *s;
        int idx;

        while ((idx = ffsl(mask)) != 0) {
                idx--;  /* ffsl counts from 1 */
                mask &= ~(1UL << idx);
                s = &vm_phys_segs[idx];
                if (low < s->end && high > s->start)
                        return (TRUE);
        }
        return (FALSE);
}
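
/*
 * Note: "mask" above is a bitset of segment indices (e.g., a domain's
 * vmd_segs); ffsl() returns the 1-based index of the lowest set bit, or
 * zero once the mask is exhausted, so the loop visits each named segment
 * exactly once and reports whether any of them overlaps the half-open
 * interval [low, high).
 */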

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_freelist *fl;
        int dom, error, flind, oind, pind;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
        for (dom = 0; dom < vm_ndomains; dom++) {
                sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
                            "\n  ORDER (SIZE)  |  NUMBER"
                            "\n              ", flind);
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                sbuf_printf(&sbuf, "  |  POOL %d", pind);
                        sbuf_printf(&sbuf, "\n--            ");
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                sbuf_printf(&sbuf, "-- --      ");
                        sbuf_printf(&sbuf, "--\n");
                        for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                                sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
                                    1 << (PAGE_SHIFT - 10 + oind));
                                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                        fl = vm_phys_free_queues[dom][flind][pind];
                                        sbuf_printf(&sbuf, "  |  %6d",
                                            fl[oind].lcnt);
                                }
                                sbuf_printf(&sbuf, "\n");
                        }
                }
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        struct vm_phys_seg *seg;
        int error, segind;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
                seg = &vm_phys_segs[segind];
                sbuf_printf(&sbuf, "start:     %#jx\n",
                    (uintmax_t)seg->start);
                sbuf_printf(&sbuf, "end:       %#jx\n",
                    (uintmax_t)seg->end);
                sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
                sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

        m->order = order;
        if (tail)
                TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
        else
                TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
        fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

        TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
        fl[order].lcnt--;
        m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain)
{
        struct vm_phys_seg *seg;
#ifdef VM_PHYSSEG_SPARSE
        long pages;
        int segind;

        pages = 0;
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                pages += atop(seg->end - seg->start);
        }
#endif
        KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
            ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
        KASSERT(domain < vm_ndomains,
            ("vm_phys_create_seg: invalid domain provided"));
        seg = &vm_phys_segs[vm_phys_nsegs++];
        seg->start = start;
        seg->end = end;
        seg->domain = domain;
#ifdef VM_PHYSSEG_SPARSE
        seg->first_page = &vm_page_array[pages];
#else
        seg->first_page = PHYS_TO_VM_PAGE(start);
#endif
        seg->free_queues = &vm_phys_free_queues[domain][flind];
}
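
/*
 * Note on the two configurations above: under VM_PHYSSEG_SPARSE the vm_page
 * structures are packed, so the new segment's first_page is located by
 * summing the page counts of all previously created segments; this relies
 * on segments being created in the same order in which vm_page_array is
 * populated.  Under VM_PHYSSEG_DENSE the array spans the whole managed
 * physical range, so PHYS_TO_VM_PAGE() computes first_page directly.
 */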

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
{
        int i;

        if (mem_affinity == NULL) {
                _vm_phys_create_seg(start, end, flind, 0);
                return;
        }

        for (i = 0;; i++) {
                if (mem_affinity[i].end == 0)
                        panic("Reached end of affinity info");
                if (mem_affinity[i].end <= start)
                        continue;
                if (mem_affinity[i].start > start)
                        panic("No affinity info for start %jx",
                            (uintmax_t)start);
                if (mem_affinity[i].end >= end) {
                        _vm_phys_create_seg(start, end, flind,
                            mem_affinity[i].domain);
                        break;
                }
                _vm_phys_create_seg(start, mem_affinity[i].end, flind,
                    mem_affinity[i].domain);
                start = mem_affinity[i].end;
        }
}
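
/*
 * Illustration: with affinity information present, a range passed to
 * vm_phys_create_seg() is split at every mem_affinity boundary it crosses.
 * For example, a range straddling the boundary between domain 0 and
 * domain 1 produces two segments, one per domain, so that each vm_phys_seg
 * always lies entirely within a single memory domain.
 */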

/*
 * Initialize the physical memory allocator.
 */
void
vm_phys_init(void)
{
        struct vm_freelist *fl;
        int dom, flind, i, oind, pind;

        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef  VM_FREELIST_ISADMA
                if (phys_avail[i] < 16777216) {
                        if (phys_avail[i + 1] > 16777216) {
                                vm_phys_create_seg(phys_avail[i], 16777216,
                                    VM_FREELIST_ISADMA);
                                vm_phys_create_seg(16777216, phys_avail[i + 1],
                                    VM_FREELIST_DEFAULT);
                        } else {
                                vm_phys_create_seg(phys_avail[i],
                                    phys_avail[i + 1], VM_FREELIST_ISADMA);
                        }
                        if (VM_FREELIST_ISADMA >= vm_nfreelists)
                                vm_nfreelists = VM_FREELIST_ISADMA + 1;
                } else
#endif
#ifdef  VM_FREELIST_HIGHMEM
                if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) {
                        if (phys_avail[i] < VM_HIGHMEM_ADDRESS) {
                                vm_phys_create_seg(phys_avail[i],
                                    VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT);
                                vm_phys_create_seg(VM_HIGHMEM_ADDRESS,
                                    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
                        } else {
                                vm_phys_create_seg(phys_avail[i],
                                    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
                        }
                        if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
                                vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
                } else
#endif
                vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],
                    VM_FREELIST_DEFAULT);
        }
        for (dom = 0; dom < vm_ndomains; dom++) {
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = vm_phys_free_queues[dom][flind][pind];
                                for (oind = 0; oind < VM_NFREEORDER; oind++)
                                        TAILQ_INIT(&fl[oind].pl);
                        }
                }
        }
        rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
        vm_page_t m_buddy;

        while (oind > order) {
                oind--;
                m_buddy = &m[1 << oind];
                KASSERT(m_buddy->order == VM_NFREEORDER,
                    ("vm_phys_split_pages: page %p has unexpected order %d",
                    m_buddy, m_buddy->order));
                vm_freelist_add(fl, m_buddy, oind, 0);
        }
}
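
/*
 * Worked example: splitting an order-3 block down to a requested order 0
 * iterates three times, freeing the upper halves "m[4]" at order 2, "m[2]"
 * at order 1, and "m[1]" at order 0, and leaving "m" itself as the order-0
 * block handed to the caller.
 */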

/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
        vm_page_t m;
        struct vm_domain *vmd;

        vm_cnt.v_page_count++;
        m = vm_phys_paddr_to_vm_page(pa);
        m->phys_addr = pa;
        m->queue = PQ_NONE;
        m->segind = vm_phys_paddr_to_segind(pa);
        vmd = vm_phys_domain(m);
        vmd->vmd_page_count++;
        vmd->vmd_segs |= 1UL << m->segind;
        KASSERT(m->order == VM_NFREEORDER,
            ("vm_phys_add_page: page %p has unexpected order %d",
            m, m->order));
        m->pool = VM_FREEPOOL_DEFAULT;
        pmap_page_init(m);
        mtx_lock(&vm_page_queue_free_mtx);
        vm_phys_freecnt_adj(m, 1);
        vm_phys_free_pages(m, 0);
        mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
        vm_page_t m;
        int dom, domain, flind;

        KASSERT(pool < VM_NFREEPOOL,
            ("vm_phys_alloc_pages: pool %d is out of range", pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_alloc_pages: order %d is out of range", order));

        for (dom = 0; dom < vm_ndomains; dom++) {
                domain = vm_rr_selectdomain();
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        m = vm_phys_alloc_domain_pages(domain, flind, pool,
                            order);
                        if (m != NULL)
                                return (m);
                }
        }
        return (NULL);
}

/*
 * Find and dequeue a free page on the given free list, with the
 * specified pool and order.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int flind, int pool, int order)
{
        vm_page_t m;
        int dom, domain;

        KASSERT(flind < VM_NFREELIST,
            ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind));
        KASSERT(pool < VM_NFREEPOOL,
            ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

        for (dom = 0; dom < vm_ndomains; dom++) {
                domain = vm_rr_selectdomain();
                m = vm_phys_alloc_domain_pages(domain, flind, pool, order);
                if (m != NULL)
                        return (m);
        }
        return (NULL);
}

static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
        struct vm_freelist *fl;
        struct vm_freelist *alt;
        int oind, pind;
        vm_page_t m;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        fl = &vm_phys_free_queues[domain][flind][pool][0];
        for (oind = order; oind < VM_NFREEORDER; oind++) {
                m = TAILQ_FIRST(&fl[oind].pl);
                if (m != NULL) {
                        vm_freelist_rem(fl, m, oind);
                        vm_phys_split_pages(m, oind, fl, order);
                        return (m);
                }
        }

        /*
         * The given pool was empty.  Find the largest
         * contiguous, power-of-two-sized set of pages in any
         * pool.  Transfer these pages to the given pool, and
         * use them to satisfy the allocation.
         */
        for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                        alt = &vm_phys_free_queues[domain][flind][pind][0];
                        m = TAILQ_FIRST(&alt[oind].pl);
                        if (m != NULL) {
                                vm_freelist_rem(alt, m, oind);
                                vm_phys_set_pool(pool, m, oind);
                                vm_phys_split_pages(m, oind, fl, order);
                                return (m);
                        }
                }
        }
        return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_seg *seg;
        int segind;

        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                if (pa >= seg->start && pa < seg->end)
                        return (&seg->first_page[atop(pa - seg->start)]);
        }
        return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_fictitious_seg tmp, *seg;
        vm_page_t m;

        m = NULL;
        tmp.start = pa;
        tmp.end = 0;

        rw_rlock(&vm_phys_fictitious_reg_lock);
        seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
        rw_runlock(&vm_phys_fictitious_reg_lock);
        if (seg == NULL)
                return (NULL);

        m = &seg->first_page[atop(pa - seg->start)];
        KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

        return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
        long i;

        for (i = 0; i < page_count; i++) {
                vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
                range[i].oflags &= ~VPO_UNMANAGED;
                range[i].busy_lock = VPB_UNBUSIED;
        }
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
        struct vm_phys_fictitious_seg *seg;
        vm_page_t fp;
        long page_count;
#ifdef VM_PHYSSEG_DENSE
        long pi, pe;
        long dpage_count;
#endif

        KASSERT(start < end,
            ("Start of segment isn't less than end (start: %jx end: %jx)",
            (uintmax_t)start, (uintmax_t)end));

        page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
        pi = atop(start);
        pe = atop(end);
        if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
                fp = &vm_page_array[pi - first_page];
                if ((pe - first_page) > vm_page_array_size) {
                        /*
                         * We have a segment that starts inside
                         * of vm_page_array, but ends outside of it.
                         *
                         * Use vm_page_array pages for those that are
                         * inside of the vm_page_array range, and
                         * allocate the remaining ones.
                         */
                        dpage_count = vm_page_array_size - (pi - first_page);
                        vm_phys_fictitious_init_range(fp, start, dpage_count,
                            memattr);
                        page_count -= dpage_count;
                        start += ptoa(dpage_count);
                        goto alloc;
                }
                /*
                 * We can allocate the full range from vm_page_array,
                 * so there's no need to register the range in the tree.
                 */
                vm_phys_fictitious_init_range(fp, start, page_count, memattr);
                return (0);
        } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
                /*
                 * We have a segment that ends inside of vm_page_array,
                 * but starts outside of it.
                 */
                fp = &vm_page_array[0];
                dpage_count = pe - first_page;
                vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
                    memattr);
                end -= ptoa(dpage_count);
                page_count -= dpage_count;
                goto alloc;
        } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
                /*
                 * Trying to register a fictitious range that expands before
                 * and after vm_page_array.
                 */
                return (EINVAL);
        } else {
alloc:
#endif
                fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
                    M_WAITOK | M_ZERO);
#ifdef VM_PHYSSEG_DENSE
        }
#endif
        vm_phys_fictitious_init_range(fp, start, page_count, memattr);

        seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
        seg->start = start;
        seg->end = end;
        seg->first_page = fp;

        rw_wlock(&vm_phys_fictitious_reg_lock);
        RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
        rw_wunlock(&vm_phys_fictitious_reg_lock);

        return (0);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
        struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
        long pi, pe;
#endif

        KASSERT(start < end,
            ("Start of segment isn't less than end (start: %jx end: %jx)",
            (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
        pi = atop(start);
        pe = atop(end);
        if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
                if ((pe - first_page) <= vm_page_array_size) {
                        /*
                         * This segment was allocated using vm_page_array
                         * only, there's nothing to do since those pages
                         * were never added to the tree.
                         */
                        return;
                }
                /*
                 * We have a segment that starts inside
                 * of vm_page_array, but ends outside of it.
                 *
                 * Calculate how many pages were added to the
                 * tree and free them.
                 */
                start = ptoa(first_page + vm_page_array_size);
        } else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
                /*
                 * We have a segment that ends inside of vm_page_array,
                 * but starts outside of it.
                 */
                end = ptoa(first_page);
        } else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
                /* Since it's not possible to register such a range, panic. */
                panic(
                    "Unregistering not registered fictitious range [%#jx:%#jx]",
                    (uintmax_t)start, (uintmax_t)end);
        }
#endif
        tmp.start = start;
        tmp.end = 0;

        rw_wlock(&vm_phys_fictitious_reg_lock);
        seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
        /* Guard against a lookup miss before dereferencing the result. */
        if (seg == NULL || seg->start != start || seg->end != end) {
                rw_wunlock(&vm_phys_fictitious_reg_lock);
                panic(
                    "Unregistering not registered fictitious range [%#jx:%#jx]",
                    (uintmax_t)start, (uintmax_t)end);
        }
        RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
        rw_wunlock(&vm_phys_fictitious_reg_lock);
        free(seg->first_page, M_FICT_PAGES);
        free(seg, M_FICT_PAGES);
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
        struct vm_phys_seg *seg;
        int segind;

        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                if (pa >= seg->start && pa < seg->end)
                        return (segind);
        }
        panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
            (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa;
        vm_page_t m_buddy;

        KASSERT(m->order == VM_NFREEORDER,
            ("vm_phys_free_pages: page %p has unexpected order %d",
            m, m->order));
        KASSERT(m->pool < VM_NFREEPOOL,
            ("vm_phys_free_pages: page %p has unexpected pool %d",
            m, m->pool));
        KASSERT(order < VM_NFREEORDER,
            ("vm_phys_free_pages: order %d is out of range", order));
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        seg = &vm_phys_segs[m->segind];
        if (order < VM_NFREEORDER - 1) {
                pa = VM_PAGE_TO_PHYS(m);
                do {
                        pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
                        if (pa < seg->start || pa >= seg->end)
                                break;
                        m_buddy = &seg->first_page[atop(pa - seg->start)];
                        if (m_buddy->order != order)
                                break;
                        fl = (*seg->free_queues)[m_buddy->pool];
                        vm_freelist_rem(fl, m_buddy, order);
                        if (m_buddy->pool != m->pool)
                                vm_phys_set_pool(m->pool, m_buddy, order);
                        order++;
                        pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
                        m = &seg->first_page[atop(pa - seg->start)];
                } while (order < VM_NFREEORDER - 1);
        }
        fl = (*seg->free_queues)[m->pool];
        vm_freelist_add(fl, m, order, 1);
}
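
/*
 * Note on the coalescing loop above: a free block of order "order" always
 * starts on a 2^(PAGE_SHIFT + order) byte boundary, so the address of its
 * buddy differs in exactly that one bit, which is what "pa ^= ..." computes.
 * For example, with 4KB pages the order-0 buddy of 0x1000 is 0x0000.  Once
 * a merge succeeds, masking "pa" down to a 2^(PAGE_SHIFT + order) boundary
 * yields the start of the combined block, and the loop retries at the next
 * order.
 */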

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
        u_int n;
        int order;

        /*
         * Avoid unnecessary coalescing by freeing the pages in the largest
         * possible power-of-two-sized subsets.
         */
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        for (;; npages -= n) {
                /*
                 * Unsigned "min" is used here so that "order" is assigned
                 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
                 * or the low-order bits of its physical address are zero
                 * because the size of a physical address exceeds the size of
                 * a long.
                 */
                order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
                    VM_NFREEORDER - 1);
                n = 1 << order;
                if (npages < n)
                        break;
                vm_phys_free_pages(m, order);
                m += n;
        }
        /* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
        for (; npages > 0; npages -= n) {
                order = flsl(npages) - 1;
                n = 1 << order;
                vm_phys_free_pages(m, order);
                m += n;
        }
}
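
/*
 * Worked example: freeing npages = 11 from a page frame aligned to exactly
 * four pages frees a 4-page block first (the alignment then improves to
 * eight pages, which exceeds the 7-page remainder, ending the first loop),
 * after which the second loop decomposes the remainder by its leading bits:
 * 7 = 4 + 2 + 1.  The range is thus freed as blocks of 4, 4, 2, and 1 pages
 * rather than as eleven order-0 pages.
 */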

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
        vm_page_t m_tmp;

        for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
                m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa, pa_half;
        vm_page_t m_set, m_tmp;
        int order;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

        /*
         * First, find the contiguous, power of two-sized set of free
         * physical pages containing the given physical page "m" and
         * assign it to "m_set".
         */
        seg = &vm_phys_segs[m->segind];
        for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
            order < VM_NFREEORDER - 1; ) {
                order++;
                pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
                if (pa >= seg->start)
                        m_set = &seg->first_page[atop(pa - seg->start)];
                else
                        return (FALSE);
        }
        if (m_set->order < order)
                return (FALSE);
        if (m_set->order == VM_NFREEORDER)
                return (FALSE);
        KASSERT(m_set->order < VM_NFREEORDER,
            ("vm_phys_unfree_page: page %p has unexpected order %d",
            m_set, m_set->order));

        /*
         * Next, remove "m_set" from the free lists.  Finally, extract
         * "m" from "m_set" using an iterative algorithm: While "m_set"
         * is larger than a page, shrink "m_set" by returning the half
         * of "m_set" that does not contain "m" to the free lists.
         */
        fl = (*seg->free_queues)[m_set->pool];
        order = m_set->order;
        vm_freelist_rem(fl, m_set, order);
        while (order > 0) {
                order--;
                pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
                if (m->phys_addr < pa_half)
                        m_tmp = &seg->first_page[atop(pa_half - seg->start)];
                else {
                        m_tmp = m_set;
                        m_set = &seg->first_page[atop(pa_half - seg->start)];
                }
                vm_freelist_add(fl, m_tmp, order, 0);
        }
        KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
        return (TRUE);
}
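
/*
 * Note on the extraction above: it is the inverse of coalescing.  The first
 * loop walks upward to find the enclosing free block "m_set"; the second
 * repeatedly halves "m_set" at "pa_half", returning the half that does not
 * contain "m" to the free lists, until "m_set" has shrunk to the single
 * page "m".  At most VM_NFREEORDER - 1 halvings occur.
 */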

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
        static struct vm_freelist *fl;
        static int flind, oind, pind;
        vm_page_t m, m_tmp;
        int domain;

        domain = vm_rr_selectdomain();
        fl = vm_phys_free_queues[domain][0][0];
        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        for (;;) {
                TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
                        for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
                                if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
                                        vm_phys_unfree_page(m_tmp);
                                        vm_phys_freecnt_adj(m, -1);
                                        mtx_unlock(&vm_page_queue_free_mtx);
                                        pmap_zero_page_idle(m_tmp);
                                        m_tmp->flags |= PG_ZERO;
                                        mtx_lock(&vm_page_queue_free_mtx);
                                        vm_phys_freecnt_adj(m, 1);
                                        vm_phys_free_pages(m_tmp, 0);
                                        vm_page_zero_count++;
                                        cnt_prezero++;
                                        return (TRUE);
                                }
                        }
                }
                oind++;
                if (oind == VM_NFREEORDER) {
                        oind = 0;
                        pind++;
                        if (pind == VM_NFREEPOOL) {
                                pind = 0;
                                flind++;
                                if (flind == vm_nfreelists)
                                        flind = 0;
                        }
                        fl = vm_phys_free_queues[domain][flind][pind];
                }
        }
}
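
/*
 * Note: the static "flind", "oind", and "pind" above act as a scan cursor
 * that survives across calls, so repeated invocations walk the queues
 * incrementally rather than rescanning from the beginning, though "fl"
 * itself is re-pointed at the selected domain's first free list on every
 * entry.  The function zeroes at most one page per call.
 */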

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
        struct vm_freelist *fl;
        struct vm_phys_seg *seg;
        vm_paddr_t pa, pa_last, size;
        vm_page_t m, m_ret;
        u_long npages_end;
        int dom, domain, flind, oind, order, pind;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        size = npages << PAGE_SHIFT;
        KASSERT(size != 0,
            ("vm_phys_alloc_contig: size must not be 0"));
        KASSERT((alignment & (alignment - 1)) == 0,
            ("vm_phys_alloc_contig: alignment must be a power of 2"));
        KASSERT((boundary & (boundary - 1)) == 0,
            ("vm_phys_alloc_contig: boundary must be a power of 2"));
        /* Compute the queue that is the best fit for npages. */
        for (order = 0; (1 << order) < npages; order++);
        dom = 0;
restartdom:
        domain = vm_rr_selectdomain();
        for (flind = 0; flind < vm_nfreelists; flind++) {
                for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
                        for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                fl = &vm_phys_free_queues[domain][flind][pind][0];
                                TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
                                        /*
                                         * A free list may contain physical pages
                                         * from one or more segments.
                                         */
                                        seg = &vm_phys_segs[m_ret->segind];
                                        if (seg->start > high ||
                                            low >= seg->end)
                                                continue;

                                        /*
                                         * Is the size of this allocation request
                                         * larger than the largest block size?
                                         */
                                        if (order >= VM_NFREEORDER) {
                                                /*
                                                 * Determine if a sufficient number
                                                 * of subsequent blocks to satisfy
                                                 * the allocation request are free.
                                                 */
                                                pa = VM_PAGE_TO_PHYS(m_ret);
                                                pa_last = pa + size;
                                                for (;;) {
                                                        pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
                                                        if (pa >= pa_last)
                                                                break;
                                                        if (pa < seg->start ||
                                                            pa >= seg->end)
                                                                break;
                                                        m = &seg->first_page[atop(pa - seg->start)];
                                                        if (m->order != VM_NFREEORDER - 1)
                                                                break;
                                                }
                                                /* If not, continue to the next block. */
                                                if (pa < pa_last)
                                                        continue;
                                        }

                                        /*
                                         * Determine if the blocks are within the given range,
                                         * satisfy the given alignment, and do not cross the
                                         * given boundary.
                                         */
                                        pa = VM_PAGE_TO_PHYS(m_ret);
                                        if (pa >= low &&
                                            pa + size <= high &&
                                            (pa & (alignment - 1)) == 0 &&
                                            ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
                                                goto done;
                                }
                        }
                }
        }
        if (++dom < vm_ndomains)
                goto restartdom;
        return (NULL);
done:
        for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
                fl = (*seg->free_queues)[m->pool];
                vm_freelist_rem(fl, m, m->order);
        }
        if (m_ret->pool != VM_FREEPOOL_DEFAULT)
                vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
        fl = (*seg->free_queues)[m_ret->pool];
        vm_phys_split_pages(m_ret, oind, fl, order);
        /* Return excess pages to the free lists. */
        npages_end = roundup2(npages, 1 << imin(oind, order));
        if (npages < npages_end)
                vm_phys_free_contig(&m_ret[npages], npages_end - npages);
        return (m_ret);
}
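
/*
 * Note on the final range tests above: since "boundary" is a power of two,
 * "((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0" checks that the first
 * and last byte of the candidate run agree in every bit at or above
 * log2(boundary), i.e., that the run fits within a single boundary-aligned
 * window; when "boundary" is zero the mask is zero and the test always
 * passes.  Likewise "(pa & (alignment - 1)) == 0" tests power-of-two
 * alignment, as asserted on entry.
 */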

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
        struct vm_freelist *fl;
        int flind, oind, pind, dom;

        for (dom = 0; dom < vm_ndomains; dom++) {
                db_printf("DOMAIN: %d\n", dom);
                for (flind = 0; flind < vm_nfreelists; flind++) {
                        db_printf("FREE LIST %d:\n"
                            "\n  ORDER (SIZE)  |  NUMBER"
                            "\n              ", flind);
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                db_printf("  |  POOL %d", pind);
                        db_printf("\n--            ");
                        for (pind = 0; pind < VM_NFREEPOOL; pind++)
                                db_printf("-- --      ");
                        db_printf("--\n");
                        for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
                                db_printf("  %2.2d (%6.6dK)", oind,
                                    1 << (PAGE_SHIFT - 10 + oind));
                                for (pind = 0; pind < VM_NFREEPOOL; pind++) {
                                        fl = vm_phys_free_queues[dom][flind][pind];
                                        db_printf("  |  %6.6d", fl[oind].lcnt);
                                }
                                db_printf("\n");
                        }
                        db_printf("\n");
                }
                db_printf("\n");
        }
}
#endif