1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * The Mach Operating System project at Carnegie-Mellon University.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *      from: @(#)vm_page.c     7.4 (Berkeley) 5/7/91
36  */
37
38 /*-
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  */
64
65 /*
66  *      Resident memory management module.
67  */
68
69 #include <sys/cdefs.h>
70 __FBSDID("$FreeBSD$");
71
72 #include "opt_vm.h"
73
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/lock.h>
77 #include <sys/domainset.h>
78 #include <sys/kernel.h>
79 #include <sys/limits.h>
80 #include <sys/linker.h>
81 #include <sys/malloc.h>
82 #include <sys/mman.h>
83 #include <sys/msgbuf.h>
84 #include <sys/mutex.h>
85 #include <sys/proc.h>
86 #include <sys/rwlock.h>
87 #include <sys/sbuf.h>
88 #include <sys/sched.h>
89 #include <sys/smp.h>
90 #include <sys/sysctl.h>
91 #include <sys/vmmeter.h>
92 #include <sys/vnode.h>
93
94 #include <vm/vm.h>
95 #include <vm/pmap.h>
96 #include <vm/vm_param.h>
97 #include <vm/vm_domainset.h>
98 #include <vm/vm_kern.h>
99 #include <vm/vm_map.h>
100 #include <vm/vm_object.h>
101 #include <vm/vm_page.h>
102 #include <vm/vm_pageout.h>
103 #include <vm/vm_phys.h>
104 #include <vm/vm_pagequeue.h>
105 #include <vm/vm_pager.h>
106 #include <vm/vm_radix.h>
107 #include <vm/vm_reserv.h>
108 #include <vm/vm_extern.h>
109 #include <vm/uma.h>
110 #include <vm/uma_int.h>
111
112 #include <machine/md_var.h>
113
114 extern int      uma_startup_count(int);
115 extern void     uma_startup(void *, int);
116 extern int      vmem_startup_count(void);
117
118 struct vm_domain vm_dom[MAXMEMDOM];
119
120 DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);
121
122 struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
123
124 struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
125 /* The following fields are protected by the domainset lock. */
126 domainset_t __exclusive_cache_line vm_min_domains;
127 domainset_t __exclusive_cache_line vm_severe_domains;
128 static int vm_min_waiters;
129 static int vm_severe_waiters;
130 static int vm_pageproc_waiters;
131
132 /*
133  * bogus page -- for I/O to/from partially complete buffers,
134  * or for paging into sparsely invalid regions.
135  */
136 vm_page_t bogus_page;
137
138 #ifdef PMAP_HAS_PAGE_ARRAY
139 vm_page_t vm_page_array = (vm_page_t)PA_MIN_ADDRESS;
140 #else
141 vm_page_t vm_page_array;
142 #endif
143 long vm_page_array_size;
144 long first_page;
145
146 static int boot_pages;
147 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
148     &boot_pages, 0,
149     "number of pages allocated for bootstrapping the VM system");
150
151 static int pa_tryrelock_restart;
152 SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
153     &pa_tryrelock_restart, 0, "Number of tryrelock restarts");
154
155 static TAILQ_HEAD(, vm_page) blacklist_head;
156 static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
157 SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
158     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");
159
160 static uma_zone_t fakepg_zone;
161
162 static void vm_page_alloc_check(vm_page_t m);
163 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
164 static void vm_page_dequeue_complete(vm_page_t m);
165 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
166 static void vm_page_init(void *dummy);
167 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
168     vm_pindex_t pindex, vm_page_t mpred);
169 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
170     vm_page_t mpred);
171 static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
172     vm_page_t m_run, vm_paddr_t high);
173 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
174     int req);
175 static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
176     int flags);
177 static void vm_page_zone_release(void *arg, void **store, int cnt);
178
179 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
180
181 static void
182 vm_page_init(void *dummy)
183 {
184
185         fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
186             NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
187         bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
188             VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
189 }
190
191 /*
192  * The page cache zones are initialized later since we need to be able to allocate
193  * pages before UMA is fully initialized.
194  */
195 static void
196 vm_page_init_cache_zones(void *dummy __unused)
197 {
198         struct vm_domain *vmd;
199         struct vm_pgcache *pgcache;
200         int domain, pool;
201
202         for (domain = 0; domain < vm_ndomains; domain++) {
203                 vmd = VM_DOMAIN(domain);
204
205                 /*
206                  * Don't allow the page caches to take up more than .25% of
207                  * memory.
208                  */
209                 if (vmd->vmd_page_count / 400 < 256 * mp_ncpus * VM_NFREEPOOL)
210                         continue;
211                 for (pool = 0; pool < VM_NFREEPOOL; pool++) {
212                         pgcache = &vmd->vmd_pgcache[pool];
213                         pgcache->domain = domain;
214                         pgcache->pool = pool;
215                         pgcache->zone = uma_zcache_create("vm pgcache",
216                             sizeof(struct vm_page), NULL, NULL, NULL, NULL,
217                             vm_page_zone_import, vm_page_zone_release, pgcache,
218                             UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
219                         (void)uma_zone_set_maxcache(pgcache->zone, 0);
220                 }
221         }
222 }
223 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
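/*
 * Illustrative sizing of the check in vm_page_init_cache_zones() above (a
 * sketch, not normative): caches are only created when vmd_page_count / 400
 * is at least 256 * mp_ncpus * VM_NFREEPOOL, i.e. when an assumed worst-case
 * cache footprint of roughly 256 pages per CPU per free pool stays below
 * 0.25% of the domain's memory.  Assuming 4 KiB pages, mp_ncpus == 8 and
 * VM_NFREEPOOL == 2, the threshold is 400 * 256 * 8 * 2 = 1,638,400 pages,
 * or 6.25 GiB; smaller domains skip page cache creation entirely.
 */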
224
225 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
226 #if PAGE_SIZE == 32768
227 #ifdef CTASSERT
228 CTASSERT(sizeof(u_long) >= 8);
229 #endif
230 #endif
231
232 /*
233  * Try to acquire a physical address lock while a pmap is locked.  If we
234  * fail to trylock we unlock and lock the pmap directly and cache the
235  * locked pa in *locked.  The caller should then restart their loop in case
236  * the virtual to physical mapping has changed.
237  */
238 int
239 vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
240 {
241         vm_paddr_t lockpa;
242
243         lockpa = *locked;
244         *locked = pa;
245         if (lockpa) {
246                 PA_LOCK_ASSERT(lockpa, MA_OWNED);
247                 if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
248                         return (0);
249                 PA_UNLOCK(lockpa);
250         }
251         if (PA_TRYLOCK(pa))
252                 return (0);
253         PMAP_UNLOCK(pmap);
254         atomic_add_int(&pa_tryrelock_restart, 1);
255         PA_LOCK(pa);
256         PMAP_LOCK(pmap);
257         return (EAGAIN);
258 }
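/*
 * Illustrative caller pattern (a sketch, not lifted from any particular
 * pmap; pmap_lookup() is a hypothetical helper): because
 * vm_page_pa_tryrelock() may drop and reacquire the pmap lock, a nonzero
 * (EAGAIN) return means the virtual-to-physical translation must be
 * revalidated before the cached lock is trusted.  With locked_pa
 * initialized to 0:
 *
 *	retry:
 *		pa = pmap_lookup(pmap, va);
 *		if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
 *			goto retry;
 *		... locked_pa now names the held physical-address lock ...
 *		PA_UNLOCK_COND(locked_pa);
 */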
259
260 /*
261  *      vm_set_page_size:
262  *
263  *      Sets the page size, perhaps based upon the memory
264  *      size.  Must be called before any use of page-size
265  *      dependent functions.
266  */
267 void
268 vm_set_page_size(void)
269 {
270         if (vm_cnt.v_page_size == 0)
271                 vm_cnt.v_page_size = PAGE_SIZE;
272         if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
273                 panic("vm_set_page_size: page size not a power of two");
274 }
275
276 /*
277  *      vm_page_blacklist_next:
278  *
279  *      Find the next entry in the provided string of blacklist
280  *      addresses.  Entries are separated by space, comma, or newline.
281  *      If an invalid integer is encountered then the rest of the
282  *      string is skipped.  Updates the list pointer to the next
283  *      character, or NULL if the string is exhausted or invalid.
284  */
285 static vm_paddr_t
286 vm_page_blacklist_next(char **list, char *end)
287 {
288         vm_paddr_t bad;
289         char *cp, *pos;
290
291         if (list == NULL || *list == NULL)
292                 return (0);
293         if (**list == '\0') {
294                 *list = NULL;
295                 return (0);
296         }
297
298         /*
299          * If there's no end pointer then the buffer is coming from
300          * the kenv and we know it's null-terminated.
301          */
302         if (end == NULL)
303                 end = *list + strlen(*list);
304
305         /* Ensure that strtoq() won't walk off the end */
306         if (*end != '\0') {
307                 if (*end == '\n' || *end == ' ' || *end  == ',')
308                         *end = '\0';
309                 else {
310                         printf("Blacklist not terminated, skipping\n");
311                         *list = NULL;
312                         return (0);
313                 }
314         }
315
316         for (pos = *list; *pos != '\0'; pos = cp) {
317                 bad = strtoq(pos, &cp, 0);
318                 if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
319                         if (bad == 0) {
320                                 if (++cp < end)
321                                         continue;
322                                 else
323                                         break;
324                         }
325                 } else
326                         break;
327                 if (*cp == '\0' || ++cp >= end)
328                         *list = NULL;
329                 else
330                         *list = cp;
331                 return (trunc_page(bad));
332         }
333         printf("Garbage in RAM blacklist, skipping\n");
334         *list = NULL;
335         return (0);
336 }
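/*
 * Example input (illustrative): given the string "0x2000000,0x2001000 junk",
 * the first call returns 0x2000000 and the second 0x2001000; the third call
 * stops at the unparsable token, prints the warning above, sets *list to
 * NULL and returns 0, which ends the caller's loop.  Returned addresses are
 * truncated to a page boundary via trunc_page().
 */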
337
338 bool
339 vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
340 {
341         struct vm_domain *vmd;
342         vm_page_t m;
343         int ret;
344
345         m = vm_phys_paddr_to_vm_page(pa);
346         if (m == NULL)
347                 return (true); /* page does not exist, no failure */
348
349         vmd = vm_pagequeue_domain(m);
350         vm_domain_free_lock(vmd);
351         ret = vm_phys_unfree_page(m);
352         vm_domain_free_unlock(vmd);
353         if (ret != 0) {
354                 vm_domain_freecnt_inc(vmd, -1);
355                 TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
356                 if (verbose)
357                         printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
358         }
359         return (ret);
360 }
361
362 /*
363  *      vm_page_blacklist_check:
364  *
365  *      Iterate through the provided string of blacklist addresses, pulling
366  *      each entry out of the physical allocator free list and putting it
367  *      onto a list for reporting via the vm.page_blacklist sysctl.
368  */
369 static void
370 vm_page_blacklist_check(char *list, char *end)
371 {
372         vm_paddr_t pa;
373         char *next;
374
375         next = list;
376         while (next != NULL) {
377                 if ((pa = vm_page_blacklist_next(&next, end)) == 0)
378                         continue;
379                 vm_page_blacklist_add(pa, bootverbose);
380         }
381 }
382
383 /*
384  *      vm_page_blacklist_load:
385  *
386  *      Search for a special module named "ram_blacklist".  It'll be a
387  *      plain text file provided by the user via the loader directive
388  *      of the same name.
389  */
390 static void
391 vm_page_blacklist_load(char **list, char **end)
392 {
393         void *mod;
394         u_char *ptr;
395         u_int len;
396
397         mod = NULL;
398         ptr = NULL;
399
400         mod = preload_search_by_type("ram_blacklist");
401         if (mod != NULL) {
402                 ptr = preload_fetch_addr(mod);
403                 len = preload_fetch_size(mod);
404         }
405         *list = ptr;
406         if (ptr != NULL)
407                 *end = ptr + len;
408         else
409                 *end = NULL;
410         return;
411 }
412
413 static int
414 sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
415 {
416         vm_page_t m;
417         struct sbuf sbuf;
418         int error, first;
419
420         first = 1;
421         error = sysctl_wire_old_buffer(req, 0);
422         if (error != 0)
423                 return (error);
424         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
425         TAILQ_FOREACH(m, &blacklist_head, listq) {
426                 sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
427                     (uintmax_t)m->phys_addr);
428                 first = 0;
429         }
430         error = sbuf_finish(&sbuf);
431         sbuf_delete(&sbuf);
432         return (error);
433 }
434
435 /*
436  * Initialize a dummy page for use in scans of the specified paging queue.
437  * In principle, this function only needs to set the flag PG_MARKER.
438  * Nonetheless, it write busies the page as a safety precaution.
439  */
440 static void
441 vm_page_init_marker(vm_page_t marker, int queue, uint8_t aflags)
442 {
443
444         bzero(marker, sizeof(*marker));
445         marker->flags = PG_MARKER;
446         marker->aflags = aflags;
447         marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
448         marker->queue = queue;
449 }
450
451 static void
452 vm_page_domain_init(int domain)
453 {
454         struct vm_domain *vmd;
455         struct vm_pagequeue *pq;
456         int i;
457
458         vmd = VM_DOMAIN(domain);
459         bzero(vmd, sizeof(*vmd));
460         *__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
461             "vm inactive pagequeue";
462         *__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
463             "vm active pagequeue";
464         *__DECONST(char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
465             "vm laundry pagequeue";
466         *__DECONST(char **, &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
467             "vm unswappable pagequeue";
468         vmd->vmd_domain = domain;
469         vmd->vmd_page_count = 0;
470         vmd->vmd_free_count = 0;
471         vmd->vmd_segs = 0;
472         vmd->vmd_oom = FALSE;
473         for (i = 0; i < PQ_COUNT; i++) {
474                 pq = &vmd->vmd_pagequeues[i];
475                 TAILQ_INIT(&pq->pq_pl);
476                 mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
477                     MTX_DEF | MTX_DUPOK);
478                 pq->pq_pdpages = 0;
479                 vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
480         }
481         mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
482         mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
483         snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);
484
485         /*
486          * inacthead is used to provide FIFO ordering for LRU-bypassing
487          * insertions.
488          */
489         vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
490         TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
491             &vmd->vmd_inacthead, plinks.q);
492
493         /*
494          * The clock pages are used to implement active queue scanning without
495          * requeues.  Scans start at clock[0], which is advanced after the scan
496          * ends.  When the two clock hands meet, they are reset and scanning
497          * resumes from the head of the queue.
498          */
499         vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
500         vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
501         TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
502             &vmd->vmd_clock[0], plinks.q);
503         TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
504             &vmd->vmd_clock[1], plinks.q);
505 }
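/*
 * After vm_page_domain_init() the queues contain only marker pages, roughly:
 *
 *	PQ_INACTIVE: [inacthead] -> (pages added later)
 *	PQ_ACTIVE:   [clock[0]] -> (pages added later) -> [clock[1]]
 *
 * A sketch, not a data-structure invariant: inacthead anchors LRU-bypassing
 * insertions so that they drain in FIFO order, and the active-queue scan
 * advances from clock[0] toward clock[1], resetting both hands when they
 * meet.
 */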
506
507 /*
508  * Initialize a physical page in preparation for adding it to the free
509  * lists.
510  */
511 static void
512 vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
513 {
514
515         m->object = NULL;
516         m->wire_count = 0;
517         m->busy_lock = VPB_UNBUSIED;
518         m->flags = m->aflags = 0;
519         m->phys_addr = pa;
520         m->queue = PQ_NONE;
521         m->psind = 0;
522         m->segind = segind;
523         m->order = VM_NFREEORDER;
524         m->pool = VM_FREEPOOL_DEFAULT;
525         m->valid = m->dirty = 0;
526         pmap_page_init(m);
527 }
528
529 #ifndef PMAP_HAS_PAGE_ARRAY
530 static vm_paddr_t
531 vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
532 {
533         vm_paddr_t new_end;
534
535         /*
536          * Reserve an unmapped guard page to trap access to vm_page_array[-1].
537          * However, because this page is allocated from KVM, out-of-bounds
538          * accesses using the direct map will not be trapped.
539          */
540         *vaddr += PAGE_SIZE;
541
542         /*
543          * Allocate physical memory for the page structures, and map it.
544          */
545         new_end = trunc_page(end - page_range * sizeof(struct vm_page));
546         vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,
547             VM_PROT_READ | VM_PROT_WRITE);
548         vm_page_array_size = page_range;
549
550         return (new_end);
551 }
552 #endif
553
554 /*
555  *      vm_page_startup:
556  *
557  *      Initializes the resident memory module.  Allocates physical memory for
558  *      bootstrapping UMA and some data structures that are used to manage
559  *      physical pages.  Initializes these structures, and populates the free
560  *      page queues.
561  */
562 vm_offset_t
563 vm_page_startup(vm_offset_t vaddr)
564 {
565         struct vm_phys_seg *seg;
566         vm_page_t m;
567         char *list, *listend;
568         vm_offset_t mapped;
569         vm_paddr_t end, high_avail, low_avail, new_end, page_range, size;
570         vm_paddr_t last_pa, pa;
571         u_long pagecount;
572         int biggestone, i, segind;
573 #ifdef WITNESS
574         int witness_size;
575 #endif
576 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
577         long ii;
578 #endif
579
580         vaddr = round_page(vaddr);
581
582         vm_phys_early_startup();
583         biggestone = vm_phys_avail_largest();
584         end = phys_avail[biggestone+1];
585
586         /*
587          * Initialize the page and queue locks.
588          */
589         mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
590         for (i = 0; i < PA_LOCK_COUNT; i++)
591                 mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
592         for (i = 0; i < vm_ndomains; i++)
593                 vm_page_domain_init(i);
594
595         /*
596          * Allocate memory for use when bootstrapping the kernel memory
597          * allocator.  Tell UMA how many zones we are going to create
598          * before going fully functional.  UMA will add its zones.
599          *
600          * VM startup zones: vmem, vmem_btag, VM OBJECT, RADIX NODE, MAP,
601          * KMAP ENTRY, MAP ENTRY, VMSPACE.
602          */
603         boot_pages = uma_startup_count(8);
604
605 #ifndef UMA_MD_SMALL_ALLOC
606         /* vmem_startup() calls uma_prealloc(). */
607         boot_pages += vmem_startup_count();
608         /* vm_map_startup() calls uma_prealloc(). */
609         boot_pages += howmany(MAX_KMAP,
610             UMA_SLAB_SPACE / sizeof(struct vm_map));
611
612         /*
613          * Before going fully functional kmem_init() does allocation
614          * from "KMAP ENTRY" and vmem_create() does allocation from "vmem".
615          */
616         boot_pages += 2;
617 #endif
618         /*
619          * CTFLAG_RDTUN doesn't work during the early boot process, so we must
620          * manually fetch the value.
621          */
622         TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
623         new_end = end - (boot_pages * UMA_SLAB_SIZE);
624         new_end = trunc_page(new_end);
625         mapped = pmap_map(&vaddr, new_end, end,
626             VM_PROT_READ | VM_PROT_WRITE);
627         bzero((void *)mapped, end - new_end);
628         uma_startup((void *)mapped, boot_pages);
629
630 #ifdef WITNESS
631         witness_size = round_page(witness_startup_count());
632         new_end -= witness_size;
633         mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
634             VM_PROT_READ | VM_PROT_WRITE);
635         bzero((void *)mapped, witness_size);
636         witness_startup((void *)mapped);
637 #endif
638
639 #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
640     defined(__i386__) || defined(__mips__) || defined(__riscv)
641         /*
642          * Allocate a bitmap to indicate whether a given physical page
643          * needs to be included in a minidump.
644          *
645          * The amd64 port needs this to indicate which direct map pages
646          * need to be dumped, via calls to dump_add_page()/dump_drop_page().
647          *
648          * However, i386 still needs this workspace internally within the
649          * minidump code.  In theory, they are not needed on i386, but are
650          * included should the sf_buf code decide to use them.
651          */
652         last_pa = 0;
653         for (i = 0; dump_avail[i + 1] != 0; i += 2)
654                 if (dump_avail[i + 1] > last_pa)
655                         last_pa = dump_avail[i + 1];
656         page_range = last_pa / PAGE_SIZE;
657         vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
658         new_end -= vm_page_dump_size;
659         vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
660             new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
661         bzero((void *)vm_page_dump, vm_page_dump_size);
662 #else
663         (void)last_pa;
664 #endif
665 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
666     defined(__riscv)
667         /*
668          * Include the UMA bootstrap pages, witness pages and vm_page_dump
669          * in a crash dump.  When pmap_map() uses the direct map, they are
670          * not automatically included.
671          */
672         for (pa = new_end; pa < end; pa += PAGE_SIZE)
673                 dump_add_page(pa);
674 #endif
675         phys_avail[biggestone + 1] = new_end;
676 #ifdef __amd64__
677         /*
678          * Request that the physical pages underlying the message buffer be
679          * included in a crash dump.  Since the message buffer is accessed
680          * through the direct map, they are not automatically included.
681          */
682         pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
683         last_pa = pa + round_page(msgbufsize);
684         while (pa < last_pa) {
685                 dump_add_page(pa);
686                 pa += PAGE_SIZE;
687         }
688 #endif
689         /*
690          * Compute the number of pages of memory that will be available for
691          * use, taking into account the overhead of a page structure per page.
692          * In other words, solve
693          *      "available physical memory" - round_page(page_range *
694          *          sizeof(struct vm_page)) = page_range * PAGE_SIZE 
695          * for page_range.  
696          */
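        /*
         * An illustrative aside (the code below is authoritative): solving
         * the equation above gives, modulo rounding,
         *
         *      page_range = "available physical memory" /
         *          (PAGE_SIZE + sizeof(struct vm_page))
         *
         * For example, assuming 4 KiB pages and a struct vm_page of roughly
         * 100 bytes, on the order of 2-3% of managed memory is consumed by
         * the vm_page array itself.
         */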
697         low_avail = phys_avail[0];
698         high_avail = phys_avail[1];
699         for (i = 0; i < vm_phys_nsegs; i++) {
700                 if (vm_phys_segs[i].start < low_avail)
701                         low_avail = vm_phys_segs[i].start;
702                 if (vm_phys_segs[i].end > high_avail)
703                         high_avail = vm_phys_segs[i].end;
704         }
705         /* Skip the first chunk.  It is already accounted for. */
706         for (i = 2; phys_avail[i + 1] != 0; i += 2) {
707                 if (phys_avail[i] < low_avail)
708                         low_avail = phys_avail[i];
709                 if (phys_avail[i + 1] > high_avail)
710                         high_avail = phys_avail[i + 1];
711         }
712         first_page = low_avail / PAGE_SIZE;
713 #ifdef VM_PHYSSEG_SPARSE
714         size = 0;
715         for (i = 0; i < vm_phys_nsegs; i++)
716                 size += vm_phys_segs[i].end - vm_phys_segs[i].start;
717         for (i = 0; phys_avail[i + 1] != 0; i += 2)
718                 size += phys_avail[i + 1] - phys_avail[i];
719 #elif defined(VM_PHYSSEG_DENSE)
720         size = high_avail - low_avail;
721 #else
722 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
723 #endif
724
725 #ifdef PMAP_HAS_PAGE_ARRAY
726         pmap_page_array_startup(size / PAGE_SIZE);
727         biggestone = vm_phys_avail_largest();
728         end = new_end = phys_avail[biggestone + 1];
729 #else
730 #ifdef VM_PHYSSEG_DENSE
731         /*
732          * In the VM_PHYSSEG_DENSE case, the number of pages can account for
733          * the overhead of a page structure per page only if vm_page_array is
734          * allocated from the last physical memory chunk.  Otherwise, we must
735          * allocate page structures representing the physical memory
736          * underlying vm_page_array, even though they will not be used.
737          */
738         if (new_end != high_avail)
739                 page_range = size / PAGE_SIZE;
740         else
741 #endif
742         {
743                 page_range = size / (PAGE_SIZE + sizeof(struct vm_page));
744
745                 /*
746                  * If the partial bytes remaining are large enough for
747                  * a page (PAGE_SIZE) without a corresponding
748                  * 'struct vm_page', then new_end will contain an
749                  * extra page after subtracting the length of the VM
750                  * page array.  Compensate by subtracting an extra
751                  * page from new_end.
752                  */
753                 if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
754                         if (new_end == high_avail)
755                                 high_avail -= PAGE_SIZE;
756                         new_end -= PAGE_SIZE;
757                 }
758         }
759         end = new_end;
760         new_end = vm_page_array_alloc(&vaddr, end, page_range);
761 #endif
762
763 #if VM_NRESERVLEVEL > 0
764         /*
765          * Allocate physical memory for the reservation management system's
766          * data structures, and map it.
767          */
768         new_end = vm_reserv_startup(&vaddr, new_end);
769 #endif
770 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
771     defined(__riscv)
772         /*
773          * Include vm_page_array and vm_reserv_array in a crash dump.
774          */
775         for (pa = new_end; pa < end; pa += PAGE_SIZE)
776                 dump_add_page(pa);
777 #endif
778         phys_avail[biggestone + 1] = new_end;
779
780         /*
781          * Add physical memory segments corresponding to the available
782          * physical pages.
783          */
784         for (i = 0; phys_avail[i + 1] != 0; i += 2)
785                 if (vm_phys_avail_size(i) != 0)
786                         vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
787
788         /*
789          * Initialize the physical memory allocator.
790          */
791         vm_phys_init();
792
793         /*
794          * Initialize the page structures and add every available page to the
795          * physical memory allocator's free lists.
796          */
797 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
798         for (ii = 0; ii < vm_page_array_size; ii++) {
799                 m = &vm_page_array[ii];
800                 vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0);
801                 m->flags = PG_FICTITIOUS;
802         }
803 #endif
804         vm_cnt.v_page_count = 0;
805         for (segind = 0; segind < vm_phys_nsegs; segind++) {
806                 seg = &vm_phys_segs[segind];
807                 for (m = seg->first_page, pa = seg->start; pa < seg->end;
808                     m++, pa += PAGE_SIZE)
809                         vm_page_init_page(m, pa, segind);
810
811                 /*
812                  * Add the segment to the free lists only if it is covered by
813                  * one of the ranges in phys_avail.  Because we've added the
814                  * ranges to the vm_phys_segs array, we can assume that each
815                  * segment is either entirely contained in one of the ranges,
816                  * or doesn't overlap any of them.
817                  */
818                 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
819                         struct vm_domain *vmd;
820
821                         if (seg->start < phys_avail[i] ||
822                             seg->end > phys_avail[i + 1])
823                                 continue;
824
825                         m = seg->first_page;
826                         pagecount = (u_long)atop(seg->end - seg->start);
827
828                         vmd = VM_DOMAIN(seg->domain);
829                         vm_domain_free_lock(vmd);
830                         vm_phys_enqueue_contig(m, pagecount);
831                         vm_domain_free_unlock(vmd);
832                         vm_domain_freecnt_inc(vmd, pagecount);
833                         vm_cnt.v_page_count += (u_int)pagecount;
834
835                         vmd = VM_DOMAIN(seg->domain);
836                         vmd->vmd_page_count += (u_int)pagecount;
837                         vmd->vmd_segs |= 1UL << m->segind;
838                         break;
839                 }
840         }
841
842         /*
843          * Remove blacklisted pages from the physical memory allocator.
844          */
845         TAILQ_INIT(&blacklist_head);
846         vm_page_blacklist_load(&list, &listend);
847         vm_page_blacklist_check(list, listend);
848
849         list = kern_getenv("vm.blacklist");
850         vm_page_blacklist_check(list, NULL);
851
852         freeenv(list);
853 #if VM_NRESERVLEVEL > 0
854         /*
855          * Initialize the reservation management system.
856          */
857         vm_reserv_init();
858 #endif
859
860         return (vaddr);
861 }
862
863 void
864 vm_page_reference(vm_page_t m)
865 {
866
867         vm_page_aflag_set(m, PGA_REFERENCED);
868 }
869
870 /*
871  *      vm_page_busy_downgrade:
872  *
873  *      Downgrade an exclusive busy page into a single shared busy page.
874  */
875 void
876 vm_page_busy_downgrade(vm_page_t m)
877 {
878         u_int x;
879         bool locked;
880
881         vm_page_assert_xbusied(m);
882         locked = mtx_owned(vm_page_lockptr(m));
883
884         for (;;) {
885                 x = m->busy_lock;
886                 x &= VPB_BIT_WAITERS;
887                 if (x != 0 && !locked)
888                         vm_page_lock(m);
889                 if (atomic_cmpset_rel_int(&m->busy_lock,
890                     VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1)))
891                         break;
892                 if (x != 0 && !locked)
893                         vm_page_unlock(m);
894         }
895         if (x != 0) {
896                 wakeup(m);
897                 if (!locked)
898                         vm_page_unlock(m);
899         }
900 }
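/*
 * For reference, the busy_lock word manipulated above encodes (see vm_page.h
 * for the authoritative definitions) a shared/exclusive flag, a waiters bit,
 * and a count of shared holders:
 *
 *	VPB_UNBUSIED            no owner
 *	VPB_SINGLE_EXCLUSIVER   one exclusive (write) busy holder
 *	VPB_SHARERS_WORD(n)     n shared (read) busy holders
 *	VPB_BIT_WAITERS         or'ed in when a thread sleeps on the page
 *
 * so the downgrade above atomically replaces the exclusive owner with a
 * single sharer while waking any waiters.
 */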
901
902 /*
903  *      vm_page_sbusied:
904  *
905  *      Return a positive value if the page is shared busied, 0 otherwise.
906  */
907 int
908 vm_page_sbusied(vm_page_t m)
909 {
910         u_int x;
911
912         x = m->busy_lock;
913         return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
914 }
915
916 /*
917  *      vm_page_sunbusy:
918  *
919  *      Shared unbusy a page.
920  */
921 void
922 vm_page_sunbusy(vm_page_t m)
923 {
924         u_int x;
925
926         vm_page_lock_assert(m, MA_NOTOWNED);
927         vm_page_assert_sbusied(m);
928
929         for (;;) {
930                 x = m->busy_lock;
931                 if (VPB_SHARERS(x) > 1) {
932                         if (atomic_cmpset_int(&m->busy_lock, x,
933                             x - VPB_ONE_SHARER))
934                                 break;
935                         continue;
936                 }
937                 if ((x & VPB_BIT_WAITERS) == 0) {
938                         KASSERT(x == VPB_SHARERS_WORD(1),
939                             ("vm_page_sunbusy: invalid lock state"));
940                         if (atomic_cmpset_int(&m->busy_lock,
941                             VPB_SHARERS_WORD(1), VPB_UNBUSIED))
942                                 break;
943                         continue;
944                 }
945                 KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
946                     ("vm_page_sunbusy: invalid lock state for waiters"));
947
948                 vm_page_lock(m);
949                 if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {
950                         vm_page_unlock(m);
951                         continue;
952                 }
953                 wakeup(m);
954                 vm_page_unlock(m);
955                 break;
956         }
957 }
958
959 /*
960  *      vm_page_busy_sleep:
961  *
962  *      Sleep and release the page lock, using the page pointer as wchan.
963  *      This is used to implement the hard path of the busying mechanism.
964  *
965  *      The given page must be locked.
966  *
967  *      If nonshared is true, sleep only if the page is xbusy.
968  */
969 void
970 vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
971 {
972         u_int x;
973
974         vm_page_assert_locked(m);
975
976         x = m->busy_lock;
977         if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) ||
978             ((x & VPB_BIT_WAITERS) == 0 &&
979             !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) {
980                 vm_page_unlock(m);
981                 return;
982         }
983         msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);
984 }
985
986 /*
987  *      vm_page_trysbusy:
988  *
989  *      Try to shared busy a page.
990  *      Returns 1 if the operation succeeds and 0 otherwise.
991  *      The operation never sleeps.
992  */
993 int
994 vm_page_trysbusy(vm_page_t m)
995 {
996         u_int x;
997
998         for (;;) {
999                 x = m->busy_lock;
1000                 if ((x & VPB_BIT_SHARED) == 0)
1001                         return (0);
1002                 if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER))
1003                         return (1);
1004         }
1005 }
1006
1007 static void
1008 vm_page_xunbusy_locked(vm_page_t m)
1009 {
1010
1011         vm_page_assert_xbusied(m);
1012         vm_page_assert_locked(m);
1013
1014         atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
1015         /* There is a waiter, do wakeup() instead of vm_page_flash(). */
1016         wakeup(m);
1017 }
1018
1019 void
1020 vm_page_xunbusy_maybelocked(vm_page_t m)
1021 {
1022         bool lockacq;
1023
1024         vm_page_assert_xbusied(m);
1025
1026         /*
1027          * Fast path for unbusy.  If it succeeds, we know that there
1028          * are no waiters, so we do not need a wakeup.
1029          */
1030         if (atomic_cmpset_rel_int(&m->busy_lock, VPB_SINGLE_EXCLUSIVER,
1031             VPB_UNBUSIED))
1032                 return;
1033
1034         lockacq = !mtx_owned(vm_page_lockptr(m));
1035         if (lockacq)
1036                 vm_page_lock(m);
1037         vm_page_xunbusy_locked(m);
1038         if (lockacq)
1039                 vm_page_unlock(m);
1040 }
1041
1042 /*
1043  *      vm_page_xunbusy_hard:
1044  *
1045  *      Called when the first attempt to exclusively unbusy a page fails.
1046  *      It is assumed that the waiters bit is on.
1047  */
1048 void
1049 vm_page_xunbusy_hard(vm_page_t m)
1050 {
1051
1052         vm_page_assert_xbusied(m);
1053
1054         vm_page_lock(m);
1055         vm_page_xunbusy_locked(m);
1056         vm_page_unlock(m);
1057 }
1058
1059 /*
1060  *      vm_page_flash:
1061  *
1062  *      Wake up anyone waiting for the page.
1063  *      The ownership bits do not change.
1064  *
1065  *      The given page must be locked.
1066  */
1067 void
1068 vm_page_flash(vm_page_t m)
1069 {
1070         u_int x;
1071
1072         vm_page_lock_assert(m, MA_OWNED);
1073
1074         for (;;) {
1075                 x = m->busy_lock;
1076                 if ((x & VPB_BIT_WAITERS) == 0)
1077                         return;
1078                 if (atomic_cmpset_int(&m->busy_lock, x,
1079                     x & (~VPB_BIT_WAITERS)))
1080                         break;
1081         }
1082         wakeup(m);
1083 }
1084
1085 /*
1086  * Avoid releasing and reacquiring the same page lock.
1087  */
1088 void
1089 vm_page_change_lock(vm_page_t m, struct mtx **mtx)
1090 {
1091         struct mtx *mtx1;
1092
1093         mtx1 = vm_page_lockptr(m);
1094         if (*mtx == mtx1)
1095                 return;
1096         if (*mtx != NULL)
1097                 mtx_unlock(*mtx);
1098         *mtx = mtx1;
1099         mtx_lock(mtx1);
1100 }
1101
1102 /*
1103  *      vm_page_unhold_pages:
1104  *
1105  *      Unhold each of the pages that is referenced by the given array.
1106  */
1107 void
1108 vm_page_unhold_pages(vm_page_t *ma, int count)
1109 {
1110         struct mtx *mtx;
1111
1112         mtx = NULL;
1113         for (; count != 0; count--) {
1114                 vm_page_change_lock(*ma, &mtx);
1115                 if (vm_page_unwire(*ma, PQ_ACTIVE) && (*ma)->object == NULL)
1116                         vm_page_free(*ma);
1117                 ma++;
1118         }
1119         if (mtx != NULL)
1120                 mtx_unlock(mtx);
1121 }
1122
1123 vm_page_t
1124 PHYS_TO_VM_PAGE(vm_paddr_t pa)
1125 {
1126         vm_page_t m;
1127
1128 #ifdef VM_PHYSSEG_SPARSE
1129         m = vm_phys_paddr_to_vm_page(pa);
1130         if (m == NULL)
1131                 m = vm_phys_fictitious_to_vm_page(pa);
1132         return (m);
1133 #elif defined(VM_PHYSSEG_DENSE)
1134         long pi;
1135
1136         pi = atop(pa);
1137         if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
1138                 m = &vm_page_array[pi - first_page];
1139                 return (m);
1140         }
1141         return (vm_phys_fictitious_to_vm_page(pa));
1142 #else
1143 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
1144 #endif
1145 }
1146
1147 /*
1148  *      vm_page_getfake:
1149  *
1150  *      Create a fictitious page with the specified physical address and
1151  *      memory attribute.  The memory attribute is the only machine-
1152  *      dependent aspect of a fictitious page that must be initialized.
1153  */
1154 vm_page_t
1155 vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
1156 {
1157         vm_page_t m;
1158
1159         m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
1160         vm_page_initfake(m, paddr, memattr);
1161         return (m);
1162 }
1163
1164 void
1165 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1166 {
1167
1168         if ((m->flags & PG_FICTITIOUS) != 0) {
1169                 /*
1170                  * The page's memattr might have changed since the
1171                  * previous initialization.  Update the pmap to the
1172                  * new memattr.
1173                  */
1174                 goto memattr;
1175         }
1176         m->phys_addr = paddr;
1177         m->queue = PQ_NONE;
1178         /* Fictitious pages don't use "segind". */
1179         m->flags = PG_FICTITIOUS;
1180         /* Fictitious pages don't use "order" or "pool". */
1181         m->oflags = VPO_UNMANAGED;
1182         m->busy_lock = VPB_SINGLE_EXCLUSIVER;
1183         m->wire_count = 1;
1184         pmap_page_init(m);
1185 memattr:
1186         pmap_page_set_memattr(m, memattr);
1187 }
1188
1189 /*
1190  *      vm_page_putfake:
1191  *
1192  *      Release a fictitious page.
1193  */
1194 void
1195 vm_page_putfake(vm_page_t m)
1196 {
1197
1198         KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
1199         KASSERT((m->flags & PG_FICTITIOUS) != 0,
1200             ("vm_page_putfake: bad page %p", m));
1201         uma_zfree(fakepg_zone, m);
1202 }
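/*
 * A typical fictitious-page lifetime, as used by drivers that expose device
 * memory (an illustrative sketch; the physical address and memory attribute
 * below are placeholders):
 *
 *	vm_page_t m;
 *
 *	m = vm_page_getfake(0xd0000000, VM_MEMATTR_UNCACHEABLE);
 *	... install m in an OBJT_DEVICE or OBJT_SG object and map it ...
 *	vm_page_putfake(m);
 */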
1203
1204 /*
1205  *      vm_page_updatefake:
1206  *
1207  *      Update the given fictitious page to the specified physical address and
1208  *      memory attribute.
1209  */
1210 void
1211 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1212 {
1213
1214         KASSERT((m->flags & PG_FICTITIOUS) != 0,
1215             ("vm_page_updatefake: bad page %p", m));
1216         m->phys_addr = paddr;
1217         pmap_page_set_memattr(m, memattr);
1218 }
1219
1220 /*
1221  *      vm_page_free:
1222  *
1223  *      Free a page.
1224  */
1225 void
1226 vm_page_free(vm_page_t m)
1227 {
1228
1229         m->flags &= ~PG_ZERO;
1230         vm_page_free_toq(m);
1231 }
1232
1233 /*
1234  *      vm_page_free_zero:
1235  *
1236  *      Free a page that is known to be zero filled, preserving PG_ZERO.
1237  */
1238 void
1239 vm_page_free_zero(vm_page_t m)
1240 {
1241
1242         m->flags |= PG_ZERO;
1243         vm_page_free_toq(m);
1244 }
1245
1246 /*
1247  * Unbusy and handle the page queueing for a page from a getpages request that
1248  * was optionally read ahead or behind.
1249  */
1250 void
1251 vm_page_readahead_finish(vm_page_t m)
1252 {
1253
1254         /* We shouldn't put invalid pages on queues. */
1255         KASSERT(m->valid != 0, ("%s: %p is invalid", __func__, m));
1256
1257         /*
1258          * Since the page is not the actually needed one, whether it should
1259          * be activated or deactivated is not obvious.  Empirical results
1260          * have shown that deactivating the page is usually the best choice,
1261          * unless the page is wanted by another thread.
1262          */
1263         vm_page_lock(m);
1264         if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
1265                 vm_page_activate(m);
1266         else
1267                 vm_page_deactivate(m);
1268         vm_page_unlock(m);
1269         vm_page_xunbusy(m);
1270 }
1271
1272 /*
1273  *      vm_page_sleep_if_busy:
1274  *
1275  *      Sleep and release the page queues lock if the page is busied.
1276  *      Returns TRUE if the thread slept.
1277  *
1278  *      The given page must be unlocked and the object containing it must
1279  *      be locked.
1280  */
1281 int
1282 vm_page_sleep_if_busy(vm_page_t m, const char *msg)
1283 {
1284         vm_object_t obj;
1285
1286         vm_page_lock_assert(m, MA_NOTOWNED);
1287         VM_OBJECT_ASSERT_WLOCKED(m->object);
1288
1289         if (vm_page_busied(m)) {
1290                 /*
1291                  * The page-specific object must be cached because page
1292                  * identity can change during the sleep, causing the
1293                  * re-lock of a different object.
1294                  * It is assumed that a reference to the object is already
1295                  * held by the callers.
1296                  */
1297                 obj = m->object;
1298                 vm_page_lock(m);
1299                 VM_OBJECT_WUNLOCK(obj);
1300                 vm_page_busy_sleep(m, msg, false);
1301                 VM_OBJECT_WLOCK(obj);
1302                 return (TRUE);
1303         }
1304         return (FALSE);
1305 }
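/*
 * Illustrative use (a sketch of the common caller pattern, not lifted from a
 * particular consumer; the wait message is arbitrary): because the object
 * lock is dropped while sleeping, the page lookup must be redone after a
 * sleep:
 *
 *	VM_OBJECT_WLOCK(object);
 *	retry:
 *		m = vm_page_lookup(object, pindex);
 *		if (m != NULL && vm_page_sleep_if_busy(m, "pbusy"))
 *			goto retry;
 *	... the object lock is held again here; m may now be NULL ...
 */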
1306
1307 /*
1308  *      vm_page_dirty_KBI:              [ internal use only ]
1309  *
1310  *      Set all bits in the page's dirty field.
1311  *
1312  *      The object containing the specified page must be locked if the
1313  *      call is made from the machine-independent layer.
1314  *
1315  *      See vm_page_clear_dirty_mask().
1316  *
1317  *      This function should only be called by vm_page_dirty().
1318  */
1319 void
1320 vm_page_dirty_KBI(vm_page_t m)
1321 {
1322
1323         /* Refer to this operation by its public name. */
1324         KASSERT(m->valid == VM_PAGE_BITS_ALL,
1325             ("vm_page_dirty: page is invalid!"));
1326         m->dirty = VM_PAGE_BITS_ALL;
1327 }
1328
1329 /*
1330  *      vm_page_insert:         [ internal use only ]
1331  *
1332  *      Inserts the given page into the object and the object's page list.
1333  *
1334  *      The object must be locked.
1335  */
1336 int
1337 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
1338 {
1339         vm_page_t mpred;
1340
1341         VM_OBJECT_ASSERT_WLOCKED(object);
1342         mpred = vm_radix_lookup_le(&object->rtree, pindex);
1343         return (vm_page_insert_after(m, object, pindex, mpred));
1344 }
1345
1346 /*
1347  *      vm_page_insert_after:
1348  *
1349  *      Inserts the page "m" into the specified object at offset "pindex".
1350  *
1351  *      The page "mpred" must immediately precede the offset "pindex" within
1352  *      the specified object.
1353  *
1354  *      The object must be locked.
1355  */
1356 static int
1357 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
1358     vm_page_t mpred)
1359 {
1360         vm_page_t msucc;
1361
1362         VM_OBJECT_ASSERT_WLOCKED(object);
1363         KASSERT(m->object == NULL,
1364             ("vm_page_insert_after: page already inserted"));
1365         if (mpred != NULL) {
1366                 KASSERT(mpred->object == object,
1367                     ("vm_page_insert_after: object doesn't contain mpred"));
1368                 KASSERT(mpred->pindex < pindex,
1369                     ("vm_page_insert_after: mpred doesn't precede pindex"));
1370                 msucc = TAILQ_NEXT(mpred, listq);
1371         } else
1372                 msucc = TAILQ_FIRST(&object->memq);
1373         if (msucc != NULL)
1374                 KASSERT(msucc->pindex > pindex,
1375                     ("vm_page_insert_after: msucc doesn't succeed pindex"));
1376
1377         /*
1378          * Record the object/offset pair in this page
1379          */
1380         m->object = object;
1381         m->pindex = pindex;
1382
1383         /*
1384          * Now link into the object's ordered list of backed pages.
1385          */
1386         if (vm_radix_insert(&object->rtree, m)) {
1387                 m->object = NULL;
1388                 m->pindex = 0;
1389                 return (1);
1390         }
1391         vm_page_insert_radixdone(m, object, mpred);
1392         return (0);
1393 }
1394
1395 /*
1396  *      vm_page_insert_radixdone:
1397  *
1398  *      Complete page "m" insertion into the specified object after the
1399  *      radix trie hooking.
1400  *
1401  *      The page "mpred" must precede the offset "m->pindex" within the
1402  *      specified object.
1403  *
1404  *      The object must be locked.
1405  */
1406 static void
1407 vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
1408 {
1409
1410         VM_OBJECT_ASSERT_WLOCKED(object);
1411         KASSERT(object != NULL && m->object == object,
1412             ("vm_page_insert_radixdone: page %p has inconsistent object", m));
1413         if (mpred != NULL) {
1414                 KASSERT(mpred->object == object,
1415                     ("vm_page_insert_after: object doesn't contain mpred"));
1416                 KASSERT(mpred->pindex < m->pindex,
1417                     ("vm_page_insert_after: mpred doesn't precede pindex"));
1418         }
1419
1420         if (mpred != NULL)
1421                 TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
1422         else
1423                 TAILQ_INSERT_HEAD(&object->memq, m, listq);
1424
1425         /*
1426          * Show that the object has one more resident page.
1427          */
1428         object->resident_page_count++;
1429
1430         /*
1431          * Hold the vnode until the last page is released.
1432          */
1433         if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
1434                 vhold(object->handle);
1435
1436         /*
1437          * Since we are inserting a new and possibly dirty page,
1438          * update the object's OBJ_MIGHTBEDIRTY flag.
1439          */
1440         if (pmap_page_is_write_mapped(m))
1441                 vm_object_set_writeable_dirty(object);
1442 }
1443
1444 /*
1445  *      vm_page_remove:
1446  *
1447  *      Removes the specified page from its containing object, but does not
1448  *      invalidate any backing storage.  Return true if the page may be safely
1449  *      freed and false otherwise.
1450  *
1451  *      The object must be locked.  The page must be locked if it is managed.
1452  */
1453 bool
1454 vm_page_remove(vm_page_t m)
1455 {
1456         vm_object_t object;
1457         vm_page_t mrem;
1458
1459         object = m->object;
1460
1461         if ((m->oflags & VPO_UNMANAGED) == 0)
1462                 vm_page_assert_locked(m);
1463         VM_OBJECT_ASSERT_WLOCKED(object);
1464         if (vm_page_xbusied(m))
1465                 vm_page_xunbusy_maybelocked(m);
1466         mrem = vm_radix_remove(&object->rtree, m->pindex);
1467         KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));
1468
1469         /*
1470          * Now remove from the object's list of backed pages.
1471          */
1472         TAILQ_REMOVE(&object->memq, m, listq);
1473
1474         /*
1475          * And show that the object has one fewer resident page.
1476          */
1477         object->resident_page_count--;
1478
1479         /*
1480          * The vnode may now be recycled.
1481          */
1482         if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
1483                 vdrop(object->handle);
1484
1485         m->object = NULL;
1486         return (!vm_page_wired(m));
1487 }
1488
1489 /*
1490  *      vm_page_lookup:
1491  *
1492  *      Returns the page associated with the object/offset
1493  *      pair specified; if none is found, NULL is returned.
1494  *
1495  *      The object must be locked.
1496  */
1497 vm_page_t
1498 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
1499 {
1500
1501         VM_OBJECT_ASSERT_LOCKED(object);
1502         return (vm_radix_lookup(&object->rtree, pindex));
1503 }
1504
1505 /*
1506  *      vm_page_find_least:
1507  *
1508  *      Returns the page associated with the object with least pindex
1509  *      greater than or equal to the parameter pindex, or NULL.
1510  *
1511  *      The object must be locked.
1512  */
1513 vm_page_t
1514 vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
1515 {
1516         vm_page_t m;
1517
1518         VM_OBJECT_ASSERT_LOCKED(object);
1519         if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
1520                 m = vm_radix_lookup_ge(&object->rtree, pindex);
1521         return (m);
1522 }
1523
1524 /*
1525  * Returns the given page's successor (by pindex) within the object if it is
1526  * resident; if none is found, NULL is returned.
1527  *
1528  * The object must be locked.
1529  */
1530 vm_page_t
1531 vm_page_next(vm_page_t m)
1532 {
1533         vm_page_t next;
1534
1535         VM_OBJECT_ASSERT_LOCKED(m->object);
1536         if ((next = TAILQ_NEXT(m, listq)) != NULL) {
1537                 MPASS(next->object == m->object);
1538                 if (next->pindex != m->pindex + 1)
1539                         next = NULL;
1540         }
1541         return (next);
1542 }
1543
1544 /*
1545  * Returns the given page's predecessor (by pindex) within the object if it is
1546  * resident; if none is found, NULL is returned.
1547  *
1548  * The object must be locked.
1549  */
1550 vm_page_t
1551 vm_page_prev(vm_page_t m)
1552 {
1553         vm_page_t prev;
1554
1555         VM_OBJECT_ASSERT_LOCKED(m->object);
1556         if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
1557                 MPASS(prev->object == m->object);
1558                 if (prev->pindex != m->pindex - 1)
1559                         prev = NULL;
1560         }
1561         return (prev);
1562 }
1563
1564 /*
1565  * Uses the page mnew as a replacement for an existing page at index
1566  * pindex, which must already be present in the object.
1567  *
1568  * The existing page must not be on a paging queue.
1569  */
1570 vm_page_t
1571 vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
1572 {
1573         vm_page_t mold;
1574
1575         VM_OBJECT_ASSERT_WLOCKED(object);
1576         KASSERT(mnew->object == NULL,
1577             ("vm_page_replace: page %p already in object", mnew));
1578         KASSERT(mnew->queue == PQ_NONE || vm_page_wired(mnew),
1579             ("vm_page_replace: new page %p is on a paging queue", mnew));
1580
1581         /*
1582          * This function mostly follows vm_page_insert() and
1583          * vm_page_remove() without the radix, object count and vnode
1584          * dance.  See those functions for additional commentary.
1585          */
1586
1587         mnew->object = object;
1588         mnew->pindex = pindex;
1589         mold = vm_radix_replace(&object->rtree, mnew);
1590         KASSERT(mold->queue == PQ_NONE,
1591             ("vm_page_replace: old page %p is on a paging queue", mold));
1592
1593         /* Keep the resident page list in sorted order. */
1594         TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
1595         TAILQ_REMOVE(&object->memq, mold, listq);
1596
1597         mold->object = NULL;
1598         vm_page_xunbusy_maybelocked(mold);
1599
1600         /*
1601          * The object's resident_page_count does not change because we have
1602          * swapped one page for another; OBJ_MIGHTBEDIRTY may still need updating.
1603          */
1604         if (pmap_page_is_write_mapped(mnew))
1605                 vm_object_set_writeable_dirty(object);
1606         return (mold);
1607 }
1608
1609 /*
1610  *      vm_page_rename:
1611  *
1612  *      Move the given page from its current
1613  *      object to the specified target object/offset.
1614  *
1615  *      Note: swap associated with the page must be invalidated by the move.  We
1616  *            have to do this for several reasons:  (1) we aren't freeing the
1617  *            page, (2) we are dirtying the page, (3) the VM system is probably
1618  *            moving the page from object A to B, and will then later move
1619  *            the backing store from A to B and we can't have a conflict.
1620  *
1621  *      Note: we *always* dirty the page.  It is necessary both for the
1622  *            fact that we moved it, and because we may be invalidating
1623  *            swap.
1624  *
1625  *      The objects must be locked.
1626  */
1627 int
1628 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
1629 {
1630         vm_page_t mpred;
1631         vm_pindex_t opidx;
1632
1633         VM_OBJECT_ASSERT_WLOCKED(new_object);
1634
1635         mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
1636         KASSERT(mpred == NULL || mpred->pindex != new_pindex,
1637             ("vm_page_rename: pindex already renamed"));
1638
1639         /*
1640          * Open-code a custom version of vm_page_insert() that does not
1641          * depend on recomputing the predecessor page and that can take
1642          * shortcuts based on knowledge of the function's implementation.
1643          */
1644         opidx = m->pindex;
1645         m->pindex = new_pindex;
1646         if (vm_radix_insert(&new_object->rtree, m)) {
1647                 m->pindex = opidx;
1648                 return (1);
1649         }
1650
1651         /*
1652          * The operation can no longer fail.  The removal must happen before
1653          * the listq iterator is invalidated.
1654          */
1655         m->pindex = opidx;
1656         vm_page_lock(m);
1657         (void)vm_page_remove(m);
1658
1659         /* Switch back to the new pindex to complete vm_page_insert(). */
1660         m->pindex = new_pindex;
1661         m->object = new_object;
1662         vm_page_unlock(m);
1663         vm_page_insert_radixdone(m, new_object, mpred);
1664         vm_page_dirty(m);
1665         return (0);
1666 }
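
/*
 * Example (an illustrative sketch): vm_page_rename() fails only when the
 * radix insertion cannot allocate a node, so callers typically drop the
 * object locks, sleep in vm_radix_wait(), and retry, much as
 * vm_object_split() does.  "orig_object" and "new_pindex" are placeholders.
 */
#if 0
retry:
        if (vm_page_rename(m, new_object, new_pindex) != 0) {
                VM_OBJECT_WUNLOCK(new_object);
                VM_OBJECT_WUNLOCK(orig_object);
                vm_radix_wait();
                VM_OBJECT_WLOCK(orig_object);
                VM_OBJECT_WLOCK(new_object);
                /* Revalidate "m" here; it may have changed while unlocked. */
                goto retry;
        }
#endif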
1667
1668 /*
1669  *      vm_page_alloc:
1670  *
1671  *      Allocate and return a page that is associated with the specified
1672  *      object and offset pair.  By default, this page is exclusive busied.
1673  *
1674  *      The caller must always specify an allocation class.
1675  *
1676  *      allocation classes:
1677  *      VM_ALLOC_NORMAL         normal process request
1678  *      VM_ALLOC_SYSTEM         system *really* needs a page
1679  *      VM_ALLOC_INTERRUPT      interrupt time request
1680  *
1681  *      optional allocation flags:
1682  *      VM_ALLOC_COUNT(number)  the number of additional pages that the caller
1683  *                              intends to allocate
1684  *      VM_ALLOC_NOBUSY         do not exclusive busy the page
1685  *      VM_ALLOC_NODUMP         do not include the page in a kernel core dump
1686  *      VM_ALLOC_NOOBJ          page is not associated with an object and
1687  *                              should not be exclusive busy
1688  *      VM_ALLOC_SBUSY          shared busy the allocated page
1689  *      VM_ALLOC_WIRED          wire the allocated page
1690  *      VM_ALLOC_ZERO           prefer a zeroed page
1691  */
1692 vm_page_t
1693 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
1694 {
1695
1696         return (vm_page_alloc_after(object, pindex, req, object != NULL ?
1697             vm_radix_lookup_le(&object->rtree, pindex) : NULL));
1698 }
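
/*
 * Example (an illustrative sketch): the common allocate-or-wait loop.  When
 * neither VM_ALLOC_WAITOK nor VM_ALLOC_WAITFAIL is passed, a NULL return
 * means the caller should drop the object lock, sleep in vm_wait(), and
 * retry.  "object" and "pindex" are placeholders.
 */
#if 0
        vm_page_t m;

        VM_OBJECT_WLOCK(object);
        while ((m = vm_page_alloc(object, pindex,
            VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
                VM_OBJECT_WUNLOCK(object);
                vm_wait(object);
                VM_OBJECT_WLOCK(object);
        }
        if ((m->flags & PG_ZERO) == 0)
                pmap_zero_page(m);      /* VM_ALLOC_ZERO is only a hint. */
        vm_page_xunbusy(m);             /* Pages come back exclusive busied. */
        VM_OBJECT_WUNLOCK(object);
#endif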
1699
1700 vm_page_t
1701 vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain,
1702     int req)
1703 {
1704
1705         return (vm_page_alloc_domain_after(object, pindex, domain, req,
1706             object != NULL ? vm_radix_lookup_le(&object->rtree, pindex) :
1707             NULL));
1708 }
1709
1710 /*
1711  * Allocate a page in the specified object with the given page index.  To
1712  * optimize insertion of the page into the object, the caller must also specify
1713  * the resident page in the object with the largest index smaller than the
1714  * given page index, or NULL if no such page exists.
1715  */
1716 vm_page_t
1717 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
1718     int req, vm_page_t mpred)
1719 {
1720         struct vm_domainset_iter di;
1721         vm_page_t m;
1722         int domain;
1723
1724         vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
1725         do {
1726                 m = vm_page_alloc_domain_after(object, pindex, domain, req,
1727                     mpred);
1728                 if (m != NULL)
1729                         break;
1730         } while (vm_domainset_iter_page(&di, object, &domain) == 0);
1731
1732         return (m);
1733 }
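
/*
 * Example (an illustrative sketch): when populating consecutive pindexes, a
 * caller can look up the predecessor once and then feed each newly inserted
 * page back in as "mpred", avoiding one radix lookup per page.  "start" and
 * "end" are placeholders.
 */
#if 0
        vm_page_t m, mpred;
        vm_pindex_t pindex;

        mpred = vm_radix_lookup_le(&object->rtree, start);
        for (pindex = start; pindex < end; pindex++) {
                m = vm_page_alloc_after(object, pindex, VM_ALLOC_NORMAL,
                    mpred);
                if (m == NULL)
                        break;          /* The caller decides: wait or fail. */
                mpred = m;
        }
#endif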
1734
1735 /*
1736  * Reserves "npages" free pages if the reservation keeps the free page count
1737  * at or above the minimum for the request class; returns 1 on success, 0
1738  */
1739 int
1740 vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
1741 {
1742         u_int limit, old, new;
1743
1744         req = req & VM_ALLOC_CLASS_MASK;
1745
1746         /*
1747          * The page daemon is allowed to dig deeper into the free page list.
1748          */
1749         if (curproc == pageproc && req != VM_ALLOC_INTERRUPT)
1750                 req = VM_ALLOC_SYSTEM;
1751         if (req == VM_ALLOC_INTERRUPT)
1752                 limit = 0;
1753         else if (req == VM_ALLOC_SYSTEM)
1754                 limit = vmd->vmd_interrupt_free_min;
1755         else
1756                 limit = vmd->vmd_free_reserved;
1757
1758         /*
1759          * Attempt to reserve the pages.  Fail if we're below the limit.
1760          */
1761         limit += npages;
1762         old = vmd->vmd_free_count;
1763         do {
1764                 if (old < limit)
1765                         return (0);
1766                 new = old - npages;
1767         } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);
1768
1769         /* Wake the page daemon if we've crossed the threshold. */
1770         if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
1771                 pagedaemon_wakeup(vmd->vmd_domain);
1772
1773         /* Only update bitsets on transitions. */
1774         if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
1775             (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
1776                 vm_domain_set(vmd);
1777
1778         return (1);
1779 }
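
/*
 * Example (an illustrative restatement of the pattern used by the allocators
 * below): vm_domain_allocate() only reserves the count; the caller must still
 * take pages from vm_phys and must return the count on failure.
 */
#if 0
        if (vm_domain_allocate(vmd, req, npages)) {
                vm_domain_free_lock(vmd);
                m = vm_phys_alloc_contig(domain, npages, low, high,
                    alignment, boundary);
                vm_domain_free_unlock(vmd);
                if (m == NULL)
                        vm_domain_freecnt_inc(vmd, npages);     /* Undo. */
        }
#endif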
1780
1781 vm_page_t
1782 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
1783     int req, vm_page_t mpred)
1784 {
1785         struct vm_domain *vmd;
1786         vm_page_t m;
1787         int flags, pool;
1788
1789         KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
1790             (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
1791             ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
1792             (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
1793             ("inconsistent object(%p)/req(%x)", object, req));
1794         KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
1795             ("Can't sleep and retry object insertion."));
1796         KASSERT(mpred == NULL || mpred->pindex < pindex,
1797             ("mpred %p doesn't precede pindex 0x%jx", mpred,
1798             (uintmax_t)pindex));
1799         if (object != NULL)
1800                 VM_OBJECT_ASSERT_WLOCKED(object);
1801
1802         flags = 0;
1803         m = NULL;
1804         pool = object != NULL ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT;
1805 again:
1806 #if VM_NRESERVLEVEL > 0
1807         /*
1808          * Can we allocate the page from a reservation?
1809          */
1810         if (vm_object_reserv(object) &&
1811             (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) !=
1812             NULL) {
1813                 domain = vm_phys_domain(m);
1814                 vmd = VM_DOMAIN(domain);
1815                 goto found;
1816         }
1817 #endif
1818         vmd = VM_DOMAIN(domain);
1819         if (vmd->vmd_pgcache[pool].zone != NULL) {
1820                 m = uma_zalloc(vmd->vmd_pgcache[pool].zone, M_NOWAIT);
1821                 if (m != NULL) {
1822                         flags |= PG_PCPU_CACHE;
1823                         goto found;
1824                 }
1825         }
1826         if (vm_domain_allocate(vmd, req, 1)) {
1827                 /*
1828                  * Allocate the page from the physical free page queues.
1829                  */
1830                 vm_domain_free_lock(vmd);
1831                 m = vm_phys_alloc_pages(domain, pool, 0);
1832                 vm_domain_free_unlock(vmd);
1833                 if (m == NULL) {
1834                         vm_domain_freecnt_inc(vmd, 1);
1835 #if VM_NRESERVLEVEL > 0
1836                         if (vm_reserv_reclaim_inactive(domain))
1837                                 goto again;
1838 #endif
1839                 }
1840         }
1841         if (m == NULL) {
1842                 /*
1843                  * Not allocatable, give up.
1844                  */
1845                 if (vm_domain_alloc_fail(vmd, object, req))
1846                         goto again;
1847                 return (NULL);
1848         }
1849
1850         /*
1851          * At this point we had better have found a good page.
1852          */
1853 found:
1854         vm_page_dequeue(m);
1855         vm_page_alloc_check(m);
1856
1857         /*
1858          * Initialize the page.  Only the PG_ZERO flag is inherited.
1859          */
1860         if ((req & VM_ALLOC_ZERO) != 0)
1861                 flags |= (m->flags & PG_ZERO);
1862         if ((req & VM_ALLOC_NODUMP) != 0)
1863                 flags |= PG_NODUMP;
1864         m->flags = flags;
1865         m->aflags = 0;
1866         m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
1867             VPO_UNMANAGED : 0;
1868         m->busy_lock = VPB_UNBUSIED;
1869         if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
1870                 m->busy_lock = VPB_SINGLE_EXCLUSIVER;
1871         if ((req & VM_ALLOC_SBUSY) != 0)
1872                 m->busy_lock = VPB_SHARERS_WORD(1);
1873         if (req & VM_ALLOC_WIRED) {
1874                 /*
1875                  * The page lock is not required for wiring a page until that
1876                  * page is inserted into the object.
1877                  */
1878                 vm_wire_add(1);
1879                 m->wire_count = 1;
1880         }
1881         m->act_count = 0;
1882
1883         if (object != NULL) {
1884                 if (vm_page_insert_after(m, object, pindex, mpred)) {
1885                         if (req & VM_ALLOC_WIRED) {
1886                                 vm_wire_sub(1);
1887                                 m->wire_count = 0;
1888                         }
1889                         KASSERT(m->object == NULL, ("page %p has object", m));
1890                         m->oflags = VPO_UNMANAGED;
1891                         m->busy_lock = VPB_UNBUSIED;
1892                         /* Don't change PG_ZERO. */
1893                         vm_page_free_toq(m);
1894                         if (req & VM_ALLOC_WAITFAIL) {
1895                                 VM_OBJECT_WUNLOCK(object);
1896                                 vm_radix_wait();
1897                                 VM_OBJECT_WLOCK(object);
1898                         }
1899                         return (NULL);
1900                 }
1901
1902                 /* Ignore device objects; the pager sets "memattr" for them. */
1903                 if (object->memattr != VM_MEMATTR_DEFAULT &&
1904                     (object->flags & OBJ_FICTITIOUS) == 0)
1905                         pmap_page_set_memattr(m, object->memattr);
1906         } else
1907                 m->pindex = pindex;
1908
1909         return (m);
1910 }
1911
1912 /*
1913  *      vm_page_alloc_contig:
1914  *
1915  *      Allocate a contiguous set of physical pages of the given size "npages"
1916  *      from the free lists.  All of the physical pages must be at or above
1917  *      the given physical address "low" and below the given physical address
1918  *      "high".  The given value "alignment" determines the alignment of the
1919  *      first physical page in the set.  If the given value "boundary" is
1920  *      non-zero, then the set of physical pages cannot cross any physical
1921  *      address boundary that is a multiple of that value.  Both "alignment"
1922  *      and "boundary" must be a power of two.
1923  *
1924  *      If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
1925  *      then the memory attribute setting for the physical pages is configured
1926  *      to the object's memory attribute setting.  Otherwise, the memory
1927  *      attribute setting for the physical pages is configured to "memattr",
1928  *      overriding the object's memory attribute setting.  However, if the
1929  *      object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
1930  *      memory attribute setting for the physical pages cannot be configured
1931  *      to VM_MEMATTR_DEFAULT.
1932  *
1933  *      The specified object may not contain fictitious pages.
1934  *
1935  *      The caller must always specify an allocation class.
1936  *
1937  *      allocation classes:
1938  *      VM_ALLOC_NORMAL         normal process request
1939  *      VM_ALLOC_SYSTEM         system *really* needs a page
1940  *      VM_ALLOC_INTERRUPT      interrupt time request
1941  *
1942  *      optional allocation flags:
1943  *      VM_ALLOC_NOBUSY         do not exclusive busy the page
1944  *      VM_ALLOC_NODUMP         do not include the page in a kernel core dump
1945  *      VM_ALLOC_NOOBJ          page is not associated with an object and
1946  *                              should not be exclusive busy
1947  *      VM_ALLOC_SBUSY          shared busy the allocated page
1948  *      VM_ALLOC_WIRED          wire the allocated page
1949  *      VM_ALLOC_ZERO           prefer a zeroed page
1950  */
1951 vm_page_t
1952 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
1953     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
1954     vm_paddr_t boundary, vm_memattr_t memattr)
1955 {
1956         struct vm_domainset_iter di;
1957         vm_page_t m;
1958         int domain;
1959
1960         vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
1961         do {
1962                 m = vm_page_alloc_contig_domain(object, pindex, domain, req,
1963                     npages, low, high, alignment, boundary, memattr);
1964                 if (m != NULL)
1965                         break;
1966         } while (vm_domainset_iter_page(&di, object, &domain) == 0);
1967
1968         return (m);
1969 }
1970
1971 vm_page_t
1972 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
1973     int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
1974     vm_paddr_t boundary, vm_memattr_t memattr)
1975 {
1976         struct vm_domain *vmd;
1977         vm_page_t m, m_ret, mpred;
1978         u_int busy_lock, flags, oflags;
1979
1980         mpred = NULL;   /* XXX: pacify gcc */
1981         KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
1982             (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
1983             ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
1984             (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
1985             ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object,
1986             req));
1987         KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
1988             ("Can't sleep and retry object insertion."));
1989         if (object != NULL) {
1990                 VM_OBJECT_ASSERT_WLOCKED(object);
1991                 KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
1992                     ("vm_page_alloc_contig: object %p has fictitious pages",
1993                     object));
1994         }
1995         KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
1996
1997         if (object != NULL) {
1998                 mpred = vm_radix_lookup_le(&object->rtree, pindex);
1999                 KASSERT(mpred == NULL || mpred->pindex != pindex,
2000                     ("vm_page_alloc_contig: pindex already allocated"));
2001         }
2002
2003         /*
2004          * Can we allocate the pages without the number of free pages falling
2005          * below the lower bound for the allocation class?
2006          */
2007         m_ret = NULL;
2008 again:
2009 #if VM_NRESERVLEVEL > 0
2010         /*
2011          * Can we allocate the pages from a reservation?
2012          */
2013         if (vm_object_reserv(object) &&
2014             (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req,
2015             mpred, npages, low, high, alignment, boundary)) != NULL) {
2016                 domain = vm_phys_domain(m_ret);
2017                 vmd = VM_DOMAIN(domain);
2018                 goto found;
2019         }
2020 #endif
2021         vmd = VM_DOMAIN(domain);
2022         if (vm_domain_allocate(vmd, req, npages)) {
2023                 /*
2024                  * Allocate the pages from the physical free page queues.
2025                  */
2026                 vm_domain_free_lock(vmd);
2027                 m_ret = vm_phys_alloc_contig(domain, npages, low, high,
2028                     alignment, boundary);
2029                 vm_domain_free_unlock(vmd);
2030                 if (m_ret == NULL) {
2031                         vm_domain_freecnt_inc(vmd, npages);
2032 #if VM_NRESERVLEVEL > 0
2033                         if (vm_reserv_reclaim_contig(domain, npages, low,
2034                             high, alignment, boundary))
2035                                 goto again;
2036 #endif
2037                 }
2038         }
2039         if (m_ret == NULL) {
2040                 if (vm_domain_alloc_fail(vmd, object, req))
2041                         goto again;
2042                 return (NULL);
2043         }
2044 #if VM_NRESERVLEVEL > 0
2045 found:
2046 #endif
2047         for (m = m_ret; m < &m_ret[npages]; m++) {
2048                 vm_page_dequeue(m);
2049                 vm_page_alloc_check(m);
2050         }
2051
2052         /*
2053          * Initialize the pages.  Only the PG_ZERO flag is inherited.
2054          */
2055         flags = 0;
2056         if ((req & VM_ALLOC_ZERO) != 0)
2057                 flags = PG_ZERO;
2058         if ((req & VM_ALLOC_NODUMP) != 0)
2059                 flags |= PG_NODUMP;
2060         oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
2061             VPO_UNMANAGED : 0;
2062         busy_lock = VPB_UNBUSIED;
2063         if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
2064                 busy_lock = VPB_SINGLE_EXCLUSIVER;
2065         if ((req & VM_ALLOC_SBUSY) != 0)
2066                 busy_lock = VPB_SHARERS_WORD(1);
2067         if ((req & VM_ALLOC_WIRED) != 0)
2068                 vm_wire_add(npages);
2069         if (object != NULL) {
2070                 if (object->memattr != VM_MEMATTR_DEFAULT &&
2071                     memattr == VM_MEMATTR_DEFAULT)
2072                         memattr = object->memattr;
2073         }
2074         for (m = m_ret; m < &m_ret[npages]; m++) {
2075                 m->aflags = 0;
2076                 m->flags = (m->flags | PG_NODUMP) & flags;
2077                 m->busy_lock = busy_lock;
2078                 if ((req & VM_ALLOC_WIRED) != 0)
2079                         m->wire_count = 1;
2080                 m->act_count = 0;
2081                 m->oflags = oflags;
2082                 if (object != NULL) {
2083                         if (vm_page_insert_after(m, object, pindex, mpred)) {
2084                                 if ((req & VM_ALLOC_WIRED) != 0)
2085                                         vm_wire_sub(npages);
2086                                 KASSERT(m->object == NULL,
2087                                     ("page %p has object", m));
2088                                 mpred = m;
2089                                 for (m = m_ret; m < &m_ret[npages]; m++) {
2090                                         if (m <= mpred &&
2091                                             (req & VM_ALLOC_WIRED) != 0)
2092                                                 m->wire_count = 0;
2093                                         m->oflags = VPO_UNMANAGED;
2094                                         m->busy_lock = VPB_UNBUSIED;
2095                                         /* Don't change PG_ZERO. */
2096                                         vm_page_free_toq(m);
2097                                 }
2098                                 if (req & VM_ALLOC_WAITFAIL) {
2099                                         VM_OBJECT_WUNLOCK(object);
2100                                         vm_radix_wait();
2101                                         VM_OBJECT_WLOCK(object);
2102                                 }
2103                                 return (NULL);
2104                         }
2105                         mpred = m;
2106                 } else
2107                         m->pindex = pindex;
2108                 if (memattr != VM_MEMATTR_DEFAULT)
2109                         pmap_page_set_memattr(m, memattr);
2110                 pindex++;
2111         }
2112         return (m_ret);
2113 }
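
/*
 * Example (an illustrative sketch): allocating an unmanaged, wired, 16-page
 * physically contiguous buffer below 4GB, 64KB-aligned, that does not cross
 * a 1MB boundary.  The constraint values are arbitrary example numbers.
 */
#if 0
        vm_page_t m;

        m = vm_page_alloc_contig(NULL, 0,
            VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED, 16,
            0, (vm_paddr_t)1 << 32, 64 * 1024, 1024 * 1024,
            VM_MEMATTR_DEFAULT);
        if (m == NULL) {
                /* See vm_page_reclaim_contig() below for the retry dance. */
        }
#endif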
2114
2115 /*
2116  * Check a page that has been freshly dequeued from a freelist.
2117  */
2118 static void
2119 vm_page_alloc_check(vm_page_t m)
2120 {
2121
2122         KASSERT(m->object == NULL, ("page %p has object", m));
2123         KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
2124             ("page %p has unexpected queue %d, flags %#x",
2125             m, m->queue, (m->aflags & PGA_QUEUE_STATE_MASK)));
2126         KASSERT(!vm_page_wired(m), ("page %p is wired", m));
2127         KASSERT(!vm_page_busied(m), ("page %p is busy", m));
2128         KASSERT(m->dirty == 0, ("page %p is dirty", m));
2129         KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
2130             ("page %p has unexpected memattr %d",
2131             m, pmap_page_get_memattr(m)));
2132         KASSERT(m->valid == 0, ("free page %p is valid", m));
2133 }
2134
2135 /*
2136  *      vm_page_alloc_freelist:
2137  *
2138  *      Allocate a physical page from the specified free page list.
2139  *
2140  *      The caller must always specify an allocation class.
2141  *
2142  *      allocation classes:
2143  *      VM_ALLOC_NORMAL         normal process request
2144  *      VM_ALLOC_SYSTEM         system *really* needs a page
2145  *      VM_ALLOC_INTERRUPT      interrupt time request
2146  *
2147  *      optional allocation flags:
2148  *      VM_ALLOC_COUNT(number)  the number of additional pages that the caller
2149  *                              intends to allocate
2150  *      VM_ALLOC_WIRED          wire the allocated page
2151  *      VM_ALLOC_ZERO           prefer a zeroed page
2152  */
2153 vm_page_t
2154 vm_page_alloc_freelist(int freelist, int req)
2155 {
2156         struct vm_domainset_iter di;
2157         vm_page_t m;
2158         int domain;
2159
2160         vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
2161         do {
2162                 m = vm_page_alloc_freelist_domain(domain, freelist, req);
2163                 if (m != NULL)
2164                         break;
2165         } while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
2166
2167         return (m);
2168 }
2169
2170 vm_page_t
2171 vm_page_alloc_freelist_domain(int domain, int freelist, int req)
2172 {
2173         struct vm_domain *vmd;
2174         vm_page_t m;
2175         u_int flags;
2176
2177         m = NULL;
2178         vmd = VM_DOMAIN(domain);
2179 again:
2180         if (vm_domain_allocate(vmd, req, 1)) {
2181                 vm_domain_free_lock(vmd);
2182                 m = vm_phys_alloc_freelist_pages(domain, freelist,
2183                     VM_FREEPOOL_DIRECT, 0);
2184                 vm_domain_free_unlock(vmd);
2185                 if (m == NULL)
2186                         vm_domain_freecnt_inc(vmd, 1);
2187         }
2188         if (m == NULL) {
2189                 if (vm_domain_alloc_fail(vmd, NULL, req))
2190                         goto again;
2191                 return (NULL);
2192         }
2193         vm_page_dequeue(m);
2194         vm_page_alloc_check(m);
2195
2196         /*
2197          * Initialize the page.  Only the PG_ZERO flag is inherited.
2198          */
2199         m->aflags = 0;
2200         flags = 0;
2201         if ((req & VM_ALLOC_ZERO) != 0)
2202                 flags = PG_ZERO;
2203         m->flags &= flags;
2204         if ((req & VM_ALLOC_WIRED) != 0) {
2205                 /*
2206                  * The page lock is not required for wiring a page that does
2207                  * not belong to an object.
2208                  */
2209                 vm_wire_add(1);
2210                 m->wire_count = 1;
2211         }
2212         /* Unmanaged pages don't use "act_count". */
2213         m->oflags = VPO_UNMANAGED;
2214         return (m);
2215 }
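
/*
 * Example (an illustrative sketch): vm_page_alloc_freelist() serves
 * machine-dependent code that must draw from a particular physical freelist,
 * e.g., low memory for early page-table pages on platforms that define
 * VM_FREELIST_LOWMEM.
 */
#if 0
        vm_page_t m;

        m = vm_page_alloc_freelist(VM_FREELIST_LOWMEM,
            VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
        if (m != NULL && (m->flags & PG_ZERO) == 0)
                pmap_zero_page(m);
#endif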
2216
2217 static int
2218 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags)
2219 {
2220         struct vm_domain *vmd;
2221         struct vm_pgcache *pgcache;
2222         int i;
2223
2224         pgcache = arg;
2225         vmd = VM_DOMAIN(pgcache->domain);
2226         /* Only import if we can bring in a full bucket. */
2227         if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
2228                 return (0);
2229         domain = vmd->vmd_domain;
2230         vm_domain_free_lock(vmd);
2231         i = vm_phys_alloc_npages(domain, pgcache->pool, cnt,
2232             (vm_page_t *)store);
2233         vm_domain_free_unlock(vmd);
2234         if (cnt != i)
2235                 vm_domain_freecnt_inc(vmd, cnt - i);
2236
2237         return (i);
2238 }
2239
2240 static void
2241 vm_page_zone_release(void *arg, void **store, int cnt)
2242 {
2243         struct vm_domain *vmd;
2244         struct vm_pgcache *pgcache;
2245         vm_page_t m;
2246         int i;
2247
2248         pgcache = arg;
2249         vmd = VM_DOMAIN(pgcache->domain);
2250         vm_domain_free_lock(vmd);
2251         for (i = 0; i < cnt; i++) {
2252                 m = (vm_page_t)store[i];
2253                 vm_phys_free_pages(m, 0);
2254         }
2255         vm_domain_free_unlock(vmd);
2256         vm_domain_freecnt_inc(vmd, cnt);
2257 }
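
/*
 * vm_page_zone_import() and vm_page_zone_release() above are the import and
 * release hooks of the per-domain, per-pool page cache zones.  An
 * illustrative sketch of the wiring (see vm_page_init_cache_zones() for the
 * exact names and flags):
 */
#if 0
        pgcache->zone = uma_zcache_create(pgcache->name,
            sizeof(struct vm_page), NULL, NULL, NULL, NULL,
            vm_page_zone_import, vm_page_zone_release, pgcache,
            UMA_ZONE_VM);
#endif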
2258
2259 #define VPSC_ANY        0       /* No restrictions. */
2260 #define VPSC_NORESERV   1       /* Skip reservations; implies VPSC_NOSUPER. */
2261 #define VPSC_NOSUPER    2       /* Skip superpages. */
2262
2263 /*
2264  *      vm_page_scan_contig:
2265  *
2266  *      Scan vm_page_array[] between the specified entries "m_start" and
2267  *      "m_end" for a run of contiguous physical pages that satisfy the
2268  *      specified conditions, and return the lowest page in the run.  The
2269  *      specified "alignment" determines the alignment of the lowest physical
2270  *      page in the run.  If the specified "boundary" is non-zero, then the
2271  *      run of physical pages cannot span a physical address that is a
2272  *      multiple of "boundary".
2273  *
2274  *      "m_end" is never dereferenced, so it need not point to a vm_page
2275  *      structure within vm_page_array[].
2276  *
2277  *      "npages" must be greater than zero.  "m_start" and "m_end" must not
2278  *      span a hole (or discontiguity) in the physical address space.  Both
2279  *      "alignment" and "boundary" must be a power of two.
2280  */
2281 vm_page_t
2282 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
2283     u_long alignment, vm_paddr_t boundary, int options)
2284 {
2285         struct mtx *m_mtx;
2286         vm_object_t object;
2287         vm_paddr_t pa;
2288         vm_page_t m, m_run;
2289 #if VM_NRESERVLEVEL > 0
2290         int level;
2291 #endif
2292         int m_inc, order, run_ext, run_len;
2293
2294         KASSERT(npages > 0, ("npages is 0"));
2295         KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
2296         KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
2297         m_run = NULL;
2298         run_len = 0;
2299         m_mtx = NULL;
2300         for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
2301                 KASSERT((m->flags & PG_MARKER) == 0,
2302                     ("page %p is PG_MARKER", m));
2303                 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->wire_count == 1,
2304                     ("fictitious page %p has invalid wire count", m));
2305
2306                 /*
2307                  * If the current page would be the start of a run, check its
2308                  * physical address against the end, alignment, and boundary
2309                  * conditions.  If it doesn't satisfy these conditions, either
2310                  * terminate the scan or advance to the next page that
2311                  * satisfies the failed condition.
2312                  */
2313                 if (run_len == 0) {
2314                         KASSERT(m_run == NULL, ("m_run != NULL"));
2315                         if (m + npages > m_end)
2316                                 break;
2317                         pa = VM_PAGE_TO_PHYS(m);
2318                         if ((pa & (alignment - 1)) != 0) {
2319                                 m_inc = atop(roundup2(pa, alignment) - pa);
2320                                 continue;
2321                         }
2322                         if (rounddown2(pa ^ (pa + ptoa(npages) - 1),
2323                             boundary) != 0) {
2324                                 m_inc = atop(roundup2(pa, boundary) - pa);
2325                                 continue;
2326                         }
2327                 } else
2328                         KASSERT(m_run != NULL, ("m_run == NULL"));
2329
2330                 vm_page_change_lock(m, &m_mtx);
2331                 m_inc = 1;
2332 retry:
2333                 if (vm_page_wired(m))
2334                         run_ext = 0;
2335 #if VM_NRESERVLEVEL > 0
2336                 else if ((level = vm_reserv_level(m)) >= 0 &&
2337                     (options & VPSC_NORESERV) != 0) {
2338                         run_ext = 0;
2339                         /* Advance to the end of the reservation. */
2340                         pa = VM_PAGE_TO_PHYS(m);
2341                         m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) -
2342                             pa);
2343                 }
2344 #endif
2345                 else if ((object = m->object) != NULL) {
2346                         /*
2347                          * The page is considered eligible for relocation if
2348                          * and only if it could be laundered or reclaimed by
2349                          * the page daemon.
2350                          */
2351                         if (!VM_OBJECT_TRYRLOCK(object)) {
2352                                 mtx_unlock(m_mtx);
2353                                 VM_OBJECT_RLOCK(object);
2354                                 mtx_lock(m_mtx);
2355                                 if (m->object != object) {
2356                                         /*
2357                                          * The page may have been freed.
2358                                          */
2359                                         VM_OBJECT_RUNLOCK(object);
2360                                         goto retry;
2361                                 } else if (vm_page_wired(m)) {
2362                                         run_ext = 0;
2363                                         goto unlock;
2364                                 }
2365                         }
2366                         /* Don't care: PG_NODUMP, PG_ZERO. */
2367                         if (object->type != OBJT_DEFAULT &&
2368                             object->type != OBJT_SWAP &&
2369                             object->type != OBJT_VNODE) {
2370                                 run_ext = 0;
2371 #if VM_NRESERVLEVEL > 0
2372                         } else if ((options & VPSC_NOSUPER) != 0 &&
2373                             (level = vm_reserv_level_iffullpop(m)) >= 0) {
2374                                 run_ext = 0;
2375                                 /* Advance to the end of the superpage. */
2376                                 pa = VM_PAGE_TO_PHYS(m);
2377                                 m_inc = atop(roundup2(pa + 1,
2378                                     vm_reserv_size(level)) - pa);
2379 #endif
2380                         } else if (object->memattr == VM_MEMATTR_DEFAULT &&
2381                             vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) {
2382                                 /*
2383                                  * The page is allocated but eligible for
2384                                  * relocation.  Extend the current run by one
2385                                  * page.
2386                                  */
2387                                 KASSERT(pmap_page_get_memattr(m) ==
2388                                     VM_MEMATTR_DEFAULT,
2389                                     ("page %p has an unexpected memattr", m));
2390                                 KASSERT((m->oflags & (VPO_SWAPINPROG |
2391                                     VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
2392                                     ("page %p has unexpected oflags", m));
2393                                 /* Don't care: VPO_NOSYNC. */
2394                                 run_ext = 1;
2395                         } else
2396                                 run_ext = 0;
2397 unlock:
2398                         VM_OBJECT_RUNLOCK(object);
2399 #if VM_NRESERVLEVEL > 0
2400                 } else if (level >= 0) {
2401                         /*
2402                          * The page is reserved but not yet allocated.  In
2403                          * other words, it is still free.  Extend the current
2404                          * run by one page.
2405                          */
2406                         run_ext = 1;
2407 #endif
2408                 } else if ((order = m->order) < VM_NFREEORDER) {
2409                         /*
2410                          * The page is enqueued in the physical memory
2411                          * allocator's free page queues.  Moreover, it is the
2412                          * first page in a power-of-two-sized run of
2413                          * contiguous free pages.  Add these pages to the end
2414                          * of the current run, and jump ahead.
2415                          */
2416                         run_ext = 1 << order;
2417                         m_inc = 1 << order;
2418                 } else {
2419                         /*
2420                          * Skip the page for one of the following reasons: (1)
2421                          * It is enqueued in the physical memory allocator's
2422                          * free page queues.  However, it is not the first
2423                          * page in a run of contiguous free pages.  (This case
2424                          * rarely occurs because the scan is performed in
2425                          * ascending order.) (2) It is not reserved, and it is
2426                          * transitioning from free to allocated.  (Conversely,
2427                          * the transition from allocated to free for managed
2428                          * pages is blocked by the page lock.) (3) It is
2429                          * allocated but not contained by an object and not
2430                          * wired, e.g., allocated by Xen's balloon driver.
2431                          */
2432                         run_ext = 0;
2433                 }
2434
2435                 /*
2436                  * Extend or reset the current run of pages.
2437                  */
2438                 if (run_ext > 0) {
2439                         if (run_len == 0)
2440                                 m_run = m;
2441                         run_len += run_ext;
2442                 } else {
2443                         if (run_len > 0) {
2444                                 m_run = NULL;
2445                                 run_len = 0;
2446                         }
2447                 }
2448         }
2449         if (m_mtx != NULL)
2450                 mtx_unlock(m_mtx);
2451         if (run_len >= npages)
2452                 return (m_run);
2453         return (NULL);
2454 }
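
/*
 * A worked instance of the boundary test in vm_page_scan_contig() above,
 * with example numbers: for npages = 4 (16KB with 4KB pages) and
 * boundary = 64KB, a candidate run starting at pa = 0xf000 spans
 * [0xf000, 0x12fff].  Then pa ^ (pa + ptoa(npages) - 1) = 0xf000 ^ 0x12fff =
 * 0x1dfff, and rounding that down to a multiple of 64KB leaves 0x10000 != 0,
 * so the run would cross the 64KB boundary at 0x10000 and the scan advances
 * to the next boundary-aligned page.
 */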
2455
2456 /*
2457  *      vm_page_reclaim_run:
2458  *
2459  *      Try to relocate each of the allocated virtual pages within the
2460  *      specified run of physical pages to a new physical address.  Free the
2461  *      physical pages underlying the relocated virtual pages.  A virtual page
2462  *      is relocatable if and only if it could be laundered or reclaimed by
2463  *      the page daemon.  Whenever possible, a virtual page is relocated to a
2464  *      physical address above "high".
2465  *
2466  *      Returns 0 if every physical page within the run was already free or
2467  *      just freed by a successful relocation.  Otherwise, returns a non-zero
2468  *      value indicating why the last attempt to relocate a virtual page was
2469  *      unsuccessful.
2470  *
2471  *      "req_class" must be an allocation class.
2472  */
2473 static int
2474 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
2475     vm_paddr_t high)
2476 {
2477         struct vm_domain *vmd;
2478         struct mtx *m_mtx;
2479         struct spglist free;
2480         vm_object_t object;
2481         vm_paddr_t pa;
2482         vm_page_t m, m_end, m_new;
2483         int error, order, req;
2484
2485         KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class,
2486             ("req_class is not an allocation class"));
2487         SLIST_INIT(&free);
2488         error = 0;
2489         m = m_run;
2490         m_end = m_run + npages;
2491         m_mtx = NULL;
2492         for (; error == 0 && m < m_end; m++) {
2493                 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
2494                     ("page %p is PG_FICTITIOUS or PG_MARKER", m));
2495
2496                 /*
2497                  * Avoid releasing and reacquiring the same page lock.
2498                  */
2499                 vm_page_change_lock(m, &m_mtx);
2500 retry:
2501                 if (vm_page_wired(m))
2502                         error = EBUSY;
2503                 else if ((object = m->object) != NULL) {
2504                         /*
2505                          * The page is relocated if and only if it could be
2506                          * laundered or reclaimed by the page daemon.
2507                          */
2508                         if (!VM_OBJECT_TRYWLOCK(object)) {
2509                                 mtx_unlock(m_mtx);
2510                                 VM_OBJECT_WLOCK(object);
2511                                 mtx_lock(m_mtx);
2512                                 if (m->object != object) {
2513                                         /*
2514                                          * The page may have been freed.
2515                                          */
2516                                         VM_OBJECT_WUNLOCK(object);
2517                                         goto retry;
2518                                 } else if (vm_page_wired(m)) {
2519                                         error = EBUSY;
2520                                         goto unlock;
2521                                 }
2522                         }
2523                         /* Don't care: PG_NODUMP, PG_ZERO. */
2524                         if (object->type != OBJT_DEFAULT &&
2525                             object->type != OBJT_SWAP &&
2526                             object->type != OBJT_VNODE)
2527                                 error = EINVAL;
2528                         else if (object->memattr != VM_MEMATTR_DEFAULT)
2529                                 error = EINVAL;
2530                         else if (vm_page_queue(m) != PQ_NONE &&
2531                             !vm_page_busied(m)) {
2532                                 KASSERT(pmap_page_get_memattr(m) ==
2533                                     VM_MEMATTR_DEFAULT,
2534                                     ("page %p has an unexpected memattr", m));
2535                                 KASSERT((m->oflags & (VPO_SWAPINPROG |
2536                                     VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
2537                                     ("page %p has unexpected oflags", m));
2538                                 /* Don't care: VPO_NOSYNC. */
2539                                 if (m->valid != 0) {
2540                                         /*
2541                                          * First, try to allocate a new page
2542                                          * that is above "high".  Failing
2543                                          * that, try to allocate a new page
2544                                          * that is below "m_run".  Allocate
2545                                          * the new page between the end of
2546                                          * "m_run" and "high" only as a last
2547                                          * resort.
2548                                          */
2549                                         req = req_class | VM_ALLOC_NOOBJ;
2550                                         if ((m->flags & PG_NODUMP) != 0)
2551                                                 req |= VM_ALLOC_NODUMP;
2552                                         if (trunc_page(high) !=
2553                                             ~(vm_paddr_t)PAGE_MASK) {
2554                                                 m_new = vm_page_alloc_contig(
2555                                                     NULL, 0, req, 1,
2556                                                     round_page(high),
2557                                                     ~(vm_paddr_t)0,
2558                                                     PAGE_SIZE, 0,
2559                                                     VM_MEMATTR_DEFAULT);
2560                                         } else
2561                                                 m_new = NULL;
2562                                         if (m_new == NULL) {
2563                                                 pa = VM_PAGE_TO_PHYS(m_run);
2564                                                 m_new = vm_page_alloc_contig(
2565                                                     NULL, 0, req, 1,
2566                                                     0, pa - 1, PAGE_SIZE, 0,
2567                                                     VM_MEMATTR_DEFAULT);
2568                                         }
2569                                         if (m_new == NULL) {
2570                                                 pa += ptoa(npages);
2571                                                 m_new = vm_page_alloc_contig(
2572                                                     NULL, 0, req, 1,
2573                                                     pa, high, PAGE_SIZE, 0,
2574                                                     VM_MEMATTR_DEFAULT);
2575                                         }
2576                                         if (m_new == NULL) {
2577                                                 error = ENOMEM;
2578                                                 goto unlock;
2579                                         }
2580                                         KASSERT(!vm_page_wired(m_new),
2581                                             ("page %p is wired", m_new));
2582
2583                                         /*
2584                                          * Replace "m" with the new page.  For
2585                                          * vm_page_replace(), "m" must be busy
2586                                          * and dequeued.  Finally, change "m"
2587                                          * as if vm_page_free() was called.
2588                                          */
2589                                         if (object->ref_count != 0)
2590                                                 pmap_remove_all(m);
2591                                         m_new->aflags = m->aflags &
2592                                             ~PGA_QUEUE_STATE_MASK;
2593                                         KASSERT(m_new->oflags == VPO_UNMANAGED,
2594                                             ("page %p is managed", m_new));
2595                                         m_new->oflags = m->oflags & VPO_NOSYNC;
2596                                         pmap_copy_page(m, m_new);
2597                                         m_new->valid = m->valid;
2598                                         m_new->dirty = m->dirty;
2599                                         m->flags &= ~PG_ZERO;
2600                                         vm_page_xbusy(m);
2601                                         vm_page_dequeue(m);
2602                                         vm_page_replace_checked(m_new, object,
2603                                             m->pindex, m);
2604                                         if (vm_page_free_prep(m))
2605                                                 SLIST_INSERT_HEAD(&free, m,
2606                                                     plinks.s.ss);
2607
2608                                         /*
2609                                          * The new page must be deactivated
2610                                          * before the object is unlocked.
2611                                          */
2612                                         vm_page_change_lock(m_new, &m_mtx);
2613                                         vm_page_deactivate(m_new);
2614                                 } else {
2615                                         m->flags &= ~PG_ZERO;
2616                                         vm_page_dequeue(m);
2617                                         if (vm_page_free_prep(m))
2618                                                 SLIST_INSERT_HEAD(&free, m,
2619                                                     plinks.s.ss);
2620                                         KASSERT(m->dirty == 0,
2621                                             ("page %p is dirty", m));
2622                                 }
2623                         } else
2624                                 error = EBUSY;
2625 unlock:
2626                         VM_OBJECT_WUNLOCK(object);
2627                 } else {
2628                         MPASS(vm_phys_domain(m) == domain);
2629                         vmd = VM_DOMAIN(domain);
2630                         vm_domain_free_lock(vmd);
2631                         order = m->order;
2632                         if (order < VM_NFREEORDER) {
2633                                 /*
2634                                  * The page is enqueued in the physical memory
2635                                  * allocator's free page queues.  Moreover, it
2636                                  * is the first page in a power-of-two-sized
2637                                  * run of contiguous free pages.  Jump ahead
2638                                  * to the last page within that run, and
2639                                  * continue from there.
2640                                  */
2641                                 m += (1 << order) - 1;
2642                         }
2643 #if VM_NRESERVLEVEL > 0
2644                         else if (vm_reserv_is_page_free(m))
2645                                 order = 0;
2646 #endif
2647                         vm_domain_free_unlock(vmd);
2648                         if (order == VM_NFREEORDER)
2649                                 error = EINVAL;
2650                 }
2651         }
2652         if (m_mtx != NULL)
2653                 mtx_unlock(m_mtx);
2654         if ((m = SLIST_FIRST(&free)) != NULL) {
2655                 int cnt;
2656
2657                 vmd = VM_DOMAIN(domain);
2658                 cnt = 0;
2659                 vm_domain_free_lock(vmd);
2660                 do {
2661                         MPASS(vm_phys_domain(m) == domain);
2662                         SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2663                         vm_phys_free_pages(m, 0);
2664                         cnt++;
2665                 } while ((m = SLIST_FIRST(&free)) != NULL);
2666                 vm_domain_free_unlock(vmd);
2667                 vm_domain_freecnt_inc(vmd, cnt);
2668         }
2669         return (error);
2670 }
2671
2672 #define NRUNS   16
2673
2674 CTASSERT(powerof2(NRUNS));
2675
2676 #define RUN_INDEX(count)        ((count) & (NRUNS - 1))
2677
2678 #define MIN_RECLAIM     8
2679
2680 /*
2681  *      vm_page_reclaim_contig:
2682  *
2683  *      Reclaim allocated, contiguous physical memory satisfying the specified
2684  *      conditions by relocating the virtual pages using that physical memory.
2685  *      Returns true if reclamation is successful and false otherwise.  Since
2686  *      relocation requires the allocation of physical pages, reclamation may
2687  *      fail due to a shortage of free pages.  When reclamation fails, callers
2688  *      are expected to perform vm_wait() before retrying a failed allocation
2689  *      operation, e.g., vm_page_alloc_contig().
2690  *
2691  *      The caller must always specify an allocation class through "req".
2692  *
2693  *      allocation classes:
2694  *      VM_ALLOC_NORMAL         normal process request
2695  *      VM_ALLOC_SYSTEM         system *really* needs a page
2696  *      VM_ALLOC_INTERRUPT      interrupt time request
2697  *
2698  *      The optional allocation flags are ignored.
2699  *
2700  *      "npages" must be greater than zero.  Both "alignment" and "boundary"
2701  *      must be a power of two.
2702  */
2703 bool
2704 vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
2705     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
2706 {
2707         struct vm_domain *vmd;
2708         vm_paddr_t curr_low;
2709         vm_page_t m_run, m_runs[NRUNS];
2710         u_long count, reclaimed;
2711         int error, i, options, req_class;
2712
2713         KASSERT(npages > 0, ("npages is 0"));
2714         KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
2715         KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
2716         req_class = req & VM_ALLOC_CLASS_MASK;
2717
2718         /*
2719          * The page daemon is allowed to dig deeper into the free page list.
2720          */
2721         if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
2722                 req_class = VM_ALLOC_SYSTEM;
2723
2724         /*
2725          * Return if the number of free pages cannot satisfy the requested
2726          * allocation.
2727          */
2728         vmd = VM_DOMAIN(domain);
2729         count = vmd->vmd_free_count;
2730         if (count < npages + vmd->vmd_free_reserved || (count < npages +
2731             vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
2732             (count < npages && req_class == VM_ALLOC_INTERRUPT))
2733                 return (false);
2734
2735         /*
2736          * Scan up to three times, relaxing the restrictions ("options") on
2737          * the reclamation of reservations and superpages each time.
2738          */
2739         for (options = VPSC_NORESERV;;) {
2740                 /*
2741                  * Find the highest runs that satisfy the given constraints
2742                  * and restrictions, and record them in "m_runs".
2743                  */
2744                 curr_low = low;
2745                 count = 0;
2746                 for (;;) {
2747                         m_run = vm_phys_scan_contig(domain, npages, curr_low,
2748                             high, alignment, boundary, options);
2749                         if (m_run == NULL)
2750                                 break;
2751                         curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages);
2752                         m_runs[RUN_INDEX(count)] = m_run;
2753                         count++;
2754                 }
2755
2756                 /*
2757                  * Reclaim the highest runs in LIFO (descending) order until
2758                  * the number of reclaimed pages, "reclaimed", is at least
2759                  * MIN_RECLAIM.  Reset "reclaimed" each time because each
2760                  * reclamation is idempotent, and runs will (likely) recur
2761                  * from one scan to the next as restrictions are relaxed.
2762                  */
2763                 reclaimed = 0;
2764                 for (i = 0; count > 0 && i < NRUNS; i++) {
2765                         count--;
2766                         m_run = m_runs[RUN_INDEX(count)];
2767                         error = vm_page_reclaim_run(req_class, domain, npages,
2768                             m_run, high);
2769                         if (error == 0) {
2770                                 reclaimed += npages;
2771                                 if (reclaimed >= MIN_RECLAIM)
2772                                         return (true);
2773                         }
2774                 }
2775
2776                 /*
2777                  * Either relax the restrictions on the next scan or return if
2778                  * the last scan had no restrictions.
2779                  */
2780                 if (options == VPSC_NORESERV)
2781                         options = VPSC_NOSUPER;
2782                 else if (options == VPSC_NOSUPER)
2783                         options = VPSC_ANY;
2784                 else if (options == VPSC_ANY)
2785                         return (reclaimed != 0);
2786         }
2787 }
2788
2789 bool
2790 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
2791     u_long alignment, vm_paddr_t boundary)
2792 {
2793         struct vm_domainset_iter di;
2794         int domain;
2795         bool ret;
2796
2797         vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
2798         do {
2799                 ret = vm_page_reclaim_contig_domain(domain, req, npages, low,
2800                     high, alignment, boundary);
2801                 if (ret)
2802                         break;
2803         } while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
2804
2805         return (ret);
2806 }
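
/*
 * Example (an illustrative sketch): the retry protocol described above, as
 * used by kmem-style callers of vm_page_alloc_contig().  "npages", "low",
 * "high", "alignment", and "boundary" are placeholders.
 */
#if 0
        while ((m = vm_page_alloc_contig(NULL, 0,
            VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED, npages,
            low, high, alignment, boundary, VM_MEMATTR_DEFAULT)) == NULL) {
                if (!vm_page_reclaim_contig(VM_ALLOC_NORMAL, npages, low,
                    high, alignment, boundary))
                        vm_wait(NULL);  /* Reclamation failed: wait, retry. */
        }
#endif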
2807
2808 /*
2809  * Set the domain in the appropriate page level domainset.
2810  */
2811 void
2812 vm_domain_set(struct vm_domain *vmd)
2813 {
2814
2815         mtx_lock(&vm_domainset_lock);
2816         if (!vmd->vmd_minset && vm_paging_min(vmd)) {
2817                 vmd->vmd_minset = 1;
2818                 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains);
2819         }
2820         if (!vmd->vmd_severeset && vm_paging_severe(vmd)) {
2821                 vmd->vmd_severeset = 1;
2822                 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains);
2823         }
2824         mtx_unlock(&vm_domainset_lock);
2825 }
2826
2827 /*
2828  * Clear the domain from the appropriate page level domainset.
2829  */
2830 void
2831 vm_domain_clear(struct vm_domain *vmd)
2832 {
2833
2834         mtx_lock(&vm_domainset_lock);
2835         if (vmd->vmd_minset && !vm_paging_min(vmd)) {
2836                 vmd->vmd_minset = 0;
2837                 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains);
2838                 if (vm_min_waiters != 0) {
2839                         vm_min_waiters = 0;
2840                         wakeup(&vm_min_domains);
2841                 }
2842         }
2843         if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
2844                 vmd->vmd_severeset = 0;
2845                 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains);
2846                 if (vm_severe_waiters != 0) {
2847                         vm_severe_waiters = 0;
2848                         wakeup(&vm_severe_domains);
2849                 }
2850         }
2851
2852         /*
2853          * If the pageout daemon needs pages, then tell it that there
2854          * are some free.
2855          */
2856         if (vmd->vmd_pageout_pages_needed &&
2857             vmd->vmd_free_count >= vmd->vmd_pageout_free_min) {
2858                 wakeup(&vmd->vmd_pageout_pages_needed);
2859                 vmd->vmd_pageout_pages_needed = 0;
2860         }
2861
2862         /* See comments in vm_wait_doms(). */
2863         if (vm_pageproc_waiters) {
2864                 vm_pageproc_waiters = 0;
2865                 wakeup(&vm_pageproc_waiters);
2866         }
2867         mtx_unlock(&vm_domainset_lock);
2868 }
2869
2870 /*
2871  * Wait for free pages to exceed the min threshold globally.
2872  */
2873 void
2874 vm_wait_min(void)
2875 {
2876
2877         mtx_lock(&vm_domainset_lock);
2878         while (vm_page_count_min()) {
2879                 vm_min_waiters++;
2880                 msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0);
2881         }
2882         mtx_unlock(&vm_domainset_lock);
2883 }
2884
2885 /*
2886  * Wait for free pages to exceed the severe threshold globally.
2887  */
2888 void
2889 vm_wait_severe(void)
2890 {
2891
2892         mtx_lock(&vm_domainset_lock);
2893         while (vm_page_count_severe()) {
2894                 vm_severe_waiters++;
2895                 msleep(&vm_severe_domains, &vm_domainset_lock, PVM,
2896                     "vmwait", 0);
2897         }
2898         mtx_unlock(&vm_domainset_lock);
2899 }
2900
2901 u_int
2902 vm_wait_count(void)
2903 {
2904
2905         return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters);
2906 }
2907
2908 void
2909 vm_wait_doms(const domainset_t *wdoms)
2910 {
2911
2912         /*
2913          * We use racy wakeup synchronization to avoid expensive global
2914          * locking for the pageproc when sleeping with a non-specific vm_wait.
2915          * To handle this, we only sleep for one tick in this instance.  It
2916          * is expected that most allocations for the pageproc will come from
2917          * kmem or vm_page_grab* which will use the more specific and
2918          * race-free vm_wait_domain().
2919          */
2920         if (curproc == pageproc) {
2921                 mtx_lock(&vm_domainset_lock);
2922                 vm_pageproc_waiters++;
2923                 msleep(&vm_pageproc_waiters, &vm_domainset_lock, PVM | PDROP,
2924                     "pageprocwait", 1);
2925         } else {
2926                 /*
2927                  * XXX Ideally we would wait only until the allocation could
2928                  * be satisfied.  This condition can cause new allocators to
2929                  * consume all freed pages while old allocators wait.
2930                  */
2931                 mtx_lock(&vm_domainset_lock);
2932                 if (vm_page_count_min_set(wdoms)) {
2933                         vm_min_waiters++;
2934                         msleep(&vm_min_domains, &vm_domainset_lock,
2935                             PVM | PDROP, "vmwait", 0);
2936                 } else
2937                         mtx_unlock(&vm_domainset_lock);
2938         }
2939 }
2940
2941 /*
2942  *      vm_wait_domain:
2943  *
2944  *      Sleep until free pages are available for allocation.
2945  *      - Called in various places after failed memory allocations.
2946  */
2947 void
2948 vm_wait_domain(int domain)
2949 {
2950         struct vm_domain *vmd;
2951         domainset_t wdom;
2952
2953         vmd = VM_DOMAIN(domain);
2954         vm_domain_free_assert_unlocked(vmd);
2955
2956         if (curproc == pageproc) {
2957                 mtx_lock(&vm_domainset_lock);
2958                 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) {
2959                         vmd->vmd_pageout_pages_needed = 1;
2960                         msleep(&vmd->vmd_pageout_pages_needed,
2961                             &vm_domainset_lock, PDROP | PSWP, "VMWait", 0);
2962                 } else
2963                         mtx_unlock(&vm_domainset_lock);
2964         } else {
2965                 if (pageproc == NULL)
2966                         panic("vm_wait in early boot");
2967                 DOMAINSET_ZERO(&wdom);
2968                 DOMAINSET_SET(vmd->vmd_domain, &wdom);
2969                 vm_wait_doms(&wdom);
2970         }
2971 }
2972
2973 /*
2974  *      vm_wait:
2975  *
2976  *      Sleep until free pages are available for allocation in the
2977  *      affinity domains of the obj.  If obj is NULL, the domain set
2978  *      for the calling thread is used.
2979  *      Called in various places after failed memory allocations.
2980  */
2981 void
2982 vm_wait(vm_object_t obj)
2983 {
2984         struct domainset *d;
2985
2986         d = NULL;
2987
2988         /*
2989          * Carefully fetch pointers only once: the struct domainset
2990          * itself is immutable but the pointer might change.
2991          */
2992         if (obj != NULL)
2993                 d = obj->domain.dr_policy;
2994         if (d == NULL)
2995                 d = curthread->td_domain.dr_policy;
2996
2997         vm_wait_doms(&d->ds_mask);
2998 }
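
/*
 * Example (for illustration only): a common caller pattern retries a
 * failed allocation after sleeping in vm_wait().  "object", "pindex" and
 * "m" are hypothetical caller state; the caller is assumed to be the only
 * thread populating this pindex while the object lock is dropped.
 */
#if 0	/* example only */
        vm_page_t m;

        VM_OBJECT_WLOCK(object);
        while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL) {
                VM_OBJECT_WUNLOCK(object);
                vm_wait(object);
                VM_OBJECT_WLOCK(object);
        }
        /* "m" is returned exclusive-busied; use it, then unbusy it. */
        vm_page_xunbusy(m);
        VM_OBJECT_WUNLOCK(object);
#endif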
2999
3000 /*
3001  *      vm_domain_alloc_fail:
3002  *
3003  *      Called when a page allocation function fails.  Informs the
3004  *      pagedaemon and performs the requested wait.  Requires the
3005  *      object lock on entry; the domain free lock must not be held.
3006  *      Returns with the object lock held and the free lock unlocked.
3007  *      Returns an error when a retry is necessary.
3008  *
3009  */
3010 static int
3011 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req)
3012 {
3013
3014         vm_domain_free_assert_unlocked(vmd);
3015
3016         atomic_add_int(&vmd->vmd_pageout_deficit,
3017             max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
3018         if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
3019                 if (object != NULL) 
3020                         VM_OBJECT_WUNLOCK(object);
3021                 vm_wait_domain(vmd->vmd_domain);
3022                 if (object != NULL) 
3023                         VM_OBJECT_WLOCK(object);
3024                 if (req & VM_ALLOC_WAITOK)
3025                         return (EAGAIN);
3026         }
3027
3028         return (0);
3029 }
3030
3031 /*
3032  *      vm_waitpfault:
3033  *
3034  *      Sleep until free pages are available for allocation.
3035  *      - Called only in vm_fault so that processes page faulting
3036  *        can be easily tracked.
3037  *      - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
3038  *        processes will be able to grab memory first.  Do not change
3039  *        this balance without careful testing first.
3040  */
3041 void
3042 vm_waitpfault(struct domainset *dset, int timo)
3043 {
3044
3045         /*
3046          * XXX Ideally we would wait only until the allocation could
3047          * be satisfied.  This condition can cause new allocators to
3048          * consume all freed pages while old allocators wait.
3049          */
3050         mtx_lock(&vm_domainset_lock);
3051         if (vm_page_count_min_set(&dset->ds_mask)) {
3052                 vm_min_waiters++;
3053                 msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP,
3054                     "pfault", timo);
3055         } else
3056                 mtx_unlock(&vm_domainset_lock);
3057 }
3058
3059 struct vm_pagequeue *
3060 vm_page_pagequeue(vm_page_t m)
3061 {
3062
3063         return (&vm_pagequeue_domain(m)->vmd_pagequeues[m->queue]);
3064 }
3065
3066 static struct mtx *
3067 vm_page_pagequeue_lockptr(vm_page_t m)
3068 {
3069         uint8_t queue;
3070
3071         if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
3072                 return (NULL);
3073         return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue].pq_mutex);
3074 }
3075
3076 static inline void
3077 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m)
3078 {
3079         struct vm_domain *vmd;
3080         uint8_t qflags;
3081
3082         CRITICAL_ASSERT(curthread);
3083         vm_pagequeue_assert_locked(pq);
3084
3085         /*
3086          * The page daemon is allowed to set m->queue = PQ_NONE without
3087          * the page queue lock held.  In this case it is about to free the page,
3088          * which must not have any queue state.
3089          */
3090         qflags = atomic_load_8(&m->aflags) & PGA_QUEUE_STATE_MASK;
3091         KASSERT(pq == vm_page_pagequeue(m) || qflags == 0,
3092             ("page %p doesn't belong to queue %p but has queue state %#x",
3093             m, pq, qflags));
3094
3095         if ((qflags & PGA_DEQUEUE) != 0) {
3096                 if (__predict_true((qflags & PGA_ENQUEUED) != 0)) {
3097                         TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3098                         vm_pagequeue_cnt_dec(pq);
3099                 }
3100                 vm_page_dequeue_complete(m);
3101         } else if ((qflags & (PGA_REQUEUE | PGA_REQUEUE_HEAD)) != 0) {
3102                 if ((qflags & PGA_ENQUEUED) != 0)
3103                         TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3104                 else {
3105                         vm_pagequeue_cnt_inc(pq);
3106                         vm_page_aflag_set(m, PGA_ENQUEUED);
3107                 }
3108                 if ((qflags & PGA_REQUEUE_HEAD) != 0) {
3109                         KASSERT(m->queue == PQ_INACTIVE,
3110                             ("head enqueue not supported for page %p", m));
3111                         vmd = vm_pagequeue_domain(m);
3112                         TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
3113                 } else
3114                         TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
3115
3116                 /*
3117                  * PGA_REQUEUE and PGA_REQUEUE_HEAD must be cleared after
3118                  * setting PGA_ENQUEUED in order to synchronize with the
3119                  * page daemon.
3120                  */
3121                 vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
3122         }
3123 }
3124
3125 static void
3126 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
3127     uint8_t queue)
3128 {
3129         vm_page_t m;
3130         int i;
3131
3132         for (i = 0; i < bq->bq_cnt; i++) {
3133                 m = bq->bq_pa[i];
3134                 if (__predict_false(m->queue != queue))
3135                         continue;
3136                 vm_pqbatch_process_page(pq, m);
3137         }
3138         vm_batchqueue_init(bq);
3139 }
3140
3141 static void
3142 vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
3143 {
3144         struct vm_batchqueue *bq;
3145         struct vm_pagequeue *pq;
3146         int domain;
3147
3148         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3149             ("page %p is unmanaged", m));
3150         KASSERT(mtx_owned(vm_page_lockptr(m)) ||
3151             (m->object == NULL && (m->aflags & PGA_DEQUEUE) != 0),
3152             ("missing synchronization for page %p", m));
3153         KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
3154
3155         domain = vm_phys_domain(m);
3156         pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue];
3157
3158         critical_enter();
3159         bq = DPCPU_PTR(pqbatch[domain][queue]);
3160         if (vm_batchqueue_insert(bq, m)) {
3161                 critical_exit();
3162                 return;
3163         }
3164         if (!vm_pagequeue_trylock(pq)) {
3165                 critical_exit();
3166                 vm_pagequeue_lock(pq);
3167                 critical_enter();
3168                 bq = DPCPU_PTR(pqbatch[domain][queue]);
3169         }
3170         vm_pqbatch_process(pq, bq, queue);
3171
3172         /*
3173          * The page may have been logically dequeued before we acquired the
3174          * page queue lock.  In this case, since we either hold the page lock
3175          * or the page is being freed, a different thread cannot be concurrently
3176          * enqueuing the page.
3177          */
3178         if (__predict_true(m->queue == queue))
3179                 vm_pqbatch_process_page(pq, m);
3180         else {
3181                 KASSERT(m->queue == PQ_NONE,
3182                     ("invalid queue transition for page %p", m));
3183                 KASSERT((m->aflags & PGA_ENQUEUED) == 0,
3184                     ("page %p is enqueued with invalid queue index", m));
3185                 vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK);
3186         }
3187         vm_pagequeue_unlock(pq);
3188         critical_exit();
3189 }
3190
3191 /*
3192  *      vm_page_drain_pqbatch:          [ internal use only ]
3193  *
3194  *      Force all per-CPU page queue batch queues to be drained.  This is
3195  *      intended for use in severe memory shortages, to ensure that pages
3196  *      do not remain stuck in the batch queues.
3197  */
3198 void
3199 vm_page_drain_pqbatch(void)
3200 {
3201         struct thread *td;
3202         struct vm_domain *vmd;
3203         struct vm_pagequeue *pq;
3204         int cpu, domain, queue;
3205
3206         td = curthread;
3207         CPU_FOREACH(cpu) {
3208                 thread_lock(td);
3209                 sched_bind(td, cpu);
3210                 thread_unlock(td);
3211
3212                 for (domain = 0; domain < vm_ndomains; domain++) {
3213                         vmd = VM_DOMAIN(domain);
3214                         for (queue = 0; queue < PQ_COUNT; queue++) {
3215                                 pq = &vmd->vmd_pagequeues[queue];
3216                                 vm_pagequeue_lock(pq);
3217                                 critical_enter();
3218                                 vm_pqbatch_process(pq,
3219                                     DPCPU_PTR(pqbatch[domain][queue]), queue);
3220                                 critical_exit();
3221                                 vm_pagequeue_unlock(pq);
3222                         }
3223                 }
3224         }
3225         thread_lock(td);
3226         sched_unbind(td);
3227         thread_unlock(td);
3228 }
3229
3230 /*
3231  * Complete the logical removal of a page from a page queue.  We must be
3232  * careful to synchronize with the page daemon, which may be concurrently
3233  * examining the page with only the page lock held.  The page must not be
3234  * in a state where it appears to be logically enqueued.
3235  */
3236 static void
3237 vm_page_dequeue_complete(vm_page_t m)
3238 {
3239
3240         m->queue = PQ_NONE;
3241         atomic_thread_fence_rel();
3242         vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK);
3243 }
3244
3245 /*
3246  *      vm_page_dequeue_deferred:       [ internal use only ]
3247  *
3248  *      Request removal of the given page from its current page
3249  *      queue.  Physical removal from the queue may be deferred
3250  *      indefinitely.
3251  *
3252  *      The page must be locked.
3253  */
3254 void
3255 vm_page_dequeue_deferred(vm_page_t m)
3256 {
3257         uint8_t queue;
3258
3259         vm_page_assert_locked(m);
3260
3261         if ((queue = vm_page_queue(m)) == PQ_NONE)
3262                 return;
3263         vm_page_aflag_set(m, PGA_DEQUEUE);
3264         vm_pqbatch_submit_page(m, queue);
3265 }
3266
3267 /*
3268  * A variant of vm_page_dequeue_deferred() that does not assert the page
3269  * lock and is only to be called from vm_page_free_prep().  It is just an
3270  * open-coded implementation of vm_page_dequeue_deferred().  Because the
3271  * page is being freed, we can assume that nothing else is scheduling queue
3272  * operations on this page, so we get for free the mutual exclusion that
3273  * is otherwise provided by the page lock.
3274  */
3275 static void
3276 vm_page_dequeue_deferred_free(vm_page_t m)
3277 {
3278         uint8_t queue;
3279
3280         KASSERT(m->object == NULL, ("page %p has an object reference", m));
3281
3282         if ((m->aflags & PGA_DEQUEUE) != 0)
3283                 return;
3284         atomic_thread_fence_acq();
3285         if ((queue = m->queue) == PQ_NONE)
3286                 return;
3287         vm_page_aflag_set(m, PGA_DEQUEUE);
3288         vm_pqbatch_submit_page(m, queue);
3289 }
3290
3291 /*
3292  *      vm_page_dequeue:
3293  *
3294  *      Remove the page from whichever page queue it's in, if any.
3295  *      The page must either be locked or unallocated.  This constraint
3296  *      ensures that the queue state of the page will remain consistent
3297  *      after this function returns.
3298  */
3299 void
3300 vm_page_dequeue(vm_page_t m)
3301 {
3302         struct mtx *lock, *lock1;
3303         struct vm_pagequeue *pq;
3304         uint8_t aflags;
3305
3306         KASSERT(mtx_owned(vm_page_lockptr(m)) || m->order == VM_NFREEORDER,
3307             ("page %p is allocated and unlocked", m));
3308
3309         for (;;) {
3310                 lock = vm_page_pagequeue_lockptr(m);
3311                 if (lock == NULL) {
3312                         /*
3313                          * A thread may be concurrently executing
3314                          * vm_page_dequeue_complete().  Ensure that all queue
3315                          * state is cleared before we return.
3316                          */
3317                         aflags = atomic_load_8(&m->aflags);
3318                         if ((aflags & PGA_QUEUE_STATE_MASK) == 0)
3319                                 return;
3320                         KASSERT((aflags & PGA_DEQUEUE) != 0,
3321                             ("page %p has unexpected queue state flags %#x",
3322                             m, aflags));
3323
3324                         /*
3325                          * Busy wait until the thread updating queue state is
3326                          * finished.  Such a thread must be executing in a
3327                          * critical section.
3328                          */
3329                         cpu_spinwait();
3330                         continue;
3331                 }
3332                 mtx_lock(lock);
3333                 if ((lock1 = vm_page_pagequeue_lockptr(m)) == lock)
3334                         break;
3335                 mtx_unlock(lock);
3336                 lock = lock1;
3337         }
3338         KASSERT(lock == vm_page_pagequeue_lockptr(m),
3339             ("%s: page %p migrated directly between queues", __func__, m));
3340         KASSERT((m->aflags & PGA_DEQUEUE) != 0 ||
3341             mtx_owned(vm_page_lockptr(m)),
3342             ("%s: queued unlocked page %p", __func__, m));
3343
3344         if ((m->aflags & PGA_ENQUEUED) != 0) {
3345                 pq = vm_page_pagequeue(m);
3346                 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3347                 vm_pagequeue_cnt_dec(pq);
3348         }
3349         vm_page_dequeue_complete(m);
3350         mtx_unlock(lock);
3351 }
3352
3353 /*
3354  * Schedule the given page for insertion into the specified page queue.
3355  * Physical insertion of the page may be deferred indefinitely.
3356  */
3357 static void
3358 vm_page_enqueue(vm_page_t m, uint8_t queue)
3359 {
3360
3361         vm_page_assert_locked(m);
3362         KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
3363             ("%s: page %p is already enqueued", __func__, m));
3364
3365         m->queue = queue;
3366         if ((m->aflags & PGA_REQUEUE) == 0)
3367                 vm_page_aflag_set(m, PGA_REQUEUE);
3368         vm_pqbatch_submit_page(m, queue);
3369 }
3370
3371 /*
3372  *      vm_page_requeue:                [ internal use only ]
3373  *
3374  *      Schedule a requeue of the given page.
3375  *
3376  *      The page must be locked.
3377  */
3378 void
3379 vm_page_requeue(vm_page_t m)
3380 {
3381
3382         vm_page_assert_locked(m);
3383         KASSERT(vm_page_queue(m) != PQ_NONE,
3384             ("%s: page %p is not logically enqueued", __func__, m));
3385
3386         if ((m->aflags & PGA_REQUEUE) == 0)
3387                 vm_page_aflag_set(m, PGA_REQUEUE);
3388         vm_pqbatch_submit_page(m, atomic_load_8(&m->queue));
3389 }
3390
3391 /*
3392  *      vm_page_free_prep:
3393  *
3394  *      Prepares the given page to be put on the free list,
3395  *      disassociating it from any VM object. The caller may return
3396  *      the page to the free list only if this function returns true.
3397  *
3398  *      The object must be locked.  The page must be locked if it is
3399  *      managed.
3400  */
3401 bool
3402 vm_page_free_prep(vm_page_t m)
3403 {
3404
3405 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP)
3406         if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
3407                 uint64_t *p;
3408                 int i;
3409                 p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3410                 for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++)
3411                         KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx",
3412                             m, i, (uintmax_t)*p));
3413         }
3414 #endif
3415         if ((m->oflags & VPO_UNMANAGED) == 0) {
3416                 vm_page_lock_assert(m, MA_OWNED);
3417                 KASSERT(!pmap_page_is_mapped(m),
3418                     ("vm_page_free_prep: freeing mapped page %p", m));
3419         } else
3420                 KASSERT(m->queue == PQ_NONE,
3421                     ("vm_page_free_prep: unmanaged page %p is queued", m));
3422         VM_CNT_INC(v_tfree);
3423
3424         if (vm_page_sbusied(m))
3425                 panic("vm_page_free_prep: freeing busy page %p", m);
3426
3427         if (m->object != NULL)
3428                 (void)vm_page_remove(m);
3429
3430         /*
3431          * If the page is fictitious, the object association was removed
3432          * above and there is nothing more to do, so return.
3433          */
3434         if ((m->flags & PG_FICTITIOUS) != 0) {
3435                 KASSERT(m->wire_count == 1,
3436                     ("fictitious page %p is not wired", m));
3437                 KASSERT(m->queue == PQ_NONE,
3438                     ("fictitious page %p is queued", m));
3439                 return (false);
3440         }
3441
3442         /*
3443          * Pages need not be dequeued before they are returned to the physical
3444          * memory allocator, but they must at least be marked for a deferred
3445          * dequeue.
3446          */
3447         if ((m->oflags & VPO_UNMANAGED) == 0)
3448                 vm_page_dequeue_deferred_free(m);
3449
3450         m->valid = 0;
3451         vm_page_undirty(m);
3452
3453         if (vm_page_wired(m) != 0)
3454                 panic("vm_page_free_prep: freeing wired page %p", m);
3455
3456         /*
3457          * Restore the default memory attribute to the page.
3458          */
3459         if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
3460                 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
3461
3462 #if VM_NRESERVLEVEL > 0
3463         /*
3464          * Determine whether the page belongs to a reservation.  If the page was
3465          * allocated from a per-CPU cache, it cannot belong to a reservation, so
3466          * as an optimization, we avoid the check in that case.
3467          */
3468         if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m))
3469                 return (false);
3470 #endif
3471
3472         return (true);
3473 }
3474
3475 /*
3476  *      vm_page_free_toq:
3477  *
3478  *      Returns the given page to the free list, disassociating it
3479  *      from any VM object.
3480  *
3481  *      The object must be locked.  The page must be locked if it is
3482  *      managed.
3483  */
3484 void
3485 vm_page_free_toq(vm_page_t m)
3486 {
3487         struct vm_domain *vmd;
3488         uma_zone_t zone;
3489
3490         if (!vm_page_free_prep(m))
3491                 return;
3492
3493         vmd = vm_pagequeue_domain(m);
3494         zone = vmd->vmd_pgcache[m->pool].zone;
3495         if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
3496                 uma_zfree(zone, m);
3497                 return;
3498         }
3499         vm_domain_free_lock(vmd);
3500         vm_phys_free_pages(m, 0);
3501         vm_domain_free_unlock(vmd);
3502         vm_domain_freecnt_inc(vmd, 1);
3503 }
3504
3505 /*
3506  *      vm_page_free_pages_toq:
3507  *
3508  *      Returns a list of pages to the free list, disassociating them
3509  *      from any VM object.  In other words, this is equivalent to
3510  *      calling vm_page_free_toq() for each page on the list.
3511  *
3512  *      The objects must be locked.  The pages must be locked if they
3513  *      are managed.
3514  */
3515 void
3516 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count)
3517 {
3518         vm_page_t m;
3519         int count;
3520
3521         if (SLIST_EMPTY(free))
3522                 return;
3523
3524         count = 0;
3525         while ((m = SLIST_FIRST(free)) != NULL) {
3526                 count++;
3527                 SLIST_REMOVE_HEAD(free, plinks.s.ss);
3528                 vm_page_free_toq(m);
3529         }
3530
3531         if (update_wire_count)
3532                 vm_wire_sub(count);
3533 }
3534
3535 /*
3536  *      vm_page_wire:
3537  *
3538  * Mark this page as wired down.  If the page is fictitious, then
3539  * its wire count must remain one.
3540  *
3541  * The page must be locked.
3542  */
3543 void
3544 vm_page_wire(vm_page_t m)
3545 {
3546
3547         vm_page_assert_locked(m);
3548         if ((m->flags & PG_FICTITIOUS) != 0) {
3549                 KASSERT(m->wire_count == 1,
3550                     ("vm_page_wire: fictitious page %p's wire count isn't one",
3551                     m));
3552                 return;
3553         }
3554         if (!vm_page_wired(m)) {
3555                 KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
3556                     m->queue == PQ_NONE,
3557                     ("vm_page_wire: unmanaged page %p is queued", m));
3558                 vm_wire_add(1);
3559         }
3560         m->wire_count++;
3561         KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
3562 }
3563
3564 /*
3565  * vm_page_unwire:
3566  *
3567  * Release one wiring of the specified page, potentially allowing it to be
3568  * paged out.  Returns TRUE if the number of wirings transitions to zero and
3569  * FALSE otherwise.
3570  *
3571  * Only managed pages belonging to an object can be paged out.  If the number
3572  * of wirings transitions to zero and the page is eligible for page out, then
3573  * the page is added to the specified paging queue (unless PQ_NONE is
3574  * specified, in which case the page is dequeued if it belongs to a paging
3575  * queue).
3576  *
3577  * If a page is fictitious, then its wire count must always be one.
3578  *
3579  * A managed page must be locked.
3580  */
3581 bool
3582 vm_page_unwire(vm_page_t m, uint8_t queue)
3583 {
3584         bool unwired;
3585
3586         KASSERT(queue < PQ_COUNT || queue == PQ_NONE,
3587             ("vm_page_unwire: invalid queue %u request for page %p",
3588             queue, m));
3589         if ((m->oflags & VPO_UNMANAGED) == 0)
3590                 vm_page_assert_locked(m);
3591
3592         unwired = vm_page_unwire_noq(m);
3593         if (!unwired || (m->oflags & VPO_UNMANAGED) != 0 || m->object == NULL)
3594                 return (unwired);
3595
3596         if (vm_page_queue(m) == queue) {
3597                 if (queue == PQ_ACTIVE)
3598                         vm_page_reference(m);
3599                 else if (queue != PQ_NONE)
3600                         vm_page_requeue(m);
3601         } else {
3602                 vm_page_dequeue(m);
3603                 if (queue != PQ_NONE) {
3604                         vm_page_enqueue(m, queue);
3605                         if (queue == PQ_ACTIVE)
3606                                 /* Initialize act_count. */
3607                                 vm_page_activate(m);
3608                 }
3609         }
3610         return (unwired);
3611 }
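
/*
 * Example (for illustration only): dropping the last wiring on a managed
 * page once a hypothetical caller is done with it, making the page
 * pageable again.  "m" is assumed to be a managed page that the caller
 * previously wired.
 */
#if 0	/* example only */
        vm_page_lock(m);
        (void)vm_page_unwire(m, PQ_INACTIVE);
        vm_page_unlock(m);
#endif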
3612
3613 /*
3614  *
3615  * vm_page_unwire_noq:
3616  *
3617  * Unwire a page without (re-)inserting it into a page queue.  It is up
3618  * to the caller to enqueue, requeue, or free the page as appropriate.
3619  * In most cases, vm_page_unwire() should be used instead.
3620  */
3621 bool
3622 vm_page_unwire_noq(vm_page_t m)
3623 {
3624
3625         if ((m->oflags & VPO_UNMANAGED) == 0)
3626                 vm_page_assert_locked(m);
3627         if ((m->flags & PG_FICTITIOUS) != 0) {
3628                 KASSERT(m->wire_count == 1,
3629             ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
3630                 return (false);
3631         }
3632         if (!vm_page_wired(m))
3633                 panic("vm_page_unwire: page %p's wire count is zero", m);
3634         m->wire_count--;
3635         if (m->wire_count == 0) {
3636                 vm_wire_sub(1);
3637                 return (true);
3638         } else
3639                 return (false);
3640 }
3641
3642 /*
3643  *      vm_page_activate:
3644  *
3645  *      Put the specified page on the active list (if appropriate).
3646  *      Ensure that act_count is at least ACT_INIT but do not otherwise
3647  *      mess with it.
3648  *
3649  *      The page must be locked.
3650  */
3651 void
3652 vm_page_activate(vm_page_t m)
3653 {
3654
3655         vm_page_assert_locked(m);
3656
3657         if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
3658                 return;
3659         if (vm_page_queue(m) == PQ_ACTIVE) {
3660                 if (m->act_count < ACT_INIT)
3661                         m->act_count = ACT_INIT;
3662                 return;
3663         }
3664
3665         vm_page_dequeue(m);
3666         if (m->act_count < ACT_INIT)
3667                 m->act_count = ACT_INIT;
3668         vm_page_enqueue(m, PQ_ACTIVE);
3669 }
3670
3671 /*
3672  * Move the specified page to the tail of the inactive queue, or requeue
3673  * the page if it is already in the inactive queue.
3674  *
3675  * The page must be locked.
3676  */
3677 void
3678 vm_page_deactivate(vm_page_t m)
3679 {
3680
3681         vm_page_assert_locked(m);
3682
3683         if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
3684                 return;
3685
3686         if (!vm_page_inactive(m)) {
3687                 vm_page_dequeue(m);
3688                 vm_page_enqueue(m, PQ_INACTIVE);
3689         } else
3690                 vm_page_requeue(m);
3691 }
3692
3693 /*
3694  * Move the specified page close to the head of the inactive queue,
3695  * bypassing LRU.  A marker page is used to maintain FIFO ordering.
3696  * As with regular enqueues, we use a per-CPU batch queue to reduce
3697  * contention on the page queue lock.
3698  *
3699  * The page must be locked.
3700  */
3701 void
3702 vm_page_deactivate_noreuse(vm_page_t m)
3703 {
3704
3705         vm_page_assert_locked(m);
3706
3707         if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
3708                 return;
3709
3710         if (!vm_page_inactive(m)) {
3711                 vm_page_dequeue(m);
3712                 m->queue = PQ_INACTIVE;
3713         }
3714         if ((m->aflags & PGA_REQUEUE_HEAD) == 0)
3715                 vm_page_aflag_set(m, PGA_REQUEUE_HEAD);
3716         vm_pqbatch_submit_page(m, PQ_INACTIVE);
3717 }
3718
3719 /*
3720  * vm_page_launder
3721  *
3722  *      Put a page in the laundry, or requeue it if it is already there.
3723  */
3724 void
3725 vm_page_launder(vm_page_t m)
3726 {
3727
3728         vm_page_assert_locked(m);
3729         if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
3730                 return;
3731
3732         if (vm_page_in_laundry(m))
3733                 vm_page_requeue(m);
3734         else {
3735                 vm_page_dequeue(m);
3736                 vm_page_enqueue(m, PQ_LAUNDRY);
3737         }
3738 }
3739
3740 /*
3741  * vm_page_unswappable
3742  *
3743  *      Put a page in the PQ_UNSWAPPABLE holding queue.
3744  */
3745 void
3746 vm_page_unswappable(vm_page_t m)
3747 {
3748
3749         vm_page_assert_locked(m);
3750         KASSERT(!vm_page_wired(m) && (m->oflags & VPO_UNMANAGED) == 0,
3751             ("page %p already unswappable", m));
3752
3753         vm_page_dequeue(m);
3754         vm_page_enqueue(m, PQ_UNSWAPPABLE);
3755 }
3756
3757 static void
3758 vm_page_release_toq(vm_page_t m, int flags)
3759 {
3760
3761         /*
3762          * Use a check of the valid bits to determine whether we should
3763          * accelerate reclamation of the page.  The object lock might not be
3764          * held here, in which case the check is racy.  At worst we will either
3765          * accelerate reclamation of a valid page and violate LRU, or
3766          * unnecessarily defer reclamation of an invalid page.
3767          *
3768          * If we were asked not to cache the page, place it near the head of
3769          * the inactive queue so that it is reclaimed sooner.
3770          */
3771         if ((flags & (VPR_TRYFREE | VPR_NOREUSE)) != 0 || m->valid == 0)
3772                 vm_page_deactivate_noreuse(m);
3773         else if (vm_page_active(m))
3774                 vm_page_reference(m);
3775         else
3776                 vm_page_deactivate(m);
3777 }
3778
3779 /*
3780  * Unwire a page and either attempt to free it or re-add it to the page queues.
3781  */
3782 void
3783 vm_page_release(vm_page_t m, int flags)
3784 {
3785         vm_object_t object;
3786         bool freed;
3787
3788         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3789             ("vm_page_release: page %p is unmanaged", m));
3790
3791         vm_page_lock(m);
3792         if (m->object != NULL)
3793                 VM_OBJECT_ASSERT_UNLOCKED(m->object);
3794         if (vm_page_unwire_noq(m)) {
3795                 if ((object = m->object) == NULL) {
3796                         vm_page_free(m);
3797                 } else {
3798                         freed = false;
3799                         if ((flags & VPR_TRYFREE) != 0 && !vm_page_busied(m) &&
3800                             /* Depends on type stability. */
3801                             VM_OBJECT_TRYWLOCK(object)) {
3802                                 /*
3803                                  * Only free unmapped pages.  The busy test from
3804                                  * before the object was locked cannot be relied
3805                                  * upon.
3806                                  */
3807                                 if ((object->ref_count == 0 ||
3808                                     !pmap_page_is_mapped(m)) && m->dirty == 0 &&
3809                                     !vm_page_busied(m)) {
3810                                         vm_page_free(m);
3811                                         freed = true;
3812                                 }
3813                                 VM_OBJECT_WUNLOCK(object);
3814                         }
3815
3816                         if (!freed)
3817                                 vm_page_release_toq(m, flags);
3818                 }
3819         }
3820         vm_page_unlock(m);
3821 }
3822
3823 /* See vm_page_release(). */
3824 void
3825 vm_page_release_locked(vm_page_t m, int flags)
3826 {
3827
3828         VM_OBJECT_ASSERT_WLOCKED(m->object);
3829         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3830             ("vm_page_release_locked: page %p is unmanaged", m));
3831
3832         vm_page_lock(m);
3833         if (vm_page_unwire_noq(m)) {
3834                 if ((flags & VPR_TRYFREE) != 0 &&
3835                     (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
3836                     m->dirty == 0 && !vm_page_busied(m)) {
3837                         vm_page_free(m);
3838                 } else {
3839                         vm_page_release_toq(m, flags);
3840                 }
3841         }
3842         vm_page_unlock(m);
3843 }
3844
3845 /*
3846  * vm_page_advise
3847  *
3848  *      Apply the specified advice to the given page.
3849  *
3850  *      The object and page must be locked.
3851  */
3852 void
3853 vm_page_advise(vm_page_t m, int advice)
3854 {
3855
3856         vm_page_assert_locked(m);
3857         VM_OBJECT_ASSERT_WLOCKED(m->object);
3858         if (advice == MADV_FREE)
3859                 /*
3860                  * Mark the page clean.  This will allow the page to be freed
3861                  * without first paging it out.  MADV_FREE pages are often
3862                  * quickly reused by malloc(3), so we do not do anything that
3863                  * would result in a page fault on a later access.
3864                  */
3865                 vm_page_undirty(m);
3866         else if (advice != MADV_DONTNEED) {
3867                 if (advice == MADV_WILLNEED)
3868                         vm_page_activate(m);
3869                 return;
3870         }
3871
3872         /*
3873          * Clear any references to the page.  Otherwise, the page daemon will
3874          * immediately reactivate the page.
3875          */
3876         vm_page_aflag_clear(m, PGA_REFERENCED);
3877
3878         if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
3879                 vm_page_dirty(m);
3880
3881         /*
3882          * Place clean pages near the head of the inactive queue rather than
3883          * the tail, thus defeating the queue's LRU operation and ensuring that
3884          * the page will be reused quickly.  Dirty pages not already in the
3885          * laundry are moved there.
3886          */
3887         if (m->dirty == 0)
3888                 vm_page_deactivate_noreuse(m);
3889         else if (!vm_page_in_laundry(m))
3890                 vm_page_launder(m);
3891 }
3892
3893 /*
3894  * Grab a page, waiting until we are woken up due to the page
3895  * changing state.  We keep on waiting as long as the page remains
3896  * in the object.  If the page doesn't exist, first allocate it
3897  * and then conditionally zero it.
3898  *
3899  * This routine may sleep.
3900  *
3901  * The object must be locked on entry.  The lock will, however, be released
3902  * and reacquired if the routine sleeps.
3903  */
3904 vm_page_t
3905 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
3906 {
3907         vm_page_t m;
3908         int sleep;
3909         int pflags;
3910
3911         VM_OBJECT_ASSERT_WLOCKED(object);
3912         KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
3913             (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
3914             ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
3915         pflags = allocflags &
3916             ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
3917         if ((allocflags & VM_ALLOC_NOWAIT) == 0)
3918                 pflags |= VM_ALLOC_WAITFAIL;
3919 retrylookup:
3920         if ((m = vm_page_lookup(object, pindex)) != NULL) {
3921                 sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
3922                     vm_page_xbusied(m) : vm_page_busied(m);
3923                 if (sleep) {
3924                         if ((allocflags & VM_ALLOC_NOWAIT) != 0)
3925                                 return (NULL);
3926                         /*
3927                          * Reference the page before unlocking and
3928                          * sleeping so that the page daemon is less
3929                          * likely to reclaim it.
3930                          */
3931                         vm_page_aflag_set(m, PGA_REFERENCED);
3932                         vm_page_lock(m);
3933                         VM_OBJECT_WUNLOCK(object);
3934                         vm_page_busy_sleep(m, "pgrbwt", (allocflags &
3935                             VM_ALLOC_IGN_SBUSY) != 0);
3936                         VM_OBJECT_WLOCK(object);
3937                         goto retrylookup;
3938                 } else {
3939                         if ((allocflags & VM_ALLOC_WIRED) != 0) {
3940                                 vm_page_lock(m);
3941                                 vm_page_wire(m);
3942                                 vm_page_unlock(m);
3943                         }
3944                         if ((allocflags &
3945                             (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
3946                                 vm_page_xbusy(m);
3947                         if ((allocflags & VM_ALLOC_SBUSY) != 0)
3948                                 vm_page_sbusy(m);
3949                         return (m);
3950                 }
3951         }
3952         m = vm_page_alloc(object, pindex, pflags);
3953         if (m == NULL) {
3954                 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
3955                         return (NULL);
3956                 goto retrylookup;
3957         }
3958         if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
3959                 pmap_zero_page(m);
3960         return (m);
3961 }
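
/*
 * Example (for illustration only): a typical vm_page_grab() caller wires
 * a page for temporary use and releases it later.  "object", "pindex" and
 * "m" are hypothetical caller state.
 */
#if 0	/* example only */
        vm_page_t m;

        VM_OBJECT_WLOCK(object);
        m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
        /* The page is returned exclusive-busied as well as wired. */
        vm_page_xunbusy(m);
        VM_OBJECT_WUNLOCK(object);

        /* ... access the wired page ... */

        vm_page_lock(m);
        (void)vm_page_unwire(m, PQ_ACTIVE);
        vm_page_unlock(m);
#endif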
3962
3963 /*
3964  * Return the specified range of pages from the given object.  For each
3965  * page offset within the range, if a page already exists within the object
3966  * at that offset and it is busy, then wait for it to change state.  If,
3967  * instead, the page doesn't exist, then allocate it.
3968  *
3969  * The caller must always specify an allocation class.
3970  *
3971  * allocation classes:
3972  *      VM_ALLOC_NORMAL         normal process request
3973  *      VM_ALLOC_SYSTEM         system *really* needs the pages
3974  *
3975  * The caller must always specify that the pages are to be busied and/or
3976  * wired.
3977  *
3978  * optional allocation flags:
3979  *      VM_ALLOC_IGN_SBUSY      do not sleep on soft busy pages
3980  *      VM_ALLOC_NOBUSY         do not exclusive busy the page
3981  *      VM_ALLOC_NOWAIT         do not sleep
3982  *      VM_ALLOC_SBUSY          set page to sbusy state
3983  *      VM_ALLOC_WIRED          wire the pages
3984  *      VM_ALLOC_ZERO           zero and validate any invalid pages
3985  *
3986  * If VM_ALLOC_NOWAIT is not specified, this routine may sleep.  Otherwise, it
3987  * may return a partial prefix of the requested range.
3988  */
3989 int
3990 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
3991     vm_page_t *ma, int count)
3992 {
3993         vm_page_t m, mpred;
3994         int pflags;
3995         int i;
3996         bool sleep;
3997
3998         VM_OBJECT_ASSERT_WLOCKED(object);
3999         KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
4000             ("vm_page_grab_pages: VM_ALLOC_COUNT() is not allowed"));
4001         KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
4002             (allocflags & VM_ALLOC_WIRED) != 0,
4003             ("vm_page_grab_pages: the pages must be busied or wired"));
4004         KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4005             (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4006             ("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch"));
4007         if (count == 0)
4008                 return (0);
4009         pflags = allocflags & ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK |
4010             VM_ALLOC_WAITFAIL | VM_ALLOC_IGN_SBUSY);
4011         if ((allocflags & VM_ALLOC_NOWAIT) == 0)
4012                 pflags |= VM_ALLOC_WAITFAIL;
4013         i = 0;
4014 retrylookup:
4015         m = vm_radix_lookup_le(&object->rtree, pindex + i);
4016         if (m == NULL || m->pindex != pindex + i) {
4017                 mpred = m;
4018                 m = NULL;
4019         } else
4020                 mpred = TAILQ_PREV(m, pglist, listq);
4021         for (; i < count; i++) {
4022                 if (m != NULL) {
4023                         sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
4024                             vm_page_xbusied(m) : vm_page_busied(m);
4025                         if (sleep) {
4026                                 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
4027                                         break;
4028                                 /*
4029                                  * Reference the page before unlocking and
4030                                  * sleeping so that the page daemon is less
4031                                  * likely to reclaim it.
4032                                  */
4033                                 vm_page_aflag_set(m, PGA_REFERENCED);
4034                                 vm_page_lock(m);
4035                                 VM_OBJECT_WUNLOCK(object);
4036                                 vm_page_busy_sleep(m, "grbmaw", (allocflags &
4037                                     VM_ALLOC_IGN_SBUSY) != 0);
4038                                 VM_OBJECT_WLOCK(object);
4039                                 goto retrylookup;
4040                         }
4041                         if ((allocflags & VM_ALLOC_WIRED) != 0) {
4042                                 vm_page_lock(m);
4043                                 vm_page_wire(m);
4044                                 vm_page_unlock(m);
4045                         }
4046                         if ((allocflags & (VM_ALLOC_NOBUSY |
4047                             VM_ALLOC_SBUSY)) == 0)
4048                                 vm_page_xbusy(m);
4049                         if ((allocflags & VM_ALLOC_SBUSY) != 0)
4050                                 vm_page_sbusy(m);
4051                 } else {
4052                         m = vm_page_alloc_after(object, pindex + i,
4053                             pflags | VM_ALLOC_COUNT(count - i), mpred);
4054                         if (m == NULL) {
4055                                 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
4056                                         break;
4057                                 goto retrylookup;
4058                         }
4059                 }
4060                 if (m->valid == 0 && (allocflags & VM_ALLOC_ZERO) != 0) {
4061                         if ((m->flags & PG_ZERO) == 0)
4062                                 pmap_zero_page(m);
4063                         m->valid = VM_PAGE_BITS_ALL;
4064                 }
4065                 ma[i] = mpred = m;
4066                 m = vm_page_next(m);
4067         }
4068         return (i);
4069 }
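
/*
 * Example (for illustration only): grabbing a short run of busied pages.
 * With VM_ALLOC_NOWAIT the routine may return fewer pages than requested.
 * "object", "pindex" and "ma" are hypothetical caller state.
 */
#if 0	/* example only */
        vm_page_t ma[8];
        int i, got;

        VM_OBJECT_WLOCK(object);
        got = vm_page_grab_pages(object, pindex,
            VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_NOWAIT, ma, nitems(ma));
        for (i = 0; i < got; i++) {
                /* ... work with ma[i], returned exclusive-busied ... */
                vm_page_xunbusy(ma[i]);
        }
        VM_OBJECT_WUNLOCK(object);
#endif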
4070
4071 /*
4072  * Mapping function for valid or dirty bits in a page.
4073  *
4074  * Inputs are required to range within a page.
4075  */
4076 vm_page_bits_t
4077 vm_page_bits(int base, int size)
4078 {
4079         int first_bit;
4080         int last_bit;
4081
4082         KASSERT(
4083             base + size <= PAGE_SIZE,
4084             ("vm_page_bits: illegal base/size %d/%d", base, size)
4085         );
4086
4087         if (size == 0)          /* handle degenerate case */
4088                 return (0);
4089
4090         first_bit = base >> DEV_BSHIFT;
4091         last_bit = (base + size - 1) >> DEV_BSHIFT;
4092
4093         return (((vm_page_bits_t)2 << last_bit) -
4094             ((vm_page_bits_t)1 << first_bit));
4095 }
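
/*
 * Worked example (for illustration, assuming DEV_BSIZE == 512 and thus
 * DEV_BSHIFT == 9): vm_page_bits(512, 1024) spans the second and third
 * 512-byte blocks, so first_bit == 1, last_bit == (1535 >> 9) == 2, and
 * the result is (2 << 2) - (1 << 1) == 0x6, i.e. bits 1 and 2 are set.
 */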
4096
4097 /*
4098  *      vm_page_set_valid_range:
4099  *
4100  *      Sets portions of a page valid.  The arguments are expected
4101  *      to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
4102  *      of any partial chunks touched by the range.  The invalid portion of
4103  *      such chunks will be zeroed.
4104  *
4105  *      (base + size) must be less than or equal to PAGE_SIZE.
4106  */
4107 void
4108 vm_page_set_valid_range(vm_page_t m, int base, int size)
4109 {
4110         int endoff, frag;
4111
4112         VM_OBJECT_ASSERT_WLOCKED(m->object);
4113         if (size == 0)  /* handle degenerate case */
4114                 return;
4115
4116         /*
4117          * If the base is not DEV_BSIZE aligned and the valid
4118          * bit is clear, we have to zero out a portion of the
4119          * first block.
4120          */
4121         if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
4122             (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
4123                 pmap_zero_page_area(m, frag, base - frag);
4124
4125         /*
4126          * If the ending offset is not DEV_BSIZE aligned and the
4127          * valid bit is clear, we have to zero out a portion of
4128          * the last block.
4129          */
4130         endoff = base + size;
4131         if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
4132             (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
4133                 pmap_zero_page_area(m, endoff,
4134                     DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
4135
4136         /*
4137          * Assert that no previously invalid block that is now being validated
4138          * is already dirty.
4139          */
4140         KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
4141             ("vm_page_set_valid_range: page %p is dirty", m));
4142
4143         /*
4144          * Set valid bits inclusive of any overlap.
4145          */
4146         m->valid |= vm_page_bits(base, size);
4147 }
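
/*
 * Worked example (for illustration, assuming DEV_BSIZE == 512):
 * vm_page_set_valid_range(m, 100, 200) touches only the first 512-byte
 * block.  If that block's valid bit is clear, bytes 0-99 and 300-511 are
 * zeroed so the whole block has defined contents, and then bit 0 of
 * m->valid is set because vm_page_bits(100, 200) == 1.
 */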
4148
4149 /*
4150  * Clear the given bits from the specified page's dirty field.
4151  */
4152 static __inline void
4153 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
4154 {
4155         uintptr_t addr;
4156 #if PAGE_SIZE < 16384
4157         int shift;
4158 #endif
4159
4160         /*
4161          * If the object is locked and the page is neither exclusive busy nor
4162          * write mapped, then the page's dirty field cannot possibly be
4163          * set by a concurrent pmap operation.
4164          */
4165         VM_OBJECT_ASSERT_WLOCKED(m->object);
4166         if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
4167                 m->dirty &= ~pagebits;
4168         else {
4169                 /*
4170                  * The pmap layer can call vm_page_dirty() without
4171                  * holding a distinguished lock.  The combination of
4172                  * the object's lock and an atomic operation suffice
4173                  * to guarantee consistency of the page dirty field.
4174                  *
4175                  * For PAGE_SIZE == 32768 case, compiler already
4176                  * properly aligns the dirty field, so no forcible
4177                  * alignment is needed. Only require existence of
4178                  * atomic_clear_64 when page size is 32768.
4179                  */
4180                 addr = (uintptr_t)&m->dirty;
4181 #if PAGE_SIZE == 32768
4182                 atomic_clear_64((uint64_t *)addr, pagebits);
4183 #elif PAGE_SIZE == 16384
4184                 atomic_clear_32((uint32_t *)addr, pagebits);
4185 #else           /* PAGE_SIZE <= 8192 */
4186                 /*
4187                  * Use a trick to perform a 32-bit atomic on the
4188                  * containing aligned word, to not depend on the existence
4189                  * of atomic_clear_{8, 16}.
4190                  */
4191                 shift = addr & (sizeof(uint32_t) - 1);
4192 #if BYTE_ORDER == BIG_ENDIAN
4193                 shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
4194 #else
4195                 shift *= NBBY;
4196 #endif
4197                 addr &= ~(sizeof(uint32_t) - 1);
4198                 atomic_clear_32((uint32_t *)addr, pagebits << shift);
4199 #endif          /* PAGE_SIZE */
4200         }
4201 }
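
/*
 * Worked example (for illustration, assuming PAGE_SIZE == 4096, so that
 * the dirty field is a single byte, on a little-endian machine): if
 * &m->dirty ends in 2, then shift == 2 * NBBY == 16 and the aligned word
 * is at &m->dirty - 2, so atomic_clear_32() clears the requested bits in
 * exactly the byte that holds m->dirty.
 */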
4202
4203 /*
4204  *      vm_page_set_validclean:
4205  *
4206  *      Sets portions of a page valid and clean.  The arguments are expected
4207  *      to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
4208  *      of any partial chunks touched by the range.  The invalid portion of
4209  *      such chunks will be zeroed.
4210  *
4211  *      (base + size) must be less than or equal to PAGE_SIZE.
4212  */
4213 void
4214 vm_page_set_validclean(vm_page_t m, int base, int size)
4215 {
4216         vm_page_bits_t oldvalid, pagebits;
4217         int endoff, frag;
4218
4219         VM_OBJECT_ASSERT_WLOCKED(m->object);
4220         if (size == 0)  /* handle degenerate case */
4221                 return;
4222
4223         /*
4224          * If the base is not DEV_BSIZE aligned and the valid
4225          * bit is clear, we have to zero out a portion of the
4226          * first block.
4227          */
4228         if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
4229             (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
4230                 pmap_zero_page_area(m, frag, base - frag);
4231
4232         /*
4233          * If the ending offset is not DEV_BSIZE aligned and the
4234          * valid bit is clear, we have to zero out a portion of
4235          * the last block.
4236          */
4237         endoff = base + size;
4238         if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
4239             (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
4240                 pmap_zero_page_area(m, endoff,
4241                     DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
4242
4243         /*
4244          * Set valid, clear dirty bits.  If validating the entire
4245          * page we can safely clear the pmap modify bit.  We also
4246          * use this opportunity to clear the VPO_NOSYNC flag.  If a process
4247          * takes a write fault on a MAP_NOSYNC memory area the flag will
4248          * be set again.
4249          *
4250          * We set valid bits inclusive of any overlap, but we can only
4251          * clear dirty bits for DEV_BSIZE chunks that are fully within
4252          * the range.
4253          */
4254         oldvalid = m->valid;
4255         pagebits = vm_page_bits(base, size);
4256         m->valid |= pagebits;
4257 #if 0   /* NOT YET */
4258         if ((frag = base & (DEV_BSIZE - 1)) != 0) {
4259                 frag = DEV_BSIZE - frag;
4260                 base += frag;
4261                 size -= frag;
4262                 if (size < 0)
4263                         size = 0;
4264         }
4265         pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
4266 #endif
4267         if (base == 0 && size == PAGE_SIZE) {
4268                 /*
4269                  * The page can only be modified within the pmap if it is
4270                  * mapped, and it can only be mapped if it was previously
4271                  * fully valid.
4272                  */
4273                 if (oldvalid == VM_PAGE_BITS_ALL)
4274                         /*
4275                          * Perform the pmap_clear_modify() first.  Otherwise,
4276                          * a concurrent pmap operation, such as
4277                          * pmap_protect(), could clear a modification in the
4278                          * pmap and set the dirty field on the page before
4279                          * pmap_clear_modify() had begun and after the dirty
4280                          * field was cleared here.
4281                          */
4282                         pmap_clear_modify(m);
4283                 m->dirty = 0;
4284                 m->oflags &= ~VPO_NOSYNC;
4285         } else if (oldvalid != VM_PAGE_BITS_ALL)
4286                 m->dirty &= ~pagebits;
4287         else
4288                 vm_page_clear_dirty_mask(m, pagebits);
4289 }
4290
4291 void
4292 vm_page_clear_dirty(vm_page_t m, int base, int size)
4293 {
4294
4295         vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
4296 }
4297
4298 /*
4299  *      vm_page_set_invalid:
4300  *
4301  *      Invalidates DEV_BSIZE'd chunks within a page.  Both the
4302  *      valid and dirty bits for the affected areas are cleared.
4303  */
4304 void
4305 vm_page_set_invalid(vm_page_t m, int base, int size)
4306 {
4307         vm_page_bits_t bits;
4308         vm_object_t object;
4309
4310         object = m->object;
4311         VM_OBJECT_ASSERT_WLOCKED(object);
4312         if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
4313             size >= object->un_pager.vnp.vnp_size)
4314                 bits = VM_PAGE_BITS_ALL;
4315         else
4316                 bits = vm_page_bits(base, size);
4317         if (object->ref_count != 0 && m->valid == VM_PAGE_BITS_ALL &&
4318             bits != 0)
4319                 pmap_remove_all(m);
4320         KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) ||
4321             !pmap_page_is_mapped(m),
4322             ("vm_page_set_invalid: page %p is mapped", m));
4323         m->valid &= ~bits;
4324         m->dirty &= ~bits;
4325 }
4326
4327 /*
4328  * vm_page_zero_invalid()
4329  *
4330  *      The kernel assumes that the invalid portions of a page contain
4331  *      garbage, but such pages can be mapped into memory by user code.
4332  *      When this occurs, we must zero out the non-valid portions of the
4333  *      page so user code sees what it expects.
4334  *
4335  *      Pages are most often semi-valid when the end of a file is mapped
4336  *      into memory and the file's size is not page aligned.
4337  */
4338 void
4339 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
4340 {
4341         int b;
4342         int i;
4343
4344         VM_OBJECT_ASSERT_WLOCKED(m->object);
4345         /*
4346          * Scan the valid bits looking for invalid sections that
4347          * must be zeroed.  Invalid sub-DEV_BSIZE'd areas ( where the
4348          * valid bit may be set ) have already been zeroed by
4349          * vm_page_set_validclean().
4350          */
4351         for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
4352                 if (i == (PAGE_SIZE / DEV_BSIZE) ||
4353                     (m->valid & ((vm_page_bits_t)1 << i))) {
4354                         if (i > b) {
4355                                 pmap_zero_page_area(m,
4356                                     b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
4357                         }
4358                         b = i + 1;
4359                 }
4360         }
4361
4362         /*
4363          * setvalid is TRUE when we can safely set the zero'd areas
4364          * as being valid.  We can do this if there are no cache consistency
4365          * issues.  e.g., it is ok to do with UFS, but not ok to do with NFS.
4366          */
4367         if (setvalid)
4368                 m->valid = VM_PAGE_BITS_ALL;
4369 }
4370
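/*
 * Worked example (assumes DEV_BSIZE == 512 and PAGE_SIZE == 4096): for a
 * 6000-byte file, the page at pindex 1 backs bytes [4096, 6000), so only
 * chunks 0-3 are valid.  The scan above then issues a single
 * pmap_zero_page_area(m, 2048, 6144) call covering the twelve invalid
 * chunks 4-15.
 */
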
4371 /*
4372  *      vm_page_is_valid:
4373  *
4374  *      Is (partial) page valid?  Note that in the degenerate case
4375  *      where size == 0, the result is FALSE if the page is entirely
4376  *      invalid and TRUE otherwise.
4377  */
4378 int
4379 vm_page_is_valid(vm_page_t m, int base, int size)
4380 {
4381         vm_page_bits_t bits;
4382
4383         VM_OBJECT_ASSERT_LOCKED(m->object);
4384         bits = vm_page_bits(base, size);
4385         return (m->valid != 0 && (m->valid & bits) == bits);
4386 }
4387
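/*
 * Usage sketch (hypothetical): a pager deciding whether the first disk
 * block of a page requires I/O.  Any object lock type satisfies the
 * assertion above.
 */
#if 0   /* EXAMPLE ONLY */
        VM_OBJECT_RLOCK(object);
        if (!vm_page_is_valid(m, 0, DEV_BSIZE)) {
                /* Schedule a read of the first DEV_BSIZE chunk. */
        }
        VM_OBJECT_RUNLOCK(object);
#endif
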
4388 /*
4389  * Returns true if all of the specified predicates are true for the entire
4390  * (super)page and false otherwise.
4391  */
4392 bool
4393 vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m)
4394 {
4395         vm_object_t object;
4396         int i, npages;
4397
4398         object = m->object;
4399         if (skip_m != NULL && skip_m->object != object)
4400                 return (false);
4401         VM_OBJECT_ASSERT_LOCKED(object);
4402         npages = atop(pagesizes[m->psind]);
4403
4404         /*
4405          * The physically contiguous pages that make up a superpage, i.e., a
4406          * page with a page size index ("psind") greater than zero, will
4407          * occupy adjacent entries in vm_page_array[].
4408          */
4409         for (i = 0; i < npages; i++) {
4410                 /* Always test object consistency, including "skip_m". */
4411                 if (m[i].object != object)
4412                         return (false);
4413                 if (&m[i] == skip_m)
4414                         continue;
4415                 if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
4416                         return (false);
4417                 if ((flags & PS_ALL_DIRTY) != 0) {
4418                         /*
4419                          * Calling vm_page_test_dirty() or pmap_is_modified()
4420                          * might stop this case from spuriously returning
4421                          * "false".  However, that would require a write lock
4422                          * on the object containing "m[i]".
4423                          */
4424                         if (m[i].dirty != VM_PAGE_BITS_ALL)
4425                                 return (false);
4426                 }
4427                 if ((flags & PS_ALL_VALID) != 0 &&
4428                     m[i].valid != VM_PAGE_BITS_ALL)
4429                         return (false);
4430         }
4431         return (true);
4432 }
4433
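/*
 * Hedged sketch: a caller considering superpage promotion could combine
 * the flag predicates as below, with "m" the first base page of the
 * candidate superpage; the surrounding promotion logic is omitted.
 */
#if 0   /* EXAMPLE ONLY */
        if (vm_page_ps_test(m, PS_ALL_VALID | PS_NONE_BUSY, NULL)) {
                /* Every base page is fully valid and unbusied. */
        }
#endif
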
4434 /*
4435  * Set the page's dirty bits if the page is modified.
4436  */
4437 void
4438 vm_page_test_dirty(vm_page_t m)
4439 {
4440
4441         VM_OBJECT_ASSERT_WLOCKED(m->object);
4442         if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
4443                 vm_page_dirty(m);
4444 }
4445
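/*
 * Usage sketch (hypothetical): pageout-style code consults the pmap via
 * vm_page_test_dirty() before trusting a clear dirty field.
 */
#if 0   /* EXAMPLE ONLY */
        VM_OBJECT_WLOCK(object);
        vm_page_test_dirty(m);
        if (m->dirty != 0) {
                /* The page must be laundered before it can be freed. */
        }
        VM_OBJECT_WUNLOCK(object);
#endif

/*
 * The _KBI variants below provide out-of-line page lock primitives for
 * the stable kernel binary interface; when building modules, the
 * vm_page_lock() family is expected to resolve to these functions rather
 * than inlining the lock pointer computation.
 */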
4446 void
4447 vm_page_lock_KBI(vm_page_t m, const char *file, int line)
4448 {
4449
4450         mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
4451 }
4452
4453 void
4454 vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
4455 {
4456
4457         mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
4458 }
4459
4460 int
4461 vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
4462 {
4463
4464         return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
4465 }
4466
4467 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
4468 void
4469 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
4470 {
4471
4472         vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
4473 }
4474
4475 void
4476 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
4477 {
4478
4479         mtx_assert_(vm_page_lockptr(m), a, file, line);
4480 }
4481 #endif
4482
4483 #ifdef INVARIANTS
4484 void
4485 vm_page_object_lock_assert(vm_page_t m)
4486 {
4487
4488         /*
4489          * Certain of the page's fields may only be modified by the
4490          * holder of the containing object's lock or by the exclusive
4491          * busy holder.  Unfortunately, the holder of the write busy is
4492          * not recorded, and thus cannot be checked here.
4493          */
4494         if (m->object != NULL && !vm_page_xbusied(m))
4495                 VM_OBJECT_ASSERT_WLOCKED(m->object);
4496 }
4497
4498 void
4499 vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits)
4500 {
4501
4502         if ((bits & PGA_WRITEABLE) == 0)
4503                 return;
4504
4505         /*
4506          * The PGA_WRITEABLE flag can be set only on a managed page
4507          * that is exclusively busied or whose object is locked.
4508          * Currently, this flag is only set by pmap_enter().
4509          */
4510         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4511             ("PGA_WRITEABLE on unmanaged page"));
4512         if (!vm_page_xbusied(m))
4513                 VM_OBJECT_ASSERT_LOCKED(m->object);
4514 }
4515 #endif
4516
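/*
 * Hedged sketch: a pmap_enter() implementation typically sets
 * PGA_WRITEABLE along these lines once it creates a writeable mapping;
 * "prot" is the requested protection and the test mirrors the
 * assertions above.
 */
#if 0   /* EXAMPLE ONLY */
        if ((m->oflags & VPO_UNMANAGED) == 0 && (prot & VM_PROT_WRITE) != 0)
                vm_page_aflag_set(m, PGA_WRITEABLE);
#endif
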
4517 #include "opt_ddb.h"
4518 #ifdef DDB
4519 #include <sys/kernel.h>
4520
4521 #include <ddb/ddb.h>
4522
4523 DB_SHOW_COMMAND(page, vm_page_print_page_info)
4524 {
4525
4526         db_printf("vm_cnt.v_free_count: %d\n", vm_free_count());
4527         db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count());
4528         db_printf("vm_cnt.v_active_count: %d\n", vm_active_count());
4529         db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count());
4530         db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count());
4531         db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
4532         db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
4533         db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
4534         db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
4535 }
4536
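/*
 * DB_SHOW_COMMAND(page, ...) registers the function above as "show page"
 * at the DDB prompt; it takes no arguments.
 */
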
4537 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
4538 {
4539         int dom;
4540
4541         db_printf("pq_free %d\n", vm_free_count());
4542         for (dom = 0; dom < vm_ndomains; dom++) {
4543                 db_printf(
4544     "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n",
4545                     dom,
4546                     vm_dom[dom].vmd_page_count,
4547                     vm_dom[dom].vmd_free_count,
4548                     vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
4549                     vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
4550                     vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt,
4551                     vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt);
4552         }
4553 }
4554
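/*
 * The per-domain counters above are printed by "show pageq" in DDB, one
 * line per domain up to vm_ndomains.
 */
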
4555 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
4556 {
4557         vm_page_t m;
4558         boolean_t phys, virt;
4559
4560         if (!have_addr) {
4561                 db_printf("show pginfo addr\n");
4562                 return;
4563         }
4564
4565         phys = strchr(modif, 'p') != NULL;
4566         virt = strchr(modif, 'v') != NULL;
4567         if (virt)
4568                 m = PHYS_TO_VM_PAGE(pmap_kextract(addr));
4569         else if (phys)
4570                 m = PHYS_TO_VM_PAGE(addr);
4571         else
4572                 m = (vm_page_t)addr;
4573         db_printf(
4574     "page %p obj %p pidx 0x%jx phys 0x%jx q %d wire %d\n"
4575     "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
4576             m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
4577             m->queue, m->wire_count, m->aflags, m->oflags,
4578             m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);
4579 }
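
/*
 * In DDB, "show pginfo <addr>" treats its argument as a vm_page_t by
 * default; the /p modifier interprets it as a physical address and /v
 * as a kernel virtual address, matching the modif checks above.
 */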
4580 #endif /* DDB */