]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/vm/vm_page.c
Merge ^/vendor/llvm-project/release-10.x up to its last change (upstream
[FreeBSD/FreeBSD.git] / sys / vm / vm_page.c
1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * The Mach Operating System project at Carnegie-Mellon University.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *      from: @(#)vm_page.c     7.4 (Berkeley) 5/7/91
36  */
37
38 /*-
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  */
64
65 /*
66  *      Resident memory management module.
67  */
68
69 #include <sys/cdefs.h>
70 __FBSDID("$FreeBSD$");
71
72 #include "opt_vm.h"
73
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/counter.h>
77 #include <sys/domainset.h>
78 #include <sys/kernel.h>
79 #include <sys/limits.h>
80 #include <sys/linker.h>
81 #include <sys/lock.h>
82 #include <sys/malloc.h>
83 #include <sys/mman.h>
84 #include <sys/msgbuf.h>
85 #include <sys/mutex.h>
86 #include <sys/proc.h>
87 #include <sys/rwlock.h>
88 #include <sys/sleepqueue.h>
89 #include <sys/sbuf.h>
90 #include <sys/sched.h>
91 #include <sys/smp.h>
92 #include <sys/sysctl.h>
93 #include <sys/vmmeter.h>
94 #include <sys/vnode.h>
95
96 #include <vm/vm.h>
97 #include <vm/pmap.h>
98 #include <vm/vm_param.h>
99 #include <vm/vm_domainset.h>
100 #include <vm/vm_kern.h>
101 #include <vm/vm_map.h>
102 #include <vm/vm_object.h>
103 #include <vm/vm_page.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_phys.h>
106 #include <vm/vm_pagequeue.h>
107 #include <vm/vm_pager.h>
108 #include <vm/vm_radix.h>
109 #include <vm/vm_reserv.h>
110 #include <vm/vm_extern.h>
111 #include <vm/uma.h>
112 #include <vm/uma_int.h>
113
114 #include <machine/md_var.h>
115
116 struct vm_domain vm_dom[MAXMEMDOM];
117
118 DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);
119
120 struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
121
122 struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
123 /* The following fields are protected by the domainset lock. */
124 domainset_t __exclusive_cache_line vm_min_domains;
125 domainset_t __exclusive_cache_line vm_severe_domains;
126 static int vm_min_waiters;
127 static int vm_severe_waiters;
128 static int vm_pageproc_waiters;
129
130 static SYSCTL_NODE(_vm_stats, OID_AUTO, page, CTLFLAG_RD, 0,
131     "VM page statistics");
132
133 static counter_u64_t pqstate_commit_retries = EARLY_COUNTER;
134 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, pqstate_commit_retries,
135     CTLFLAG_RD, &pqstate_commit_retries,
136     "Number of failed per-page atomic queue state updates");
137
138 static counter_u64_t queue_ops = EARLY_COUNTER;
139 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_ops,
140     CTLFLAG_RD, &queue_ops,
141     "Number of batched queue operations");
142
143 static counter_u64_t queue_nops = EARLY_COUNTER;
144 SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_nops,
145     CTLFLAG_RD, &queue_nops,
146     "Number of batched queue operations with no effects");
147
148 static void
149 counter_startup(void)
150 {
151
152         pqstate_commit_retries = counter_u64_alloc(M_WAITOK);
153         queue_ops = counter_u64_alloc(M_WAITOK);
154         queue_nops = counter_u64_alloc(M_WAITOK);
155 }
156 SYSINIT(page_counters, SI_SUB_CPU, SI_ORDER_ANY, counter_startup, NULL);
157
158 /*
159  * bogus page -- for I/O to/from partially complete buffers,
160  * or for paging into sparsely invalid regions.
161  */
162 vm_page_t bogus_page;
163
164 vm_page_t vm_page_array;
165 long vm_page_array_size;
166 long first_page;
167
168 static TAILQ_HEAD(, vm_page) blacklist_head;
169 static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
170 SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
171     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");
172
173 static uma_zone_t fakepg_zone;
174
175 static void vm_page_alloc_check(vm_page_t m);
176 static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
177     vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
178 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
179 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
180 static bool vm_page_free_prep(vm_page_t m);
181 static void vm_page_free_toq(vm_page_t m);
182 static void vm_page_init(void *dummy);
183 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
184     vm_pindex_t pindex, vm_page_t mpred);
185 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
186     vm_page_t mpred);
187 static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
188     const uint16_t nflag);
189 static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
190     vm_page_t m_run, vm_paddr_t high);
191 static void vm_page_release_toq(vm_page_t m, uint8_t nqueue, bool noreuse);
192 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
193     int req);
194 static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
195     int flags);
196 static void vm_page_zone_release(void *arg, void **store, int cnt);
197
198 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
199
200 static void
201 vm_page_init(void *dummy)
202 {
203
204         fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
205             NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
206         bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
207             VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
208 }
209
210 /*
211  * The cache page zone is initialized later since we need to be able to allocate
212  * pages before UMA is fully initialized.
213  */
214 static void
215 vm_page_init_cache_zones(void *dummy __unused)
216 {
217         struct vm_domain *vmd;
218         struct vm_pgcache *pgcache;
219         int cache, domain, maxcache, pool;
220
221         maxcache = 0;
222         TUNABLE_INT_FETCH("vm.pgcache_zone_max_pcpu", &maxcache);
223         maxcache *= mp_ncpus;
224         for (domain = 0; domain < vm_ndomains; domain++) {
225                 vmd = VM_DOMAIN(domain);
226                 for (pool = 0; pool < VM_NFREEPOOL; pool++) {
227                         pgcache = &vmd->vmd_pgcache[pool];
228                         pgcache->domain = domain;
229                         pgcache->pool = pool;
230                         pgcache->zone = uma_zcache_create("vm pgcache",
231                             PAGE_SIZE, NULL, NULL, NULL, NULL,
232                             vm_page_zone_import, vm_page_zone_release, pgcache,
233                             UMA_ZONE_VM);
234
235                         /*
236                          * Limit each pool's zone to 0.1% of the pages in the
237                          * domain.
238                          */
239                         cache = maxcache != 0 ? maxcache :
240                             vmd->vmd_page_count / 1000;
241                         uma_zone_set_maxcache(pgcache->zone, cache);
242                 }
243         }
244 }
245 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
246
247 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
248 #if PAGE_SIZE == 32768
249 #ifdef CTASSERT
250 CTASSERT(sizeof(u_long) >= 8);
251 #endif
252 #endif
253
254 /*
255  *      vm_set_page_size:
256  *
257  *      Sets the page size, perhaps based upon the memory
258  *      size.  Must be called before any use of page-size
259  *      dependent functions.
260  */
261 void
262 vm_set_page_size(void)
263 {
264         if (vm_cnt.v_page_size == 0)
265                 vm_cnt.v_page_size = PAGE_SIZE;
266         if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
267                 panic("vm_set_page_size: page size not a power of two");
268 }
269
270 /*
271  *      vm_page_blacklist_next:
272  *
273  *      Find the next entry in the provided string of blacklist
274  *      addresses.  Entries are separated by space, comma, or newline.
275  *      If an invalid integer is encountered then the rest of the
276  *      string is skipped.  Updates the list pointer to the next
277  *      character, or NULL if the string is exhausted or invalid.
278  */
279 static vm_paddr_t
280 vm_page_blacklist_next(char **list, char *end)
281 {
282         vm_paddr_t bad;
283         char *cp, *pos;
284
285         if (list == NULL || *list == NULL)
286                 return (0);
287         if (**list =='\0') {
288                 *list = NULL;
289                 return (0);
290         }
291
292         /*
293          * If there's no end pointer then the buffer is coming from
294          * the kenv and we know it's null-terminated.
295          */
296         if (end == NULL)
297                 end = *list + strlen(*list);
298
299         /* Ensure that strtoq() won't walk off the end */
300         if (*end != '\0') {
301                 if (*end == '\n' || *end == ' ' || *end  == ',')
302                         *end = '\0';
303                 else {
304                         printf("Blacklist not terminated, skipping\n");
305                         *list = NULL;
306                         return (0);
307                 }
308         }
309
310         for (pos = *list; *pos != '\0'; pos = cp) {
311                 bad = strtoq(pos, &cp, 0);
312                 if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
313                         if (bad == 0) {
314                                 if (++cp < end)
315                                         continue;
316                                 else
317                                         break;
318                         }
319                 } else
320                         break;
321                 if (*cp == '\0' || ++cp >= end)
322                         *list = NULL;
323                 else
324                         *list = cp;
325                 return (trunc_page(bad));
326         }
327         printf("Garbage in RAM blacklist, skipping\n");
328         *list = NULL;
329         return (0);
330 }
331
332 bool
333 vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
334 {
335         struct vm_domain *vmd;
336         vm_page_t m;
337         int ret;
338
339         m = vm_phys_paddr_to_vm_page(pa);
340         if (m == NULL)
341                 return (true); /* page does not exist, no failure */
342
343         vmd = vm_pagequeue_domain(m);
344         vm_domain_free_lock(vmd);
345         ret = vm_phys_unfree_page(m);
346         vm_domain_free_unlock(vmd);
347         if (ret != 0) {
348                 vm_domain_freecnt_inc(vmd, -1);
349                 TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
350                 if (verbose)
351                         printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
352         }
353         return (ret);
354 }
355
356 /*
357  *      vm_page_blacklist_check:
358  *
359  *      Iterate through the provided string of blacklist addresses, pulling
360  *      each entry out of the physical allocator free list and putting it
361  *      onto a list for reporting via the vm.page_blacklist sysctl.
362  */
363 static void
364 vm_page_blacklist_check(char *list, char *end)
365 {
366         vm_paddr_t pa;
367         char *next;
368
369         next = list;
370         while (next != NULL) {
371                 if ((pa = vm_page_blacklist_next(&next, end)) == 0)
372                         continue;
373                 vm_page_blacklist_add(pa, bootverbose);
374         }
375 }
376
377 /*
378  *      vm_page_blacklist_load:
379  *
380  *      Search for a special module named "ram_blacklist".  It'll be a
381  *      plain text file provided by the user via the loader directive
382  *      of the same name.
383  */
384 static void
385 vm_page_blacklist_load(char **list, char **end)
386 {
387         void *mod;
388         u_char *ptr;
389         u_int len;
390
391         mod = NULL;
392         ptr = NULL;
393
394         mod = preload_search_by_type("ram_blacklist");
395         if (mod != NULL) {
396                 ptr = preload_fetch_addr(mod);
397                 len = preload_fetch_size(mod);
398         }
399         *list = ptr;
400         if (ptr != NULL)
401                 *end = ptr + len;
402         else
403                 *end = NULL;
404         return;
405 }
406
407 static int
408 sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
409 {
410         vm_page_t m;
411         struct sbuf sbuf;
412         int error, first;
413
414         first = 1;
415         error = sysctl_wire_old_buffer(req, 0);
416         if (error != 0)
417                 return (error);
418         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
419         TAILQ_FOREACH(m, &blacklist_head, listq) {
420                 sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
421                     (uintmax_t)m->phys_addr);
422                 first = 0;
423         }
424         error = sbuf_finish(&sbuf);
425         sbuf_delete(&sbuf);
426         return (error);
427 }
428
429 /*
430  * Initialize a dummy page for use in scans of the specified paging queue.
431  * In principle, this function only needs to set the flag PG_MARKER.
432  * Nonetheless, it write busies the page as a safety precaution.
433  */
434 static void
435 vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
436 {
437
438         bzero(marker, sizeof(*marker));
439         marker->flags = PG_MARKER;
440         marker->a.flags = aflags;
441         marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
442         marker->a.queue = queue;
443 }
444
445 static void
446 vm_page_domain_init(int domain)
447 {
448         struct vm_domain *vmd;
449         struct vm_pagequeue *pq;
450         int i;
451
452         vmd = VM_DOMAIN(domain);
453         bzero(vmd, sizeof(*vmd));
454         *__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
455             "vm inactive pagequeue";
456         *__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
457             "vm active pagequeue";
458         *__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
459             "vm laundry pagequeue";
460         *__DECONST(const char **,
461             &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
462             "vm unswappable pagequeue";
463         vmd->vmd_domain = domain;
464         vmd->vmd_page_count = 0;
465         vmd->vmd_free_count = 0;
466         vmd->vmd_segs = 0;
467         vmd->vmd_oom = FALSE;
468         for (i = 0; i < PQ_COUNT; i++) {
469                 pq = &vmd->vmd_pagequeues[i];
470                 TAILQ_INIT(&pq->pq_pl);
471                 mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
472                     MTX_DEF | MTX_DUPOK);
473                 pq->pq_pdpages = 0;
474                 vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
475         }
476         mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
477         mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
478         snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);
479
480         /*
481          * inacthead is used to provide FIFO ordering for LRU-bypassing
482          * insertions.
483          */
484         vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
485         TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
486             &vmd->vmd_inacthead, plinks.q);
487
488         /*
489          * The clock pages are used to implement active queue scanning without
490          * requeues.  Scans start at clock[0], which is advanced after the scan
491          * ends.  When the two clock hands meet, they are reset and scanning
492          * resumes from the head of the queue.
493          */
494         vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
495         vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
496         TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
497             &vmd->vmd_clock[0], plinks.q);
498         TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
499             &vmd->vmd_clock[1], plinks.q);
500 }
501
502 /*
503  * Initialize a physical page in preparation for adding it to the free
504  * lists.
505  */
506 static void
507 vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
508 {
509
510         m->object = NULL;
511         m->ref_count = 0;
512         m->busy_lock = VPB_FREED;
513         m->flags = m->a.flags = 0;
514         m->phys_addr = pa;
515         m->a.queue = PQ_NONE;
516         m->psind = 0;
517         m->segind = segind;
518         m->order = VM_NFREEORDER;
519         m->pool = VM_FREEPOOL_DEFAULT;
520         m->valid = m->dirty = 0;
521         pmap_page_init(m);
522 }
523
524 #ifndef PMAP_HAS_PAGE_ARRAY
525 static vm_paddr_t
526 vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
527 {
528         vm_paddr_t new_end;
529
530         /*
531          * Reserve an unmapped guard page to trap access to vm_page_array[-1].
532          * However, because this page is allocated from KVM, out-of-bounds
533          * accesses using the direct map will not be trapped.
534          */
535         *vaddr += PAGE_SIZE;
536
537         /*
538          * Allocate physical memory for the page structures, and map it.
539          */
540         new_end = trunc_page(end - page_range * sizeof(struct vm_page));
541         vm_page_array = (vm_page_t)pmap_map(vaddr, new_end, end,
542             VM_PROT_READ | VM_PROT_WRITE);
543         vm_page_array_size = page_range;
544
545         return (new_end);
546 }
547 #endif
548
549 /*
550  *      vm_page_startup:
551  *
552  *      Initializes the resident memory module.  Allocates physical memory for
553  *      bootstrapping UMA and some data structures that are used to manage
554  *      physical pages.  Initializes these structures, and populates the free
555  *      page queues.
556  */
557 vm_offset_t
558 vm_page_startup(vm_offset_t vaddr)
559 {
560         struct vm_phys_seg *seg;
561         vm_page_t m;
562         char *list, *listend;
563         vm_paddr_t end, high_avail, low_avail, new_end, size;
564         vm_paddr_t page_range __unused;
565         vm_paddr_t last_pa, pa;
566         u_long pagecount;
567         int biggestone, i, segind;
568 #ifdef WITNESS
569         vm_offset_t mapped;
570         int witness_size;
571 #endif
572 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
573         long ii;
574 #endif
575
576         vaddr = round_page(vaddr);
577
578         vm_phys_early_startup();
579         biggestone = vm_phys_avail_largest();
580         end = phys_avail[biggestone+1];
581
582         /*
583          * Initialize the page and queue locks.
584          */
585         mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
586         for (i = 0; i < PA_LOCK_COUNT; i++)
587                 mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
588         for (i = 0; i < vm_ndomains; i++)
589                 vm_page_domain_init(i);
590
591         new_end = end;
592 #ifdef WITNESS
593         witness_size = round_page(witness_startup_count());
594         new_end -= witness_size;
595         mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
596             VM_PROT_READ | VM_PROT_WRITE);
597         bzero((void *)mapped, witness_size);
598         witness_startup((void *)mapped);
599 #endif
600
601 #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
602     defined(__i386__) || defined(__mips__) || defined(__riscv) || \
603     defined(__powerpc64__)
604         /*
605          * Allocate a bitmap to indicate that a random physical page
606          * needs to be included in a minidump.
607          *
608          * The amd64 port needs this to indicate which direct map pages
609          * need to be dumped, via calls to dump_add_page()/dump_drop_page().
610          *
611          * However, i386 still needs this workspace internally within the
612          * minidump code.  In theory, they are not needed on i386, but are
613          * included should the sf_buf code decide to use them.
614          */
615         last_pa = 0;
616         for (i = 0; dump_avail[i + 1] != 0; i += 2)
617                 if (dump_avail[i + 1] > last_pa)
618                         last_pa = dump_avail[i + 1];
619         page_range = last_pa / PAGE_SIZE;
620         vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
621         new_end -= vm_page_dump_size;
622         vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
623             new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
624         bzero((void *)vm_page_dump, vm_page_dump_size);
625 #else
626         (void)last_pa;
627 #endif
628 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
629     defined(__riscv) || defined(__powerpc64__)
630         /*
631          * Include the UMA bootstrap pages, witness pages and vm_page_dump
632          * in a crash dump.  When pmap_map() uses the direct map, they are
633          * not automatically included.
634          */
635         for (pa = new_end; pa < end; pa += PAGE_SIZE)
636                 dump_add_page(pa);
637 #endif
638         phys_avail[biggestone + 1] = new_end;
639 #ifdef __amd64__
640         /*
641          * Request that the physical pages underlying the message buffer be
642          * included in a crash dump.  Since the message buffer is accessed
643          * through the direct map, they are not automatically included.
644          */
645         pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
646         last_pa = pa + round_page(msgbufsize);
647         while (pa < last_pa) {
648                 dump_add_page(pa);
649                 pa += PAGE_SIZE;
650         }
651 #endif
652         /*
653          * Compute the number of pages of memory that will be available for
654          * use, taking into account the overhead of a page structure per page.
655          * In other words, solve
656          *      "available physical memory" - round_page(page_range *
657          *          sizeof(struct vm_page)) = page_range * PAGE_SIZE 
658          * for page_range.  
659          */
660         low_avail = phys_avail[0];
661         high_avail = phys_avail[1];
662         for (i = 0; i < vm_phys_nsegs; i++) {
663                 if (vm_phys_segs[i].start < low_avail)
664                         low_avail = vm_phys_segs[i].start;
665                 if (vm_phys_segs[i].end > high_avail)
666                         high_avail = vm_phys_segs[i].end;
667         }
668         /* Skip the first chunk.  It is already accounted for. */
669         for (i = 2; phys_avail[i + 1] != 0; i += 2) {
670                 if (phys_avail[i] < low_avail)
671                         low_avail = phys_avail[i];
672                 if (phys_avail[i + 1] > high_avail)
673                         high_avail = phys_avail[i + 1];
674         }
675         first_page = low_avail / PAGE_SIZE;
676 #ifdef VM_PHYSSEG_SPARSE
677         size = 0;
678         for (i = 0; i < vm_phys_nsegs; i++)
679                 size += vm_phys_segs[i].end - vm_phys_segs[i].start;
680         for (i = 0; phys_avail[i + 1] != 0; i += 2)
681                 size += phys_avail[i + 1] - phys_avail[i];
682 #elif defined(VM_PHYSSEG_DENSE)
683         size = high_avail - low_avail;
684 #else
685 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
686 #endif
687
688 #ifdef PMAP_HAS_PAGE_ARRAY
689         pmap_page_array_startup(size / PAGE_SIZE);
690         biggestone = vm_phys_avail_largest();
691         end = new_end = phys_avail[biggestone + 1];
692 #else
693 #ifdef VM_PHYSSEG_DENSE
694         /*
695          * In the VM_PHYSSEG_DENSE case, the number of pages can account for
696          * the overhead of a page structure per page only if vm_page_array is
697          * allocated from the last physical memory chunk.  Otherwise, we must
698          * allocate page structures representing the physical memory
699          * underlying vm_page_array, even though they will not be used.
700          */
701         if (new_end != high_avail)
702                 page_range = size / PAGE_SIZE;
703         else
704 #endif
705         {
706                 page_range = size / (PAGE_SIZE + sizeof(struct vm_page));
707
708                 /*
709                  * If the partial bytes remaining are large enough for
710                  * a page (PAGE_SIZE) without a corresponding
711                  * 'struct vm_page', then new_end will contain an
712                  * extra page after subtracting the length of the VM
713                  * page array.  Compensate by subtracting an extra
714                  * page from new_end.
715                  */
716                 if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
717                         if (new_end == high_avail)
718                                 high_avail -= PAGE_SIZE;
719                         new_end -= PAGE_SIZE;
720                 }
721         }
722         end = new_end;
723         new_end = vm_page_array_alloc(&vaddr, end, page_range);
724 #endif
725
726 #if VM_NRESERVLEVEL > 0
727         /*
728          * Allocate physical memory for the reservation management system's
729          * data structures, and map it.
730          */
731         new_end = vm_reserv_startup(&vaddr, new_end);
732 #endif
733 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
734     defined(__riscv) || defined(__powerpc64__)
735         /*
736          * Include vm_page_array and vm_reserv_array in a crash dump.
737          */
738         for (pa = new_end; pa < end; pa += PAGE_SIZE)
739                 dump_add_page(pa);
740 #endif
741         phys_avail[biggestone + 1] = new_end;
742
743         /*
744          * Add physical memory segments corresponding to the available
745          * physical pages.
746          */
747         for (i = 0; phys_avail[i + 1] != 0; i += 2)
748                 if (vm_phys_avail_size(i) != 0)
749                         vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
750
751         /*
752          * Initialize the physical memory allocator.
753          */
754         vm_phys_init();
755
756         /*
757          * Initialize the page structures and add every available page to the
758          * physical memory allocator's free lists.
759          */
760 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
761         for (ii = 0; ii < vm_page_array_size; ii++) {
762                 m = &vm_page_array[ii];
763                 vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0);
764                 m->flags = PG_FICTITIOUS;
765         }
766 #endif
767         vm_cnt.v_page_count = 0;
768         for (segind = 0; segind < vm_phys_nsegs; segind++) {
769                 seg = &vm_phys_segs[segind];
770                 for (m = seg->first_page, pa = seg->start; pa < seg->end;
771                     m++, pa += PAGE_SIZE)
772                         vm_page_init_page(m, pa, segind);
773
774                 /*
775                  * Add the segment to the free lists only if it is covered by
776                  * one of the ranges in phys_avail.  Because we've added the
777                  * ranges to the vm_phys_segs array, we can assume that each
778                  * segment is either entirely contained in one of the ranges,
779                  * or doesn't overlap any of them.
780                  */
781                 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
782                         struct vm_domain *vmd;
783
784                         if (seg->start < phys_avail[i] ||
785                             seg->end > phys_avail[i + 1])
786                                 continue;
787
788                         m = seg->first_page;
789                         pagecount = (u_long)atop(seg->end - seg->start);
790
791                         vmd = VM_DOMAIN(seg->domain);
792                         vm_domain_free_lock(vmd);
793                         vm_phys_enqueue_contig(m, pagecount);
794                         vm_domain_free_unlock(vmd);
795                         vm_domain_freecnt_inc(vmd, pagecount);
796                         vm_cnt.v_page_count += (u_int)pagecount;
797
798                         vmd = VM_DOMAIN(seg->domain);
799                         vmd->vmd_page_count += (u_int)pagecount;
800                         vmd->vmd_segs |= 1UL << m->segind;
801                         break;
802                 }
803         }
804
805         /*
806          * Remove blacklisted pages from the physical memory allocator.
807          */
808         TAILQ_INIT(&blacklist_head);
809         vm_page_blacklist_load(&list, &listend);
810         vm_page_blacklist_check(list, listend);
811
812         list = kern_getenv("vm.blacklist");
813         vm_page_blacklist_check(list, NULL);
814
815         freeenv(list);
816 #if VM_NRESERVLEVEL > 0
817         /*
818          * Initialize the reservation management system.
819          */
820         vm_reserv_init();
821 #endif
822
823         return (vaddr);
824 }
825
826 void
827 vm_page_reference(vm_page_t m)
828 {
829
830         vm_page_aflag_set(m, PGA_REFERENCED);
831 }
832
833 static bool
834 vm_page_acquire_flags(vm_page_t m, int allocflags)
835 {
836         bool locked;
837
838         if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
839                 locked = vm_page_trysbusy(m);
840         else
841                 locked = vm_page_tryxbusy(m);
842         if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
843                 vm_page_wire(m);
844         return (locked);
845 }
846
847 /*
848  *      vm_page_busy_sleep_flags
849  *
850  *      Sleep for busy according to VM_ALLOC_ parameters.  Returns true
851  *      if the caller should retry and false otherwise.
852  */
853 static bool
854 vm_page_busy_sleep_flags(vm_object_t object, vm_page_t m, const char *wmesg,
855     int allocflags)
856 {
857
858         if ((allocflags & VM_ALLOC_NOWAIT) != 0)
859                 return (false);
860
861         /*
862          * Reference the page before unlocking and sleeping so that
863          * the page daemon is less likely to reclaim it.
864          */
865         if ((allocflags & VM_ALLOC_NOCREAT) == 0)
866                 vm_page_reference(m);
867
868         if (_vm_page_busy_sleep(object, m, m->pindex, wmesg, allocflags, true))
869                 VM_OBJECT_WLOCK(object);
870         if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
871                 return (false);
872
873         return (true);
874 }
875
876 /*
877  *      vm_page_busy_acquire:
878  *
879  *      Acquire the busy lock as described by VM_ALLOC_* flags.  Will loop
880  *      and drop the object lock if necessary.
881  */
882 bool
883 vm_page_busy_acquire(vm_page_t m, int allocflags)
884 {
885         vm_object_t obj;
886         bool locked;
887
888         /*
889          * The page-specific object must be cached because page
890          * identity can change during the sleep, causing the
891          * re-lock of a different object.
892          * It is assumed that a reference to the object is already
893          * held by the callers.
894          */
895         obj = m->object;
896         for (;;) {
897                 if (vm_page_acquire_flags(m, allocflags))
898                         return (true);
899                 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
900                         return (false);
901                 if (obj != NULL)
902                         locked = VM_OBJECT_WOWNED(obj);
903                 else
904                         locked = false;
905                 MPASS(locked || vm_page_wired(m));
906                 if (_vm_page_busy_sleep(obj, m, m->pindex, "vmpba", allocflags,
907                     locked) && locked)
908                         VM_OBJECT_WLOCK(obj);
909                 if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
910                         return (false);
911                 KASSERT(m->object == obj || m->object == NULL,
912                     ("vm_page_busy_acquire: page %p does not belong to %p",
913                     m, obj));
914         }
915 }
916
917 /*
918  *      vm_page_busy_downgrade:
919  *
920  *      Downgrade an exclusive busy page into a single shared busy page.
921  */
922 void
923 vm_page_busy_downgrade(vm_page_t m)
924 {
925         u_int x;
926
927         vm_page_assert_xbusied(m);
928
929         x = m->busy_lock;
930         for (;;) {
931                 if (atomic_fcmpset_rel_int(&m->busy_lock,
932                     &x, VPB_SHARERS_WORD(1)))
933                         break;
934         }
935         if ((x & VPB_BIT_WAITERS) != 0)
936                 wakeup(m);
937 }
938
939 /*
940  *
941  *      vm_page_busy_tryupgrade:
942  *
943  *      Attempt to upgrade a single shared busy into an exclusive busy.
944  */
945 int
946 vm_page_busy_tryupgrade(vm_page_t m)
947 {
948         u_int ce, x;
949
950         vm_page_assert_sbusied(m);
951
952         x = m->busy_lock;
953         ce = VPB_CURTHREAD_EXCLUSIVE;
954         for (;;) {
955                 if (VPB_SHARERS(x) > 1)
956                         return (0);
957                 KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
958                     ("vm_page_busy_tryupgrade: invalid lock state"));
959                 if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
960                     ce | (x & VPB_BIT_WAITERS)))
961                         continue;
962                 return (1);
963         }
964 }
965
966 /*
967  *      vm_page_sbusied:
968  *
969  *      Return a positive value if the page is shared busied, 0 otherwise.
970  */
971 int
972 vm_page_sbusied(vm_page_t m)
973 {
974         u_int x;
975
976         x = m->busy_lock;
977         return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
978 }
979
980 /*
981  *      vm_page_sunbusy:
982  *
983  *      Shared unbusy a page.
984  */
985 void
986 vm_page_sunbusy(vm_page_t m)
987 {
988         u_int x;
989
990         vm_page_assert_sbusied(m);
991
992         x = m->busy_lock;
993         for (;;) {
994                 KASSERT(x != VPB_FREED,
995                     ("vm_page_sunbusy: Unlocking freed page."));
996                 if (VPB_SHARERS(x) > 1) {
997                         if (atomic_fcmpset_int(&m->busy_lock, &x,
998                             x - VPB_ONE_SHARER))
999                                 break;
1000                         continue;
1001                 }
1002                 KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
1003                     ("vm_page_sunbusy: invalid lock state"));
1004                 if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
1005                         continue;
1006                 if ((x & VPB_BIT_WAITERS) == 0)
1007                         break;
1008                 wakeup(m);
1009                 break;
1010         }
1011 }
1012
1013 /*
1014  *      vm_page_busy_sleep:
1015  *
1016  *      Sleep if the page is busy, using the page pointer as wchan.
1017  *      This is used to implement the hard-path of busying mechanism.
1018  *
1019  *      If nonshared is true, sleep only if the page is xbusy.
1020  *
1021  *      The object lock must be held on entry and will be released on exit.
1022  */
1023 void
1024 vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
1025 {
1026         vm_object_t obj;
1027
1028         obj = m->object;
1029         VM_OBJECT_ASSERT_LOCKED(obj);
1030         vm_page_lock_assert(m, MA_NOTOWNED);
1031
1032         if (!_vm_page_busy_sleep(obj, m, m->pindex, wmesg,
1033             nonshared ? VM_ALLOC_SBUSY : 0 , true))
1034                 VM_OBJECT_DROP(obj);
1035 }
1036
1037 /*
1038  *      vm_page_busy_sleep_unlocked:
1039  *
1040  *      Sleep if the page is busy, using the page pointer as wchan.
1041  *      This is used to implement the hard-path of busying mechanism.
1042  *
1043  *      If nonshared is true, sleep only if the page is xbusy.
1044  *
1045  *      The object lock must not be held on entry.  The operation will
1046  *      return if the page changes identity.
1047  */
1048 void
1049 vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
1050     const char *wmesg, bool nonshared)
1051 {
1052
1053         VM_OBJECT_ASSERT_UNLOCKED(obj);
1054         vm_page_lock_assert(m, MA_NOTOWNED);
1055
1056         _vm_page_busy_sleep(obj, m, pindex, wmesg,
1057             nonshared ? VM_ALLOC_SBUSY : 0, false);
1058 }
1059
1060 /*
1061  *      _vm_page_busy_sleep:
1062  *
1063  *      Internal busy sleep function.  Verifies the page identity and
1064  *      lockstate against parameters.  Returns true if it sleeps and
1065  *      false otherwise.
1066  *
1067  *      If locked is true the lock will be dropped for any true returns
1068  *      and held for any false returns.
1069  */
1070 static bool
1071 _vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
1072     const char *wmesg, int allocflags, bool locked)
1073 {
1074         bool xsleep;
1075         u_int x;
1076
1077         /*
1078          * If the object is busy we must wait for that to drain to zero
1079          * before trying the page again.
1080          */
1081         if (obj != NULL && vm_object_busied(obj)) {
1082                 if (locked)
1083                         VM_OBJECT_DROP(obj);
1084                 vm_object_busy_wait(obj, wmesg);
1085                 return (true);
1086         }
1087
1088         if (!vm_page_busied(m))
1089                 return (false);
1090
1091         xsleep = (allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0;
1092         sleepq_lock(m);
1093         x = atomic_load_int(&m->busy_lock);
1094         do {
1095                 /*
1096                  * If the page changes objects or becomes unlocked we can
1097                  * simply return.
1098                  */
1099                 if (x == VPB_UNBUSIED ||
1100                     (xsleep && (x & VPB_BIT_SHARED) != 0) ||
1101                     m->object != obj || m->pindex != pindex) {
1102                         sleepq_release(m);
1103                         return (false);
1104                 }
1105                 if ((x & VPB_BIT_WAITERS) != 0)
1106                         break;
1107         } while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));
1108         if (locked)
1109                 VM_OBJECT_DROP(obj);
1110         DROP_GIANT();
1111         sleepq_add(m, NULL, wmesg, 0, 0);
1112         sleepq_wait(m, PVM);
1113         PICKUP_GIANT();
1114         return (true);
1115 }
1116
1117 /*
1118  *      vm_page_trysbusy:
1119  *
1120  *      Try to shared busy a page.
1121  *      If the operation succeeds 1 is returned otherwise 0.
1122  *      The operation never sleeps.
1123  */
1124 int
1125 vm_page_trysbusy(vm_page_t m)
1126 {
1127         vm_object_t obj;
1128         u_int x;
1129
1130         obj = m->object;
1131         x = m->busy_lock;
1132         for (;;) {
1133                 if ((x & VPB_BIT_SHARED) == 0)
1134                         return (0);
1135                 /*
1136                  * Reduce the window for transient busies that will trigger
1137                  * false negatives in vm_page_ps_test().
1138                  */
1139                 if (obj != NULL && vm_object_busied(obj))
1140                         return (0);
1141                 if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
1142                     x + VPB_ONE_SHARER))
1143                         break;
1144         }
1145
1146         /* Refetch the object now that we're guaranteed that it is stable. */
1147         obj = m->object;
1148         if (obj != NULL && vm_object_busied(obj)) {
1149                 vm_page_sunbusy(m);
1150                 return (0);
1151         }
1152         return (1);
1153 }
1154
1155 /*
1156  *      vm_page_tryxbusy:
1157  *
1158  *      Try to exclusive busy a page.
1159  *      If the operation succeeds 1 is returned otherwise 0.
1160  *      The operation never sleeps.
1161  */
1162 int
1163 vm_page_tryxbusy(vm_page_t m)
1164 {
1165         vm_object_t obj;
1166
1167         if (atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED,
1168             VPB_CURTHREAD_EXCLUSIVE) == 0)
1169                 return (0);
1170
1171         obj = m->object;
1172         if (obj != NULL && vm_object_busied(obj)) {
1173                 vm_page_xunbusy(m);
1174                 return (0);
1175         }
1176         return (1);
1177 }
1178
1179 static void
1180 vm_page_xunbusy_hard_tail(vm_page_t m)
1181 {
1182         atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
1183         /* Wake the waiter. */
1184         wakeup(m);
1185 }
1186
1187 /*
1188  *      vm_page_xunbusy_hard:
1189  *
1190  *      Called when unbusy has failed because there is a waiter.
1191  */
1192 void
1193 vm_page_xunbusy_hard(vm_page_t m)
1194 {
1195         vm_page_assert_xbusied(m);
1196         vm_page_xunbusy_hard_tail(m);
1197 }
1198
1199 void
1200 vm_page_xunbusy_hard_unchecked(vm_page_t m)
1201 {
1202         vm_page_assert_xbusied_unchecked(m);
1203         vm_page_xunbusy_hard_tail(m);
1204 }
1205
1206 static void
1207 vm_page_busy_free(vm_page_t m)
1208 {
1209         u_int x;
1210
1211         atomic_thread_fence_rel();
1212         x = atomic_swap_int(&m->busy_lock, VPB_FREED);
1213         if ((x & VPB_BIT_WAITERS) != 0)
1214                 wakeup(m);
1215 }
1216
1217 /*
1218  *      vm_page_unhold_pages:
1219  *
1220  *      Unhold each of the pages that is referenced by the given array.
1221  */
1222 void
1223 vm_page_unhold_pages(vm_page_t *ma, int count)
1224 {
1225
1226         for (; count != 0; count--) {
1227                 vm_page_unwire(*ma, PQ_ACTIVE);
1228                 ma++;
1229         }
1230 }
1231
1232 vm_page_t
1233 PHYS_TO_VM_PAGE(vm_paddr_t pa)
1234 {
1235         vm_page_t m;
1236
1237 #ifdef VM_PHYSSEG_SPARSE
1238         m = vm_phys_paddr_to_vm_page(pa);
1239         if (m == NULL)
1240                 m = vm_phys_fictitious_to_vm_page(pa);
1241         return (m);
1242 #elif defined(VM_PHYSSEG_DENSE)
1243         long pi;
1244
1245         pi = atop(pa);
1246         if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
1247                 m = &vm_page_array[pi - first_page];
1248                 return (m);
1249         }
1250         return (vm_phys_fictitious_to_vm_page(pa));
1251 #else
1252 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
1253 #endif
1254 }
1255
1256 /*
1257  *      vm_page_getfake:
1258  *
1259  *      Create a fictitious page with the specified physical address and
1260  *      memory attribute.  The memory attribute is the only the machine-
1261  *      dependent aspect of a fictitious page that must be initialized.
1262  */
1263 vm_page_t
1264 vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
1265 {
1266         vm_page_t m;
1267
1268         m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
1269         vm_page_initfake(m, paddr, memattr);
1270         return (m);
1271 }
1272
1273 void
1274 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1275 {
1276
1277         if ((m->flags & PG_FICTITIOUS) != 0) {
1278                 /*
1279                  * The page's memattr might have changed since the
1280                  * previous initialization.  Update the pmap to the
1281                  * new memattr.
1282                  */
1283                 goto memattr;
1284         }
1285         m->phys_addr = paddr;
1286         m->a.queue = PQ_NONE;
1287         /* Fictitious pages don't use "segind". */
1288         m->flags = PG_FICTITIOUS;
1289         /* Fictitious pages don't use "order" or "pool". */
1290         m->oflags = VPO_UNMANAGED;
1291         m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
1292         /* Fictitious pages are unevictable. */
1293         m->ref_count = 1;
1294         pmap_page_init(m);
1295 memattr:
1296         pmap_page_set_memattr(m, memattr);
1297 }
1298
1299 /*
1300  *      vm_page_putfake:
1301  *
1302  *      Release a fictitious page.
1303  */
1304 void
1305 vm_page_putfake(vm_page_t m)
1306 {
1307
1308         KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
1309         KASSERT((m->flags & PG_FICTITIOUS) != 0,
1310             ("vm_page_putfake: bad page %p", m));
1311         vm_page_assert_xbusied(m);
1312         vm_page_busy_free(m);
1313         uma_zfree(fakepg_zone, m);
1314 }
1315
1316 /*
1317  *      vm_page_updatefake:
1318  *
1319  *      Update the given fictitious page to the specified physical address and
1320  *      memory attribute.
1321  */
1322 void
1323 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1324 {
1325
1326         KASSERT((m->flags & PG_FICTITIOUS) != 0,
1327             ("vm_page_updatefake: bad page %p", m));
1328         m->phys_addr = paddr;
1329         pmap_page_set_memattr(m, memattr);
1330 }
1331
1332 /*
1333  *      vm_page_free:
1334  *
1335  *      Free a page.
1336  */
1337 void
1338 vm_page_free(vm_page_t m)
1339 {
1340
1341         m->flags &= ~PG_ZERO;
1342         vm_page_free_toq(m);
1343 }
1344
1345 /*
1346  *      vm_page_free_zero:
1347  *
1348  *      Free a page to the zerod-pages queue
1349  */
1350 void
1351 vm_page_free_zero(vm_page_t m)
1352 {
1353
1354         m->flags |= PG_ZERO;
1355         vm_page_free_toq(m);
1356 }
1357
1358 /*
1359  * Unbusy and handle the page queueing for a page from a getpages request that
1360  * was optionally read ahead or behind.
1361  */
1362 void
1363 vm_page_readahead_finish(vm_page_t m)
1364 {
1365
1366         /* We shouldn't put invalid pages on queues. */
1367         KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m));
1368
1369         /*
1370          * Since the page is not the actually needed one, whether it should
1371          * be activated or deactivated is not obvious.  Empirical results
1372          * have shown that deactivating the page is usually the best choice,
1373          * unless the page is wanted by another thread.
1374          */
1375         if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
1376                 vm_page_activate(m);
1377         else
1378                 vm_page_deactivate(m);
1379         vm_page_xunbusy_unchecked(m);
1380 }
1381
1382 /*
1383  *      vm_page_sleep_if_busy:
1384  *
1385  *      Sleep and release the object lock if the page is busied.
1386  *      Returns TRUE if the thread slept.
1387  *
1388  *      The given page must be unlocked and object containing it must
1389  *      be locked.
1390  */
1391 int
1392 vm_page_sleep_if_busy(vm_page_t m, const char *wmesg)
1393 {
1394         vm_object_t obj;
1395
1396         vm_page_lock_assert(m, MA_NOTOWNED);
1397         VM_OBJECT_ASSERT_WLOCKED(m->object);
1398
1399         /*
1400          * The page-specific object must be cached because page
1401          * identity can change during the sleep, causing the
1402          * re-lock of a different object.
1403          * It is assumed that a reference to the object is already
1404          * held by the callers.
1405          */
1406         obj = m->object;
1407         if (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, 0, true)) {
1408                 VM_OBJECT_WLOCK(obj);
1409                 return (TRUE);
1410         }
1411         return (FALSE);
1412 }
1413
1414 /*
1415  *      vm_page_sleep_if_xbusy:
1416  *
1417  *      Sleep and release the object lock if the page is xbusied.
1418  *      Returns TRUE if the thread slept.
1419  *
1420  *      The given page must be unlocked and object containing it must
1421  *      be locked.
1422  */
1423 int
1424 vm_page_sleep_if_xbusy(vm_page_t m, const char *wmesg)
1425 {
1426         vm_object_t obj;
1427
1428         vm_page_lock_assert(m, MA_NOTOWNED);
1429         VM_OBJECT_ASSERT_WLOCKED(m->object);
1430
1431         /*
1432          * The page-specific object must be cached because page
1433          * identity can change during the sleep, causing the
1434          * re-lock of a different object.
1435          * It is assumed that a reference to the object is already
1436          * held by the callers.
1437          */
1438         obj = m->object;
1439         if (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, VM_ALLOC_SBUSY,
1440             true)) {
1441                 VM_OBJECT_WLOCK(obj);
1442                 return (TRUE);
1443         }
1444         return (FALSE);
1445 }
1446
1447 /*
1448  *      vm_page_dirty_KBI:              [ internal use only ]
1449  *
1450  *      Set all bits in the page's dirty field.
1451  *
1452  *      The object containing the specified page must be locked if the
1453  *      call is made from the machine-independent layer.
1454  *
1455  *      See vm_page_clear_dirty_mask().
1456  *
1457  *      This function should only be called by vm_page_dirty().
1458  */
1459 void
1460 vm_page_dirty_KBI(vm_page_t m)
1461 {
1462
1463         /* Refer to this operation by its public name. */
1464         KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!"));
1465         m->dirty = VM_PAGE_BITS_ALL;
1466 }
1467
1468 /*
1469  *      vm_page_insert:         [ internal use only ]
1470  *
1471  *      Inserts the given mem entry into the object and object list.
1472  *
1473  *      The object must be locked.
1474  */
1475 int
1476 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
1477 {
1478         vm_page_t mpred;
1479
1480         VM_OBJECT_ASSERT_WLOCKED(object);
1481         mpred = vm_radix_lookup_le(&object->rtree, pindex);
1482         return (vm_page_insert_after(m, object, pindex, mpred));
1483 }
1484
1485 /*
1486  *      vm_page_insert_after:
1487  *
1488  *      Inserts the page "m" into the specified object at offset "pindex".
1489  *
1490  *      The page "mpred" must immediately precede the offset "pindex" within
1491  *      the specified object.
1492  *
1493  *      The object must be locked.
1494  */
1495 static int
1496 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
1497     vm_page_t mpred)
1498 {
1499         vm_page_t msucc;
1500
1501         VM_OBJECT_ASSERT_WLOCKED(object);
1502         KASSERT(m->object == NULL,
1503             ("vm_page_insert_after: page already inserted"));
1504         if (mpred != NULL) {
1505                 KASSERT(mpred->object == object,
1506                     ("vm_page_insert_after: object doesn't contain mpred"));
1507                 KASSERT(mpred->pindex < pindex,
1508                     ("vm_page_insert_after: mpred doesn't precede pindex"));
1509                 msucc = TAILQ_NEXT(mpred, listq);
1510         } else
1511                 msucc = TAILQ_FIRST(&object->memq);
1512         if (msucc != NULL)
1513                 KASSERT(msucc->pindex > pindex,
1514                     ("vm_page_insert_after: msucc doesn't succeed pindex"));
1515
1516         /*
1517          * Record the object/offset pair in this page.
1518          */
1519         m->object = object;
1520         m->pindex = pindex;
1521         m->ref_count |= VPRC_OBJREF;
1522
1523         /*
1524          * Now link into the object's ordered list of backed pages.
1525          */
1526         if (vm_radix_insert(&object->rtree, m)) {
1527                 m->object = NULL;
1528                 m->pindex = 0;
1529                 m->ref_count &= ~VPRC_OBJREF;
1530                 return (1);
1531         }
1532         vm_page_insert_radixdone(m, object, mpred);
1533         return (0);
1534 }
1535
1536 /*
1537  *      vm_page_insert_radixdone:
1538  *
1539  *      Complete page "m" insertion into the specified object after the
1540  *      radix trie hooking.
1541  *
1542  *      The page "mpred" must precede the offset "m->pindex" within the
1543  *      specified object.
1544  *
1545  *      The object must be locked.
1546  */
1547 static void
1548 vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
1549 {
1550
1551         VM_OBJECT_ASSERT_WLOCKED(object);
1552         KASSERT(object != NULL && m->object == object,
1553             ("vm_page_insert_radixdone: page %p has inconsistent object", m));
1554         KASSERT((m->ref_count & VPRC_OBJREF) != 0,
1555             ("vm_page_insert_radixdone: page %p is missing object ref", m));
1556         if (mpred != NULL) {
1557                 KASSERT(mpred->object == object,
1558                     ("vm_page_insert_radixdone: object doesn't contain mpred"));
1559                 KASSERT(mpred->pindex < m->pindex,
1560                     ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
1561         }
1562
1563         if (mpred != NULL)
1564                 TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
1565         else
1566                 TAILQ_INSERT_HEAD(&object->memq, m, listq);
1567
1568         /*
1569          * Show that the object has one more resident page.
1570          */
1571         object->resident_page_count++;
1572
1573         /*
1574          * Hold the vnode until the last page is released.
1575          */
1576         if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
1577                 vhold(object->handle);
1578
1579         /*
1580          * Since we are inserting a new and possibly dirty page,
1581          * update the object's generation count.
1582          */
1583         if (pmap_page_is_write_mapped(m))
1584                 vm_object_set_writeable_dirty(object);
1585 }
1586
1587 /*
1588  * Do the work to remove a page from its object.  The caller is responsible for
1589  * updating the page's fields to reflect this removal.
1590  */
1591 static void
1592 vm_page_object_remove(vm_page_t m)
1593 {
1594         vm_object_t object;
1595         vm_page_t mrem;
1596
1597         vm_page_assert_xbusied(m);
1598         object = m->object;
1599         VM_OBJECT_ASSERT_WLOCKED(object);
1600         KASSERT((m->ref_count & VPRC_OBJREF) != 0,
1601             ("page %p is missing its object ref", m));
1602
1603         /* Deferred free of swap space. */
1604         if ((m->a.flags & PGA_SWAP_FREE) != 0)
1605                 vm_pager_page_unswapped(m);
1606
1607         mrem = vm_radix_remove(&object->rtree, m->pindex);
1608         KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));
1609
1610         /*
1611          * Now remove from the object's list of backed pages.
1612          */
1613         TAILQ_REMOVE(&object->memq, m, listq);
1614
1615         /*
1616          * And show that the object has one fewer resident page.
1617          */
1618         object->resident_page_count--;
1619
1620         /*
1621          * The vnode may now be recycled.
1622          */
1623         if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
1624                 vdrop(object->handle);
1625 }
1626
1627 /*
1628  *      vm_page_remove:
1629  *
1630  *      Removes the specified page from its containing object, but does not
1631  *      invalidate any backing storage.  Returns true if the object's reference
1632  *      was the last reference to the page, and false otherwise.
1633  *
1634  *      The object must be locked and the page must be exclusively busied.
1635  *      The exclusive busy will be released on return.  If this is not the
1636  *      final ref and the caller does not hold a wire reference it may not
1637  *      continue to access the page.
1638  */
1639 bool
1640 vm_page_remove(vm_page_t m)
1641 {
1642         bool dropped;
1643
1644         dropped = vm_page_remove_xbusy(m);
1645         vm_page_xunbusy(m);
1646
1647         return (dropped);
1648 }
1649
1650 /*
1651  *      vm_page_remove_xbusy
1652  *
1653  *      Removes the page but leaves the xbusy held.  Returns true if this
1654  *      removed the final ref and false otherwise.
1655  */
1656 bool
1657 vm_page_remove_xbusy(vm_page_t m)
1658 {
1659
1660         vm_page_object_remove(m);
1661         m->object = NULL;
1662         return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
1663 }
1664
1665 /*
1666  *      vm_page_lookup:
1667  *
1668  *      Returns the page associated with the object/offset
1669  *      pair specified; if none is found, NULL is returned.
1670  *
1671  *      The object must be locked.
1672  */
1673 vm_page_t
1674 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
1675 {
1676
1677         VM_OBJECT_ASSERT_LOCKED(object);
1678         return (vm_radix_lookup(&object->rtree, pindex));
1679 }
1680
1681 /*
1682  *      vm_page_find_least:
1683  *
1684  *      Returns the page associated with the object with least pindex
1685  *      greater than or equal to the parameter pindex, or NULL.
1686  *
1687  *      The object must be locked.
1688  */
1689 vm_page_t
1690 vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
1691 {
1692         vm_page_t m;
1693
1694         VM_OBJECT_ASSERT_LOCKED(object);
1695         if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
1696                 m = vm_radix_lookup_ge(&object->rtree, pindex);
1697         return (m);
1698 }
1699
1700 /*
1701  * Returns the given page's successor (by pindex) within the object if it is
1702  * resident; if none is found, NULL is returned.
1703  *
1704  * The object must be locked.
1705  */
1706 vm_page_t
1707 vm_page_next(vm_page_t m)
1708 {
1709         vm_page_t next;
1710
1711         VM_OBJECT_ASSERT_LOCKED(m->object);
1712         if ((next = TAILQ_NEXT(m, listq)) != NULL) {
1713                 MPASS(next->object == m->object);
1714                 if (next->pindex != m->pindex + 1)
1715                         next = NULL;
1716         }
1717         return (next);
1718 }
1719
1720 /*
1721  * Returns the given page's predecessor (by pindex) within the object if it is
1722  * resident; if none is found, NULL is returned.
1723  *
1724  * The object must be locked.
1725  */
1726 vm_page_t
1727 vm_page_prev(vm_page_t m)
1728 {
1729         vm_page_t prev;
1730
1731         VM_OBJECT_ASSERT_LOCKED(m->object);
1732         if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
1733                 MPASS(prev->object == m->object);
1734                 if (prev->pindex != m->pindex - 1)
1735                         prev = NULL;
1736         }
1737         return (prev);
1738 }
1739
1740 /*
1741  * Uses the page mnew as a replacement for an existing page at index
1742  * pindex which must be already present in the object.
1743  *
1744  * Both pages must be exclusively busied on enter.  The old page is
1745  * unbusied on exit.
1746  *
1747  * A return value of true means mold is now free.  If this is not the
1748  * final ref and the caller does not hold a wire reference it may not
1749  * continue to access the page.
1750  */
1751 static bool
1752 vm_page_replace_hold(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
1753     vm_page_t mold)
1754 {
1755         vm_page_t mret;
1756         bool dropped;
1757
1758         VM_OBJECT_ASSERT_WLOCKED(object);
1759         vm_page_assert_xbusied(mold);
1760         KASSERT(mnew->object == NULL && (mnew->ref_count & VPRC_OBJREF) == 0,
1761             ("vm_page_replace: page %p already in object", mnew));
1762
1763         /*
1764          * This function mostly follows vm_page_insert() and
1765          * vm_page_remove() without the radix, object count and vnode
1766          * dance.  Double check such functions for more comments.
1767          */
1768
1769         mnew->object = object;
1770         mnew->pindex = pindex;
1771         atomic_set_int(&mnew->ref_count, VPRC_OBJREF);
1772         mret = vm_radix_replace(&object->rtree, mnew);
1773         KASSERT(mret == mold,
1774             ("invalid page replacement, mold=%p, mret=%p", mold, mret));
1775         KASSERT((mold->oflags & VPO_UNMANAGED) ==
1776             (mnew->oflags & VPO_UNMANAGED),
1777             ("vm_page_replace: mismatched VPO_UNMANAGED"));
1778
1779         /* Keep the resident page list in sorted order. */
1780         TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
1781         TAILQ_REMOVE(&object->memq, mold, listq);
1782         mold->object = NULL;
1783
1784         /*
1785          * The object's resident_page_count does not change because we have
1786          * swapped one page for another, but the generation count should
1787          * change if the page is dirty.
1788          */
1789         if (pmap_page_is_write_mapped(mnew))
1790                 vm_object_set_writeable_dirty(object);
1791         dropped = vm_page_drop(mold, VPRC_OBJREF) == VPRC_OBJREF;
1792         vm_page_xunbusy(mold);
1793
1794         return (dropped);
1795 }
1796
1797 void
1798 vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
1799     vm_page_t mold)
1800 {
1801
1802         vm_page_assert_xbusied(mnew);
1803
1804         if (vm_page_replace_hold(mnew, object, pindex, mold))
1805                 vm_page_free(mold);
1806 }
1807
1808 /*
1809  *      vm_page_rename:
1810  *
1811  *      Move the given memory entry from its
1812  *      current object to the specified target object/offset.
1813  *
1814  *      Note: swap associated with the page must be invalidated by the move.  We
1815  *            have to do this for several reasons:  (1) we aren't freeing the
1816  *            page, (2) we are dirtying the page, (3) the VM system is probably
1817  *            moving the page from object A to B, and will then later move
1818  *            the backing store from A to B and we can't have a conflict.
1819  *
1820  *      Note: we *always* dirty the page.  It is necessary both for the
1821  *            fact that we moved it, and because we may be invalidating
1822  *            swap.
1823  *
1824  *      The objects must be locked.
1825  */
1826 int
1827 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
1828 {
1829         vm_page_t mpred;
1830         vm_pindex_t opidx;
1831
1832         VM_OBJECT_ASSERT_WLOCKED(new_object);
1833
1834         KASSERT(m->ref_count != 0, ("vm_page_rename: page %p has no refs", m));
1835         mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
1836         KASSERT(mpred == NULL || mpred->pindex != new_pindex,
1837             ("vm_page_rename: pindex already renamed"));
1838
1839         /*
1840          * Create a custom version of vm_page_insert() which does not depend
1841          * by m_prev and can cheat on the implementation aspects of the
1842          * function.
1843          */
1844         opidx = m->pindex;
1845         m->pindex = new_pindex;
1846         if (vm_radix_insert(&new_object->rtree, m)) {
1847                 m->pindex = opidx;
1848                 return (1);
1849         }
1850
1851         /*
1852          * The operation cannot fail anymore.  The removal must happen before
1853          * the listq iterator is tainted.
1854          */
1855         m->pindex = opidx;
1856         vm_page_object_remove(m);
1857
1858         /* Return back to the new pindex to complete vm_page_insert(). */
1859         m->pindex = new_pindex;
1860         m->object = new_object;
1861
1862         vm_page_insert_radixdone(m, new_object, mpred);
1863         vm_page_dirty(m);
1864         return (0);
1865 }
1866
1867 /*
1868  *      vm_page_alloc:
1869  *
1870  *      Allocate and return a page that is associated with the specified
1871  *      object and offset pair.  By default, this page is exclusive busied.
1872  *
1873  *      The caller must always specify an allocation class.
1874  *
1875  *      allocation classes:
1876  *      VM_ALLOC_NORMAL         normal process request
1877  *      VM_ALLOC_SYSTEM         system *really* needs a page
1878  *      VM_ALLOC_INTERRUPT      interrupt time request
1879  *
1880  *      optional allocation flags:
1881  *      VM_ALLOC_COUNT(number)  the number of additional pages that the caller
1882  *                              intends to allocate
1883  *      VM_ALLOC_NOBUSY         do not exclusive busy the page
1884  *      VM_ALLOC_NODUMP         do not include the page in a kernel core dump
1885  *      VM_ALLOC_NOOBJ          page is not associated with an object and
1886  *                              should not be exclusive busy
1887  *      VM_ALLOC_SBUSY          shared busy the allocated page
1888  *      VM_ALLOC_WIRED          wire the allocated page
1889  *      VM_ALLOC_ZERO           prefer a zeroed page
1890  */
1891 vm_page_t
1892 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
1893 {
1894
1895         return (vm_page_alloc_after(object, pindex, req, object != NULL ?
1896             vm_radix_lookup_le(&object->rtree, pindex) : NULL));
1897 }
1898
1899 vm_page_t
1900 vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain,
1901     int req)
1902 {
1903
1904         return (vm_page_alloc_domain_after(object, pindex, domain, req,
1905             object != NULL ? vm_radix_lookup_le(&object->rtree, pindex) :
1906             NULL));
1907 }
1908
1909 /*
1910  * Allocate a page in the specified object with the given page index.  To
1911  * optimize insertion of the page into the object, the caller must also specifiy
1912  * the resident page in the object with largest index smaller than the given
1913  * page index, or NULL if no such page exists.
1914  */
1915 vm_page_t
1916 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
1917     int req, vm_page_t mpred)
1918 {
1919         struct vm_domainset_iter di;
1920         vm_page_t m;
1921         int domain;
1922
1923         vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
1924         do {
1925                 m = vm_page_alloc_domain_after(object, pindex, domain, req,
1926                     mpred);
1927                 if (m != NULL)
1928                         break;
1929         } while (vm_domainset_iter_page(&di, object, &domain) == 0);
1930
1931         return (m);
1932 }
1933
1934 /*
1935  * Returns true if the number of free pages exceeds the minimum
1936  * for the request class and false otherwise.
1937  */
1938 static int
1939 _vm_domain_allocate(struct vm_domain *vmd, int req_class, int npages)
1940 {
1941         u_int limit, old, new;
1942
1943         if (req_class == VM_ALLOC_INTERRUPT)
1944                 limit = 0;
1945         else if (req_class == VM_ALLOC_SYSTEM)
1946                 limit = vmd->vmd_interrupt_free_min;
1947         else
1948                 limit = vmd->vmd_free_reserved;
1949
1950         /*
1951          * Attempt to reserve the pages.  Fail if we're below the limit.
1952          */
1953         limit += npages;
1954         old = vmd->vmd_free_count;
1955         do {
1956                 if (old < limit)
1957                         return (0);
1958                 new = old - npages;
1959         } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);
1960
1961         /* Wake the page daemon if we've crossed the threshold. */
1962         if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
1963                 pagedaemon_wakeup(vmd->vmd_domain);
1964
1965         /* Only update bitsets on transitions. */
1966         if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
1967             (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
1968                 vm_domain_set(vmd);
1969
1970         return (1);
1971 }
1972
1973 int
1974 vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
1975 {
1976         int req_class;
1977
1978         /*
1979          * The page daemon is allowed to dig deeper into the free page list.
1980          */
1981         req_class = req & VM_ALLOC_CLASS_MASK;
1982         if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1983                 req_class = VM_ALLOC_SYSTEM;
1984         return (_vm_domain_allocate(vmd, req_class, npages));
1985 }
1986
1987 vm_page_t
1988 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
1989     int req, vm_page_t mpred)
1990 {
1991         struct vm_domain *vmd;
1992         vm_page_t m;
1993         int flags, pool;
1994
1995         KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
1996             (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
1997             ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
1998             (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
1999             ("inconsistent object(%p)/req(%x)", object, req));
2000         KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
2001             ("Can't sleep and retry object insertion."));
2002         KASSERT(mpred == NULL || mpred->pindex < pindex,
2003             ("mpred %p doesn't precede pindex 0x%jx", mpred,
2004             (uintmax_t)pindex));
2005         if (object != NULL)
2006                 VM_OBJECT_ASSERT_WLOCKED(object);
2007
2008         flags = 0;
2009         m = NULL;
2010         pool = object != NULL ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT;
2011 again:
2012 #if VM_NRESERVLEVEL > 0
2013         /*
2014          * Can we allocate the page from a reservation?
2015          */
2016         if (vm_object_reserv(object) &&
2017             (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) !=
2018             NULL) {
2019                 domain = vm_phys_domain(m);
2020                 vmd = VM_DOMAIN(domain);
2021                 goto found;
2022         }
2023 #endif
2024         vmd = VM_DOMAIN(domain);
2025         if (vmd->vmd_pgcache[pool].zone != NULL) {
2026                 m = uma_zalloc(vmd->vmd_pgcache[pool].zone, M_NOWAIT | M_NOVM);
2027                 if (m != NULL) {
2028                         flags |= PG_PCPU_CACHE;
2029                         goto found;
2030                 }
2031         }
2032         if (vm_domain_allocate(vmd, req, 1)) {
2033                 /*
2034                  * If not, allocate it from the free page queues.
2035                  */
2036                 vm_domain_free_lock(vmd);
2037                 m = vm_phys_alloc_pages(domain, pool, 0);
2038                 vm_domain_free_unlock(vmd);
2039                 if (m == NULL) {
2040                         vm_domain_freecnt_inc(vmd, 1);
2041 #if VM_NRESERVLEVEL > 0
2042                         if (vm_reserv_reclaim_inactive(domain))
2043                                 goto again;
2044 #endif
2045                 }
2046         }
2047         if (m == NULL) {
2048                 /*
2049                  * Not allocatable, give up.
2050                  */
2051                 if (vm_domain_alloc_fail(vmd, object, req))
2052                         goto again;
2053                 return (NULL);
2054         }
2055
2056         /*
2057          * At this point we had better have found a good page.
2058          */
2059 found:
2060         vm_page_dequeue(m);
2061         vm_page_alloc_check(m);
2062
2063         /*
2064          * Initialize the page.  Only the PG_ZERO flag is inherited.
2065          */
2066         if ((req & VM_ALLOC_ZERO) != 0)
2067                 flags |= (m->flags & PG_ZERO);
2068         if ((req & VM_ALLOC_NODUMP) != 0)
2069                 flags |= PG_NODUMP;
2070         m->flags = flags;
2071         m->a.flags = 0;
2072         m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
2073             VPO_UNMANAGED : 0;
2074         if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
2075                 m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
2076         else if ((req & VM_ALLOC_SBUSY) != 0)
2077                 m->busy_lock = VPB_SHARERS_WORD(1);
2078         else
2079                 m->busy_lock = VPB_UNBUSIED;
2080         if (req & VM_ALLOC_WIRED) {
2081                 vm_wire_add(1);
2082                 m->ref_count = 1;
2083         }
2084         m->a.act_count = 0;
2085
2086         if (object != NULL) {
2087                 if (vm_page_insert_after(m, object, pindex, mpred)) {
2088                         if (req & VM_ALLOC_WIRED) {
2089                                 vm_wire_sub(1);
2090                                 m->ref_count = 0;
2091                         }
2092                         KASSERT(m->object == NULL, ("page %p has object", m));
2093                         m->oflags = VPO_UNMANAGED;
2094                         m->busy_lock = VPB_UNBUSIED;
2095                         /* Don't change PG_ZERO. */
2096                         vm_page_free_toq(m);
2097                         if (req & VM_ALLOC_WAITFAIL) {
2098                                 VM_OBJECT_WUNLOCK(object);
2099                                 vm_radix_wait();
2100                                 VM_OBJECT_WLOCK(object);
2101                         }
2102                         return (NULL);
2103                 }
2104
2105                 /* Ignore device objects; the pager sets "memattr" for them. */
2106                 if (object->memattr != VM_MEMATTR_DEFAULT &&
2107                     (object->flags & OBJ_FICTITIOUS) == 0)
2108                         pmap_page_set_memattr(m, object->memattr);
2109         } else
2110                 m->pindex = pindex;
2111
2112         return (m);
2113 }
2114
2115 /*
2116  *      vm_page_alloc_contig:
2117  *
2118  *      Allocate a contiguous set of physical pages of the given size "npages"
2119  *      from the free lists.  All of the physical pages must be at or above
2120  *      the given physical address "low" and below the given physical address
2121  *      "high".  The given value "alignment" determines the alignment of the
2122  *      first physical page in the set.  If the given value "boundary" is
2123  *      non-zero, then the set of physical pages cannot cross any physical
2124  *      address boundary that is a multiple of that value.  Both "alignment"
2125  *      and "boundary" must be a power of two.
2126  *
2127  *      If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
2128  *      then the memory attribute setting for the physical pages is configured
2129  *      to the object's memory attribute setting.  Otherwise, the memory
2130  *      attribute setting for the physical pages is configured to "memattr",
2131  *      overriding the object's memory attribute setting.  However, if the
2132  *      object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
2133  *      memory attribute setting for the physical pages cannot be configured
2134  *      to VM_MEMATTR_DEFAULT.
2135  *
2136  *      The specified object may not contain fictitious pages.
2137  *
2138  *      The caller must always specify an allocation class.
2139  *
2140  *      allocation classes:
2141  *      VM_ALLOC_NORMAL         normal process request
2142  *      VM_ALLOC_SYSTEM         system *really* needs a page
2143  *      VM_ALLOC_INTERRUPT      interrupt time request
2144  *
2145  *      optional allocation flags:
2146  *      VM_ALLOC_NOBUSY         do not exclusive busy the page
2147  *      VM_ALLOC_NODUMP         do not include the page in a kernel core dump
2148  *      VM_ALLOC_NOOBJ          page is not associated with an object and
2149  *                              should not be exclusive busy
2150  *      VM_ALLOC_SBUSY          shared busy the allocated page
2151  *      VM_ALLOC_WIRED          wire the allocated page
2152  *      VM_ALLOC_ZERO           prefer a zeroed page
2153  */
2154 vm_page_t
2155 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
2156     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
2157     vm_paddr_t boundary, vm_memattr_t memattr)
2158 {
2159         struct vm_domainset_iter di;
2160         vm_page_t m;
2161         int domain;
2162
2163         vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
2164         do {
2165                 m = vm_page_alloc_contig_domain(object, pindex, domain, req,
2166                     npages, low, high, alignment, boundary, memattr);
2167                 if (m != NULL)
2168                         break;
2169         } while (vm_domainset_iter_page(&di, object, &domain) == 0);
2170
2171         return (m);
2172 }
2173
2174 vm_page_t
2175 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
2176     int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
2177     vm_paddr_t boundary, vm_memattr_t memattr)
2178 {
2179         struct vm_domain *vmd;
2180         vm_page_t m, m_ret, mpred;
2181         u_int busy_lock, flags, oflags;
2182
2183         mpred = NULL;   /* XXX: pacify gcc */
2184         KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
2185             (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
2186             ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
2187             (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
2188             ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object,
2189             req));
2190         KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
2191             ("Can't sleep and retry object insertion."));
2192         if (object != NULL) {
2193                 VM_OBJECT_ASSERT_WLOCKED(object);
2194                 KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
2195                     ("vm_page_alloc_contig: object %p has fictitious pages",
2196                     object));
2197         }
2198         KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
2199
2200         if (object != NULL) {
2201                 mpred = vm_radix_lookup_le(&object->rtree, pindex);
2202                 KASSERT(mpred == NULL || mpred->pindex != pindex,
2203                     ("vm_page_alloc_contig: pindex already allocated"));
2204         }
2205
2206         /*
2207          * Can we allocate the pages without the number of free pages falling
2208          * below the lower bound for the allocation class?
2209          */
2210         m_ret = NULL;
2211 again:
2212 #if VM_NRESERVLEVEL > 0
2213         /*
2214          * Can we allocate the pages from a reservation?
2215          */
2216         if (vm_object_reserv(object) &&
2217             (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req,
2218             mpred, npages, low, high, alignment, boundary)) != NULL) {
2219                 domain = vm_phys_domain(m_ret);
2220                 vmd = VM_DOMAIN(domain);
2221                 goto found;
2222         }
2223 #endif
2224         vmd = VM_DOMAIN(domain);
2225         if (vm_domain_allocate(vmd, req, npages)) {
2226                 /*
2227                  * allocate them from the free page queues.
2228                  */
2229                 vm_domain_free_lock(vmd);
2230                 m_ret = vm_phys_alloc_contig(domain, npages, low, high,
2231                     alignment, boundary);
2232                 vm_domain_free_unlock(vmd);
2233                 if (m_ret == NULL) {
2234                         vm_domain_freecnt_inc(vmd, npages);
2235 #if VM_NRESERVLEVEL > 0
2236                         if (vm_reserv_reclaim_contig(domain, npages, low,
2237                             high, alignment, boundary))
2238                                 goto again;
2239 #endif
2240                 }
2241         }
2242         if (m_ret == NULL) {
2243                 if (vm_domain_alloc_fail(vmd, object, req))
2244                         goto again;
2245                 return (NULL);
2246         }
2247 #if VM_NRESERVLEVEL > 0
2248 found:
2249 #endif
2250         for (m = m_ret; m < &m_ret[npages]; m++) {
2251                 vm_page_dequeue(m);
2252                 vm_page_alloc_check(m);
2253         }
2254
2255         /*
2256          * Initialize the pages.  Only the PG_ZERO flag is inherited.
2257          */
2258         flags = 0;
2259         if ((req & VM_ALLOC_ZERO) != 0)
2260                 flags = PG_ZERO;
2261         if ((req & VM_ALLOC_NODUMP) != 0)
2262                 flags |= PG_NODUMP;
2263         oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
2264             VPO_UNMANAGED : 0;
2265         if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
2266                 busy_lock = VPB_CURTHREAD_EXCLUSIVE;
2267         else if ((req & VM_ALLOC_SBUSY) != 0)
2268                 busy_lock = VPB_SHARERS_WORD(1);
2269         else
2270                 busy_lock = VPB_UNBUSIED;
2271         if ((req & VM_ALLOC_WIRED) != 0)
2272                 vm_wire_add(npages);
2273         if (object != NULL) {
2274                 if (object->memattr != VM_MEMATTR_DEFAULT &&
2275                     memattr == VM_MEMATTR_DEFAULT)
2276                         memattr = object->memattr;
2277         }
2278         for (m = m_ret; m < &m_ret[npages]; m++) {
2279                 m->a.flags = 0;
2280                 m->flags = (m->flags | PG_NODUMP) & flags;
2281                 m->busy_lock = busy_lock;
2282                 if ((req & VM_ALLOC_WIRED) != 0)
2283                         m->ref_count = 1;
2284                 m->a.act_count = 0;
2285                 m->oflags = oflags;
2286                 if (object != NULL) {
2287                         if (vm_page_insert_after(m, object, pindex, mpred)) {
2288                                 if ((req & VM_ALLOC_WIRED) != 0)
2289                                         vm_wire_sub(npages);
2290                                 KASSERT(m->object == NULL,
2291                                     ("page %p has object", m));
2292                                 mpred = m;
2293                                 for (m = m_ret; m < &m_ret[npages]; m++) {
2294                                         if (m <= mpred &&
2295                                             (req & VM_ALLOC_WIRED) != 0)
2296                                                 m->ref_count = 0;
2297                                         m->oflags = VPO_UNMANAGED;
2298                                         m->busy_lock = VPB_UNBUSIED;
2299                                         /* Don't change PG_ZERO. */
2300                                         vm_page_free_toq(m);
2301                                 }
2302                                 if (req & VM_ALLOC_WAITFAIL) {
2303                                         VM_OBJECT_WUNLOCK(object);
2304                                         vm_radix_wait();
2305                                         VM_OBJECT_WLOCK(object);
2306                                 }
2307                                 return (NULL);
2308                         }
2309                         mpred = m;
2310                 } else
2311                         m->pindex = pindex;
2312                 if (memattr != VM_MEMATTR_DEFAULT)
2313                         pmap_page_set_memattr(m, memattr);
2314                 pindex++;
2315         }
2316         return (m_ret);
2317 }
2318
2319 /*
2320  * Check a page that has been freshly dequeued from a freelist.
2321  */
2322 static void
2323 vm_page_alloc_check(vm_page_t m)
2324 {
2325
2326         KASSERT(m->object == NULL, ("page %p has object", m));
2327         KASSERT(m->a.queue == PQ_NONE &&
2328             (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
2329             ("page %p has unexpected queue %d, flags %#x",
2330             m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK)));
2331         KASSERT(m->ref_count == 0, ("page %p has references", m));
2332         KASSERT(vm_page_busy_freed(m), ("page %p is not freed", m));
2333         KASSERT(m->dirty == 0, ("page %p is dirty", m));
2334         KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
2335             ("page %p has unexpected memattr %d",
2336             m, pmap_page_get_memattr(m)));
2337         KASSERT(m->valid == 0, ("free page %p is valid", m));
2338 }
2339
2340 /*
2341  *      vm_page_alloc_freelist:
2342  *
2343  *      Allocate a physical page from the specified free page list.
2344  *
2345  *      The caller must always specify an allocation class.
2346  *
2347  *      allocation classes:
2348  *      VM_ALLOC_NORMAL         normal process request
2349  *      VM_ALLOC_SYSTEM         system *really* needs a page
2350  *      VM_ALLOC_INTERRUPT      interrupt time request
2351  *
2352  *      optional allocation flags:
2353  *      VM_ALLOC_COUNT(number)  the number of additional pages that the caller
2354  *                              intends to allocate
2355  *      VM_ALLOC_WIRED          wire the allocated page
2356  *      VM_ALLOC_ZERO           prefer a zeroed page
2357  */
2358 vm_page_t
2359 vm_page_alloc_freelist(int freelist, int req)
2360 {
2361         struct vm_domainset_iter di;
2362         vm_page_t m;
2363         int domain;
2364
2365         vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
2366         do {
2367                 m = vm_page_alloc_freelist_domain(domain, freelist, req);
2368                 if (m != NULL)
2369                         break;
2370         } while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
2371
2372         return (m);
2373 }
2374
2375 vm_page_t
2376 vm_page_alloc_freelist_domain(int domain, int freelist, int req)
2377 {
2378         struct vm_domain *vmd;
2379         vm_page_t m;
2380         u_int flags;
2381
2382         m = NULL;
2383         vmd = VM_DOMAIN(domain);
2384 again:
2385         if (vm_domain_allocate(vmd, req, 1)) {
2386                 vm_domain_free_lock(vmd);
2387                 m = vm_phys_alloc_freelist_pages(domain, freelist,
2388                     VM_FREEPOOL_DIRECT, 0);
2389                 vm_domain_free_unlock(vmd);
2390                 if (m == NULL)
2391                         vm_domain_freecnt_inc(vmd, 1);
2392         }
2393         if (m == NULL) {
2394                 if (vm_domain_alloc_fail(vmd, NULL, req))
2395                         goto again;
2396                 return (NULL);
2397         }
2398         vm_page_dequeue(m);
2399         vm_page_alloc_check(m);
2400
2401         /*
2402          * Initialize the page.  Only the PG_ZERO flag is inherited.
2403          */
2404         m->a.flags = 0;
2405         flags = 0;
2406         if ((req & VM_ALLOC_ZERO) != 0)
2407                 flags = PG_ZERO;
2408         m->flags &= flags;
2409         if ((req & VM_ALLOC_WIRED) != 0) {
2410                 vm_wire_add(1);
2411                 m->ref_count = 1;
2412         }
2413         /* Unmanaged pages don't use "act_count". */
2414         m->oflags = VPO_UNMANAGED;
2415         return (m);
2416 }
2417
2418 static int
2419 vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags)
2420 {
2421         struct vm_domain *vmd;
2422         struct vm_pgcache *pgcache;
2423         int i;
2424
2425         pgcache = arg;
2426         vmd = VM_DOMAIN(pgcache->domain);
2427
2428         /*
2429          * The page daemon should avoid creating extra memory pressure since its
2430          * main purpose is to replenish the store of free pages.
2431          */
2432         if (vmd->vmd_severeset || curproc == pageproc ||
2433             !_vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
2434                 return (0);
2435         domain = vmd->vmd_domain;
2436         vm_domain_free_lock(vmd);
2437         i = vm_phys_alloc_npages(domain, pgcache->pool, cnt,
2438             (vm_page_t *)store);
2439         vm_domain_free_unlock(vmd);
2440         if (cnt != i)
2441                 vm_domain_freecnt_inc(vmd, cnt - i);
2442
2443         return (i);
2444 }
2445
2446 static void
2447 vm_page_zone_release(void *arg, void **store, int cnt)
2448 {
2449         struct vm_domain *vmd;
2450         struct vm_pgcache *pgcache;
2451         vm_page_t m;
2452         int i;
2453
2454         pgcache = arg;
2455         vmd = VM_DOMAIN(pgcache->domain);
2456         vm_domain_free_lock(vmd);
2457         for (i = 0; i < cnt; i++) {
2458                 m = (vm_page_t)store[i];
2459                 vm_phys_free_pages(m, 0);
2460         }
2461         vm_domain_free_unlock(vmd);
2462         vm_domain_freecnt_inc(vmd, cnt);
2463 }
2464
2465 #define VPSC_ANY        0       /* No restrictions. */
2466 #define VPSC_NORESERV   1       /* Skip reservations; implies VPSC_NOSUPER. */
2467 #define VPSC_NOSUPER    2       /* Skip superpages. */
2468
2469 /*
2470  *      vm_page_scan_contig:
2471  *
2472  *      Scan vm_page_array[] between the specified entries "m_start" and
2473  *      "m_end" for a run of contiguous physical pages that satisfy the
2474  *      specified conditions, and return the lowest page in the run.  The
2475  *      specified "alignment" determines the alignment of the lowest physical
2476  *      page in the run.  If the specified "boundary" is non-zero, then the
2477  *      run of physical pages cannot span a physical address that is a
2478  *      multiple of "boundary".
2479  *
2480  *      "m_end" is never dereferenced, so it need not point to a vm_page
2481  *      structure within vm_page_array[].
2482  *
2483  *      "npages" must be greater than zero.  "m_start" and "m_end" must not
2484  *      span a hole (or discontiguity) in the physical address space.  Both
2485  *      "alignment" and "boundary" must be a power of two.
2486  */
2487 vm_page_t
2488 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
2489     u_long alignment, vm_paddr_t boundary, int options)
2490 {
2491         vm_object_t object;
2492         vm_paddr_t pa;
2493         vm_page_t m, m_run;
2494 #if VM_NRESERVLEVEL > 0
2495         int level;
2496 #endif
2497         int m_inc, order, run_ext, run_len;
2498
2499         KASSERT(npages > 0, ("npages is 0"));
2500         KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
2501         KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
2502         m_run = NULL;
2503         run_len = 0;
2504         for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
2505                 KASSERT((m->flags & PG_MARKER) == 0,
2506                     ("page %p is PG_MARKER", m));
2507                 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1,
2508                     ("fictitious page %p has invalid ref count", m));
2509
2510                 /*
2511                  * If the current page would be the start of a run, check its
2512                  * physical address against the end, alignment, and boundary
2513                  * conditions.  If it doesn't satisfy these conditions, either
2514                  * terminate the scan or advance to the next page that
2515                  * satisfies the failed condition.
2516                  */
2517                 if (run_len == 0) {
2518                         KASSERT(m_run == NULL, ("m_run != NULL"));
2519                         if (m + npages > m_end)
2520                                 break;
2521                         pa = VM_PAGE_TO_PHYS(m);
2522                         if ((pa & (alignment - 1)) != 0) {
2523                                 m_inc = atop(roundup2(pa, alignment) - pa);
2524                                 continue;
2525                         }
2526                         if (rounddown2(pa ^ (pa + ptoa(npages) - 1),
2527                             boundary) != 0) {
2528                                 m_inc = atop(roundup2(pa, boundary) - pa);
2529                                 continue;
2530                         }
2531                 } else
2532                         KASSERT(m_run != NULL, ("m_run == NULL"));
2533
2534 retry:
2535                 m_inc = 1;
2536                 if (vm_page_wired(m))
2537                         run_ext = 0;
2538 #if VM_NRESERVLEVEL > 0
2539                 else if ((level = vm_reserv_level(m)) >= 0 &&
2540                     (options & VPSC_NORESERV) != 0) {
2541                         run_ext = 0;
2542                         /* Advance to the end of the reservation. */
2543                         pa = VM_PAGE_TO_PHYS(m);
2544                         m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) -
2545                             pa);
2546                 }
2547 #endif
2548                 else if ((object = atomic_load_ptr(&m->object)) != NULL) {
2549                         /*
2550                          * The page is considered eligible for relocation if
2551                          * and only if it could be laundered or reclaimed by
2552                          * the page daemon.
2553                          */
2554                         VM_OBJECT_RLOCK(object);
2555                         if (object != m->object) {
2556                                 VM_OBJECT_RUNLOCK(object);
2557                                 goto retry;
2558                         }
2559                         /* Don't care: PG_NODUMP, PG_ZERO. */
2560                         if (object->type != OBJT_DEFAULT &&
2561                             object->type != OBJT_SWAP &&
2562                             object->type != OBJT_VNODE) {
2563                                 run_ext = 0;
2564 #if VM_NRESERVLEVEL > 0
2565                         } else if ((options & VPSC_NOSUPER) != 0 &&
2566                             (level = vm_reserv_level_iffullpop(m)) >= 0) {
2567                                 run_ext = 0;
2568                                 /* Advance to the end of the superpage. */
2569                                 pa = VM_PAGE_TO_PHYS(m);
2570                                 m_inc = atop(roundup2(pa + 1,
2571                                     vm_reserv_size(level)) - pa);
2572 #endif
2573                         } else if (object->memattr == VM_MEMATTR_DEFAULT &&
2574                             vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) {
2575                                 /*
2576                                  * The page is allocated but eligible for
2577                                  * relocation.  Extend the current run by one
2578                                  * page.
2579                                  */
2580                                 KASSERT(pmap_page_get_memattr(m) ==
2581                                     VM_MEMATTR_DEFAULT,
2582                                     ("page %p has an unexpected memattr", m));
2583                                 KASSERT((m->oflags & (VPO_SWAPINPROG |
2584                                     VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
2585                                     ("page %p has unexpected oflags", m));
2586                                 /* Don't care: PGA_NOSYNC. */
2587                                 run_ext = 1;
2588                         } else
2589                                 run_ext = 0;
2590                         VM_OBJECT_RUNLOCK(object);
2591 #if VM_NRESERVLEVEL > 0
2592                 } else if (level >= 0) {
2593                         /*
2594                          * The page is reserved but not yet allocated.  In
2595                          * other words, it is still free.  Extend the current
2596                          * run by one page.
2597                          */
2598                         run_ext = 1;
2599 #endif
2600                 } else if ((order = m->order) < VM_NFREEORDER) {
2601                         /*
2602                          * The page is enqueued in the physical memory
2603                          * allocator's free page queues.  Moreover, it is the
2604                          * first page in a power-of-two-sized run of
2605                          * contiguous free pages.  Add these pages to the end
2606                          * of the current run, and jump ahead.
2607                          */
2608                         run_ext = 1 << order;
2609                         m_inc = 1 << order;
2610                 } else {
2611                         /*
2612                          * Skip the page for one of the following reasons: (1)
2613                          * It is enqueued in the physical memory allocator's
2614                          * free page queues.  However, it is not the first
2615                          * page in a run of contiguous free pages.  (This case
2616                          * rarely occurs because the scan is performed in
2617                          * ascending order.) (2) It is not reserved, and it is
2618                          * transitioning from free to allocated.  (Conversely,
2619                          * the transition from allocated to free for managed
2620                          * pages is blocked by the page lock.) (3) It is
2621                          * allocated but not contained by an object and not
2622                          * wired, e.g., allocated by Xen's balloon driver.
2623                          */
2624                         run_ext = 0;
2625                 }
2626
2627                 /*
2628                  * Extend or reset the current run of pages.
2629                  */
2630                 if (run_ext > 0) {
2631                         if (run_len == 0)
2632                                 m_run = m;
2633                         run_len += run_ext;
2634                 } else {
2635                         if (run_len > 0) {
2636                                 m_run = NULL;
2637                                 run_len = 0;
2638                         }
2639                 }
2640         }
2641         if (run_len >= npages)
2642                 return (m_run);
2643         return (NULL);
2644 }
2645
2646 /*
2647  *      vm_page_reclaim_run:
2648  *
2649  *      Try to relocate each of the allocated virtual pages within the
2650  *      specified run of physical pages to a new physical address.  Free the
2651  *      physical pages underlying the relocated virtual pages.  A virtual page
2652  *      is relocatable if and only if it could be laundered or reclaimed by
2653  *      the page daemon.  Whenever possible, a virtual page is relocated to a
2654  *      physical address above "high".
2655  *
2656  *      Returns 0 if every physical page within the run was already free or
2657  *      just freed by a successful relocation.  Otherwise, returns a non-zero
2658  *      value indicating why the last attempt to relocate a virtual page was
2659  *      unsuccessful.
2660  *
2661  *      "req_class" must be an allocation class.
2662  */
2663 static int
2664 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
2665     vm_paddr_t high)
2666 {
2667         struct vm_domain *vmd;
2668         struct spglist free;
2669         vm_object_t object;
2670         vm_paddr_t pa;
2671         vm_page_t m, m_end, m_new;
2672         int error, order, req;
2673
2674         KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class,
2675             ("req_class is not an allocation class"));
2676         SLIST_INIT(&free);
2677         error = 0;
2678         m = m_run;
2679         m_end = m_run + npages;
2680         for (; error == 0 && m < m_end; m++) {
2681                 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
2682                     ("page %p is PG_FICTITIOUS or PG_MARKER", m));
2683
2684                 /*
2685                  * Racily check for wirings.  Races are handled once the object
2686                  * lock is held and the page is unmapped.
2687                  */
2688                 if (vm_page_wired(m))
2689                         error = EBUSY;
2690                 else if ((object = atomic_load_ptr(&m->object)) != NULL) {
2691                         /*
2692                          * The page is relocated if and only if it could be
2693                          * laundered or reclaimed by the page daemon.
2694                          */
2695                         VM_OBJECT_WLOCK(object);
2696                         /* Don't care: PG_NODUMP, PG_ZERO. */
2697                         if (m->object != object ||
2698                             (object->type != OBJT_DEFAULT &&
2699                             object->type != OBJT_SWAP &&
2700                             object->type != OBJT_VNODE))
2701                                 error = EINVAL;
2702                         else if (object->memattr != VM_MEMATTR_DEFAULT)
2703                                 error = EINVAL;
2704                         else if (vm_page_queue(m) != PQ_NONE &&
2705                             vm_page_tryxbusy(m) != 0) {
2706                                 if (vm_page_wired(m)) {
2707                                         vm_page_xunbusy(m);
2708                                         error = EBUSY;
2709                                         goto unlock;
2710                                 }
2711                                 KASSERT(pmap_page_get_memattr(m) ==
2712                                     VM_MEMATTR_DEFAULT,
2713                                     ("page %p has an unexpected memattr", m));
2714                                 KASSERT(m->oflags == 0,
2715                                     ("page %p has unexpected oflags", m));
2716                                 /* Don't care: PGA_NOSYNC. */
2717                                 if (!vm_page_none_valid(m)) {
2718                                         /*
2719                                          * First, try to allocate a new page
2720                                          * that is above "high".  Failing
2721                                          * that, try to allocate a new page
2722                                          * that is below "m_run".  Allocate
2723                                          * the new page between the end of
2724                                          * "m_run" and "high" only as a last
2725                                          * resort.
2726                                          */
2727                                         req = req_class | VM_ALLOC_NOOBJ;
2728                                         if ((m->flags & PG_NODUMP) != 0)
2729                                                 req |= VM_ALLOC_NODUMP;
2730                                         if (trunc_page(high) !=
2731                                             ~(vm_paddr_t)PAGE_MASK) {
2732                                                 m_new = vm_page_alloc_contig(
2733                                                     NULL, 0, req, 1,
2734                                                     round_page(high),
2735                                                     ~(vm_paddr_t)0,
2736                                                     PAGE_SIZE, 0,
2737                                                     VM_MEMATTR_DEFAULT);
2738                                         } else
2739                                                 m_new = NULL;
2740                                         if (m_new == NULL) {
2741                                                 pa = VM_PAGE_TO_PHYS(m_run);
2742                                                 m_new = vm_page_alloc_contig(
2743                                                     NULL, 0, req, 1,
2744                                                     0, pa - 1, PAGE_SIZE, 0,
2745                                                     VM_MEMATTR_DEFAULT);
2746                                         }
2747                                         if (m_new == NULL) {
2748                                                 pa += ptoa(npages);
2749                                                 m_new = vm_page_alloc_contig(
2750                                                     NULL, 0, req, 1,
2751                                                     pa, high, PAGE_SIZE, 0,
2752                                                     VM_MEMATTR_DEFAULT);
2753                                         }
2754                                         if (m_new == NULL) {
2755                                                 vm_page_xunbusy(m);
2756                                                 error = ENOMEM;
2757                                                 goto unlock;
2758                                         }
2759
2760                                         /*
2761                                          * Unmap the page and check for new
2762                                          * wirings that may have been acquired
2763                                          * through a pmap lookup.
2764                                          */
2765                                         if (object->ref_count != 0 &&
2766                                             !vm_page_try_remove_all(m)) {
2767                                                 vm_page_xunbusy(m);
2768                                                 vm_page_free(m_new);
2769                                                 error = EBUSY;
2770                                                 goto unlock;
2771                                         }
2772
2773                                         /*
2774                                          * Replace "m" with the new page.  For
2775                                          * vm_page_replace(), "m" must be busy
2776                                          * and dequeued.  Finally, change "m"
2777                                          * as if vm_page_free() was called.
2778                                          */
2779                                         m_new->a.flags = m->a.flags &
2780                                             ~PGA_QUEUE_STATE_MASK;
2781                                         KASSERT(m_new->oflags == VPO_UNMANAGED,
2782                                             ("page %p is managed", m_new));
2783                                         m_new->oflags = 0;
2784                                         pmap_copy_page(m, m_new);
2785                                         m_new->valid = m->valid;
2786                                         m_new->dirty = m->dirty;
2787                                         m->flags &= ~PG_ZERO;
2788                                         vm_page_dequeue(m);
2789                                         if (vm_page_replace_hold(m_new, object,
2790                                             m->pindex, m) &&
2791                                             vm_page_free_prep(m))
2792                                                 SLIST_INSERT_HEAD(&free, m,
2793                                                     plinks.s.ss);
2794
2795                                         /*
2796                                          * The new page must be deactivated
2797                                          * before the object is unlocked.
2798                                          */
2799                                         vm_page_deactivate(m_new);
2800                                 } else {
2801                                         m->flags &= ~PG_ZERO;
2802                                         vm_page_dequeue(m);
2803                                         if (vm_page_free_prep(m))
2804                                                 SLIST_INSERT_HEAD(&free, m,
2805                                                     plinks.s.ss);
2806                                         KASSERT(m->dirty == 0,
2807                                             ("page %p is dirty", m));
2808                                 }
2809                         } else
2810                                 error = EBUSY;
2811 unlock:
2812                         VM_OBJECT_WUNLOCK(object);
2813                 } else {
2814                         MPASS(vm_phys_domain(m) == domain);
2815                         vmd = VM_DOMAIN(domain);
2816                         vm_domain_free_lock(vmd);
2817                         order = m->order;
2818                         if (order < VM_NFREEORDER) {
2819                                 /*
2820                                  * The page is enqueued in the physical memory
2821                                  * allocator's free page queues.  Moreover, it
2822                                  * is the first page in a power-of-two-sized
2823                                  * run of contiguous free pages.  Jump ahead
2824                                  * to the last page within that run, and
2825                                  * continue from there.
2826                                  */
2827                                 m += (1 << order) - 1;
2828                         }
2829 #if VM_NRESERVLEVEL > 0
2830                         else if (vm_reserv_is_page_free(m))
2831                                 order = 0;
2832 #endif
2833                         vm_domain_free_unlock(vmd);
2834                         if (order == VM_NFREEORDER)
2835                                 error = EINVAL;
2836                 }
2837         }
2838         if ((m = SLIST_FIRST(&free)) != NULL) {
2839                 int cnt;
2840
2841                 vmd = VM_DOMAIN(domain);
2842                 cnt = 0;
2843                 vm_domain_free_lock(vmd);
2844                 do {
2845                         MPASS(vm_phys_domain(m) == domain);
2846                         SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2847                         vm_phys_free_pages(m, 0);
2848                         cnt++;
2849                 } while ((m = SLIST_FIRST(&free)) != NULL);
2850                 vm_domain_free_unlock(vmd);
2851                 vm_domain_freecnt_inc(vmd, cnt);
2852         }
2853         return (error);
2854 }
2855
2856 #define NRUNS   16
2857
2858 CTASSERT(powerof2(NRUNS));
2859
2860 #define RUN_INDEX(count)        ((count) & (NRUNS - 1))
2861
2862 #define MIN_RECLAIM     8
2863
2864 /*
2865  *      vm_page_reclaim_contig:
2866  *
2867  *      Reclaim allocated, contiguous physical memory satisfying the specified
2868  *      conditions by relocating the virtual pages using that physical memory.
2869  *      Returns true if reclamation is successful and false otherwise.  Since
2870  *      relocation requires the allocation of physical pages, reclamation may
2871  *      fail due to a shortage of free pages.  When reclamation fails, callers
2872  *      are expected to perform vm_wait() before retrying a failed allocation
2873  *      operation, e.g., vm_page_alloc_contig().
2874  *
2875  *      The caller must always specify an allocation class through "req".
2876  *
2877  *      allocation classes:
2878  *      VM_ALLOC_NORMAL         normal process request
2879  *      VM_ALLOC_SYSTEM         system *really* needs a page
2880  *      VM_ALLOC_INTERRUPT      interrupt time request
2881  *
2882  *      The optional allocation flags are ignored.
2883  *
2884  *      "npages" must be greater than zero.  Both "alignment" and "boundary"
2885  *      must be a power of two.
2886  */
2887 bool
2888 vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
2889     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
2890 {
2891         struct vm_domain *vmd;
2892         vm_paddr_t curr_low;
2893         vm_page_t m_run, m_runs[NRUNS];
2894         u_long count, reclaimed;
2895         int error, i, options, req_class;
2896
2897         KASSERT(npages > 0, ("npages is 0"));
2898         KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
2899         KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
2900         req_class = req & VM_ALLOC_CLASS_MASK;
2901
2902         /*
2903          * The page daemon is allowed to dig deeper into the free page list.
2904          */
2905         if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
2906                 req_class = VM_ALLOC_SYSTEM;
2907
2908         /*
2909          * Return if the number of free pages cannot satisfy the requested
2910          * allocation.
2911          */
2912         vmd = VM_DOMAIN(domain);
2913         count = vmd->vmd_free_count;
2914         if (count < npages + vmd->vmd_free_reserved || (count < npages +
2915             vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
2916             (count < npages && req_class == VM_ALLOC_INTERRUPT))
2917                 return (false);
2918
2919         /*
2920          * Scan up to three times, relaxing the restrictions ("options") on
2921          * the reclamation of reservations and superpages each time.
2922          */
2923         for (options = VPSC_NORESERV;;) {
2924                 /*
2925                  * Find the highest runs that satisfy the given constraints
2926                  * and restrictions, and record them in "m_runs".
2927                  */
2928                 curr_low = low;
2929                 count = 0;
2930                 for (;;) {
2931                         m_run = vm_phys_scan_contig(domain, npages, curr_low,
2932                             high, alignment, boundary, options);
2933                         if (m_run == NULL)
2934                                 break;
2935                         curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages);
2936                         m_runs[RUN_INDEX(count)] = m_run;
2937                         count++;
2938                 }
2939
2940                 /*
2941                  * Reclaim the highest runs in LIFO (descending) order until
2942                  * the number of reclaimed pages, "reclaimed", is at least
2943                  * MIN_RECLAIM.  Reset "reclaimed" each time because each
2944                  * reclamation is idempotent, and runs will (likely) recur
2945                  * from one scan to the next as restrictions are relaxed.
2946                  */
2947                 reclaimed = 0;
2948                 for (i = 0; count > 0 && i < NRUNS; i++) {
2949                         count--;
2950                         m_run = m_runs[RUN_INDEX(count)];
2951                         error = vm_page_reclaim_run(req_class, domain, npages,
2952                             m_run, high);
2953                         if (error == 0) {
2954                                 reclaimed += npages;
2955                                 if (reclaimed >= MIN_RECLAIM)
2956                                         return (true);
2957                         }
2958                 }
2959
2960                 /*
2961                  * Either relax the restrictions on the next scan or return if
2962                  * the last scan had no restrictions.
2963                  */
2964                 if (options == VPSC_NORESERV)
2965                         options = VPSC_NOSUPER;
2966                 else if (options == VPSC_NOSUPER)
2967                         options = VPSC_ANY;
2968                 else if (options == VPSC_ANY)
2969                         return (reclaimed != 0);
2970         }
2971 }
2972
2973 bool
2974 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
2975     u_long alignment, vm_paddr_t boundary)
2976 {
2977         struct vm_domainset_iter di;
2978         int domain;
2979         bool ret;
2980
2981         vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
2982         do {
2983                 ret = vm_page_reclaim_contig_domain(domain, req, npages, low,
2984                     high, alignment, boundary);
2985                 if (ret)
2986                         break;
2987         } while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
2988
2989         return (ret);
2990 }
2991
2992 /*
2993  * Set the domain in the appropriate page level domainset.
2994  */
2995 void
2996 vm_domain_set(struct vm_domain *vmd)
2997 {
2998
2999         mtx_lock(&vm_domainset_lock);
3000         if (!vmd->vmd_minset && vm_paging_min(vmd)) {
3001                 vmd->vmd_minset = 1;
3002                 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains);
3003         }
3004         if (!vmd->vmd_severeset && vm_paging_severe(vmd)) {
3005                 vmd->vmd_severeset = 1;
3006                 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains);
3007         }
3008         mtx_unlock(&vm_domainset_lock);
3009 }
3010
3011 /*
3012  * Clear the domain from the appropriate page level domainset.
3013  */
3014 void
3015 vm_domain_clear(struct vm_domain *vmd)
3016 {
3017
3018         mtx_lock(&vm_domainset_lock);
3019         if (vmd->vmd_minset && !vm_paging_min(vmd)) {
3020                 vmd->vmd_minset = 0;
3021                 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains);
3022                 if (vm_min_waiters != 0) {
3023                         vm_min_waiters = 0;
3024                         wakeup(&vm_min_domains);
3025                 }
3026         }
3027         if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
3028                 vmd->vmd_severeset = 0;
3029                 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains);
3030                 if (vm_severe_waiters != 0) {
3031                         vm_severe_waiters = 0;
3032                         wakeup(&vm_severe_domains);
3033                 }
3034         }
3035
3036         /*
3037          * If pageout daemon needs pages, then tell it that there are
3038          * some free.
3039          */
3040         if (vmd->vmd_pageout_pages_needed &&
3041             vmd->vmd_free_count >= vmd->vmd_pageout_free_min) {
3042                 wakeup(&vmd->vmd_pageout_pages_needed);
3043                 vmd->vmd_pageout_pages_needed = 0;
3044         }
3045
3046         /* See comments in vm_wait_doms(). */
3047         if (vm_pageproc_waiters) {
3048                 vm_pageproc_waiters = 0;
3049                 wakeup(&vm_pageproc_waiters);
3050         }
3051         mtx_unlock(&vm_domainset_lock);
3052 }
3053
3054 /*
3055  * Wait for free pages to exceed the min threshold globally.
3056  */
3057 void
3058 vm_wait_min(void)
3059 {
3060
3061         mtx_lock(&vm_domainset_lock);
3062         while (vm_page_count_min()) {
3063                 vm_min_waiters++;
3064                 msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0);
3065         }
3066         mtx_unlock(&vm_domainset_lock);
3067 }
3068
3069 /*
3070  * Wait for free pages to exceed the severe threshold globally.
3071  */
3072 void
3073 vm_wait_severe(void)
3074 {
3075
3076         mtx_lock(&vm_domainset_lock);
3077         while (vm_page_count_severe()) {
3078                 vm_severe_waiters++;
3079                 msleep(&vm_severe_domains, &vm_domainset_lock, PVM,
3080                     "vmwait", 0);
3081         }
3082         mtx_unlock(&vm_domainset_lock);
3083 }
3084
3085 u_int
3086 vm_wait_count(void)
3087 {
3088
3089         return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters);
3090 }
3091
3092 void
3093 vm_wait_doms(const domainset_t *wdoms)
3094 {
3095
3096         /*
3097          * We use racey wakeup synchronization to avoid expensive global
3098          * locking for the pageproc when sleeping with a non-specific vm_wait.
3099          * To handle this, we only sleep for one tick in this instance.  It
3100          * is expected that most allocations for the pageproc will come from
3101          * kmem or vm_page_grab* which will use the more specific and
3102          * race-free vm_wait_domain().
3103          */
3104         if (curproc == pageproc) {
3105                 mtx_lock(&vm_domainset_lock);
3106                 vm_pageproc_waiters++;
3107                 msleep(&vm_pageproc_waiters, &vm_domainset_lock, PVM | PDROP,
3108                     "pageprocwait", 1);
3109         } else {
3110                 /*
3111                  * XXX Ideally we would wait only until the allocation could
3112                  * be satisfied.  This condition can cause new allocators to
3113                  * consume all freed pages while old allocators wait.
3114                  */
3115                 mtx_lock(&vm_domainset_lock);
3116                 if (vm_page_count_min_set(wdoms)) {
3117                         vm_min_waiters++;
3118                         msleep(&vm_min_domains, &vm_domainset_lock,
3119                             PVM | PDROP, "vmwait", 0);
3120                 } else
3121                         mtx_unlock(&vm_domainset_lock);
3122         }
3123 }
3124
3125 /*
3126  *      vm_wait_domain:
3127  *
3128  *      Sleep until free pages are available for allocation.
3129  *      - Called in various places after failed memory allocations.
3130  */
3131 void
3132 vm_wait_domain(int domain)
3133 {
3134         struct vm_domain *vmd;
3135         domainset_t wdom;
3136
3137         vmd = VM_DOMAIN(domain);
3138         vm_domain_free_assert_unlocked(vmd);
3139
3140         if (curproc == pageproc) {
3141                 mtx_lock(&vm_domainset_lock);
3142                 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) {
3143                         vmd->vmd_pageout_pages_needed = 1;
3144                         msleep(&vmd->vmd_pageout_pages_needed,
3145                             &vm_domainset_lock, PDROP | PSWP, "VMWait", 0);
3146                 } else
3147                         mtx_unlock(&vm_domainset_lock);
3148         } else {
3149                 if (pageproc == NULL)
3150                         panic("vm_wait in early boot");
3151                 DOMAINSET_ZERO(&wdom);
3152                 DOMAINSET_SET(vmd->vmd_domain, &wdom);
3153                 vm_wait_doms(&wdom);
3154         }
3155 }
3156
3157 /*
3158  *      vm_wait:
3159  *
3160  *      Sleep until free pages are available for allocation in the
3161  *      affinity domains of the obj.  If obj is NULL, the domain set
3162  *      for the calling thread is used.
3163  *      Called in various places after failed memory allocations.
3164  */
3165 void
3166 vm_wait(vm_object_t obj)
3167 {
3168         struct domainset *d;
3169
3170         d = NULL;
3171
3172         /*
3173          * Carefully fetch pointers only once: the struct domainset
3174          * itself is ummutable but the pointer might change.
3175          */
3176         if (obj != NULL)
3177                 d = obj->domain.dr_policy;
3178         if (d == NULL)
3179                 d = curthread->td_domain.dr_policy;
3180
3181         vm_wait_doms(&d->ds_mask);
3182 }
3183
3184 /*
3185  *      vm_domain_alloc_fail:
3186  *
3187  *      Called when a page allocation function fails.  Informs the
3188  *      pagedaemon and performs the requested wait.  Requires the
3189  *      domain_free and object lock on entry.  Returns with the
3190  *      object lock held and free lock released.  Returns an error when
3191  *      retry is necessary.
3192  *
3193  */
3194 static int
3195 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req)
3196 {
3197
3198         vm_domain_free_assert_unlocked(vmd);
3199
3200         atomic_add_int(&vmd->vmd_pageout_deficit,
3201             max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
3202         if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
3203                 if (object != NULL) 
3204                         VM_OBJECT_WUNLOCK(object);
3205                 vm_wait_domain(vmd->vmd_domain);
3206                 if (object != NULL) 
3207                         VM_OBJECT_WLOCK(object);
3208                 if (req & VM_ALLOC_WAITOK)
3209                         return (EAGAIN);
3210         }
3211
3212         return (0);
3213 }
3214
3215 /*
3216  *      vm_waitpfault:
3217  *
3218  *      Sleep until free pages are available for allocation.
3219  *      - Called only in vm_fault so that processes page faulting
3220  *        can be easily tracked.
3221  *      - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
3222  *        processes will be able to grab memory first.  Do not change
3223  *        this balance without careful testing first.
3224  */
3225 void
3226 vm_waitpfault(struct domainset *dset, int timo)
3227 {
3228
3229         /*
3230          * XXX Ideally we would wait only until the allocation could
3231          * be satisfied.  This condition can cause new allocators to
3232          * consume all freed pages while old allocators wait.
3233          */
3234         mtx_lock(&vm_domainset_lock);
3235         if (vm_page_count_min_set(&dset->ds_mask)) {
3236                 vm_min_waiters++;
3237                 msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP,
3238                     "pfault", timo);
3239         } else
3240                 mtx_unlock(&vm_domainset_lock);
3241 }
3242
3243 static struct vm_pagequeue *
3244 _vm_page_pagequeue(vm_page_t m, uint8_t queue)
3245 {
3246
3247         return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
3248 }
3249
3250 #ifdef INVARIANTS
3251 static struct vm_pagequeue *
3252 vm_page_pagequeue(vm_page_t m)
3253 {
3254
3255         return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue));
3256 }
3257 #endif
3258
3259 static __always_inline bool
3260 vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
3261 {
3262         vm_page_astate_t tmp;
3263
3264         tmp = *old;
3265         do {
3266                 if (__predict_true(vm_page_astate_fcmpset(m, old, new)))
3267                         return (true);
3268                 counter_u64_add(pqstate_commit_retries, 1);
3269         } while (old->_bits == tmp._bits);
3270
3271         return (false);
3272 }
3273
3274 /*
3275  * Do the work of committing a queue state update that moves the page out of
3276  * its current queue.
3277  */
3278 static bool
3279 _vm_page_pqstate_commit_dequeue(struct vm_pagequeue *pq, vm_page_t m,
3280     vm_page_astate_t *old, vm_page_astate_t new)
3281 {
3282         vm_page_t next;
3283
3284         vm_pagequeue_assert_locked(pq);
3285         KASSERT(vm_page_pagequeue(m) == pq,
3286             ("%s: queue %p does not match page %p", __func__, pq, m));
3287         KASSERT(old->queue != PQ_NONE && new.queue != old->queue,
3288             ("%s: invalid queue indices %d %d",
3289             __func__, old->queue, new.queue));
3290
3291         /*
3292          * Once the queue index of the page changes there is nothing
3293          * synchronizing with further updates to the page's physical
3294          * queue state.  Therefore we must speculatively remove the page
3295          * from the queue now and be prepared to roll back if the queue
3296          * state update fails.  If the page is not physically enqueued then
3297          * we just update its queue index.
3298          */
3299         if ((old->flags & PGA_ENQUEUED) != 0) {
3300                 new.flags &= ~PGA_ENQUEUED;
3301                 next = TAILQ_NEXT(m, plinks.q);
3302                 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3303                 vm_pagequeue_cnt_dec(pq);
3304                 if (!vm_page_pqstate_fcmpset(m, old, new)) {
3305                         if (next == NULL)
3306                                 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
3307                         else
3308                                 TAILQ_INSERT_BEFORE(next, m, plinks.q);
3309                         vm_pagequeue_cnt_inc(pq);
3310                         return (false);
3311                 } else {
3312                         return (true);
3313                 }
3314         } else {
3315                 return (vm_page_pqstate_fcmpset(m, old, new));
3316         }
3317 }
3318
3319 static bool
3320 vm_page_pqstate_commit_dequeue(vm_page_t m, vm_page_astate_t *old,
3321     vm_page_astate_t new)
3322 {
3323         struct vm_pagequeue *pq;
3324         vm_page_astate_t as;
3325         bool ret;
3326
3327         pq = _vm_page_pagequeue(m, old->queue);
3328
3329         /*
3330          * The queue field and PGA_ENQUEUED flag are stable only so long as the
3331          * corresponding page queue lock is held.
3332          */
3333         vm_pagequeue_lock(pq);
3334         as = vm_page_astate_load(m);
3335         if (__predict_false(as._bits != old->_bits)) {
3336                 *old = as;
3337                 ret = false;
3338         } else {
3339                 ret = _vm_page_pqstate_commit_dequeue(pq, m, old, new);
3340         }
3341         vm_pagequeue_unlock(pq);
3342         return (ret);
3343 }
3344
3345 /*
3346  * Commit a queue state update that enqueues or requeues a page.
3347  */
3348 static bool
3349 _vm_page_pqstate_commit_requeue(struct vm_pagequeue *pq, vm_page_t m,
3350     vm_page_astate_t *old, vm_page_astate_t new)
3351 {
3352         struct vm_domain *vmd;
3353
3354         vm_pagequeue_assert_locked(pq);
3355         KASSERT(old->queue != PQ_NONE && new.queue == old->queue,
3356             ("%s: invalid queue indices %d %d",
3357             __func__, old->queue, new.queue));
3358
3359         new.flags |= PGA_ENQUEUED;
3360         if (!vm_page_pqstate_fcmpset(m, old, new))
3361                 return (false);
3362
3363         if ((old->flags & PGA_ENQUEUED) != 0)
3364                 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
3365         else
3366                 vm_pagequeue_cnt_inc(pq);
3367
3368         /*
3369          * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE.  In particular, if
3370          * both flags are set in close succession, only PGA_REQUEUE_HEAD will be
3371          * applied, even if it was set first.
3372          */
3373         if ((old->flags & PGA_REQUEUE_HEAD) != 0) {
3374                 vmd = vm_pagequeue_domain(m);
3375                 KASSERT(pq == &vmd->vmd_pagequeues[PQ_INACTIVE],
3376                     ("%s: invalid page queue for page %p", __func__, m));
3377                 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
3378         } else {
3379                 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
3380         }
3381         return (true);
3382 }
3383
3384 /*
3385  * Commit a queue state update that encodes a request for a deferred queue
3386  * operation.
3387  */
3388 static bool
3389 vm_page_pqstate_commit_request(vm_page_t m, vm_page_astate_t *old,
3390     vm_page_astate_t new)
3391 {
3392
3393         KASSERT(old->queue == new.queue || new.queue != PQ_NONE,
3394             ("%s: invalid state, queue %d flags %x",
3395             __func__, new.queue, new.flags));
3396
3397         if (old->_bits != new._bits &&
3398             !vm_page_pqstate_fcmpset(m, old, new))
3399                 return (false);
3400         vm_page_pqbatch_submit(m, new.queue);
3401         return (true);
3402 }
3403
3404 /*
3405  * A generic queue state update function.  This handles more cases than the
3406  * specialized functions above.
3407  */
3408 bool
3409 vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
3410 {
3411
3412         if (old->_bits == new._bits)
3413                 return (true);
3414
3415         if (old->queue != PQ_NONE && new.queue != old->queue) {
3416                 if (!vm_page_pqstate_commit_dequeue(m, old, new))
3417                         return (false);
3418                 if (new.queue != PQ_NONE)
3419                         vm_page_pqbatch_submit(m, new.queue);
3420         } else {
3421                 if (!vm_page_pqstate_fcmpset(m, old, new))
3422                         return (false);
3423                 if (new.queue != PQ_NONE &&
3424                     ((new.flags & ~old->flags) & PGA_QUEUE_OP_MASK) != 0)
3425                         vm_page_pqbatch_submit(m, new.queue);
3426         }
3427         return (true);
3428 }
3429
3430 /*
3431  * Apply deferred queue state updates to a page.
3432  */
3433 static inline void
3434 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue)
3435 {
3436         vm_page_astate_t new, old;
3437
3438         CRITICAL_ASSERT(curthread);
3439         vm_pagequeue_assert_locked(pq);
3440         KASSERT(queue < PQ_COUNT,
3441             ("%s: invalid queue index %d", __func__, queue));
3442         KASSERT(pq == _vm_page_pagequeue(m, queue),
3443             ("%s: page %p does not belong to queue %p", __func__, m, pq));
3444
3445         for (old = vm_page_astate_load(m);;) {
3446                 if (__predict_false(old.queue != queue ||
3447                     (old.flags & PGA_QUEUE_OP_MASK) == 0)) {
3448                         counter_u64_add(queue_nops, 1);
3449                         break;
3450                 }
3451                 KASSERT(old.queue != PQ_NONE || (old.flags & PGA_QUEUE_STATE_MASK) == 0,
3452                     ("%s: page %p has unexpected queue state", __func__, m));
3453
3454                 new = old;
3455                 if ((old.flags & PGA_DEQUEUE) != 0) {
3456                         new.flags &= ~PGA_QUEUE_OP_MASK;
3457                         new.queue = PQ_NONE;
3458                         if (__predict_true(_vm_page_pqstate_commit_dequeue(pq,
3459                             m, &old, new))) {
3460                                 counter_u64_add(queue_ops, 1);
3461                                 break;
3462                         }
3463                 } else {
3464                         new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD);
3465                         if (__predict_true(_vm_page_pqstate_commit_requeue(pq,
3466                             m, &old, new))) {
3467                                 counter_u64_add(queue_ops, 1);
3468                                 break;
3469                         }
3470                 }
3471         }
3472 }
3473
3474 static void
3475 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
3476     uint8_t queue)
3477 {
3478         int i;
3479
3480         for (i = 0; i < bq->bq_cnt; i++)
3481                 vm_pqbatch_process_page(pq, bq->bq_pa[i], queue);
3482         vm_batchqueue_init(bq);
3483 }
3484
3485 /*
3486  *      vm_page_pqbatch_submit:         [ internal use only ]
3487  *
3488  *      Enqueue a page in the specified page queue's batched work queue.
3489  *      The caller must have encoded the requested operation in the page
3490  *      structure's a.flags field.
3491  */
3492 void
3493 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
3494 {
3495         struct vm_batchqueue *bq;
3496         struct vm_pagequeue *pq;
3497         int domain;
3498
3499         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3500             ("page %p is unmanaged", m));
3501         KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
3502
3503         domain = vm_phys_domain(m);
3504         pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue];
3505
3506         critical_enter();
3507         bq = DPCPU_PTR(pqbatch[domain][queue]);
3508         if (vm_batchqueue_insert(bq, m)) {
3509                 critical_exit();
3510                 return;
3511         }
3512         critical_exit();
3513         vm_pagequeue_lock(pq);
3514         critical_enter();
3515         bq = DPCPU_PTR(pqbatch[domain][queue]);
3516         vm_pqbatch_process(pq, bq, queue);
3517         vm_pqbatch_process_page(pq, m, queue);
3518         vm_pagequeue_unlock(pq);
3519         critical_exit();
3520 }
3521
3522 /*
3523  *      vm_page_pqbatch_drain:          [ internal use only ]
3524  *
3525  *      Force all per-CPU page queue batch queues to be drained.  This is
3526  *      intended for use in severe memory shortages, to ensure that pages
3527  *      do not remain stuck in the batch queues.
3528  */
3529 void
3530 vm_page_pqbatch_drain(void)
3531 {
3532         struct thread *td;
3533         struct vm_domain *vmd;
3534         struct vm_pagequeue *pq;
3535         int cpu, domain, queue;
3536
3537         td = curthread;
3538         CPU_FOREACH(cpu) {
3539                 thread_lock(td);
3540                 sched_bind(td, cpu);
3541                 thread_unlock(td);
3542
3543                 for (domain = 0; domain < vm_ndomains; domain++) {
3544                         vmd = VM_DOMAIN(domain);
3545                         for (queue = 0; queue < PQ_COUNT; queue++) {
3546                                 pq = &vmd->vmd_pagequeues[queue];
3547                                 vm_pagequeue_lock(pq);
3548                                 critical_enter();
3549                                 vm_pqbatch_process(pq,
3550                                     DPCPU_PTR(pqbatch[domain][queue]), queue);
3551                                 critical_exit();
3552                                 vm_pagequeue_unlock(pq);
3553                         }
3554                 }
3555         }
3556         thread_lock(td);
3557         sched_unbind(td);
3558         thread_unlock(td);
3559 }
3560
3561 /*
3562  *      vm_page_dequeue_deferred:       [ internal use only ]
3563  *
3564  *      Request removal of the given page from its current page
3565  *      queue.  Physical removal from the queue may be deferred
3566  *      indefinitely.
3567  *
3568  *      The page must be locked.
3569  */
3570 void
3571 vm_page_dequeue_deferred(vm_page_t m)
3572 {
3573         vm_page_astate_t new, old;
3574
3575         old = vm_page_astate_load(m);
3576         do {
3577                 if (old.queue == PQ_NONE) {
3578                         KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0,
3579                             ("%s: page %p has unexpected queue state",
3580                             __func__, m));
3581                         break;
3582                 }
3583                 new = old;
3584                 new.flags |= PGA_DEQUEUE;
3585         } while (!vm_page_pqstate_commit_request(m, &old, new));
3586 }
3587
3588 /*
3589  *      vm_page_dequeue:
3590  *
3591  *      Remove the page from whichever page queue it's in, if any, before
3592  *      returning.
3593  */
3594 void
3595 vm_page_dequeue(vm_page_t m)
3596 {
3597         vm_page_astate_t new, old;
3598
3599         old = vm_page_astate_load(m);
3600         do {
3601                 if (old.queue == PQ_NONE) {
3602                         KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0,
3603                             ("%s: page %p has unexpected queue state",
3604                             __func__, m));
3605                         break;
3606                 }
3607                 new = old;
3608                 new.flags &= ~PGA_QUEUE_OP_MASK;
3609                 new.queue = PQ_NONE;
3610         } while (!vm_page_pqstate_commit_dequeue(m, &old, new));
3611
3612 }
3613
3614 /*
3615  * Schedule the given page for insertion into the specified page queue.
3616  * Physical insertion of the page may be deferred indefinitely.
3617  */
3618 static void
3619 vm_page_enqueue(vm_page_t m, uint8_t queue)
3620 {
3621
3622         KASSERT(m->a.queue == PQ_NONE &&
3623             (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
3624             ("%s: page %p is already enqueued", __func__, m));
3625         KASSERT(m->ref_count > 0,
3626             ("%s: page %p does not carry any references", __func__, m));
3627
3628         m->a.queue = queue;
3629         if ((m->a.flags & PGA_REQUEUE) == 0)
3630                 vm_page_aflag_set(m, PGA_REQUEUE);
3631         vm_page_pqbatch_submit(m, queue);
3632 }
3633
3634 /*
3635  *      vm_page_free_prep:
3636  *
3637  *      Prepares the given page to be put on the free list,
3638  *      disassociating it from any VM object. The caller may return
3639  *      the page to the free list only if this function returns true.
3640  *
3641  *      The object must be locked.  The page must be locked if it is
3642  *      managed.
3643  */
3644 static bool
3645 vm_page_free_prep(vm_page_t m)
3646 {
3647
3648         /*
3649          * Synchronize with threads that have dropped a reference to this
3650          * page.
3651          */
3652         atomic_thread_fence_acq();
3653
3654 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP)
3655         if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
3656                 uint64_t *p;
3657                 int i;
3658                 p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3659                 for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++)
3660                         KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx",
3661                             m, i, (uintmax_t)*p));
3662         }
3663 #endif
3664         if ((m->oflags & VPO_UNMANAGED) == 0) {
3665                 KASSERT(!pmap_page_is_mapped(m),
3666                     ("vm_page_free_prep: freeing mapped page %p", m));
3667                 KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0,
3668                     ("vm_page_free_prep: mapping flags set in page %p", m));
3669         } else {
3670                 KASSERT(m->a.queue == PQ_NONE,
3671                     ("vm_page_free_prep: unmanaged page %p is queued", m));
3672         }
3673         VM_CNT_INC(v_tfree);
3674
3675         if (m->object != NULL) {
3676                 KASSERT(((m->oflags & VPO_UNMANAGED) != 0) ==
3677                     ((m->object->flags & OBJ_UNMANAGED) != 0),
3678                     ("vm_page_free_prep: managed flag mismatch for page %p",
3679                     m));
3680                 vm_page_assert_xbusied(m);
3681
3682                 /*
3683                  * The object reference can be released without an atomic
3684                  * operation.
3685                  */
3686                 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
3687                     m->ref_count == VPRC_OBJREF,
3688                     ("vm_page_free_prep: page %p has unexpected ref_count %u",
3689                     m, m->ref_count));
3690                 vm_page_object_remove(m);
3691                 m->object = NULL;
3692                 m->ref_count -= VPRC_OBJREF;
3693         } else
3694                 vm_page_assert_unbusied(m);
3695
3696         vm_page_busy_free(m);
3697
3698         /*
3699          * If fictitious remove object association and
3700          * return.
3701          */
3702         if ((m->flags & PG_FICTITIOUS) != 0) {
3703                 KASSERT(m->ref_count == 1,
3704                     ("fictitious page %p is referenced", m));
3705                 KASSERT(m->a.queue == PQ_NONE,
3706                     ("fictitious page %p is queued", m));
3707                 return (false);
3708         }
3709
3710         /*
3711          * Pages need not be dequeued before they are returned to the physical
3712          * memory allocator, but they must at least be marked for a deferred
3713          * dequeue.
3714          */
3715         if ((m->oflags & VPO_UNMANAGED) == 0)
3716                 vm_page_dequeue_deferred(m);
3717
3718         m->valid = 0;
3719         vm_page_undirty(m);
3720
3721         if (m->ref_count != 0)
3722                 panic("vm_page_free_prep: page %p has references", m);
3723
3724         /*
3725          * Restore the default memory attribute to the page.
3726          */
3727         if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
3728                 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
3729
3730 #if VM_NRESERVLEVEL > 0
3731         /*
3732          * Determine whether the page belongs to a reservation.  If the page was
3733          * allocated from a per-CPU cache, it cannot belong to a reservation, so
3734          * as an optimization, we avoid the check in that case.
3735          */
3736         if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m))
3737                 return (false);
3738 #endif
3739
3740         return (true);
3741 }
3742
3743 /*
3744  *      vm_page_free_toq:
3745  *
3746  *      Returns the given page to the free list, disassociating it
3747  *      from any VM object.
3748  *
3749  *      The object must be locked.  The page must be locked if it is
3750  *      managed.
3751  */
3752 static void
3753 vm_page_free_toq(vm_page_t m)
3754 {
3755         struct vm_domain *vmd;
3756         uma_zone_t zone;
3757
3758         if (!vm_page_free_prep(m))
3759                 return;
3760
3761         vmd = vm_pagequeue_domain(m);
3762         zone = vmd->vmd_pgcache[m->pool].zone;
3763         if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
3764                 uma_zfree(zone, m);
3765                 return;
3766         }
3767         vm_domain_free_lock(vmd);
3768         vm_phys_free_pages(m, 0);
3769         vm_domain_free_unlock(vmd);
3770         vm_domain_freecnt_inc(vmd, 1);
3771 }
3772
3773 /*
3774  *      vm_page_free_pages_toq:
3775  *
3776  *      Returns a list of pages to the free list, disassociating it
3777  *      from any VM object.  In other words, this is equivalent to
3778  *      calling vm_page_free_toq() for each page of a list of VM objects.
3779  *
3780  *      The objects must be locked.  The pages must be locked if it is
3781  *      managed.
3782  */
3783 void
3784 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count)
3785 {
3786         vm_page_t m;
3787         int count;
3788
3789         if (SLIST_EMPTY(free))
3790                 return;
3791
3792         count = 0;
3793         while ((m = SLIST_FIRST(free)) != NULL) {
3794                 count++;
3795                 SLIST_REMOVE_HEAD(free, plinks.s.ss);
3796                 vm_page_free_toq(m);
3797         }
3798
3799         if (update_wire_count)
3800                 vm_wire_sub(count);
3801 }
3802
3803 /*
3804  * Mark this page as wired down, preventing reclamation by the page daemon
3805  * or when the containing object is destroyed.
3806  */
3807 void
3808 vm_page_wire(vm_page_t m)
3809 {
3810         u_int old;
3811
3812         KASSERT(m->object != NULL,
3813             ("vm_page_wire: page %p does not belong to an object", m));
3814         if (!vm_page_busied(m) && !vm_object_busied(m->object))
3815                 VM_OBJECT_ASSERT_LOCKED(m->object);
3816         KASSERT((m->flags & PG_FICTITIOUS) == 0 ||
3817             VPRC_WIRE_COUNT(m->ref_count) >= 1,
3818             ("vm_page_wire: fictitious page %p has zero wirings", m));
3819
3820         old = atomic_fetchadd_int(&m->ref_count, 1);
3821         KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX,
3822             ("vm_page_wire: counter overflow for page %p", m));
3823         if (VPRC_WIRE_COUNT(old) == 0) {
3824                 if ((m->oflags & VPO_UNMANAGED) == 0)
3825                         vm_page_aflag_set(m, PGA_DEQUEUE);
3826                 vm_wire_add(1);
3827         }
3828 }
3829
3830 /*
3831  * Attempt to wire a mapped page following a pmap lookup of that page.
3832  * This may fail if a thread is concurrently tearing down mappings of the page.
3833  * The transient failure is acceptable because it translates to the
3834  * failure of the caller pmap_extract_and_hold(), which should be then
3835  * followed by the vm_fault() fallback, see e.g. vm_fault_quick_hold_pages().
3836  */
3837 bool
3838 vm_page_wire_mapped(vm_page_t m)
3839 {
3840         u_int old;
3841
3842         old = m->ref_count;
3843         do {
3844                 KASSERT(old > 0,
3845                     ("vm_page_wire_mapped: wiring unreferenced page %p", m));
3846                 if ((old & VPRC_BLOCKED) != 0)
3847                         return (false);
3848         } while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1));
3849
3850         if (VPRC_WIRE_COUNT(old) == 0) {
3851                 if ((m->oflags & VPO_UNMANAGED) == 0)
3852                         vm_page_aflag_set(m, PGA_DEQUEUE);
3853                 vm_wire_add(1);
3854         }
3855         return (true);
3856 }
3857
3858 /*
3859  * Release a wiring reference to a managed page.  If the page still belongs to
3860  * an object, update its position in the page queues to reflect the reference.
3861  * If the wiring was the last reference to the page, free the page.
3862  */
3863 static void
3864 vm_page_unwire_managed(vm_page_t m, uint8_t nqueue, bool noreuse)
3865 {
3866         u_int old;
3867
3868         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3869             ("%s: page %p is unmanaged", __func__, m));
3870
3871         /*
3872          * Update LRU state before releasing the wiring reference.
3873          * Use a release store when updating the reference count to
3874          * synchronize with vm_page_free_prep().
3875          */
3876         old = m->ref_count;
3877         do {
3878                 KASSERT(VPRC_WIRE_COUNT(old) > 0,
3879                     ("vm_page_unwire: wire count underflow for page %p", m));
3880
3881                 if (old > VPRC_OBJREF + 1) {
3882                         /*
3883                          * The page has at least one other wiring reference.  An
3884                          * earlier iteration of this loop may have called
3885                          * vm_page_release_toq() and cleared PGA_DEQUEUE, so
3886                          * re-set it if necessary.
3887                          */
3888                         if ((vm_page_astate_load(m).flags & PGA_DEQUEUE) == 0)
3889                                 vm_page_aflag_set(m, PGA_DEQUEUE);
3890                 } else if (old == VPRC_OBJREF + 1) {
3891                         /*
3892                          * This is the last wiring.  Clear PGA_DEQUEUE and
3893                          * update the page's queue state to reflect the
3894                          * reference.  If the page does not belong to an object
3895                          * (i.e., the VPRC_OBJREF bit is clear), we only need to
3896                          * clear leftover queue state.
3897                          */
3898                         vm_page_release_toq(m, nqueue, false);
3899                 } else if (old == 1) {
3900                         vm_page_aflag_clear(m, PGA_DEQUEUE);
3901                 }
3902         } while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1));
3903
3904         if (VPRC_WIRE_COUNT(old) == 1) {
3905                 vm_wire_sub(1);
3906                 if (old == 1)
3907                         vm_page_free(m);
3908         }
3909 }
3910
3911 /*
3912  * Release one wiring of the specified page, potentially allowing it to be
3913  * paged out.
3914  *
3915  * Only managed pages belonging to an object can be paged out.  If the number
3916  * of wirings transitions to zero and the page is eligible for page out, then
3917  * the page is added to the specified paging queue.  If the released wiring
3918  * represented the last reference to the page, the page is freed.
3919  *
3920  * A managed page must be locked.
3921  */
3922 void
3923 vm_page_unwire(vm_page_t m, uint8_t nqueue)
3924 {
3925
3926         KASSERT(nqueue < PQ_COUNT,
3927             ("vm_page_unwire: invalid queue %u request for page %p",
3928             nqueue, m));
3929
3930         if ((m->oflags & VPO_UNMANAGED) != 0) {
3931                 if (vm_page_unwire_noq(m) && m->ref_count == 0)
3932                         vm_page_free(m);
3933                 return;
3934         }
3935         vm_page_unwire_managed(m, nqueue, false);
3936 }
3937
3938 /*
3939  * Unwire a page without (re-)inserting it into a page queue.  It is up
3940  * to the caller to enqueue, requeue, or free the page as appropriate.
3941  * In most cases involving managed pages, vm_page_unwire() should be used
3942  * instead.
3943  */
3944 bool
3945 vm_page_unwire_noq(vm_page_t m)
3946 {
3947         u_int old;
3948
3949         old = vm_page_drop(m, 1);
3950         KASSERT(VPRC_WIRE_COUNT(old) != 0,
3951             ("vm_page_unref: counter underflow for page %p", m));
3952         KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1,
3953             ("vm_page_unref: missing ref on fictitious page %p", m));
3954
3955         if (VPRC_WIRE_COUNT(old) > 1)
3956                 return (false);
3957         if ((m->oflags & VPO_UNMANAGED) == 0)
3958                 vm_page_aflag_clear(m, PGA_DEQUEUE);
3959         vm_wire_sub(1);
3960         return (true);
3961 }
3962
3963 /*
3964  * Ensure that the page ends up in the specified page queue.  If the page is
3965  * active or being moved to the active queue, ensure that its act_count is
3966  * at least ACT_INIT but do not otherwise mess with it.
3967  *
3968  * A managed page must be locked.
3969  */
3970 static __always_inline void
3971 vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag)
3972 {
3973         vm_page_astate_t old, new;
3974
3975         KASSERT(m->ref_count > 0,
3976             ("%s: page %p does not carry any references", __func__, m));
3977         KASSERT(nflag == PGA_REQUEUE || nflag == PGA_REQUEUE_HEAD,
3978             ("%s: invalid flags %x", __func__, nflag));
3979
3980         if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m))
3981                 return;
3982
3983         old = vm_page_astate_load(m);
3984         do {
3985                 if ((old.flags & PGA_DEQUEUE) != 0)
3986                         break;
3987                 new = old;
3988                 new.flags &= ~PGA_QUEUE_OP_MASK;
3989                 if (nqueue == PQ_ACTIVE)
3990                         new.act_count = max(old.act_count, ACT_INIT);
3991                 if (old.queue == nqueue) {
3992                         if (nqueue != PQ_ACTIVE)
3993                                 new.flags |= nflag;
3994                 } else {
3995                         new.flags |= nflag;
3996                         new.queue = nqueue;
3997                 }
3998         } while (!vm_page_pqstate_commit(m, &old, new));
3999 }
4000
4001 /*
4002  * Put the specified page on the active list (if appropriate).
4003  */
4004 void
4005 vm_page_activate(vm_page_t m)
4006 {
4007
4008         vm_page_mvqueue(m, PQ_ACTIVE, PGA_REQUEUE);
4009 }
4010
4011 /*
4012  * Move the specified page to the tail of the inactive queue, or requeue
4013  * the page if it is already in the inactive queue.
4014  */
4015 void
4016 vm_page_deactivate(vm_page_t m)
4017 {
4018
4019         vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE);
4020 }
4021
4022 void
4023 vm_page_deactivate_noreuse(vm_page_t m)
4024 {
4025
4026         vm_page_mvqueue(m, PQ_INACTIVE, PGA_REQUEUE_HEAD);
4027 }
4028
4029 /*
4030  * Put a page in the laundry, or requeue it if it is already there.
4031  */
4032 void
4033 vm_page_launder(vm_page_t m)
4034 {
4035
4036         vm_page_mvqueue(m, PQ_LAUNDRY, PGA_REQUEUE);
4037 }
4038
4039 /*
4040  * Put a page in the PQ_UNSWAPPABLE holding queue.
4041  */
4042 void
4043 vm_page_unswappable(vm_page_t m)
4044 {
4045
4046         KASSERT(!vm_page_wired(m) && (m->oflags & VPO_UNMANAGED) == 0,
4047             ("page %p already unswappable", m));
4048
4049         vm_page_dequeue(m);
4050         vm_page_enqueue(m, PQ_UNSWAPPABLE);
4051 }
4052
4053 /*
4054  * Release a page back to the page queues in preparation for unwiring.
4055  */
4056 static void
4057 vm_page_release_toq(vm_page_t m, uint8_t nqueue, const bool noreuse)
4058 {
4059         vm_page_astate_t old, new;
4060         uint16_t nflag;
4061
4062         /*
4063          * Use a check of the valid bits to determine whether we should
4064          * accelerate reclamation of the page.  The object lock might not be
4065          * held here, in which case the check is racy.  At worst we will either
4066          * accelerate reclamation of a valid page and violate LRU, or
4067          * unnecessarily defer reclamation of an invalid page.
4068          *
4069          * If we were asked to not cache the page, place it near the head of the
4070          * inactive queue so that is reclaimed sooner.
4071          */
4072         if (noreuse || m->valid == 0) {
4073                 nqueue = PQ_INACTIVE;
4074                 nflag = PGA_REQUEUE_HEAD;
4075         } else {
4076                 nflag = PGA_REQUEUE;
4077         }
4078
4079         old = vm_page_astate_load(m);
4080         do {
4081                 new = old;
4082
4083                 /*
4084                  * If the page is already in the active queue and we are not
4085                  * trying to accelerate reclamation, simply mark it as
4086                  * referenced and avoid any queue operations.
4087                  */
4088                 new.flags &= ~PGA_QUEUE_OP_MASK;
4089                 if (nflag != PGA_REQUEUE_HEAD && old.queue == PQ_ACTIVE)
4090                         new.flags |= PGA_REFERENCED;
4091                 else {
4092                         new.flags |= nflag;
4093                         new.queue = nqueue;
4094                 }
4095         } while (!vm_page_pqstate_commit(m, &old, new));
4096 }
4097
4098 /*
4099  * Unwire a page and either attempt to free it or re-add it to the page queues.
4100  */
4101 void
4102 vm_page_release(vm_page_t m, int flags)
4103 {
4104         vm_object_t object;
4105
4106         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4107             ("vm_page_release: page %p is unmanaged", m));
4108
4109         if ((flags & VPR_TRYFREE) != 0) {
4110                 for (;;) {
4111                         object = atomic_load_ptr(&m->object);
4112                         if (object == NULL)
4113                                 break;
4114                         /* Depends on type-stability. */
4115                         if (vm_page_busied(m) || !VM_OBJECT_TRYWLOCK(object))
4116                                 break;
4117                         if (object == m->object) {
4118                                 vm_page_release_locked(m, flags);
4119                                 VM_OBJECT_WUNLOCK(object);
4120                                 return;
4121                         }
4122                         VM_OBJECT_WUNLOCK(object);
4123                 }
4124         }
4125         vm_page_unwire_managed(m, PQ_INACTIVE, flags != 0);
4126 }
4127
4128 /* See vm_page_release(). */
4129 void
4130 vm_page_release_locked(vm_page_t m, int flags)
4131 {
4132
4133         VM_OBJECT_ASSERT_WLOCKED(m->object);
4134         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4135             ("vm_page_release_locked: page %p is unmanaged", m));
4136
4137         if (vm_page_unwire_noq(m)) {
4138                 if ((flags & VPR_TRYFREE) != 0 &&
4139                     (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
4140                     m->dirty == 0 && vm_page_tryxbusy(m)) {
4141                         vm_page_free(m);
4142                 } else {
4143                         vm_page_release_toq(m, PQ_INACTIVE, flags != 0);
4144                 }
4145         }
4146 }
4147
4148 static bool
4149 vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t))
4150 {
4151         u_int old;
4152
4153         KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0,
4154             ("vm_page_try_blocked_op: page %p has no object", m));
4155         KASSERT(vm_page_busied(m),
4156             ("vm_page_try_blocked_op: page %p is not busy", m));
4157         VM_OBJECT_ASSERT_LOCKED(m->object);
4158
4159         old = m->ref_count;
4160         do {
4161                 KASSERT(old != 0,
4162                     ("vm_page_try_blocked_op: page %p has no references", m));
4163                 if (VPRC_WIRE_COUNT(old) != 0)
4164                         return (false);
4165         } while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED));
4166
4167         (op)(m);
4168
4169         /*
4170          * If the object is read-locked, new wirings may be created via an
4171          * object lookup.
4172          */
4173         old = vm_page_drop(m, VPRC_BLOCKED);
4174         KASSERT(!VM_OBJECT_WOWNED(m->object) ||
4175             old == (VPRC_BLOCKED | VPRC_OBJREF),
4176             ("vm_page_try_blocked_op: unexpected refcount value %u for %p",
4177             old, m));
4178         return (true);
4179 }
4180
4181 /*
4182  * Atomically check for wirings and remove all mappings of the page.
4183  */
4184 bool
4185 vm_page_try_remove_all(vm_page_t m)
4186 {
4187
4188         return (vm_page_try_blocked_op(m, pmap_remove_all));
4189 }
4190
4191 /*
4192  * Atomically check for wirings and remove all writeable mappings of the page.
4193  */
4194 bool
4195 vm_page_try_remove_write(vm_page_t m)
4196 {
4197
4198         return (vm_page_try_blocked_op(m, pmap_remove_write));
4199 }
4200
4201 /*
4202  * vm_page_advise
4203  *
4204  *      Apply the specified advice to the given page.
4205  *
4206  *      The object and page must be locked.
4207  */
4208 void
4209 vm_page_advise(vm_page_t m, int advice)
4210 {
4211
4212         VM_OBJECT_ASSERT_WLOCKED(m->object);
4213         if (advice == MADV_FREE)
4214                 /*
4215                  * Mark the page clean.  This will allow the page to be freed
4216                  * without first paging it out.  MADV_FREE pages are often
4217                  * quickly reused by malloc(3), so we do not do anything that
4218                  * would result in a page fault on a later access.
4219                  */
4220                 vm_page_undirty(m);
4221         else if (advice != MADV_DONTNEED) {
4222                 if (advice == MADV_WILLNEED)
4223                         vm_page_activate(m);
4224                 return;
4225         }
4226
4227         if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
4228                 vm_page_dirty(m);
4229
4230         /*
4231          * Clear any references to the page.  Otherwise, the page daemon will
4232          * immediately reactivate the page.
4233          */
4234         vm_page_aflag_clear(m, PGA_REFERENCED);
4235
4236         /*
4237          * Place clean pages near the head of the inactive queue rather than
4238          * the tail, thus defeating the queue's LRU operation and ensuring that
4239          * the page will be reused quickly.  Dirty pages not already in the
4240          * laundry are moved there.
4241          */
4242         if (m->dirty == 0)
4243                 vm_page_deactivate_noreuse(m);
4244         else if (!vm_page_in_laundry(m))
4245                 vm_page_launder(m);
4246 }
4247
4248 static inline int
4249 vm_page_grab_pflags(int allocflags)
4250 {
4251         int pflags;
4252
4253         KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
4254             (allocflags & VM_ALLOC_WIRED) != 0,
4255             ("vm_page_grab_pflags: the pages must be busied or wired"));
4256         KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4257             (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4258             ("vm_page_grab_pflags: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY "
4259             "mismatch"));
4260         pflags = allocflags &
4261             ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
4262             VM_ALLOC_NOBUSY);
4263         if ((allocflags & VM_ALLOC_NOWAIT) == 0)
4264                 pflags |= VM_ALLOC_WAITFAIL;
4265         if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
4266                 pflags |= VM_ALLOC_SBUSY;
4267
4268         return (pflags);
4269 }
4270
4271 /*
4272  * Grab a page, waiting until we are waken up due to the page
4273  * changing state.  We keep on waiting, if the page continues
4274  * to be in the object.  If the page doesn't exist, first allocate it
4275  * and then conditionally zero it.
4276  *
4277  * This routine may sleep.
4278  *
4279  * The object must be locked on entry.  The lock will, however, be released
4280  * and reacquired if the routine sleeps.
4281  */
4282 vm_page_t
4283 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
4284 {
4285         vm_page_t m;
4286         int pflags;
4287
4288         VM_OBJECT_ASSERT_WLOCKED(object);
4289         pflags = vm_page_grab_pflags(allocflags);
4290 retrylookup:
4291         if ((m = vm_page_lookup(object, pindex)) != NULL) {
4292                 if (!vm_page_acquire_flags(m, allocflags)) {
4293                         if (vm_page_busy_sleep_flags(object, m, "pgrbwt",
4294                             allocflags))
4295                                 goto retrylookup;
4296                         return (NULL);
4297                 }
4298                 goto out;
4299         }
4300         if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4301                 return (NULL);
4302         m = vm_page_alloc(object, pindex, pflags);
4303         if (m == NULL) {
4304                 if ((allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
4305                         return (NULL);
4306                 goto retrylookup;
4307         }
4308         if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
4309                 pmap_zero_page(m);
4310
4311 out:
4312         if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
4313                 if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
4314                         vm_page_sunbusy(m);
4315                 else
4316                         vm_page_xunbusy(m);
4317         }
4318         return (m);
4319 }
4320
4321 /*
4322  * Grab a page and make it valid, paging in if necessary.  Pages missing from
4323  * their pager are zero filled and validated.  If a VM_ALLOC_COUNT is supplied
4324  * and the page is not valid as many as VM_INITIAL_PAGEIN pages can be brought
4325  * in simultaneously.  Additional pages will be left on a paging queue but
4326  * will neither be wired nor busy regardless of allocflags.
4327  */
4328 int
4329 vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int allocflags)
4330 {
4331         vm_page_t m;
4332         vm_page_t ma[VM_INITIAL_PAGEIN];
4333         bool sleep, xbusy;
4334         int after, i, pflags, rv;
4335
4336         KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
4337             (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
4338             ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
4339         KASSERT((allocflags &
4340             (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
4341             ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));
4342         VM_OBJECT_ASSERT_WLOCKED(object);
4343         pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY);
4344         pflags |= VM_ALLOC_WAITFAIL;
4345
4346 retrylookup:
4347         xbusy = false;
4348         if ((m = vm_page_lookup(object, pindex)) != NULL) {
4349                 /*
4350                  * If the page is fully valid it can only become invalid
4351                  * with the object lock held.  If it is not valid it can
4352                  * become valid with the busy lock held.  Therefore, we
4353                  * may unnecessarily lock the exclusive busy here if we
4354                  * race with I/O completion not using the object lock.
4355                  * However, we will not end up with an invalid page and a
4356                  * shared lock.
4357                  */
4358                 if (!vm_page_all_valid(m) ||
4359                     (allocflags & (VM_ALLOC_IGN_SBUSY | VM_ALLOC_SBUSY)) == 0) {
4360                         sleep = !vm_page_tryxbusy(m);
4361                         xbusy = true;
4362                 } else
4363                         sleep = !vm_page_trysbusy(m);
4364                 if (sleep) {
4365                         (void)vm_page_busy_sleep_flags(object, m, "pgrbwt",
4366                             allocflags);
4367                         goto retrylookup;
4368                 }
4369                 if ((allocflags & VM_ALLOC_NOCREAT) != 0 &&
4370                    !vm_page_all_valid(m)) {
4371                         if (xbusy)
4372                                 vm_page_xunbusy(m);
4373                         else
4374                                 vm_page_sunbusy(m);
4375                         *mp = NULL;
4376                         return (VM_PAGER_FAIL);
4377                 }
4378                 if ((allocflags & VM_ALLOC_WIRED) != 0)
4379                         vm_page_wire(m);
4380                 if (vm_page_all_valid(m))
4381                         goto out;
4382         } else if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
4383                 *mp = NULL;
4384                 return (VM_PAGER_FAIL);
4385         } else if ((m = vm_page_alloc(object, pindex, pflags)) != NULL) {
4386                 xbusy = true;
4387         } else {
4388                 goto retrylookup;
4389         }
4390
4391         vm_page_assert_xbusied(m);
4392         MPASS(xbusy);
4393         if (vm_pager_has_page(object, pindex, NULL, &after)) {
4394                 after = MIN(after, VM_INITIAL_PAGEIN);
4395                 after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT);
4396                 after = MAX(after, 1);
4397                 ma[0] = m;
4398                 for (i = 1; i < after; i++) {
4399                         if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
4400                                 if (ma[i]->valid || !vm_page_tryxbusy(ma[i]))
4401                                         break;
4402                         } else {
4403                                 ma[i] = vm_page_alloc(object, m->pindex + i,
4404                                     VM_ALLOC_NORMAL);
4405                                 if (ma[i] == NULL)
4406                                         break;
4407                         }
4408                 }
4409                 after = i;
4410                 vm_object_pip_add(object, after);
4411                 VM_OBJECT_WUNLOCK(object);
4412                 rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
4413                 VM_OBJECT_WLOCK(object);
4414                 vm_object_pip_wakeupn(object, after);
4415                 /* Pager may have replaced a page. */
4416                 m = ma[0];
4417                 if (rv != VM_PAGER_OK) {
4418                         if ((allocflags & VM_ALLOC_WIRED) != 0)
4419                                 vm_page_unwire_noq(m);
4420                         for (i = 0; i < after; i++) {
4421                                 if (!vm_page_wired(ma[i]))
4422                                         vm_page_free(ma[i]);
4423                                 else
4424                                         vm_page_xunbusy(ma[i]);
4425                         }
4426                         *mp = NULL;
4427                         return (rv);
4428                 }
4429                 for (i = 1; i < after; i++)
4430                         vm_page_readahead_finish(ma[i]);
4431                 MPASS(vm_page_all_valid(m));
4432         } else {
4433                 vm_page_zero_invalid(m, TRUE);
4434         }
4435 out:
4436         if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
4437                 if (xbusy)
4438                         vm_page_xunbusy(m);
4439                 else
4440                         vm_page_sunbusy(m);
4441         }
4442         if ((allocflags & VM_ALLOC_SBUSY) != 0 && xbusy)
4443                 vm_page_busy_downgrade(m);
4444         *mp = m;
4445         return (VM_PAGER_OK);
4446 }
4447
4448 /*
4449  * Return the specified range of pages from the given object.  For each
4450  * page offset within the range, if a page already exists within the object
4451  * at that offset and it is busy, then wait for it to change state.  If,
4452  * instead, the page doesn't exist, then allocate it.
4453  *
4454  * The caller must always specify an allocation class.
4455  *
4456  * allocation classes:
4457  *      VM_ALLOC_NORMAL         normal process request
4458  *      VM_ALLOC_SYSTEM         system *really* needs the pages
4459  *
4460  * The caller must always specify that the pages are to be busied and/or
4461  * wired.
4462  *
4463  * optional allocation flags:
4464  *      VM_ALLOC_IGN_SBUSY      do not sleep on soft busy pages
4465  *      VM_ALLOC_NOBUSY         do not exclusive busy the page
4466  *      VM_ALLOC_NOWAIT         do not sleep
4467  *      VM_ALLOC_SBUSY          set page to sbusy state
4468  *      VM_ALLOC_WIRED          wire the pages
4469  *      VM_ALLOC_ZERO           zero and validate any invalid pages
4470  *
4471  * If VM_ALLOC_NOWAIT is not specified, this routine may sleep.  Otherwise, it
4472  * may return a partial prefix of the requested range.
4473  */
4474 int
4475 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
4476     vm_page_t *ma, int count)
4477 {
4478         vm_page_t m, mpred;
4479         int pflags;
4480         int i;
4481
4482         VM_OBJECT_ASSERT_WLOCKED(object);
4483         KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
4484             ("vm_page_grap_pages: VM_ALLOC_COUNT() is not allowed"));
4485
4486         pflags = vm_page_grab_pflags(allocflags);
4487         if (count == 0)
4488                 return (0);
4489
4490         i = 0;
4491 retrylookup:
4492         m = vm_radix_lookup_le(&object->rtree, pindex + i);
4493         if (m == NULL || m->pindex != pindex + i) {
4494                 mpred = m;
4495                 m = NULL;
4496         } else
4497                 mpred = TAILQ_PREV(m, pglist, listq);
4498         for (; i < count; i++) {
4499                 if (m != NULL) {
4500                         if (!vm_page_acquire_flags(m, allocflags)) {
4501                                 if (vm_page_busy_sleep_flags(object, m,
4502                                     "grbmaw", allocflags))
4503                                         goto retrylookup;
4504                                 break;
4505                         }
4506                 } else {
4507                         if ((allocflags & VM_ALLOC_NOCREAT) != 0)
4508                                 break;
4509                         m = vm_page_alloc_after(object, pindex + i,
4510                             pflags | VM_ALLOC_COUNT(count - i), mpred);
4511                         if (m == NULL) {
4512                                 if ((allocflags & (VM_ALLOC_NOWAIT |
4513                                     VM_ALLOC_WAITFAIL)) != 0)
4514                                         break;
4515                                 goto retrylookup;
4516                         }
4517                 }
4518                 if (vm_page_none_valid(m) &&
4519                     (allocflags & VM_ALLOC_ZERO) != 0) {
4520                         if ((m->flags & PG_ZERO) == 0)
4521                                 pmap_zero_page(m);
4522                         vm_page_valid(m);
4523                 }
4524                 if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
4525                         if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
4526                                 vm_page_sunbusy(m);
4527                         else
4528                                 vm_page_xunbusy(m);
4529                 }
4530                 ma[i] = mpred = m;
4531                 m = vm_page_next(m);
4532         }
4533         return (i);
4534 }
4535
4536 /*
4537  * Mapping function for valid or dirty bits in a page.
4538  *
4539  * Inputs are required to range within a page.
4540  */
4541 vm_page_bits_t
4542 vm_page_bits(int base, int size)
4543 {
4544         int first_bit;
4545         int last_bit;
4546
4547         KASSERT(
4548             base + size <= PAGE_SIZE,
4549             ("vm_page_bits: illegal base/size %d/%d", base, size)
4550         );
4551
4552         if (size == 0)          /* handle degenerate case */
4553                 return (0);
4554
4555         first_bit = base >> DEV_BSHIFT;
4556         last_bit = (base + size - 1) >> DEV_BSHIFT;
4557
4558         return (((vm_page_bits_t)2 << last_bit) -
4559             ((vm_page_bits_t)1 << first_bit));
4560 }
4561
4562 void
4563 vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set)
4564 {
4565
4566 #if PAGE_SIZE == 32768
4567         atomic_set_64((uint64_t *)bits, set);
4568 #elif PAGE_SIZE == 16384
4569         atomic_set_32((uint32_t *)bits, set);
4570 #elif (PAGE_SIZE == 8192) && defined(atomic_set_16)
4571         atomic_set_16((uint16_t *)bits, set);
4572 #elif (PAGE_SIZE == 4096) && defined(atomic_set_8)
4573         atomic_set_8((uint8_t *)bits, set);
4574 #else           /* PAGE_SIZE <= 8192 */
4575         uintptr_t addr;
4576         int shift;
4577
4578         addr = (uintptr_t)bits;
4579         /*
4580          * Use a trick to perform a 32-bit atomic on the
4581          * containing aligned word, to not depend on the existence
4582          * of atomic_{set, clear}_{8, 16}.
4583          */
4584         shift = addr & (sizeof(uint32_t) - 1);
4585 #if BYTE_ORDER == BIG_ENDIAN
4586         shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
4587 #else
4588         shift *= NBBY;
4589 #endif
4590         addr &= ~(sizeof(uint32_t) - 1);
4591         atomic_set_32((uint32_t *)addr, set << shift);
4592 #endif          /* PAGE_SIZE */
4593 }
4594
4595 static inline void
4596 vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear)
4597 {
4598
4599 #if PAGE_SIZE == 32768
4600         atomic_clear_64((uint64_t *)bits, clear);
4601 #elif PAGE_SIZE == 16384
4602         atomic_clear_32((uint32_t *)bits, clear);
4603 #elif (PAGE_SIZE == 8192) && defined(atomic_clear_16)
4604         atomic_clear_16((uint16_t *)bits, clear);
4605 #elif (PAGE_SIZE == 4096) && defined(atomic_clear_8)
4606         atomic_clear_8((uint8_t *)bits, clear);
4607 #else           /* PAGE_SIZE <= 8192 */
4608         uintptr_t addr;
4609         int shift;
4610
4611         addr = (uintptr_t)bits;
4612         /*
4613          * Use a trick to perform a 32-bit atomic on the
4614          * containing aligned word, to not depend on the existence
4615          * of atomic_{set, clear}_{8, 16}.
4616          */
4617         shift = addr & (sizeof(uint32_t) - 1);
4618 #if BYTE_ORDER == BIG_ENDIAN
4619         shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
4620 #else
4621         shift *= NBBY;
4622 #endif
4623         addr &= ~(sizeof(uint32_t) - 1);
4624         atomic_clear_32((uint32_t *)addr, clear << shift);
4625 #endif          /* PAGE_SIZE */
4626 }
4627
4628 static inline vm_page_bits_t
4629 vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits)
4630 {
4631 #if PAGE_SIZE == 32768
4632         uint64_t old;
4633
4634         old = *bits;
4635         while (atomic_fcmpset_64(bits, &old, newbits) == 0);
4636         return (old);
4637 #elif PAGE_SIZE == 16384
4638         uint32_t old;
4639
4640         old = *bits;
4641         while (atomic_fcmpset_32(bits, &old, newbits) == 0);
4642         return (old);
4643 #elif (PAGE_SIZE == 8192) && defined(atomic_fcmpset_16)
4644         uint16_t old;
4645
4646         old = *bits;
4647         while (atomic_fcmpset_16(bits, &old, newbits) == 0);
4648         return (old);
4649 #elif (PAGE_SIZE == 4096) && defined(atomic_fcmpset_8)
4650         uint8_t old;
4651
4652         old = *bits;
4653         while (atomic_fcmpset_8(bits, &old, newbits) == 0);
4654         return (old);
4655 #else           /* PAGE_SIZE <= 4096*/
4656         uintptr_t addr;
4657         uint32_t old, new, mask;
4658         int shift;
4659
4660         addr = (uintptr_t)bits;
4661         /*
4662          * Use a trick to perform a 32-bit atomic on the
4663          * containing aligned word, to not depend on the existence
4664          * of atomic_{set, swap, clear}_{8, 16}.
4665          */
4666         shift = addr & (sizeof(uint32_t) - 1);
4667 #if BYTE_ORDER == BIG_ENDIAN
4668         shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
4669 #else
4670         shift *= NBBY;
4671 #endif
4672         addr &= ~(sizeof(uint32_t) - 1);
4673         mask = VM_PAGE_BITS_ALL << shift;
4674
4675         old = *bits;
4676         do {
4677                 new = old & ~mask;
4678                 new |= newbits << shift;
4679         } while (atomic_fcmpset_32((uint32_t *)addr, &old, new) == 0);
4680         return (old >> shift);
4681 #endif          /* PAGE_SIZE */
4682 }
4683
4684 /*
4685  *      vm_page_set_valid_range:
4686  *
4687  *      Sets portions of a page valid.  The arguments are expected
4688  *      to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
4689  *      of any partial chunks touched by the range.  The invalid portion of
4690  *      such chunks will be zeroed.
4691  *
4692  *      (base + size) must be less then or equal to PAGE_SIZE.
4693  */
4694 void
4695 vm_page_set_valid_range(vm_page_t m, int base, int size)
4696 {
4697         int endoff, frag;
4698         vm_page_bits_t pagebits;
4699
4700         vm_page_assert_busied(m);
4701         if (size == 0)  /* handle degenerate case */
4702                 return;
4703
4704         /*
4705          * If the base is not DEV_BSIZE aligned and the valid
4706          * bit is clear, we have to zero out a portion of the
4707          * first block.
4708          */
4709         if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
4710             (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
4711                 pmap_zero_page_area(m, frag, base - frag);
4712
4713         /*
4714          * If the ending offset is not DEV_BSIZE aligned and the
4715          * valid bit is clear, we have to zero out a portion of
4716          * the last block.
4717          */
4718         endoff = base + size;
4719         if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
4720             (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
4721                 pmap_zero_page_area(m, endoff,
4722                     DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
4723
4724         /*
4725          * Assert that no previously invalid block that is now being validated
4726          * is already dirty.
4727          */
4728         KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
4729             ("vm_page_set_valid_range: page %p is dirty", m));
4730
4731         /*
4732          * Set valid bits inclusive of any overlap.
4733          */
4734         pagebits = vm_page_bits(base, size);
4735         if (vm_page_xbusied(m))
4736                 m->valid |= pagebits;
4737         else
4738                 vm_page_bits_set(m, &m->valid, pagebits);
4739 }
4740
4741 /*
4742  * Set the page dirty bits and free the invalid swap space if
4743  * present.  Returns the previous dirty bits.
4744  */
4745 vm_page_bits_t
4746 vm_page_set_dirty(vm_page_t m)
4747 {
4748         vm_page_bits_t old;
4749
4750         VM_PAGE_OBJECT_BUSY_ASSERT(m);
4751
4752         if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) {
4753                 old = m->dirty;
4754                 m->dirty = VM_PAGE_BITS_ALL;
4755         } else
4756                 old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL);
4757         if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0)
4758                 vm_pager_page_unswapped(m);
4759
4760         return (old);
4761 }
4762
4763 /*
4764  * Clear the given bits from the specified page's dirty field.
4765  */
4766 static __inline void
4767 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
4768 {
4769
4770         vm_page_assert_busied(m);
4771
4772         /*
4773          * If the page is xbusied and not write mapped we are the
4774          * only thread that can modify dirty bits.  Otherwise, The pmap
4775          * layer can call vm_page_dirty() without holding a distinguished
4776          * lock.  The combination of page busy and atomic operations
4777          * suffice to guarantee consistency of the page dirty field.
4778          */
4779         if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
4780                 m->dirty &= ~pagebits;
4781         else
4782                 vm_page_bits_clear(m, &m->dirty, pagebits);
4783 }
4784
4785 /*
4786  *      vm_page_set_validclean:
4787  *
4788  *      Sets portions of a page valid and clean.  The arguments are expected
4789  *      to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
4790  *      of any partial chunks touched by the range.  The invalid portion of
4791  *      such chunks will be zero'd.
4792  *
4793  *      (base + size) must be less then or equal to PAGE_SIZE.
4794  */
4795 void
4796 vm_page_set_validclean(vm_page_t m, int base, int size)
4797 {
4798         vm_page_bits_t oldvalid, pagebits;
4799         int endoff, frag;
4800
4801         vm_page_assert_busied(m);
4802         if (size == 0)  /* handle degenerate case */
4803                 return;
4804
4805         /*
4806          * If the base is not DEV_BSIZE aligned and the valid
4807          * bit is clear, we have to zero out a portion of the
4808          * first block.
4809          */
4810         if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
4811             (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
4812                 pmap_zero_page_area(m, frag, base - frag);
4813
4814         /*
4815          * If the ending offset is not DEV_BSIZE aligned and the
4816          * valid bit is clear, we have to zero out a portion of
4817          * the last block.
4818          */
4819         endoff = base + size;
4820         if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
4821             (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
4822                 pmap_zero_page_area(m, endoff,
4823                     DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
4824
4825         /*
4826          * Set valid, clear dirty bits.  If validating the entire
4827          * page we can safely clear the pmap modify bit.  We also
4828          * use this opportunity to clear the PGA_NOSYNC flag.  If a process
4829          * takes a write fault on a MAP_NOSYNC memory area the flag will
4830          * be set again.
4831          *
4832          * We set valid bits inclusive of any overlap, but we can only
4833          * clear dirty bits for DEV_BSIZE chunks that are fully within
4834          * the range.
4835          */
4836         oldvalid = m->valid;
4837         pagebits = vm_page_bits(base, size);
4838         if (vm_page_xbusied(m))
4839                 m->valid |= pagebits;
4840         else
4841                 vm_page_bits_set(m, &m->valid, pagebits);
4842 #if 0   /* NOT YET */
4843         if ((frag = base & (DEV_BSIZE - 1)) != 0) {
4844                 frag = DEV_BSIZE - frag;
4845                 base += frag;
4846                 size -= frag;
4847                 if (size < 0)
4848                         size = 0;
4849         }
4850         pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
4851 #endif
4852         if (base == 0 && size == PAGE_SIZE) {
4853                 /*
4854                  * The page can only be modified within the pmap if it is
4855                  * mapped, and it can only be mapped if it was previously
4856                  * fully valid.
4857                  */
4858                 if (oldvalid == VM_PAGE_BITS_ALL)
4859                         /*
4860                          * Perform the pmap_clear_modify() first.  Otherwise,
4861                          * a concurrent pmap operation, such as
4862                          * pmap_protect(), could clear a modification in the
4863                          * pmap and set the dirty field on the page before
4864                          * pmap_clear_modify() had begun and after the dirty
4865                          * field was cleared here.
4866                          */
4867                         pmap_clear_modify(m);
4868                 m->dirty = 0;
4869                 vm_page_aflag_clear(m, PGA_NOSYNC);
4870         } else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m))
4871                 m->dirty &= ~pagebits;
4872         else
4873                 vm_page_clear_dirty_mask(m, pagebits);
4874 }
4875
4876 void
4877 vm_page_clear_dirty(vm_page_t m, int base, int size)
4878 {
4879
4880         vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
4881 }
4882
4883 /*
4884  *      vm_page_set_invalid:
4885  *
4886  *      Invalidates DEV_BSIZE'd chunks within a page.  Both the
4887  *      valid and dirty bits for the effected areas are cleared.
4888  */
4889 void
4890 vm_page_set_invalid(vm_page_t m, int base, int size)
4891 {
4892         vm_page_bits_t bits;
4893         vm_object_t object;
4894
4895         /*
4896          * The object lock is required so that pages can't be mapped
4897          * read-only while we're in the process of invalidating them.
4898          */
4899         object = m->object;
4900         VM_OBJECT_ASSERT_WLOCKED(object);
4901         vm_page_assert_busied(m);
4902
4903         if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
4904             size >= object->un_pager.vnp.vnp_size)
4905                 bits = VM_PAGE_BITS_ALL;
4906         else
4907                 bits = vm_page_bits(base, size);
4908         if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0)
4909                 pmap_remove_all(m);
4910         KASSERT((bits == 0 && vm_page_all_valid(m)) ||
4911             !pmap_page_is_mapped(m),
4912             ("vm_page_set_invalid: page %p is mapped", m));
4913         if (vm_page_xbusied(m)) {
4914                 m->valid &= ~bits;
4915                 m->dirty &= ~bits;
4916         } else {
4917                 vm_page_bits_clear(m, &m->valid, bits);
4918                 vm_page_bits_clear(m, &m->dirty, bits);
4919         }
4920 }
4921
4922 /*
4923  *      vm_page_invalid:
4924  *
4925  *      Invalidates the entire page.  The page must be busy, unmapped, and
4926  *      the enclosing object must be locked.  The object locks protects
4927  *      against concurrent read-only pmap enter which is done without
4928  *      busy.
4929  */
4930 void
4931 vm_page_invalid(vm_page_t m)
4932 {
4933
4934         vm_page_assert_busied(m);
4935         VM_OBJECT_ASSERT_LOCKED(m->object);
4936         MPASS(!pmap_page_is_mapped(m));
4937
4938         if (vm_page_xbusied(m))
4939                 m->valid = 0;
4940         else
4941                 vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL);
4942 }
4943
4944 /*
4945  * vm_page_zero_invalid()
4946  *
4947  *      The kernel assumes that the invalid portions of a page contain
4948  *      garbage, but such pages can be mapped into memory by user code.
4949  *      When this occurs, we must zero out the non-valid portions of the
4950  *      page so user code sees what it expects.
4951  *
4952  *      Pages are most often semi-valid when the end of a file is mapped
4953  *      into memory and the file's size is not page aligned.
4954  */
4955 void
4956 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
4957 {
4958         int b;
4959         int i;
4960
4961         /*
4962          * Scan the valid bits looking for invalid sections that
4963          * must be zeroed.  Invalid sub-DEV_BSIZE'd areas ( where the
4964          * valid bit may be set ) have already been zeroed by
4965          * vm_page_set_validclean().
4966          */
4967         for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
4968                 if (i == (PAGE_SIZE / DEV_BSIZE) ||
4969                     (m->valid & ((vm_page_bits_t)1 << i))) {
4970                         if (i > b) {
4971                                 pmap_zero_page_area(m,
4972                                     b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
4973                         }
4974                         b = i + 1;
4975                 }
4976         }
4977
4978         /*
4979          * setvalid is TRUE when we can safely set the zero'd areas
4980          * as being valid.  We can do this if there are no cache consistancy
4981          * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
4982          */
4983         if (setvalid)
4984                 vm_page_valid(m);
4985 }
4986
4987 /*
4988  *      vm_page_is_valid:
4989  *
4990  *      Is (partial) page valid?  Note that the case where size == 0
4991  *      will return FALSE in the degenerate case where the page is
4992  *      entirely invalid, and TRUE otherwise.
4993  *
4994  *      Some callers envoke this routine without the busy lock held and
4995  *      handle races via higher level locks.  Typical callers should
4996  *      hold a busy lock to prevent invalidation.
4997  */
4998 int
4999 vm_page_is_valid(vm_page_t m, int base, int size)
5000 {
5001         vm_page_bits_t bits;
5002
5003         bits = vm_page_bits(base, size);
5004         return (m->valid != 0 && (m->valid & bits) == bits);
5005 }
5006
5007 /*
5008  * Returns true if all of the specified predicates are true for the entire
5009  * (super)page and false otherwise.
5010  */
5011 bool
5012 vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m)
5013 {
5014         vm_object_t object;
5015         int i, npages;
5016
5017         object = m->object;
5018         if (skip_m != NULL && skip_m->object != object)
5019                 return (false);
5020         VM_OBJECT_ASSERT_LOCKED(object);
5021         npages = atop(pagesizes[m->psind]);
5022
5023         /*
5024          * The physically contiguous pages that make up a superpage, i.e., a
5025          * page with a page size index ("psind") greater than zero, will
5026          * occupy adjacent entries in vm_page_array[].
5027          */
5028         for (i = 0; i < npages; i++) {
5029                 /* Always test object consistency, including "skip_m". */
5030                 if (m[i].object != object)
5031                         return (false);
5032                 if (&m[i] == skip_m)
5033                         continue;
5034                 if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
5035                         return (false);
5036                 if ((flags & PS_ALL_DIRTY) != 0) {
5037                         /*
5038                          * Calling vm_page_test_dirty() or pmap_is_modified()
5039                          * might stop this case from spuriously returning
5040                          * "false".  However, that would require a write lock
5041                          * on the object containing "m[i]".
5042                          */
5043                         if (m[i].dirty != VM_PAGE_BITS_ALL)
5044                                 return (false);
5045                 }
5046                 if ((flags & PS_ALL_VALID) != 0 &&
5047                     m[i].valid != VM_PAGE_BITS_ALL)
5048                         return (false);
5049         }
5050         return (true);
5051 }
5052
5053 /*
5054  * Set the page's dirty bits if the page is modified.
5055  */
5056 void
5057 vm_page_test_dirty(vm_page_t m)
5058 {
5059
5060         vm_page_assert_busied(m);
5061         if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
5062                 vm_page_dirty(m);
5063 }
5064
5065 void
5066 vm_page_valid(vm_page_t m)
5067 {
5068
5069         vm_page_assert_busied(m);
5070         if (vm_page_xbusied(m))
5071                 m->valid = VM_PAGE_BITS_ALL;
5072         else
5073                 vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL);
5074 }
5075
5076 void
5077 vm_page_lock_KBI(vm_page_t m, const char *file, int line)
5078 {
5079
5080         mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
5081 }
5082
5083 void
5084 vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
5085 {
5086
5087         mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
5088 }
5089
5090 int
5091 vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
5092 {
5093
5094         return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
5095 }
5096
5097 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
5098 void
5099 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
5100 {
5101
5102         vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
5103 }
5104
5105 void
5106 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
5107 {
5108
5109         mtx_assert_(vm_page_lockptr(m), a, file, line);
5110 }
5111 #endif
5112
5113 #ifdef INVARIANTS
5114 void
5115 vm_page_object_busy_assert(vm_page_t m)
5116 {
5117
5118         /*
5119          * Certain of the page's fields may only be modified by the
5120          * holder of a page or object busy.
5121          */
5122         if (m->object != NULL && !vm_page_busied(m))
5123                 VM_OBJECT_ASSERT_BUSY(m->object);
5124 }
5125
5126 void
5127 vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits)
5128 {
5129
5130         if ((bits & PGA_WRITEABLE) == 0)
5131                 return;
5132
5133         /*
5134          * The PGA_WRITEABLE flag can only be set if the page is
5135          * managed, is exclusively busied or the object is locked.
5136          * Currently, this flag is only set by pmap_enter().
5137          */
5138         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5139             ("PGA_WRITEABLE on unmanaged page"));
5140         if (!vm_page_xbusied(m))
5141                 VM_OBJECT_ASSERT_BUSY(m->object);
5142 }
5143 #endif
5144
5145 #include "opt_ddb.h"
5146 #ifdef DDB
5147 #include <sys/kernel.h>
5148
5149 #include <ddb/ddb.h>
5150
5151 DB_SHOW_COMMAND(page, vm_page_print_page_info)
5152 {
5153
5154         db_printf("vm_cnt.v_free_count: %d\n", vm_free_count());
5155         db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count());
5156         db_printf("vm_cnt.v_active_count: %d\n", vm_active_count());
5157         db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count());
5158         db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count());
5159         db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
5160         db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
5161         db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
5162         db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
5163 }
5164
5165 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
5166 {
5167         int dom;
5168
5169         db_printf("pq_free %d\n", vm_free_count());
5170         for (dom = 0; dom < vm_ndomains; dom++) {
5171                 db_printf(
5172     "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n",
5173                     dom,
5174                     vm_dom[dom].vmd_page_count,
5175                     vm_dom[dom].vmd_free_count,
5176                     vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
5177                     vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
5178                     vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt,
5179                     vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt);
5180         }
5181 }
5182
5183 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
5184 {
5185         vm_page_t m;
5186         boolean_t phys, virt;
5187
5188         if (!have_addr) {
5189                 db_printf("show pginfo addr\n");
5190                 return;
5191         }
5192
5193         phys = strchr(modif, 'p') != NULL;
5194         virt = strchr(modif, 'v') != NULL;
5195         if (virt)
5196                 m = PHYS_TO_VM_PAGE(pmap_kextract(addr));
5197         else if (phys)
5198                 m = PHYS_TO_VM_PAGE(addr);
5199         else
5200                 m = (vm_page_t)addr;
5201         db_printf(
5202     "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref %u\n"
5203     "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
5204             m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
5205             m->a.queue, m->ref_count, m->a.flags, m->oflags,
5206             m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty);
5207 }
5208 #endif /* DDB */