1 /*-
2  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2005 Yahoo! Technologies Norway AS
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to Berkeley by
14  * The Mach Operating System project at Carnegie-Mellon University.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *      This product includes software developed by the University of
27  *      California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
45  *
46  *
47  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
48  * All rights reserved.
49  *
50  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
51  *
52  * Permission to use, copy, modify and distribute this software and
53  * its documentation is hereby granted, provided that both the copyright
54  * notice and this permission notice appear in all copies of the
55  * software, derivative works or modified versions, and any portions
56  * thereof, and that both notices appear in supporting documentation.
57  *
58  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61  *
62  * Carnegie Mellon requests users of this software to return to
63  *
64  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65  *  School of Computer Science
66  *  Carnegie Mellon University
67  *  Pittsburgh PA 15213-3890
68  *
69  * any improvements or extensions that they make and grant Carnegie the
70  * rights to redistribute these changes.
71  */
72
73 /*
74  *      The proverbial page-out daemon.
75  */
76
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
79
80 #include "opt_vm.h"
81
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/kernel.h>
85 #include <sys/eventhandler.h>
86 #include <sys/lock.h>
87 #include <sys/mutex.h>
88 #include <sys/proc.h>
89 #include <sys/kthread.h>
90 #include <sys/ktr.h>
91 #include <sys/mount.h>
92 #include <sys/racct.h>
93 #include <sys/resourcevar.h>
94 #include <sys/sched.h>
95 #include <sys/sdt.h>
96 #include <sys/signalvar.h>
97 #include <sys/smp.h>
98 #include <sys/time.h>
99 #include <sys/vnode.h>
100 #include <sys/vmmeter.h>
101 #include <sys/rwlock.h>
102 #include <sys/sx.h>
103 #include <sys/sysctl.h>
104
105 #include <vm/vm.h>
106 #include <vm/vm_param.h>
107 #include <vm/vm_object.h>
108 #include <vm/vm_page.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_pageout.h>
111 #include <vm/vm_pager.h>
112 #include <vm/vm_phys.h>
113 #include <vm/vm_pagequeue.h>
114 #include <vm/swap_pager.h>
115 #include <vm/vm_extern.h>
116 #include <vm/uma.h>
117
118 /*
119  * System initialization
120  */
121
122 /* the kernel process "vm_pageout"*/
123 static void vm_pageout(void);
124 static void vm_pageout_init(void);
125 static int vm_pageout_clean(vm_page_t m, int *numpagedout);
126 static int vm_pageout_cluster(vm_page_t m);
127 static bool vm_pageout_scan(struct vm_domain *vmd, int pass);
128 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
129     int starting_page_shortage);
130
131 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
132     NULL);
133
134 struct proc *pageproc;
135
136 static struct kproc_desc page_kp = {
137         "pagedaemon",
138         vm_pageout,
139         &pageproc
140 };
141 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
142     &page_kp);
143
144 SDT_PROVIDER_DEFINE(vm);
145 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
146
147 /* Pagedaemon activity rates, in subdivisions of one second. */
148 #define VM_LAUNDER_RATE         10
149 #define VM_INACT_SCAN_RATE      2
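    /*
     * With these defaults the laundry thread runs up to ten times per second
     * (pausing for hz / VM_LAUNDER_RATE between runs when it has work to do),
     * and a shortfall target is spread over VM_LAUNDER_RATE /
     * VM_INACT_SCAN_RATE laundering runs; see vm_pageout_laundry_worker()
     * below.
     */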
150
151 static int vm_pageout_oom_seq = 12;
152
153 static int vm_pageout_update_period;
154 static int disable_swap_pageouts;
155 static int lowmem_period = 10;
156 static time_t lowmem_uptime;
157 static int swapdev_enabled;
158
159 static int vm_panic_on_oom = 0;
160
161 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
162         CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
163         "panic on out of memory instead of killing the largest process");
164
165 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
166         CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
167         "Maximum active LRU update period");
168
169 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
170         "Low memory callback period");
171
172 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
173         CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
174
175 static int pageout_lock_miss;
176 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
177         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
178
179 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
180         CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
181         "back-to-back calls to oom detector to start OOM");
182
183 static int act_scan_laundry_weight = 3;
184 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
185     &act_scan_laundry_weight, 0,
186     "weight given to clean vs. dirty pages in active queue scans");
187
188 static u_int vm_background_launder_rate = 4096;
189 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
190     &vm_background_launder_rate, 0,
191     "background laundering rate, in kilobytes per second");
192
193 static u_int vm_background_launder_max = 20 * 1024;
194 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
195     &vm_background_launder_max, 0, "background laundering cap, in kilobytes");
196
197 int vm_pageout_page_count = 32;
198
199 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
200 SYSCTL_INT(_vm, OID_AUTO, max_wired,
201         CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
202
203 static u_int isqrt(u_int num);
204 static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
205 static int vm_pageout_launder(struct vm_domain *vmd, int launder,
206     bool in_shortfall);
207 static void vm_pageout_laundry_worker(void *arg);
208 static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
209
210 /*
211  * Initialize a dummy page for marking the caller's place in the specified
212  * paging queue.  In principle, this function only needs to set the flag
213  * PG_MARKER.  Nonetheless, it write busies and initializes the hold count
214  * to one as safety precautions.
215  */ 
216 static void
217 vm_pageout_init_marker(vm_page_t marker, u_short queue)
218 {
219
220         bzero(marker, sizeof(*marker));
221         marker->flags = PG_MARKER;
222         marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
223         marker->queue = queue;
224         marker->hold_count = 1;
225 }
226
227 /*
228  * vm_pageout_fallback_object_lock:
229  * 
230  * Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
231  * known to have failed and page queue must be either PQ_ACTIVE or
232  * PQ_INACTIVE.  To avoid lock order violation, unlock the page queue
233  * while locking the vm object.  Use marker page to detect page queue
234  * changes and maintain notion of next page on page queue.  Return
235  * TRUE if no changes were detected, FALSE otherwise.  vm object is
236  * locked on return.
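     *
     * (The lock order used below is vm object, then page, then page queue,
     * so the page queue lock cannot be held while the object lock is
     * acquired.)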
237  * 
238  * This function depends on both the lock portion of struct vm_object
239  * and normal struct vm_page being type stable.
240  */
241 static boolean_t
242 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
243 {
244         struct vm_page marker;
245         struct vm_pagequeue *pq;
246         boolean_t unchanged;
247         u_short queue;
248         vm_object_t object;
249
250         queue = m->queue;
251         vm_pageout_init_marker(&marker, queue);
252         pq = vm_page_pagequeue(m);
253         object = m->object;
254         
255         TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
256         vm_pagequeue_unlock(pq);
257         vm_page_unlock(m);
258         VM_OBJECT_WLOCK(object);
259         vm_page_lock(m);
260         vm_pagequeue_lock(pq);
261
262         /*
263          * The page's object might have changed, and/or the page might
264          * have moved from its original position in the queue.  If the
265          * page's object has changed, then the caller should abandon
266          * processing the page because the wrong object lock was
267          * acquired.  Use the marker's plinks.q, not the page's, to
268          * determine if the page has been moved.  The state of the
269          * page's plinks.q can be indeterminate; whereas, the marker's
270          * plinks.q must be valid.
271          */
272         *next = TAILQ_NEXT(&marker, plinks.q);
273         unchanged = m->object == object &&
274             m == TAILQ_PREV(&marker, pglist, plinks.q);
275         KASSERT(!unchanged || m->queue == queue,
276             ("page %p queue %d %d", m, queue, m->queue));
277         TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
278         return (unchanged);
279 }
280
281 /*
282  * Lock the page while holding the page queue lock.  Use marker page
283  * to detect page queue changes and maintain notion of next page on
284  * page queue.  Return TRUE if no changes were detected, FALSE
285  * otherwise.  The page is locked on return. The page queue lock might
286  * be dropped and reacquired.
287  *
288  * This function depends on normal struct vm_page being type stable.
289  */
290 static boolean_t
291 vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
292 {
293         struct vm_page marker;
294         struct vm_pagequeue *pq;
295         boolean_t unchanged;
296         u_short queue;
297
298         vm_page_lock_assert(m, MA_NOTOWNED);
299         if (vm_page_trylock(m))
300                 return (TRUE);
301
302         queue = m->queue;
303         vm_pageout_init_marker(&marker, queue);
304         pq = vm_page_pagequeue(m);
305
306         TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
307         vm_pagequeue_unlock(pq);
308         vm_page_lock(m);
309         vm_pagequeue_lock(pq);
310
311         /* Page queue might have changed. */
312         *next = TAILQ_NEXT(&marker, plinks.q);
313         unchanged = m == TAILQ_PREV(&marker, pglist, plinks.q);
314         KASSERT(!unchanged || m->queue == queue,
315             ("page %p queue %d %d", m, queue, m->queue));
316         TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
317         return (unchanged);
318 }
319
320 /*
321  * Scan for pages at adjacent offsets within the given page's object that are
322  * eligible for laundering, form a cluster of these pages and the given page,
323  * and launder that cluster.
324  */
325 static int
326 vm_pageout_cluster(vm_page_t m)
327 {
328         vm_object_t object;
329         vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
330         vm_pindex_t pindex;
331         int ib, is, page_base, pageout_count;
332
333         vm_page_assert_locked(m);
334         object = m->object;
335         VM_OBJECT_ASSERT_WLOCKED(object);
336         pindex = m->pindex;
337
338         vm_page_assert_unbusied(m);
339         KASSERT(!vm_page_held(m), ("page %p is held", m));
340
341         pmap_remove_write(m);
342         vm_page_unlock(m);
343
344         mc[vm_pageout_page_count] = pb = ps = m;
345         pageout_count = 1;
346         page_base = vm_pageout_page_count;
347         ib = 1;
348         is = 1;
349
350         /*
351          * We can cluster only if the page is not clean, busy, or held, and
352          * the page is in the laundry queue.
353          *
354          * During heavy mmap/modification loads the pageout
355          * daemon can really fragment the underlying file
356          * due to flushing pages out of order and not trying to
357          * align the clusters (which leaves sporadic out-of-order
358          * holes).  To solve this problem we do the reverse scan
359          * first and attempt to align our cluster, then do a 
360          * forward scan if room remains.
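             *
             * For example, assuming the default vm_pageout_page_count of 32
             * and m->pindex == 70, the reverse scan can add pages at pindexes
             * 69 down to 64 before stopping at the 64-page alignment boundary,
             * and the forward scan then continues from pindex 71 until the
             * cluster is full or an ineligible page is found.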
361          */
362 more:
363         while (ib != 0 && pageout_count < vm_pageout_page_count) {
364                 if (ib > pindex) {
365                         ib = 0;
366                         break;
367                 }
368                 if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
369                         ib = 0;
370                         break;
371                 }
372                 vm_page_test_dirty(p);
373                 if (p->dirty == 0) {
374                         ib = 0;
375                         break;
376                 }
377                 vm_page_lock(p);
378                 if (!vm_page_in_laundry(p) || vm_page_held(p)) {
379                         vm_page_unlock(p);
380                         ib = 0;
381                         break;
382                 }
383                 pmap_remove_write(p);
384                 vm_page_unlock(p);
385                 mc[--page_base] = pb = p;
386                 ++pageout_count;
387                 ++ib;
388
389                 /*
390                  * We are at an alignment boundary.  Stop here, and switch
391                  * directions.  Do not clear ib.
392                  */
393                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
394                         break;
395         }
396         while (pageout_count < vm_pageout_page_count && 
397             pindex + is < object->size) {
398                 if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
399                         break;
400                 vm_page_test_dirty(p);
401                 if (p->dirty == 0)
402                         break;
403                 vm_page_lock(p);
404                 if (!vm_page_in_laundry(p) || vm_page_held(p)) {
405                         vm_page_unlock(p);
406                         break;
407                 }
408                 pmap_remove_write(p);
409                 vm_page_unlock(p);
410                 mc[page_base + pageout_count] = ps = p;
411                 ++pageout_count;
412                 ++is;
413         }
414
415         /*
416          * If we exhausted our forward scan, continue with the reverse scan
417          * when possible, even past an alignment boundary.  This catches
418          * boundary conditions.
419          */
420         if (ib != 0 && pageout_count < vm_pageout_page_count)
421                 goto more;
422
423         return (vm_pageout_flush(&mc[page_base], pageout_count,
424             VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
425 }
426
427 /*
428  * vm_pageout_flush() - launder the given pages
429  *
430  *      The given pages are laundered.  Note that we set up for the start of
431  *      I/O (i.e., busy the page), mark it read-only, and bump the object
432  *      reference count all in here rather than in the parent.  If we want
433  *      the parent to do more sophisticated things we may have to change
434  *      the ordering.
435  *
436  *      The returned runlen is the count of pages between mreq and the first
437  *      page after mreq with status VM_PAGER_AGAIN.
438  *      *eio is set to TRUE if the pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
439  *      for any page in that run.
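     *
     *      For example (illustrative), with count == 8 and mreq == 2 the
     *      runlen starts at 6; if the pager reports VM_PAGER_AGAIN for mc[5]
     *      and success for the other pages, the returned runlen is 3,
     *      covering mc[2] through mc[4].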
440  */
441 int
442 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
443     boolean_t *eio)
444 {
445         vm_object_t object = mc[0]->object;
446         int pageout_status[count];
447         int numpagedout = 0;
448         int i, runlen;
449
450         VM_OBJECT_ASSERT_WLOCKED(object);
451
452         /*
453          * Initiate I/O.  Mark the pages busy and verify that they're valid
454          * and read-only.
455          *
456          * We do not have to fix up the clean/dirty bits here... we can
457          * allow the pager to do it after the I/O completes.
458          *
459          * NOTE! mc[i]->dirty may be partial or fragmented due to an
460          * edge case with file fragments.
461          */
462         for (i = 0; i < count; i++) {
463                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
464                     ("vm_pageout_flush: partially invalid page %p index %d/%d",
465                         mc[i], i, count));
466                 KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
467                     ("vm_pageout_flush: writeable page %p", mc[i]));
468                 vm_page_sbusy(mc[i]);
469         }
470         vm_object_pip_add(object, count);
471
472         vm_pager_put_pages(object, mc, count, flags, pageout_status);
473
474         runlen = count - mreq;
475         if (eio != NULL)
476                 *eio = FALSE;
477         for (i = 0; i < count; i++) {
478                 vm_page_t mt = mc[i];
479
480                 KASSERT(pageout_status[i] == VM_PAGER_PEND ||
481                     !pmap_page_is_write_mapped(mt),
482                     ("vm_pageout_flush: page %p is not write protected", mt));
483                 switch (pageout_status[i]) {
484                 case VM_PAGER_OK:
485                         vm_page_lock(mt);
486                         if (vm_page_in_laundry(mt))
487                                 vm_page_deactivate_noreuse(mt);
488                         vm_page_unlock(mt);
489                         /* FALLTHROUGH */
490                 case VM_PAGER_PEND:
491                         numpagedout++;
492                         break;
493                 case VM_PAGER_BAD:
494                         /*
495                          * The page is outside the object's range.  We pretend
496                          * that the page out worked and clean the page, so the
497                          * changes will be lost if the page is reclaimed by
498                          * the page daemon.
499                          */
500                         vm_page_undirty(mt);
501                         vm_page_lock(mt);
502                         if (vm_page_in_laundry(mt))
503                                 vm_page_deactivate_noreuse(mt);
504                         vm_page_unlock(mt);
505                         break;
506                 case VM_PAGER_ERROR:
507                 case VM_PAGER_FAIL:
508                         /*
509                          * If the page couldn't be paged out to swap because the
510                          * pager wasn't able to find space, place the page in
511                          * the PQ_UNSWAPPABLE holding queue.  This is an
512                          * optimization that prevents the page daemon from
513                          * wasting CPU cycles on pages that cannot be reclaimed
514                          * because no swap device is configured.
515                          *
516                          * Otherwise, reactivate the page so that it doesn't
517                          * clog the laundry and inactive queues.  (We will try
518                          * paging it out again later.)
519                          */
520                         vm_page_lock(mt);
521                         if (object->type == OBJT_SWAP &&
522                             pageout_status[i] == VM_PAGER_FAIL) {
523                                 vm_page_unswappable(mt);
524                                 numpagedout++;
525                         } else
526                                 vm_page_activate(mt);
527                         vm_page_unlock(mt);
528                         if (eio != NULL && i >= mreq && i - mreq < runlen)
529                                 *eio = TRUE;
530                         break;
531                 case VM_PAGER_AGAIN:
532                         if (i >= mreq && i - mreq < runlen)
533                                 runlen = i - mreq;
534                         break;
535                 }
536
537                 /*
538                  * If the operation is still going, leave the page busy to
539                  * block all other accesses. Also, leave the paging in
540                  * progress indicator set so that we don't attempt an object
541                  * collapse.
542                  */
543                 if (pageout_status[i] != VM_PAGER_PEND) {
544                         vm_object_pip_wakeup(object);
545                         vm_page_sunbusy(mt);
546                 }
547         }
548         if (prunlen != NULL)
549                 *prunlen = runlen;
550         return (numpagedout);
551 }
552
553 static void
554 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
555 {
556
557         atomic_store_rel_int(&swapdev_enabled, 1);
558 }
559
560 static void
561 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
562 {
563
564         if (swap_pager_nswapdev() == 1)
565                 atomic_store_rel_int(&swapdev_enabled, 0);
566 }
567
568 /*
569  * Attempt to acquire all of the necessary locks to launder a page and
570  * then call through the clustering layer to PUTPAGES.  Wait a short
571  * time for a vnode lock.
572  *
573  * Requires the page and object lock on entry, releases both before return.
574  * Returns 0 on success and an errno otherwise.
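     *
     * As a guide to the error handling below: EDEADLK means the vnode lock or
     * vn_start_write() could not be obtained without risking deadlock, ENOENT
     * means the vnode and object were disassociated while the locks were
     * dropped, ENXIO means the page was requeued, reused, or cleaned in the
     * meantime, EBUSY means the page was busied or held, and EIO means the
     * clustering layer failed to page anything out.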
575  */
576 static int
577 vm_pageout_clean(vm_page_t m, int *numpagedout)
578 {
579         struct vnode *vp;
580         struct mount *mp;
581         vm_object_t object;
582         vm_pindex_t pindex;
583         int error, lockmode;
584
585         vm_page_assert_locked(m);
586         object = m->object;
587         VM_OBJECT_ASSERT_WLOCKED(object);
588         error = 0;
589         vp = NULL;
590         mp = NULL;
591
592         /*
593          * The object is already known NOT to be dead.   It
594          * is possible for the vget() to block the whole
595          * pageout daemon, but the new low-memory handling
596          * code should prevent it.
597          *
598          * We can't wait forever for the vnode lock; we might
599          * deadlock due to a vn_read() getting stuck in
600          * vm_wait while holding this vnode.  We skip the 
601          * vnode if we can't get it in a reasonable amount
602          * of time.
603          */
604         if (object->type == OBJT_VNODE) {
605                 vm_page_unlock(m);
606                 vp = object->handle;
607                 if (vp->v_type == VREG &&
608                     vn_start_write(vp, &mp, V_NOWAIT) != 0) {
609                         mp = NULL;
610                         error = EDEADLK;
611                         goto unlock_all;
612                 }
613                 KASSERT(mp != NULL,
614                     ("vp %p with NULL v_mount", vp));
615                 vm_object_reference_locked(object);
616                 pindex = m->pindex;
617                 VM_OBJECT_WUNLOCK(object);
618                 lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
619                     LK_SHARED : LK_EXCLUSIVE;
620                 if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
621                         vp = NULL;
622                         error = EDEADLK;
623                         goto unlock_mp;
624                 }
625                 VM_OBJECT_WLOCK(object);
626
627                 /*
628                  * Ensure that the object and vnode were not disassociated
629                  * while locks were dropped.
630                  */
631                 if (vp->v_object != object) {
632                         error = ENOENT;
633                         goto unlock_all;
634                 }
635                 vm_page_lock(m);
636
637                 /*
638                  * While the object and page were unlocked, the page
639                  * may have been:
640                  * (1) moved to a different queue,
641                  * (2) reallocated to a different object,
642                  * (3) reallocated to a different offset, or
643                  * (4) cleaned.
644                  */
645                 if (!vm_page_in_laundry(m) || m->object != object ||
646                     m->pindex != pindex || m->dirty == 0) {
647                         vm_page_unlock(m);
648                         error = ENXIO;
649                         goto unlock_all;
650                 }
651
652                 /*
653                  * The page may have been busied or referenced while the object
654                  * and page locks were released.
655                  */
656                 if (vm_page_busied(m) || vm_page_held(m)) {
657                         vm_page_unlock(m);
658                         error = EBUSY;
659                         goto unlock_all;
660                 }
661         }
662
663         /*
664          * If a page is dirty, then it is either being washed
665          * (but not yet cleaned) or it is still in the
666          * laundry.  If it is still in the laundry, then we
667          * start the cleaning operation. 
668          */
669         if ((*numpagedout = vm_pageout_cluster(m)) == 0)
670                 error = EIO;
671
672 unlock_all:
673         VM_OBJECT_WUNLOCK(object);
674
675 unlock_mp:
676         vm_page_lock_assert(m, MA_NOTOWNED);
677         if (mp != NULL) {
678                 if (vp != NULL)
679                         vput(vp);
680                 vm_object_deallocate(object);
681                 vn_finished_write(mp);
682         }
683
684         return (error);
685 }
686
687 /*
688  * Attempt to launder the specified number of pages.
689  *
690  * Returns the number of pages successfully laundered.
691  */
692 static int
693 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
694 {
695         struct vm_pagequeue *pq;
696         vm_object_t object;
697         vm_page_t m, next;
698         int act_delta, error, maxscan, numpagedout, starting_target;
699         int vnodes_skipped;
700         bool pageout_ok, queue_locked;
701
702         starting_target = launder;
703         vnodes_skipped = 0;
704
705         /*
706          * Scan the laundry queues for pages eligible to be laundered.  We stop
707          * once the target number of dirty pages have been laundered, or once
708          * we've reached the end of the queue.  A single iteration of this loop
709          * may cause more than one page to be laundered because of clustering.
710          *
711          * maxscan ensures that we don't re-examine requeued pages.  Any
712          * additional pages written as part of a cluster are subtracted from
713          * maxscan since they must be taken from the laundry queue.
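             *
             * For example, if a single call to vm_pageout_clean() below
             * launders a cluster of four pages, "launder" drops by four, while
             * maxscan is reduced by an extra three on top of the loop's own
             * decrement, since the other three pages in the cluster were also
             * taken from the laundry queue.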
714          *
715          * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
716          * swap devices are configured.
717          */
718         if (atomic_load_acq_int(&swapdev_enabled))
719                 pq = &vmd->vmd_pagequeues[PQ_UNSWAPPABLE];
720         else
721                 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
722
723 scan:
724         vm_pagequeue_lock(pq);
725         maxscan = pq->pq_cnt;
726         queue_locked = true;
727         for (m = TAILQ_FIRST(&pq->pq_pl);
728             m != NULL && maxscan-- > 0 && launder > 0;
729             m = next) {
730                 vm_pagequeue_assert_locked(pq);
731                 KASSERT(queue_locked, ("unlocked laundry queue"));
732                 KASSERT(vm_page_in_laundry(m),
733                     ("page %p has an inconsistent queue", m));
734                 next = TAILQ_NEXT(m, plinks.q);
735                 if ((m->flags & PG_MARKER) != 0)
736                         continue;
737                 KASSERT((m->flags & PG_FICTITIOUS) == 0,
738                     ("PG_FICTITIOUS page %p cannot be in laundry queue", m));
739                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
740                     ("VPO_UNMANAGED page %p cannot be in laundry queue", m));
741                 if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
742                         vm_page_unlock(m);
743                         continue;
744                 }
745                 if (m->wire_count != 0) {
746                         vm_page_dequeue_locked(m);
747                         vm_page_unlock(m);
748                         continue;
749                 }
750                 object = m->object;
751                 if ((!VM_OBJECT_TRYWLOCK(object) &&
752                     (!vm_pageout_fallback_object_lock(m, &next) ||
753                     vm_page_held(m))) || vm_page_busied(m)) {
754                         VM_OBJECT_WUNLOCK(object);
755                         if (m->wire_count != 0 && vm_page_pagequeue(m) == pq)
756                                 vm_page_dequeue_locked(m);
757                         vm_page_unlock(m);
758                         continue;
759                 }
760
761                 /*
762                  * Unlock the laundry queue, invalidating the 'next' pointer.
763                  * Use a marker to remember our place in the laundry queue.
764                  */
765                 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_laundry_marker,
766                     plinks.q);
767                 vm_pagequeue_unlock(pq);
768                 queue_locked = false;
769
770                 /*
771                  * Invalid pages can be easily freed.  They cannot be
772                  * mapped; vm_page_free() asserts this.
773                  */
774                 if (m->valid == 0)
775                         goto free_page;
776
777                 /*
778                  * If the page has been referenced and the object is not dead,
779                  * reactivate or requeue the page depending on whether the
780                  * object is mapped.
781                  */
782                 if ((m->aflags & PGA_REFERENCED) != 0) {
783                         vm_page_aflag_clear(m, PGA_REFERENCED);
784                         act_delta = 1;
785                 } else
786                         act_delta = 0;
787                 if (object->ref_count != 0)
788                         act_delta += pmap_ts_referenced(m);
789                 else {
790                         KASSERT(!pmap_page_is_mapped(m),
791                             ("page %p is mapped", m));
792                 }
793                 if (act_delta != 0) {
794                         if (object->ref_count != 0) {
795                                 VM_CNT_INC(v_reactivated);
796                                 vm_page_activate(m);
797
798                                 /*
799                                  * Increase the activation count if the page
800                                  * was referenced while in the laundry queue.
801                                  * This makes it less likely that the page will
802                                  * be returned prematurely to the inactive
803                                  * queue.
804                                  */
805                                 m->act_count += act_delta + ACT_ADVANCE;
806
807                                 /*
808                                  * If this was a background laundering, count
809                                  * activated pages towards our target.  The
810                                  * purpose of background laundering is to ensure
811                                  * that pages are eventually cycled through the
812                                  * laundry queue, and an activation is a valid
813                                  * way out.
814                                  */
815                                 if (!in_shortfall)
816                                         launder--;
817                                 goto drop_page;
818                         } else if ((object->flags & OBJ_DEAD) == 0)
819                                 goto requeue_page;
820                 }
821
822                 /*
823                  * If the page appears to be clean at the machine-independent
824                  * layer, then remove all of its mappings from the pmap in
825                  * anticipation of freeing it.  If, however, any of the page's
826                  * mappings allow write access, then the page may still be
827                  * modified until the last of those mappings are removed.
828                  */
829                 if (object->ref_count != 0) {
830                         vm_page_test_dirty(m);
831                         if (m->dirty == 0)
832                                 pmap_remove_all(m);
833                 }
834
835                 /*
836                  * Clean pages are freed, and dirty pages are paged out unless
837                  * they belong to a dead object.  Requeueing dirty pages from
838                  * dead objects is pointless, as they are being paged out and
839                  * freed by the thread that destroyed the object.
840                  */
841                 if (m->dirty == 0) {
842 free_page:
843                         vm_page_free(m);
844                         VM_CNT_INC(v_dfree);
845                 } else if ((object->flags & OBJ_DEAD) == 0) {
846                         if (object->type != OBJT_SWAP &&
847                             object->type != OBJT_DEFAULT)
848                                 pageout_ok = true;
849                         else if (disable_swap_pageouts)
850                                 pageout_ok = false;
851                         else
852                                 pageout_ok = true;
853                         if (!pageout_ok) {
854 requeue_page:
855                                 vm_pagequeue_lock(pq);
856                                 queue_locked = true;
857                                 vm_page_requeue_locked(m);
858                                 goto drop_page;
859                         }
860
861                         /*
862                          * Form a cluster with adjacent, dirty pages from the
863                          * same object, and page out that entire cluster.
864                          *
865                          * The adjacent, dirty pages must also be in the
866                          * laundry.  However, their mappings are not checked
867                          * for new references.  Consequently, a recently
868                          * referenced page may be paged out.  However, that
869                          * page will not be prematurely reclaimed.  After page
870                          * out, the page will be placed in the inactive queue,
871                          * where any new references will be detected and the
872                          * page reactivated.
873                          */
874                         error = vm_pageout_clean(m, &numpagedout);
875                         if (error == 0) {
876                                 launder -= numpagedout;
877                                 maxscan -= numpagedout - 1;
878                         } else if (error == EDEADLK) {
879                                 pageout_lock_miss++;
880                                 vnodes_skipped++;
881                         }
882                         goto relock_queue;
883                 }
884 drop_page:
885                 vm_page_unlock(m);
886                 VM_OBJECT_WUNLOCK(object);
887 relock_queue:
888                 if (!queue_locked) {
889                         vm_pagequeue_lock(pq);
890                         queue_locked = true;
891                 }
892                 next = TAILQ_NEXT(&vmd->vmd_laundry_marker, plinks.q);
893                 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_laundry_marker, plinks.q);
894         }
895         vm_pagequeue_unlock(pq);
896
897         if (launder > 0 && pq == &vmd->vmd_pagequeues[PQ_UNSWAPPABLE]) {
898                 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
899                 goto scan;
900         }
901
902         /*
903          * Wake up the sync daemon if we skipped a vnode in a writeable object
904          * and we didn't launder enough pages.
905          */
906         if (vnodes_skipped > 0 && launder > 0)
907                 (void)speedup_syncer();
908
909         return (starting_target - launder);
910 }
911
912 /*
913  * Compute the integer square root.
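     *
     * This is the classic bit-by-bit method: "bit" steps down through the
     * powers of four, and a candidate bit is folded into the root whenever
     * the remaining value can absorb it, yielding floor(sqrt(num)).  For
     * example, isqrt(27) returns 5 and isqrt(100) returns 10.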
914  */
915 static u_int
916 isqrt(u_int num)
917 {
918         u_int bit, root, tmp;
919
920         bit = 1u << ((NBBY * sizeof(u_int)) - 2);
921         while (bit > num)
922                 bit >>= 2;
923         root = 0;
924         while (bit != 0) {
925                 tmp = root + bit;
926                 root >>= 1;
927                 if (num >= tmp) {
928                         num -= tmp;
929                         root += bit;
930                 }
931                 bit >>= 2;
932         }
933         return (root);
934 }
935
936 /*
937  * Perform the work of the laundry thread: periodically wake up and determine
938  * whether any pages need to be laundered.  If so, determine the number of pages
939  * that need to be laundered, and launder them.
940  */
941 static void
942 vm_pageout_laundry_worker(void *arg)
943 {
944         struct vm_domain *vmd;
945         struct vm_pagequeue *pq;
946         uint64_t nclean, ndirty;
947         u_int inactq_scans, last_launder;
948         int domain, last_target, launder, shortfall, shortfall_cycle, target;
949         bool in_shortfall;
950
951         domain = (uintptr_t)arg;
952         vmd = VM_DOMAIN(domain);
953         pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
954         KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
955         vm_pageout_init_marker(&vmd->vmd_laundry_marker, PQ_LAUNDRY);
956
957         shortfall = 0;
958         in_shortfall = false;
959         shortfall_cycle = 0;
960         target = 0;
961         inactq_scans = 0;
962         last_launder = 0;
963
964         /*
965          * Calls to these handlers are serialized by the swap syscall lock.
966          */
967         (void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
968             EVENTHANDLER_PRI_ANY);
969         (void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
970             EVENTHANDLER_PRI_ANY);
971
972         /*
973          * The pageout laundry worker is never done, so loop forever.
974          */
975         for (;;) {
976                 KASSERT(target >= 0, ("negative target %d", target));
977                 KASSERT(shortfall_cycle >= 0,
978                     ("negative cycle %d", shortfall_cycle));
979                 launder = 0;
980
981                 /*
982                  * First determine whether we need to launder pages to meet a
983                  * shortage of free pages.
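                     *
                     * For example (illustrative numbers), a shortfall of 1000
                     * pages is spread over VM_LAUNDER_RATE /
                     * VM_INACT_SCAN_RATE == 5 laundering runs, so each run
                     * below targets roughly target / shortfall_cycle == 200
                     * pages.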
984                  */
985                 if (shortfall > 0) {
986                         in_shortfall = true;
987                         shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
988                         target = shortfall;
989                 } else if (!in_shortfall)
990                         goto trybackground;
991                 else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
992                         /*
993                          * We recently entered shortfall and began laundering
994                          * pages.  If we have completed that laundering run
995                          * (and we are no longer in shortfall) or we have met
996                          * our laundry target through other activity, then we
997                          * can stop laundering pages.
998                          */
999                         in_shortfall = false;
1000                         target = 0;
1001                         goto trybackground;
1002                 }
1003                 last_launder = inactq_scans;
1004                 launder = target / shortfall_cycle--;
1005                 goto dolaundry;
1006
1007                 /*
1008                  * There's no immediate need to launder any pages; see if we
1009                  * meet the conditions to perform background laundering:
1010                  *
1011                  * 1. The ratio of dirty to clean inactive pages exceeds the
1012                  *    background laundering threshold and the pagedaemon has
1013                  *    been woken up to reclaim pages since our last
1014                  *    laundering, or
1015                  * 2. we haven't yet reached the target of the current
1016                  *    background laundering run.
1017                  *
1018                  * The background laundering threshold is not a constant.
1019                  * Instead, it is a slowly growing function of the number of
1020                  * page daemon scans since the last laundering.  Thus, as the
1021                  * ratio of dirty to clean inactive pages grows, the amount of
1022                  * memory pressure required to trigger laundering decreases.
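                     *
                     * For example (illustrative numbers), with nclean == 10000
                     * and ndirty == 1000, background laundering begins once
                     * isqrt(inactq_scans - last_launder) reaches 10, i.e.
                     * after roughly 100 inactive queue scans without a
                     * laundering; with ndirty == 5000 only about 4 scans are
                     * needed.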
1023                  */
1024 trybackground:
1025                 nclean = vmd->vmd_free_count +
1026                     vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
1027                 ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
1028                 if (target == 0 && inactq_scans != last_launder &&
1029                     ndirty * isqrt(inactq_scans - last_launder) >= nclean) {
1030                         target = vmd->vmd_background_launder_target;
1031                 }
1032
1033                 /*
1034                  * We have a non-zero background laundering target.  If we've
1035                  * laundered up to our maximum without observing a page daemon
1036                  * request, just stop.  This is a safety belt that ensures we
1037                  * don't launder an excessive amount if memory pressure is low
1038                  * and the ratio of dirty to clean pages is large.  Otherwise,
1039                  * proceed at the background laundering rate.
1040                  */
1041                 if (target > 0) {
1042                         if (inactq_scans != last_launder) {
1043                                 last_launder = inactq_scans;
1044                                 last_target = target;
1045                         } else if (last_target - target >=
1046                             vm_background_launder_max * PAGE_SIZE / 1024) {
1047                                 target = 0;
1048                         }
1049                         launder = vm_background_launder_rate * PAGE_SIZE / 1024;
1050                         launder /= VM_LAUNDER_RATE;
1051                         if (launder > target)
1052                                 launder = target;
1053                 }
1054
1055 dolaundry:
1056                 if (launder > 0) {
1057                         /*
1058                          * Because of I/O clustering, the number of laundered
1059                          * pages could exceed "target" by the maximum size of
1060                          * a cluster minus one. 
1061                          */
1062                         target -= min(vm_pageout_launder(vmd, launder,
1063                             in_shortfall), target);
1064                         pause("laundp", hz / VM_LAUNDER_RATE);
1065                 }
1066
1067                 /*
1068                  * If we're not currently laundering pages and the page daemon
1069                  * hasn't posted a new request, sleep until the page daemon
1070                  * kicks us.
1071                  */
1072                 vm_pagequeue_lock(pq);
1073                 if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
1074                         (void)mtx_sleep(&vmd->vmd_laundry_request,
1075                             vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1076
1077                 /*
1078                  * If the pagedaemon has indicated that it's in shortfall, start
1079                  * a shortfall laundering unless we're already in the middle of
1080                  * one.  This may preempt a background laundering.
1081                  */
1082                 if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
1083                     (!in_shortfall || shortfall_cycle == 0)) {
1084                         shortfall = vm_laundry_target(vmd) +
1085                             vmd->vmd_pageout_deficit;
1086                         target = 0;
1087                 } else
1088                         shortfall = 0;
1089
1090                 if (target == 0)
1091                         vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
1092                 inactq_scans = vmd->vmd_inactq_scans;
1093                 vm_pagequeue_unlock(pq);
1094         }
1095 }
1096
1097 /*
1098  *      vm_pageout_scan does the dirty work for the pageout daemon.
1099  *
1100  *      pass == 0: Update active LRU/deactivate pages
1101  *      pass >= 1: Free inactive pages
1102  *
1103  * Returns true if pass was zero or enough pages were freed by the inactive
1104  * queue scan to meet the target.
1105  */
1106 static bool
1107 vm_pageout_scan(struct vm_domain *vmd, int pass)
1108 {
1109         vm_page_t m, next;
1110         struct vm_pagequeue *pq;
1111         vm_object_t object;
1112         long min_scan;
1113         int act_delta, addl_page_shortage, deficit, inactq_shortage, maxscan;
1114         int page_shortage, scan_tick, scanned, starting_page_shortage;
1115         boolean_t queue_locked;
1116
1117         /*
1118          * If we need to reclaim memory, ask kernel caches to return
1119          * some.  We rate-limit to avoid thrashing.
1120          */
1121         if (vmd == VM_DOMAIN(0) && pass > 0 &&
1122             (time_uptime - lowmem_uptime) >= lowmem_period) {
1123                 /*
1124                  * Decrease registered cache sizes.
1125                  */
1126                 SDT_PROBE0(vm, , , vm__lowmem_scan);
1127                 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
1128                 /*
1129                  * We do this explicitly after the caches have been
1130                  * drained above.
1131                  */
1132                 uma_reclaim();
1133                 lowmem_uptime = time_uptime;
1134         }
1135
1136         /*
1137          * The addl_page_shortage is the number of temporarily
1138          * stuck pages in the inactive queue.  In other words, the
1139          * number of pages from the inactive count that should be
1140          * discounted in setting the target for the active queue scan.
1141          */
1142         addl_page_shortage = 0;
1143
1144         /*
1145          * Calculate the number of pages that we want to free.  This number
1146          * can be negative if many pages are freed between the wakeup call to
1147          * the page daemon and this calculation.
1148          */
1149         if (pass > 0) {
1150                 deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
1151                 page_shortage = vm_paging_target(vmd) + deficit;
1152         } else
1153                 page_shortage = deficit = 0;
1154         starting_page_shortage = page_shortage;
1155
1156         /*
1157          * Start scanning the inactive queue for pages that we can free.  The
1158          * scan will stop when we reach the target or we have scanned the
1159          * entire queue.  (Note that m->act_count is not used to make
1160          * decisions for the inactive queue, only for the active queue.)
1161          */
1162         pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1163         maxscan = pq->pq_cnt;
1164         vm_pagequeue_lock(pq);
1165         queue_locked = TRUE;
1166         for (m = TAILQ_FIRST(&pq->pq_pl);
1167              m != NULL && maxscan-- > 0 && page_shortage > 0;
1168              m = next) {
1169                 vm_pagequeue_assert_locked(pq);
1170                 KASSERT(queue_locked, ("unlocked inactive queue"));
1171                 KASSERT(vm_page_inactive(m), ("Inactive queue %p", m));
1172
1173                 VM_CNT_INC(v_pdpages);
1174                 next = TAILQ_NEXT(m, plinks.q);
1175
1176                 /*
1177                  * skip marker pages
1178                  */
1179                 if (m->flags & PG_MARKER)
1180                         continue;
1181
1182                 KASSERT((m->flags & PG_FICTITIOUS) == 0,
1183                     ("Fictitious page %p cannot be in inactive queue", m));
1184                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1185                     ("Unmanaged page %p cannot be in inactive queue", m));
1186
1187                 /*
1188                  * The page or object lock acquisitions fail if the
1189                  * page was removed from the queue or moved to a
1190                  * different position within the queue.  In either
1191                  * case, addl_page_shortage should not be incremented.
1192                  */
1193                 if (!vm_pageout_page_lock(m, &next))
1194                         goto unlock_page;
1195                 else if (m->wire_count != 0) {
1196                         /*
1197                          * Wired pages may not be freed, and unwiring a queued
1198                          * page will cause it to be requeued.  Thus, remove them
1199                          * from the queue now to avoid unnecessary revisits.
1200                          */
1201                         vm_page_dequeue_locked(m);
1202                         addl_page_shortage++;
1203                         goto unlock_page;
1204                 } else if (m->hold_count != 0) {
1205                         /*
1206                          * Held pages are essentially stuck in the
1207                          * queue.  So, they ought to be discounted
1208                          * from the inactive count.  See the
1209                          * calculation of inactq_shortage before the
1210                          * loop over the active queue below.
1211                          */
1212                         addl_page_shortage++;
1213                         goto unlock_page;
1214                 }
1215                 object = m->object;
1216                 if (!VM_OBJECT_TRYWLOCK(object)) {
1217                         if (!vm_pageout_fallback_object_lock(m, &next))
1218                                 goto unlock_object;
1219                         else if (m->wire_count != 0) {
1220                                 vm_page_dequeue_locked(m);
1221                                 addl_page_shortage++;
1222                                 goto unlock_object;
1223                         } else if (m->hold_count != 0) {
1224                                 addl_page_shortage++;
1225                                 goto unlock_object;
1226                         }
1227                 }
1228                 if (vm_page_busied(m)) {
1229                         /*
1230                          * Don't mess with busy pages.  Leave them at
1231                          * the front of the queue.  Most likely, they
1232                          * are being paged out and will leave the
1233                          * queue shortly after the scan finishes.  So,
1234                          * they ought to be discounted from the
1235                          * inactive count.
1236                          */
1237                         addl_page_shortage++;
1238 unlock_object:
1239                         VM_OBJECT_WUNLOCK(object);
1240 unlock_page:
1241                         vm_page_unlock(m);
1242                         continue;
1243                 }
1244                 KASSERT(!vm_page_held(m), ("Held page %p", m));
1245
1246                 /*
1247                  * Dequeue the inactive page and unlock the inactive page
1248                  * queue, invalidating the 'next' pointer.  Dequeueing the
1249                  * page here avoids a later reacquisition (and release) of
1250                  * the inactive page queue lock when vm_page_activate(),
1251                  * vm_page_free(), or vm_page_launder() is called.  Use a
1252                  * marker to remember our place in the inactive queue.
1253                  */
1254                 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
1255                 vm_page_dequeue_locked(m);
1256                 vm_pagequeue_unlock(pq);
1257                 queue_locked = FALSE;
1258
1259                 /*
1260                  * Invalid pages can be easily freed. They cannot be
1261                  * mapped, vm_page_free() asserts this.
1262                  */
1263                 if (m->valid == 0)
1264                         goto free_page;
1265
1266                 /*
1267                  * If the page has been referenced and the object is not dead,
1268                  * reactivate or requeue the page depending on whether the
1269                  * object is mapped.
1270                  */
1271                 if ((m->aflags & PGA_REFERENCED) != 0) {
1272                         vm_page_aflag_clear(m, PGA_REFERENCED);
1273                         act_delta = 1;
1274                 } else
1275                         act_delta = 0;
1276                 if (object->ref_count != 0) {
1277                         act_delta += pmap_ts_referenced(m);
1278                 } else {
1279                         KASSERT(!pmap_page_is_mapped(m),
1280                             ("vm_pageout_scan: page %p is mapped", m));
1281                 }
1282                 if (act_delta != 0) {
1283                         if (object->ref_count != 0) {
1284                                 VM_CNT_INC(v_reactivated);
1285                                 vm_page_activate(m);
1286
1287                                 /*
1288                                  * Increase the activation count if the page
1289                                  * was referenced while in the inactive queue.
1290                                  * This makes it less likely that the page will
1291                                  * be returned prematurely to the inactive
1292                                  * queue.
1293                                  */
1294                                 m->act_count += act_delta + ACT_ADVANCE;
1295                                 goto drop_page;
1296                         } else if ((object->flags & OBJ_DEAD) == 0) {
1297                                 vm_pagequeue_lock(pq);
1298                                 queue_locked = TRUE;
1299                                 m->queue = PQ_INACTIVE;
1300                                 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
1301                                 vm_pagequeue_cnt_inc(pq);
1302                                 goto drop_page;
1303                         }
1304                 }
1305
1306                 /*
1307                  * If the page appears to be clean at the machine-independent
1308                  * layer, then remove all of its mappings from the pmap in
1309                  * anticipation of freeing it.  If, however, any of the page's
1310                  * mappings allow write access, then the page may still be
1311                  * modified until the last of those mappings are removed.
1312                  */
1313                 if (object->ref_count != 0) {
1314                         vm_page_test_dirty(m);
1315                         if (m->dirty == 0)
1316                                 pmap_remove_all(m);
1317                 }
1318
1319                 /*
1320                  * Clean pages can be freed, but dirty pages must be sent back
1321                  * to the laundry, unless they belong to a dead object.
1322                  * Requeueing dirty pages from dead objects is pointless, as
1323                  * they are being paged out and freed by the thread that
1324                  * destroyed the object.
1325                  */
1326                 if (m->dirty == 0) {
1327 free_page:
1328                         vm_page_free(m);
1329                         VM_CNT_INC(v_dfree);
1330                         --page_shortage;
1331                 } else if ((object->flags & OBJ_DEAD) == 0)
1332                         vm_page_launder(m);
1333 drop_page:
1334                 vm_page_unlock(m);
1335                 VM_OBJECT_WUNLOCK(object);
1336                 if (!queue_locked) {
1337                         vm_pagequeue_lock(pq);
1338                         queue_locked = TRUE;
1339                 }
1340                 next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
1341                 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
1342         }
1343         vm_pagequeue_unlock(pq);
1344
1345         /*
1346          * Wake up the laundry thread so that it can perform any needed
1347          * laundering.  If we didn't meet our target, we're in shortfall and
1348          * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1349          * swap devices are configured, the laundry thread has no work to do, so
1350          * don't bother waking it up.
1351          *
1352          * The laundry thread uses the number of inactive queue scans elapsed
1353          * since the last laundering to determine whether to launder again, so
1354          * keep count.
1355          */
1356         if (starting_page_shortage > 0) {
1357                 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1358                 vm_pagequeue_lock(pq);
1359                 if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1360                     (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1361                         if (page_shortage > 0) {
1362                                 vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
1363                                 VM_CNT_INC(v_pdshortfalls);
1364                         } else if (vmd->vmd_laundry_request !=
1365                             VM_LAUNDRY_SHORTFALL)
1366                                 vmd->vmd_laundry_request =
1367                                     VM_LAUNDRY_BACKGROUND;
1368                         wakeup(&vmd->vmd_laundry_request);
1369                 }
1370                 vmd->vmd_inactq_scans++;
1371                 vm_pagequeue_unlock(pq);
1372         }
1373
1374         /*
1375          * Wake up the swapout daemon if we didn't free the targeted number of
1376          * pages.
1377          */
1378         if (page_shortage > 0)
1379                 vm_swapout_run();
1380
1381         /*
1382          * If the inactive queue scan fails repeatedly to meet its
1383          * target, kill the largest process.
1384          */
1385         vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1386
1387         /*
1388          * Compute the number of pages we want to try to move from the
1389          * active queue to either the inactive or laundry queue.
1390          *
1391          * When scanning active pages, we make clean pages count more heavily
1392          * towards the page shortage than dirty pages.  This is because dirty
1393          * pages must be laundered before they can be reused and thus have less
1394          * utility when attempting to quickly alleviate a shortage.  However,
1395          * this weighting also causes the scan to deactivate dirty pages more
1396          * this weighting also causes the scan to deactivate dirty pages more
1397          * aggressively, improving the effectiveness of clustering and
1398          */
1399         inactq_shortage = vmd->vmd_inactive_target - (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
1400             vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight) +
1401             vm_paging_target(vmd) + deficit + addl_page_shortage;
1402         inactq_shortage *= act_scan_laundry_weight;
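        /*
         * In other words, the shortage is the gap between the inactive
         * target and the current inventory of inactive pages plus laundry
         * pages discounted by act_scan_laundry_weight, adjusted for any
         * outstanding paging target and deficits.  Scaling the result by
         * act_scan_laundry_weight matches the bookkeeping in the active
         * scan below, where deactivating a clean page reduces the shortage
         * by the full weight while laundering a dirty page reduces it by
         * only one.
         */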
1403
1404         pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1405         vm_pagequeue_lock(pq);
1406         maxscan = pq->pq_cnt;
1407
1408         /*
1409          * If we're just idle polling, attempt to visit every
1410          * active page within 'update_period' seconds.
1411          */
1412         scan_tick = ticks;
1413         if (vm_pageout_update_period != 0) {
1414                 min_scan = pq->pq_cnt;
1415                 min_scan *= scan_tick - vmd->vmd_last_active_scan;
1416                 min_scan /= hz * vm_pageout_update_period;
1417         } else
1418                 min_scan = 0;
1419         if (min_scan > 0 || (inactq_shortage > 0 && maxscan > 0))
1420                 vmd->vmd_last_active_scan = scan_tick;
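        /*
         * For example, with the default vm_pageout_update_period of 600
         * seconds (set in vm_pageout_init() below) and hz ticks per second,
         * a queue of N active pages scanned T ticks after the previous scan
         * yields min_scan = N * T / (hz * 600), i.e. roughly N / 600 pages
         * for each one-second pass.
         */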
1421
1422         /*
1423          * Scan the active queue for pages that can be deactivated.  Update
1424          * the per-page activity counter and use it to identify deactivation
1425          * candidates.  Held pages may be deactivated.
1426          */
1427         for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned <
1428             min_scan || (inactq_shortage > 0 && scanned < maxscan)); m = next,
1429             scanned++) {
1430                 KASSERT(m->queue == PQ_ACTIVE,
1431                     ("vm_pageout_scan: page %p isn't active", m));
1432                 next = TAILQ_NEXT(m, plinks.q);
1433                 if ((m->flags & PG_MARKER) != 0)
1434                         continue;
1435                 KASSERT((m->flags & PG_FICTITIOUS) == 0,
1436                     ("Fictitious page %p cannot be in active queue", m));
1437                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1438                     ("Unmanaged page %p cannot be in active queue", m));
1439                 if (!vm_pageout_page_lock(m, &next)) {
1440                         vm_page_unlock(m);
1441                         continue;
1442                 }
1443
1444                 /*
1445                  * The count for page daemon pages is updated after checking
1446                  * the page for eligibility.
1447                  */
1448                 VM_CNT_INC(v_pdpages);
1449
1450                 /*
1451                  * Wired pages are dequeued lazily.
1452                  */
1453                 if (m->wire_count != 0) {
1454                         vm_page_dequeue_locked(m);
1455                         vm_page_unlock(m);
1456                         continue;
1457                 }
1458
1459                 /*
1460                  * Check to see "how much" the page has been used.
1461                  */
1462                 if ((m->aflags & PGA_REFERENCED) != 0) {
1463                         vm_page_aflag_clear(m, PGA_REFERENCED);
1464                         act_delta = 1;
1465                 } else
1466                         act_delta = 0;
1467
1468                 /*
1469                  * Perform an unsynchronized object ref count check.  While
1470                  * the page lock ensures that the page is not reallocated to
1471                  * another object, in particular, one with unmanaged mappings
1472                  * that cannot support pmap_ts_referenced(), two races are,
1473                  * nonetheless, possible:
1474                  * 1) The count was transitioning to zero, but we saw a non-
1475                  *    zero value.  pmap_ts_referenced() will return zero
1476                  *    because the page is not mapped.
1477                  * 2) The count was transitioning to one, but we saw zero. 
1478                  *    This race delays the detection of a new reference.  At
1479                  *    worst, we will deactivate and reactivate the page.
1480                  */
1481                 if (m->object->ref_count != 0)
1482                         act_delta += pmap_ts_referenced(m);
1483
1484                 /*
1485                  * Advance or decay the act_count based on recent usage.
1486                  */
1487                 if (act_delta != 0) {
1488                         m->act_count += ACT_ADVANCE + act_delta;
1489                         if (m->act_count > ACT_MAX)
1490                                 m->act_count = ACT_MAX;
1491                 } else
1492                         m->act_count -= min(m->act_count, ACT_DECLINE);
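                /*
                 * With the usual act_count constants from vm_page.h
                 * (ACT_ADVANCE 3, ACT_DECLINE 1, ACT_MAX 64), a page
                 * referenced on every scan saturates at 64, while an
                 * unreferenced page loses one point per scan and becomes a
                 * candidate for deactivation or laundering once it reaches
                 * zero.
                 */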
1493
1494                 /*
1495                  * Move this page to the tail of the active, inactive or laundry
1496                  * queue depending on usage.
1497                  */
1498                 if (m->act_count == 0) {
1499                         /* Dequeue to avoid later lock recursion. */
1500                         vm_page_dequeue_locked(m);
1501
1502                         /*
1503                          * When not short for inactive pages, let dirty pages go
1504                          * through the inactive queue before moving to the
1505                          * laundry queues.  This gives them some extra time to
1506                          * be reactivated, potentially avoiding an expensive
1507                          * pageout.  During a page shortage, the inactive queue
1508                          * is necessarily small, so we may move dirty pages
1509                          * directly to the laundry queue.
1510                          */
1511                         if (inactq_shortage <= 0)
1512                                 vm_page_deactivate(m);
1513                         else {
1514                                 /*
1515                                  * Calling vm_page_test_dirty() here would
1516                                  * require acquisition of the object's write
1517                                  * lock.  However, during a page shortage,
1518                                  * directing dirty pages into the laundry
1519                                  * queue is only an optimization and not a
1520                                  * requirement.  Therefore, we simply rely on
1521                                  * the opportunistic updates to the page's
1522                                  * dirty field by the pmap.
1523                                  */
1524                                 if (m->dirty == 0) {
1525                                         vm_page_deactivate(m);
1526                                         inactq_shortage -=
1527                                             act_scan_laundry_weight;
1528                                 } else {
1529                                         vm_page_launder(m);
1530                                         inactq_shortage--;
1531                                 }
1532                         }
1533                 } else
1534                         vm_page_requeue_locked(m);
1535                 vm_page_unlock(m);
1536         }
1537         vm_pagequeue_unlock(pq);
1538         if (pass > 0)
1539                 vm_swapout_run_idle();
1540         return (page_shortage <= 0);
1541 }
1542
1543 static int vm_pageout_oom_vote;
1544
1545 /*
1546  * The pagedaemon threads each cast a vote, and the last thread to
1547  * vote performs the OOM kill.  Trying to kill processes before all
1548  * pagedaemons have failed to reach the free page target is premature.
1549  */
1550 static void
1551 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1552     int starting_page_shortage)
1553 {
1554         int old_vote;
1555
1556         if (starting_page_shortage <= 0 || starting_page_shortage !=
1557             page_shortage)
1558                 vmd->vmd_oom_seq = 0;
1559         else
1560                 vmd->vmd_oom_seq++;
1561         if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1562                 if (vmd->vmd_oom) {
1563                         vmd->vmd_oom = FALSE;
1564                         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1565                 }
1566                 return;
1567         }
1568
1569         /*
1570          * Reset the sequence counter so that another full sequence of
1571          * failed scans is required before the vote is repeated.
1572          */
1573         vmd->vmd_oom_seq = 0;
1574
1575         if (vmd->vmd_oom)
1576                 return;
1577
1578         vmd->vmd_oom = TRUE;
1579         old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1580         if (old_vote != vm_ndomains - 1)
1581                 return;
1582
1583         /*
1584          * The current pagedaemon thread is the last in the quorum to
1585          * start OOM.  Initiate the selection and signaling of the
1586          * victim.
1587          */
1588         vm_pageout_oom(VM_OOM_MEM);
1589
1590         /*
1591          * After one round of OOM terror, recall our vote.  On the
1592          * next pass, current pagedaemon would vote again if the low
1593          * memory condition is still there, due to vmd_oom being
1594          * false.
1595          */
1596         vmd->vmd_oom = FALSE;
1597         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1598 }
1599
1600 /*
1601  * The OOM killer is the page daemon's action of last resort when
1602  * memory allocation requests have been stalled for a prolonged period
1603  * of time because it cannot reclaim memory.  This function computes
1604  * the approximate number of physical pages that could be reclaimed if
1605  * the specified address space is destroyed.
1606  *
1607  * Private, anonymous memory owned by the address space is the
1608  * principal resource that we expect to recover after an OOM kill.
1609  * Since the physical pages mapped by the address space's COW entries
1610  * are typically shared pages, they are unlikely to be released and so
1611  * they are not counted.
1612  *
1613  * To get to the point where the page daemon runs the OOM killer, its
1614  * efforts to write-back vnode-backed pages may have stalled.  This
1615  * could be caused by a memory allocation deadlock in the write path
1616  * that might be resolved by an OOM kill.  Therefore, physical pages
1617  * belonging to vnode-backed objects are counted, because they might
1618  * be freed without being written out first if the address space holds
1619  * the last reference to an unlinked vnode.
1620  *
1621  * Similarly, physical pages belonging to OBJT_PHYS objects are
1622  * counted because the address space might hold the last reference to
1623  * the object.
1624  */
1625 static long
1626 vm_pageout_oom_pagecount(struct vmspace *vmspace)
1627 {
1628         vm_map_t map;
1629         vm_map_entry_t entry;
1630         vm_object_t obj;
1631         long res;
1632
1633         map = &vmspace->vm_map;
1634         KASSERT(!map->system_map, ("system map"));
1635         sx_assert(&map->lock, SA_LOCKED);
1636         res = 0;
1637         for (entry = map->header.next; entry != &map->header;
1638             entry = entry->next) {
1639                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1640                         continue;
1641                 obj = entry->object.vm_object;
1642                 if (obj == NULL)
1643                         continue;
1644                 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
1645                     obj->ref_count != 1)
1646                         continue;
1647                 switch (obj->type) {
1648                 case OBJT_DEFAULT:
1649                 case OBJT_SWAP:
1650                 case OBJT_PHYS:
1651                 case OBJT_VNODE:
1652                         res += obj->resident_page_count;
1653                         break;
1654                 }
1655         }
1656         return (res);
1657 }
1658
1659 void
1660 vm_pageout_oom(int shortage)
1661 {
1662         struct proc *p, *bigproc;
1663         vm_offset_t size, bigsize;
1664         struct thread *td;
1665         struct vmspace *vm;
1666         bool breakout;
1667
1668         /*
1669          * We keep the process bigproc locked once we find it to keep anyone
1670          * from messing with it; however, there is a possibility of
1671          * deadlock if process B is bigproc and one of its child processes
1672          * attempts to propagate a signal to B while we are waiting for
1673          * another process's lock while walking this list.  To avoid this,
1674          * we don't block on the process lock but just skip a locked process.
1675          */
1676         bigproc = NULL;
1677         bigsize = 0;
1678         sx_slock(&allproc_lock);
1679         FOREACH_PROC_IN_SYSTEM(p) {
1680                 PROC_LOCK(p);
1681
1682                 /*
1683                  * If this is a system, protected or killed process, skip it.
1684                  */
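                /*
                 * pid 1 (init) is never a candidate, and as long as swap
                 * space remains available, low-numbered pids (typically
                 * early-started system daemons) are spared as well.
                 */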
1685                 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1686                     P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1687                     p->p_pid == 1 || P_KILLED(p) ||
1688                     (p->p_pid < 48 && swap_pager_avail != 0)) {
1689                         PROC_UNLOCK(p);
1690                         continue;
1691                 }
1692                 /*
1693                  * If the process is in a non-running type state,
1694                  * don't touch it.  Check all the threads individually.
1695                  */
1696                 breakout = false;
1697                 FOREACH_THREAD_IN_PROC(p, td) {
1698                         thread_lock(td);
1699                         if (!TD_ON_RUNQ(td) &&
1700                             !TD_IS_RUNNING(td) &&
1701                             !TD_IS_SLEEPING(td) &&
1702                             !TD_IS_SUSPENDED(td) &&
1703                             !TD_IS_SWAPPED(td)) {
1704                                 thread_unlock(td);
1705                                 breakout = true;
1706                                 break;
1707                         }
1708                         thread_unlock(td);
1709                 }
1710                 if (breakout) {
1711                         PROC_UNLOCK(p);
1712                         continue;
1713                 }
1714                 /*
1715                  * get the process size
1716                  */
1717                 vm = vmspace_acquire_ref(p);
1718                 if (vm == NULL) {
1719                         PROC_UNLOCK(p);
1720                         continue;
1721                 }
1722                 _PHOLD_LITE(p);
1723                 PROC_UNLOCK(p);
1724                 sx_sunlock(&allproc_lock);
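                /*
                 * The map lock may be held by a thread that is itself
                 * blocked waiting for memory; rather than risk stalling the
                 * page daemon, skip the process if the lock cannot be
                 * acquired immediately.
                 */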
1725                 if (!vm_map_trylock_read(&vm->vm_map)) {
1726                         vmspace_free(vm);
1727                         sx_slock(&allproc_lock);
1728                         PRELE(p);
1729                         continue;
1730                 }
1731                 size = vmspace_swap_count(vm);
1732                 if (shortage == VM_OOM_MEM)
1733                         size += vm_pageout_oom_pagecount(vm);
1734                 vm_map_unlock_read(&vm->vm_map);
1735                 vmspace_free(vm);
1736                 sx_slock(&allproc_lock);
1737
1738                 /*
1739                  * If this process is bigger than the biggest one,
1740                  * remember it.
1741                  */
1742                 if (size > bigsize) {
1743                         if (bigproc != NULL)
1744                                 PRELE(bigproc);
1745                         bigproc = p;
1746                         bigsize = size;
1747                 } else {
1748                         PRELE(p);
1749                 }
1750         }
1751         sx_sunlock(&allproc_lock);
1752         if (bigproc != NULL) {
1753                 int i;
1754
1755                 if (vm_panic_on_oom != 0)
1756                         panic("out of swap space");
1757                 PROC_LOCK(bigproc);
1758                 killproc(bigproc, "out of swap space");
1759                 sched_nice(bigproc, PRIO_MIN);
1760                 _PRELE(bigproc);
1761                 PROC_UNLOCK(bigproc);
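                /*
                 * Wake up any threads sleeping in pagedaemon_wait() or
                 * VM_WAIT; the pages released by the killed process should
                 * allow their allocations to succeed.
                 */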
1762                 for (i = 0; i < vm_ndomains; i++)
1763                         wakeup(&VM_DOMAIN(i)->vmd_free_count);
1764         }
1765 }
1766
1767 static void
1768 vm_pageout_worker(void *arg)
1769 {
1770         struct vm_domain *vmd;
1771         int domain, pass;
1772         bool target_met;
1773
1774         domain = (uintptr_t)arg;
1775         vmd = VM_DOMAIN(domain);
1776         pass = 0;
1777         target_met = true;
1778
1779         /*
1780          * XXXKIB It could be useful to bind pageout daemon threads to
1781          * the cores belonging to the domain, from which vm_page_array
1782          * is allocated.
1783          */
1784
1785         KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
1786         vmd->vmd_last_active_scan = ticks;
1787         vm_pageout_init_marker(&vmd->vmd_marker, PQ_INACTIVE);
1788         vm_pageout_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE);
1789         TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
1790             &vmd->vmd_inacthead, plinks.q);
1791
1792         /*
1793          * The pageout daemon worker is never done, so loop forever.
1794          */
1795         while (TRUE) {
1796                 vm_domain_free_lock(vmd);
1797
1798                 /*
1799                  * Generally, after a level >= 1 scan, if there are enough
1800                  * free pages to wakeup the waiters, then they are already
1801                  * awake.  A call to vm_page_free() during the scan awakened
1802                  * them.  However, in the following case, this wakeup serves
1803                  * to bound the amount of time that a thread might wait.
1804                  * Suppose a thread's call to vm_page_alloc() fails, but
1805                  * before that thread calls VM_WAIT, enough pages are freed by
1806                  * other threads to alleviate the free page shortage.  The
1807                  * thread will, nonetheless, wait until another page is freed
1808                  * or this wakeup is performed.
1809                  */
1810                 if (vmd->vmd_pages_needed && !vm_paging_min(vmd)) {
1811                         vmd->vmd_pages_needed = false;
1812                         wakeup(&vmd->vmd_free_count);
1813                 }
1814
1815                 /*
1816                  * Do not clear vmd_pageout_wanted until we reach our free page
1817                  * target.  Otherwise, we may be awakened over and over again,
1818                  * wasting CPU time.
1819                  */
1820                 if (vmd->vmd_pageout_wanted && target_met)
1821                         vmd->vmd_pageout_wanted = false;
1822
1823                 /*
1824                  * Might the page daemon receive a wakeup call?
1825                  */
1826                 if (vmd->vmd_pageout_wanted) {
1827                         /*
1828                          * No.  Either vmd_pageout_wanted was set by another
1829                          * thread during the previous scan, which must have
1830                          * been a level 0 scan, or vmd_pageout_wanted was
1831                          * already set and the scan failed to free enough
1832                          * pages.  If we haven't yet performed a level >= 1
1833                          * (page reclamation) scan, then increase the level
1834                          * and scan again now.  Otherwise, sleep a bit and
1835                          * try again later.
1836                          */
1837                         vm_domain_free_unlock(vmd);
1838                         if (pass >= 1)
1839                                 pause("pwait", hz / VM_INACT_SCAN_RATE);
1840                         pass++;
1841                 } else {
1842                         /*
1843                          * Yes.  If threads are still sleeping in VM_WAIT
1844                          * then we immediately start a new scan.  Otherwise,
1845                          * sleep until the next wakeup or until pages need to
1846                          * have their reference stats updated.
1847                          */
1848                         if (vmd->vmd_pages_needed) {
1849                                 vm_domain_free_unlock(vmd);
1850                                 if (pass == 0)
1851                                         pass++;
1852                         } else if (mtx_sleep(&vmd->vmd_pageout_wanted,
1853                             vm_domain_free_lockptr(vmd), PDROP | PVM,
1854                             "psleep", hz) == 0) {
1855                                 VM_CNT_INC(v_pdwakeups);
1856                                 pass = 1;
1857                         } else
1858                                 pass = 0;
1859                 }
1860
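                /*
                 * pass 0 only updates active page reference statistics,
                 * while pass >= 1 asks vm_pageout_scan() to reclaim pages
                 * to meet the free page target.
                 */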
1861                 target_met = vm_pageout_scan(vmd, pass);
1862         }
1863 }
1864
1865 /*
1866  *      vm_pageout_init_domain initialises basic per-domain pageout settings.
1867  */
1868 static void
1869 vm_pageout_init_domain(int domain)
1870 {
1871         struct vm_domain *vmd;
1872
1873         vmd = VM_DOMAIN(domain);
1874         vmd->vmd_interrupt_free_min = 2;
1875
1876         /*
1877          * v_free_reserved needs to include enough for the largest
1878          * swap pager structures plus enough for any pv_entry structs
1879          * when paging. 
1880          */
1881         if (vmd->vmd_page_count > 1024)
1882                 vmd->vmd_free_min = 4 + (vmd->vmd_page_count - 1024) / 200;
1883         else
1884                 vmd->vmd_free_min = 4;
1885         vmd->vmd_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1886             vmd->vmd_interrupt_free_min;
1887         vmd->vmd_free_reserved = vm_pageout_page_count +
1888             vmd->vmd_pageout_free_min + (vmd->vmd_page_count / 768);
1889         vmd->vmd_free_severe = vmd->vmd_free_min / 2;
1890         vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
1891         vmd->vmd_free_min += vmd->vmd_free_reserved;
1892         vmd->vmd_free_severe += vmd->vmd_free_reserved;
1893         vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
1894         if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
1895                 vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
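        /*
         * As a rough illustration, for a hypothetical domain of 1,048,576
         * 4 KB pages (4 GB), assuming MAXBSIZE of 64 KB and the default
         * vm_pageout_page_count of 32, this works out to a free_min of
         * about 6,700 pages (~26 MB), a free_target of about 22,400 pages
         * (~87 MB), and an inactive_target of about 33,600 pages (~131 MB)
         * before the free_count / 3 cap is applied.
         */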
1896
1897         /*
1898          * Set the default wakeup threshold to be 10% above the minimum
1899          * page limit.  This keeps the steady state out of shortfall.
1900          */
1901         vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_min / 10) * 11;
1902
1903         /*
1904          * Target amount of memory to move out of the laundry queue during a
1905          * background laundering.  This is proportional to the amount of system
1906          * memory.
1907          */
1908         vmd->vmd_background_launder_target = (vmd->vmd_free_target -
1909             vmd->vmd_free_min) / 10;
1910 }
1911
1912 static void
1913 vm_pageout_init(void)
1914 {
1915         u_int freecount;
1916         int i;
1917
1918         /*
1919          * Initialize some paging parameters.
1920          */
1921         if (vm_cnt.v_page_count < 2000)
1922                 vm_pageout_page_count = 8;
1923
1924         freecount = 0;
1925         for (i = 0; i < vm_ndomains; i++) {
1926                 struct vm_domain *vmd;
1927
1928                 vm_pageout_init_domain(i);
1929                 vmd = VM_DOMAIN(i);
1930                 vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
1931                 vm_cnt.v_free_target += vmd->vmd_free_target;
1932                 vm_cnt.v_free_min += vmd->vmd_free_min;
1933                 vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
1934                 vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
1935                 vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
1936                 vm_cnt.v_free_severe += vmd->vmd_free_severe;
1937                 freecount += vmd->vmd_free_count;
1938         }
1939
1940         /*
1941          * Set interval in seconds for active scan.  We want to visit each
1942          * page at least once every ten minutes.  This is to prevent worst
1943          * case paging behaviors with stale active LRU.
1944          */
1945         if (vm_pageout_update_period == 0)
1946                 vm_pageout_update_period = 600;
1947
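        /*
         * If the limit has not already been set, allow at most one third of
         * the pages that were free at initialization time to be wired.
         */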
1948         if (vm_page_max_wired == 0)
1949                 vm_page_max_wired = freecount / 3;
1950 }
1951
1952 /*
1953  *     vm_pageout is the high level pageout daemon.
1954  */
1955 static void
1956 vm_pageout(void)
1957 {
1958         int error;
1959         int i;
1960
1961         swap_pager_swap_init();
1962         error = kthread_add(vm_pageout_laundry_worker, NULL, curproc, NULL,
1963             0, 0, "laundry: dom0");
1964         if (error != 0)
1965                 panic("starting laundry for domain 0, error %d", error);
1966         for (i = 1; i < vm_ndomains; i++) {
1967                 error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i,
1968                     curproc, NULL, 0, 0, "dom%d", i);
1969                 if (error != 0) {
1970                         panic("starting pageout for domain %d, error %d\n",
1971                             i, error);
1972                 }
1973                 error = kthread_add(vm_pageout_laundry_worker,
1974                     (void *)(uintptr_t)i, curproc, NULL, 0, 0,
1975                     "laundry: dom%d", i);
1976                 if (error != 0)
1977                         panic("starting laundry for domain %d, error %d",
1978                             i, error);
1979         }
1980         error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL,
1981             0, 0, "uma");
1982         if (error != 0)
1983                 panic("starting uma_reclaim helper, error %d\n", error);
1984         vm_pageout_worker((void *)(uintptr_t)0);
1985 }
1986
1987 /*
1988  * Perform an advisory wakeup of the page daemon.
1989  */
1990 void
1991 pagedaemon_wakeup(int domain)
1992 {
1993         struct vm_domain *vmd;
1994
1995         vmd = VM_DOMAIN(domain);
1996         vm_domain_free_assert_unlocked(vmd);
1997
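        /*
         * The page daemon never needs to wake itself; only record and
         * deliver the request when it comes from another thread.
         */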
1998         if (!vmd->vmd_pageout_wanted && curthread->td_proc != pageproc) {
1999                 vmd->vmd_pageout_wanted = true;
2000                 wakeup(&vmd->vmd_pageout_wanted);
2001         }
2002 }
2003
2004 /*
2005  * Wake up the page daemon and wait for it to reclaim free pages.
2006  *
2007  * This function returns with the free queues mutex unlocked.
2008  */
2009 void
2010 pagedaemon_wait(int domain, int pri, const char *wmesg)
2011 {
2012         struct vm_domain *vmd;
2013
2014         vmd = VM_DOMAIN(domain);
2015         vm_domain_free_assert_locked(vmd);
2016
2017         /*
2018          * vmd_pageout_wanted may have been set by an advisory wakeup, but if
2019          * the page daemon is running on a CPU, the wakeup will have been lost.
2020          * Thus, deliver a potentially spurious wakeup to ensure that the page
2021          * daemon has been notified of the shortage.
2022          */
2023         if (!vmd->vmd_pageout_wanted || !vmd->vmd_pages_needed) {
2024                 vmd->vmd_pageout_wanted = true;
2025                 wakeup(&vmd->vmd_pageout_wanted);
2026         }
2027         vmd->vmd_pages_needed = true;
2028         vmd->vmd_waiters++;
2029         msleep(&vmd->vmd_free_count, vm_domain_free_lockptr(vmd), PDROP | pri,
2030             wmesg, 0);
2031         vmd->vmd_waiters--;
2032 }