/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *      The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static void vm_pageout_init(void);
static int vm_pageout_clean(vm_page_t m, int *numpagedout);
static int vm_pageout_cluster(vm_page_t m);
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
    int starting_page_shortage);

SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
    NULL);

struct proc *pageproc;

static struct kproc_desc page_kp = {
        "pagedaemon",
        vm_pageout,
        &pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
    &page_kp);

SDT_PROVIDER_DEFINE(vm);
SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);

/* Pagedaemon activity rates, in subdivisions of one second. */
#define VM_LAUNDER_RATE         10
#define VM_INACT_SCAN_RATE      10

static int vm_pageout_oom_seq = 12;

static int vm_pageout_update_period;
static int disable_swap_pageouts;
static int lowmem_period = 10;
static time_t lowmem_uptime;
static int swapdev_enabled;

static int vm_panic_on_oom = 0;

SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
        CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
        "panic on out of memory instead of killing the largest process");

SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
        CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
        "Maximum active LRU update period");

SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
        "Low memory callback period");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
        CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
        CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
        CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
        "back-to-back calls to oom detector to start OOM");

static int act_scan_laundry_weight = 3;
SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
    &act_scan_laundry_weight, 0,
    "weight given to clean vs. dirty pages in active queue scans");

static u_int vm_background_launder_rate = 4096;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
    &vm_background_launder_rate, 0,
    "background laundering rate, in kilobytes per second");

static u_int vm_background_launder_max = 20 * 1024;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
    &vm_background_launder_max, 0, "background laundering cap, in kilobytes");

int vm_pageout_page_count = 32;

int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
        CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

static u_int isqrt(u_int num);
static int vm_pageout_launder(struct vm_domain *vmd, int launder,
    bool in_shortfall);
static void vm_pageout_laundry_worker(void *arg);

struct scan_state {
        struct vm_batchqueue bq;
        struct vm_pagequeue *pq;
        vm_page_t       marker;
        int             maxscan;
        int             scanned;
};

static void
vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
    vm_page_t marker, vm_page_t after, int maxscan)
{

        vm_pagequeue_assert_locked(pq);
        KASSERT((marker->aflags & PGA_ENQUEUED) == 0,
            ("marker %p already enqueued", marker));

        if (after == NULL)
                TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
        else
                TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
        vm_page_aflag_set(marker, PGA_ENQUEUED);

        vm_batchqueue_init(&ss->bq);
        ss->pq = pq;
        ss->marker = marker;
        ss->maxscan = maxscan;
        ss->scanned = 0;
        vm_pagequeue_unlock(pq);
}

static void
vm_pageout_end_scan(struct scan_state *ss)
{
        struct vm_pagequeue *pq;

        pq = ss->pq;
        vm_pagequeue_assert_locked(pq);
        KASSERT((ss->marker->aflags & PGA_ENQUEUED) != 0,
            ("marker %p not enqueued", ss->marker));

        TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
        vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
        VM_CNT_ADD(v_pdpages, ss->scanned);
}

/*
 * Add a small number of queued pages to a batch queue for later processing
 * without the corresponding queue lock held.  The caller must have enqueued a
 * marker page at the desired start point for the scan.  Pages will be
 * physically dequeued if the caller so requests.  Otherwise, the returned
 * batch may contain marker pages, and it is up to the caller to handle them.
 *
 * When processing the batch queue, vm_page_queue() must be used to
 * determine whether the page has been logically dequeued by another thread.
 * Once this check is performed, the page lock guarantees that the page will
 * not be disassociated from the queue.
 */
static __always_inline void
vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
{
        struct vm_pagequeue *pq;
        vm_page_t m, marker;

        marker = ss->marker;
        pq = ss->pq;

        KASSERT((marker->aflags & PGA_ENQUEUED) != 0,
            ("marker %p not enqueued", ss->marker));

        vm_pagequeue_lock(pq);
        for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
            ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
            m = TAILQ_NEXT(m, plinks.q), ss->scanned++) {
                if ((m->flags & PG_MARKER) == 0) {
                        KASSERT((m->aflags & PGA_ENQUEUED) != 0,
                            ("page %p not enqueued", m));
                        KASSERT((m->flags & PG_FICTITIOUS) == 0,
                            ("Fictitious page %p cannot be in page queue", m));
                        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
                            ("Unmanaged page %p cannot be in page queue", m));
                } else if (dequeue)
                        continue;

                (void)vm_batchqueue_insert(&ss->bq, m);
                if (dequeue) {
                        TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
                        vm_page_aflag_clear(m, PGA_ENQUEUED);
                }
        }
        TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
        if (__predict_true(m != NULL))
                TAILQ_INSERT_BEFORE(m, marker, plinks.q);
        else
                TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
        if (dequeue)
                vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
        vm_pagequeue_unlock(pq);
}

/* Return the next page to be scanned, or NULL if the scan is complete. */
static __always_inline vm_page_t
vm_pageout_next(struct scan_state *ss, const bool dequeue)
{

        if (ss->bq.bq_cnt == 0)
                vm_pageout_collect_batch(ss, dequeue);
        return (vm_batchqueue_pop(&ss->bq));
}

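/*
 * Example: together, the helpers above form a queue iterator.  A typical
 * scan, such as the laundry queue scan in vm_pageout_launder() below,
 * follows this pattern:
 *
 *      vm_pagequeue_lock(pq);
 *      vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 *      while ((m = vm_pageout_next(&ss, false)) != NULL) {
 *              ... process m with the queue lock dropped ...
 *      }
 *      vm_pagequeue_lock(pq);
 *      vm_pageout_end_scan(&ss);
 *      vm_pagequeue_unlock(pq);
 *
 * vm_pageout_init_scan() drops the queue lock; vm_pageout_next()
 * reacquires it only long enough to refill the batch queue.
 */
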
/*
 * Scan for pages at adjacent offsets within the given page's object that are
 * eligible for laundering, form a cluster of these pages and the given page,
 * and launder that cluster.
 */
static int
vm_pageout_cluster(vm_page_t m)
{
        vm_object_t object;
        vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
        vm_pindex_t pindex;
        int ib, is, page_base, pageout_count;

        vm_page_assert_locked(m);
        object = m->object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        pindex = m->pindex;

        vm_page_assert_unbusied(m);
        KASSERT(!vm_page_held(m), ("page %p is held", m));

        pmap_remove_write(m);
        vm_page_unlock(m);

        mc[vm_pageout_page_count] = pb = ps = m;
        pageout_count = 1;
        page_base = vm_pageout_page_count;
        ib = 1;
        is = 1;

        /*
         * We can cluster only if the page is not clean, busy, or held, and
         * the page is in the laundry queue.
         *
         * During heavy mmap/modification loads the pageout
         * daemon can really fragment the underlying file
         * due to flushing pages out of order and not trying to
         * align the clusters (which leaves sporadic out-of-order
         * holes).  To solve this problem we do the reverse scan
         * first and attempt to align our cluster, then do a
         * forward scan if room remains.
         */
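        /*
         * For example, with the default vm_pageout_page_count of 32, the
         * reverse scan stops once the cluster's lowest pindex is a
         * multiple of 32, so that clusters tend to cover aligned 32-page
         * runs of the object.
         */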
more:
        while (ib != 0 && pageout_count < vm_pageout_page_count) {
                if (ib > pindex) {
                        ib = 0;
                        break;
                }
                if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
                        ib = 0;
                        break;
                }
                vm_page_test_dirty(p);
                if (p->dirty == 0) {
                        ib = 0;
                        break;
                }
                vm_page_lock(p);
                if (vm_page_held(p) || !vm_page_in_laundry(p)) {
                        vm_page_unlock(p);
                        ib = 0;
                        break;
                }
                pmap_remove_write(p);
                vm_page_unlock(p);
                mc[--page_base] = pb = p;
                ++pageout_count;
                ++ib;

                /*
                 * We are at an alignment boundary.  Stop here, and switch
                 * directions.  Do not clear ib.
                 */
                if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
                        break;
        }
        while (pageout_count < vm_pageout_page_count &&
            pindex + is < object->size) {
                if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
                        break;
                vm_page_test_dirty(p);
                if (p->dirty == 0)
                        break;
                vm_page_lock(p);
                if (vm_page_held(p) || !vm_page_in_laundry(p)) {
                        vm_page_unlock(p);
                        break;
                }
                pmap_remove_write(p);
                vm_page_unlock(p);
                mc[page_base + pageout_count] = ps = p;
                ++pageout_count;
                ++is;
        }

        /*
         * If we exhausted our forward scan, continue with the reverse scan
         * when possible, even past an alignment boundary.  This catches
         * boundary conditions.
         */
        if (ib != 0 && pageout_count < vm_pageout_page_count)
                goto more;

        return (vm_pageout_flush(&mc[page_base], pageout_count,
            VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *      The given pages are laundered.  Note that we set up for the start of
 *      I/O (i.e., busy the page), mark it read-only, and bump the object
 *      reference count all in here rather than in the parent.  If we want
 *      the parent to do more sophisticated things we may have to change
 *      the ordering.
 *
 *      The returned runlen is the count of pages between mreq and the first
 *      page after mreq with status VM_PAGER_AGAIN.
 *      *eio is set to TRUE if the pager returned VM_PAGER_ERROR or
 *      VM_PAGER_FAIL for any page in that run.
 */
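/*
 * For example, with count == 8 and mreq == 3, if the pager reports
 * VM_PAGER_AGAIN for mc[6] only, the returned runlen is 3: pages 3, 4,
 * and 5 were disposed of before the first VM_PAGER_AGAIN page.
 */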
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
        vm_object_t object = mc[0]->object;
        int pageout_status[count];
        int numpagedout = 0;
        int i, runlen;

        VM_OBJECT_ASSERT_WLOCKED(object);

        /*
         * Initiate I/O.  Mark the pages busy and verify that they're valid
         * and read-only.
         *
         * We do not have to fix up the clean/dirty bits here... we can
         * allow the pager to do it after the I/O completes.
         *
         * NOTE! mc[i]->dirty may be partial or fragmented due to an
         * edge case with file fragments.
         */
        for (i = 0; i < count; i++) {
                KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
                    ("vm_pageout_flush: partially invalid page %p index %d/%d",
                        mc[i], i, count));
                KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
                    ("vm_pageout_flush: writeable page %p", mc[i]));
                vm_page_sbusy(mc[i]);
        }
        vm_object_pip_add(object, count);

        vm_pager_put_pages(object, mc, count, flags, pageout_status);

        runlen = count - mreq;
        if (eio != NULL)
                *eio = FALSE;
        for (i = 0; i < count; i++) {
                vm_page_t mt = mc[i];

                KASSERT(pageout_status[i] == VM_PAGER_PEND ||
                    !pmap_page_is_write_mapped(mt),
                    ("vm_pageout_flush: page %p is not write protected", mt));
                switch (pageout_status[i]) {
                case VM_PAGER_OK:
                        vm_page_lock(mt);
                        if (vm_page_in_laundry(mt))
                                vm_page_deactivate_noreuse(mt);
                        vm_page_unlock(mt);
                        /* FALLTHROUGH */
                case VM_PAGER_PEND:
                        numpagedout++;
                        break;
                case VM_PAGER_BAD:
                        /*
                         * The page is outside the object's range.  We pretend
                         * that the page out worked and clean the page, so the
                         * changes will be lost if the page is reclaimed by
                         * the page daemon.
                         */
                        vm_page_undirty(mt);
                        vm_page_lock(mt);
                        if (vm_page_in_laundry(mt))
                                vm_page_deactivate_noreuse(mt);
                        vm_page_unlock(mt);
                        break;
                case VM_PAGER_ERROR:
                case VM_PAGER_FAIL:
                        /*
                         * If the page couldn't be paged out to swap because the
                         * pager wasn't able to find space, place the page in
                         * the PQ_UNSWAPPABLE holding queue.  This is an
                         * optimization that prevents the page daemon from
                         * wasting CPU cycles on pages that cannot be reclaimed
                         * because no swap device is configured.
                         *
                         * Otherwise, reactivate the page so that it doesn't
                         * clog the laundry and inactive queues.  (We will try
                         * paging it out again later.)
                         */
                        vm_page_lock(mt);
                        if (object->type == OBJT_SWAP &&
                            pageout_status[i] == VM_PAGER_FAIL) {
                                vm_page_unswappable(mt);
                                numpagedout++;
                        } else
                                vm_page_activate(mt);
                        vm_page_unlock(mt);
                        if (eio != NULL && i >= mreq && i - mreq < runlen)
                                *eio = TRUE;
                        break;
                case VM_PAGER_AGAIN:
                        if (i >= mreq && i - mreq < runlen)
                                runlen = i - mreq;
                        break;
                }

                /*
                 * If the operation is still going, leave the page busy to
                 * block all other accesses. Also, leave the paging in
                 * progress indicator set so that we don't attempt an object
                 * collapse.
                 */
                if (pageout_status[i] != VM_PAGER_PEND) {
                        vm_object_pip_wakeup(object);
                        vm_page_sunbusy(mt);
                }
        }
        if (prunlen != NULL)
                *prunlen = runlen;
        return (numpagedout);
}

static void
vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
{

        atomic_store_rel_int(&swapdev_enabled, 1);
}

static void
vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
{

        if (swap_pager_nswapdev() == 1)
                atomic_store_rel_int(&swapdev_enabled, 0);
}

/*
 * Attempt to acquire all of the necessary locks to launder a page and
 * then call through the clustering layer to PUTPAGES.  Wait a short
 * time for a vnode lock.
 *
 * Requires the page and object lock on entry, releases both before return.
 * Returns 0 on success and an errno otherwise.
 */
static int
vm_pageout_clean(vm_page_t m, int *numpagedout)
{
        struct vnode *vp;
        struct mount *mp;
        vm_object_t object;
        vm_pindex_t pindex;
        int error, lockmode;

        vm_page_assert_locked(m);
        object = m->object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        error = 0;
        vp = NULL;
        mp = NULL;

        /*
         * The object is already known NOT to be dead.  It
         * is possible for the vget() to block the whole
         * pageout daemon, but the new low-memory handling
         * code should prevent it.
         *
         * We can't wait forever for the vnode lock; we might
         * deadlock due to a vn_read() getting stuck in
         * vm_wait while holding this vnode.  We skip the
         * vnode if we can't get it in a reasonable amount
         * of time.
         */
        if (object->type == OBJT_VNODE) {
                vm_page_unlock(m);
                vp = object->handle;
                if (vp->v_type == VREG &&
                    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
                        mp = NULL;
                        error = EDEADLK;
                        goto unlock_all;
                }
                KASSERT(mp != NULL,
                    ("vp %p with NULL v_mount", vp));
                vm_object_reference_locked(object);
                pindex = m->pindex;
                VM_OBJECT_WUNLOCK(object);
                lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
                    LK_SHARED : LK_EXCLUSIVE;
                if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
                        vp = NULL;
                        error = EDEADLK;
                        goto unlock_mp;
                }
                VM_OBJECT_WLOCK(object);

                /*
                 * Ensure that the object and vnode were not disassociated
                 * while locks were dropped.
                 */
                if (vp->v_object != object) {
                        error = ENOENT;
                        goto unlock_all;
                }
                vm_page_lock(m);

                /*
                 * While the object and page were unlocked, the page
                 * may have been:
                 * (1) moved to a different queue,
                 * (2) reallocated to a different object,
                 * (3) reallocated to a different offset, or
                 * (4) cleaned.
                 */
                if (!vm_page_in_laundry(m) || m->object != object ||
                    m->pindex != pindex || m->dirty == 0) {
                        vm_page_unlock(m);
                        error = ENXIO;
                        goto unlock_all;
                }

                /*
                 * The page may have been busied or referenced while the object
                 * and page locks were released.
                 */
                if (vm_page_busied(m) || vm_page_held(m)) {
                        vm_page_unlock(m);
                        error = EBUSY;
                        goto unlock_all;
                }
        }

        /*
         * If a page is dirty, then it is either being washed
         * (but not yet cleaned) or it is still in the
         * laundry.  If it is still in the laundry, then we
         * start the cleaning operation.
         */
        if ((*numpagedout = vm_pageout_cluster(m)) == 0)
                error = EIO;

unlock_all:
        VM_OBJECT_WUNLOCK(object);

unlock_mp:
        vm_page_lock_assert(m, MA_NOTOWNED);
        if (mp != NULL) {
                if (vp != NULL)
                        vput(vp);
                vm_object_deallocate(object);
                vn_finished_write(mp);
        }

        return (error);
}

/*
 * Attempt to launder the specified number of pages.
 *
 * Returns the number of pages successfully laundered.
 */
static int
vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
{
        struct scan_state ss;
        struct vm_pagequeue *pq;
        struct mtx *mtx;
        vm_object_t object;
        vm_page_t m, marker;
        int act_delta, error, numpagedout, queue, starting_target;
        int vnodes_skipped;
        bool obj_locked, pageout_ok;

        mtx = NULL;
        obj_locked = false;
        object = NULL;
        starting_target = launder;
        vnodes_skipped = 0;

        /*
         * Scan the laundry queues for pages eligible to be laundered.  We stop
         * once the target number of dirty pages has been laundered, or once
         * we've reached the end of the queue.  A single iteration of this loop
         * may cause more than one page to be laundered because of clustering.
         *
         * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
         * swap devices are configured.
         */
        if (atomic_load_acq_int(&swapdev_enabled))
                queue = PQ_UNSWAPPABLE;
        else
                queue = PQ_LAUNDRY;

scan:
        marker = &vmd->vmd_markers[queue];
        pq = &vmd->vmd_pagequeues[queue];
        vm_pagequeue_lock(pq);
        vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
        while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
                if (__predict_false((m->flags & PG_MARKER) != 0))
                        continue;

                vm_page_change_lock(m, &mtx);

recheck:
                /*
                 * The page may have been disassociated from the queue
                 * while locks were dropped.
                 */
                if (vm_page_queue(m) != queue)
                        continue;

                /*
                 * A requeue was requested, so this page gets a second
                 * chance.
                 */
                if ((m->aflags & PGA_REQUEUE) != 0) {
                        vm_page_requeue(m);
                        continue;
                }

                /*
                 * Held pages are essentially stuck in the queue.
                 *
                 * Wired pages may not be freed.  Complete their removal
                 * from the queue now to avoid needless revisits during
                 * future scans.
                 */
                if (m->hold_count != 0)
                        continue;
                if (m->wire_count != 0) {
                        vm_page_dequeue_deferred(m);
                        continue;
                }

                if (object != m->object) {
                        if (obj_locked) {
                                VM_OBJECT_WUNLOCK(object);
                                obj_locked = false;
                        }
                        object = m->object;
                }
                if (!obj_locked) {
                        if (!VM_OBJECT_TRYWLOCK(object)) {
                                mtx_unlock(mtx);
                                /* Depends on type-stability. */
                                VM_OBJECT_WLOCK(object);
                                obj_locked = true;
                                mtx_lock(mtx);
                                goto recheck;
                        } else
                                obj_locked = true;
                }

                if (vm_page_busied(m))
                        continue;

                /*
                 * Invalid pages can be easily freed.  They cannot be
                 * mapped; vm_page_free() asserts this.
                 */
                if (m->valid == 0)
                        goto free_page;

                /*
                 * If the page has been referenced and the object is not dead,
                 * reactivate or requeue the page depending on whether the
                 * object is mapped.
                 */
                if ((m->aflags & PGA_REFERENCED) != 0) {
                        vm_page_aflag_clear(m, PGA_REFERENCED);
                        act_delta = 1;
                } else
                        act_delta = 0;
                if (object->ref_count != 0)
                        act_delta += pmap_ts_referenced(m);
                else {
                        KASSERT(!pmap_page_is_mapped(m),
                            ("page %p is mapped", m));
                }
                if (act_delta != 0) {
                        if (object->ref_count != 0) {
                                VM_CNT_INC(v_reactivated);
                                vm_page_activate(m);

                                /*
                                 * Increase the activation count if the page
                                 * was referenced while in the laundry queue.
                                 * This makes it less likely that the page will
                                 * be returned prematurely to the inactive
                                 * queue.
                                 */
                                m->act_count += act_delta + ACT_ADVANCE;

                                /*
                                 * If this was a background laundering, count
                                 * activated pages towards our target.  The
                                 * purpose of background laundering is to ensure
                                 * that pages are eventually cycled through the
                                 * laundry queue, and an activation is a valid
                                 * way out.
                                 */
                                if (!in_shortfall)
                                        launder--;
                                continue;
                        } else if ((object->flags & OBJ_DEAD) == 0) {
                                vm_page_requeue(m);
                                continue;
                        }
                }

                /*
                 * If the page appears to be clean at the machine-independent
                 * layer, then remove all of its mappings from the pmap in
                 * anticipation of freeing it.  If, however, any of the page's
                 * mappings allow write access, then the page may still be
                 * modified until the last of those mappings are removed.
                 */
                if (object->ref_count != 0) {
                        vm_page_test_dirty(m);
                        if (m->dirty == 0)
                                pmap_remove_all(m);
                }

                /*
                 * Clean pages are freed, and dirty pages are paged out unless
                 * they belong to a dead object.  Requeueing dirty pages from
                 * dead objects is pointless, as they are being paged out and
                 * freed by the thread that destroyed the object.
                 */
                if (m->dirty == 0) {
free_page:
                        vm_page_free(m);
                        VM_CNT_INC(v_dfree);
                } else if ((object->flags & OBJ_DEAD) == 0) {
                        if (object->type != OBJT_SWAP &&
                            object->type != OBJT_DEFAULT)
                                pageout_ok = true;
                        else if (disable_swap_pageouts)
                                pageout_ok = false;
                        else
                                pageout_ok = true;
                        if (!pageout_ok) {
                                vm_page_requeue(m);
                                continue;
                        }

                        /*
                         * Form a cluster with adjacent, dirty pages from the
                         * same object, and page out that entire cluster.
                         *
                         * The adjacent, dirty pages must also be in the
                         * laundry.  However, their mappings are not checked
                         * for new references.  Consequently, a recently
                         * referenced page may be paged out.  However, that
                         * page will not be prematurely reclaimed.  After page
                         * out, the page will be placed in the inactive queue,
                         * where any new references will be detected and the
                         * page reactivated.
                         */
                        error = vm_pageout_clean(m, &numpagedout);
                        if (error == 0) {
                                launder -= numpagedout;
                                ss.scanned += numpagedout;
                        } else if (error == EDEADLK) {
                                pageout_lock_miss++;
                                vnodes_skipped++;
                        }
                        mtx = NULL;
                        obj_locked = false;
                }
        }
        if (mtx != NULL) {
                mtx_unlock(mtx);
                mtx = NULL;
        }
        if (obj_locked) {
                VM_OBJECT_WUNLOCK(object);
                obj_locked = false;
        }
        vm_pagequeue_lock(pq);
        vm_pageout_end_scan(&ss);
        vm_pagequeue_unlock(pq);

        if (launder > 0 && queue == PQ_UNSWAPPABLE) {
                queue = PQ_LAUNDRY;
                goto scan;
        }

        /*
         * Wake up the sync daemon if we skipped a vnode in a writeable object
         * and we didn't launder enough pages.
         */
        if (vnodes_skipped > 0 && launder > 0)
                (void)speedup_syncer();

        return (starting_target - launder);
}

/*
 * Compute the integer square root.
 */
static u_int
isqrt(u_int num)
{
        u_int bit, root, tmp;

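        /*
         * Bit-by-bit (digit-by-digit in base 2) method: the second loop
         * below determines one bit of the root per iteration, starting
         * with the most significant.  For example, isqrt(27) returns 5,
         * since 5 * 5 = 25 <= 27 < 36.
         */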
        bit = 1u << ((NBBY * sizeof(u_int)) - 2);
        while (bit > num)
                bit >>= 2;
        root = 0;
        while (bit != 0) {
                tmp = root + bit;
                root >>= 1;
                if (num >= tmp) {
                        num -= tmp;
                        root += bit;
                }
                bit >>= 2;
        }
        return (root);
}

/*
 * Perform the work of the laundry thread: periodically wake up and
 * determine whether any pages need to be laundered and, if so, how
 * many; then launder them.
 */
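/*
 * The loop below implements three modes of operation: shortfall
 * laundering, entered when the page daemon reports a free page shortage;
 * background laundering, entered when the ratio of dirty to clean
 * inactive pages grows large enough; and an idle state, in which the
 * thread sleeps until the page daemon posts a new laundering request.
 */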
static void
vm_pageout_laundry_worker(void *arg)
{
        struct vm_domain *vmd;
        struct vm_pagequeue *pq;
        uint64_t nclean, ndirty, nfreed;
        int domain, last_target, launder, shortfall, shortfall_cycle, target;
        bool in_shortfall;

        domain = (uintptr_t)arg;
        vmd = VM_DOMAIN(domain);
        pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
        KASSERT(vmd->vmd_segs != 0, ("domain without segments"));

        shortfall = 0;
        in_shortfall = false;
        shortfall_cycle = 0;
        target = 0;
        nfreed = 0;

        /*
         * Calls to these handlers are serialized by the swap syscall lock.
         */
        (void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
            EVENTHANDLER_PRI_ANY);
        (void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
            EVENTHANDLER_PRI_ANY);

        /*
         * The pageout laundry worker is never done, so loop forever.
         */
        for (;;) {
                KASSERT(target >= 0, ("negative target %d", target));
                KASSERT(shortfall_cycle >= 0,
                    ("negative cycle %d", shortfall_cycle));
                launder = 0;

                /*
                 * First determine whether we need to launder pages to meet a
                 * shortage of free pages.
                 */
                if (shortfall > 0) {
                        in_shortfall = true;
                        shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
                        target = shortfall;
                } else if (!in_shortfall)
                        goto trybackground;
                else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
                        /*
                         * We recently entered shortfall and began laundering
                         * pages.  If we have completed that laundering run
                         * (and we are no longer in shortfall) or we have met
                         * our laundry target through other activity, then we
                         * can stop laundering pages.
                         */
                        in_shortfall = false;
                        target = 0;
                        goto trybackground;
                }
                launder = target / shortfall_cycle--;
                goto dolaundry;

                /*
                 * There's no immediate need to launder any pages; see if we
                 * meet the conditions to perform background laundering:
                 *
                 * 1. The ratio of dirty to clean inactive pages exceeds the
                 *    background laundering threshold, or
                 * 2. we haven't yet reached the target of the current
                 *    background laundering run.
                 *
                 * The background laundering threshold is not a constant.
                 * Instead, it is a slowly growing function of the number of
                 * clean pages freed by the page daemon since the last
                 * background laundering.  Thus, as the ratio of dirty to
                 * clean inactive pages grows, the amount of memory pressure
                 * required to trigger laundering decreases.  We ensure
                 * that the threshold is non-zero after an inactive queue
                 * scan, even if that scan failed to free a single clean page.
                 */
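                /*
                 * For example, if the gap between the free page target and
                 * the free page minimum is 1000 pages, then just after a
                 * background laundering run (nfreed == 0) the scaling
                 * factor is isqrt(howmany(1, 1000)) == 1, and a new run
                 * begins only once dirty pages outnumber clean ones.  Once
                 * the page daemon has freed 8000 or more clean pages, the
                 * factor is at least isqrt(9) == 3, and a dirty count of
                 * one third of the clean count suffices.
                 */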
trybackground:
                nclean = vmd->vmd_free_count +
                    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
                ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
                if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
                    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
                        target = vmd->vmd_background_launder_target;
                }

                /*
                 * We have a non-zero background laundering target.  If we've
                 * laundered up to our maximum without observing a page daemon
                 * request, just stop.  This is a safety belt that ensures we
                 * don't launder an excessive amount if memory pressure is low
                 * and the ratio of dirty to clean pages is large.  Otherwise,
                 * proceed at the background laundering rate.
                 */
                if (target > 0) {
                        if (nfreed > 0) {
                                nfreed = 0;
                                last_target = target;
                        } else if (last_target - target >=
                            vm_background_launder_max * PAGE_SIZE / 1024) {
                                target = 0;
                        }
                        launder = vm_background_launder_rate * PAGE_SIZE / 1024;
                        launder /= VM_LAUNDER_RATE;
                        if (launder > target)
                                launder = target;
                }

dolaundry:
                if (launder > 0) {
                        /*
                         * Because of I/O clustering, the number of laundered
                         * pages could exceed "target" by the maximum size of
                         * a cluster minus one.
                         */
                        target -= min(vm_pageout_launder(vmd, launder,
                            in_shortfall), target);
                        pause("laundp", hz / VM_LAUNDER_RATE);
                }

                /*
                 * If we're not currently laundering pages and the page daemon
                 * hasn't posted a new request, sleep until the page daemon
                 * kicks us.
                 */
                vm_pagequeue_lock(pq);
                if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
                        (void)mtx_sleep(&vmd->vmd_laundry_request,
                            vm_pagequeue_lockptr(pq), PVM, "launds", 0);

                /*
                 * If the pagedaemon has indicated that it's in shortfall, start
                 * a shortfall laundering unless we're already in the middle of
                 * one.  This may preempt a background laundering.
                 */
                if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
                    (!in_shortfall || shortfall_cycle == 0)) {
                        shortfall = vm_laundry_target(vmd) +
                            vmd->vmd_pageout_deficit;
                        target = 0;
                } else
                        shortfall = 0;

                if (target == 0)
                        vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
                nfreed += vmd->vmd_clean_pages_freed;
                vmd->vmd_clean_pages_freed = 0;
                vm_pagequeue_unlock(pq);
        }
}

/*
 * Compute the number of pages we want to try to move from the
 * active queue to either the inactive or laundry queue.
 *
 * When scanning active pages during a shortage, we make clean pages
 * count more heavily towards the page shortage than dirty pages.
 * This is because dirty pages must be laundered before they can be
 * reused and thus have less utility when attempting to quickly
 * alleviate a free page shortage.  However, this weighting also
 * causes the scan to deactivate dirty pages more aggressively,
 * improving the effectiveness of clustering.
 */
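/*
 * For example, with a combined inactive and paging target of 10000 pages,
 * 4000 pages in the inactive queue, 9000 pages in the laundry queue, and
 * the default weight of 3, the computed shortage is
 * (10000 - (4000 + 9000 / 3)) * 3 = 9000.  The active queue scan may then
 * satisfy it by deactivating 3000 clean pages, 9000 dirty pages, or some
 * weighted mix of the two.
 */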
static int
vm_pageout_active_target(struct vm_domain *vmd)
{
        int shortage;

        shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
            (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
            vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
        shortage *= act_scan_laundry_weight;
        return (shortage);
}

/*
 * Scan the active queue.  If there is no shortage of inactive pages, scan a
 * small portion of the queue in order to maintain quasi-LRU.
 */
static void
vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
{
        struct scan_state ss;
        struct mtx *mtx;
        vm_page_t m, marker;
        struct vm_pagequeue *pq;
        long min_scan;
        int act_delta, max_scan, scan_tick;

        marker = &vmd->vmd_markers[PQ_ACTIVE];
        pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
        vm_pagequeue_lock(pq);

        /*
         * If we're just idle polling, attempt to visit every
         * active page within 'update_period' seconds.
         */
        scan_tick = ticks;
        if (vm_pageout_update_period != 0) {
                min_scan = pq->pq_cnt;
                min_scan *= scan_tick - vmd->vmd_last_active_scan;
                min_scan /= hz * vm_pageout_update_period;
        } else
                min_scan = 0;
        if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
                vmd->vmd_last_active_scan = scan_tick;

        /*
         * Scan the active queue for pages that can be deactivated.  Update
         * the per-page activity counter and use it to identify deactivation
         * candidates.  Held pages may be deactivated.
         *
         * To avoid requeuing each page that remains in the active queue, we
         * implement the CLOCK algorithm.  To keep the implementation of the
         * enqueue operation consistent for all page queues, we use two hands,
         * represented by marker pages. Scans begin at the first hand, which
         * precedes the second hand in the queue.  When the two hands meet,
         * they are moved back to the head and tail of the queue, respectively,
         * and scanning resumes.
         */
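        /*
         * Pictorially, with the scan advancing from left to right:
         *
         *      head ... hand 1 (vmd_clock[0]) ... hand 2 (vmd_clock[1]) ... tail
         *
         * Pages between the two hands are scanned; when the scan catches
         * up with the second hand, both hands are reset to the queue's
         * head and tail and the scan starts over from the first hand.
         */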
        max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
        mtx = NULL;
act_scan:
        vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
        while ((m = vm_pageout_next(&ss, false)) != NULL) {
                if (__predict_false(m == &vmd->vmd_clock[1])) {
                        vm_pagequeue_lock(pq);
                        TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
                        TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
                        TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
                            plinks.q);
                        TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
                            plinks.q);
                        max_scan -= ss.scanned;
                        vm_pageout_end_scan(&ss);
                        goto act_scan;
                }
                if (__predict_false((m->flags & PG_MARKER) != 0))
                        continue;

                vm_page_change_lock(m, &mtx);

                /*
                 * The page may have been disassociated from the queue
                 * while locks were dropped.
                 */
                if (vm_page_queue(m) != PQ_ACTIVE)
                        continue;

                /*
                 * Wired pages are dequeued lazily.
                 */
                if (m->wire_count != 0) {
                        vm_page_dequeue_deferred(m);
                        continue;
                }

                /*
                 * Check to see "how much" the page has been used.
                 */
                if ((m->aflags & PGA_REFERENCED) != 0) {
                        vm_page_aflag_clear(m, PGA_REFERENCED);
                        act_delta = 1;
                } else
                        act_delta = 0;

                /*
                 * Perform an unsynchronized object ref count check.  While
                 * the page lock ensures that the page is not reallocated to
                 * another object, in particular, one with unmanaged mappings
                 * that cannot support pmap_ts_referenced(), two races are,
                 * nonetheless, possible:
                 * 1) The count was transitioning to zero, but we saw a non-
                 *    zero value.  pmap_ts_referenced() will return zero
                 *    because the page is not mapped.
                 * 2) The count was transitioning to one, but we saw zero.
                 *    This race delays the detection of a new reference.  At
                 *    worst, we will deactivate and reactivate the page.
                 */
                if (m->object->ref_count != 0)
                        act_delta += pmap_ts_referenced(m);

                /*
                 * Advance or decay the act_count based on recent usage.
                 */
                if (act_delta != 0) {
                        m->act_count += ACT_ADVANCE + act_delta;
                        if (m->act_count > ACT_MAX)
                                m->act_count = ACT_MAX;
                } else
                        m->act_count -= min(m->act_count, ACT_DECLINE);

                if (m->act_count == 0) {
                        /*
                         * When not short for inactive pages, let dirty pages go
                         * through the inactive queue before moving to the
                         * laundry queues.  This gives them some extra time to
                         * be reactivated, potentially avoiding an expensive
                         * pageout.  However, during a page shortage, the
                         * inactive queue is necessarily small, and so dirty
                         * pages would only spend a trivial amount of time in
                         * the inactive queue.  Therefore, we might as well
                         * place them directly in the laundry queue to reduce
                         * queuing overhead.
                         */
                        if (page_shortage <= 0)
                                vm_page_deactivate(m);
                        else {
                                /*
                                 * Calling vm_page_test_dirty() here would
                                 * require acquisition of the object's write
                                 * lock.  However, during a page shortage,
                                 * directing dirty pages into the laundry
                                 * queue is only an optimization and not a
                                 * requirement.  Therefore, we simply rely on
                                 * the opportunistic updates to the page's
                                 * dirty field by the pmap.
                                 */
                                if (m->dirty == 0) {
                                        vm_page_deactivate(m);
                                        page_shortage -=
                                            act_scan_laundry_weight;
                                } else {
                                        vm_page_launder(m);
                                        page_shortage--;
                                }
                        }
                }
        }
        if (mtx != NULL) {
                mtx_unlock(mtx);
                mtx = NULL;
        }
        vm_pagequeue_lock(pq);
        TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
        TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
        vm_pageout_end_scan(&ss);
        vm_pagequeue_unlock(pq);
}
1298
1299 static int
1300 vm_pageout_reinsert_inactive_page(struct scan_state *ss, vm_page_t m)
1301 {
1302         struct vm_domain *vmd;
1303
1304         if (m->queue != PQ_INACTIVE || (m->aflags & PGA_ENQUEUED) != 0)
1305                 return (0);
1306         vm_page_aflag_set(m, PGA_ENQUEUED);
1307         if ((m->aflags & PGA_REQUEUE_HEAD) != 0) {
1308                 vmd = vm_pagequeue_domain(m);
1309                 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
1310                 vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
1311         } else if ((m->aflags & PGA_REQUEUE) != 0) {
1312                 TAILQ_INSERT_TAIL(&ss->pq->pq_pl, m, plinks.q);
1313                 vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
1314         } else
1315                 TAILQ_INSERT_BEFORE(ss->marker, m, plinks.q);
1316         return (1);
1317 }
1318
1319 /*
1320  * Re-add stuck pages to the inactive queue.  We will examine them again
1321  * during the next scan.  If the queue state of a page has changed since
1322  * it was physically removed from the page queue in
1323  * vm_pageout_collect_batch(), don't do anything with that page.
1324  */
1325 static void
1326 vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
1327     vm_page_t m)
1328 {
1329         struct vm_pagequeue *pq;
1330         int delta;
1331
1332         delta = 0;
1333         pq = ss->pq;
1334
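        /*
         * If a page was supplied, try to defer its reinsertion by adding it
         * to the batch queue; the page queue lock is taken only once the
         * batch is full or when the caller passes NULL to flush the batch.
         */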
1335         if (m != NULL) {
1336                 if (vm_batchqueue_insert(bq, m))
1337                         return;
1338                 vm_pagequeue_lock(pq);
1339                 delta += vm_pageout_reinsert_inactive_page(ss, m);
1340         } else
1341                 vm_pagequeue_lock(pq);
1342         while ((m = vm_batchqueue_pop(bq)) != NULL)
1343                 delta += vm_pageout_reinsert_inactive_page(ss, m);
1344         vm_pagequeue_cnt_add(pq, delta);
1345         vm_pagequeue_unlock(pq);
1346         vm_batchqueue_init(bq);
1347 }
1348
1349 /*
1350  * Attempt to reclaim the requested number of pages from the inactive queue.
1351  * Returns true if the shortage was addressed.
1352  */
1353 static int
1354 vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
1355     int *addl_shortage)
1356 {
1357         struct scan_state ss;
1358         struct vm_batchqueue rq;
1359         struct mtx *mtx;
1360         vm_page_t m, marker;
1361         struct vm_pagequeue *pq;
1362         vm_object_t object;
1363         int act_delta, addl_page_shortage, deficit, page_shortage;
1364         int starting_page_shortage;
1365         bool obj_locked;
1366
1367         /*
1368          * The addl_page_shortage is an estimate of the number of temporarily
1369          * stuck pages in the inactive queue.  In other words, the
1370          * number of pages from the inactive count that should be
1371          * discounted in setting the target for the active queue scan.
1372          */
1373         addl_page_shortage = 0;
1374
1375         /*
1376          * vmd_pageout_deficit counts the number of pages requested in
1377          * allocations that failed because of a free page shortage.  We assume
1378          * that the allocations will be reattempted and thus include the deficit
1379          * in our scan target.
1380          */
1381         deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
1382         starting_page_shortage = page_shortage = shortage + deficit;
1383
1384         mtx = NULL;
1385         obj_locked = false;
1386         object = NULL;
1387         vm_batchqueue_init(&rq);
1388
1389         /*
1390          * Start scanning the inactive queue for pages that we can free.  The
1391          * scan will stop when we reach the target or we have scanned the
1392          * entire queue.  (Note that m->act_count is not used to make
1393          * decisions for the inactive queue, only for the active queue.)
1394          */
1395         marker = &vmd->vmd_markers[PQ_INACTIVE];
1396         pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1397         vm_pagequeue_lock(pq);
1398         vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
1399         while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) {
1400                 KASSERT((m->flags & PG_MARKER) == 0,
1401                     ("marker page %p was dequeued", m));
1402
1403                 vm_page_change_lock(m, &mtx);
1404
1405 recheck:
1406                 /*
1407                  * The page may have been disassociated from the queue
1408                  * while locks were dropped.
1409                  */
1410                 if (vm_page_queue(m) != PQ_INACTIVE) {
1411                         addl_page_shortage++;
1412                         continue;
1413                 }
1414
1415                 /*
1416                  * The page was re-enqueued after the page queue lock was
1417                  * dropped, or a requeue was requested.  This page gets a second
1418                  * chance.
1419                  */
1420                 if ((m->aflags & (PGA_ENQUEUED | PGA_REQUEUE |
1421                     PGA_REQUEUE_HEAD)) != 0)
1422                         goto reinsert;
1423
1424                 /*
1425                  * Held pages are essentially stuck in the queue.  So,
1426                  * they ought to be discounted from the inactive count.
1427                  * See the description of addl_page_shortage above.
1428                  *
1429                  * Wired pages may not be freed.  Complete their removal
1430                  * from the queue now to avoid needless revisits during
1431                  * future scans.
1432                  */
1433                 if (m->hold_count != 0) {
1434                         addl_page_shortage++;
1435                         goto reinsert;
1436                 }
1437                 if (m->wire_count != 0) {
1438                         vm_page_dequeue_deferred(m);
1439                         continue;
1440                 }
1441
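                /*
                 * Acquire the page's object lock, switching objects if the
                 * page does not belong to the previously locked object.  If
                 * the try-lock fails, drop the page lock before blocking on
                 * the object lock, and then recheck the page's queue state,
                 * since it may have changed while no locks were held.
                 */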
1442                 if (object != m->object) {
1443                         if (obj_locked) {
1444                                 VM_OBJECT_WUNLOCK(object);
1445                                 obj_locked = false;
1446                         }
1447                         object = m->object;
1448                 }
1449                 if (!obj_locked) {
1450                         if (!VM_OBJECT_TRYWLOCK(object)) {
1451                                 mtx_unlock(mtx);
1452                                 /* Depends on type-stability. */
1453                                 VM_OBJECT_WLOCK(object);
1454                                 obj_locked = true;
1455                                 mtx_lock(mtx);
1456                                 goto recheck;
1457                         } else
1458                                 obj_locked = true;
1459                 }
1460
1461                 if (vm_page_busied(m)) {
1462                         /*
1463                          * Don't mess with busy pages.  Leave them at
1464                          * the front of the queue.  Most likely, they
1465                          * are being paged out and will leave the
1466                          * queue shortly after the scan finishes.  So,
1467                          * they ought to be discounted from the
1468                          * inactive count.
1469                          */
1470                         addl_page_shortage++;
1471                         goto reinsert;
1472                 }
1473
1474                 /*
1475                  * Invalid pages can be easily freed.  They cannot be
1476                  * mapped; vm_page_free() asserts this.
1477                  */
1478                 if (m->valid == 0)
1479                         goto free_page;
1480
1481                 /*
1482                  * If the page has been referenced and the object is not dead,
1483                  * reactivate or requeue the page depending on whether the
1484                  * object is mapped.
1485                  */
1486                 if ((m->aflags & PGA_REFERENCED) != 0) {
1487                         vm_page_aflag_clear(m, PGA_REFERENCED);
1488                         act_delta = 1;
1489                 } else
1490                         act_delta = 0;
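                /*
                 * If the object still has references, also collect and clear
                 * the page's reference bits from the pmap; a page belonging
                 * to an unreferenced object must not have any mappings left.
                 */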
1491                 if (object->ref_count != 0) {
1492                         act_delta += pmap_ts_referenced(m);
1493                 } else {
1494                         KASSERT(!pmap_page_is_mapped(m),
1495                             ("page %p is mapped", m));
1496                 }
1497                 if (act_delta != 0) {
1498                         if (object->ref_count != 0) {
1499                                 VM_CNT_INC(v_reactivated);
1500                                 vm_page_activate(m);
1501
1502                                 /*
1503                                  * Increase the activation count if the page
1504                                  * was referenced while in the inactive queue.
1505                                  * This makes it less likely that the page will
1506                                  * be returned prematurely to the inactive
1507                                  * queue.
1508                                  */
1509                                 m->act_count += act_delta + ACT_ADVANCE;
1510                                 continue;
1511                         } else if ((object->flags & OBJ_DEAD) == 0) {
1512                                 vm_page_aflag_set(m, PGA_REQUEUE);
1513                                 goto reinsert;
1514                         }
1515                 }
1516
1517                 /*
1518                  * If the page appears to be clean at the machine-independent
1519                  * layer, then remove all of its mappings from the pmap in
1520                  * anticipation of freeing it.  If, however, any of the page's
1521                  * mappings allow write access, then the page may still be
1522                  * modified until the last of those mappings are removed.
1523                  */
1524                 if (object->ref_count != 0) {
1525                         vm_page_test_dirty(m);
1526                         if (m->dirty == 0)
1527                                 pmap_remove_all(m);
1528                 }
1529
1530                 /*
1531                  * Clean pages can be freed, but dirty pages must be sent back
1532                  * to the laundry, unless they belong to a dead object.
1533                  * Requeueing dirty pages from dead objects is pointless, as
1534                  * they are being paged out and freed by the thread that
1535                  * destroyed the object.
1536                  */
1537                 if (m->dirty == 0) {
1538 free_page:
1539                         /*
1540                          * Because we dequeued the page and have already
1541                          * checked for concurrent dequeue and enqueue
1542                          * requests, we can safely disassociate the page
1543                          * from the inactive queue.
1544                          */
1545                         KASSERT((m->aflags & PGA_QUEUE_STATE_MASK) == 0,
1546                             ("page %p has queue state", m));
1547                         m->queue = PQ_NONE;
1548                         vm_page_free(m);
1549                         page_shortage--;
1550                 } else if ((object->flags & OBJ_DEAD) == 0)
1551                         vm_page_launder(m);
1552                 continue;
1553 reinsert:
1554                 vm_pageout_reinsert_inactive(&ss, &rq, m);
1555         }
1556         if (mtx != NULL) {
1557                 mtx_unlock(mtx);
1558                 mtx = NULL;
1559         }
1560         if (obj_locked) {
1561                 VM_OBJECT_WUNLOCK(object);
1562                 obj_locked = false;
1563         }
1564         vm_pageout_reinsert_inactive(&ss, &rq, NULL);
1565         vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
1566         vm_pagequeue_lock(pq);
1567         vm_pageout_end_scan(&ss);
1568         vm_pagequeue_unlock(pq);
1569
1570         VM_CNT_ADD(v_dfree, starting_page_shortage - page_shortage);
1571
1572         /*
1573          * Wake up the laundry thread so that it can perform any needed
1574          * laundering.  If we didn't meet our target, we're in shortfall and
1575          * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1576          * swap devices are configured, the laundry thread has no work to do, so
1577          * don't bother waking it up.
1578          *
1579          * The laundry thread uses the number of inactive queue scans elapsed
1580          * since the last laundering to determine whether to launder again, so
1581          * keep count.
1582          */
1583         if (starting_page_shortage > 0) {
1584                 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1585                 vm_pagequeue_lock(pq);
1586                 if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1587                     (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1588                         if (page_shortage > 0) {
1589                                 vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
1590                                 VM_CNT_INC(v_pdshortfalls);
1591                         } else if (vmd->vmd_laundry_request !=
1592                             VM_LAUNDRY_SHORTFALL)
1593                                 vmd->vmd_laundry_request =
1594                                     VM_LAUNDRY_BACKGROUND;
1595                         wakeup(&vmd->vmd_laundry_request);
1596                 }
1597                 vmd->vmd_clean_pages_freed +=
1598                     starting_page_shortage - page_shortage;
1599                 vm_pagequeue_unlock(pq);
1600         }
1601
1602         /*
1603          * Wake up the swapout daemon if we didn't free the targeted number of
1604          * pages.
1605          */
1606         if (page_shortage > 0)
1607                 vm_swapout_run();
1608
1609         /*
1610          * If the inactive queue scan fails repeatedly to meet its
1611          * target, kill the largest process.
1612          */
1613         vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1614
1615         /*
1616          * Reclaim pages by swapping out idle processes, if configured to do so.
1617          */
1618         vm_swapout_run_idle();
1619
1620         /*
1621          * See the description of addl_page_shortage above.
1622          */
1623         *addl_shortage = addl_page_shortage + deficit;
1624
1625         return (page_shortage <= 0);
1626 }
1627
1628 static int vm_pageout_oom_vote;
1629
1630 /*
1631  * The pagedaemon threads randomly select one to perform the
1632  * OOM.  Trying to kill processes before all pagedaemons have
1633  * failed to reach the free target is premature.
1634  */
1635 static void
1636 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1637     int starting_page_shortage)
1638 {
1639         int old_vote;
1640
1641         if (starting_page_shortage <= 0 || starting_page_shortage !=
1642             page_shortage)
1643                 vmd->vmd_oom_seq = 0;
1644         else
1645                 vmd->vmd_oom_seq++;
1646         if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1647                 if (vmd->vmd_oom) {
1648                         vmd->vmd_oom = FALSE;
1649                         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1650                 }
1651                 return;
1652         }
1653
1654         /*
1655          * Do not follow the call sequence again until the OOM
1656          * condition is cleared.
1657          */
1658         vmd->vmd_oom_seq = 0;
1659
1660         if (vmd->vmd_oom)
1661                 return;
1662
1663         vmd->vmd_oom = TRUE;
1664         old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1665         if (old_vote != vm_ndomains - 1)
1666                 return;
1667
1668         /*
1669          * The current pagedaemon thread is the last in the quorum to
1670          * start OOM.  Initiate the selection and signaling of the
1671          * victim.
1672          */
1673         vm_pageout_oom(VM_OOM_MEM);
1674
1675         /*
1676          * After one round of OOM terror, recall our vote.  On the
1677          * next pass, the current pagedaemon will vote again if the low
1678          * memory condition is still present, since vmd_oom is now
1679          * false.
1680          */
1681         vmd->vmd_oom = FALSE;
1682         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1683 }
1684
1685 /*
1686  * The OOM killer is the page daemon's action of last resort when
1687  * memory allocation requests have been stalled for a prolonged period
1688  * of time because it cannot reclaim memory.  This function computes
1689  * the approximate number of physical pages that could be reclaimed if
1690  * the specified address space is destroyed.
1691  *
1692  * Private, anonymous memory owned by the address space is the
1693  * principal resource that we expect to recover after an OOM kill.
1694  * Since the physical pages mapped by the address space's COW entries
1695  * are typically shared pages, they are unlikely to be released and so
1696  * they are not counted.
1697  *
1698  * To get to the point where the page daemon runs the OOM killer, its
1699  * efforts to write-back vnode-backed pages may have stalled.  This
1700  * could be caused by a memory allocation deadlock in the write path
1701  * that might be resolved by an OOM kill.  Therefore, physical pages
1702  * belonging to vnode-backed objects are counted, because they might
1703  * be freed without being written out first if the address space holds
1704  * the last reference to an unlinked vnode.
1705  *
1706  * Similarly, physical pages belonging to OBJT_PHYS objects are
1707  * counted because the address space might hold the last reference to
1708  * the object.
1709  */
1710 static long
1711 vm_pageout_oom_pagecount(struct vmspace *vmspace)
1712 {
1713         vm_map_t map;
1714         vm_map_entry_t entry;
1715         vm_object_t obj;
1716         long res;
1717
1718         map = &vmspace->vm_map;
1719         KASSERT(!map->system_map, ("system map"));
1720         sx_assert(&map->lock, SA_LOCKED);
1721         res = 0;
1722         for (entry = map->header.next; entry != &map->header;
1723             entry = entry->next) {
1724                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1725                         continue;
1726                 obj = entry->object.vm_object;
1727                 if (obj == NULL)
1728                         continue;
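                /*
                 * Skip shared copy-on-write mappings: killing this process
                 * will not free those pages unless it holds the only
                 * reference to the backing object.
                 */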
1729                 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
1730                     obj->ref_count != 1)
1731                         continue;
1732                 switch (obj->type) {
1733                 case OBJT_DEFAULT:
1734                 case OBJT_SWAP:
1735                 case OBJT_PHYS:
1736                 case OBJT_VNODE:
1737                         res += obj->resident_page_count;
1738                         break;
1739                 }
1740         }
1741         return (res);
1742 }
1743
1744 void
1745 vm_pageout_oom(int shortage)
1746 {
1747         struct proc *p, *bigproc;
1748         vm_offset_t size, bigsize;
1749         struct thread *td;
1750         struct vmspace *vm;
1751         bool breakout;
1752
1753         /*
1754          * We keep the process bigproc locked once we find it to keep anyone
1755          * from messing with it; however, there is a possibility of
1756          * deadlock if process B is bigproc and one of its child processes
1757          * attempts to propagate a signal to B while we are waiting for
1758          * some other process's lock while walking this list.  To avoid this,
1759          * we don't block on the process lock but just skip locked processes.
1760          */
1761         bigproc = NULL;
1762         bigsize = 0;
1763         sx_slock(&allproc_lock);
1764         FOREACH_PROC_IN_SYSTEM(p) {
1765                 PROC_LOCK(p);
1766
1767                 /*
1768                  * If this is a system, protected or killed process, skip it.
1769                  */
1770                 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1771                     P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1772                     p->p_pid == 1 || P_KILLED(p) ||
1773                     (p->p_pid < 48 && swap_pager_avail != 0)) {
1774                         PROC_UNLOCK(p);
1775                         continue;
1776                 }
1777                 /*
1778                  * If the process is in a non-running type state,
1779                  * don't touch it.  Check all the threads individually.
1780                  */
1781                 breakout = false;
1782                 FOREACH_THREAD_IN_PROC(p, td) {
1783                         thread_lock(td);
1784                         if (!TD_ON_RUNQ(td) &&
1785                             !TD_IS_RUNNING(td) &&
1786                             !TD_IS_SLEEPING(td) &&
1787                             !TD_IS_SUSPENDED(td) &&
1788                             !TD_IS_SWAPPED(td)) {
1789                                 thread_unlock(td);
1790                                 breakout = true;
1791                                 break;
1792                         }
1793                         thread_unlock(td);
1794                 }
1795                 if (breakout) {
1796                         PROC_UNLOCK(p);
1797                         continue;
1798                 }
1799                 /*
1800                  * Get the process size.
1801                  */
1802                 vm = vmspace_acquire_ref(p);
1803                 if (vm == NULL) {
1804                         PROC_UNLOCK(p);
1805                         continue;
1806                 }
1807                 _PHOLD_LITE(p);
1808                 PROC_UNLOCK(p);
1809                 sx_sunlock(&allproc_lock);
1810                 if (!vm_map_trylock_read(&vm->vm_map)) {
1811                         vmspace_free(vm);
1812                         sx_slock(&allproc_lock);
1813                         PRELE(p);
1814                         continue;
1815                 }
1816                 size = vmspace_swap_count(vm);
1817                 if (shortage == VM_OOM_MEM)
1818                         size += vm_pageout_oom_pagecount(vm);
1819                 vm_map_unlock_read(&vm->vm_map);
1820                 vmspace_free(vm);
1821                 sx_slock(&allproc_lock);
1822
1823                 /*
1824                  * If this process is bigger than the biggest one,
1825                  * remember it.
1826                  */
1827                 if (size > bigsize) {
1828                         if (bigproc != NULL)
1829                                 PRELE(bigproc);
1830                         bigproc = p;
1831                         bigsize = size;
1832                 } else {
1833                         PRELE(p);
1834                 }
1835         }
1836         sx_sunlock(&allproc_lock);
1837         if (bigproc != NULL) {
1838                 if (vm_panic_on_oom != 0)
1839                         panic("out of swap space");
1840                 PROC_LOCK(bigproc);
1841                 killproc(bigproc, "out of swap space");
1842                 sched_nice(bigproc, PRIO_MIN);
1843                 _PRELE(bigproc);
1844                 PROC_UNLOCK(bigproc);
1845         }
1846 }
1847
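/*
 * Invoke the registered vm_lowmem handlers and reclaim the UMA caches.  Only
 * domain 0's pageout worker performs this, and at most once every
 * lowmem_period seconds.
 */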
1848 static void
1849 vm_pageout_lowmem(struct vm_domain *vmd)
1850 {
1851
1852         if (vmd == VM_DOMAIN(0) &&
1853             time_uptime - lowmem_uptime >= lowmem_period) {
1854                 /*
1855                  * Decrease registered cache sizes.
1856                  */
1857                 SDT_PROBE0(vm, , , vm__lowmem_scan);
1858                 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
1859
1860                 /*
1861                  * We do this explicitly after the caches have been
1862                  * drained above.
1863                  */
1864                 uma_reclaim();
1865                 lowmem_uptime = time_uptime;
1866         }
1867 }
1868
1869 static void
1870 vm_pageout_worker(void *arg)
1871 {
1872         struct vm_domain *vmd;
1873         int addl_shortage, domain, shortage;
1874         bool target_met;
1875
1876         domain = (uintptr_t)arg;
1877         vmd = VM_DOMAIN(domain);
1878         shortage = 0;
1879         target_met = true;
1880
1881         /*
1882          * XXXKIB It could be useful to bind pageout daemon threads to
1883          * the cores belonging to the domain, from which vm_page_array
1884          * is allocated.
1885          */
1886
1887         KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
1888         vmd->vmd_last_active_scan = ticks;
1889
1890         /*
1891          * The pageout daemon worker is never done, so loop forever.
1892          */
1893         while (TRUE) {
1894                 vm_domain_pageout_lock(vmd);
1895
1896                 /*
1897                  * We need to clear wanted before we check the limits.  This
1898                  * prevents races with wakers who will check wanted after they
1899                  * reach the limit.
1900                  */
1901                 atomic_store_int(&vmd->vmd_pageout_wanted, 0);
1902
1903                 /*
1904                  * Might the page daemon need to run again?
1905                  */
1906                 if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
1907                         /*
1908                          * Yes.  If the scan failed to produce enough free
1909                          * pages, sleep uninterruptibly for some time in the
1910                          * hope that the laundry thread will clean some pages.
1911                          */
1912                         vm_domain_pageout_unlock(vmd);
1913                         if (!target_met)
1914                                 pause("pwait", hz / VM_INACT_SCAN_RATE);
1915                 } else {
1916                         /*
1917                          * No, sleep until the next wakeup or until pages
1918                          * need to have their reference stats updated.
1919                          */
1920                         if (mtx_sleep(&vmd->vmd_pageout_wanted,
1921                             vm_domain_pageout_lockptr(vmd), PDROP | PVM,
1922                             "psleep", hz / VM_INACT_SCAN_RATE) == 0)
1923                                 VM_CNT_INC(v_pdwakeups);
1924                 }
1925
1926                 /* Prevent spurious wakeups by ensuring that wanted is set. */
1927                 atomic_store_int(&vmd->vmd_pageout_wanted, 1);
1928
1929                 /*
1930                  * Use the controller to calculate how many pages to free in
1931                  * this interval, and scan the inactive queue.
1932                  */
1933                 shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
1934                 if (shortage > 0) {
1935                         vm_pageout_lowmem(vmd);
1936                         target_met = vm_pageout_scan_inactive(vmd, shortage,
1937                             &addl_shortage);
1938                 } else
1939                         addl_shortage = 0;
1940
1941                 /*
1942                  * Scan the active queue.  A positive value for shortage
1943                  * indicates that we must aggressively deactivate pages to avoid
1944                  * a shortfall.
1945                  */
1946                 shortage = vm_pageout_active_target(vmd) + addl_shortage;
1947                 vm_pageout_scan_active(vmd, shortage);
1948         }
1949 }
1950
1951 /*
1952  *      vm_pageout_init initializes basic pageout daemon settings.
1953  */
1954 static void
1955 vm_pageout_init_domain(int domain)
1956 {
1957         struct vm_domain *vmd;
1958         struct sysctl_oid *oid;
1959
1960         vmd = VM_DOMAIN(domain);
1961         vmd->vmd_interrupt_free_min = 2;
1962
1963         /*
1964          * v_free_reserved needs to include enough for the largest
1965          * swap pager structures plus enough for any pv_entry structs
1966          * when paging. 
1967          */
1968         if (vmd->vmd_page_count > 1024)
1969                 vmd->vmd_free_min = 4 + (vmd->vmd_page_count - 1024) / 200;
1970         else
1971                 vmd->vmd_free_min = 4;
1972         vmd->vmd_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1973             vmd->vmd_interrupt_free_min;
1974         vmd->vmd_free_reserved = vm_pageout_page_count +
1975             vmd->vmd_pageout_free_min + (vmd->vmd_page_count / 768);
1976         vmd->vmd_free_severe = vmd->vmd_free_min / 2;
1977         vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
1978         vmd->vmd_free_min += vmd->vmd_free_reserved;
1979         vmd->vmd_free_severe += vmd->vmd_free_reserved;
1980         vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
1981         if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
1982                 vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
1983
1984         /*
1985          * Set the default wakeup threshold to be 10% below the paging
1986          * target.  This keeps the steady state out of shortfall.
1987          */
1988         vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
1989
1990         /*
1991          * Target amount of memory to move out of the laundry queue during a
1992          * background laundering.  This is proportional to the amount of system
1993          * memory.
1994          */
1995         vmd->vmd_background_launder_target = (vmd->vmd_free_target -
1996             vmd->vmd_free_min) / 10;
1997
1998         /* Initialize the pageout daemon pid controller. */
1999         pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
2000             vmd->vmd_free_target, PIDCTRL_BOUND,
2001             PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
2002         oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
2003             "pidctrl", CTLFLAG_RD, NULL, "");
2004         pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
2005 }
2006
2007 static void
2008 vm_pageout_init(void)
2009 {
2010         u_int freecount;
2011         int i;
2012
2013         /*
2014          * Initialize some paging parameters.
2015          */
2016         if (vm_cnt.v_page_count < 2000)
2017                 vm_pageout_page_count = 8;
2018
2019         freecount = 0;
2020         for (i = 0; i < vm_ndomains; i++) {
2021                 struct vm_domain *vmd;
2022
2023                 vm_pageout_init_domain(i);
2024                 vmd = VM_DOMAIN(i);
2025                 vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
2026                 vm_cnt.v_free_target += vmd->vmd_free_target;
2027                 vm_cnt.v_free_min += vmd->vmd_free_min;
2028                 vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
2029                 vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
2030                 vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
2031                 vm_cnt.v_free_severe += vmd->vmd_free_severe;
2032                 freecount += vmd->vmd_free_count;
2033         }
2034
2035         /*
2036          * Set the interval in seconds for the active scan.  We want to
2037          * visit each page at least once every ten minutes.  This prevents
2038          * worst-case paging behavior with a stale active LRU.
2039          */
2040         if (vm_pageout_update_period == 0)
2041                 vm_pageout_update_period = 600;
2042
2043         if (vm_page_max_wired == 0)
2044                 vm_page_max_wired = freecount / 3;
2045 }
2046
2047 /*
2048  *     vm_pageout is the high level pageout daemon.
2049  */
2050 static void
2051 vm_pageout(void)
2052 {
2053         int error;
2054         int i;
2055
2056         swap_pager_swap_init();
2057         snprintf(curthread->td_name, sizeof(curthread->td_name), "dom0");
2058         error = kthread_add(vm_pageout_laundry_worker, NULL, curproc, NULL,
2059             0, 0, "laundry: dom0");
2060         if (error != 0)
2061                 panic("starting laundry for domain 0, error %d", error);
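        /*
         * Create a pageout worker and a laundry thread for each additional
         * memory domain; domain 0's pageout worker is the current thread,
         * which calls vm_pageout_worker() below.
         */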
2062         for (i = 1; i < vm_ndomains; i++) {
2063                 error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i,
2064                     curproc, NULL, 0, 0, "dom%d", i);
2065                 if (error != 0) {
2066                         panic("starting pageout for domain %d, error %d\n",
2067                             i, error);
2068                 }
2069                 error = kthread_add(vm_pageout_laundry_worker,
2070                     (void *)(uintptr_t)i, curproc, NULL, 0, 0,
2071                     "laundry: dom%d", i);
2072                 if (error != 0)
2073                         panic("starting laundry for domain %d, error %d",
2074                             i, error);
2075         }
2076         error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL,
2077             0, 0, "uma");
2078         if (error != 0)
2079                 panic("starting uma_reclaim helper, error %d\n", error);
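        /* The current thread serves as the pageout worker for domain 0. */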
2080         vm_pageout_worker((void *)(uintptr_t)0);
2081 }
2082
2083 /*
2084  * Perform an advisory wakeup of the page daemon.
2085  */
2086 void
2087 pagedaemon_wakeup(int domain)
2088 {
2089         struct vm_domain *vmd;
2090
2091         vmd = VM_DOMAIN(domain);
2092         vm_domain_pageout_assert_unlocked(vmd);
2093         if (curproc == pageproc)
2094                 return;
2095
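        /*
         * Only the caller that transitions vmd_pageout_wanted from zero needs
         * to take the pageout lock and deliver the wakeup; concurrent callers
         * will find it already set.
         */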
2096         if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
2097                 vm_domain_pageout_lock(vmd);
2098                 atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2099                 wakeup(&vmd->vmd_pageout_wanted);
2100                 vm_domain_pageout_unlock(vmd);
2101         }
2102 }