/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *      The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static void vm_pageout_init(void);
static int vm_pageout_clean(vm_page_t m, int *numpagedout);
static int vm_pageout_cluster(vm_page_t m);
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
    int starting_page_shortage);

SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
    NULL);

struct proc *pageproc;

static struct kproc_desc page_kp = {
        "pagedaemon",
        vm_pageout,
        &pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
    &page_kp);

SDT_PROVIDER_DEFINE(vm);
SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);

/* Pagedaemon activity rates, in subdivisions of one second. */
#define VM_LAUNDER_RATE         10
#define VM_INACT_SCAN_RATE      10

static int vm_pageout_oom_seq = 12;

static int vm_pageout_update_period;
static int disable_swap_pageouts;
static int lowmem_period = 10;
static int swapdev_enabled;

static int vm_panic_on_oom = 0;

SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
        CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
        "panic on out of memory instead of killing the largest process");

SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
        CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
        "Maximum active LRU update period");

SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
        "Low memory callback period");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
        CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
        CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
        CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
        "back-to-back calls to oom detector to start OOM");

static int act_scan_laundry_weight = 3;
SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
    &act_scan_laundry_weight, 0,
    "weight given to clean vs. dirty pages in active queue scans");

static u_int vm_background_launder_rate = 4096;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
    &vm_background_launder_rate, 0,
    "background laundering rate, in kilobytes per second");

static u_int vm_background_launder_max = 20 * 1024;
SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
    &vm_background_launder_max, 0, "background laundering cap, in kilobytes");
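
/*
 * Most of the tunables above are declared with CTLFLAG_RWTUN, so they can be
 * set as loader tunables or adjusted at runtime via sysctl(8).  For example
 * (illustrative values, not recommendations):
 *
 *      # sysctl vm.background_launder_rate=8192
 *      # sysctl vm.pageout_update_period=600
 */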

int vm_pageout_page_count = 32;

u_long vm_page_max_user_wired;
SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
    &vm_page_max_user_wired, 0,
    "system-wide limit to user-wired page count");

static u_int isqrt(u_int num);
static int vm_pageout_launder(struct vm_domain *vmd, int launder,
    bool in_shortfall);
static void vm_pageout_laundry_worker(void *arg);

struct scan_state {
        struct vm_batchqueue bq;
        struct vm_pagequeue *pq;
        vm_page_t       marker;
        int             maxscan;
        int             scanned;
};

static void
vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
    vm_page_t marker, vm_page_t after, int maxscan)
{

        vm_pagequeue_assert_locked(pq);
        KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
            ("marker %p already enqueued", marker));

        if (after == NULL)
                TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
        else
                TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
        vm_page_aflag_set(marker, PGA_ENQUEUED);

        vm_batchqueue_init(&ss->bq);
        ss->pq = pq;
        ss->marker = marker;
        ss->maxscan = maxscan;
        ss->scanned = 0;
        vm_pagequeue_unlock(pq);
}

static void
vm_pageout_end_scan(struct scan_state *ss)
{
        struct vm_pagequeue *pq;

        pq = ss->pq;
        vm_pagequeue_assert_locked(pq);
        KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
            ("marker %p not enqueued", ss->marker));

        TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
        vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
        pq->pq_pdpages += ss->scanned;
}

/*
 * Add a small number of queued pages to a batch queue for later processing
 * without the corresponding queue lock held.  The caller must have enqueued a
 * marker page at the desired start point for the scan.  Pages will be
 * physically dequeued if the caller so requests.  Otherwise, the returned
 * batch may contain marker pages, and it is up to the caller to handle them.
 *
 * When processing the batch queue, vm_page_queue() must be used to
 * determine whether the page has been logically dequeued by another thread.
 * Once this check is performed, the page lock guarantees that the page will
 * not be disassociated from the queue.
 */
static __always_inline void
vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
{
        struct vm_pagequeue *pq;
        vm_page_t m, marker, n;

        marker = ss->marker;
        pq = ss->pq;

        KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
            ("marker %p not enqueued", ss->marker));

        vm_pagequeue_lock(pq);
        for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
            ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
            m = n, ss->scanned++) {
                n = TAILQ_NEXT(m, plinks.q);
                if ((m->flags & PG_MARKER) == 0) {
                        KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
                            ("page %p not enqueued", m));
                        KASSERT((m->flags & PG_FICTITIOUS) == 0,
                            ("Fictitious page %p cannot be in page queue", m));
                        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
                            ("Unmanaged page %p cannot be in page queue", m));
                } else if (dequeue)
                        continue;

                (void)vm_batchqueue_insert(&ss->bq, m);
                if (dequeue) {
                        TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
                        vm_page_aflag_clear(m, PGA_ENQUEUED);
                }
        }
        TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
        if (__predict_true(m != NULL))
                TAILQ_INSERT_BEFORE(m, marker, plinks.q);
        else
                TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
        if (dequeue)
                vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
        vm_pagequeue_unlock(pq);
}

/*
 * Return the next page to be scanned, or NULL if the scan is complete.
 */
static __always_inline vm_page_t
vm_pageout_next(struct scan_state *ss, const bool dequeue)
{

        if (ss->bq.bq_cnt == 0)
                vm_pageout_collect_batch(ss, dequeue);
        return (vm_batchqueue_pop(&ss->bq));
}
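
/*
 * The three functions above form a small scanning API.  A minimal sketch of
 * the usage pattern, mirroring the scans later in this file (per-page checks
 * and error handling omitted for brevity):
 *
 *      vm_pagequeue_lock(pq);
 *      vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 *      while ((m = vm_pageout_next(&ss, false)) != NULL) {
 *              ... process "m" without the page queue lock held ...
 *      }
 *      vm_pagequeue_lock(pq);
 *      vm_pageout_end_scan(&ss);
 *      vm_pagequeue_unlock(pq);
 *
 * Note that vm_pageout_init_scan() returns with the queue unlocked and that
 * vm_pageout_end_scan() must be called with the queue locked again.
 */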

/*
 * Scan for pages at adjacent offsets within the given page's object that are
 * eligible for laundering, form a cluster of these pages and the given page,
 * and launder that cluster.
 */
static int
vm_pageout_cluster(vm_page_t m)
{
        vm_object_t object;
        vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
        vm_pindex_t pindex;
        int ib, is, page_base, pageout_count;

        object = m->object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        pindex = m->pindex;

        vm_page_assert_xbusied(m);

        mc[vm_pageout_page_count] = pb = ps = m;
        pageout_count = 1;
        page_base = vm_pageout_page_count;
        ib = 1;
        is = 1;

        /*
         * We can cluster only if the page is not clean, busy, or held, and
         * the page is in the laundry queue.
         *
         * During heavy mmap/modification loads the pageout
         * daemon can really fragment the underlying file
         * due to flushing pages out of order and not trying to
         * align the clusters (which leaves sporadic out-of-order
         * holes).  To solve this problem we do the reverse scan
         * first and attempt to align our cluster, then do a
         * forward scan if room remains.
         */
more:
        while (ib != 0 && pageout_count < vm_pageout_page_count) {
                if (ib > pindex) {
                        ib = 0;
                        break;
                }
                if ((p = vm_page_prev(pb)) == NULL ||
                    vm_page_tryxbusy(p) == 0) {
                        ib = 0;
                        break;
                }
                if (vm_page_wired(p)) {
                        ib = 0;
                        vm_page_xunbusy(p);
                        break;
                }
                vm_page_test_dirty(p);
                if (p->dirty == 0) {
                        ib = 0;
                        vm_page_xunbusy(p);
                        break;
                }
                vm_page_lock(p);
                if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
                        vm_page_unlock(p);
                        vm_page_xunbusy(p);
                        ib = 0;
                        break;
                }
                vm_page_unlock(p);
                mc[--page_base] = pb = p;
                ++pageout_count;
                ++ib;

                /*
                 * We are at an alignment boundary.  Stop here, and switch
                 * directions.  Do not clear ib.
                 */
                if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
                        break;
        }
        while (pageout_count < vm_pageout_page_count &&
            pindex + is < object->size) {
                if ((p = vm_page_next(ps)) == NULL ||
                    vm_page_tryxbusy(p) == 0)
                        break;
                if (vm_page_wired(p)) {
                        vm_page_xunbusy(p);
                        break;
                }
                vm_page_test_dirty(p);
                if (p->dirty == 0) {
                        vm_page_xunbusy(p);
                        break;
                }
                vm_page_lock(p);
                if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
                        vm_page_unlock(p);
                        vm_page_xunbusy(p);
                        break;
                }
                vm_page_unlock(p);
                mc[page_base + pageout_count] = ps = p;
                ++pageout_count;
                ++is;
        }

        /*
         * If we exhausted our forward scan, continue with the reverse scan
         * when possible, even past an alignment boundary.  This catches
         * boundary conditions.
         */
        if (ib != 0 && pageout_count < vm_pageout_page_count)
                goto more;

        return (vm_pageout_flush(&mc[page_base], pageout_count,
            VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
}
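
/*
 * An illustration of the alignment behavior above, with hypothetical values:
 * assume the default vm_pageout_page_count of 32 and a starting page at
 * pindex 70.  The reverse scan stops upon reaching pindex 64, a multiple of
 * 32, and the forward scan may then extend the cluster up through pindex 95,
 * yielding an aligned run that maps well onto the underlying file layout.
 */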

/*
 * vm_pageout_flush() - launder the given pages
 *
 *      The given pages are laundered.  Note that we set up for the start of
 *      I/O (i.e., busy the page), mark it read-only, and bump the object
 *      reference count all in here rather than in the parent.  If we want
 *      the parent to do more sophisticated things we may have to change
 *      the ordering.
 *
 *      The returned runlen is the count of pages between mreq and the first
 *      page after mreq with status VM_PAGER_AGAIN.
 *      *eio is set to TRUE if the pager returned VM_PAGER_ERROR or
 *      VM_PAGER_FAIL for any page in the runlen set.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
    boolean_t *eio)
{
        vm_object_t object = mc[0]->object;
        int pageout_status[count];
        int numpagedout = 0;
        int i, runlen;

        VM_OBJECT_ASSERT_WLOCKED(object);

        /*
         * Initiate I/O.  Mark the pages shared busy and verify that they're
         * valid and read-only.
         *
         * We do not have to fix up the clean/dirty bits here... we can
         * allow the pager to do it after the I/O completes.
         *
         * NOTE! mc[i]->dirty may be partial or fragmented due to an
         * edge case with file fragments.
         */
        for (i = 0; i < count; i++) {
                KASSERT(vm_page_all_valid(mc[i]),
                    ("vm_pageout_flush: partially invalid page %p index %d/%d",
                        mc[i], i, count));
                KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
                    ("vm_pageout_flush: writeable page %p", mc[i]));
                vm_page_busy_downgrade(mc[i]);
        }
        vm_object_pip_add(object, count);

        vm_pager_put_pages(object, mc, count, flags, pageout_status);

        runlen = count - mreq;
        if (eio != NULL)
                *eio = FALSE;
        for (i = 0; i < count; i++) {
                vm_page_t mt = mc[i];

                KASSERT(pageout_status[i] == VM_PAGER_PEND ||
                    !pmap_page_is_write_mapped(mt),
                    ("vm_pageout_flush: page %p is not write protected", mt));
                switch (pageout_status[i]) {
                case VM_PAGER_OK:
                        vm_page_lock(mt);
                        if (vm_page_in_laundry(mt))
                                vm_page_deactivate_noreuse(mt);
                        vm_page_unlock(mt);
                        /* FALLTHROUGH */
                case VM_PAGER_PEND:
                        numpagedout++;
                        break;
                case VM_PAGER_BAD:
                        /*
                         * The page is outside the object's range.  We pretend
                         * that the page out worked and clean the page, so the
                         * changes will be lost if the page is reclaimed by
                         * the page daemon.
                         */
                        vm_page_undirty(mt);
                        vm_page_lock(mt);
                        if (vm_page_in_laundry(mt))
                                vm_page_deactivate_noreuse(mt);
                        vm_page_unlock(mt);
                        break;
                case VM_PAGER_ERROR:
                case VM_PAGER_FAIL:
                        /*
                         * If the page couldn't be paged out to swap because the
                         * pager wasn't able to find space, place the page in
                         * the PQ_UNSWAPPABLE holding queue.  This is an
                         * optimization that prevents the page daemon from
                         * wasting CPU cycles on pages that cannot be reclaimed
                         * because no swap device is configured.
                         *
                         * Otherwise, reactivate the page so that it doesn't
                         * clog the laundry and inactive queues.  (We will try
                         * paging it out again later.)
                         */
                        vm_page_lock(mt);
                        if (object->type == OBJT_SWAP &&
                            pageout_status[i] == VM_PAGER_FAIL) {
                                vm_page_unswappable(mt);
                                numpagedout++;
                        } else
                                vm_page_activate(mt);
                        vm_page_unlock(mt);
                        if (eio != NULL && i >= mreq && i - mreq < runlen)
                                *eio = TRUE;
                        break;
                case VM_PAGER_AGAIN:
                        if (i >= mreq && i - mreq < runlen)
                                runlen = i - mreq;
                        break;
                }

                /*
                 * If the operation is still going, leave the page busy to
                 * block all other accesses. Also, leave the paging in
                 * progress indicator set so that we don't attempt an object
                 * collapse.
                 */
                if (pageout_status[i] != VM_PAGER_PEND) {
                        vm_object_pip_wakeup(object);
                        vm_page_sunbusy(mt);
                }
        }
        if (prunlen != NULL)
                *prunlen = runlen;
        return (numpagedout);
}
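
/*
 * An example of the runlen accounting above, using hypothetical values:
 * with count = 8 and mreq = 2, runlen starts at 6.  If the pager returns
 * VM_PAGER_AGAIN for mc[5] and success for the rest, runlen becomes 3:
 * only mc[2] through mc[4] form the contiguous run of completed pages
 * beginning at mreq.
 */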

static void
vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
{

        atomic_store_rel_int(&swapdev_enabled, 1);
}

static void
vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
{

        if (swap_pager_nswapdev() == 1)
                atomic_store_rel_int(&swapdev_enabled, 0);
}

/*
 * Attempt to acquire all of the necessary locks to launder a page and
 * then call through the clustering layer to PUTPAGES.  Wait a short
 * time for a vnode lock.
 *
 * Requires the page and object lock on entry, releases both before return.
 * Returns 0 on success and an errno otherwise.
 */
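/*
 * (Summarizing the body below, the possible errno values are: EDEADLK when
 * the vnode lock or mount point cannot be acquired without risking deadlock,
 * ENOENT when the vnode and object were disassociated while unlocked, ENXIO
 * when the page was requeued, reallocated, or cleaned in the meantime, EBUSY
 * when the page cannot be exclusively busied or has wired mappings, and EIO
 * when clustering laundered no pages.)
 */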
static int
vm_pageout_clean(vm_page_t m, int *numpagedout)
{
        struct vnode *vp;
        struct mount *mp;
        vm_object_t object;
        vm_pindex_t pindex;
        int error, lockmode;

        vm_page_assert_locked(m);
        object = m->object;
        VM_OBJECT_ASSERT_WLOCKED(object);
        error = 0;
        vp = NULL;
        mp = NULL;

        /*
         * The object is already known NOT to be dead.  It
         * is possible for the vget() to block the whole
         * pageout daemon, but the new low-memory handling
         * code should prevent it.
         *
         * We can't wait forever for the vnode lock; we might
         * deadlock due to a vn_read() getting stuck in
         * vm_wait while holding this vnode.  We skip the
         * vnode if we can't get it in a reasonable amount
         * of time.
         */
        if (object->type == OBJT_VNODE) {
                vm_page_unlock(m);
                vm_page_xunbusy(m);
                vp = object->handle;
                if (vp->v_type == VREG &&
                    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
                        mp = NULL;
                        error = EDEADLK;
                        goto unlock_all;
                }
                KASSERT(mp != NULL,
                    ("vp %p with NULL v_mount", vp));
                vm_object_reference_locked(object);
                pindex = m->pindex;
                VM_OBJECT_WUNLOCK(object);
                lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
                    LK_SHARED : LK_EXCLUSIVE;
                if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
                        vp = NULL;
                        error = EDEADLK;
                        goto unlock_mp;
                }
                VM_OBJECT_WLOCK(object);

                /*
                 * Ensure that the object and vnode were not disassociated
                 * while locks were dropped.
                 */
                if (vp->v_object != object) {
                        error = ENOENT;
                        goto unlock_all;
                }
                vm_page_lock(m);

                /*
                 * While the object and page were unlocked, the page
                 * may have been:
                 * (1) moved to a different queue,
                 * (2) reallocated to a different object,
                 * (3) reallocated to a different offset, or
                 * (4) cleaned.
                 */
                if (!vm_page_in_laundry(m) || m->object != object ||
                    m->pindex != pindex || m->dirty == 0) {
                        vm_page_unlock(m);
                        error = ENXIO;
                        goto unlock_all;
                }

                /*
                 * The page may have been busied while the object and page
                 * locks were released.
                 */
                if (vm_page_tryxbusy(m) == 0) {
                        vm_page_unlock(m);
                        error = EBUSY;
                        goto unlock_all;
                }
        }

        /*
         * Remove all writeable mappings, failing if the page is wired.
         */
        if (!vm_page_try_remove_write(m)) {
                vm_page_xunbusy(m);
                vm_page_unlock(m);
                error = EBUSY;
                goto unlock_all;
        }
        vm_page_unlock(m);

        /*
         * If a page is dirty, then it is either being washed
         * (but not yet cleaned) or it is still in the
         * laundry.  If it is still in the laundry, then we
         * start the cleaning operation.
         */
        if ((*numpagedout = vm_pageout_cluster(m)) == 0)
                error = EIO;

unlock_all:
        VM_OBJECT_WUNLOCK(object);

unlock_mp:
        vm_page_lock_assert(m, MA_NOTOWNED);
        if (mp != NULL) {
                if (vp != NULL)
                        vput(vp);
                vm_object_deallocate(object);
                vn_finished_write(mp);
        }

        return (error);
}

/*
 * Attempt to launder the specified number of pages.
 *
 * Returns the number of pages successfully laundered.
 */
static int
vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
{
        struct scan_state ss;
        struct vm_pagequeue *pq;
        struct mtx *mtx;
        vm_object_t object;
        vm_page_t m, marker;
        int act_delta, error, numpagedout, queue, starting_target;
        int vnodes_skipped;
        bool pageout_ok;

        mtx = NULL;
        object = NULL;
        starting_target = launder;
        vnodes_skipped = 0;

        /*
         * Scan the laundry queues for pages eligible to be laundered.  We stop
         * once the target number of dirty pages has been laundered, or once
         * we've reached the end of the queue.  A single iteration of this loop
         * may cause more than one page to be laundered because of clustering.
         *
         * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
         * swap devices are configured.
         */
        if (atomic_load_acq_int(&swapdev_enabled))
                queue = PQ_UNSWAPPABLE;
        else
                queue = PQ_LAUNDRY;

scan:
        marker = &vmd->vmd_markers[queue];
        pq = &vmd->vmd_pagequeues[queue];
        vm_pagequeue_lock(pq);
        vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
        while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
                if (__predict_false((m->flags & PG_MARKER) != 0))
                        continue;

                vm_page_change_lock(m, &mtx);

recheck:
                /*
                 * The page may have been disassociated from the queue
                 * or even freed while locks were dropped.  We thus must be
                 * careful whenever modifying page state.  Once the object lock
                 * has been acquired, we have a stable reference to the page.
                 */
                if (vm_page_queue(m) != queue)
                        continue;

                /*
                 * A requeue was requested, so this page gets a second
                 * chance.
                 */
                if ((m->a.flags & PGA_REQUEUE) != 0) {
                        vm_page_pqbatch_submit(m, queue);
                        continue;
                }

                /*
                 * Wired pages may not be freed.  Complete their removal
                 * from the queue now to avoid needless revisits during
                 * future scans.  This check is racy and must be reverified once
                 * we hold the object lock and have verified that the page
                 * is not busy.
                 */
                if (vm_page_wired(m)) {
                        vm_page_dequeue_deferred(m);
                        continue;
                }

                if (object != m->object) {
                        if (object != NULL)
                                VM_OBJECT_WUNLOCK(object);

                        /*
                         * A page's object pointer may be set to NULL before
                         * the object lock is acquired.
                         */
                        object = (vm_object_t)atomic_load_ptr(&m->object);
                        if (object != NULL && !VM_OBJECT_TRYWLOCK(object)) {
                                mtx_unlock(mtx);
                                /* Depends on type-stability. */
                                VM_OBJECT_WLOCK(object);
                                mtx_lock(mtx);
                                goto recheck;
                        }
                }
                if (__predict_false(m->object == NULL))
                        /*
                         * The page has been removed from its object.
                         */
                        continue;
                KASSERT(m->object == object, ("page %p does not belong to %p",
                    m, object));

                if (vm_page_tryxbusy(m) == 0)
                        continue;

                /*
                 * Re-check for wirings now that we hold the object lock and
                 * have verified that the page is unbusied.  If the page is
                 * mapped, it may still be wired by pmap lookups.  The call to
                 * vm_page_try_remove_all() below atomically checks for such
                 * wirings and removes mappings.  If the page is unmapped, the
                 * wire count is guaranteed not to increase.
                 */
                if (__predict_false(vm_page_wired(m))) {
                        vm_page_xunbusy(m);
                        vm_page_dequeue_deferred(m);
                        continue;
                }

                /*
                 * Invalid pages can be easily freed.  They cannot be
                 * mapped; vm_page_free() asserts this.
                 */
                if (vm_page_none_valid(m))
                        goto free_page;

                /*
                 * If the page has been referenced and the object is not dead,
                 * reactivate or requeue the page depending on whether the
                 * object is mapped.
                 *
                 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
                 * that a reference from a concurrently destroyed mapping is
                 * observed here and now.
                 */
                if (object->ref_count != 0)
                        act_delta = pmap_ts_referenced(m);
                else {
                        KASSERT(!pmap_page_is_mapped(m),
                            ("page %p is mapped", m));
                        act_delta = 0;
                }
                if ((m->a.flags & PGA_REFERENCED) != 0) {
                        vm_page_aflag_clear(m, PGA_REFERENCED);
                        act_delta++;
                }
                if (act_delta != 0) {
                        if (object->ref_count != 0) {
                                vm_page_xunbusy(m);
                                VM_CNT_INC(v_reactivated);
                                vm_page_activate(m);

                                /*
                                 * Increase the activation count if the page
                                 * was referenced while in the laundry queue.
                                 * This makes it less likely that the page will
                                 * be returned prematurely to the inactive
                                 * queue.
                                 */
                                m->a.act_count += act_delta + ACT_ADVANCE;

                                /*
                                 * If this was a background laundering, count
                                 * activated pages towards our target.  The
                                 * purpose of background laundering is to ensure
                                 * that pages are eventually cycled through the
                                 * laundry queue, and an activation is a valid
                                 * way out.
                                 */
                                if (!in_shortfall)
                                        launder--;
                                continue;
                        } else if ((object->flags & OBJ_DEAD) == 0) {
                                vm_page_xunbusy(m);
                                vm_page_requeue(m);
                                continue;
                        }
                }

                /*
                 * If the page appears to be clean at the machine-independent
                 * layer, then remove all of its mappings from the pmap in
                 * anticipation of freeing it.  If, however, any of the page's
                 * mappings allow write access, then the page may still be
                 * modified until the last of those mappings are removed.
                 */
                if (object->ref_count != 0) {
                        vm_page_test_dirty(m);
                        if (m->dirty == 0 && !vm_page_try_remove_all(m)) {
                                vm_page_xunbusy(m);
                                vm_page_dequeue_deferred(m);
                                continue;
                        }
                }

                /*
                 * Clean pages are freed, and dirty pages are paged out unless
                 * they belong to a dead object.  Requeueing dirty pages from
                 * dead objects is pointless, as they are being paged out and
                 * freed by the thread that destroyed the object.
                 */
                if (m->dirty == 0) {
free_page:
                        vm_page_free(m);
                        VM_CNT_INC(v_dfree);
                } else if ((object->flags & OBJ_DEAD) == 0) {
                        if (object->type != OBJT_SWAP &&
                            object->type != OBJT_DEFAULT)
                                pageout_ok = true;
                        else if (disable_swap_pageouts)
                                pageout_ok = false;
                        else
                                pageout_ok = true;
                        if (!pageout_ok) {
                                vm_page_xunbusy(m);
                                vm_page_requeue(m);
                                continue;
                        }

                        /*
                         * Form a cluster with adjacent, dirty pages from the
                         * same object, and page out that entire cluster.
                         *
                         * The adjacent, dirty pages must also be in the
                         * laundry.  However, their mappings are not checked
                         * for new references.  Consequently, a recently
                         * referenced page may be paged out.  However, that
                         * page will not be prematurely reclaimed.  After page
                         * out, the page will be placed in the inactive queue,
                         * where any new references will be detected and the
                         * page reactivated.
                         */
                        error = vm_pageout_clean(m, &numpagedout);
                        if (error == 0) {
                                launder -= numpagedout;
                                ss.scanned += numpagedout;
                        } else if (error == EDEADLK) {
                                pageout_lock_miss++;
                                vnodes_skipped++;
                        }
                        mtx = NULL;
                        object = NULL;
                } else
                        vm_page_xunbusy(m);
        }
        if (mtx != NULL) {
                mtx_unlock(mtx);
                mtx = NULL;
        }
        if (object != NULL) {
                VM_OBJECT_WUNLOCK(object);
                object = NULL;
        }
        vm_pagequeue_lock(pq);
        vm_pageout_end_scan(&ss);
        vm_pagequeue_unlock(pq);

        if (launder > 0 && queue == PQ_UNSWAPPABLE) {
                queue = PQ_LAUNDRY;
                goto scan;
        }

        /*
         * Wake up the sync daemon if we skipped a vnode in a writeable object
         * and we didn't launder enough pages.
         */
        if (vnodes_skipped > 0 && launder > 0)
                (void)speedup_syncer();

        return (starting_target - launder);
}

/*
 * Compute the integer square root.
 */
static u_int
isqrt(u_int num)
{
        u_int bit, root, tmp;

        bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
        root = 0;
        while (bit != 0) {
                tmp = root + bit;
                root >>= 1;
                if (num >= tmp) {
                        num -= tmp;
                        root += bit;
                }
                bit >>= 2;
        }
        return (root);
}
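
/*
 * A worked example of the digit-by-digit (radix-4) method above, for
 * num = 27: "bit" starts at 16, the largest power of four not exceeding 27.
 * Pass 1: tmp = 16 and 27 >= 16, so num becomes 11 and root becomes 16;
 * bit becomes 4.  Pass 2: tmp = 20, root halves to 8, and 11 < 20, so
 * nothing is subtracted; bit becomes 1.  Pass 3: tmp = 9, root halves to 4,
 * and 11 >= 9, so root becomes 5.  The result is 5: 5 * 5 = 25 <= 27 < 36.
 */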

/*
 * Perform the work of the laundry thread: periodically wake up and determine
 * whether any pages need to be laundered.  If so, determine the number of pages
 * that need to be laundered, and launder them.
 */
static void
vm_pageout_laundry_worker(void *arg)
{
        struct vm_domain *vmd;
        struct vm_pagequeue *pq;
        uint64_t nclean, ndirty, nfreed;
        int domain, last_target, launder, shortfall, shortfall_cycle, target;
        bool in_shortfall;

        domain = (uintptr_t)arg;
        vmd = VM_DOMAIN(domain);
        pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
        KASSERT(vmd->vmd_segs != 0, ("domain without segments"));

        shortfall = 0;
        in_shortfall = false;
        shortfall_cycle = 0;
        last_target = target = 0;
        nfreed = 0;

        /*
         * Calls to these handlers are serialized by the swap syscall lock.
         */
        (void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
            EVENTHANDLER_PRI_ANY);
        (void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
            EVENTHANDLER_PRI_ANY);

        /*
         * The pageout laundry worker is never done, so loop forever.
         */
        for (;;) {
                KASSERT(target >= 0, ("negative target %d", target));
                KASSERT(shortfall_cycle >= 0,
                    ("negative cycle %d", shortfall_cycle));
                launder = 0;

                /*
                 * First determine whether we need to launder pages to meet a
                 * shortage of free pages.
                 */
                if (shortfall > 0) {
                        in_shortfall = true;
                        shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
                        target = shortfall;
                } else if (!in_shortfall)
                        goto trybackground;
                else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
                        /*
                         * We recently entered shortfall and began laundering
                         * pages.  If we have completed that laundering run
                         * (and we are no longer in shortfall) or we have met
                         * our laundry target through other activity, then we
                         * can stop laundering pages.
                         */
                        in_shortfall = false;
                        target = 0;
                        goto trybackground;
                }
                launder = target / shortfall_cycle--;
                goto dolaundry;

                /*
                 * There's no immediate need to launder any pages; see if we
                 * meet the conditions to perform background laundering:
                 *
                 * 1. The ratio of dirty to clean inactive pages exceeds the
                 *    background laundering threshold, or
                 * 2. we haven't yet reached the target of the current
                 *    background laundering run.
                 *
                 * The background laundering threshold is not a constant.
                 * Instead, it is a slowly growing function of the number of
                 * clean pages freed by the page daemon since the last
                 * background laundering.  Thus, as the ratio of dirty to
                 * clean inactive pages grows, the amount of memory pressure
                 * required to trigger laundering decreases.  We ensure
                 * that the threshold is non-zero after an inactive queue
                 * scan, even if that scan failed to free a single clean page.
                 */
trybackground:
                nclean = vmd->vmd_free_count +
                    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
                ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
                if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
                    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
                        target = vmd->vmd_background_launder_target;
                }
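
                /*
                 * To make the threshold above concrete, suppose a domain
                 * where vmd_free_target - vmd_free_min is 10000 pages (a
                 * hypothetical value).  Just after a background run
                 * (nfreed == 0), isqrt(howmany(1, 10000)) == 1, so
                 * laundering triggers as soon as ndirty >= nclean.  Once
                 * 40000 clean pages have been freed,
                 * isqrt(howmany(40001, 10000)) == isqrt(5) == 2, and
                 * laundering triggers at ndirty >= nclean / 2.
                 */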

                /*
                 * We have a non-zero background laundering target.  If we've
                 * laundered up to our maximum without observing a page daemon
                 * request, just stop.  This is a safety belt that ensures we
                 * don't launder an excessive amount if memory pressure is low
                 * and the ratio of dirty to clean pages is large.  Otherwise,
                 * proceed at the background laundering rate.
                 */
                if (target > 0) {
                        if (nfreed > 0) {
                                nfreed = 0;
                                last_target = target;
                        } else if (last_target - target >=
                            vm_background_launder_max * 1024 / PAGE_SIZE) {
                                target = 0;
                        }
                        launder = vm_background_launder_rate * 1024 / PAGE_SIZE;
                        launder /= VM_LAUNDER_RATE;
                        if (launder > target)
                                launder = target;
                }
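
                /*
                 * For scale: with the default vm_background_launder_rate of
                 * 4096KB/s and an assumed PAGE_SIZE of 4096 bytes, the
                 * computation above yields 1024 pages per second, or about
                 * 102 pages per wakeup at VM_LAUNDER_RATE.  The run stops
                 * early once last_target - target reaches the page
                 * equivalent of vm_background_launder_max (20MB, or 5120
                 * such pages, by default).
                 */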

dolaundry:
                if (launder > 0) {
                        /*
                         * Because of I/O clustering, the number of laundered
                         * pages could exceed "target" by the maximum size of
                         * a cluster minus one.
                         */
                        target -= min(vm_pageout_launder(vmd, launder,
                            in_shortfall), target);
                        pause("laundp", hz / VM_LAUNDER_RATE);
                }

                /*
                 * If we're not currently laundering pages and the page daemon
                 * hasn't posted a new request, sleep until the page daemon
                 * kicks us.
                 */
                vm_pagequeue_lock(pq);
                if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
                        (void)mtx_sleep(&vmd->vmd_laundry_request,
                            vm_pagequeue_lockptr(pq), PVM, "launds", 0);

                /*
                 * If the pagedaemon has indicated that it's in shortfall, start
                 * a shortfall laundering unless we're already in the middle of
                 * one.  This may preempt a background laundering.
                 */
                if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
                    (!in_shortfall || shortfall_cycle == 0)) {
                        shortfall = vm_laundry_target(vmd) +
                            vmd->vmd_pageout_deficit;
                        target = 0;
                } else
                        shortfall = 0;

                if (target == 0)
                        vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
                nfreed += vmd->vmd_clean_pages_freed;
                vmd->vmd_clean_pages_freed = 0;
                vm_pagequeue_unlock(pq);
        }
}

/*
 * Compute the number of pages we want to try to move from the
 * active queue to either the inactive or laundry queue.
 *
 * When scanning active pages during a shortage, we make clean pages
 * count more heavily towards the page shortage than dirty pages.
 * This is because dirty pages must be laundered before they can be
 * reused and thus have less utility when attempting to quickly
 * alleviate a free page shortage.  However, this weighting also
 * causes the scan to deactivate dirty pages more aggressively,
 * improving the effectiveness of clustering.
 */
static int
vm_pageout_active_target(struct vm_domain *vmd)
{
        int shortage;

        shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
            (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
            vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
        shortage *= act_scan_laundry_weight;
        return (shortage);
}
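
/*
 * For example, with the default act_scan_laundry_weight of 3 and some
 * hypothetical queue sizes: if vmd_inactive_target plus the paging target
 * is 1000 pages, the inactive queue holds 400 pages, and the laundry queue
 * holds 300, then shortage = (1000 - (400 + 300 / 3)) * 3 = 1500.  During
 * the active scan, each clean page deactivated subtracts 3 from the
 * shortage while each dirty page subtracts only 1, so the deficit can be
 * met by, e.g., 500 clean deactivations or 1500 dirty ones.
 */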

/*
 * Scan the active queue.  If there is no shortage of inactive pages, scan a
 * small portion of the queue in order to maintain quasi-LRU.
 */
static void
vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
{
        struct scan_state ss;
        struct mtx *mtx;
        vm_object_t object;
        vm_page_t m, marker;
        struct vm_pagequeue *pq;
        long min_scan;
        int act_delta, max_scan, scan_tick;

        marker = &vmd->vmd_markers[PQ_ACTIVE];
        pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
        vm_pagequeue_lock(pq);

        /*
         * If we're just idle polling, attempt to visit every
         * active page within 'update_period' seconds.
         */
        scan_tick = ticks;
        if (vm_pageout_update_period != 0) {
                min_scan = pq->pq_cnt;
                min_scan *= scan_tick - vmd->vmd_last_active_scan;
                min_scan /= hz * vm_pageout_update_period;
        } else
                min_scan = 0;
        if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
                vmd->vmd_last_active_scan = scan_tick;

        /*
         * Scan the active queue for pages that can be deactivated.  Update
         * the per-page activity counter and use it to identify deactivation
         * candidates.  Held pages may be deactivated.
         *
         * To avoid requeuing each page that remains in the active queue, we
         * implement the CLOCK algorithm.  To keep the implementation of the
         * enqueue operation consistent for all page queues, we use two hands,
         * represented by marker pages. Scans begin at the first hand, which
         * precedes the second hand in the queue.  When the two hands meet,
         * they are moved back to the head and tail of the queue, respectively,
         * and scanning resumes.
         */
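
        /*
         * A sketch of the queue during a scan (markers in brackets):
         *
         *      head -> [clock0] p1 p2 [marker] p3 ... pN [clock1] <- tail
         *
         * The scan hand, "marker", advances from clock[0] toward clock[1];
         * pages behind it have already been visited.  When the marker
         * reaches clock[1] in the loop below, both hands are moved back to
         * the head and tail of the queue and the scan resumes.
         */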
1232         max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
1233         mtx = NULL;
1234 act_scan:
1235         vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
1236         while ((m = vm_pageout_next(&ss, false)) != NULL) {
1237                 if (__predict_false(m == &vmd->vmd_clock[1])) {
1238                         vm_pagequeue_lock(pq);
1239                         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1240                         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
1241                         TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
1242                             plinks.q);
1243                         TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
1244                             plinks.q);
1245                         max_scan -= ss.scanned;
1246                         vm_pageout_end_scan(&ss);
1247                         goto act_scan;
1248                 }
1249                 if (__predict_false((m->flags & PG_MARKER) != 0))
1250                         continue;
1251
1252                 vm_page_change_lock(m, &mtx);
1253
1254                 /*
1255                  * The page may have been disassociated from the queue
1256                  * or even freed while locks were dropped.  We thus must be
1257                  * careful whenever modifying page state.  Once the object lock
1258                  * has been acquired, we have a stable reference to the page.
1259                  */
1260                 if (vm_page_queue(m) != PQ_ACTIVE)
1261                         continue;
1262
1263                 /*
1264                  * Wired pages are dequeued lazily.
1265                  */
1266                 if (vm_page_wired(m)) {
1267                         vm_page_dequeue_deferred(m);
1268                         continue;
1269                 }
1270
1271                 /*
1272                  * A page's object pointer may be set to NULL before
1273                  * the object lock is acquired.
1274                  */
1275                 object = (vm_object_t)atomic_load_ptr(&m->object);
1276                 if (__predict_false(object == NULL))
1277                         /*
1278                          * The page has been removed from its object.
1279                          */
1280                         continue;
1281
1282                 /*
1283                  * Check to see "how much" the page has been used.
1284                  *
1285                  * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1286                  * that a reference from a concurrently destroyed mapping is
1287                  * observed here and now.
1288                  *
1289                  * Perform an unsynchronized object ref count check.  While
1290                  * the page lock ensures that the page is not reallocated to
1291                  * another object, in particular, one with unmanaged mappings
1292                  * that cannot support pmap_ts_referenced(), two races are,
1293                  * nonetheless, possible:
1294                  * 1) The count was transitioning to zero, but we saw a non-
1295                  *    zero value.  pmap_ts_referenced() will return zero
1296                  *    because the page is not mapped.
1297                  * 2) The count was transitioning to one, but we saw zero.
1298                  *    This race delays the detection of a new reference.  At
1299                  *    worst, we will deactivate and reactivate the page.
1300                  */
1301                 if (object->ref_count != 0)
1302                         act_delta = pmap_ts_referenced(m);
1303                 else
1304                         act_delta = 0;
1305                 if ((m->a.flags & PGA_REFERENCED) != 0) {
1306                         vm_page_aflag_clear(m, PGA_REFERENCED);
1307                         act_delta++;
1308                 }
1309
1310                 /*
1311                  * Advance or decay the act_count based on recent usage.
1312                  */
1313                 if (act_delta != 0) {
1314                         m->a.act_count += ACT_ADVANCE + act_delta;
1315                         if (m->a.act_count > ACT_MAX)
1316                                 m->a.act_count = ACT_MAX;
1317                 } else
1318                         m->a.act_count -= min(m->a.act_count, ACT_DECLINE);
1319
1320                 if (m->a.act_count == 0) {
1321                         /*
1322                          * When not short for inactive pages, let dirty pages go
1323                          * through the inactive queue before moving to the
1324                          * laundry queues.  This gives them some extra time to
1325                          * be reactivated, potentially avoiding an expensive
1326                          * pageout.  However, during a page shortage, the
1327                          * inactive queue is necessarily small, and so dirty
1328                          * pages would only spend a trivial amount of time in
1329                          * the inactive queue.  Therefore, we might as well
1330                          * place them directly in the laundry queue to reduce
1331                          * queuing overhead.
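                         * The shortage accounting below weighs a clean
                         * deactivation act_scan_laundry_weight times as
                         * heavily as queueing a dirty page for laundering,
                         * since deactivated clean pages become freeable
                         * without a trip through the laundry.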
1332                          */
1333                         if (page_shortage <= 0) {
1334                                 vm_page_swapqueue(m, PQ_ACTIVE, PQ_INACTIVE);
1335                         } else {
1336                                 /*
1337                                  * Calling vm_page_test_dirty() here would
1338                                  * require acquisition of the object's write
1339                                  * lock.  However, during a page shortage,
1340                                  * directing dirty pages into the laundry
1341                                  * queue is only an optimization and not a
1342                                  * requirement.  Therefore, we simply rely on
1343                                  * the opportunistic updates to the page's
1344                                  * dirty field by the pmap.
1345                                  */
1346                                 if (m->dirty == 0) {
1347                                         vm_page_swapqueue(m, PQ_ACTIVE,
1348                                             PQ_INACTIVE);
1349                                         page_shortage -=
1350                                             act_scan_laundry_weight;
1351                                 } else {
1352                                         vm_page_swapqueue(m, PQ_ACTIVE,
1353                                             PQ_LAUNDRY);
1354                                         page_shortage--;
1355                                 }
1356                         }
1357                 }
1358         }
1359         if (mtx != NULL) {
1360                 mtx_unlock(mtx);
1361                 mtx = NULL;
1362         }
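        /*
         * Rotate the first clock hand to the marker's final position, so
         * that the next active scan resumes where this one stopped.
         */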
1363         vm_pagequeue_lock(pq);
1364         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1365         TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
1366         vm_pageout_end_scan(&ss);
1367         vm_pagequeue_unlock(pq);
1368 }
1369
1370 static int
1371 vm_pageout_reinsert_inactive_page(struct scan_state *ss, vm_page_t m)
1372 {
1373         struct vm_domain *vmd;
1374
1375         if (m->a.queue != PQ_INACTIVE || (m->a.flags & PGA_ENQUEUED) != 0)
1376                 return (0);
1377         vm_page_aflag_set(m, PGA_ENQUEUED);
1378         if ((m->a.flags & PGA_REQUEUE_HEAD) != 0) {
1379                 vmd = vm_pagequeue_domain(m);
1380                 TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
1381                 vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
1382         } else if ((m->a.flags & PGA_REQUEUE) != 0) {
1383                 TAILQ_INSERT_TAIL(&ss->pq->pq_pl, m, plinks.q);
1384                 vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
1385         } else
1386                 TAILQ_INSERT_BEFORE(ss->marker, m, plinks.q);
1387         return (1);
1388 }
1389
1390 /*
1391  * Re-add stuck pages to the inactive queue.  We will examine them again
1392  * during the next scan.  If the queue state of a page has changed since
1393  * it was physically removed from the page queue in
1394  * vm_pageout_collect_batch(), don't do anything with that page.
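 * Passing m == NULL flushes any pages remaining in the batch queue.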
1395  */
1396 static void
1397 vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
1398     vm_page_t m)
1399 {
1400         struct vm_pagequeue *pq;
1401         int delta;
1402
1403         delta = 0;
1404         pq = ss->pq;
1405
1406         if (m != NULL) {
1407                 if (vm_batchqueue_insert(bq, m))
1408                         return;
1409                 vm_pagequeue_lock(pq);
1410                 delta += vm_pageout_reinsert_inactive_page(ss, m);
1411         } else
1412                 vm_pagequeue_lock(pq);
1413         while ((m = vm_batchqueue_pop(bq)) != NULL)
1414                 delta += vm_pageout_reinsert_inactive_page(ss, m);
1415         vm_pagequeue_cnt_add(pq, delta);
1416         vm_pagequeue_unlock(pq);
1417         vm_batchqueue_init(bq);
1418 }
1419
1420 /*
1421  * Attempt to reclaim the requested number of pages from the inactive queue.
1422  * Returns true if the shortage was addressed.
1423  */
1424 static int
1425 vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
1426     int *addl_shortage)
1427 {
1428         struct scan_state ss;
1429         struct vm_batchqueue rq;
1430         struct mtx *mtx;
1431         vm_page_t m, marker;
1432         struct vm_pagequeue *pq;
1433         vm_object_t object;
1434         int act_delta, addl_page_shortage, deficit, page_shortage;
1435         int starting_page_shortage;
1436
1437         /*
1438          * The addl_page_shortage is an estimate of the number of temporarily
1439          * stuck pages in the inactive queue.  In other words, the
1440          * number of pages from the inactive count that should be
1441          * discounted in setting the target for the active queue scan.
1442          */
1443         addl_page_shortage = 0;
1444
1445         /*
1446          * vmd_pageout_deficit counts the number of pages requested in
1447          * allocations that failed because of a free page shortage.  We assume
1448          * that the allocations will be reattempted and thus include the deficit
1449          * in our scan target.
1450          */
1451         deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
1452         starting_page_shortage = page_shortage = shortage + deficit;
1453
1454         mtx = NULL;
1455         object = NULL;
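        /*
         * The local batch queue rq accumulates pages that must be returned
         * to the inactive queue, so that reinsertions are performed in
         * batches under a single pagequeue lock acquisition.
         */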
1456         vm_batchqueue_init(&rq);
1457
1458         /*
1459          * Start scanning the inactive queue for pages that we can free.  The
1460          * scan will stop when we reach the target or we have scanned the
1461          * entire queue.  (Note that m->a.act_count is not used to make
1462          * decisions for the inactive queue, only for the active queue.)
1463          */
1464         marker = &vmd->vmd_markers[PQ_INACTIVE];
1465         pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1466         vm_pagequeue_lock(pq);
1467         vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
1468         while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) {
1469                 KASSERT((m->flags & PG_MARKER) == 0,
1470                     ("marker page %p was dequeued", m));
1471
1472                 vm_page_change_lock(m, &mtx);
1473
1474 recheck:
1475                 /*
1476                  * The page may have been disassociated from the queue
1477                  * or even freed while locks were dropped.  We thus must be
1478                  * careful whenever modifying page state.  Once the object lock
1479                  * has been acquired, we have a stable reference to the page.
1480                  */
1481                 if (vm_page_queue(m) != PQ_INACTIVE) {
1482                         addl_page_shortage++;
1483                         continue;
1484                 }
1485
1486                 /*
1487                  * The page was re-enqueued after the page queue lock was
1488                  * dropped, or a requeue was requested.  This page gets a second
1489                  * chance.
1490                  */
1491                 if ((m->a.flags & (PGA_ENQUEUED | PGA_REQUEUE |
1492                     PGA_REQUEUE_HEAD)) != 0)
1493                         goto reinsert;
1494
1495                 /*
1496                  * Wired pages may not be freed.  Complete their removal
1497                  * from the queue now to avoid needless revisits during
1498                  * future scans.  This check is racy and must be repeated once
1499                  * we hold the object lock and have verified that the page
1500                  * is not busy.
1501                  */
1502                 if (vm_page_wired(m)) {
1503                         vm_page_dequeue_deferred(m);
1504                         continue;
1505                 }
1506
1507                 if (object != m->object) {
1508                         if (object != NULL)
1509                                 VM_OBJECT_WUNLOCK(object);
1510
1511                         /*
1512                          * A page's object pointer may be set to NULL before
1513                          * the object lock is acquired.
1514                          */
1515                         object = (vm_object_t)atomic_load_ptr(&m->object);
1516                         if (object != NULL && !VM_OBJECT_TRYWLOCK(object)) {
1517                                 mtx_unlock(mtx);
1518                                 /* Depends on type-stability. */
1519                                 VM_OBJECT_WLOCK(object);
1520                                 mtx_lock(mtx);
1521                                 goto recheck;
1522                         }
1523                 }
1524                 if (__predict_false(m->object == NULL))
1525                         /*
1526                          * The page has been removed from its object.
1527                          */
1528                         continue;
1529                 KASSERT(m->object == object, ("page %p does not belong to %p",
1530                     m, object));
1531
1532                 if (vm_page_tryxbusy(m) == 0) {
1533                         /*
1534                          * Don't mess with busy pages.  Leave them at
1535                          * the front of the queue.  Most likely, they
1536                          * are being paged out and will leave the
1537                          * queue shortly after the scan finishes.  So,
1538                          * they ought to be discounted from the
1539                          * inactive count.
1540                          */
1541                         addl_page_shortage++;
1542                         goto reinsert;
1543                 }
1544
1545                 /*
1546                  * Re-check for wirings now that we hold the object lock and
1547                  * have verified that the page is unbusied.  If the page is
1548                  * mapped, it may still be wired by pmap lookups.  The call to
1549                  * vm_page_try_remove_all() below atomically checks for such
1550                  * wirings and removes mappings.  If the page is unmapped, the
1551                  * wire count is guaranteed not to increase.
1552                  */
1553                 if (__predict_false(vm_page_wired(m))) {
1554                         vm_page_xunbusy(m);
1555                         vm_page_dequeue_deferred(m);
1556                         continue;
1557                 }
1558
1559                 /*
1560                  * Invalid pages can be easily freed.  They cannot be
1561                  * mapped; vm_page_free() asserts this.
1562                  */
1563                 if (vm_page_none_valid(m))
1564                         goto free_page;
1565
1566                 /*
1567                  * If the page has been referenced and the object is not dead,
1568                  * reactivate or requeue the page depending on whether the
1569                  * object is mapped.
1570                  *
1571                  * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1572                  * that a reference from a concurrently destroyed mapping is
1573                  * observed here and now.
1574                  */
1575                 if (object->ref_count != 0)
1576                         act_delta = pmap_ts_referenced(m);
1577                 else {
1578                         KASSERT(!pmap_page_is_mapped(m),
1579                             ("page %p is mapped", m));
1580                         act_delta = 0;
1581                 }
1582                 if ((m->a.flags & PGA_REFERENCED) != 0) {
1583                         vm_page_aflag_clear(m, PGA_REFERENCED);
1584                         act_delta++;
1585                 }
1586                 if (act_delta != 0) {
1587                         if (object->ref_count != 0) {
1588                                 vm_page_xunbusy(m);
1589                                 VM_CNT_INC(v_reactivated);
1590                                 vm_page_activate(m);
1591
1592                                 /*
1593                                  * Increase the activation count if the page
1594                                  * was referenced while in the inactive queue.
1595                                  * This makes it less likely that the page will
1596                                  * be returned prematurely to the inactive
1597                                  * queue.
1598                                  */
1599                                 m->a.act_count += act_delta + ACT_ADVANCE;
1600                                 continue;
1601                         } else if ((object->flags & OBJ_DEAD) == 0) {
1602                                 vm_page_xunbusy(m);
1603                                 vm_page_aflag_set(m, PGA_REQUEUE);
1604                                 goto reinsert;
1605                         }
1606                 }
1607
1608                 /*
1609                  * If the page appears to be clean at the machine-independent
1610                  * layer, then remove all of its mappings from the pmap in
1611                  * anticipation of freeing it.  If, however, any of the page's
1612                  * mappings allow write access, then the page may still be
1613                  * modified until the last of those mappings are removed.
1614                  */
1615                 if (object->ref_count != 0) {
1616                         vm_page_test_dirty(m);
1617                         if (m->dirty == 0 && !vm_page_try_remove_all(m)) {
1618                                 vm_page_xunbusy(m);
1619                                 vm_page_dequeue_deferred(m);
1620                                 continue;
1621                         }
1622                 }
1623
1624                 /*
1625                  * Clean pages can be freed, but dirty pages must be sent back
1626                  * to the laundry, unless they belong to a dead object.
1627                  * Requeueing dirty pages from dead objects is pointless, as
1628                  * they are being paged out and freed by the thread that
1629                  * destroyed the object.
1630                  */
1631                 if (m->dirty == 0) {
1632 free_page:
1633                         /*
1634                          * Because we dequeued the page and have already
1635                          * checked for concurrent dequeue and enqueue
1636                          * requests, we can safely disassociate the page
1637                          * from the inactive queue.
1638                          */
1639                         KASSERT((m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
1640                             ("page %p has queue state", m));
1641                         m->a.queue = PQ_NONE;
1642                         vm_page_free(m);
1643                         page_shortage--;
1644                         continue;
1645                 }
1646                 vm_page_xunbusy(m);
1647                 if ((object->flags & OBJ_DEAD) == 0)
1648                         vm_page_launder(m);
1649                 continue;
1650 reinsert:
1651                 vm_pageout_reinsert_inactive(&ss, &rq, m);
1652         }
1653         if (mtx != NULL)
1654                 mtx_unlock(mtx);
1655         if (object != NULL)
1656                 VM_OBJECT_WUNLOCK(object);
1657         vm_pageout_reinsert_inactive(&ss, &rq, NULL);
1658         vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
1659         vm_pagequeue_lock(pq);
1660         vm_pageout_end_scan(&ss);
1661         vm_pagequeue_unlock(pq);
1662
1663         VM_CNT_ADD(v_dfree, starting_page_shortage - page_shortage);
1664
1665         /*
1666          * Wake up the laundry thread so that it can perform any needed
1667          * laundering.  If we didn't meet our target, we're in shortfall and
1668          * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1669          * swap devices are configured, the laundry thread has no work to do, so
1670          * don't bother waking it up.
1671          *
1672          * The laundry thread uses the number of inactive queue scans elapsed
1673          * since the last laundering to determine whether to launder again, so
1674          * keep count.
1675          */
1676         if (starting_page_shortage > 0) {
1677                 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1678                 vm_pagequeue_lock(pq);
1679                 if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1680                     (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1681                         if (page_shortage > 0) {
1682                                 vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
1683                                 VM_CNT_INC(v_pdshortfalls);
1684                         } else if (vmd->vmd_laundry_request !=
1685                             VM_LAUNDRY_SHORTFALL)
1686                                 vmd->vmd_laundry_request =
1687                                     VM_LAUNDRY_BACKGROUND;
1688                         wakeup(&vmd->vmd_laundry_request);
1689                 }
1690                 vmd->vmd_clean_pages_freed +=
1691                     starting_page_shortage - page_shortage;
1692                 vm_pagequeue_unlock(pq);
1693         }
1694
1695         /*
1696          * Wake up the swapout daemon if we didn't free the targeted number of
1697          * pages.
1698          */
1699         if (page_shortage > 0)
1700                 vm_swapout_run();
1701
1702         /*
1703          * If the inactive queue scan fails repeatedly to meet its
1704          * target, kill the largest process.
1705          */
1706         vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1707
1708         /*
1709          * Reclaim pages by swapping out idle processes, if configured to do so.
1710          */
1711         vm_swapout_run_idle();
1712
1713         /*
1714          * See the description of addl_page_shortage above.
1715          */
1716         *addl_shortage = addl_page_shortage + deficit;
1717
1718         return (page_shortage <= 0);
1719 }
1720
1721 static int vm_pageout_oom_vote;
1722
1723 /*
1724  * The pagedaemon threads randomly select one to perform the
1725  * OOM.  Trying to kill processes before all pagedaemons have
1726  * failed to reach the free page target is premature.
1727  */
1728 static void
1729 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1730     int starting_page_shortage)
1731 {
1732         int old_vote;
1733
1734         if (starting_page_shortage <= 0 || starting_page_shortage !=
1735             page_shortage)
1736                 vmd->vmd_oom_seq = 0;
1737         else
1738                 vmd->vmd_oom_seq++;
1739         if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1740                 if (vmd->vmd_oom) {
1741                         vmd->vmd_oom = FALSE;
1742                         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1743                 }
1744                 return;
1745         }
1746
1747         /*
1748          * Do not restart the sequence count until the OOM
1749          * condition has been cleared.
1750          */
1751         vmd->vmd_oom_seq = 0;
1752
1753         if (vmd->vmd_oom)
1754                 return;
1755
1756         vmd->vmd_oom = TRUE;
1757         old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1758         if (old_vote != vm_ndomains - 1)
1759                 return;
1760
1761         /*
1762          * The current pagedaemon thread is the last in the quorum to
1763          * start OOM.  Initiate the selection and signaling of the
1764          * victim.
1765          */
1766         vm_pageout_oom(VM_OOM_MEM);
1767
1768         /*
1769          * After one round of OOM terror, recall our vote.  On the
1770          * next pass, this pagedaemon will vote again if the low
1771          * memory condition persists, since vmd_oom is now
1772          * false.
1773          */
1774         vmd->vmd_oom = FALSE;
1775         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1776 }
1777
1778 /*
1779  * The OOM killer is the page daemon's action of last resort when
1780  * memory allocation requests have been stalled for a prolonged period
1781  * of time because it cannot reclaim memory.  This function computes
1782  * the approximate number of physical pages that could be reclaimed if
1783  * the specified address space is destroyed.
1784  *
1785  * Private, anonymous memory owned by the address space is the
1786  * principal resource that we expect to recover after an OOM kill.
1787  * Since the physical pages mapped by the address space's COW entries
1788  * are typically shared pages, they are unlikely to be released and so
1789  * they are not counted.
1790  *
1791  * To get to the point where the page daemon runs the OOM killer, its
1792  * efforts to write-back vnode-backed pages may have stalled.  This
1793  * could be caused by a memory allocation deadlock in the write path
1794  * that might be resolved by an OOM kill.  Therefore, physical pages
1795  * belonging to vnode-backed objects are counted, because they might
1796  * be freed without being written out first if the address space holds
1797  * the last reference to an unlinked vnode.
1798  *
1799  * Similarly, physical pages belonging to OBJT_PHYS objects are
1800  * counted because the address space might hold the last reference to
1801  * the object.
1802  */
1803 static long
1804 vm_pageout_oom_pagecount(struct vmspace *vmspace)
1805 {
1806         vm_map_t map;
1807         vm_map_entry_t entry;
1808         vm_object_t obj;
1809         long res;
1810
1811         map = &vmspace->vm_map;
1812         KASSERT(!map->system_map, ("system map"));
1813         sx_assert(&map->lock, SA_LOCKED);
1814         res = 0;
1815         VM_MAP_ENTRY_FOREACH(entry, map) {
1816                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1817                         continue;
1818                 obj = entry->object.vm_object;
1819                 if (obj == NULL)
1820                         continue;
1821                 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
1822                     obj->ref_count != 1)
1823                         continue;
1824                 switch (obj->type) {
1825                 case OBJT_DEFAULT:
1826                 case OBJT_SWAP:
1827                 case OBJT_PHYS:
1828                 case OBJT_VNODE:
1829                         res += obj->resident_page_count;
1830                         break;
1831                 }
1832         }
1833         return (res);
1834 }
1835
1836 static int vm_oom_ratelim_last;
1837 static int vm_oom_pf_secs = 10;
1838 SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0,
1839     "Rate limit interval for OOM kills initiated by page faults, in seconds");
1840 static struct mtx vm_oom_ratelim_mtx;
1841
1842 void
1843 vm_pageout_oom(int shortage)
1844 {
1845         struct proc *p, *bigproc;
1846         vm_offset_t size, bigsize;
1847         struct thread *td;
1848         struct vmspace *vm;
1849         int now;
1850         bool breakout;
1851
1852         /*
1853          * For OOM requests originating from vm_fault(), there is a high
1854          * chance that a single large process faults simultaneously in
1855          * several threads.  Also, on an active system running many
1856          * medium-sized processes, such as a buildworld, all of them
1857          * could fault almost simultaneously as well.
1858          *
1859          * To avoid killing too many processes, rate-limit OOMs
1860          * initiated by vm_fault() time-outs on the waits for free
1861          * pages.
1862          */
1863         mtx_lock(&vm_oom_ratelim_mtx);
1864         now = ticks;
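        /*
         * The unsigned difference below remains correct even after the
         * ticks counter wraps around.
         */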
1865         if (shortage == VM_OOM_MEM_PF &&
1866             (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) {
1867                 mtx_unlock(&vm_oom_ratelim_mtx);
1868                 return;
1869         }
1870         vm_oom_ratelim_last = now;
1871         mtx_unlock(&vm_oom_ratelim_mtx);
1872
1873         /*
1874          * We keep the process bigproc locked once we find it to keep anyone
1875          * from messing with it; however, there is a possibility of
1876          * deadlock if process B is bigproc and one of its child processes
1877          * attempts to propagate a signal to B while we are waiting for
1878          * another process's lock while walking this list.  To avoid this,
1879          * we don't block on the process lock but skip already-locked processes.
1880          */
1881         bigproc = NULL;
1882         bigsize = 0;
1883         sx_slock(&allproc_lock);
1884         FOREACH_PROC_IN_SYSTEM(p) {
1885                 PROC_LOCK(p);
1886
1887                 /*
1888                  * If this is a system, protected or killed process, skip it.
1889                  */
1890                 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1891                     P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1892                     p->p_pid == 1 || P_KILLED(p) ||
1893                     (p->p_pid < 48 && swap_pager_avail != 0)) {
1894                         PROC_UNLOCK(p);
1895                         continue;
1896                 }
1897                 /*
1898                  * If the process is in a non-runnable state, don't
1899                  * touch it.  Check all of its threads individually.
1900                  */
1901                 breakout = false;
1902                 FOREACH_THREAD_IN_PROC(p, td) {
1903                         thread_lock(td);
1904                         if (!TD_ON_RUNQ(td) &&
1905                             !TD_IS_RUNNING(td) &&
1906                             !TD_IS_SLEEPING(td) &&
1907                             !TD_IS_SUSPENDED(td) &&
1908                             !TD_IS_SWAPPED(td)) {
1909                                 thread_unlock(td);
1910                                 breakout = true;
1911                                 break;
1912                         }
1913                         thread_unlock(td);
1914                 }
1915                 if (breakout) {
1916                         PROC_UNLOCK(p);
1917                         continue;
1918                 }
1919                 /*
1920                  * get the process size
1921                  */
1922                 vm = vmspace_acquire_ref(p);
1923                 if (vm == NULL) {
1924                         PROC_UNLOCK(p);
1925                         continue;
1926                 }
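                /*
                 * Hold the process so that it cannot exit while the
                 * allproc and process locks are dropped below to take
                 * its vm map lock.
                 */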
1927                 _PHOLD_LITE(p);
1928                 PROC_UNLOCK(p);
1929                 sx_sunlock(&allproc_lock);
1930                 if (!vm_map_trylock_read(&vm->vm_map)) {
1931                         vmspace_free(vm);
1932                         sx_slock(&allproc_lock);
1933                         PRELE(p);
1934                         continue;
1935                 }
1936                 size = vmspace_swap_count(vm);
1937                 if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF)
1938                         size += vm_pageout_oom_pagecount(vm);
1939                 vm_map_unlock_read(&vm->vm_map);
1940                 vmspace_free(vm);
1941                 sx_slock(&allproc_lock);
1942
1943                 /*
1944                  * If this process is bigger than the biggest one,
1945                  * remember it.
1946                  */
1947                 if (size > bigsize) {
1948                         if (bigproc != NULL)
1949                                 PRELE(bigproc);
1950                         bigproc = p;
1951                         bigsize = size;
1952                 } else {
1953                         PRELE(p);
1954                 }
1955         }
1956         sx_sunlock(&allproc_lock);
1957         if (bigproc != NULL) {
1958                 if (vm_panic_on_oom != 0)
1959                         panic("out of swap space");
1960                 PROC_LOCK(bigproc);
1961                 killproc(bigproc, "out of swap space");
1962                 sched_nice(bigproc, PRIO_MIN);
1963                 _PRELE(bigproc);
1964                 PROC_UNLOCK(bigproc);
1965         }
1966 }
1967
1968 /*
1969  * Signal a free page shortage to subsystems that have registered an event
1970  * handler.  Reclaim memory from UMA in the event of a severe shortage.
1971  * Return true if the free page count should be re-evaluated.
1972  */
1973 static bool
1974 vm_pageout_lowmem(void)
1975 {
1976         static int lowmem_ticks = 0;
1977         int last;
1978         bool ret;
1979
1980         ret = false;
1981
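        /*
         * At most one thread per lowmem_period interval wins the fcmpset
         * race below and runs the handlers.  Whether the fcmpset is won
         * or lost, a subsequent failure refreshes "last", so the loop
         * terminates once the recomputed interval is short enough.
         */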
1982         last = atomic_load_int(&lowmem_ticks);
1983         while ((u_int)(ticks - last) / hz >= lowmem_period) {
1984                 if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
1985                         continue;
1986
1987                 /*
1988                  * Decrease registered cache sizes.
1989                  */
1990                 SDT_PROBE0(vm, , , vm__lowmem_scan);
1991                 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
1992
1993                 /*
1994                  * Trim the UMA caches explicitly, now that the registered
1995                  * handlers above have drained theirs.
1996                  */
1997                 uma_reclaim(UMA_RECLAIM_TRIM);
1998                 ret = true;
1999         }
2000
2001         /*
2002          * Kick off an asynchronous reclaim of cached memory if one of the
2003          * page daemons is failing to keep up with demand.  Use the "severe"
2004          * threshold instead of "min" to ensure that we do not blow away the
2005          * caches if a subset of the NUMA domains are depleted by kernel memory
2006          * allocations; the domainset iterators automatically skip domains
2007          * below the "min" threshold on the first pass.
2008          *
2009          * The UMA reclaim worker has its own rate-limiting mechanism, so
2010          * we need not worry about waking it too often.
2011          */
2012         if (vm_page_count_severe())
2013                 uma_reclaim_wakeup();
2014
2015         return (ret);
2016 }
2017
2018 static void
2019 vm_pageout_worker(void *arg)
2020 {
2021         struct vm_domain *vmd;
2022         u_int ofree;
2023         int addl_shortage, domain, shortage;
2024         bool target_met;
2025
2026         domain = (uintptr_t)arg;
2027         vmd = VM_DOMAIN(domain);
2028         shortage = 0;
2029         target_met = true;
2030
2031         /*
2032          * XXXKIB It could be useful to bind pageout daemon threads to
2033          * the cores belonging to the domain, from which vm_page_array
2034          * is allocated.
2035          */
2036
2037         KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
2038         vmd->vmd_last_active_scan = ticks;
2039
2040         /*
2041          * The pageout daemon worker is never done, so loop forever.
2042          */
2043         while (TRUE) {
2044                 vm_domain_pageout_lock(vmd);
2045
2046                 /*
2047                  * We need to clear wanted before we check the limits.  This
2048                  * prevents races with wakers who will check wanted after they
2049                  * reach the limit.
2050                  */
2051                 atomic_store_int(&vmd->vmd_pageout_wanted, 0);
2052
2053                 /*
2054                  * Might the page daemon need to run again?
2055                  */
2056                 if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
2057                         /*
2058                          * Yes.  If the scan failed to produce enough free
2059                          * pages, sleep uninterruptibly for some time in the
2060                          * hope that the laundry thread will clean some pages.
2061                          */
2062                         vm_domain_pageout_unlock(vmd);
2063                         if (!target_met)
2064                                 pause("pwait", hz / VM_INACT_SCAN_RATE);
2065                 } else {
2066                         /*
2067                          * No, sleep until the next wakeup or until pages
2068                          * need to have their reference stats updated.
2069                          */
2070                         if (mtx_sleep(&vmd->vmd_pageout_wanted,
2071                             vm_domain_pageout_lockptr(vmd), PDROP | PVM,
2072                             "psleep", hz / VM_INACT_SCAN_RATE) == 0)
2073                                 VM_CNT_INC(v_pdwakeups);
2074                 }
2075
2076                 /* Prevent spurious wakeups by ensuring that wanted is set. */
2077                 atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2078
2079                 /*
2080                  * Use the controller to calculate how many pages to free in
2081                  * this interval, and scan the inactive queue.  If the lowmem
2082                  * handlers appear to have freed up some pages, subtract the
2083                  * difference from the inactive queue scan target.
2084                  */
2085                 shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
2086                 if (shortage > 0) {
2087                         ofree = vmd->vmd_free_count;
2088                         if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
2089                                 shortage -= min(vmd->vmd_free_count - ofree,
2090                                     (u_int)shortage);
2091                         target_met = vm_pageout_scan_inactive(vmd, shortage,
2092                             &addl_shortage);
2093                 } else
2094                         addl_shortage = 0;
2095
2096                 /*
2097                  * Scan the active queue.  A positive value for shortage
2098                  * indicates that we must aggressively deactivate pages to avoid
2099                  * a shortfall.
2100                  */
2101                 shortage = vm_pageout_active_target(vmd) + addl_shortage;
2102                 vm_pageout_scan_active(vmd, shortage);
2103         }
2104 }
2105
2106 /*
2107  * Initialize basic pageout daemon settings.  See the comment above the
2108  * definition of vm_domain for some explanation of how these thresholds are
2109  * used.
2110  */
2111 static void
2112 vm_pageout_init_domain(int domain)
2113 {
2114         struct vm_domain *vmd;
2115         struct sysctl_oid *oid;
2116
2117         vmd = VM_DOMAIN(domain);
2118         vmd->vmd_interrupt_free_min = 2;
2119
2120         /*
2121          * v_free_reserved needs to include enough for the largest
2122          * swap pager structures plus enough for any pv_entry structs
2123          * when paging.
2124          */
2125         vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
2126             vmd->vmd_interrupt_free_min;
2127         vmd->vmd_free_reserved = vm_pageout_page_count +
2128             vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768;
2129         vmd->vmd_free_min = vmd->vmd_page_count / 200;
2130         vmd->vmd_free_severe = vmd->vmd_free_min / 2;
2131         vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
2132         vmd->vmd_free_min += vmd->vmd_free_reserved;
2133         vmd->vmd_free_severe += vmd->vmd_free_reserved;
2134         vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
2135         if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
2136                 vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
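        /*
         * For example (values are approximate and assume 4KB pages,
         * MAXBSIZE of 64KB and the default vm_pageout_page_count of 32):
         * a domain with 1,048,576 pages (4GB) gets vmd_free_reserved ~
         * 1431, vmd_free_target ~ 22399 (about 87MB), vmd_free_min ~
         * 6673 and vmd_free_severe ~ 4052 pages.
         */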
2137
2138         /*
2139          * Set the default wakeup threshold to be 10% below the paging
2140          * target.  This keeps the steady state out of shortfall.
2141          */
2142         vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
2143
2144         /*
2145          * Target amount of memory to move out of the laundry queue during a
2146          * background laundering.  This is proportional to the amount of system
2147          * memory.
2148          */
2149         vmd->vmd_background_launder_target = (vmd->vmd_free_target -
2150             vmd->vmd_free_min) / 10;
2151
2152         /* Initialize the pageout daemon pid controller. */
2153         pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
2154             vmd->vmd_free_target, PIDCTRL_BOUND,
2155             PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
2156         oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
2157             "pidctrl", CTLFLAG_RD, NULL, "");
2158         pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
2159 }
2160
2161 static void
2162 vm_pageout_init(void)
2163 {
2164         u_int freecount;
2165         int i;
2166
2167         /*
2168          * Initialize some paging parameters.
2169          */
2170         if (vm_cnt.v_page_count < 2000)
2171                 vm_pageout_page_count = 8;
2172
2173         freecount = 0;
2174         for (i = 0; i < vm_ndomains; i++) {
2175                 struct vm_domain *vmd;
2176
2177                 vm_pageout_init_domain(i);
2178                 vmd = VM_DOMAIN(i);
2179                 vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
2180                 vm_cnt.v_free_target += vmd->vmd_free_target;
2181                 vm_cnt.v_free_min += vmd->vmd_free_min;
2182                 vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
2183                 vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
2184                 vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
2185                 vm_cnt.v_free_severe += vmd->vmd_free_severe;
2186                 freecount += vmd->vmd_free_count;
2187         }
2188
2189         /*
2190          * Set the interval, in seconds, for the active scan.  We want to
2191          * visit each page at least once every ten minutes.  This prevents
2192          * worst-case paging behavior caused by a stale active LRU.
2193          */
2194         if (vm_pageout_update_period == 0)
2195                 vm_pageout_update_period = 600;
2196
2197         if (vm_page_max_user_wired == 0)
2198                 vm_page_max_user_wired = freecount / 3;
2199 }
2200
2201 /*
2202  *     vm_pageout is the high level pageout daemon.
2203  */
2204 static void
2205 vm_pageout(void)
2206 {
2207         struct proc *p;
2208         struct thread *td;
2209         int error, first, i;
2210
2211         p = curproc;
2212         td = curthread;
2213
2214         mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
2215         swap_pager_swap_init();
2216         for (first = -1, i = 0; i < vm_ndomains; i++) {
2217                 if (VM_DOMAIN_EMPTY(i)) {
2218                         if (bootverbose)
2219                                 printf("domain %d empty; skipping pageout\n",
2220                                     i);
2221                         continue;
2222                 }
2223                 if (first == -1)
2224                         first = i;
2225                 else {
2226                         error = kthread_add(vm_pageout_worker,
2227                             (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
2228                         if (error != 0)
2229                                 panic("starting pageout for domain %d: %d",
2230                                     i, error);
2231                 }
2232                 error = kthread_add(vm_pageout_laundry_worker,
2233                     (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
2234                 if (error != 0)
2235                         panic("starting laundry for domain %d: %d", i, error);
2236         }
2237         error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
2238         if (error != 0)
2239                 panic("starting uma_reclaim helper, error %d", error);
2240
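        /*
         * The current thread itself services the first non-empty domain;
         * the remaining domains were handed to the kthreads created above.
         */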
2241         snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
2242         vm_pageout_worker((void *)(uintptr_t)first);
2243 }
2244
2245 /*
2246  * Perform an advisory wakeup of the page daemon.
2247  */
2248 void
2249 pagedaemon_wakeup(int domain)
2250 {
2251         struct vm_domain *vmd;
2252
2253         vmd = VM_DOMAIN(domain);
2254         vm_domain_pageout_assert_unlocked(vmd);
2255         if (curproc == pageproc)
2256                 return;
2257
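        /*
         * Only the waker that makes the 0 -> 1 transition takes the
         * pageout lock and issues the wakeup; concurrent callers merely
         * leave vmd_pageout_wanted nonzero.
         */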
2258         if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
2259                 vm_domain_pageout_lock(vmd);
2260                 atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2261                 wakeup(&vmd->vmd_pageout_wanted);
2262                 vm_domain_pageout_unlock(vmd);
2263         }
2264 }