1 /*-
2  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2005 Yahoo! Technologies Norway AS
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to Berkeley by
14  * The Mach Operating System project at Carnegie-Mellon University.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *      This product includes software developed by the University of
27  *      California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
45  *
46  *
47  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
48  * All rights reserved.
49  *
50  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
51  *
52  * Permission to use, copy, modify and distribute this software and
53  * its documentation is hereby granted, provided that both the copyright
54  * notice and this permission notice appear in all copies of the
55  * software, derivative works or modified versions, and any portions
56  * thereof, and that both notices appear in supporting documentation.
57  *
58  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61  *
62  * Carnegie Mellon requests users of this software to return to
63  *
64  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65  *  School of Computer Science
66  *  Carnegie Mellon University
67  *  Pittsburgh PA 15213-3890
68  *
69  * any improvements or extensions that they make and grant Carnegie the
70  * rights to redistribute these changes.
71  */
72
73 /*
74  *      The proverbial page-out daemon.
75  */
76
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
79
80 #include "opt_vm.h"
81
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/kernel.h>
85 #include <sys/eventhandler.h>
86 #include <sys/lock.h>
87 #include <sys/mutex.h>
88 #include <sys/proc.h>
89 #include <sys/kthread.h>
90 #include <sys/ktr.h>
91 #include <sys/mount.h>
92 #include <sys/racct.h>
93 #include <sys/resourcevar.h>
94 #include <sys/sched.h>
95 #include <sys/sdt.h>
96 #include <sys/signalvar.h>
97 #include <sys/smp.h>
98 #include <sys/time.h>
99 #include <sys/vnode.h>
100 #include <sys/vmmeter.h>
101 #include <sys/rwlock.h>
102 #include <sys/sx.h>
103 #include <sys/sysctl.h>
104
105 #include <vm/vm.h>
106 #include <vm/vm_param.h>
107 #include <vm/vm_object.h>
108 #include <vm/vm_page.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_pageout.h>
111 #include <vm/vm_pager.h>
112 #include <vm/vm_phys.h>
113 #include <vm/vm_pagequeue.h>
114 #include <vm/swap_pager.h>
115 #include <vm/vm_extern.h>
116 #include <vm/uma.h>
117
118 /*
119  * System initialization
120  */
121
122 /* The kernel process "vm_pageout". */
123 static void vm_pageout(void);
124 static void vm_pageout_init(void);
125 static int vm_pageout_clean(vm_page_t m, int *numpagedout);
126 static int vm_pageout_cluster(vm_page_t m);
127 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
128     int starting_page_shortage);
129
130 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
131     NULL);
132
133 struct proc *pageproc;
134
135 static struct kproc_desc page_kp = {
136         "pagedaemon",
137         vm_pageout,
138         &pageproc
139 };
140 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
141     &page_kp);
142
143 SDT_PROVIDER_DEFINE(vm);
144 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
145
146 /* Pagedaemon activity rates, in subdivisions of one second. */
147 #define VM_LAUNDER_RATE         10
148 #define VM_INACT_SCAN_RATE      10
149
150 static int vm_pageout_oom_seq = 12;
151
152 static int vm_pageout_update_period;
153 static int disable_swap_pageouts;
154 static int lowmem_period = 10;
155 static int swapdev_enabled;
156
157 static int vm_panic_on_oom = 0;
158
159 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
160         CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
161         "Panic on the given number of out-of-memory errors instead of killing the largest process");
162
163 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
164         CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
165         "Maximum active LRU update period");
166   
167 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
168         "Low memory callback period");
169
170 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
171         CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
172
173 static int pageout_lock_miss;
174 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
175         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
176
177 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
178         CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
179         "back-to-back calls to oom detector to start OOM");
180
181 static int act_scan_laundry_weight = 3;
182 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
183     &act_scan_laundry_weight, 0,
184     "weight given to clean vs. dirty pages in active queue scans");
185
186 static u_int vm_background_launder_rate = 4096;
187 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
188     &vm_background_launder_rate, 0,
189     "background laundering rate, in kilobytes per second");
190
191 static u_int vm_background_launder_max = 20 * 1024;
192 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
193     &vm_background_launder_max, 0, "background laundering cap, in kilobytes");
194
195 int vm_pageout_page_count = 32;
196
197 u_long vm_page_max_user_wired;
198 SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
199     &vm_page_max_user_wired, 0,
200     "system-wide limit to user-wired page count");
201
202 static u_int isqrt(u_int num);
203 static int vm_pageout_launder(struct vm_domain *vmd, int launder,
204     bool in_shortfall);
205 static void vm_pageout_laundry_worker(void *arg);
206
207 struct scan_state {
208         struct vm_batchqueue bq;
209         struct vm_pagequeue *pq;
210         vm_page_t       marker;
211         int             maxscan;
212         int             scanned;
213 };
214
215 static void
216 vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
217     vm_page_t marker, vm_page_t after, int maxscan)
218 {
219
220         vm_pagequeue_assert_locked(pq);
221         KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
222             ("marker %p already enqueued", marker));
223
224         if (after == NULL)
225                 TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
226         else
227                 TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
228         vm_page_aflag_set(marker, PGA_ENQUEUED);
229
230         vm_batchqueue_init(&ss->bq);
231         ss->pq = pq;
232         ss->marker = marker;
233         ss->maxscan = maxscan;
234         ss->scanned = 0;
235         vm_pagequeue_unlock(pq);
236 }
237
238 static void
239 vm_pageout_end_scan(struct scan_state *ss)
240 {
241         struct vm_pagequeue *pq;
242
243         pq = ss->pq;
244         vm_pagequeue_assert_locked(pq);
245         KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
246             ("marker %p not enqueued", ss->marker));
247
248         TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
249         vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
250         pq->pq_pdpages += ss->scanned;
251 }
252
253 /*
254  * Add a small number of queued pages to a batch queue for later processing
255  * without the corresponding queue lock held.  The caller must have enqueued a
256  * marker page at the desired start point for the scan.  Pages will be
257  * physically dequeued if the caller so requests.  Otherwise, the returned
258  * batch may contain marker pages, and it is up to the caller to handle them.
259  *
260  * When processing the batch queue, vm_page_queue() must be used to
261  * determine whether the page has been logically dequeued by another thread.
262  * Once this check is performed, the page lock guarantees that the page will
263  * not be disassociated from the queue.
264  */
265 static __always_inline void
266 vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
267 {
268         struct vm_pagequeue *pq;
269         vm_page_t m, marker, n;
270
271         marker = ss->marker;
272         pq = ss->pq;
273
274         KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
275             ("marker %p not enqueued", ss->marker));
276
277         vm_pagequeue_lock(pq);
278         for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
279             ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
280             m = n, ss->scanned++) {
281                 n = TAILQ_NEXT(m, plinks.q);
282                 if ((m->flags & PG_MARKER) == 0) {
283                         KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
284                             ("page %p not enqueued", m));
285                         KASSERT((m->flags & PG_FICTITIOUS) == 0,
286                             ("Fictitious page %p cannot be in page queue", m));
287                         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
288                             ("Unmanaged page %p cannot be in page queue", m));
289                 } else if (dequeue)
290                         continue;
291
292                 (void)vm_batchqueue_insert(&ss->bq, m);
293                 if (dequeue) {
294                         TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
295                         vm_page_aflag_clear(m, PGA_ENQUEUED);
296                 }
297         }
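        /*
         * Reposition the marker immediately before the first unvisited page
         * (or at the tail if the queue was exhausted) so that the next batch
         * collection resumes where this one stopped.
         */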
298         TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
299         if (__predict_true(m != NULL))
300                 TAILQ_INSERT_BEFORE(m, marker, plinks.q);
301         else
302                 TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
303         if (dequeue)
304                 vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
305         vm_pagequeue_unlock(pq);
306 }
307
308 /*
309  * Return the next page to be scanned, or NULL if the scan is complete.
310  */
311 static __always_inline vm_page_t
312 vm_pageout_next(struct scan_state *ss, const bool dequeue)
313 {
314
315         if (ss->bq.bq_cnt == 0)
316                 vm_pageout_collect_batch(ss, dequeue);
317         return (vm_batchqueue_pop(&ss->bq));
318 }
319
320 /*
321  * Determine whether processing of a page should be deferred and ensure that any
322  * outstanding queue operations are processed.
323  */
324 static __always_inline bool
325 vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
326 {
327         vm_page_astate_t as;
328
329         as = vm_page_astate_load(m);
330         if (__predict_false(as.queue != queue ||
331             ((as.flags & PGA_ENQUEUED) != 0) != enqueued))
332                 return (true);
333         if ((as.flags & PGA_QUEUE_OP_MASK) != 0) {
334                 vm_page_pqbatch_submit(m, queue);
335                 return (true);
336         }
337         return (false);
338 }
339
340 /*
341  * Scan for pages at adjacent offsets within the given page's object that are
342  * eligible for laundering, form a cluster of these pages and the given page,
343  * and launder that cluster.
344  */
345 static int
346 vm_pageout_cluster(vm_page_t m)
347 {
348         vm_object_t object;
349         vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
350         vm_pindex_t pindex;
351         int ib, is, page_base, pageout_count;
352
353         object = m->object;
354         VM_OBJECT_ASSERT_WLOCKED(object);
355         pindex = m->pindex;
356
357         vm_page_assert_xbusied(m);
358
359         mc[vm_pageout_page_count] = pb = ps = m;
360         pageout_count = 1;
361         page_base = vm_pageout_page_count;
362         ib = 1;
363         is = 1;
364
365         /*
366          * We can cluster only if the page is dirty, is neither busy nor
367          * wired, and is in the laundry queue.
368          *
369          * During heavy mmap/modification loads the pageout
370          * daemon can severely fragment the underlying file
371          * by flushing pages out of order and not trying to
372          * align the clusters (which leaves sporadic out-of-order
373          * holes).  To avoid this we do the reverse scan
374          * first and attempt to align our cluster, then do a
375          * forward scan if room remains.
376          */
377 more:
378         while (ib != 0 && pageout_count < vm_pageout_page_count) {
379                 if (ib > pindex) {
380                         ib = 0;
381                         break;
382                 }
383                 if ((p = vm_page_prev(pb)) == NULL ||
384                     vm_page_tryxbusy(p) == 0) {
385                         ib = 0;
386                         break;
387                 }
388                 if (vm_page_wired(p)) {
389                         ib = 0;
390                         vm_page_xunbusy(p);
391                         break;
392                 }
393                 vm_page_test_dirty(p);
394                 if (p->dirty == 0) {
395                         ib = 0;
396                         vm_page_xunbusy(p);
397                         break;
398                 }
399                 if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
400                         vm_page_xunbusy(p);
401                         ib = 0;
402                         break;
403                 }
404                 mc[--page_base] = pb = p;
405                 ++pageout_count;
406                 ++ib;
407
408                 /*
409                  * We are at an alignment boundary.  Stop here, and switch
410                  * directions.  Do not clear ib.
411                  */
412                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
413                         break;
414         }
415         while (pageout_count < vm_pageout_page_count && 
416             pindex + is < object->size) {
417                 if ((p = vm_page_next(ps)) == NULL ||
418                     vm_page_tryxbusy(p) == 0)
419                         break;
420                 if (vm_page_wired(p)) {
421                         vm_page_xunbusy(p);
422                         break;
423                 }
424                 vm_page_test_dirty(p);
425                 if (p->dirty == 0) {
426                         vm_page_xunbusy(p);
427                         break;
428                 }
429                 if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
430                         vm_page_xunbusy(p);
431                         break;
432                 }
433                 mc[page_base + pageout_count] = ps = p;
434                 ++pageout_count;
435                 ++is;
436         }
437
438         /*
439          * If we exhausted our forward scan, continue with the reverse scan
440          * when possible, even past an alignment boundary.  This catches
441          * boundary conditions.
442          */
443         if (ib != 0 && pageout_count < vm_pageout_page_count)
444                 goto more;
445
446         return (vm_pageout_flush(&mc[page_base], pageout_count,
447             VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
448 }
449
450 /*
451  * vm_pageout_flush() - launder the given pages
452  *
453  *      The given pages are laundered.  Note that we set up for the start
454  *      of I/O (i.e., busy the page), mark it read-only, and bump the
455  *      object reference count all in here rather than in the parent.  If
456  *      we want the parent to do more sophisticated things we may have to
457  *      change the ordering.
458  *
459  *      The returned runlen is the count of pages between mreq and the
460  *      first page at or after mreq with status VM_PAGER_AGAIN.
461  *      *eio is set to TRUE if the pager returned VM_PAGER_ERROR or
462  *      VM_PAGER_FAIL for any page in that run.
463  */
464 int
465 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
466     boolean_t *eio)
467 {
468         vm_object_t object = mc[0]->object;
469         int pageout_status[count];
470         int numpagedout = 0;
471         int i, runlen;
472
473         VM_OBJECT_ASSERT_WLOCKED(object);
474
475         /*
476          * Initiate I/O.  Mark the pages shared busy and verify that they're
477          * valid and read-only.
478          *
479          * We do not have to fix up the clean/dirty bits here... we can
480          * allow the pager to do it after the I/O completes.
481          *
482          * NOTE! mc[i]->dirty may be partial or fragmented due to an
483          * edge case with file fragments.
484          */
485         for (i = 0; i < count; i++) {
486                 KASSERT(vm_page_all_valid(mc[i]),
487                     ("vm_pageout_flush: partially invalid page %p index %d/%d",
488                         mc[i], i, count));
489                 KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
490                     ("vm_pageout_flush: writeable page %p", mc[i]));
491                 vm_page_busy_downgrade(mc[i]);
492         }
493         vm_object_pip_add(object, count);
494
495         vm_pager_put_pages(object, mc, count, flags, pageout_status);
496
497         runlen = count - mreq;
498         if (eio != NULL)
499                 *eio = FALSE;
500         for (i = 0; i < count; i++) {
501                 vm_page_t mt = mc[i];
502
503                 KASSERT(pageout_status[i] == VM_PAGER_PEND ||
504                     !pmap_page_is_write_mapped(mt),
505                     ("vm_pageout_flush: page %p is not write protected", mt));
506                 switch (pageout_status[i]) {
507                 case VM_PAGER_OK:
508                         /*
509                          * The page may have moved since laundering started, in
510                          * which case it should be left alone.
511                          */
512                         if (vm_page_in_laundry(mt))
513                                 vm_page_deactivate_noreuse(mt);
514                         /* FALLTHROUGH */
515                 case VM_PAGER_PEND:
516                         numpagedout++;
517                         break;
518                 case VM_PAGER_BAD:
519                         /*
520                          * The page is outside the object's range.  We pretend
521                          * that the pageout worked and clean the page, so the
522                          * changes will be lost if the page is reclaimed by
523                          * the page daemon.
524                          */
525                         vm_page_undirty(mt);
526                         if (vm_page_in_laundry(mt))
527                                 vm_page_deactivate_noreuse(mt);
528                         break;
529                 case VM_PAGER_ERROR:
530                 case VM_PAGER_FAIL:
531                         /*
532                          * If the page couldn't be paged out to swap because the
533                          * pager wasn't able to find space, place the page in
534                          * the PQ_UNSWAPPABLE holding queue.  This is an
535                          * optimization that prevents the page daemon from
536                          * wasting CPU cycles on pages that cannot be reclaimed
537                          * because no swap device is configured.
538                          *
539                          * Otherwise, reactivate the page so that it doesn't
540                          * clog the laundry and inactive queues.  (We will try
541                          * paging it out again later.)
542                          */
543                         if (object->type == OBJT_SWAP &&
544                             pageout_status[i] == VM_PAGER_FAIL) {
545                                 vm_page_unswappable(mt);
546                                 numpagedout++;
547                         } else
548                                 vm_page_activate(mt);
549                         if (eio != NULL && i >= mreq && i - mreq < runlen)
550                                 *eio = TRUE;
551                         break;
552                 case VM_PAGER_AGAIN:
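                        /*
                         * The pager asked us to retry this page later;
                         * truncate the reported run at the first such page at
                         * or after mreq.
                         */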
553                         if (i >= mreq && i - mreq < runlen)
554                                 runlen = i - mreq;
555                         break;
556                 }
557
558                 /*
559                  * If the operation is still going, leave the page busy to
560                  * block all other accesses. Also, leave the paging in
561                  * progress indicator set so that we don't attempt an object
562                  * collapse.
563                  */
564                 if (pageout_status[i] != VM_PAGER_PEND) {
565                         vm_object_pip_wakeup(object);
566                         vm_page_sunbusy(mt);
567                 }
568         }
569         if (prunlen != NULL)
570                 *prunlen = runlen;
571         return (numpagedout);
572 }
573
574 static void
575 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
576 {
577
578         atomic_store_rel_int(&swapdev_enabled, 1);
579 }
580
581 static void
582 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
583 {
584
585         if (swap_pager_nswapdev() == 1)
586                 atomic_store_rel_int(&swapdev_enabled, 0);
587 }
588
589 /*
590  * Attempt to acquire all of the necessary locks to launder a page and
591  * then call through the clustering layer to PUTPAGES.  Wait a short
592  * time for a vnode lock.
593  *
594  * Requires the page and object lock on entry, releases both before return.
595  * Returns 0 on success and an errno otherwise.
596  */
597 static int
598 vm_pageout_clean(vm_page_t m, int *numpagedout)
599 {
600         struct vnode *vp;
601         struct mount *mp;
602         vm_object_t object;
603         vm_pindex_t pindex;
604         int error, lockmode;
605
606         object = m->object;
607         VM_OBJECT_ASSERT_WLOCKED(object);
608         error = 0;
609         vp = NULL;
610         mp = NULL;
611
612         /*
613          * The object is already known NOT to be dead.   It
614          * is possible for the vget() to block the whole
615          * pageout daemon, but the new low-memory handling
616          * code should prevent it.
617          *
618          * We can't wait forever for the vnode lock, we might
619          * deadlock due to a vn_read() getting stuck in
620          * vm_wait while holding this vnode.  We skip the 
621          * vnode if we can't get it in a reasonable amount
622          * of time.
623          */
624         if (object->type == OBJT_VNODE) {
625                 vm_page_xunbusy(m);
626                 vp = object->handle;
627                 if (vp->v_type == VREG &&
628                     vn_start_write(vp, &mp, V_NOWAIT) != 0) {
629                         mp = NULL;
630                         error = EDEADLK;
631                         goto unlock_all;
632                 }
633                 KASSERT(mp != NULL,
634                     ("vp %p with NULL v_mount", vp));
635                 vm_object_reference_locked(object);
636                 pindex = m->pindex;
637                 VM_OBJECT_WUNLOCK(object);
638                 lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
639                     LK_SHARED : LK_EXCLUSIVE;
640                 if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
641                         vp = NULL;
642                         error = EDEADLK;
643                         goto unlock_mp;
644                 }
645                 VM_OBJECT_WLOCK(object);
646
647                 /*
648                  * Ensure that the object and vnode were not disassociated
649                  * while locks were dropped.
650                  */
651                 if (vp->v_object != object) {
652                         error = ENOENT;
653                         goto unlock_all;
654                 }
655
656                 /*
657                  * While the object was unlocked, the page may have been:
658                  * (1) moved to a different queue,
659                  * (2) reallocated to a different object,
660                  * (3) reallocated to a different offset, or
661                  * (4) cleaned.
662                  */
663                 if (!vm_page_in_laundry(m) || m->object != object ||
664                     m->pindex != pindex || m->dirty == 0) {
665                         error = ENXIO;
666                         goto unlock_all;
667                 }
668
669                 /*
670                  * The page may have been busied while the object lock was
671                  * released.
672                  */
673                 if (vm_page_tryxbusy(m) == 0) {
674                         error = EBUSY;
675                         goto unlock_all;
676                 }
677         }
678
679         /*
680          * Remove all writeable mappings, failing if the page is wired.
681          */
682         if (!vm_page_try_remove_write(m)) {
683                 vm_page_xunbusy(m);
684                 error = EBUSY;
685                 goto unlock_all;
686         }
687
688         /*
689          * If a page is dirty, then it is either being washed
690          * (but not yet cleaned) or it is still in the
691          * laundry.  If it is still in the laundry, then we
692          * start the cleaning operation. 
693          */
694         if ((*numpagedout = vm_pageout_cluster(m)) == 0)
695                 error = EIO;
696
697 unlock_all:
698         VM_OBJECT_WUNLOCK(object);
699
700 unlock_mp:
701         if (mp != NULL) {
702                 if (vp != NULL)
703                         vput(vp);
704                 vm_object_deallocate(object);
705                 vn_finished_write(mp);
706         }
707
708         return (error);
709 }
710
711 /*
712  * Attempt to launder the specified number of pages.
713  *
714  * Returns the number of pages successfully laundered.
715  */
716 static int
717 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
718 {
719         struct scan_state ss;
720         struct vm_pagequeue *pq;
721         vm_object_t object;
722         vm_page_t m, marker;
723         vm_page_astate_t new, old;
724         int act_delta, error, numpagedout, queue, refs, starting_target;
725         int vnodes_skipped;
726         bool pageout_ok;
727
728         object = NULL;
729         starting_target = launder;
730         vnodes_skipped = 0;
731
732         /*
733          * Scan the laundry queues for pages eligible to be laundered.  We stop
734          * once the target number of dirty pages have been laundered, or once
735          * we've reached the end of the queue.  A single iteration of this loop
736          * may cause more than one page to be laundered because of clustering.
737          *
738          * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
739          * swap devices are configured.
740          */
741         if (atomic_load_acq_int(&swapdev_enabled))
742                 queue = PQ_UNSWAPPABLE;
743         else
744                 queue = PQ_LAUNDRY;
745
746 scan:
747         marker = &vmd->vmd_markers[queue];
748         pq = &vmd->vmd_pagequeues[queue];
749         vm_pagequeue_lock(pq);
750         vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
751         while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
752                 if (__predict_false((m->flags & PG_MARKER) != 0))
753                         continue;
754
755                 /*
756                  * Don't touch a page that was removed from the queue after the
757                  * page queue lock was released.  Otherwise, ensure that any
758                  * pending queue operations, such as dequeues for wired pages,
759                  * are handled.
760                  */
761                 if (vm_pageout_defer(m, queue, true))
762                         continue;
763
764                 /*
765                  * Lock the page's object.
766                  */
767                 if (object == NULL || object != m->object) {
768                         if (object != NULL)
769                                 VM_OBJECT_WUNLOCK(object);
770                         object = atomic_load_ptr(&m->object);
771                         if (__predict_false(object == NULL))
772                                 /* The page is being freed by another thread. */
773                                 continue;
774
775                         /* Depends on type-stability. */
776                         VM_OBJECT_WLOCK(object);
777                         if (__predict_false(m->object != object)) {
778                                 VM_OBJECT_WUNLOCK(object);
779                                 object = NULL;
780                                 continue;
781                         }
782                 }
783
784                 if (vm_page_tryxbusy(m) == 0)
785                         continue;
786
787                 /*
788                  * Check for wirings now that we hold the object lock and have
789                  * exclusively busied the page.  If the page is mapped, it may
790                  * still be wired by pmap lookups.  The call to
791                  * vm_page_try_remove_all() below atomically checks for such
792                  * wirings and removes mappings.  If the page is unmapped, the
793                  * wire count is guaranteed not to increase after this check.
794                  */
795                 if (__predict_false(vm_page_wired(m)))
796                         goto skip_page;
797
798                 /*
799                  * Invalid pages can be easily freed.  They cannot be
800                  * mapped; vm_page_free() asserts this.
801                  */
802                 if (vm_page_none_valid(m))
803                         goto free_page;
804
805                 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
806
807                 for (old = vm_page_astate_load(m);;) {
808                         /*
809                          * Check to see if the page has been removed from the
810                          * queue since the first such check.  Leave it alone if
811                          * so, discarding any references collected by
812                          * pmap_ts_referenced().
813                          */
814                         if (__predict_false(_vm_page_queue(old) == PQ_NONE))
815                                 goto skip_page;
816
817                         new = old;
818                         act_delta = refs;
819                         if ((old.flags & PGA_REFERENCED) != 0) {
820                                 new.flags &= ~PGA_REFERENCED;
821                                 act_delta++;
822                         }
823                         if (act_delta == 0) {
824                                 ;
825                         } else if (object->ref_count != 0) {
826                                 /*
827                                  * Increase the activation count if the page was
828                                  * referenced while in the laundry queue.  This
829                                  * makes it less likely that the page will be
830                                  * returned prematurely to the laundry queue.
831                                  */
832                                 new.act_count += ACT_ADVANCE +
833                                     act_delta;
834                                 if (new.act_count > ACT_MAX)
835                                         new.act_count = ACT_MAX;
836
837                                 new.flags &= ~PGA_QUEUE_OP_MASK;
838                                 new.flags |= PGA_REQUEUE;
839                                 new.queue = PQ_ACTIVE;
840                                 if (!vm_page_pqstate_commit(m, &old, new))
841                                         continue;
842
843                                 /*
844                                  * If this was a background laundering, count
845                                  * activated pages towards our target.  The
846                                  * purpose of background laundering is to ensure
847                                  * that pages are eventually cycled through the
848                                  * laundry queue, and an activation is a valid
849                                  * way out.
850                                  */
851                                 if (!in_shortfall)
852                                         launder--;
853                                 VM_CNT_INC(v_reactivated);
854                                 goto skip_page;
855                         } else if ((object->flags & OBJ_DEAD) == 0) {
856                                 new.flags |= PGA_REQUEUE;
857                                 if (!vm_page_pqstate_commit(m, &old, new))
858                                         continue;
859                                 goto skip_page;
860                         }
861                         break;
862                 }
863
864                 /*
865                  * If the page appears to be clean at the machine-independent
866                  * layer, then remove all of its mappings from the pmap in
867                  * anticipation of freeing it.  If, however, any of the page's
868                  * mappings allow write access, then the page may still be
869                  * modified until the last of those mappings are removed.
870                  */
871                 if (object->ref_count != 0) {
872                         vm_page_test_dirty(m);
873                         if (m->dirty == 0 && !vm_page_try_remove_all(m))
874                                 goto skip_page;
875                 }
876
877                 /*
878                  * Clean pages are freed, and dirty pages are paged out unless
879                  * they belong to a dead object.  Requeueing dirty pages from
880                  * dead objects is pointless, as they are being paged out and
881                  * freed by the thread that destroyed the object.
882                  */
883                 if (m->dirty == 0) {
884 free_page:
885                         /*
886                          * Now we are guaranteed that no other threads are
887                          * manipulating the page, check for a last-second
888                          * reference.
889                          */
890                         if (vm_pageout_defer(m, queue, true))
891                                 goto skip_page;
892                         vm_page_free(m);
893                         VM_CNT_INC(v_dfree);
894                 } else if ((object->flags & OBJ_DEAD) == 0) {
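                        /*
                         * Only anonymous memory, i.e., default- or swap-backed
                         * objects, is subject to the
                         * disable_swapspace_pageouts knob; pages belonging to
                         * other object types (such as vnode-backed objects)
                         * remain eligible for pageout.
                         */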
895                         if (object->type != OBJT_SWAP &&
896                             object->type != OBJT_DEFAULT)
897                                 pageout_ok = true;
898                         else if (disable_swap_pageouts)
899                                 pageout_ok = false;
900                         else
901                                 pageout_ok = true;
902                         if (!pageout_ok) {
903                                 vm_page_launder(m);
904                                 goto skip_page;
905                         }
906
907                         /*
908                          * Form a cluster with adjacent, dirty pages from the
909                          * same object, and page out that entire cluster.
910                          *
911                          * The adjacent, dirty pages must also be in the
912                          * laundry.  However, their mappings are not checked
913                          * for new references.  Consequently, a recently
914                          * referenced page may be paged out.  However, that
915                          * page will not be prematurely reclaimed.  After page
916                          * out, the page will be placed in the inactive queue,
917                          * where any new references will be detected and the
918                          * page reactivated.
919                          */
920                         error = vm_pageout_clean(m, &numpagedout);
921                         if (error == 0) {
922                                 launder -= numpagedout;
923                                 ss.scanned += numpagedout;
924                         } else if (error == EDEADLK) {
925                                 pageout_lock_miss++;
926                                 vnodes_skipped++;
927                         }
928                         object = NULL;
929                 } else {
930 skip_page:
931                         vm_page_xunbusy(m);
932                 }
933         }
934         if (object != NULL) {
935                 VM_OBJECT_WUNLOCK(object);
936                 object = NULL;
937         }
938         vm_pagequeue_lock(pq);
939         vm_pageout_end_scan(&ss);
940         vm_pagequeue_unlock(pq);
941
942         if (launder > 0 && queue == PQ_UNSWAPPABLE) {
943                 queue = PQ_LAUNDRY;
944                 goto scan;
945         }
946
947         /*
948          * Wake up the sync daemon if we skipped a vnode in a writeable object
949          * and we didn't launder enough pages.
950          */
951         if (vnodes_skipped > 0 && launder > 0)
952                 (void)speedup_syncer();
953
954         return (starting_target - launder);
955 }
956
957 /*
958  * Compute the integer square root.
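 * The loop below implements the standard digit-by-digit (binary restoring)
 * method: "bit" starts at the largest power of four not exceeding "num" and
 * each iteration determines one bit of the result.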
959  */
960 static u_int
961 isqrt(u_int num)
962 {
963         u_int bit, root, tmp;
964
965         bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
966         root = 0;
967         while (bit != 0) {
968                 tmp = root + bit;
969                 root >>= 1;
970                 if (num >= tmp) {
971                         num -= tmp;
972                         root += bit;
973                 }
974                 bit >>= 2;
975         }
976         return (root);
977 }
978
979 /*
980  * Perform the work of the laundry thread: periodically wake up and determine
981  * whether any pages need to be laundered.  If so, determine the number of pages
982  * that need to be laundered, and launder them.
983  */
984 static void
985 vm_pageout_laundry_worker(void *arg)
986 {
987         struct vm_domain *vmd;
988         struct vm_pagequeue *pq;
989         uint64_t nclean, ndirty, nfreed;
990         int domain, last_target, launder, shortfall, shortfall_cycle, target;
991         bool in_shortfall;
992
993         domain = (uintptr_t)arg;
994         vmd = VM_DOMAIN(domain);
995         pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
996         KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
997
998         shortfall = 0;
999         in_shortfall = false;
1000         shortfall_cycle = 0;
1001         last_target = target = 0;
1002         nfreed = 0;
1003
1004         /*
1005          * Calls to these handlers are serialized by the swap syscall lock.
1006          */
1007         (void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
1008             EVENTHANDLER_PRI_ANY);
1009         (void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
1010             EVENTHANDLER_PRI_ANY);
1011
1012         /*
1013          * The pageout laundry worker is never done, so loop forever.
1014          */
1015         for (;;) {
1016                 KASSERT(target >= 0, ("negative target %d", target));
1017                 KASSERT(shortfall_cycle >= 0,
1018                     ("negative cycle %d", shortfall_cycle));
1019                 launder = 0;
1020
1021                 /*
1022                  * First determine whether we need to launder pages to meet a
1023                  * shortage of free pages.
1024                  */
1025                 if (shortfall > 0) {
1026                         in_shortfall = true;
1027                         shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
1028                         target = shortfall;
1029                 } else if (!in_shortfall)
1030                         goto trybackground;
1031                 else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
1032                         /*
1033                          * We recently entered shortfall and began laundering
1034                          * pages.  If we have completed that laundering run
1035                          * (and we are no longer in shortfall) or we have met
1036                          * our laundry target through other activity, then we
1037                          * can stop laundering pages.
1038                          */
1039                         in_shortfall = false;
1040                         target = 0;
1041                         goto trybackground;
1042                 }
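                /*
                 * Spread the remaining laundering target evenly over the
                 * cycles that remain in this shortfall run.
                 */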
1043                 launder = target / shortfall_cycle--;
1044                 goto dolaundry;
1045
1046                 /*
1047                  * There's no immediate need to launder any pages; see if we
1048                  * meet the conditions to perform background laundering:
1049                  *
1050                  * 1. The ratio of dirty to clean inactive pages exceeds the
1051                  *    background laundering threshold, or
1052                  * 2. we haven't yet reached the target of the current
1053                  *    background laundering run.
1054                  *
1055                  * The background laundering threshold is not a constant.
1056                  * Instead, it is a slowly growing function of the number of
1057                  * clean pages freed by the page daemon since the last
1058                  * background laundering.  Thus, as the ratio of dirty to
1059                  * clean inactive pages grows, the amount of memory pressure
1060                  * required to trigger laundering decreases.  We ensure
1061                  * that the threshold is non-zero after an inactive queue
1062                  * scan, even if that scan failed to free a single clean page.
1063                  */
1064 trybackground:
1065                 nclean = vmd->vmd_free_count +
1066                     vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
1067                 ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
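                /*
                 * Equivalently: start a background run once roughly
                 * (nclean / ndirty)^2 * (free_target - free_min) clean pages
                 * have been freed since the last background laundering.
                 */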
1068                 if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
1069                     vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
1070                         target = vmd->vmd_background_launder_target;
1071                 }
1072
1073                 /*
1074                  * We have a non-zero background laundering target.  If we've
1075                  * laundered up to our maximum without observing a page daemon
1076                  * request, just stop.  This is a safety belt that ensures we
1077                  * don't launder an excessive amount if memory pressure is low
1078                  * and the ratio of dirty to clean pages is large.  Otherwise,
1079                  * proceed at the background laundering rate.
1080                  */
1081                 if (target > 0) {
1082                         if (nfreed > 0) {
1083                                 nfreed = 0;
1084                                 last_target = target;
1085                         } else if (last_target - target >=
1086                             vm_background_launder_max * PAGE_SIZE / 1024) {
1087                                 target = 0;
1088                         }
1089                         launder = vm_background_launder_rate * PAGE_SIZE / 1024;
1090                         launder /= VM_LAUNDER_RATE;
1091                         if (launder > target)
1092                                 launder = target;
1093                 }
1094
1095 dolaundry:
1096                 if (launder > 0) {
1097                         /*
1098                          * Because of I/O clustering, the number of laundered
1099                          * pages could exceed "target" by the maximum size of
1100                          * a cluster minus one. 
1101                          */
1102                         target -= min(vm_pageout_launder(vmd, launder,
1103                             in_shortfall), target);
1104                         pause("laundp", hz / VM_LAUNDER_RATE);
1105                 }
1106
1107                 /*
1108                  * If we're not currently laundering pages and the page daemon
1109                  * hasn't posted a new request, sleep until the page daemon
1110                  * kicks us.
1111                  */
1112                 vm_pagequeue_lock(pq);
1113                 if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
1114                         (void)mtx_sleep(&vmd->vmd_laundry_request,
1115                             vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1116
1117                 /*
1118                  * If the pagedaemon has indicated that it's in shortfall, start
1119                  * a shortfall laundering unless we're already in the middle of
1120                  * one.  This may preempt a background laundering.
1121                  */
1122                 if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
1123                     (!in_shortfall || shortfall_cycle == 0)) {
1124                         shortfall = vm_laundry_target(vmd) +
1125                             vmd->vmd_pageout_deficit;
1126                         target = 0;
1127                 } else
1128                         shortfall = 0;
1129
1130                 if (target == 0)
1131                         vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
1132                 nfreed += vmd->vmd_clean_pages_freed;
1133                 vmd->vmd_clean_pages_freed = 0;
1134                 vm_pagequeue_unlock(pq);
1135         }
1136 }
1137
1138 /*
1139  * Compute the number of pages we want to try to move from the
1140  * active queue to either the inactive or laundry queue.
1141  *
1142  * When scanning active pages during a shortage, we make clean pages
1143  * count more heavily towards the page shortage than dirty pages.
1144  * This is because dirty pages must be laundered before they can be
1145  * reused and thus have less utility when attempting to quickly
1146  * alleviate a free page shortage.  However, this weighting also
1147  * causes the scan to deactivate dirty pages more aggressively,
1148  * improving the effectiveness of clustering.
1149  */
1150 static int
1151 vm_pageout_active_target(struct vm_domain *vmd)
1152 {
1153         int shortage;
1154
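        /*
         * The shortage is expressed in the units used by
         * vm_pageout_scan_active(): deactivating a clean page counts for
         * act_scan_laundry_weight, while moving a dirty page to the laundry
         * queue counts for one.
         */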
1155         shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
1156             (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
1157             vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
1158         shortage *= act_scan_laundry_weight;
1159         return (shortage);
1160 }
1161
1162 /*
1163  * Scan the active queue.  If there is no shortage of inactive pages, scan a
1164  * small portion of the queue in order to maintain quasi-LRU.
1165  */
1166 static void
1167 vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
1168 {
1169         struct scan_state ss;
1170         vm_object_t object;
1171         vm_page_t m, marker;
1172         struct vm_pagequeue *pq;
1173         vm_page_astate_t old, new;
1174         long min_scan;
1175         int act_delta, max_scan, ps_delta, refs, scan_tick;
1176         uint8_t nqueue;
1177
1178         marker = &vmd->vmd_markers[PQ_ACTIVE];
1179         pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1180         vm_pagequeue_lock(pq);
1181
1182         /*
1183          * If we're just idle polling, attempt to visit every
1184          * active page within 'update_period' seconds.
1185          */
1186         scan_tick = ticks;
1187         if (vm_pageout_update_period != 0) {
1188                 min_scan = pq->pq_cnt;
1189                 min_scan *= scan_tick - vmd->vmd_last_active_scan;
1190                 min_scan /= hz * vm_pageout_update_period;
1191         } else
1192                 min_scan = 0;
1193         if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
1194                 vmd->vmd_last_active_scan = scan_tick;
1195
1196         /*
1197          * Scan the active queue for pages that can be deactivated.  Update
1198          * the per-page activity counter and use it to identify deactivation
1199          * candidates.  Held pages may be deactivated.
1200          *
1201          * To avoid requeuing each page that remains in the active queue, we
1202          * implement the CLOCK algorithm.  To keep the implementation of the
1203          * enqueue operation consistent for all page queues, we use two hands,
1204          * represented by marker pages. Scans begin at the first hand, which
1205          * precedes the second hand in the queue.  When the two hands meet,
1206          * they are moved back to the head and tail of the queue, respectively,
1207          * and scanning resumes.
1208          */
1209         max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
1210 act_scan:
1211         vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
1212         while ((m = vm_pageout_next(&ss, false)) != NULL) {
1213                 if (__predict_false(m == &vmd->vmd_clock[1])) {
1214                         vm_pagequeue_lock(pq);
1215                         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1216                         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
1217                         TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
1218                             plinks.q);
1219                         TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
1220                             plinks.q);
1221                         max_scan -= ss.scanned;
1222                         vm_pageout_end_scan(&ss);
1223                         goto act_scan;
1224                 }
1225                 if (__predict_false((m->flags & PG_MARKER) != 0))
1226                         continue;
1227
1228                 /*
1229                  * Don't touch a page that was removed from the queue after the
1230                  * page queue lock was released.  Otherwise, ensure that any
1231                  * pending queue operations, such as dequeues for wired pages,
1232                  * are handled.
1233                  */
1234                 if (vm_pageout_defer(m, PQ_ACTIVE, true))
1235                         continue;
1236
1237                 /*
1238                  * A page's object pointer may be set to NULL before
1239                  * the object lock is acquired.
1240                  */
1241                 object = atomic_load_ptr(&m->object);
1242                 if (__predict_false(object == NULL))
1243                         /*
1244                          * The page has been removed from its object.
1245                          */
1246                         continue;
1247
1248                 /* Deferred free of swap space. */
1249                 if ((m->a.flags & PGA_SWAP_FREE) != 0 &&
1250                     VM_OBJECT_TRYWLOCK(object)) {
1251                         if (m->object == object)
1252                                 vm_pager_page_unswapped(m);
1253                         VM_OBJECT_WUNLOCK(object);
1254                 }
1255
1256                 /*
1257                  * Check to see "how much" the page has been used.
1258                  *
1259                  * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1260                  * that a reference from a concurrently destroyed mapping is
1261                  * observed here and now.
1262                  *
1263                  * Perform an unsynchronized object ref count check.  While
1264                  * the page lock ensures that the page is not reallocated to
1265                  * another object, in particular, one with unmanaged mappings
1266                  * that cannot support pmap_ts_referenced(), two races are,
1267                  * nonetheless, possible:
1268                  * 1) The count was transitioning to zero, but we saw a non-
1269                  *    zero value.  pmap_ts_referenced() will return zero
1270                  *    because the page is not mapped.
1271                  * 2) The count was transitioning to one, but we saw zero.
1272                  *    This race delays the detection of a new reference.  At
1273                  *    worst, we will deactivate and reactivate the page.
1274                  */
1275                 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1276
1277                 old = vm_page_astate_load(m);
1278                 do {
1279                         /*
1280                          * Check to see if the page has been removed from the
1281                          * queue since the first such check.  Leave it alone if
1282                          * so, discarding any references collected by
1283                          * pmap_ts_referenced().
1284                          */
1285                         if (__predict_false(_vm_page_queue(old) == PQ_NONE))
1286                                 break;
1287
1288                         /*
1289                          * Advance or decay the act_count based on recent usage.
1290                          */
1291                         new = old;
1292                         act_delta = refs;
1293                         if ((old.flags & PGA_REFERENCED) != 0) {
1294                                 new.flags &= ~PGA_REFERENCED;
1295                                 act_delta++;
1296                         }
1297                         if (act_delta != 0) {
1298                                 new.act_count += ACT_ADVANCE + act_delta;
1299                                 if (new.act_count > ACT_MAX)
1300                                         new.act_count = ACT_MAX;
1301                         } else {
1302                                 new.act_count -= min(new.act_count,
1303                                     ACT_DECLINE);
1304                         }
1305
1306                         if (new.act_count > 0) {
1307                                 /*
1308                                  * Adjust the activation count and keep the page
1309                                  * in the active queue.  The count might be left
1310                                  * unchanged if it is saturated.  The page may
1311                                  * have been moved to a different queue since we
1312                                  * started the scan, in which case we move it
1313                                  * back.
1314                                  */
1315                                 ps_delta = 0;
1316                                 if (old.queue != PQ_ACTIVE) {
1317                                         new.flags &= ~PGA_QUEUE_OP_MASK;
1318                                         new.flags |= PGA_REQUEUE;
1319                                         new.queue = PQ_ACTIVE;
1320                                 }
1321                         } else {
1322                                 /*
1323                                  * When not short for inactive pages, let dirty
1324                                  * pages go through the inactive queue before
1325                                  * moving to the laundry queue.  This gives them
1326                                  * some extra time to be reactivated,
1327                                  * potentially avoiding an expensive pageout.
1328                                  * However, during a page shortage, the inactive
1329                                  * queue is necessarily small, and so dirty
1330                                  * pages would only spend a trivial amount of
1331                                  * time in the inactive queue.  Therefore, we
1332                                  * might as well place them directly in the
1333                                  * laundry queue to reduce queuing overhead.
1334                                  *
1335                                  * Calling vm_page_test_dirty() here would
1336                                  * require acquisition of the object's write
1337                                  * lock.  However, during a page shortage,
1338                                  * directing dirty pages into the laundry queue
1339                                  * is only an optimization and not a
1340                                  * requirement.  Therefore, we simply rely on
1341                                  * the opportunistic updates to the page's dirty
1342                                  * field by the pmap.
1343                                  */
1344                                 if (page_shortage <= 0) {
1345                                         nqueue = PQ_INACTIVE;
1346                                         ps_delta = 0;
1347                                 } else if (m->dirty == 0) {
1348                                         nqueue = PQ_INACTIVE;
1349                                         ps_delta = act_scan_laundry_weight;
1350                                 } else {
1351                                         nqueue = PQ_LAUNDRY;
1352                                         ps_delta = 1;
1353                                 }
1354
1355                                 new.flags &= ~PGA_QUEUE_OP_MASK;
1356                                 new.flags |= PGA_REQUEUE;
1357                                 new.queue = nqueue;
1358                         }
1359                 } while (!vm_page_pqstate_commit(m, &old, new));
1360
1361                 page_shortage -= ps_delta;
1362         }
1363         vm_pagequeue_lock(pq);
1364         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1365         TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
1366         vm_pageout_end_scan(&ss);
1367         vm_pagequeue_unlock(pq);
1368 }
1369
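     /*
      * Requeue a page onto the inactive queue, just before the marker, if it
      * still belongs to PQ_INACTIVE and has not already been re-enqueued by
      * another thread.  Returns 1 if the page was inserted and 0 otherwise,
      * so that the caller can adjust the queue's page count in one batch.
      */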
1370 static int
1371 vm_pageout_reinsert_inactive_page(struct vm_pagequeue *pq, vm_page_t marker,
1372     vm_page_t m)
1373 {
1374         vm_page_astate_t as;
1375
1376         vm_pagequeue_assert_locked(pq);
1377
1378         as = vm_page_astate_load(m);
1379         if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0)
1380                 return (0);
1381         vm_page_aflag_set(m, PGA_ENQUEUED);
1382         TAILQ_INSERT_BEFORE(marker, m, plinks.q);
1383         return (1);
1384 }
1385
1386 /*
1387  * Re-add stuck pages to the inactive queue.  We will examine them again
1388  * during the next scan.  If the queue state of a page has changed since
1389  * it was physically removed from the page queue in
1390  * vm_pageout_collect_batch(), don't do anything with that page.
1391  */
1392 static void
1393 vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
1394     vm_page_t m)
1395 {
1396         struct vm_pagequeue *pq;
1397         vm_page_t marker;
1398         int delta;
1399
1400         delta = 0;
1401         marker = ss->marker;
1402         pq = ss->pq;
1403
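             /*
              * Batch the page if there is room; otherwise flush the batch
              * queue under the page queue lock.  Passing a NULL page forces
              * a flush of any batched pages.
              */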
1404         if (m != NULL) {
1405                 if (vm_batchqueue_insert(bq, m))
1406                         return;
1407                 vm_pagequeue_lock(pq);
1408                 delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
1409         } else
1410                 vm_pagequeue_lock(pq);
1411         while ((m = vm_batchqueue_pop(bq)) != NULL)
1412                 delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
1413         vm_pagequeue_cnt_add(pq, delta);
1414         vm_pagequeue_unlock(pq);
1415         vm_batchqueue_init(bq);
1416 }
1417
1418 /*
1419  * Attempt to reclaim the requested number of pages from the inactive queue.
1420  * Returns true if the shortage was addressed.
1421  */
1422 static int
1423 vm_pageout_scan_inactive(struct vm_domain *vmd, int shortage,
1424     int *addl_shortage)
1425 {
1426         struct scan_state ss;
1427         struct vm_batchqueue rq;
1428         vm_page_t m, marker;
1429         struct vm_pagequeue *pq;
1430         vm_object_t object;
1431         vm_page_astate_t old, new;
1432         int act_delta, addl_page_shortage, deficit, page_shortage, refs;
1433         int starting_page_shortage;
1434
1435         /*
1436          * The addl_page_shortage is an estimate of the number of temporarily
1437          * stuck pages in the inactive queue.  In other words, the
1438          * number of pages from the inactive count that should be
1439          * discounted in setting the target for the active queue scan.
1440          */
1441         addl_page_shortage = 0;
1442
1443         /*
1444          * vmd_pageout_deficit counts the number of pages requested in
1445          * allocations that failed because of a free page shortage.  We assume
1446          * that the allocations will be reattempted and thus include the deficit
1447          * in our scan target.
1448          */
1449         deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
1450         starting_page_shortage = page_shortage = shortage + deficit;
1451
1452         object = NULL;
1453         vm_batchqueue_init(&rq);
1454
1455         /*
1456          * Start scanning the inactive queue for pages that we can free.  The
1457          * scan will stop when we reach the target or we have scanned the
1458          * entire queue.  (Note that m->a.act_count is not used to make
1459          * decisions for the inactive queue, only for the active queue.)
1460          */
1461         marker = &vmd->vmd_markers[PQ_INACTIVE];
1462         pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1463         vm_pagequeue_lock(pq);
1464         vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
1465         while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) {
1466                 KASSERT((m->flags & PG_MARKER) == 0,
1467                     ("marker page %p was dequeued", m));
1468
1469                 /*
1470                  * Don't touch a page that was removed from the queue after the
1471                  * page queue lock was released.  Otherwise, ensure that any
1472                  * pending queue operations, such as dequeues for wired pages,
1473                  * are handled.
1474                  */
1475                 if (vm_pageout_defer(m, PQ_INACTIVE, false))
1476                         continue;
1477
1478                 /*
1479                  * Lock the page's object.
1480                  */
1481                 if (object == NULL || object != m->object) {
1482                         if (object != NULL)
1483                                 VM_OBJECT_WUNLOCK(object);
1484                         object = atomic_load_ptr(&m->object);
1485                         if (__predict_false(object == NULL))
1486                                 /* The page is being freed by another thread. */
1487                                 continue;
1488
1489                         /* Depends on type-stability. */
1490                         VM_OBJECT_WLOCK(object);
1491                         if (__predict_false(m->object != object)) {
1492                                 VM_OBJECT_WUNLOCK(object);
1493                                 object = NULL;
1494                                 goto reinsert;
1495                         }
1496                 }
1497
1498                 if (vm_page_tryxbusy(m) == 0) {
1499                         /*
1500                          * Don't mess with busy pages.  Leave them at
1501                          * the front of the queue.  Most likely, they
1502                          * are being paged out and will leave the
1503                          * queue shortly after the scan finishes.  So,
1504                          * they ought to be discounted from the
1505                          * inactive count.
1506                          */
1507                         addl_page_shortage++;
1508                         goto reinsert;
1509                 }
1510
1511                 /* Deferred free of swap space. */
1512                 if ((m->a.flags & PGA_SWAP_FREE) != 0)
1513                         vm_pager_page_unswapped(m);
1514
1515                 /*
1516                  * Check for wirings now that we hold the object lock and have
1517                  * exclusively busied the page.  If the page is mapped, it may
1518                  * still be wired by pmap lookups.  The call to
1519                  * vm_page_try_remove_all() below atomically checks for such
1520                  * wirings and removes mappings.  If the page is unmapped, the
1521                  * wire count is guaranteed not to increase after this check.
1522                  */
1523                 if (__predict_false(vm_page_wired(m)))
1524                         goto skip_page;
1525
1526                 /*
1527                  * Invalid pages can be easily freed. They cannot be
1528                  * mapped; vm_page_free() asserts this.
1529                  */
1530                 if (vm_page_none_valid(m))
1531                         goto free_page;
1532
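                     /*
                      * Collect the page's hardware reference bits.  The
                      * object ref_count check is unsynchronized; see the
                      * analogous comment in the active queue scan above for
                      * the races that this tolerates.
                      */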
1533                 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1534
1535                 for (old = vm_page_astate_load(m);;) {
1536                         /*
1537                          * Check to see if the page has been removed from the
1538                          * queue since the first such check.  Leave it alone if
1539                          * so, discarding any references collected by
1540                          * pmap_ts_referenced().
1541                          */
1542                         if (__predict_false(_vm_page_queue(old) == PQ_NONE))
1543                                 goto skip_page;
1544
1545                         new = old;
1546                         act_delta = refs;
1547                         if ((old.flags & PGA_REFERENCED) != 0) {
1548                                 new.flags &= ~PGA_REFERENCED;
1549                                 act_delta++;
1550                         }
1551                         if (act_delta == 0) {
1552                                 ;
1553                         } else if (object->ref_count != 0) {
1554                                 /*
1555                                  * Increase the activation count if the
1556                                  * page was referenced while in the
1557                                  * inactive queue.  This makes it less
1558                                  * likely that the page will be returned
1559                                  * prematurely to the inactive queue.
1560                                  */
1561                                 new.act_count += ACT_ADVANCE +
1562                                     act_delta;
1563                                 if (new.act_count > ACT_MAX)
1564                                         new.act_count = ACT_MAX;
1565
1566                                 new.flags &= ~PGA_QUEUE_OP_MASK;
1567                                 new.flags |= PGA_REQUEUE;
1568                                 new.queue = PQ_ACTIVE;
1569                                 if (!vm_page_pqstate_commit(m, &old, new))
1570                                         continue;
1571
1572                                 VM_CNT_INC(v_reactivated);
1573                                 goto skip_page;
1574                         } else if ((object->flags & OBJ_DEAD) == 0) {
1575                                 new.queue = PQ_INACTIVE;
1576                                 new.flags |= PGA_REQUEUE;
1577                                 if (!vm_page_pqstate_commit(m, &old, new))
1578                                         continue;
1579                                 goto skip_page;
1580                         }
1581                         break;
1582                 }
1583
1584                 /*
1585                  * If the page appears to be clean at the machine-independent
1586                  * layer, then remove all of its mappings from the pmap in
1587                  * anticipation of freeing it.  If, however, any of the page's
1588                  * mappings allow write access, then the page may still be
1589                  * modified until the last of those mappings are removed.
1590                  */
1591                 if (object->ref_count != 0) {
1592                         vm_page_test_dirty(m);
1593                         if (m->dirty == 0 && !vm_page_try_remove_all(m))
1594                                 goto skip_page;
1595                 }
1596
1597                 /*
1598                  * Clean pages can be freed, but dirty pages must be sent back
1599                  * to the laundry, unless they belong to a dead object.
1600                  * Requeueing dirty pages from dead objects is pointless, as
1601                  * they are being paged out and freed by the thread that
1602                  * destroyed the object.
1603                  */
1604                 if (m->dirty == 0) {
1605 free_page:
1606                         /*
1607                          * Now we are guaranteed that no other threads are
1608                          * manipulating the page; check for a last-second
1609                          * reference that would save it from doom.
1610                          */
1611                         if (vm_pageout_defer(m, PQ_INACTIVE, false))
1612                                 goto skip_page;
1613
1614                         /*
1615                          * Because we dequeued the page and have already checked
1616                          * for pending dequeue and enqueue requests, we can
1617                          * safely disassociate the page from the inactive queue
1618                          * without holding the queue lock.
1619                          */
1620                         m->a.queue = PQ_NONE;
1621                         vm_page_free(m);
1622                         page_shortage--;
1623                         continue;
1624                 }
1625                 if ((object->flags & OBJ_DEAD) == 0)
1626                         vm_page_launder(m);
1627 skip_page:
1628                 vm_page_xunbusy(m);
1629                 continue;
1630 reinsert:
1631                 vm_pageout_reinsert_inactive(&ss, &rq, m);
1632         }
1633         if (object != NULL)
1634                 VM_OBJECT_WUNLOCK(object);
1635         vm_pageout_reinsert_inactive(&ss, &rq, NULL);
1636         vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
1637         vm_pagequeue_lock(pq);
1638         vm_pageout_end_scan(&ss);
1639         vm_pagequeue_unlock(pq);
1640
1641         VM_CNT_ADD(v_dfree, starting_page_shortage - page_shortage);
1642
1643         /*
1644          * Wake up the laundry thread so that it can perform any needed
1645          * laundering.  If we didn't meet our target, we're in shortfall and
1646          * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1647          * swap devices are configured, the laundry thread has no work to do, so
1648          * don't bother waking it up.
1649          *
1650          * The laundry thread uses the number of inactive queue scans elapsed
1651          * since the last laundering to determine whether to launder again, so
1652          * keep count.
1653          */
1654         if (starting_page_shortage > 0) {
1655                 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1656                 vm_pagequeue_lock(pq);
1657                 if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1658                     (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1659                         if (page_shortage > 0) {
1660                                 vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
1661                                 VM_CNT_INC(v_pdshortfalls);
1662                         } else if (vmd->vmd_laundry_request !=
1663                             VM_LAUNDRY_SHORTFALL)
1664                                 vmd->vmd_laundry_request =
1665                                     VM_LAUNDRY_BACKGROUND;
1666                         wakeup(&vmd->vmd_laundry_request);
1667                 }
1668                 vmd->vmd_clean_pages_freed +=
1669                     starting_page_shortage - page_shortage;
1670                 vm_pagequeue_unlock(pq);
1671         }
1672
1673         /*
1674          * Wakeup the swapout daemon if we didn't free the targeted number of
1675          * pages.
1676          */
1677         if (page_shortage > 0)
1678                 vm_swapout_run();
1679
1680         /*
1681          * If the inactive queue scan fails repeatedly to meet its
1682          * target, kill the largest process.
1683          */
1684         vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1685
1686         /*
1687          * Reclaim pages by swapping out idle processes, if configured to do so.
1688          */
1689         vm_swapout_run_idle();
1690
1691         /*
1692          * See the description of addl_page_shortage above.
1693          */
1694         *addl_shortage = addl_page_shortage + deficit;
1695
1696         return (page_shortage <= 0);
1697 }
1698
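     /*
      * Number of pagedaemon threads that have failed to reach their free page
      * targets and have therefore voted to run the OOM killer.
      */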
1699 static int vm_pageout_oom_vote;
1700
1701 /*
1702  * Each pagedaemon thread votes to run the OOM killer when it fails to
1703  * reach its free page target; the last voter performs the kill.  Killing
1704  * processes before all pagedaemons have failed is premature.
1705  */
1706 static void
1707 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1708     int starting_page_shortage)
1709 {
1710         int old_vote;
1711
1712         if (starting_page_shortage <= 0 || starting_page_shortage !=
1713             page_shortage)
1714                 vmd->vmd_oom_seq = 0;
1715         else
1716                 vmd->vmd_oom_seq++;
1717         if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1718                 if (vmd->vmd_oom) {
1719                         vmd->vmd_oom = FALSE;
1720                         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1721                 }
1722                 return;
1723         }
1724
1725         /*
1726          * Reset the sequence counter so that another full run of failed
1727          * scans is required before this path is followed again.
1728          */
1729         vmd->vmd_oom_seq = 0;
1730
1731         if (vmd->vmd_oom)
1732                 return;
1733
1734         vmd->vmd_oom = TRUE;
1735         old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1736         if (old_vote != vm_ndomains - 1)
1737                 return;
1738
1739         /*
1740          * The current pagedaemon thread is the last in the quorum to
1741          * start OOM.  Initiate the selection and signaling of the
1742          * victim.
1743          */
1744         vm_pageout_oom(VM_OOM_MEM);
1745
1746         /*
1747          * After one round of OOM killing, recall our vote.  On the
1748          * next pass, this pagedaemon will vote again if the low
1749          * memory condition is still present, because vmd_oom is now
1750          * false.
1751          */
1752         vmd->vmd_oom = FALSE;
1753         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1754 }
1755
1756 /*
1757  * The OOM killer is the page daemon's action of last resort when
1758  * memory allocation requests have been stalled for a prolonged period
1759  * of time because it cannot reclaim memory.  This function computes
1760  * the approximate number of physical pages that could be reclaimed if
1761  * the specified address space is destroyed.
1762  *
1763  * Private, anonymous memory owned by the address space is the
1764  * principal resource that we expect to recover after an OOM kill.
1765  * Since the physical pages mapped by the address space's COW entries
1766  * are typically shared pages, they are unlikely to be released and so
1767  * they are not counted.
1768  *
1769  * To get to the point where the page daemon runs the OOM killer, its
1770  * efforts to write-back vnode-backed pages may have stalled.  This
1771  * could be caused by a memory allocation deadlock in the write path
1772  * that might be resolved by an OOM kill.  Therefore, physical pages
1773  * belonging to vnode-backed objects are counted, because they might
1774  * be freed without being written out first if the address space holds
1775  * the last reference to an unlinked vnode.
1776  *
1777  * Similarly, physical pages belonging to OBJT_PHYS objects are
1778  * counted because the address space might hold the last reference to
1779  * the object.
1780  */
1781 static long
1782 vm_pageout_oom_pagecount(struct vmspace *vmspace)
1783 {
1784         vm_map_t map;
1785         vm_map_entry_t entry;
1786         vm_object_t obj;
1787         long res;
1788
1789         map = &vmspace->vm_map;
1790         KASSERT(!map->system_map, ("system map"));
1791         sx_assert(&map->lock, SA_LOCKED);
1792         res = 0;
1793         VM_MAP_ENTRY_FOREACH(entry, map) {
1794                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1795                         continue;
1796                 obj = entry->object.vm_object;
1797                 if (obj == NULL)
1798                         continue;
1799                 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
1800                     obj->ref_count != 1)
1801                         continue;
1802                 switch (obj->type) {
1803                 case OBJT_DEFAULT:
1804                 case OBJT_SWAP:
1805                 case OBJT_PHYS:
1806                 case OBJT_VNODE:
1807                         res += obj->resident_page_count;
1808                         break;
1809                 }
1810         }
1811         return (res);
1812 }
1813
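     /*
      * State used to rate-limit OOM kills triggered by page fault timeouts
      * (VM_OOM_MEM_PF): at most one such kill per vm_oom_pf_secs seconds.
      */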
1814 static int vm_oom_ratelim_last;
1815 static int vm_oom_pf_secs = 10;
1816 SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0,
1817     "Minimum interval in seconds between OOM kills triggered by page faults");
1818 static struct mtx vm_oom_ratelim_mtx;
1819
1820 void
1821 vm_pageout_oom(int shortage)
1822 {
1823         struct proc *p, *bigproc;
1824         vm_offset_t size, bigsize;
1825         struct thread *td;
1826         struct vmspace *vm;
1827         int now;
1828         bool breakout;
1829
1830         /*
1831          * For OOM requests originating from vm_fault(), there is a high
1832          * chance that a single large process faults simultaneously in
1833          * several threads.  Also, on an active system running many
1834          * processes of middle-size, like buildworld, all of them
1835          * medium-sized processes, as during a buildworld, all of them
1836          * could fault almost simultaneously as well.
1837          * To avoid killing too many processes, rate-limit OOMs
1838          * initiated by vm_fault() time-outs on the waits for free
1839          * pages.
1840          */
1841         mtx_lock(&vm_oom_ratelim_mtx);
1842         now = ticks;
1843         if (shortage == VM_OOM_MEM_PF &&
1844             (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) {
1845                 mtx_unlock(&vm_oom_ratelim_mtx);
1846                 return;
1847         }
1848         vm_oom_ratelim_last = now;
1849         mtx_unlock(&vm_oom_ratelim_mtx);
1850
1851         /*
1852          * We keep the process bigproc locked once we find it to keep anyone
1853          * from messing with it; however, there is a possibility of
1854          * deadlock if bigproc is process B and one of its child processes,
1855          * A, attempts to propagate a signal to B while we are waiting for
1856          * A's lock while walking this list.  To avoid this, we don't block
1857          * on the process lock but just skip a process if it is already locked.
1858          */
1859         bigproc = NULL;
1860         bigsize = 0;
1861         sx_slock(&allproc_lock);
1862         FOREACH_PROC_IN_SYSTEM(p) {
1863                 PROC_LOCK(p);
1864
1865                 /*
1866                  * If this is a system, protected or killed process, skip it.
1867                  */
1868                 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1869                     P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1870                     p->p_pid == 1 || P_KILLED(p) ||
1871                     (p->p_pid < 48 && swap_pager_avail != 0)) {
1872                         PROC_UNLOCK(p);
1873                         continue;
1874                 }
1875                 /*
1876                  * If the process is in a non-running type state,
1877                  * don't touch it.  Check all the threads individually.
1878                  */
1879                 breakout = false;
1880                 FOREACH_THREAD_IN_PROC(p, td) {
1881                         thread_lock(td);
1882                         if (!TD_ON_RUNQ(td) &&
1883                             !TD_IS_RUNNING(td) &&
1884                             !TD_IS_SLEEPING(td) &&
1885                             !TD_IS_SUSPENDED(td) &&
1886                             !TD_IS_SWAPPED(td)) {
1887                                 thread_unlock(td);
1888                                 breakout = true;
1889                                 break;
1890                         }
1891                         thread_unlock(td);
1892                 }
1893                 if (breakout) {
1894                         PROC_UNLOCK(p);
1895                         continue;
1896                 }
1897                 /*
1898                  * Get the process size.
1899                  */
1900                 vm = vmspace_acquire_ref(p);
1901                 if (vm == NULL) {
1902                         PROC_UNLOCK(p);
1903                         continue;
1904                 }
1905                 _PHOLD_LITE(p);
1906                 PROC_UNLOCK(p);
1907                 sx_sunlock(&allproc_lock);
1908                 if (!vm_map_trylock_read(&vm->vm_map)) {
1909                         vmspace_free(vm);
1910                         sx_slock(&allproc_lock);
1911                         PRELE(p);
1912                         continue;
1913                 }
1914                 size = vmspace_swap_count(vm);
1915                 if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF)
1916                         size += vm_pageout_oom_pagecount(vm);
1917                 vm_map_unlock_read(&vm->vm_map);
1918                 vmspace_free(vm);
1919                 sx_slock(&allproc_lock);
1920
1921                 /*
1922                  * If this process is bigger than the biggest one,
1923                  * remember it.
1924                  */
1925                 if (size > bigsize) {
1926                         if (bigproc != NULL)
1927                                 PRELE(bigproc);
1928                         bigproc = p;
1929                         bigsize = size;
1930                 } else {
1931                         PRELE(p);
1932                 }
1933         }
1934         sx_sunlock(&allproc_lock);
1935         if (bigproc != NULL) {
1936                 if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0)
1937                         panic("out of swap space");
1938                 PROC_LOCK(bigproc);
1939                 killproc(bigproc, "out of swap space");
1940                 sched_nice(bigproc, PRIO_MIN);
1941                 _PRELE(bigproc);
1942                 PROC_UNLOCK(bigproc);
1943         }
1944 }
1945
1946 /*
1947  * Signal a free page shortage to subsystems that have registered an event
1948  * handler.  Reclaim memory from UMA in the event of a severe shortage.
1949  * Return true if the free page count should be re-evaluated.
1950  */
1951 static bool
1952 vm_pageout_lowmem(void)
1953 {
1954         static int lowmem_ticks = 0;
1955         int last;
1956         bool ret;
1957
1958         ret = false;
1959
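             /*
              * Invoke the lowmem handlers at most once every lowmem_period
              * seconds; the atomic update of lowmem_ticks ensures that only
              * one thread does so per period.
              */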
1960         last = atomic_load_int(&lowmem_ticks);
1961         while ((u_int)(ticks - last) / hz >= lowmem_period) {
1962                 if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
1963                         continue;
1964
1965                 /*
1966                  * Decrease registered cache sizes.
1967                  */
1968                 SDT_PROBE0(vm, , , vm__lowmem_scan);
1969                 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
1970
1971                 /*
1972                  * We do this explicitly after the caches have been
1973                  * drained above.
1974                  */
1975                 uma_reclaim(UMA_RECLAIM_TRIM);
1976                 ret = true;
1977                 break;
1978         }
1979
1980         /*
1981          * Kick off an asynchronous reclaim of cached memory if one of the
1982          * page daemons is failing to keep up with demand.  Use the "severe"
1983          * threshold instead of "min" to ensure that we do not blow away the
1984          * caches if a subset of the NUMA domains are depleted by kernel memory
1985          * allocations; the domainset iterators automatically skip domains
1986          * below the "min" threshold on the first pass.
1987          *
1988          * UMA reclaim worker has its own rate-limiting mechanism, so don't
1989          * worry about kicking it too often.
1990          */
1991         if (vm_page_count_severe())
1992                 uma_reclaim_wakeup();
1993
1994         return (ret);
1995 }
1996
1997 static void
1998 vm_pageout_worker(void *arg)
1999 {
2000         struct vm_domain *vmd;
2001         u_int ofree;
2002         int addl_shortage, domain, shortage;
2003         bool target_met;
2004
2005         domain = (uintptr_t)arg;
2006         vmd = VM_DOMAIN(domain);
2007         shortage = 0;
2008         target_met = true;
2009
2010         /*
2011          * XXXKIB It could be useful to bind pageout daemon threads to
2012          * the cores belonging to the domain from which vm_page_array
2013          * is allocated.
2014          */
2015
2016         KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
2017         vmd->vmd_last_active_scan = ticks;
2018
2019         /*
2020          * The pageout daemon worker is never done, so loop forever.
2021          */
2022         while (TRUE) {
2023                 vm_domain_pageout_lock(vmd);
2024
2025                 /*
2026                  * We need to clear wanted before we check the limits.  This
2027                  * prevents races with wakers who will check wanted after they
2028                  * reach the limit.
2029                  */
2030                 atomic_store_int(&vmd->vmd_pageout_wanted, 0);
2031
2032                 /*
2033                  * Might the page daemon need to run again?
2034                  */
2035                 if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
2036                         /*
2037                          * Yes.  If the scan failed to produce enough free
2038                          * pages, sleep uninterruptibly for some time in the
2039                          * hope that the laundry thread will clean some pages.
2040                          */
2041                         vm_domain_pageout_unlock(vmd);
2042                         if (!target_met)
2043                                 pause("pwait", hz / VM_INACT_SCAN_RATE);
2044                 } else {
2045                         /*
2046                          * No, sleep until the next wakeup or until pages
2047                          * need to have their reference stats updated.
2048                          */
2049                         if (mtx_sleep(&vmd->vmd_pageout_wanted,
2050                             vm_domain_pageout_lockptr(vmd), PDROP | PVM,
2051                             "psleep", hz / VM_INACT_SCAN_RATE) == 0)
2052                                 VM_CNT_INC(v_pdwakeups);
2053                 }
2054
2055                 /* Prevent spurious wakeups by ensuring that wanted is set. */
2056                 atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2057
2058                 /*
2059                  * Use the controller to calculate how many pages to free in
2060                  * this interval, and scan the inactive queue.  If the lowmem
2061                  * handlers appear to have freed up some pages, subtract the
2062                  * difference from the inactive queue scan target.
2063                  */
2064                 shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
2065                 if (shortage > 0) {
2066                         ofree = vmd->vmd_free_count;
2067                         if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
2068                                 shortage -= min(vmd->vmd_free_count - ofree,
2069                                     (u_int)shortage);
2070                         target_met = vm_pageout_scan_inactive(vmd, shortage,
2071                             &addl_shortage);
2072                 } else
2073                         addl_shortage = 0;
2074
2075                 /*
2076                  * Scan the active queue.  A positive value for shortage
2077                  * indicates that we must aggressively deactivate pages to avoid
2078                  * a shortfall.
2079                  */
2080                 shortage = vm_pageout_active_target(vmd) + addl_shortage;
2081                 vm_pageout_scan_active(vmd, shortage);
2082         }
2083 }
2084
2085 /*
2086  * Initialize basic pageout daemon settings.  See the comment above the
2087  * definition of vm_domain for some explanation of how these thresholds are
2088  * used.
2089  */
2090 static void
2091 vm_pageout_init_domain(int domain)
2092 {
2093         struct vm_domain *vmd;
2094         struct sysctl_oid *oid;
2095
2096         vmd = VM_DOMAIN(domain);
2097         vmd->vmd_interrupt_free_min = 2;
2098
2099         /*
2100          * v_free_reserved needs to include enough for the largest
2101          * swap pager structures plus enough for any pv_entry structs
2102          * when paging.
2103          */
2104         vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
2105             vmd->vmd_interrupt_free_min;
2106         vmd->vmd_free_reserved = vm_pageout_page_count +
2107             vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768;
2108         vmd->vmd_free_min = vmd->vmd_page_count / 200;
2109         vmd->vmd_free_severe = vmd->vmd_free_min / 2;
2110         vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
2111         vmd->vmd_free_min += vmd->vmd_free_reserved;
2112         vmd->vmd_free_severe += vmd->vmd_free_reserved;
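             /*
              * The inactive target is 150% of the free target, capped at one
              * third of the domain's current free page count.
              */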
2113         vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
2114         if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
2115                 vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
2116
2117         /*
2118          * Set the default wakeup threshold to be 10% below the paging
2119          * target.  This keeps the steady state out of shortfall.
2120          */
2121         vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
2122
2123         /*
2124          * Target amount of memory to move out of the laundry queue during a
2125          * background laundering.  This is proportional to the amount of system
2126          * memory.
2127          */
2128         vmd->vmd_background_launder_target = (vmd->vmd_free_target -
2129             vmd->vmd_free_min) / 10;
2130
2131         /* Initialize the pageout daemon pid controller. */
2132         pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
2133             vmd->vmd_free_target, PIDCTRL_BOUND,
2134             PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
2135         oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
2136             "pidctrl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2137         pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
2138 }
2139
2140 static void
2141 vm_pageout_init(void)
2142 {
2143         u_int freecount;
2144         int i;
2145
2146         /*
2147          * Initialize some paging parameters.
2148          */
2149         if (vm_cnt.v_page_count < 2000)
2150                 vm_pageout_page_count = 8;
2151
2152         freecount = 0;
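             /*
              * Initialize each domain's thresholds and accumulate them into
              * the system-wide counters.
              */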
2153         for (i = 0; i < vm_ndomains; i++) {
2154                 struct vm_domain *vmd;
2155
2156                 vm_pageout_init_domain(i);
2157                 vmd = VM_DOMAIN(i);
2158                 vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
2159                 vm_cnt.v_free_target += vmd->vmd_free_target;
2160                 vm_cnt.v_free_min += vmd->vmd_free_min;
2161                 vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
2162                 vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
2163                 vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
2164                 vm_cnt.v_free_severe += vmd->vmd_free_severe;
2165                 freecount += vmd->vmd_free_count;
2166         }
2167
2168         /*
2169          * Set interval in seconds for active scan.  We want to visit each
2170          * page at least once every ten minutes.  This is to prevent worst-
2171          * case paging behavior with a stale active LRU.
2172          */
2173         if (vm_pageout_update_period == 0)
2174                 vm_pageout_update_period = 600;
2175
2176         if (vm_page_max_user_wired == 0)
2177                 vm_page_max_user_wired = freecount / 3;
2178 }
2179
2180 /*
2181  *     vm_pageout is the high level pageout daemon.
2182  */
2183 static void
2184 vm_pageout(void)
2185 {
2186         struct proc *p;
2187         struct thread *td;
2188         int error, first, i;
2189
2190         p = curproc;
2191         td = curthread;
2192
2193         mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
2194         swap_pager_swap_init();
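             /*
              * Create a pageout kthread for each non-empty domain except the
              * first, which is handled by the current thread at the end of
              * this function.  Each non-empty domain also gets a laundry
              * kthread.
              */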
2195         for (first = -1, i = 0; i < vm_ndomains; i++) {
2196                 if (VM_DOMAIN_EMPTY(i)) {
2197                         if (bootverbose)
2198                                 printf("domain %d empty; skipping pageout\n",
2199                                     i);
2200                         continue;
2201                 }
2202                 if (first == -1)
2203                         first = i;
2204                 else {
2205                         error = kthread_add(vm_pageout_worker,
2206                             (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
2207                         if (error != 0)
2208                                 panic("starting pageout for domain %d: %d\n",
2209                                     i, error);
2210                 }
2211                 error = kthread_add(vm_pageout_laundry_worker,
2212                     (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
2213                 if (error != 0)
2214                         panic("starting laundry for domain %d: %d", i, error);
2215         }
2216         error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
2217         if (error != 0)
2218                 panic("starting uma_reclaim helper, error %d\n", error);
2219
2220         snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
2221         vm_pageout_worker((void *)(uintptr_t)first);
2222 }
2223
2224 /*
2225  * Perform an advisory wakeup of the page daemon.
2226  */
2227 void
2228 pagedaemon_wakeup(int domain)
2229 {
2230         struct vm_domain *vmd;
2231
2232         vmd = VM_DOMAIN(domain);
2233         vm_domain_pageout_assert_unlocked(vmd);
2234         if (curproc == pageproc)
2235                 return;
2236
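             /*
              * Only the waker that transitions vmd_pageout_wanted from 0 to 1
              * takes the pageout lock and issues the wakeup; concurrent
              * wakers see a non-zero value and return without it.
              */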
2237         if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
2238                 vm_domain_pageout_lock(vmd);
2239                 atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2240                 wakeup(&vmd->vmd_pageout_wanted);
2241                 vm_domain_pageout_unlock(vmd);
2242         }
2243 }