sys/vm/vm_pageout.c
1 /*-
2  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2005 Yahoo! Technologies Norway AS
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to Berkeley by
14  * The Mach Operating System project at Carnegie-Mellon University.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *      This product includes software developed by the University of
27  *      California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
45  *
46  *
47  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
48  * All rights reserved.
49  *
50  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
51  *
52  * Permission to use, copy, modify and distribute this software and
53  * its documentation is hereby granted, provided that both the copyright
54  * notice and this permission notice appear in all copies of the
55  * software, derivative works or modified versions, and any portions
56  * thereof, and that both notices appear in supporting documentation.
57  *
58  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61  *
62  * Carnegie Mellon requests users of this software to return to
63  *
64  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65  *  School of Computer Science
66  *  Carnegie Mellon University
67  *  Pittsburgh PA 15213-3890
68  *
69  * any improvements or extensions that they make and grant Carnegie the
70  * rights to redistribute these changes.
71  */
72
73 /*
74  *      The proverbial page-out daemon.
75  */
76
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
79
80 #include "opt_vm.h"
81
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/kernel.h>
85 #include <sys/blockcount.h>
86 #include <sys/eventhandler.h>
87 #include <sys/lock.h>
88 #include <sys/mutex.h>
89 #include <sys/proc.h>
90 #include <sys/kthread.h>
91 #include <sys/ktr.h>
92 #include <sys/mount.h>
93 #include <sys/racct.h>
94 #include <sys/resourcevar.h>
95 #include <sys/sched.h>
96 #include <sys/sdt.h>
97 #include <sys/signalvar.h>
98 #include <sys/smp.h>
99 #include <sys/time.h>
100 #include <sys/vnode.h>
101 #include <sys/vmmeter.h>
102 #include <sys/rwlock.h>
103 #include <sys/sx.h>
104 #include <sys/sysctl.h>
105
106 #include <vm/vm.h>
107 #include <vm/vm_param.h>
108 #include <vm/vm_object.h>
109 #include <vm/vm_page.h>
110 #include <vm/vm_map.h>
111 #include <vm/vm_pageout.h>
112 #include <vm/vm_pager.h>
113 #include <vm/vm_phys.h>
114 #include <vm/vm_pagequeue.h>
115 #include <vm/swap_pager.h>
116 #include <vm/vm_extern.h>
117 #include <vm/uma.h>
118
119 /*
120  * System initialization
121  */
122
123 /* the kernel process "vm_pageout"*/
124 static void vm_pageout(void);
125 static void vm_pageout_init(void);
126 static int vm_pageout_clean(vm_page_t m, int *numpagedout);
127 static int vm_pageout_cluster(vm_page_t m);
128 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
129     int starting_page_shortage);
130
131 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
132     NULL);
133
134 struct proc *pageproc;
135
136 static struct kproc_desc page_kp = {
137         "pagedaemon",
138         vm_pageout,
139         &pageproc
140 };
141 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
142     &page_kp);
143
144 SDT_PROVIDER_DEFINE(vm);
145 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
146
147 /* Pagedaemon activity rates, in subdivisions of one second. */
148 #define VM_LAUNDER_RATE         10
149 #define VM_INACT_SCAN_RATE      10
150
151 static int swapdev_enabled;
152 int vm_pageout_page_count = 32;
153
154 static int vm_panic_on_oom = 0;
155 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
156     CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
157     "Panic on the given number of out-of-memory errors instead of "
158     "killing the largest process");
159
160 static int vm_pageout_update_period;
161 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
162     CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
163     "Maximum active LRU update period");
164
165 static int pageout_cpus_per_thread = 16;
166 SYSCTL_INT(_vm, OID_AUTO, pageout_cpus_per_thread, CTLFLAG_RDTUN,
167     &pageout_cpus_per_thread, 0,
168     "Number of CPUs per pagedaemon worker thread");
169   
170 static int lowmem_period = 10;
171 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
172     "Low memory callback period");
173
174 static int disable_swap_pageouts;
175 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
176     CTLFLAG_RWTUN, &disable_swap_pageouts, 0,
177     "Disallow swapout of dirty pages");
178
179 static int pageout_lock_miss;
180 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
181     CTLFLAG_RD, &pageout_lock_miss, 0,
182     "vget() lock misses during pageout");
183
184 static int vm_pageout_oom_seq = 12;
185 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
186     CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
187     "back-to-back calls to oom detector to start OOM");
188
189 static int act_scan_laundry_weight = 3;
190 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
191     &act_scan_laundry_weight, 0,
192     "weight given to clean vs. dirty pages in active queue scans");
193
194 static u_int vm_background_launder_rate = 4096;
195 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
196     &vm_background_launder_rate, 0,
197     "background laundering rate, in kilobytes per second");
198
199 static u_int vm_background_launder_max = 20 * 1024;
200 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
201     &vm_background_launder_max, 0,
202     "background laundering cap, in kilobytes");
203
204 u_long vm_page_max_user_wired;
205 SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
206     &vm_page_max_user_wired, 0,
207     "system-wide limit to user-wired page count");
208
209 static u_int isqrt(u_int num);
210 static int vm_pageout_launder(struct vm_domain *vmd, int launder,
211     bool in_shortfall);
212 static void vm_pageout_laundry_worker(void *arg);
213
214 struct scan_state {
215         struct vm_batchqueue bq;
216         struct vm_pagequeue *pq;
217         vm_page_t       marker;
218         int             maxscan;
219         int             scanned;
220 };
221
222 static void
223 vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
224     vm_page_t marker, vm_page_t after, int maxscan)
225 {
226
227         vm_pagequeue_assert_locked(pq);
228         KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
229             ("marker %p already enqueued", marker));
230
231         if (after == NULL)
232                 TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
233         else
234                 TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
235         vm_page_aflag_set(marker, PGA_ENQUEUED);
236
237         vm_batchqueue_init(&ss->bq);
238         ss->pq = pq;
239         ss->marker = marker;
240         ss->maxscan = maxscan;
241         ss->scanned = 0;
242         vm_pagequeue_unlock(pq);
243 }
244
245 static void
246 vm_pageout_end_scan(struct scan_state *ss)
247 {
248         struct vm_pagequeue *pq;
249
250         pq = ss->pq;
251         vm_pagequeue_assert_locked(pq);
252         KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
253             ("marker %p not enqueued", ss->marker));
254
255         TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
256         vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
257         pq->pq_pdpages += ss->scanned;
258 }
259
260 /*
261  * Add a small number of queued pages to a batch queue for later processing
262  * without the corresponding queue lock held.  The caller must have enqueued a
263  * marker page at the desired start point for the scan.  Pages will be
264  * physically dequeued if the caller so requests.  Otherwise, the returned
265  * batch may contain marker pages, and it is up to the caller to handle them.
266  *
267  * When processing the batch queue, vm_pageout_defer() must be used to
268  * determine whether the page has been logically dequeued since the batch was
269  * collected.
270  */
271 static __always_inline void
272 vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
273 {
274         struct vm_pagequeue *pq;
275         vm_page_t m, marker, n;
276
277         marker = ss->marker;
278         pq = ss->pq;
279
280         KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
281             ("marker %p not enqueued", ss->marker));
282
283         vm_pagequeue_lock(pq);
284         for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
285             ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
286             m = n, ss->scanned++) {
287                 n = TAILQ_NEXT(m, plinks.q);
288                 if ((m->flags & PG_MARKER) == 0) {
289                         KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
290                             ("page %p not enqueued", m));
291                         KASSERT((m->flags & PG_FICTITIOUS) == 0,
292                             ("Fictitious page %p cannot be in page queue", m));
293                         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
294                             ("Unmanaged page %p cannot be in page queue", m));
295                 } else if (dequeue)
296                         continue;
297
298                 (void)vm_batchqueue_insert(&ss->bq, m);
299                 if (dequeue) {
300                         TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
301                         vm_page_aflag_clear(m, PGA_ENQUEUED);
302                 }
303         }
304         TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
305         if (__predict_true(m != NULL))
306                 TAILQ_INSERT_BEFORE(m, marker, plinks.q);
307         else
308                 TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
309         if (dequeue)
310                 vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
311         vm_pagequeue_unlock(pq);
312 }
313
314 /*
315  * Return the next page to be scanned, or NULL if the scan is complete.
316  */
317 static __always_inline vm_page_t
318 vm_pageout_next(struct scan_state *ss, const bool dequeue)
319 {
320
321         if (ss->bq.bq_cnt == 0)
322                 vm_pageout_collect_batch(ss, dequeue);
323         return (vm_batchqueue_pop(&ss->bq));
324 }
325
326 /*
327  * Determine whether processing of a page should be deferred and ensure that any
328  * outstanding queue operations are processed.
329  */
330 static __always_inline bool
331 vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
332 {
333         vm_page_astate_t as;
334
335         as = vm_page_astate_load(m);
336         if (__predict_false(as.queue != queue ||
337             ((as.flags & PGA_ENQUEUED) != 0) != enqueued))
338                 return (true);
339         if ((as.flags & PGA_QUEUE_OP_MASK) != 0) {
340                 vm_page_pqbatch_submit(m, queue);
341                 return (true);
342         }
343         return (false);
344 }
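
/*
 * Taken together, the helpers above form the scan protocol used by the queue
 * scans later in this file.  A minimal sketch of that pattern (the laundry
 * and active queue scans below follow it, with additional page-state checks):
 *
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 *	while ((m = vm_pageout_next(&ss, false)) != NULL) {
 *		if ((m->flags & PG_MARKER) != 0)
 *			continue;
 *		if (vm_pageout_defer(m, queue, true))
 *			continue;
 *		...examine and possibly reclaim m...
 *	}
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_end_scan(&ss);
 *	vm_pagequeue_unlock(pq);
 *
 * vm_pageout_init_scan() is called with the queue lock held and drops it, so
 * the loop body runs unlocked; the lock is reacquired only to end the scan.
 */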
345
346 /*
347  * Scan for pages at adjacent offsets within the given page's object that are
348  * eligible for laundering, form a cluster of these pages and the given page,
349  * and launder that cluster.
350  */
351 static int
352 vm_pageout_cluster(vm_page_t m)
353 {
354         vm_object_t object;
355         vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
356         vm_pindex_t pindex;
357         int ib, is, page_base, pageout_count;
358
359         object = m->object;
360         VM_OBJECT_ASSERT_WLOCKED(object);
361         pindex = m->pindex;
362
363         vm_page_assert_xbusied(m);
364
365         mc[vm_pageout_page_count] = pb = ps = m;
366         pageout_count = 1;
367         page_base = vm_pageout_page_count;
368         ib = 1;
369         is = 1;
370
371         /*
372          * We can cluster only if the page is not clean, busy, or held, and
373          * the page is in the laundry queue.
374          *
375          * During heavy mmap/modification loads the pageout
376          * daemon can really fragment the underlying file
377          * due to flushing pages out of order and not trying to
378          * align the clusters (which leaves sporadic out-of-order
379          * holes).  To solve this problem we do the reverse scan
380          * first and attempt to align our cluster, then do a 
381          * forward scan if room remains.
382          */
383 more:
384         while (ib != 0 && pageout_count < vm_pageout_page_count) {
385                 if (ib > pindex) {
386                         ib = 0;
387                         break;
388                 }
389                 if ((p = vm_page_prev(pb)) == NULL ||
390                     vm_page_tryxbusy(p) == 0) {
391                         ib = 0;
392                         break;
393                 }
394                 if (vm_page_wired(p)) {
395                         ib = 0;
396                         vm_page_xunbusy(p);
397                         break;
398                 }
399                 vm_page_test_dirty(p);
400                 if (p->dirty == 0) {
401                         ib = 0;
402                         vm_page_xunbusy(p);
403                         break;
404                 }
405                 if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
406                         vm_page_xunbusy(p);
407                         ib = 0;
408                         break;
409                 }
410                 mc[--page_base] = pb = p;
411                 ++pageout_count;
412                 ++ib;
413
414                 /*
415                  * We are at an alignment boundary.  Stop here, and switch
416                  * directions.  Do not clear ib.
417                  */
418                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
419                         break;
420         }
421         while (pageout_count < vm_pageout_page_count && 
422             pindex + is < object->size) {
423                 if ((p = vm_page_next(ps)) == NULL ||
424                     vm_page_tryxbusy(p) == 0)
425                         break;
426                 if (vm_page_wired(p)) {
427                         vm_page_xunbusy(p);
428                         break;
429                 }
430                 vm_page_test_dirty(p);
431                 if (p->dirty == 0) {
432                         vm_page_xunbusy(p);
433                         break;
434                 }
435                 if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
436                         vm_page_xunbusy(p);
437                         break;
438                 }
439                 mc[page_base + pageout_count] = ps = p;
440                 ++pageout_count;
441                 ++is;
442         }
443
444         /*
445          * If we exhausted our forward scan, continue with the reverse scan
446          * when possible, even past an alignment boundary.  This catches
447          * boundary conditions.
448          */
449         if (ib != 0 && pageout_count < vm_pageout_page_count)
450                 goto more;
451
452         return (vm_pageout_flush(&mc[page_base], pageout_count,
453             VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
454 }
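
/*
 * As an illustration of the alignment logic above, using the default
 * vm_pageout_page_count of 32 and assuming the neighboring pages are all
 * dirty, resident laundry pages: laundering the page at pindex 70 first
 * scans backwards, collecting pages 69, 68, ..., 64 and stopping at the
 * 32-page boundary, then scans forward from 71.  A full cluster therefore
 * covers pindexes 64-95, so repeated pageouts tend to produce aligned,
 * non-overlapping 32-page runs rather than arbitrary windows centered on
 * whichever page happened to be selected.
 */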
455
456 /*
457  * vm_pageout_flush() - launder the given pages
458  *
459  *      The given pages are laundered.  Note that we set up for the start of
460  *      I/O (i.e., busy the page), mark it read-only, and bump the object
461  *      reference count all in here rather than in the parent.  If we want
462  *      the parent to do more sophisticated things we may have to change
463  *      the ordering.
464  *
465  *      The returned runlen is the count of pages between mreq and the first
466  *      page after mreq with status VM_PAGER_AGAIN.
467  *      *eio is set to TRUE if the pager returned VM_PAGER_ERROR or
468  *      VM_PAGER_FAIL for any page in that run.
469  */
470 int
471 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
472     boolean_t *eio)
473 {
474         vm_object_t object = mc[0]->object;
475         int pageout_status[count];
476         int numpagedout = 0;
477         int i, runlen;
478
479         VM_OBJECT_ASSERT_WLOCKED(object);
480
481         /*
482          * Initiate I/O.  Mark the pages shared busy and verify that they're
483          * valid and read-only.
484          *
485          * We do not have to fixup the clean/dirty bits here... we can
486          * allow the pager to do it after the I/O completes.
487          *
488          * NOTE! mc[i]->dirty may be partial or fragmented due to an
489          * edge case with file fragments.
490          */
491         for (i = 0; i < count; i++) {
492                 KASSERT(vm_page_all_valid(mc[i]),
493                     ("vm_pageout_flush: partially invalid page %p index %d/%d",
494                         mc[i], i, count));
495                 KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
496                     ("vm_pageout_flush: writeable page %p", mc[i]));
497                 vm_page_busy_downgrade(mc[i]);
498         }
499         vm_object_pip_add(object, count);
500
501         vm_pager_put_pages(object, mc, count, flags, pageout_status);
502
503         runlen = count - mreq;
504         if (eio != NULL)
505                 *eio = FALSE;
506         for (i = 0; i < count; i++) {
507                 vm_page_t mt = mc[i];
508
509                 KASSERT(pageout_status[i] == VM_PAGER_PEND ||
510                     !pmap_page_is_write_mapped(mt),
511                     ("vm_pageout_flush: page %p is not write protected", mt));
512                 switch (pageout_status[i]) {
513                 case VM_PAGER_OK:
514                         /*
515                          * The page may have moved since laundering started, in
516                          * which case it should be left alone.
517                          */
518                         if (vm_page_in_laundry(mt))
519                                 vm_page_deactivate_noreuse(mt);
520                         /* FALLTHROUGH */
521                 case VM_PAGER_PEND:
522                         numpagedout++;
523                         break;
524                 case VM_PAGER_BAD:
525                         /*
526                          * The page is outside the object's range.  We pretend
527                          * that the page out worked and clean the page, so the
528                          * changes will be lost if the page is reclaimed by
529                          * the page daemon.
530                          */
531                         vm_page_undirty(mt);
532                         if (vm_page_in_laundry(mt))
533                                 vm_page_deactivate_noreuse(mt);
534                         break;
535                 case VM_PAGER_ERROR:
536                 case VM_PAGER_FAIL:
537                         /*
538                          * If the page couldn't be paged out to swap because the
539                          * pager wasn't able to find space, place the page in
540                          * the PQ_UNSWAPPABLE holding queue.  This is an
541                          * optimization that prevents the page daemon from
542                          * wasting CPU cycles on pages that cannot be reclaimed
543                          * because no swap device is configured.
544                          *
545                          * Otherwise, reactivate the page so that it doesn't
546                          * clog the laundry and inactive queues.  (We will try
547                          * paging it out again later.)
548                          */
549                         if ((object->flags & OBJ_SWAP) != 0 &&
550                             pageout_status[i] == VM_PAGER_FAIL) {
551                                 vm_page_unswappable(mt);
552                                 numpagedout++;
553                         } else
554                                 vm_page_activate(mt);
555                         if (eio != NULL && i >= mreq && i - mreq < runlen)
556                                 *eio = TRUE;
557                         break;
558                 case VM_PAGER_AGAIN:
559                         if (i >= mreq && i - mreq < runlen)
560                                 runlen = i - mreq;
561                         break;
562                 }
563
564                 /*
565                  * If the operation is still going, leave the page busy to
566                  * block all other accesses. Also, leave the paging in
567                  * progress indicator set so that we don't attempt an object
568                  * collapse.
569                  */
570                 if (pageout_status[i] != VM_PAGER_PEND) {
571                         vm_object_pip_wakeup(object);
572                         vm_page_sunbusy(mt);
573                 }
574         }
575         if (prunlen != NULL)
576                 *prunlen = runlen;
577         return (numpagedout);
578 }
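
/*
 * For example (illustrative numbers only): with count = 8 and mreq = 2,
 * runlen starts at 6.  If the pager reports VM_PAGER_AGAIN for mc[5] and
 * success for the rest, runlen is trimmed to 3, describing the run mc[2..4]
 * that begins at mreq and completed without VM_PAGER_AGAIN.
 */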
579
580 static void
581 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
582 {
583
584         atomic_store_rel_int(&swapdev_enabled, 1);
585 }
586
587 static void
588 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
589 {
590
591         if (swap_pager_nswapdev() == 1)
592                 atomic_store_rel_int(&swapdev_enabled, 0);
593 }
594
595 /*
596  * Attempt to acquire all of the necessary locks to launder a page and
597  * then call through the clustering layer to PUTPAGES.  Wait a short
598  * time for a vnode lock.
599  *
600  * Requires the page and object lock on entry, releases both before return.
601  * Returns 0 on success and an errno otherwise.
602  */
603 static int
604 vm_pageout_clean(vm_page_t m, int *numpagedout)
605 {
606         struct vnode *vp;
607         struct mount *mp;
608         vm_object_t object;
609         vm_pindex_t pindex;
610         int error;
611
612         object = m->object;
613         VM_OBJECT_ASSERT_WLOCKED(object);
614         error = 0;
615         vp = NULL;
616         mp = NULL;
617
618         /*
619          * The object is already known NOT to be dead.   It
620          * is possible for the vget() to block the whole
621          * pageout daemon, but the new low-memory handling
622          * code should prevent it.
623          *
624  *      We can't wait forever for the vnode lock; we might
625          * deadlock due to a vn_read() getting stuck in
626          * vm_wait while holding this vnode.  We skip the 
627          * vnode if we can't get it in a reasonable amount
628          * of time.
629          */
630         if (object->type == OBJT_VNODE) {
631                 vm_page_xunbusy(m);
632                 vp = object->handle;
633                 if (vp->v_type == VREG &&
634                     vn_start_write(vp, &mp, V_NOWAIT) != 0) {
635                         mp = NULL;
636                         error = EDEADLK;
637                         goto unlock_all;
638                 }
639                 KASSERT(mp != NULL,
640                     ("vp %p with NULL v_mount", vp));
641                 vm_object_reference_locked(object);
642                 pindex = m->pindex;
643                 VM_OBJECT_WUNLOCK(object);
644                 if (vget(vp, vn_lktype_write(NULL, vp) | LK_TIMELOCK) != 0) {
645                         vp = NULL;
646                         error = EDEADLK;
647                         goto unlock_mp;
648                 }
649                 VM_OBJECT_WLOCK(object);
650
651                 /*
652                  * Ensure that the object and vnode were not disassociated
653                  * while locks were dropped.
654                  */
655                 if (vp->v_object != object) {
656                         error = ENOENT;
657                         goto unlock_all;
658                 }
659
660                 /*
661                  * While the object was unlocked, the page may have been:
662                  * (1) moved to a different queue,
663                  * (2) reallocated to a different object,
664                  * (3) reallocated to a different offset, or
665                  * (4) cleaned.
666                  */
667                 if (!vm_page_in_laundry(m) || m->object != object ||
668                     m->pindex != pindex || m->dirty == 0) {
669                         error = ENXIO;
670                         goto unlock_all;
671                 }
672
673                 /*
674                  * The page may have been busied while the object lock was
675                  * released.
676                  */
677                 if (vm_page_tryxbusy(m) == 0) {
678                         error = EBUSY;
679                         goto unlock_all;
680                 }
681         }
682
683         /*
684          * Remove all writeable mappings, failing if the page is wired.
685          */
686         if (!vm_page_try_remove_write(m)) {
687                 vm_page_xunbusy(m);
688                 error = EBUSY;
689                 goto unlock_all;
690         }
691
692         /*
693          * If a page is dirty, then it is either being washed
694          * (but not yet cleaned) or it is still in the
695          * laundry.  If it is still in the laundry, then we
696          * start the cleaning operation. 
697          */
698         if ((*numpagedout = vm_pageout_cluster(m)) == 0)
699                 error = EIO;
700
701 unlock_all:
702         VM_OBJECT_WUNLOCK(object);
703
704 unlock_mp:
705         if (mp != NULL) {
706                 if (vp != NULL)
707                         vput(vp);
708                 vm_object_deallocate(object);
709                 vn_finished_write(mp);
710         }
711
712         return (error);
713 }
714
715 /*
716  * Attempt to launder the specified number of pages.
717  *
718  * Returns the number of pages successfully laundered.
719  */
720 static int
721 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
722 {
723         struct scan_state ss;
724         struct vm_pagequeue *pq;
725         vm_object_t object;
726         vm_page_t m, marker;
727         vm_page_astate_t new, old;
728         int act_delta, error, numpagedout, queue, refs, starting_target;
729         int vnodes_skipped;
730         bool pageout_ok;
731
732         object = NULL;
733         starting_target = launder;
734         vnodes_skipped = 0;
735
736         /*
737          * Scan the laundry queues for pages eligible to be laundered.  We stop
738          * once the target number of dirty pages have been laundered, or once
739          * we've reached the end of the queue.  A single iteration of this loop
740          * may cause more than one page to be laundered because of clustering.
741          *
742          * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
743          * swap devices are configured.
744          */
745         if (atomic_load_acq_int(&swapdev_enabled))
746                 queue = PQ_UNSWAPPABLE;
747         else
748                 queue = PQ_LAUNDRY;
749
750 scan:
751         marker = &vmd->vmd_markers[queue];
752         pq = &vmd->vmd_pagequeues[queue];
753         vm_pagequeue_lock(pq);
754         vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
755         while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
756                 if (__predict_false((m->flags & PG_MARKER) != 0))
757                         continue;
758
759                 /*
760                  * Don't touch a page that was removed from the queue after the
761                  * page queue lock was released.  Otherwise, ensure that any
762                  * pending queue operations, such as dequeues for wired pages,
763                  * are handled.
764                  */
765                 if (vm_pageout_defer(m, queue, true))
766                         continue;
767
768                 /*
769                  * Lock the page's object.
770                  */
771                 if (object == NULL || object != m->object) {
772                         if (object != NULL)
773                                 VM_OBJECT_WUNLOCK(object);
774                         object = atomic_load_ptr(&m->object);
775                         if (__predict_false(object == NULL))
776                                 /* The page is being freed by another thread. */
777                                 continue;
778
779                         /* Depends on type-stability. */
780                         VM_OBJECT_WLOCK(object);
781                         if (__predict_false(m->object != object)) {
782                                 VM_OBJECT_WUNLOCK(object);
783                                 object = NULL;
784                                 continue;
785                         }
786                 }
787
788                 if (vm_page_tryxbusy(m) == 0)
789                         continue;
790
791                 /*
792                  * Check for wirings now that we hold the object lock and have
793                  * exclusively busied the page.  If the page is mapped, it may
794                  * still be wired by pmap lookups.  The call to
795                  * vm_page_try_remove_all() below atomically checks for such
796                  * wirings and removes mappings.  If the page is unmapped, the
797                  * wire count is guaranteed not to increase after this check.
798                  */
799                 if (__predict_false(vm_page_wired(m)))
800                         goto skip_page;
801
802                 /*
803                  * Invalid pages can be easily freed.  They cannot be
804                  * mapped; vm_page_free() asserts this.
805                  */
806                 if (vm_page_none_valid(m))
807                         goto free_page;
808
809                 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
810
811                 for (old = vm_page_astate_load(m);;) {
812                         /*
813                          * Check to see if the page has been removed from the
814                          * queue since the first such check.  Leave it alone if
815                          * so, discarding any references collected by
816                          * pmap_ts_referenced().
817                          */
818                         if (__predict_false(_vm_page_queue(old) == PQ_NONE))
819                                 goto skip_page;
820
821                         new = old;
822                         act_delta = refs;
823                         if ((old.flags & PGA_REFERENCED) != 0) {
824                                 new.flags &= ~PGA_REFERENCED;
825                                 act_delta++;
826                         }
827                         if (act_delta == 0) {
828                                 ;
829                         } else if (object->ref_count != 0) {
830                                 /*
831                                  * Increase the activation count if the page was
832                                  * referenced while in the laundry queue.  This
833                                  * makes it less likely that the page will be
834                                  * returned prematurely to the laundry queue.
835                                  */
836                                 new.act_count += ACT_ADVANCE +
837                                     act_delta;
838                                 if (new.act_count > ACT_MAX)
839                                         new.act_count = ACT_MAX;
840
841                                 new.flags &= ~PGA_QUEUE_OP_MASK;
842                                 new.flags |= PGA_REQUEUE;
843                                 new.queue = PQ_ACTIVE;
844                                 if (!vm_page_pqstate_commit(m, &old, new))
845                                         continue;
846
847                                 /*
848                                  * If this was a background laundering, count
849                                  * activated pages towards our target.  The
850                                  * purpose of background laundering is to ensure
851                                  * that pages are eventually cycled through the
852                                  * laundry queue, and an activation is a valid
853                                  * way out.
854                                  */
855                                 if (!in_shortfall)
856                                         launder--;
857                                 VM_CNT_INC(v_reactivated);
858                                 goto skip_page;
859                         } else if ((object->flags & OBJ_DEAD) == 0) {
860                                 new.flags |= PGA_REQUEUE;
861                                 if (!vm_page_pqstate_commit(m, &old, new))
862                                         continue;
863                                 goto skip_page;
864                         }
865                         break;
866                 }
867
868                 /*
869                  * If the page appears to be clean at the machine-independent
870                  * layer, then remove all of its mappings from the pmap in
871                  * anticipation of freeing it.  If, however, any of the page's
872                  * mappings allow write access, then the page may still be
873                  * modified until the last of those mappings are removed.
874                  */
875                 if (object->ref_count != 0) {
876                         vm_page_test_dirty(m);
877                         if (m->dirty == 0 && !vm_page_try_remove_all(m))
878                                 goto skip_page;
879                 }
880
881                 /*
882                  * Clean pages are freed, and dirty pages are paged out unless
883                  * they belong to a dead object.  Requeueing dirty pages from
884                  * dead objects is pointless, as they are being paged out and
885                  * freed by the thread that destroyed the object.
886                  */
887                 if (m->dirty == 0) {
888 free_page:
889                         /*
890                          * Now we are guaranteed that no other threads are
891                          * manipulating the page, check for a last-second
892                          * reference.
893                          */
894                         if (vm_pageout_defer(m, queue, true))
895                                 goto skip_page;
896                         vm_page_free(m);
897                         VM_CNT_INC(v_dfree);
898                 } else if ((object->flags & OBJ_DEAD) == 0) {
899                         if ((object->flags & OBJ_SWAP) == 0 &&
900                             object->type != OBJT_DEFAULT)
901                                 pageout_ok = true;
902                         else if (disable_swap_pageouts)
903                                 pageout_ok = false;
904                         else
905                                 pageout_ok = true;
906                         if (!pageout_ok) {
907                                 vm_page_launder(m);
908                                 goto skip_page;
909                         }
910
911                         /*
912                          * Form a cluster with adjacent, dirty pages from the
913                          * same object, and page out that entire cluster.
914                          *
915                          * The adjacent, dirty pages must also be in the
916                          * laundry.  However, their mappings are not checked
917                          * for new references.  Consequently, a recently
918                          * referenced page may be paged out.  However, that
919                          * page will not be prematurely reclaimed.  After page
920                          * out, the page will be placed in the inactive queue,
921                          * where any new references will be detected and the
922                          * page reactivated.
923                          */
924                         error = vm_pageout_clean(m, &numpagedout);
925                         if (error == 0) {
926                                 launder -= numpagedout;
927                                 ss.scanned += numpagedout;
928                         } else if (error == EDEADLK) {
929                                 pageout_lock_miss++;
930                                 vnodes_skipped++;
931                         }
932                         object = NULL;
933                 } else {
934 skip_page:
935                         vm_page_xunbusy(m);
936                 }
937         }
938         if (object != NULL) {
939                 VM_OBJECT_WUNLOCK(object);
940                 object = NULL;
941         }
942         vm_pagequeue_lock(pq);
943         vm_pageout_end_scan(&ss);
944         vm_pagequeue_unlock(pq);
945
946         if (launder > 0 && queue == PQ_UNSWAPPABLE) {
947                 queue = PQ_LAUNDRY;
948                 goto scan;
949         }
950
951         /*
952          * Wakeup the sync daemon if we skipped a vnode in a writeable object
953          * and we didn't launder enough pages.
954          */
955         if (vnodes_skipped > 0 && launder > 0)
956                 (void)speedup_syncer();
957
958         return (starting_target - launder);
959 }
960
961 /*
962  * Compute the integer square root.
963  */
964 static u_int
965 isqrt(u_int num)
966 {
967         u_int bit, root, tmp;
968
969         bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
970         root = 0;
971         while (bit != 0) {
972                 tmp = root + bit;
973                 root >>= 1;
974                 if (num >= tmp) {
975                         num -= tmp;
976                         root += bit;
977                 }
978                 bit >>= 2;
979         }
980         return (root);
981 }
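
/*
 * This is the standard bit-by-bit method.  For example, isqrt(27): "bit"
 * starts at 16, the largest power of four not exceeding 27.
 *   Pass 1: tmp = 0 + 16 <= 27, so num becomes 11 and root becomes 16.
 *   Pass 2: tmp = 16 + 4 = 20 > 11, so root is only halved, to 8.
 *   Pass 3: tmp = 8 + 1 = 9 <= 11, so num becomes 2 and root becomes 5.
 * The result, 5, is the floor of the square root of 27.
 */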
982
983 /*
984  * Perform the work of the laundry thread: periodically wake up and determine
985  * whether any pages need to be laundered.  If so, determine the number of pages
986  * that need to be laundered, and launder them.
987  */
988 static void
989 vm_pageout_laundry_worker(void *arg)
990 {
991         struct vm_domain *vmd;
992         struct vm_pagequeue *pq;
993         uint64_t nclean, ndirty, nfreed;
994         int domain, last_target, launder, shortfall, shortfall_cycle, target;
995         bool in_shortfall;
996
997         domain = (uintptr_t)arg;
998         vmd = VM_DOMAIN(domain);
999         pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1000         KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
1001
1002         shortfall = 0;
1003         in_shortfall = false;
1004         shortfall_cycle = 0;
1005         last_target = target = 0;
1006         nfreed = 0;
1007
1008         /*
1009          * Calls to these handlers are serialized by the swap syscall lock.
1010          */
1011         (void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
1012             EVENTHANDLER_PRI_ANY);
1013         (void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
1014             EVENTHANDLER_PRI_ANY);
1015
1016         /*
1017          * The pageout laundry worker is never done, so loop forever.
1018          */
1019         for (;;) {
1020                 KASSERT(target >= 0, ("negative target %d", target));
1021                 KASSERT(shortfall_cycle >= 0,
1022                     ("negative cycle %d", shortfall_cycle));
1023                 launder = 0;
1024
1025                 /*
1026                  * First determine whether we need to launder pages to meet a
1027                  * shortage of free pages.
1028                  */
1029                 if (shortfall > 0) {
1030                         in_shortfall = true;
1031                         shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
1032                         target = shortfall;
1033                 } else if (!in_shortfall)
1034                         goto trybackground;
1035                 else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
1036                         /*
1037                          * We recently entered shortfall and began laundering
1038                          * pages.  If we have completed that laundering run
1039                          * (and we are no longer in shortfall) or we have met
1040                          * our laundry target through other activity, then we
1041                          * can stop laundering pages.
1042                          */
1043                         in_shortfall = false;
1044                         target = 0;
1045                         goto trybackground;
1046                 }
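                /*
                 * Spread the remaining target over the remaining laundering
                 * passes of this shortfall episode.  With the rates defined
                 * above (VM_LAUNDER_RATE == VM_INACT_SCAN_RATE == 10),
                 * shortfall_cycle starts at 1, so the entire shortfall target
                 * is attempted in a single pass.
                 */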
1047                 launder = target / shortfall_cycle--;
1048                 goto dolaundry;
1049
1050                 /*
1051                  * There's no immediate need to launder any pages; see if we
1052                  * meet the conditions to perform background laundering:
1053                  *
1054                  * 1. The ratio of dirty to clean inactive pages exceeds the
1055                  *    background laundering threshold, or
1056                  * 2. we haven't yet reached the target of the current
1057                  *    background laundering run.
1058                  *
1059                  * The background laundering threshold is not a constant.
1060                  * Instead, it is a slowly growing function of the number of
1061                  * clean pages freed by the page daemon since the last
1062                  * background laundering.  Thus, as the ratio of dirty to
1063                  * clean inactive pages grows, the amount of memory pressure
1064                  * required to trigger laundering decreases.  We ensure
1065                  * that the threshold is non-zero after an inactive queue
1066                  * scan, even if that scan failed to free a single clean page.
1067                  */
1068 trybackground:
1069                 nclean = vmd->vmd_free_count +
1070                     vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
1071                 ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
1072                 if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
1073                     vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
1074                         target = vmd->vmd_background_launder_target;
1075                 }
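                /*
                 * To make the threshold concrete (illustrative numbers only):
                 * if vmd_free_target - vmd_free_min is 10000 pages, then with
                 * nfreed == 0 the scale factor is isqrt(howmany(1, 10000)) == 1
                 * and a background run starts once ndirty >= nclean.  After
                 * 40000 clean pages have been freed, the factor grows to
                 * isqrt(5) == 2, so the dirty count need only reach half the
                 * clean count to start another run.
                 */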
1076
1077                 /*
1078                  * We have a non-zero background laundering target.  If we've
1079                  * laundered up to our maximum without observing a page daemon
1080                  * request, just stop.  This is a safety belt that ensures we
1081                  * don't launder an excessive amount if memory pressure is low
1082                  * and the ratio of dirty to clean pages is large.  Otherwise,
1083                  * proceed at the background laundering rate.
1084                  */
1085                 if (target > 0) {
1086                         if (nfreed > 0) {
1087                                 nfreed = 0;
1088                                 last_target = target;
1089                         } else if (last_target - target >=
1090                             vm_background_launder_max * 1024 / PAGE_SIZE) {
1091                                 target = 0;
1092                         }
1093                         launder = vm_background_launder_rate * 1024 / PAGE_SIZE;
1094                         launder /= VM_LAUNDER_RATE;
1095                         if (launder > target)
1096                                 launder = target;
1097                 }
1098
1099 dolaundry:
1100                 if (launder > 0) {
1101                         /*
1102                          * Because of I/O clustering, the number of laundered
1103                          * pages could exceed "target" by the maximum size of
1104                          * a cluster minus one. 
1105                          */
1106                         target -= min(vm_pageout_launder(vmd, launder,
1107                             in_shortfall), target);
1108                         pause("laundp", hz / VM_LAUNDER_RATE);
1109                 }
1110
1111                 /*
1112                  * If we're not currently laundering pages and the page daemon
1113                  * hasn't posted a new request, sleep until the page daemon
1114                  * kicks us.
1115                  */
1116                 vm_pagequeue_lock(pq);
1117                 if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
1118                         (void)mtx_sleep(&vmd->vmd_laundry_request,
1119                             vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1120
1121                 /*
1122                  * If the pagedaemon has indicated that it's in shortfall, start
1123                  * a shortfall laundering unless we're already in the middle of
1124                  * one.  This may preempt a background laundering.
1125                  */
1126                 if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
1127                     (!in_shortfall || shortfall_cycle == 0)) {
1128                         shortfall = vm_laundry_target(vmd) +
1129                             vmd->vmd_pageout_deficit;
1130                         target = 0;
1131                 } else
1132                         shortfall = 0;
1133
1134                 if (target == 0)
1135                         vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
1136                 nfreed += vmd->vmd_clean_pages_freed;
1137                 vmd->vmd_clean_pages_freed = 0;
1138                 vm_pagequeue_unlock(pq);
1139         }
1140 }
1141
1142 /*
1143  * Compute the number of pages we want to try to move from the
1144  * active queue to either the inactive or laundry queue.
1145  *
1146  * When scanning active pages during a shortage, we make clean pages
1147  * count more heavily towards the page shortage than dirty pages.
1148  * This is because dirty pages must be laundered before they can be
1149  * reused and thus have less utility when attempting to quickly
1150  * alleviate a free page shortage.  However, this weighting also
1151  * causes the scan to deactivate dirty pages more aggressively,
1152  * improving the effectiveness of clustering.
1153  */
1154 static int
1155 vm_pageout_active_target(struct vm_domain *vmd)
1156 {
1157         int shortage;
1158
1159         shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
1160             (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
1161             vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
1162         shortage *= act_scan_laundry_weight;
1163         return (shortage);
1164 }
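
/*
 * With the default act_scan_laundry_weight of 3, the shortage computed above
 * is expressed in "clean page equivalents": the active queue scan below
 * subtracts act_scan_laundry_weight for each clean page it moves to the
 * inactive queue but only 1 for each dirty page it moves to the laundry
 * queue, so roughly three dirty pages must be deactivated to make the same
 * progress toward the target as one clean page.
 */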
1165
1166 /*
1167  * Scan the active queue.  If there is no shortage of inactive pages, scan a
1168  * small portion of the queue in order to maintain quasi-LRU.
1169  */
1170 static void
1171 vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
1172 {
1173         struct scan_state ss;
1174         vm_object_t object;
1175         vm_page_t m, marker;
1176         struct vm_pagequeue *pq;
1177         vm_page_astate_t old, new;
1178         long min_scan;
1179         int act_delta, max_scan, ps_delta, refs, scan_tick;
1180         uint8_t nqueue;
1181
1182         marker = &vmd->vmd_markers[PQ_ACTIVE];
1183         pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1184         vm_pagequeue_lock(pq);
1185
1186         /*
1187          * If we're just idle polling, attempt to visit every
1188          * active page within 'update_period' seconds.
1189          */
1190         scan_tick = ticks;
1191         if (vm_pageout_update_period != 0) {
1192                 min_scan = pq->pq_cnt;
1193                 min_scan *= scan_tick - vmd->vmd_last_active_scan;
1194                 min_scan /= hz * vm_pageout_update_period;
1195         } else
1196                 min_scan = 0;
1197         if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
1198                 vmd->vmd_last_active_scan = scan_tick;
1199
1200         /*
1201          * Scan the active queue for pages that can be deactivated.  Update
1202          * the per-page activity counter and use it to identify deactivation
1203          * candidates.  Held pages may be deactivated.
1204          *
1205          * To avoid requeuing each page that remains in the active queue, we
1206          * implement the CLOCK algorithm.  To keep the implementation of the
1207          * enqueue operation consistent for all page queues, we use two hands,
1208          * represented by marker pages. Scans begin at the first hand, which
1209          * precedes the second hand in the queue.  When the two hands meet,
1210          * they are moved back to the head and tail of the queue, respectively,
1211          * and scanning resumes.
1212          */
1213         max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
1214 act_scan:
1215         vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
1216         while ((m = vm_pageout_next(&ss, false)) != NULL) {
1217                 if (__predict_false(m == &vmd->vmd_clock[1])) {
1218                         vm_pagequeue_lock(pq);
1219                         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1220                         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
1221                         TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
1222                             plinks.q);
1223                         TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
1224                             plinks.q);
1225                         max_scan -= ss.scanned;
1226                         vm_pageout_end_scan(&ss);
1227                         goto act_scan;
1228                 }
1229                 if (__predict_false((m->flags & PG_MARKER) != 0))
1230                         continue;
1231
1232                 /*
1233                  * Don't touch a page that was removed from the queue after the
1234                  * page queue lock was released.  Otherwise, ensure that any
1235                  * pending queue operations, such as dequeues for wired pages,
1236                  * are handled.
1237                  */
1238                 if (vm_pageout_defer(m, PQ_ACTIVE, true))
1239                         continue;
1240
1241                 /*
1242                  * A page's object pointer may be set to NULL before
1243                  * the object lock is acquired.
1244                  */
1245                 object = atomic_load_ptr(&m->object);
1246                 if (__predict_false(object == NULL))
1247                         /*
1248                          * The page has been removed from its object.
1249                          */
1250                         continue;
1251
1252                 /* Deferred free of swap space. */
1253                 if ((m->a.flags & PGA_SWAP_FREE) != 0 &&
1254                     VM_OBJECT_TRYWLOCK(object)) {
1255                         if (m->object == object)
1256                                 vm_pager_page_unswapped(m);
1257                         VM_OBJECT_WUNLOCK(object);
1258                 }
1259
1260                 /*
1261                  * Check to see "how much" the page has been used.
1262                  *
1263                  * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1264                  * that a reference from a concurrently destroyed mapping is
1265                  * observed here and now.
1266                  *
1267                  * Perform an unsynchronized object ref count check.  While
1268                  * the page lock ensures that the page is not reallocated to
1269                  * another object, in particular, one with unmanaged mappings
1270                  * that cannot support pmap_ts_referenced(), two races are,
1271                  * nonetheless, possible:
1272                  * 1) The count was transitioning to zero, but we saw a non-
1273                  *    zero value.  pmap_ts_referenced() will return zero
1274                  *    because the page is not mapped.
1275                  * 2) The count was transitioning to one, but we saw zero.
1276                  *    This race delays the detection of a new reference.  At
1277                  *    worst, we will deactivate and reactivate the page.
1278                  */
1279                 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1280
1281                 old = vm_page_astate_load(m);
1282                 do {
1283                         /*
1284                          * Check to see if the page has been removed from the
1285                          * queue since the first such check.  Leave it alone if
1286                          * so, discarding any references collected by
1287                          * pmap_ts_referenced().
1288                          */
1289                         if (__predict_false(_vm_page_queue(old) == PQ_NONE)) {
1290                                 ps_delta = 0;
1291                                 break;
1292                         }
1293
1294                         /*
1295                          * Advance or decay the act_count based on recent usage.
1296                          */
1297                         new = old;
1298                         act_delta = refs;
1299                         if ((old.flags & PGA_REFERENCED) != 0) {
1300                                 new.flags &= ~PGA_REFERENCED;
1301                                 act_delta++;
1302                         }
1303                         if (act_delta != 0) {
1304                                 new.act_count += ACT_ADVANCE + act_delta;
1305                                 if (new.act_count > ACT_MAX)
1306                                         new.act_count = ACT_MAX;
1307                         } else {
1308                                 new.act_count -= min(new.act_count,
1309                                     ACT_DECLINE);
1310                         }
1311
1312                         if (new.act_count > 0) {
1313                                 /*
1314                                  * Adjust the activation count and keep the page
1315                                  * in the active queue.  The count might be left
1316                                  * unchanged if it is saturated.  The page may
1317                                  * have been moved to a different queue since we
1318                                  * started the scan, in which case we move it
1319                                  * back.
1320                                  */
1321                                 ps_delta = 0;
1322                                 if (old.queue != PQ_ACTIVE) {
1323                                         new.flags &= ~PGA_QUEUE_OP_MASK;
1324                                         new.flags |= PGA_REQUEUE;
1325                                         new.queue = PQ_ACTIVE;
1326                                 }
1327                         } else {
1328                                 /*
1329                                  * When not short for inactive pages, let dirty
1330                                  * pages go through the inactive queue before
1331                                  * moving to the laundry queue.  This gives them
1332                                  * some extra time to be reactivated,
1333                                  * potentially avoiding an expensive pageout.
1334                                  * However, during a page shortage, the inactive
1335                                  * queue is necessarily small, and so dirty
1336                                  * pages would only spend a trivial amount of
1337                                  * time in the inactive queue.  Therefore, we
1338                                  * might as well place them directly in the
1339                                  * laundry queue to reduce queuing overhead.
1340                                  *
1341                                  * Calling vm_page_test_dirty() here would
1342                                  * require acquisition of the object's write
1343                                  * lock.  However, during a page shortage,
1344                                  * directing dirty pages into the laundry queue
1345                                  * is only an optimization and not a
1346                                  * requirement.  Therefore, we simply rely on
1347                                  * the opportunistic updates to the page's dirty
1348                                  * field by the pmap.
1349                                  */
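                                /*
                                 * ps_delta is the credit applied to the scan
                                 * target for this page: a dirty page sent to
                                 * the laundry counts once, a clean page
                                 * deactivated during a shortage counts
                                 * act_scan_laundry_weight times, and pages
                                 * deactivated with no shortage, or kept
                                 * active, count nothing.
                                 */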
1350                                 if (page_shortage <= 0) {
1351                                         nqueue = PQ_INACTIVE;
1352                                         ps_delta = 0;
1353                                 } else if (m->dirty == 0) {
1354                                         nqueue = PQ_INACTIVE;
1355                                         ps_delta = act_scan_laundry_weight;
1356                                 } else {
1357                                         nqueue = PQ_LAUNDRY;
1358                                         ps_delta = 1;
1359                                 }
1360
1361                                 new.flags &= ~PGA_QUEUE_OP_MASK;
1362                                 new.flags |= PGA_REQUEUE;
1363                                 new.queue = nqueue;
1364                         }
1365                 } while (!vm_page_pqstate_commit(m, &old, new));
1366
1367                 page_shortage -= ps_delta;
1368         }
1369         vm_pagequeue_lock(pq);
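        /*
         * Advance the clock hand: move vmd_clock[0] to just after the scan
         * marker so that the next call to vm_pageout_scan_active() resumes
         * where this scan stopped.
         */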
1370         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1371         TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
1372         vm_pageout_end_scan(&ss);
1373         vm_pagequeue_unlock(pq);
1374 }
1375
1376 static int
1377 vm_pageout_reinsert_inactive_page(struct vm_pagequeue *pq, vm_page_t marker,
1378     vm_page_t m)
1379 {
1380         vm_page_astate_t as;
1381
1382         vm_pagequeue_assert_locked(pq);
1383
1384         as = vm_page_astate_load(m);
1385         if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0)
1386                 return (0);
1387         vm_page_aflag_set(m, PGA_ENQUEUED);
1388         TAILQ_INSERT_BEFORE(marker, m, plinks.q);
1389         return (1);
1390 }
1391
1392 /*
1393  * Re-add stuck pages to the inactive queue.  We will examine them again
1394  * during the next scan.  If the queue state of a page has changed since
1395  * it was physically removed from the page queue in
1396  * vm_pageout_collect_batch(), don't do anything with that page.
1397  */
1398 static void
1399 vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
1400     vm_page_t m)
1401 {
1402         struct vm_pagequeue *pq;
1403         vm_page_t marker;
1404         int delta;
1405
1406         delta = 0;
1407         marker = ss->marker;
1408         pq = ss->pq;
1409
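        /*
         * Pages are accumulated in the caller's batch queue; only when the
         * batch fills up, or on the final flush (m == NULL), do we take the
         * page queue lock and reinsert the whole batch, amortizing the cost
         * of the lock acquisition.
         */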
1410         if (m != NULL) {
1411                 if (vm_batchqueue_insert(bq, m))
1412                         return;
1413                 vm_pagequeue_lock(pq);
1414                 delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
1415         } else
1416                 vm_pagequeue_lock(pq);
1417         while ((m = vm_batchqueue_pop(bq)) != NULL)
1418                 delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
1419         vm_pagequeue_cnt_add(pq, delta);
1420         vm_pagequeue_unlock(pq);
1421         vm_batchqueue_init(bq);
1422 }
1423
1424 static void
1425 vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
1426 {
1427         struct timeval start, end;
1428         struct scan_state ss;
1429         struct vm_batchqueue rq;
1430         struct vm_page marker_page;
1431         vm_page_t m, marker;
1432         struct vm_pagequeue *pq;
1433         vm_object_t object;
1434         vm_page_astate_t old, new;
1435         int act_delta, addl_page_shortage, starting_page_shortage, refs;
1436
1437         object = NULL;
1438         vm_batchqueue_init(&rq);
1439         getmicrouptime(&start);
1440
1441         /*
1442          * The addl_page_shortage is an estimate of the number of temporarily
1443          * stuck pages in the inactive queue.  In other words, it is the
1444          * number of pages from the inactive count that should be
1445          * discounted in setting the target for the active queue scan.
1446          */
1447         addl_page_shortage = 0;
1448
1449         /*
1450          * Start scanning the inactive queue for pages that we can free.  The
1451          * scan will stop when we reach the target or we have scanned the
1452          * entire queue.  (Note that m->a.act_count is not used to make
1453          * decisions for the inactive queue, only for the active queue.)
1454          */
1455         starting_page_shortage = page_shortage;
1456         marker = &marker_page;
1457         vm_page_init_marker(marker, PQ_INACTIVE, 0);
1458         pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1459         vm_pagequeue_lock(pq);
1460         vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
1461         while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) {
1462                 KASSERT((m->flags & PG_MARKER) == 0,
1463                     ("marker page %p was dequeued", m));
1464
1465                 /*
1466                  * Don't touch a page that was removed from the queue after the
1467                  * page queue lock was released.  Otherwise, ensure that any
1468                  * pending queue operations, such as dequeues for wired pages,
1469                  * are handled.
1470                  */
1471                 if (vm_pageout_defer(m, PQ_INACTIVE, false))
1472                         continue;
1473
1474                 /*
1475                  * Lock the page's object.
1476                  */
1477                 if (object == NULL || object != m->object) {
1478                         if (object != NULL)
1479                                 VM_OBJECT_WUNLOCK(object);
1480                         object = atomic_load_ptr(&m->object);
1481                         if (__predict_false(object == NULL))
1482                                 /* The page is being freed by another thread. */
1483                                 continue;
1484
1485                         /* Depends on type-stability. */
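                        /*
                         * Object memory is type-stable (never returned to the
                         * system), so acquiring the lock through a possibly
                         * stale pointer is safe; the identity check below
                         * detects whether the page changed objects meanwhile.
                         */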
1486                         VM_OBJECT_WLOCK(object);
1487                         if (__predict_false(m->object != object)) {
1488                                 VM_OBJECT_WUNLOCK(object);
1489                                 object = NULL;
1490                                 goto reinsert;
1491                         }
1492                 }
1493
1494                 if (vm_page_tryxbusy(m) == 0) {
1495                         /*
1496                          * Don't mess with busy pages.  Leave them at
1497                          * the front of the queue.  Most likely, they
1498                          * are being paged out and will leave the
1499                          * queue shortly after the scan finishes.  So,
1500                          * they ought to be discounted from the
1501                          * inactive count.
1502                          */
1503                         addl_page_shortage++;
1504                         goto reinsert;
1505                 }
1506
1507                 /* Deferred free of swap space. */
1508                 if ((m->a.flags & PGA_SWAP_FREE) != 0)
1509                         vm_pager_page_unswapped(m);
1510
1511                 /*
1512                  * Check for wirings now that we hold the object lock and have
1513                  * exclusively busied the page.  If the page is mapped, it may
1514                  * still be wired by pmap lookups.  The call to
1515                  * vm_page_try_remove_all() below atomically checks for such
1516                  * wirings and removes mappings.  If the page is unmapped, the
1517                  * wire count is guaranteed not to increase after this check.
1518                  */
1519                 if (__predict_false(vm_page_wired(m)))
1520                         goto skip_page;
1521
1522                 /*
1523                  * Invalid pages can be easily freed. They cannot be
1524                  * mapped, vm_page_free() asserts this.
1525                  */
1526                 if (vm_page_none_valid(m))
1527                         goto free_page;
1528
1529                 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1530
1531                 for (old = vm_page_astate_load(m);;) {
1532                         /*
1533                          * Check to see if the page has been removed from the
1534                          * queue since the first such check.  Leave it alone if
1535                          * so, discarding any references collected by
1536                          * pmap_ts_referenced().
1537                          */
1538                         if (__predict_false(_vm_page_queue(old) == PQ_NONE))
1539                                 goto skip_page;
1540
1541                         new = old;
1542                         act_delta = refs;
1543                         if ((old.flags & PGA_REFERENCED) != 0) {
1544                                 new.flags &= ~PGA_REFERENCED;
1545                                 act_delta++;
1546                         }
1547                         if (act_delta == 0) {
1548                                 ;
1549                         } else if (object->ref_count != 0) {
1550                                 /*
1551                                  * Increase the activation count if the
1552                                  * page was referenced while in the
1553                                  * inactive queue.  This makes it less
1554                                  * likely that the page will be returned
1555                                  * prematurely to the inactive queue.
1556                                  */
1557                                 new.act_count += ACT_ADVANCE +
1558                                     act_delta;
1559                                 if (new.act_count > ACT_MAX)
1560                                         new.act_count = ACT_MAX;
1561
1562                                 new.flags &= ~PGA_QUEUE_OP_MASK;
1563                                 new.flags |= PGA_REQUEUE;
1564                                 new.queue = PQ_ACTIVE;
1565                                 if (!vm_page_pqstate_commit(m, &old, new))
1566                                         continue;
1567
1568                                 VM_CNT_INC(v_reactivated);
1569                                 goto skip_page;
1570                         } else if ((object->flags & OBJ_DEAD) == 0) {
1571                                 new.queue = PQ_INACTIVE;
1572                                 new.flags |= PGA_REQUEUE;
1573                                 if (!vm_page_pqstate_commit(m, &old, new))
1574                                         continue;
1575                                 goto skip_page;
1576                         }
1577                         break;
1578                 }
1579
1580                 /*
1581                  * If the page appears to be clean at the machine-independent
1582                  * layer, then remove all of its mappings from the pmap in
1583                  * anticipation of freeing it.  If, however, any of the page's
1584                  * mappings allow write access, then the page may still be
1585                  * modified until the last of those mappings are removed.
1586                  */
1587                 if (object->ref_count != 0) {
1588                         vm_page_test_dirty(m);
1589                         if (m->dirty == 0 && !vm_page_try_remove_all(m))
1590                                 goto skip_page;
1591                 }
1592
1593                 /*
1594                  * Clean pages can be freed, but dirty pages must be sent back
1595                  * to the laundry, unless they belong to a dead object.
1596                  * Requeueing dirty pages from dead objects is pointless, as
1597                  * they are being paged out and freed by the thread that
1598                  * destroyed the object.
1599                  */
1600                 if (m->dirty == 0) {
1601 free_page:
1602                         /*
1603                          * Now we are guaranteed that no other threads are
1604                          * manipulating the page; check for a last-second
1605                          * reference that would save it from doom.
1606                          */
1607                         if (vm_pageout_defer(m, PQ_INACTIVE, false))
1608                                 goto skip_page;
1609
1610                         /*
1611                          * Because we dequeued the page and have already checked
1612                          * for pending dequeue and enqueue requests, we can
1613                          * safely disassociate the page from the inactive queue
1614                          * without holding the queue lock.
1615                          */
1616                         m->a.queue = PQ_NONE;
1617                         vm_page_free(m);
1618                         page_shortage--;
1619                         continue;
1620                 }
1621                 if ((object->flags & OBJ_DEAD) == 0)
1622                         vm_page_launder(m);
1623 skip_page:
1624                 vm_page_xunbusy(m);
1625                 continue;
1626 reinsert:
1627                 vm_pageout_reinsert_inactive(&ss, &rq, m);
1628         }
1629         if (object != NULL)
1630                 VM_OBJECT_WUNLOCK(object);
1631         vm_pageout_reinsert_inactive(&ss, &rq, NULL);
1632         vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
1633         vm_pagequeue_lock(pq);
1634         vm_pageout_end_scan(&ss);
1635         vm_pagequeue_unlock(pq);
1636
1637         /*
1638          * Record the remaining shortage, the progress made, and the rate of progress.
1639          */
1640         atomic_add_int(&vmd->vmd_addl_shortage, addl_page_shortage);
1641         getmicrouptime(&end);
1642         timevalsub(&end, &start);
1643         atomic_add_int(&vmd->vmd_inactive_us,
1644             end.tv_sec * 1000000 + end.tv_usec);
1645         atomic_add_int(&vmd->vmd_inactive_freed,
1646             starting_page_shortage - page_shortage);
1647 }
1648
1649 /*
1650  * Dispatch a number of inactive threads according to load and collect the
1651  * results to present a coherent view of paging activity on this domain.
1652  */
1653 static int
1654 vm_pageout_inactive_dispatch(struct vm_domain *vmd, int shortage)
1655 {
1656         u_int freed, pps, slop, threads, us;
1657
1658         vmd->vmd_inactive_shortage = shortage;
1659         slop = 0;
1660
1661         /*
1662          * If we have more work than we can do in a quarter of our interval, we
1663          * fire off multiple threads to process it.
1664          */
1665         threads = vmd->vmd_inactive_threads;
1666         if (threads > 1 && vmd->vmd_inactive_pps != 0 &&
1667             shortage > vmd->vmd_inactive_pps / VM_INACT_SCAN_RATE / 4) {
1668                 vmd->vmd_inactive_shortage /= threads;
1669                 slop = shortage % threads;
1670                 vm_domain_pageout_lock(vmd);
1671                 blockcount_acquire(&vmd->vmd_inactive_starting, threads - 1);
1672                 blockcount_acquire(&vmd->vmd_inactive_running, threads - 1);
1673                 wakeup(&vmd->vmd_inactive_shortage);
1674                 vm_domain_pageout_unlock(vmd);
1675         }
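        /*
         * Illustrative numbers: with threads = 3 and shortage = 1000, each
         * thread is asked to reclaim 333 pages and slop = 1, so the local
         * scan below handles 334 pages while the two helpers handle 333 each.
         */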
1676
1677         /* Run the local thread scan. */
1678         vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage + slop);
1679
1680         /*
1681          * Block until helper threads report results and then accumulate
1682          * totals.
1683          */
1684         blockcount_wait(&vmd->vmd_inactive_running, NULL, "vmpoid", PVM);
1685         freed = atomic_readandclear_int(&vmd->vmd_inactive_freed);
1686         VM_CNT_ADD(v_dfree, freed);
1687
1688         /*
1689          * Calculate the per-thread paging rate with an exponential decay of
1690          * prior results.  Careful to avoid integer rounding errors with large
1691          * us values.
1692          */
1693         us = max(atomic_readandclear_int(&vmd->vmd_inactive_us), 1);
1694         if (us > 1000000)
1695                 /* Keep rounding to tenths */
1696                 pps = (freed * 10) / ((us * 10) / 1000000);
1697         else
1698                 pps = (1000000 / us) * freed;
1699         vmd->vmd_inactive_pps = (vmd->vmd_inactive_pps / 2) + (pps / 2);
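        /*
         * Illustrative numbers: freeing 5000 pages in 250000us gives
         * pps = (1000000 / 250000) * 5000 = 20000; averaged with a prior
         * vmd_inactive_pps of 10000, the new estimate is 15000.
         */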
1700
1701         return (shortage - freed);
1702 }
1703
1704 /*
1705  * Attempt to reclaim the requested number of pages from the inactive queue.
1706  * Returns true if the shortage was addressed.
1707  */
1708 static int
1709 vm_pageout_inactive(struct vm_domain *vmd, int shortage, int *addl_shortage)
1710 {
1711         struct vm_pagequeue *pq;
1712         u_int addl_page_shortage, deficit, page_shortage;
1713         u_int starting_page_shortage;
1714
1715         /*
1716          * vmd_pageout_deficit counts the number of pages requested in
1717          * allocations that failed because of a free page shortage.  We assume
1718          * that the allocations will be reattempted and thus include the deficit
1719          * in our scan target.
1720          */
1721         deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
1722         starting_page_shortage = shortage + deficit;
1723
1724         /*
1725          * Run the inactive scan on as many threads as is necessary.
1726          */
1727         page_shortage = vm_pageout_inactive_dispatch(vmd, starting_page_shortage);
1728         addl_page_shortage = atomic_readandclear_int(&vmd->vmd_addl_shortage);
1729
1730         /*
1731          * Wake up the laundry thread so that it can perform any needed
1732          * laundering.  If we didn't meet our target, we're in shortfall and
1733          * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1734          * swap devices are configured, the laundry thread has no work to do, so
1735          * don't bother waking it up.
1736          *
1737          * The laundry thread uses the number of inactive queue scans elapsed
1738          * since the last laundering to determine whether to launder again, so
1739          * keep count.
1740          */
1741         if (starting_page_shortage > 0) {
1742                 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1743                 vm_pagequeue_lock(pq);
1744                 if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1745                     (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1746                         if (page_shortage > 0) {
1747                                 vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
1748                                 VM_CNT_INC(v_pdshortfalls);
1749                         } else if (vmd->vmd_laundry_request !=
1750                             VM_LAUNDRY_SHORTFALL)
1751                                 vmd->vmd_laundry_request =
1752                                     VM_LAUNDRY_BACKGROUND;
1753                         wakeup(&vmd->vmd_laundry_request);
1754                 }
1755                 vmd->vmd_clean_pages_freed +=
1756                     starting_page_shortage - page_shortage;
1757                 vm_pagequeue_unlock(pq);
1758         }
1759
1760         /*
1761          * Wakeup the swapout daemon if we didn't free the targeted number of
1762          * pages.
1763          */
1764         if (page_shortage > 0)
1765                 vm_swapout_run();
1766
1767         /*
1768          * If the inactive queue scan fails repeatedly to meet its
1769          * target, kill the largest process.
1770          */
1771         vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1772
1773         /*
1774          * Reclaim pages by swapping out idle processes, if configured to do so.
1775          */
1776         vm_swapout_run_idle();
1777
1778         /*
1779          * See the description of addl_page_shortage above.
1780          */
1781         *addl_shortage = addl_page_shortage + deficit;
1782
1783         return (page_shortage <= 0);
1784 }
1785
1786 static int vm_pageout_oom_vote;
1787
1788 /*
1789  * The pagedaemon threads randomly select one to perform the
1790  * OOM.  Trying to kill processes before all pagedaemons have
1791  * failed to reach the free page target is premature.
1792  */
1793 static void
1794 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1795     int starting_page_shortage)
1796 {
1797         int old_vote;
1798
1799         if (starting_page_shortage <= 0 || starting_page_shortage !=
1800             page_shortage)
1801                 vmd->vmd_oom_seq = 0;
1802         else
1803                 vmd->vmd_oom_seq++;
1804         if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1805                 if (vmd->vmd_oom) {
1806                         vmd->vmd_oom = FALSE;
1807                         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1808                 }
1809                 return;
1810         }
1811
1812         /*
1813          * Reset the sequence count; the shortage must persist for another
1814          * vm_pageout_oom_seq passes before we vote for OOM again.
1815          */
1816         vmd->vmd_oom_seq = 0;
1817
1818         if (vmd->vmd_oom)
1819                 return;
1820
1821         vmd->vmd_oom = TRUE;
1822         old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1823         if (old_vote != vm_ndomains - 1)
1824                 return;
1825
1826         /*
1827          * The current pagedaemon thread is the last in the quorum to
1828          * start OOM.  Initiate the selection and signaling of the
1829          * victim.
1830          */
1831         vm_pageout_oom(VM_OOM_MEM);
1832
1833         /*
1834          * After one round of OOM terror, recall our vote.  On the
1835          * next pass, current pagedaemon would vote again if the low
1836          * memory condition is still there, due to vmd_oom being
1837          * false.
1838          */
1839         vmd->vmd_oom = FALSE;
1840         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1841 }
1842
1843 /*
1844  * The OOM killer is the page daemon's action of last resort when
1845  * memory allocation requests have been stalled for a prolonged period
1846  * of time because it cannot reclaim memory.  This function computes
1847  * the approximate number of physical pages that could be reclaimed if
1848  * the specified address space is destroyed.
1849  *
1850  * Private, anonymous memory owned by the address space is the
1851  * principal resource that we expect to recover after an OOM kill.
1852  * Since the physical pages mapped by the address space's COW entries
1853  * are typically shared pages, they are unlikely to be released and so
1854  * they are not counted.
1855  *
1856  * To get to the point where the page daemon runs the OOM killer, its
1857  * efforts to write-back vnode-backed pages may have stalled.  This
1858  * could be caused by a memory allocation deadlock in the write path
1859  * that might be resolved by an OOM kill.  Therefore, physical pages
1860  * belonging to vnode-backed objects are counted, because they might
1861  * be freed without being written out first if the address space holds
1862  * the last reference to an unlinked vnode.
1863  *
1864  * Similarly, physical pages belonging to OBJT_PHYS objects are
1865  * counted because the address space might hold the last reference to
1866  * the object.
1867  */
1868 static long
1869 vm_pageout_oom_pagecount(struct vmspace *vmspace)
1870 {
1871         vm_map_t map;
1872         vm_map_entry_t entry;
1873         vm_object_t obj;
1874         long res;
1875
1876         map = &vmspace->vm_map;
1877         KASSERT(!map->system_map, ("system map"));
1878         sx_assert(&map->lock, SA_LOCKED);
1879         res = 0;
1880         VM_MAP_ENTRY_FOREACH(entry, map) {
1881                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1882                         continue;
1883                 obj = entry->object.vm_object;
1884                 if (obj == NULL)
1885                         continue;
1886                 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
1887                     obj->ref_count != 1)
1888                         continue;
1889                 if (obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP ||
1890                     obj->type == OBJT_PHYS || obj->type == OBJT_VNODE ||
1891                     (obj->flags & OBJ_SWAP) != 0)
1892                         res += obj->resident_page_count;
1893         }
1894         return (res);
1895 }
1896
1897 static int vm_oom_ratelim_last;
1898 static int vm_oom_pf_secs = 10;
1899 SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0,
1900     "");
1901 static struct mtx vm_oom_ratelim_mtx;
1902
1903 void
1904 vm_pageout_oom(int shortage)
1905 {
1906         const char *reason;
1907         struct proc *p, *bigproc;
1908         vm_offset_t size, bigsize;
1909         struct thread *td;
1910         struct vmspace *vm;
1911         int now;
1912         bool breakout;
1913
1914         /*
1915          * For OOM requests originating from vm_fault(), there is a high
1916          * chance that a single large process faults simultaneously in
1917          * several threads.  Also, on an active system running many
1918          * medium-sized processes, such as a buildworld, all of them
1919          * could fault almost simultaneously.
1920          *
1921          * To avoid killing too many processes, rate-limit OOMs
1922          * initiated by vm_fault() time-outs on the waits for free
1923          * pages.
1924          */
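        /*
         * With the default vm_oom_pf_secs of 10, at most one page
         * fault-initiated (VM_OOM_MEM_PF) kill is attempted per ten-second
         * window; OOM requests from other sources are not rate-limited here.
         */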
1925         mtx_lock(&vm_oom_ratelim_mtx);
1926         now = ticks;
1927         if (shortage == VM_OOM_MEM_PF &&
1928             (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) {
1929                 mtx_unlock(&vm_oom_ratelim_mtx);
1930                 return;
1931         }
1932         vm_oom_ratelim_last = now;
1933         mtx_unlock(&vm_oom_ratelim_mtx);
1934
1935         /*
1936          * We keep the process bigproc locked once we find it to keep anyone
1937          * from messing with it; however, there is a possibility of
1938          * deadlock if process B is bigproc and one of its child processes
1939          * attempts to propagate a signal to B while we are waiting for
1940          * the lock of another process on the list.  To avoid this, we
1941          * don't block on the process lock but just skip any already-locked process.
1942          */
1943         bigproc = NULL;
1944         bigsize = 0;
1945         sx_slock(&allproc_lock);
1946         FOREACH_PROC_IN_SYSTEM(p) {
1947                 PROC_LOCK(p);
1948
1949                 /*
1950                  * If this is a system, protected or killed process, skip it.
1951                  */
1952                 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1953                     P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1954                     p->p_pid == 1 || P_KILLED(p) ||
1955                     (p->p_pid < 48 && swap_pager_avail != 0)) {
1956                         PROC_UNLOCK(p);
1957                         continue;
1958                 }
1959                 /*
1960                  * If the process is in a non-running type state,
1961                  * don't touch it.  Check all the threads individually.
1962                  */
1963                 breakout = false;
1964                 FOREACH_THREAD_IN_PROC(p, td) {
1965                         thread_lock(td);
1966                         if (!TD_ON_RUNQ(td) &&
1967                             !TD_IS_RUNNING(td) &&
1968                             !TD_IS_SLEEPING(td) &&
1969                             !TD_IS_SUSPENDED(td) &&
1970                             !TD_IS_SWAPPED(td)) {
1971                                 thread_unlock(td);
1972                                 breakout = true;
1973                                 break;
1974                         }
1975                         thread_unlock(td);
1976                 }
1977                 if (breakout) {
1978                         PROC_UNLOCK(p);
1979                         continue;
1980                 }
1981                 /*
1982                  * get the process size
1983                  */
1984                 vm = vmspace_acquire_ref(p);
1985                 if (vm == NULL) {
1986                         PROC_UNLOCK(p);
1987                         continue;
1988                 }
1989                 _PHOLD_LITE(p);
1990                 PROC_UNLOCK(p);
1991                 sx_sunlock(&allproc_lock);
1992                 if (!vm_map_trylock_read(&vm->vm_map)) {
1993                         vmspace_free(vm);
1994                         sx_slock(&allproc_lock);
1995                         PRELE(p);
1996                         continue;
1997                 }
1998                 size = vmspace_swap_count(vm);
1999                 if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF)
2000                         size += vm_pageout_oom_pagecount(vm);
2001                 vm_map_unlock_read(&vm->vm_map);
2002                 vmspace_free(vm);
2003                 sx_slock(&allproc_lock);
2004
2005                 /*
2006                  * If this process is bigger than the biggest one,
2007                  * remember it.
2008                  */
2009                 if (size > bigsize) {
2010                         if (bigproc != NULL)
2011                                 PRELE(bigproc);
2012                         bigproc = p;
2013                         bigsize = size;
2014                 } else {
2015                         PRELE(p);
2016                 }
2017         }
2018         sx_sunlock(&allproc_lock);
2019
2020         if (bigproc != NULL) {
2021                 switch (shortage) {
2022                 case VM_OOM_MEM:
2023                         reason = "failed to reclaim memory";
2024                         break;
2025                 case VM_OOM_MEM_PF:
2026                         reason = "a thread waited too long to allocate a page";
2027                         break;
2028                 case VM_OOM_SWAPZ:
2029                         reason = "out of swap space";
2030                         break;
2031                 default:
2032                         panic("unknown OOM reason %d", shortage);
2033                 }
2034                 if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0)
2035                         panic("%s", reason);
2036                 PROC_LOCK(bigproc);
2037                 killproc(bigproc, reason);
2038                 sched_nice(bigproc, PRIO_MIN);
2039                 _PRELE(bigproc);
2040                 PROC_UNLOCK(bigproc);
2041         }
2042 }
2043
2044 /*
2045  * Signal a free page shortage to subsystems that have registered an event
2046  * handler.  Reclaim memory from UMA in the event of a severe shortage.
2047  * Return true if the free page count should be re-evaluated.
2048  */
2049 static bool
2050 vm_pageout_lowmem(void)
2051 {
2052         static int lowmem_ticks = 0;
2053         int last;
2054         bool ret;
2055
2056         ret = false;
2057
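        /*
         * Only the thread that wins the fcmpset race below runs the lowmem
         * handlers for this lowmem_period interval; a loser retries with the
         * updated timestamp and normally falls out of the loop.
         */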
2058         last = atomic_load_int(&lowmem_ticks);
2059         while ((u_int)(ticks - last) / hz >= lowmem_period) {
2060                 if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
2061                         continue;
2062
2063                 /*
2064                  * Decrease registered cache sizes.
2065                  */
2066                 SDT_PROBE0(vm, , , vm__lowmem_scan);
2067                 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
2068
2069                 /*
2070                  * Trim UMA caches explicitly, now that the lowmem handlers
2071                  * above have released what they can.
2072                  */
2073                 uma_reclaim(UMA_RECLAIM_TRIM);
2074                 ret = true;
2075                 break;
2076         }
2077
2078         /*
2079          * Kick off an asynchronous reclaim of cached memory if one of the
2080          * page daemons is failing to keep up with demand.  Use the "severe"
2081          * threshold instead of "min" to ensure that we do not blow away the
2082          * caches if a subset of the NUMA domains are depleted by kernel memory
2083          * allocations; the domainset iterators automatically skip domains
2084          * below the "min" threshold on the first pass.
2085          *
2086          * UMA reclaim worker has its own rate-limiting mechanism, so don't
2087          * worry about kicking it too often.
2088          */
2089         if (vm_page_count_severe())
2090                 uma_reclaim_wakeup();
2091
2092         return (ret);
2093 }
2094
2095 static void
2096 vm_pageout_worker(void *arg)
2097 {
2098         struct vm_domain *vmd;
2099         u_int ofree;
2100         int addl_shortage, domain, shortage;
2101         bool target_met;
2102
2103         domain = (uintptr_t)arg;
2104         vmd = VM_DOMAIN(domain);
2105         shortage = 0;
2106         target_met = true;
2107
2108         /*
2109          * XXXKIB It could be useful to bind pageout daemon threads to
2110          * the cores belonging to the domain, from which vm_page_array
2111          * the cores belonging to the domain from which vm_page_array
2112          */
2113
2114         KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
2115         vmd->vmd_last_active_scan = ticks;
2116
2117         /*
2118          * The pageout daemon worker is never done, so loop forever.
2119          */
2120         while (TRUE) {
2121                 vm_domain_pageout_lock(vmd);
2122
2123                 /*
2124                  * We need to clear wanted before we check the limits.  This
2125                  * prevents races with wakers who will check wanted after they
2126                  * reach the limit.
2127                  */
2128                 atomic_store_int(&vmd->vmd_pageout_wanted, 0);
2129
2130                 /*
2131                  * Might the page daemon need to run again?
2132                  */
2133                 if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
2134                         /*
2135                          * Yes.  If the scan failed to produce enough free
2136                          * pages, sleep uninterruptibly for some time in the
2137                          * hope that the laundry thread will clean some pages.
2138                          */
2139                         vm_domain_pageout_unlock(vmd);
2140                         if (!target_met)
2141                                 pause("pwait", hz / VM_INACT_SCAN_RATE);
2142                 } else {
2143                         /*
2144                          * No, sleep until the next wakeup or until pages
2145                          * need to have their reference stats updated.
2146                          */
2147                         if (mtx_sleep(&vmd->vmd_pageout_wanted,
2148                             vm_domain_pageout_lockptr(vmd), PDROP | PVM,
2149                             "psleep", hz / VM_INACT_SCAN_RATE) == 0)
2150                                 VM_CNT_INC(v_pdwakeups);
2151                 }
2152
2153                 /* Prevent spurious wakeups by ensuring that wanted is set. */
2154                 atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2155
2156                 /*
2157                  * Use the controller to calculate how many pages to free in
2158                  * this interval, and scan the inactive queue.  If the lowmem
2159                  * handlers appear to have freed up some pages, subtract the
2160                  * difference from the inactive queue scan target.
2161                  */
2162                 shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
2163                 if (shortage > 0) {
2164                         ofree = vmd->vmd_free_count;
2165                         if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
2166                                 shortage -= min(vmd->vmd_free_count - ofree,
2167                                     (u_int)shortage);
2168                         target_met = vm_pageout_inactive(vmd, shortage,
2169                             &addl_shortage);
2170                 } else
2171                         addl_shortage = 0;
2172
2173                 /*
2174                  * Scan the active queue.  A positive value for shortage
2175                  * indicates that we must aggressively deactivate pages to avoid
2176                  * a shortfall.
2177                  */
2178                 shortage = vm_pageout_active_target(vmd) + addl_shortage;
2179                 vm_pageout_scan_active(vmd, shortage);
2180         }
2181 }
2182
2183 /*
2184  * vm_pageout_helper runs additional pageout daemons in times of high paging
2185  * activity.
2186  */
2187 static void
2188 vm_pageout_helper(void *arg)
2189 {
2190         struct vm_domain *vmd;
2191         int domain;
2192
2193         domain = (uintptr_t)arg;
2194         vmd = VM_DOMAIN(domain);
2195
2196         vm_domain_pageout_lock(vmd);
2197         for (;;) {
2198                 msleep(&vmd->vmd_inactive_shortage,
2199                     vm_domain_pageout_lockptr(vmd), PVM, "psleep", 0);
2200                 blockcount_release(&vmd->vmd_inactive_starting, 1);
2201
2202                 vm_domain_pageout_unlock(vmd);
2203                 vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage);
2204                 vm_domain_pageout_lock(vmd);
2205
2206                 /*
2207                  * Release the running count while the pageout lock is held to
2208                  * prevent wakeup races.
2209                  */
2210                 blockcount_release(&vmd->vmd_inactive_running, 1);
2211         }
2212 }
2213
2214 static int
2215 get_pageout_threads_per_domain(const struct vm_domain *vmd)
2216 {
2217         unsigned total_pageout_threads, eligible_cpus, domain_cpus;
2218
2219         if (VM_DOMAIN_EMPTY(vmd->vmd_domain))
2220                 return (0);
2221
2222         /*
2223          * Semi-arbitrarily constrain pagedaemon threads to roughly half the
2224          * total number of CPUs in the system as an upper limit.
2225          */
2226         if (pageout_cpus_per_thread < 2)
2227                 pageout_cpus_per_thread = 2;
2228         else if (pageout_cpus_per_thread > mp_ncpus)
2229                 pageout_cpus_per_thread = mp_ncpus;
2230
2231         total_pageout_threads = howmany(mp_ncpus, pageout_cpus_per_thread);
2232         domain_cpus = CPU_COUNT(&cpuset_domain[vmd->vmd_domain]);
2233
2234         /* Pagedaemons are not run in empty domains. */
2235         eligible_cpus = mp_ncpus;
2236         for (unsigned i = 0; i < vm_ndomains; i++)
2237                 if (VM_DOMAIN_EMPTY(i))
2238                         eligible_cpus -= CPU_COUNT(&cpuset_domain[i]);
2239
2240         /*
2241          * Assign a portion of the total pageout threads to this domain
2242          * corresponding to the fraction of pagedaemon-eligible CPUs in the
2243          * domain.  In asymmetric NUMA systems, domains with more CPUs may be
2244          * allocated more threads than domains with fewer CPUs.
2245          */
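        /*
         * Illustrative numbers, assuming pageout_cpus_per_thread is at its
         * default of 16: a 32-CPU system gets 2 pageout threads in total; a
         * domain owning 24 of the 32 eligible CPUs receives
         * howmany(2 * 24, 32) = 2 of them, and the other domain
         * howmany(2 * 8, 32) = 1.
         */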
2246         return (howmany(total_pageout_threads * domain_cpus, eligible_cpus));
2247 }
2248
2249 /*
2250  * Initialize basic pageout daemon settings.  See the comment above the
2251  * definition of vm_domain for some explanation of how these thresholds are
2252  * used.
2253  */
2254 static void
2255 vm_pageout_init_domain(int domain)
2256 {
2257         struct vm_domain *vmd;
2258         struct sysctl_oid *oid;
2259
2260         vmd = VM_DOMAIN(domain);
2261         vmd->vmd_interrupt_free_min = 2;
2262
2263         /*
2264          * v_free_reserved needs to include enough for the largest
2265          * swap pager structures plus enough for any pv_entry structs
2266          * when paging.
2267          */
2268         vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
2269             vmd->vmd_interrupt_free_min;
2270         vmd->vmd_free_reserved = vm_pageout_page_count +
2271             vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768;
2272         vmd->vmd_free_min = vmd->vmd_page_count / 200;
2273         vmd->vmd_free_severe = vmd->vmd_free_min / 2;
2274         vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
2275         vmd->vmd_free_min += vmd->vmd_free_reserved;
2276         vmd->vmd_free_severe += vmd->vmd_free_reserved;
2277         vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
2278         if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
2279                 vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
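        /*
         * Illustrative numbers for a 4GB domain (1048576 4KB pages), assuming
         * a 64KB MAXBSIZE and the default vm_pageout_page_count of 32:
         * vmd_pageout_free_min = 34, vmd_free_reserved = 1431,
         * vmd_free_min = 5242 + 1431 = 6673, vmd_free_severe = 2621 + 1431 = 4052,
         * vmd_free_target = 4 * 5242 + 1431 = 22399 and
         * vmd_inactive_target = 33598 (subject to the free_count cap above).
         */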
2280
2281         /*
2282          * Set the default wakeup threshold to be 10% below the paging
2283          * target.  This keeps the steady state out of shortfall.
2284          */
2285         vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
2286
2287         /*
2288          * Target amount of memory to move out of the laundry queue during a
2289          * background laundering.  This is proportional to the amount of system
2290          * memory.
2291          */
2292         vmd->vmd_background_launder_target = (vmd->vmd_free_target -
2293             vmd->vmd_free_min) / 10;
2294
2295         /* Initialize the pageout daemon pid controller. */
2296         pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
2297             vmd->vmd_free_target, PIDCTRL_BOUND,
2298             PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
2299         oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
2300             "pidctrl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2301         pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
2302
2303         vmd->vmd_inactive_threads = get_pageout_threads_per_domain(vmd);
2304 }
2305
2306 static void
2307 vm_pageout_init(void)
2308 {
2309         u_long freecount;
2310         int i;
2311
2312         /*
2313          * Initialize some paging parameters.
2314          */
2315         if (vm_cnt.v_page_count < 2000)
2316                 vm_pageout_page_count = 8;
2317
2318         freecount = 0;
2319         for (i = 0; i < vm_ndomains; i++) {
2320                 struct vm_domain *vmd;
2321
2322                 vm_pageout_init_domain(i);
2323                 vmd = VM_DOMAIN(i);
2324                 vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
2325                 vm_cnt.v_free_target += vmd->vmd_free_target;
2326                 vm_cnt.v_free_min += vmd->vmd_free_min;
2327                 vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
2328                 vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
2329                 vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
2330                 vm_cnt.v_free_severe += vmd->vmd_free_severe;
2331                 freecount += vmd->vmd_free_count;
2332         }
2333
2334         /*
2335          * Set interval in seconds for active scan.  We want to visit each
2336          * page at least once every ten minutes.  This prevents worst-case
2337          * paging behavior caused by a stale active LRU.
2338          */
2339         if (vm_pageout_update_period == 0)
2340                 vm_pageout_update_period = 600;
2341
2342         /*
2343          * Set the maximum number of user-wired virtual pages.  Historically the
2344          * main source of such pages was mlock(2) and mlockall(2).  Hypervisors
2345          * may also request user-wired memory.
2346          */
2347         if (vm_page_max_user_wired == 0)
2348                 vm_page_max_user_wired = 4 * freecount / 5;
2349 }
2350
2351 /*
2352  *     vm_pageout is the high level pageout daemon.
2353  */
2354 static void
2355 vm_pageout(void)
2356 {
2357         struct proc *p;
2358         struct thread *td;
2359         int error, first, i, j, pageout_threads;
2360
2361         p = curproc;
2362         td = curthread;
2363
2364         mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
2365         swap_pager_swap_init();
2366         for (first = -1, i = 0; i < vm_ndomains; i++) {
2367                 if (VM_DOMAIN_EMPTY(i)) {
2368                         if (bootverbose)
2369                                 printf("domain %d empty; skipping pageout\n",
2370                                     i);
2371                         continue;
2372                 }
2373                 if (first == -1)
2374                         first = i;
2375                 else {
2376                         error = kthread_add(vm_pageout_worker,
2377                             (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
2378                         if (error != 0)
2379                                 panic("starting pageout for domain %d: %d\n",
2380                                     i, error);
2381                 }
2382                 pageout_threads = VM_DOMAIN(i)->vmd_inactive_threads;
2383                 for (j = 0; j < pageout_threads - 1; j++) {
2384                         error = kthread_add(vm_pageout_helper,
2385                             (void *)(uintptr_t)i, p, NULL, 0, 0,
2386                             "dom%d helper%d", i, j);
2387                         if (error != 0)
2388                                 panic("starting pageout helper %d for domain "
2389                                     "%d: %d\n", j, i, error);
2390                 }
2391                 error = kthread_add(vm_pageout_laundry_worker,
2392                     (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
2393                 if (error != 0)
2394                         panic("starting laundry for domain %d: %d", i, error);
2395         }
2396         error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
2397         if (error != 0)
2398                 panic("starting uma_reclaim helper, error %d\n", error);
2399
2400         snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
2401         vm_pageout_worker((void *)(uintptr_t)first);
2402 }
2403
2404 /*
2405  * Perform an advisory wakeup of the page daemon.
2406  */
2407 void
2408 pagedaemon_wakeup(int domain)
2409 {
2410         struct vm_domain *vmd;
2411
2412         vmd = VM_DOMAIN(domain);
2413         vm_domain_pageout_assert_unlocked(vmd);
2414         if (curproc == pageproc)
2415                 return;
2416
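        /*
         * Only the caller that transitions vmd_pageout_wanted from zero to
         * nonzero takes the pageout lock and delivers the wakeup; concurrent
         * callers observe a nonzero count and return immediately.
         */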
2417         if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
2418                 vm_domain_pageout_lock(vmd);
2419                 atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2420                 wakeup(&vmd->vmd_pageout_wanted);
2421                 vm_domain_pageout_unlock(vmd);
2422         }
2423 }