vm_pageout: Scale worker threads with CPUs
[FreeBSD/FreeBSD.git] / sys/vm/vm_pageout.c
1 /*-
2  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2005 Yahoo! Technologies Norway AS
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to Berkeley by
14  * The Mach Operating System project at Carnegie-Mellon University.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *      This product includes software developed by the University of
27  *      California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
45  *
46  *
47  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
48  * All rights reserved.
49  *
50  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
51  *
52  * Permission to use, copy, modify and distribute this software and
53  * its documentation is hereby granted, provided that both the copyright
54  * notice and this permission notice appear in all copies of the
55  * software, derivative works or modified versions, and any portions
56  * thereof, and that both notices appear in supporting documentation.
57  *
58  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61  *
62  * Carnegie Mellon requests users of this software to return to
63  *
64  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65  *  School of Computer Science
66  *  Carnegie Mellon University
67  *  Pittsburgh PA 15213-3890
68  *
69  * any improvements or extensions that they make and grant Carnegie the
70  * rights to redistribute these changes.
71  */
72
73 /*
74  *      The proverbial page-out daemon.
75  */
76
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
79
80 #include "opt_vm.h"
81
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/kernel.h>
85 #include <sys/blockcount.h>
86 #include <sys/eventhandler.h>
87 #include <sys/lock.h>
88 #include <sys/mutex.h>
89 #include <sys/proc.h>
90 #include <sys/kthread.h>
91 #include <sys/ktr.h>
92 #include <sys/mount.h>
93 #include <sys/racct.h>
94 #include <sys/resourcevar.h>
95 #include <sys/sched.h>
96 #include <sys/sdt.h>
97 #include <sys/signalvar.h>
98 #include <sys/smp.h>
99 #include <sys/time.h>
100 #include <sys/vnode.h>
101 #include <sys/vmmeter.h>
102 #include <sys/rwlock.h>
103 #include <sys/sx.h>
104 #include <sys/sysctl.h>
105
106 #include <vm/vm.h>
107 #include <vm/vm_param.h>
108 #include <vm/vm_object.h>
109 #include <vm/vm_page.h>
110 #include <vm/vm_map.h>
111 #include <vm/vm_pageout.h>
112 #include <vm/vm_pager.h>
113 #include <vm/vm_phys.h>
114 #include <vm/vm_pagequeue.h>
115 #include <vm/swap_pager.h>
116 #include <vm/vm_extern.h>
117 #include <vm/uma.h>
118
119 /*
120  * System initialization
121  */
122
123 /* the kernel process "vm_pageout"*/
124 static void vm_pageout(void);
125 static void vm_pageout_init(void);
126 static int vm_pageout_clean(vm_page_t m, int *numpagedout);
127 static int vm_pageout_cluster(vm_page_t m);
128 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
129     int starting_page_shortage);
130
131 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
132     NULL);
133
134 struct proc *pageproc;
135
136 static struct kproc_desc page_kp = {
137         "pagedaemon",
138         vm_pageout,
139         &pageproc
140 };
141 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
142     &page_kp);
143
144 SDT_PROVIDER_DEFINE(vm);
145 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
146
147 /* Pagedaemon activity rates, in subdivisions of one second. */
148 #define VM_LAUNDER_RATE         10
149 #define VM_INACT_SCAN_RATE      10
150
151 static int vm_pageout_oom_seq = 12;
152
153 static int vm_pageout_update_period;
154 static int disable_swap_pageouts;
155 static int lowmem_period = 10;
156 static int swapdev_enabled;
157
158 static int vm_panic_on_oom = 0;
159
160 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
161         CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
162         "Panic on the given number of out-of-memory errors instead of killing the largest process");
163
164 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
165         CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
166         "Maximum active LRU update period");
167
168 static int pageout_cpus_per_thread = 16;
169 SYSCTL_INT(_vm, OID_AUTO, pageout_cpus_per_thread, CTLFLAG_RDTUN,
170     &pageout_cpus_per_thread, 0,
171     "Number of CPUs per pagedaemon worker thread");
172   
173 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
174         "Low memory callback period");
175
176 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
177         CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
178
179 static int pageout_lock_miss;
180 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
181         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
182
183 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
184         CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
185         "back-to-back calls to oom detector to start OOM");
186
187 static int act_scan_laundry_weight = 3;
188 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
189     &act_scan_laundry_weight, 0,
190     "weight given to clean vs. dirty pages in active queue scans");
191
192 static u_int vm_background_launder_rate = 4096;
193 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
194     &vm_background_launder_rate, 0,
195     "background laundering rate, in kilobytes per second");
196
197 static u_int vm_background_launder_max = 20 * 1024;
198 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
199     &vm_background_launder_max, 0, "background laundering cap, in kilobytes");
200
201 int vm_pageout_page_count = 32;
202
203 u_long vm_page_max_user_wired;
204 SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
205     &vm_page_max_user_wired, 0,
206     "system-wide limit to user-wired page count");
207
208 static u_int isqrt(u_int num);
209 static int vm_pageout_launder(struct vm_domain *vmd, int launder,
210     bool in_shortfall);
211 static void vm_pageout_laundry_worker(void *arg);
212
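/*
 * A scan_state ties together the page queue being scanned, a marker page that
 * records the scan position while the queue lock is dropped, a batch queue of
 * pages collected for processing without the queue lock, and the scan budget
 * (maxscan) and progress (scanned) counters maintained by the helpers below.
 */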
213 struct scan_state {
214         struct vm_batchqueue bq;
215         struct vm_pagequeue *pq;
216         vm_page_t       marker;
217         int             maxscan;
218         int             scanned;
219 };
220
221 static void
222 vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
223     vm_page_t marker, vm_page_t after, int maxscan)
224 {
225
226         vm_pagequeue_assert_locked(pq);
227         KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
228             ("marker %p already enqueued", marker));
229
230         if (after == NULL)
231                 TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
232         else
233                 TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
234         vm_page_aflag_set(marker, PGA_ENQUEUED);
235
236         vm_batchqueue_init(&ss->bq);
237         ss->pq = pq;
238         ss->marker = marker;
239         ss->maxscan = maxscan;
240         ss->scanned = 0;
241         vm_pagequeue_unlock(pq);
242 }
243
244 static void
245 vm_pageout_end_scan(struct scan_state *ss)
246 {
247         struct vm_pagequeue *pq;
248
249         pq = ss->pq;
250         vm_pagequeue_assert_locked(pq);
251         KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
252             ("marker %p not enqueued", ss->marker));
253
254         TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
255         vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
256         pq->pq_pdpages += ss->scanned;
257 }
258
259 /*
260  * Add a small number of queued pages to a batch queue for later processing
261  * without the corresponding queue lock held.  The caller must have enqueued a
262  * marker page at the desired start point for the scan.  Pages will be
263  * physically dequeued if the caller so requests.  Otherwise, the returned
264  * batch may contain marker pages, and it is up to the caller to handle them.
265  *
266  * When processing the batch queue, vm_pageout_defer() must be used to
267  * determine whether the page has been logically dequeued since the batch was
268  * collected.
269  */
270 static __always_inline void
271 vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
272 {
273         struct vm_pagequeue *pq;
274         vm_page_t m, marker, n;
275
276         marker = ss->marker;
277         pq = ss->pq;
278
279         KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
280             ("marker %p not enqueued", ss->marker));
281
282         vm_pagequeue_lock(pq);
283         for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
284             ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
285             m = n, ss->scanned++) {
286                 n = TAILQ_NEXT(m, plinks.q);
287                 if ((m->flags & PG_MARKER) == 0) {
288                         KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
289                             ("page %p not enqueued", m));
290                         KASSERT((m->flags & PG_FICTITIOUS) == 0,
291                             ("Fictitious page %p cannot be in page queue", m));
292                         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
293                             ("Unmanaged page %p cannot be in page queue", m));
294                 } else if (dequeue)
295                         continue;
296
297                 (void)vm_batchqueue_insert(&ss->bq, m);
298                 if (dequeue) {
299                         TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
300                         vm_page_aflag_clear(m, PGA_ENQUEUED);
301                 }
302         }
303         TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
304         if (__predict_true(m != NULL))
305                 TAILQ_INSERT_BEFORE(m, marker, plinks.q);
306         else
307                 TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
308         if (dequeue)
309                 vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
310         vm_pagequeue_unlock(pq);
311 }
312
313 /*
314  * Return the next page to be scanned, or NULL if the scan is complete.
315  */
316 static __always_inline vm_page_t
317 vm_pageout_next(struct scan_state *ss, const bool dequeue)
318 {
319
320         if (ss->bq.bq_cnt == 0)
321                 vm_pageout_collect_batch(ss, dequeue);
322         return (vm_batchqueue_pop(&ss->bq));
323 }
324
325 /*
326  * Determine whether processing of a page should be deferred and ensure that any
327  * outstanding queue operations are processed.
328  */
329 static __always_inline bool
330 vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
331 {
332         vm_page_astate_t as;
333
334         as = vm_page_astate_load(m);
335         if (__predict_false(as.queue != queue ||
336             ((as.flags & PGA_ENQUEUED) != 0) != enqueued))
337                 return (true);
338         if ((as.flags & PGA_QUEUE_OP_MASK) != 0) {
339                 vm_page_pqbatch_submit(m, queue);
340                 return (true);
341         }
342         return (false);
343 }
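
/*
 * Taken together, the helpers above implement the scan pattern used by the
 * page queue scans in this file.  A minimal sketch of a caller, for
 * illustration only (the real scans below add object locking, busying and
 * per-page policy):
 *
 *      vm_pagequeue_lock(pq);
 *      vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 *      while ((m = vm_pageout_next(&ss, false)) != NULL) {
 *              if ((m->flags & PG_MARKER) != 0)
 *                      continue;
 *              if (vm_pageout_defer(m, queue, true))
 *                      continue;
 *              ... process the page ...
 *      }
 *      vm_pagequeue_lock(pq);
 *      vm_pageout_end_scan(&ss);
 *      vm_pagequeue_unlock(pq);
 */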
344
345 /*
346  * Scan for pages at adjacent offsets within the given page's object that are
347  * eligible for laundering, form a cluster of these pages and the given page,
348  * and launder that cluster.
349  */
350 static int
351 vm_pageout_cluster(vm_page_t m)
352 {
353         vm_object_t object;
354         vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
355         vm_pindex_t pindex;
356         int ib, is, page_base, pageout_count;
357
358         object = m->object;
359         VM_OBJECT_ASSERT_WLOCKED(object);
360         pindex = m->pindex;
361
362         vm_page_assert_xbusied(m);
363
364         mc[vm_pageout_page_count] = pb = ps = m;
365         pageout_count = 1;
366         page_base = vm_pageout_page_count;
367         ib = 1;
368         is = 1;
369
370         /*
371          * We can cluster only if the page is dirty, unbusied, not wired,
372          * and in the laundry queue.
373          *
374          * During heavy mmap/modification loads the pageout
375          * daemon can really fragment the underlying file
376          * due to flushing pages out of order and not trying to
377          * align the clusters (which leaves sporadic out-of-order
378          * holes).  To solve this problem we do the reverse scan
379          * first and attempt to align our cluster, then do a 
380          * forward scan if room remains.
381          */
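
        /*
         * For illustration: with the default vm_pageout_page_count of 32 and
         * a starting page at pindex 70, the reverse scan gathers 69, 68, ...
         * and stops after adding 64, an aligned boundary; the forward scan
         * may then extend the cluster up through pindex 95, yielding a
         * naturally aligned run of at most 32 pages (64-95), provided every
         * neighboring page qualifies.
         */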
382 more:
383         while (ib != 0 && pageout_count < vm_pageout_page_count) {
384                 if (ib > pindex) {
385                         ib = 0;
386                         break;
387                 }
388                 if ((p = vm_page_prev(pb)) == NULL ||
389                     vm_page_tryxbusy(p) == 0) {
390                         ib = 0;
391                         break;
392                 }
393                 if (vm_page_wired(p)) {
394                         ib = 0;
395                         vm_page_xunbusy(p);
396                         break;
397                 }
398                 vm_page_test_dirty(p);
399                 if (p->dirty == 0) {
400                         ib = 0;
401                         vm_page_xunbusy(p);
402                         break;
403                 }
404                 if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
405                         vm_page_xunbusy(p);
406                         ib = 0;
407                         break;
408                 }
409                 mc[--page_base] = pb = p;
410                 ++pageout_count;
411                 ++ib;
412
413                 /*
414                  * We are at an alignment boundary.  Stop here, and switch
415                  * directions.  Do not clear ib.
416                  */
417                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
418                         break;
419         }
420         while (pageout_count < vm_pageout_page_count && 
421             pindex + is < object->size) {
422                 if ((p = vm_page_next(ps)) == NULL ||
423                     vm_page_tryxbusy(p) == 0)
424                         break;
425                 if (vm_page_wired(p)) {
426                         vm_page_xunbusy(p);
427                         break;
428                 }
429                 vm_page_test_dirty(p);
430                 if (p->dirty == 0) {
431                         vm_page_xunbusy(p);
432                         break;
433                 }
434                 if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
435                         vm_page_xunbusy(p);
436                         break;
437                 }
438                 mc[page_base + pageout_count] = ps = p;
439                 ++pageout_count;
440                 ++is;
441         }
442
443         /*
444          * If we exhausted our forward scan, continue with the reverse scan
445          * when possible, even past an alignment boundary.  This catches
446          * boundary conditions.
447          */
448         if (ib != 0 && pageout_count < vm_pageout_page_count)
449                 goto more;
450
451         return (vm_pageout_flush(&mc[page_base], pageout_count,
452             VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
453 }
454
455 /*
456  * vm_pageout_flush() - launder the given pages
457  *
458  *      The given pages are laundered.  Note that we set up for the start of
459  *      I/O (i.e., busy the page), mark it read-only, and bump the object
460  *      reference count all in here rather than in the parent.  If we want
461  *      the parent to do more sophisticated things we may have to change
462  *      the ordering.
463  *
464  *      The returned runlen is the count of pages between mreq and the first
465  *      page after mreq with status VM_PAGER_AGAIN.
466  *      *eio is set to TRUE if the pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
467  *      for any page in the runlen set.
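 *
 *      For example, with count = 8 and mreq = 2, if the pager reports
 *      VM_PAGER_AGAIN for mc[5] and success for the other pages, the
 *      returned runlen is 3, counting mc[2] through mc[4].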
468  */
469 int
470 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
471     boolean_t *eio)
472 {
473         vm_object_t object = mc[0]->object;
474         int pageout_status[count];
475         int numpagedout = 0;
476         int i, runlen;
477
478         VM_OBJECT_ASSERT_WLOCKED(object);
479
480         /*
481          * Initiate I/O.  Mark the pages shared busy and verify that they're
482          * valid and read-only.
483          *
484          * We do not have to fixup the clean/dirty bits here... we can
485          * allow the pager to do it after the I/O completes.
486          *
487          * NOTE! mc[i]->dirty may be partial or fragmented due to an
488          * edge case with file fragments.
489          */
490         for (i = 0; i < count; i++) {
491                 KASSERT(vm_page_all_valid(mc[i]),
492                     ("vm_pageout_flush: partially invalid page %p index %d/%d",
493                         mc[i], i, count));
494                 KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
495                     ("vm_pageout_flush: writeable page %p", mc[i]));
496                 vm_page_busy_downgrade(mc[i]);
497         }
498         vm_object_pip_add(object, count);
499
500         vm_pager_put_pages(object, mc, count, flags, pageout_status);
501
502         runlen = count - mreq;
503         if (eio != NULL)
504                 *eio = FALSE;
505         for (i = 0; i < count; i++) {
506                 vm_page_t mt = mc[i];
507
508                 KASSERT(pageout_status[i] == VM_PAGER_PEND ||
509                     !pmap_page_is_write_mapped(mt),
510                     ("vm_pageout_flush: page %p is not write protected", mt));
511                 switch (pageout_status[i]) {
512                 case VM_PAGER_OK:
513                         /*
514                          * The page may have moved since laundering started, in
515                          * which case it should be left alone.
516                          */
517                         if (vm_page_in_laundry(mt))
518                                 vm_page_deactivate_noreuse(mt);
519                         /* FALLTHROUGH */
520                 case VM_PAGER_PEND:
521                         numpagedout++;
522                         break;
523                 case VM_PAGER_BAD:
524                         /*
525                          * The page is outside the object's range.  We pretend
526                          * that the page out worked and clean the page, so the
527                          * changes will be lost if the page is reclaimed by
528                          * the page daemon.
529                          */
530                         vm_page_undirty(mt);
531                         if (vm_page_in_laundry(mt))
532                                 vm_page_deactivate_noreuse(mt);
533                         break;
534                 case VM_PAGER_ERROR:
535                 case VM_PAGER_FAIL:
536                         /*
537                          * If the page couldn't be paged out to swap because the
538                          * pager wasn't able to find space, place the page in
539                          * the PQ_UNSWAPPABLE holding queue.  This is an
540                          * optimization that prevents the page daemon from
541                          * wasting CPU cycles on pages that cannot be reclaimed
542                          * because no swap device is configured.
543                          *
544                          * Otherwise, reactivate the page so that it doesn't
545                          * clog the laundry and inactive queues.  (We will try
546                          * paging it out again later.)
547                          */
548                         if (object->type == OBJT_SWAP &&
549                             pageout_status[i] == VM_PAGER_FAIL) {
550                                 vm_page_unswappable(mt);
551                                 numpagedout++;
552                         } else
553                                 vm_page_activate(mt);
554                         if (eio != NULL && i >= mreq && i - mreq < runlen)
555                                 *eio = TRUE;
556                         break;
557                 case VM_PAGER_AGAIN:
558                         if (i >= mreq && i - mreq < runlen)
559                                 runlen = i - mreq;
560                         break;
561                 }
562
563                 /*
564                  * If the operation is still going, leave the page busy to
565                  * block all other accesses. Also, leave the paging in
566                  * progress indicator set so that we don't attempt an object
567                  * collapse.
568                  */
569                 if (pageout_status[i] != VM_PAGER_PEND) {
570                         vm_object_pip_wakeup(object);
571                         vm_page_sunbusy(mt);
572                 }
573         }
574         if (prunlen != NULL)
575                 *prunlen = runlen;
576         return (numpagedout);
577 }
578
579 static void
580 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
581 {
582
583         atomic_store_rel_int(&swapdev_enabled, 1);
584 }
585
586 static void
587 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
588 {
589
590         if (swap_pager_nswapdev() == 1)
591                 atomic_store_rel_int(&swapdev_enabled, 0);
592 }
593
594 /*
595  * Attempt to acquire all of the necessary locks to launder a page and
596  * then call through the clustering layer to PUTPAGES.  Wait a short
597  * time for a vnode lock.
598  *
599  * Requires the page and object lock on entry, releases both before return.
600  * Returns 0 on success and an errno otherwise.
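 *
 * The errno values returned are: EDEADLK if writes could not be started on
 * the vnode's mount or the vnode lock could not be acquired in time, ENOENT
 * if the vnode and object were disassociated while the locks were dropped,
 * ENXIO if the page was requeued, cleaned or reused in the meantime, EBUSY
 * if the page could not be exclusively busied or its writeable mappings
 * could not be removed, and EIO if clustering failed to launder any pages.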
601  */
602 static int
603 vm_pageout_clean(vm_page_t m, int *numpagedout)
604 {
605         struct vnode *vp;
606         struct mount *mp;
607         vm_object_t object;
608         vm_pindex_t pindex;
609         int error, lockmode;
610
611         object = m->object;
612         VM_OBJECT_ASSERT_WLOCKED(object);
613         error = 0;
614         vp = NULL;
615         mp = NULL;
616
617         /*
618          * The object is already known NOT to be dead.   It
619          * is possible for the vget() to block the whole
620          * pageout daemon, but the new low-memory handling
621          * code should prevent it.
622          *
623  * We can't wait forever for the vnode lock; we might
624          * deadlock due to a vn_read() getting stuck in
625          * vm_wait while holding this vnode.  We skip the 
626          * vnode if we can't get it in a reasonable amount
627          * of time.
628          */
629         if (object->type == OBJT_VNODE) {
630                 vm_page_xunbusy(m);
631                 vp = object->handle;
632                 if (vp->v_type == VREG &&
633                     vn_start_write(vp, &mp, V_NOWAIT) != 0) {
634                         mp = NULL;
635                         error = EDEADLK;
636                         goto unlock_all;
637                 }
638                 KASSERT(mp != NULL,
639                     ("vp %p with NULL v_mount", vp));
640                 vm_object_reference_locked(object);
641                 pindex = m->pindex;
642                 VM_OBJECT_WUNLOCK(object);
643                 lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
644                     LK_SHARED : LK_EXCLUSIVE;
645                 if (vget(vp, lockmode | LK_TIMELOCK)) {
646                         vp = NULL;
647                         error = EDEADLK;
648                         goto unlock_mp;
649                 }
650                 VM_OBJECT_WLOCK(object);
651
652                 /*
653                  * Ensure that the object and vnode were not disassociated
654                  * while locks were dropped.
655                  */
656                 if (vp->v_object != object) {
657                         error = ENOENT;
658                         goto unlock_all;
659                 }
660
661                 /*
662                  * While the object was unlocked, the page may have been:
663                  * (1) moved to a different queue,
664                  * (2) reallocated to a different object,
665                  * (3) reallocated to a different offset, or
666                  * (4) cleaned.
667                  */
668                 if (!vm_page_in_laundry(m) || m->object != object ||
669                     m->pindex != pindex || m->dirty == 0) {
670                         error = ENXIO;
671                         goto unlock_all;
672                 }
673
674                 /*
675                  * The page may have been busied while the object lock was
676                  * released.
677                  */
678                 if (vm_page_tryxbusy(m) == 0) {
679                         error = EBUSY;
680                         goto unlock_all;
681                 }
682         }
683
684         /*
685          * Remove all writeable mappings, failing if the page is wired.
686          */
687         if (!vm_page_try_remove_write(m)) {
688                 vm_page_xunbusy(m);
689                 error = EBUSY;
690                 goto unlock_all;
691         }
692
693         /*
694          * If a page is dirty, then it is either being washed
695          * (but not yet cleaned) or it is still in the
696          * laundry.  If it is still in the laundry, then we
697          * start the cleaning operation. 
698          */
699         if ((*numpagedout = vm_pageout_cluster(m)) == 0)
700                 error = EIO;
701
702 unlock_all:
703         VM_OBJECT_WUNLOCK(object);
704
705 unlock_mp:
706         if (mp != NULL) {
707                 if (vp != NULL)
708                         vput(vp);
709                 vm_object_deallocate(object);
710                 vn_finished_write(mp);
711         }
712
713         return (error);
714 }
715
716 /*
717  * Attempt to launder the specified number of pages.
718  *
719  * Returns the number of pages successfully laundered.
720  */
721 static int
722 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
723 {
724         struct scan_state ss;
725         struct vm_pagequeue *pq;
726         vm_object_t object;
727         vm_page_t m, marker;
728         vm_page_astate_t new, old;
729         int act_delta, error, numpagedout, queue, refs, starting_target;
730         int vnodes_skipped;
731         bool pageout_ok;
732
733         object = NULL;
734         starting_target = launder;
735         vnodes_skipped = 0;
736
737         /*
738          * Scan the laundry queues for pages eligible to be laundered.  We stop
739          * once the target number of dirty pages have been laundered, or once
740          * we've reached the end of the queue.  A single iteration of this loop
741          * may cause more than one page to be laundered because of clustering.
742          *
743          * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
744          * swap devices are configured.
745          */
746         if (atomic_load_acq_int(&swapdev_enabled))
747                 queue = PQ_UNSWAPPABLE;
748         else
749                 queue = PQ_LAUNDRY;
750
751 scan:
752         marker = &vmd->vmd_markers[queue];
753         pq = &vmd->vmd_pagequeues[queue];
754         vm_pagequeue_lock(pq);
755         vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
756         while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
757                 if (__predict_false((m->flags & PG_MARKER) != 0))
758                         continue;
759
760                 /*
761                  * Don't touch a page that was removed from the queue after the
762                  * page queue lock was released.  Otherwise, ensure that any
763                  * pending queue operations, such as dequeues for wired pages,
764                  * are handled.
765                  */
766                 if (vm_pageout_defer(m, queue, true))
767                         continue;
768
769                 /*
770                  * Lock the page's object.
771                  */
772                 if (object == NULL || object != m->object) {
773                         if (object != NULL)
774                                 VM_OBJECT_WUNLOCK(object);
775                         object = atomic_load_ptr(&m->object);
776                         if (__predict_false(object == NULL))
777                                 /* The page is being freed by another thread. */
778                                 continue;
779
780                         /* Depends on type-stability. */
781                         VM_OBJECT_WLOCK(object);
782                         if (__predict_false(m->object != object)) {
783                                 VM_OBJECT_WUNLOCK(object);
784                                 object = NULL;
785                                 continue;
786                         }
787                 }
788
789                 if (vm_page_tryxbusy(m) == 0)
790                         continue;
791
792                 /*
793                  * Check for wirings now that we hold the object lock and have
794                  * exclusively busied the page.  If the page is mapped, it may
795                  * still be wired by pmap lookups.  The call to
796                  * vm_page_try_remove_all() below atomically checks for such
797                  * wirings and removes mappings.  If the page is unmapped, the
798                  * wire count is guaranteed not to increase after this check.
799                  */
800                 if (__predict_false(vm_page_wired(m)))
801                         goto skip_page;
802
803                 /*
804                  * Invalid pages can be easily freed.  They cannot be
805                  * mapped; vm_page_free() asserts this.
806                  */
807                 if (vm_page_none_valid(m))
808                         goto free_page;
809
810                 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
811
812                 for (old = vm_page_astate_load(m);;) {
813                         /*
814                          * Check to see if the page has been removed from the
815                          * queue since the first such check.  Leave it alone if
816                          * so, discarding any references collected by
817                          * pmap_ts_referenced().
818                          */
819                         if (__predict_false(_vm_page_queue(old) == PQ_NONE))
820                                 goto skip_page;
821
822                         new = old;
823                         act_delta = refs;
824                         if ((old.flags & PGA_REFERENCED) != 0) {
825                                 new.flags &= ~PGA_REFERENCED;
826                                 act_delta++;
827                         }
828                         if (act_delta == 0) {
829                                 ;
830                         } else if (object->ref_count != 0) {
831                                 /*
832                                  * Increase the activation count if the page was
833                                  * referenced while in the laundry queue.  This
834                                  * makes it less likely that the page will be
835                                  * returned prematurely to the laundry queue.
836                                  */
837                                 new.act_count += ACT_ADVANCE +
838                                     act_delta;
839                                 if (new.act_count > ACT_MAX)
840                                         new.act_count = ACT_MAX;
841
842                                 new.flags &= ~PGA_QUEUE_OP_MASK;
843                                 new.flags |= PGA_REQUEUE;
844                                 new.queue = PQ_ACTIVE;
845                                 if (!vm_page_pqstate_commit(m, &old, new))
846                                         continue;
847
848                                 /*
849                                  * If this was a background laundering, count
850                                  * activated pages towards our target.  The
851                                  * purpose of background laundering is to ensure
852                                  * that pages are eventually cycled through the
853                                  * laundry queue, and an activation is a valid
854                                  * way out.
855                                  */
856                                 if (!in_shortfall)
857                                         launder--;
858                                 VM_CNT_INC(v_reactivated);
859                                 goto skip_page;
860                         } else if ((object->flags & OBJ_DEAD) == 0) {
861                                 new.flags |= PGA_REQUEUE;
862                                 if (!vm_page_pqstate_commit(m, &old, new))
863                                         continue;
864                                 goto skip_page;
865                         }
866                         break;
867                 }
868
869                 /*
870                  * If the page appears to be clean at the machine-independent
871                  * layer, then remove all of its mappings from the pmap in
872                  * anticipation of freeing it.  If, however, any of the page's
873                  * mappings allow write access, then the page may still be
874                  * modified until the last of those mappings are removed.
875                  */
876                 if (object->ref_count != 0) {
877                         vm_page_test_dirty(m);
878                         if (m->dirty == 0 && !vm_page_try_remove_all(m))
879                                 goto skip_page;
880                 }
881
882                 /*
883                  * Clean pages are freed, and dirty pages are paged out unless
884                  * they belong to a dead object.  Requeueing dirty pages from
885                  * dead objects is pointless, as they are being paged out and
886                  * freed by the thread that destroyed the object.
887                  */
888                 if (m->dirty == 0) {
889 free_page:
890                         /*
891                          * Now we are guaranteed that no other threads are
892                          * manipulating the page, check for a last-second
893                          * reference.
894                          */
895                         if (vm_pageout_defer(m, queue, true))
896                                 goto skip_page;
897                         vm_page_free(m);
898                         VM_CNT_INC(v_dfree);
899                 } else if ((object->flags & OBJ_DEAD) == 0) {
900                         if (object->type != OBJT_SWAP &&
901                             object->type != OBJT_DEFAULT)
902                                 pageout_ok = true;
903                         else if (disable_swap_pageouts)
904                                 pageout_ok = false;
905                         else
906                                 pageout_ok = true;
907                         if (!pageout_ok) {
908                                 vm_page_launder(m);
909                                 goto skip_page;
910                         }
911
912                         /*
913                          * Form a cluster with adjacent, dirty pages from the
914                          * same object, and page out that entire cluster.
915                          *
916                          * The adjacent, dirty pages must also be in the
917                          * laundry.  However, their mappings are not checked
918                          * for new references.  Consequently, a recently
919                          * referenced page may be paged out.  However, that
920                          * page will not be prematurely reclaimed.  After page
921                          * out, the page will be placed in the inactive queue,
922                          * where any new references will be detected and the
923                          * page reactivated.
924                          */
925                         error = vm_pageout_clean(m, &numpagedout);
926                         if (error == 0) {
927                                 launder -= numpagedout;
928                                 ss.scanned += numpagedout;
929                         } else if (error == EDEADLK) {
930                                 pageout_lock_miss++;
931                                 vnodes_skipped++;
932                         }
933                         object = NULL;
934                 } else {
935 skip_page:
936                         vm_page_xunbusy(m);
937                 }
938         }
939         if (object != NULL) {
940                 VM_OBJECT_WUNLOCK(object);
941                 object = NULL;
942         }
943         vm_pagequeue_lock(pq);
944         vm_pageout_end_scan(&ss);
945         vm_pagequeue_unlock(pq);
946
947         if (launder > 0 && queue == PQ_UNSWAPPABLE) {
948                 queue = PQ_LAUNDRY;
949                 goto scan;
950         }
951
952         /*
953  * Wake up the sync daemon if we skipped a vnode in a writeable object
954          * and we didn't launder enough pages.
955          */
956         if (vnodes_skipped > 0 && launder > 0)
957                 (void)speedup_syncer();
958
959         return (starting_target - launder);
960 }
961
962 /*
963  * Compute the integer square root.
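 * This is the classic bit-by-bit algorithm: for example, isqrt(27) returns 5
 * and isqrt(99) returns 9.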
964  */
965 static u_int
966 isqrt(u_int num)
967 {
968         u_int bit, root, tmp;
969
970         bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
971         root = 0;
972         while (bit != 0) {
973                 tmp = root + bit;
974                 root >>= 1;
975                 if (num >= tmp) {
976                         num -= tmp;
977                         root += bit;
978                 }
979                 bit >>= 2;
980         }
981         return (root);
982 }
983
984 /*
985  * Perform the work of the laundry thread: periodically wake up, determine
986  * whether any pages need to be laundered and, if so, how many, and then
987  * launder them.
988  */
989 static void
990 vm_pageout_laundry_worker(void *arg)
991 {
992         struct vm_domain *vmd;
993         struct vm_pagequeue *pq;
994         uint64_t nclean, ndirty, nfreed;
995         int domain, last_target, launder, shortfall, shortfall_cycle, target;
996         bool in_shortfall;
997
998         domain = (uintptr_t)arg;
999         vmd = VM_DOMAIN(domain);
1000         pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1001         KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
1002
1003         shortfall = 0;
1004         in_shortfall = false;
1005         shortfall_cycle = 0;
1006         last_target = target = 0;
1007         nfreed = 0;
1008
1009         /*
1010          * Calls to these handlers are serialized by the swap syscall lock.
1011          */
1012         (void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
1013             EVENTHANDLER_PRI_ANY);
1014         (void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
1015             EVENTHANDLER_PRI_ANY);
1016
1017         /*
1018          * The pageout laundry worker is never done, so loop forever.
1019          */
1020         for (;;) {
1021                 KASSERT(target >= 0, ("negative target %d", target));
1022                 KASSERT(shortfall_cycle >= 0,
1023                     ("negative cycle %d", shortfall_cycle));
1024                 launder = 0;
1025
1026                 /*
1027                  * First determine whether we need to launder pages to meet a
1028                  * shortage of free pages.
1029                  */
1030                 if (shortfall > 0) {
1031                         in_shortfall = true;
1032                         shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
1033                         target = shortfall;
1034                 } else if (!in_shortfall)
1035                         goto trybackground;
1036                 else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
1037                         /*
1038                          * We recently entered shortfall and began laundering
1039                          * pages.  If we have completed that laundering run
1040                          * (and we are no longer in shortfall) or we have met
1041                          * our laundry target through other activity, then we
1042                          * can stop laundering pages.
1043                          */
1044                         in_shortfall = false;
1045                         target = 0;
1046                         goto trybackground;
1047                 }
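                /*
                 * Spread the remaining target over the remaining passes of
                 * this shortfall run.  With the rates defined above (both
                 * 10), shortfall_cycle starts at 1, so the entire target is
                 * attempted within a single hz / VM_LAUNDER_RATE interval.
                 */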
1048                 launder = target / shortfall_cycle--;
1049                 goto dolaundry;
1050
1051                 /*
1052                  * There's no immediate need to launder any pages; see if we
1053                  * meet the conditions to perform background laundering:
1054                  *
1055                  * 1. The ratio of dirty to clean inactive pages exceeds the
1056                  *    background laundering threshold, or
1057                  * 2. we haven't yet reached the target of the current
1058                  *    background laundering run.
1059                  *
1060                  * The background laundering threshold is not a constant.
1061                  * Instead, it is a slowly growing function of the number of
1062                  * clean pages freed by the page daemon since the last
1063                  * background laundering.  Thus, as the ratio of dirty to
1064                  * clean inactive pages grows, the amount of memory pressure
1065                  * required to trigger laundering decreases.  We ensure
1066                  * that the threshold is non-zero after an inactive queue
1067                  * scan, even if that scan failed to free a single clean page.
1068                  */
1069 trybackground:
1070                 nclean = vmd->vmd_free_count +
1071                     vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
1072                 ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
1073                 if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
1074                     vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
1075                         target = vmd->vmd_background_launder_target;
1076                 }
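
                /*
                 * For illustration, suppose vmd_free_target - vmd_free_min is
                 * 10000 pages.  With nfreed == 0, howmany(1, 10000) is 1 and
                 * isqrt(1) is 1, so background laundering does not start
                 * until ndirty >= nclean.  Once 90000 clean pages have been
                 * freed, howmany(90001, 10000) is 10 and isqrt(10) is 3, so
                 * a dirty:clean ratio of 1:3 is already enough to trigger it.
                 */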
1077
1078                 /*
1079                  * We have a non-zero background laundering target.  If we've
1080                  * laundered up to our maximum without observing a page daemon
1081                  * request, just stop.  This is a safety belt that ensures we
1082                  * don't launder an excessive amount if memory pressure is low
1083                  * and the ratio of dirty to clean pages is large.  Otherwise,
1084                  * proceed at the background laundering rate.
1085                  */
1086                 if (target > 0) {
1087                         if (nfreed > 0) {
1088                                 nfreed = 0;
1089                                 last_target = target;
1090                         } else if (last_target - target >=
1091                             vm_background_launder_max * PAGE_SIZE / 1024) {
1092                                 target = 0;
1093                         }
1094                         launder = vm_background_launder_rate * PAGE_SIZE / 1024;
1095                         launder /= VM_LAUNDER_RATE;
1096                         if (launder > target)
1097                                 launder = target;
1098                 }
1099
1100 dolaundry:
1101                 if (launder > 0) {
1102                         /*
1103                          * Because of I/O clustering, the number of laundered
1104                          * pages could exceed "target" by the maximum size of
1105                          * a cluster minus one. 
1106                          */
1107                         target -= min(vm_pageout_launder(vmd, launder,
1108                             in_shortfall), target);
1109                         pause("laundp", hz / VM_LAUNDER_RATE);
1110                 }
1111
1112                 /*
1113                  * If we're not currently laundering pages and the page daemon
1114                  * hasn't posted a new request, sleep until the page daemon
1115                  * kicks us.
1116                  */
1117                 vm_pagequeue_lock(pq);
1118                 if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
1119                         (void)mtx_sleep(&vmd->vmd_laundry_request,
1120                             vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1121
1122                 /*
1123                  * If the pagedaemon has indicated that it's in shortfall, start
1124                  * a shortfall laundering unless we're already in the middle of
1125                  * one.  This may preempt a background laundering.
1126                  */
1127                 if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
1128                     (!in_shortfall || shortfall_cycle == 0)) {
1129                         shortfall = vm_laundry_target(vmd) +
1130                             vmd->vmd_pageout_deficit;
1131                         target = 0;
1132                 } else
1133                         shortfall = 0;
1134
1135                 if (target == 0)
1136                         vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
1137                 nfreed += vmd->vmd_clean_pages_freed;
1138                 vmd->vmd_clean_pages_freed = 0;
1139                 vm_pagequeue_unlock(pq);
1140         }
1141 }
1142
1143 /*
1144  * Compute the number of pages we want to try to move from the
1145  * active queue to either the inactive or laundry queue.
1146  *
1147  * When scanning active pages during a shortage, we make clean pages
1148  * count more heavily towards the page shortage than dirty pages.
1149  * This is because dirty pages must be laundered before they can be
1150  * reused and thus have less utility when attempting to quickly
1151  * alleviate a free page shortage.  However, this weighting also
1152  * causes the scan to deactivate dirty pages more aggressively,
1153  * improving the effectiveness of clustering.
1154  */
1155 static int
1156 vm_pageout_active_target(struct vm_domain *vmd)
1157 {
1158         int shortage;
1159
1160         shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
1161             (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
1162             vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
1163         shortage *= act_scan_laundry_weight;
1164         return (shortage);
1165 }
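
/*
 * For illustration, suppose vmd_inactive_target plus vm_paging_target() comes
 * to 5000 pages, the inactive queue holds 1000 pages, and the laundry queue
 * holds 3000.  With act_scan_laundry_weight of 3, the laundry queue is
 * credited as 1000 pages, leaving a raw shortage of 3000, scaled to a target
 * of 9000: during the active scan, deactivating a clean page reduces the
 * shortage by 3 while moving a dirty page to the laundry reduces it by 1.
 */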
1166
1167 /*
1168  * Scan the active queue.  If there is no shortage of inactive pages, scan a
1169  * small portion of the queue in order to maintain quasi-LRU.
1170  */
1171 static void
1172 vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
1173 {
1174         struct scan_state ss;
1175         vm_object_t object;
1176         vm_page_t m, marker;
1177         struct vm_pagequeue *pq;
1178         vm_page_astate_t old, new;
1179         long min_scan;
1180         int act_delta, max_scan, ps_delta, refs, scan_tick;
1181         uint8_t nqueue;
1182
1183         marker = &vmd->vmd_markers[PQ_ACTIVE];
1184         pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1185         vm_pagequeue_lock(pq);
1186
1187         /*
1188          * If we're just idle polling, attempt to visit every
1189          * active page within 'update_period' seconds.
1190          */
1191         scan_tick = ticks;
1192         if (vm_pageout_update_period != 0) {
1193                 min_scan = pq->pq_cnt;
1194                 min_scan *= scan_tick - vmd->vmd_last_active_scan;
1195                 min_scan /= hz * vm_pageout_update_period;
1196         } else
1197                 min_scan = 0;
1198         if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
1199                 vmd->vmd_last_active_scan = scan_tick;
1200
1201         /*
1202          * Scan the active queue for pages that can be deactivated.  Update
1203          * the per-page activity counter and use it to identify deactivation
1204          * candidates.  Held pages may be deactivated.
1205          *
1206          * To avoid requeuing each page that remains in the active queue, we
1207          * implement the CLOCK algorithm.  To keep the implementation of the
1208          * enqueue operation consistent for all page queues, we use two hands,
1209          * represented by marker pages. Scans begin at the first hand, which
1210          * precedes the second hand in the queue.  When the two hands meet,
1211          * they are moved back to the head and tail of the queue, respectively,
1212          * and scanning resumes.
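         *
         * Conceptually:
         *
         *   head ... [clock[0]] -> unvisited pages -> [clock[1]] ... tail
         *
         * The scan marker is inserted just after clock[0] and advances toward
         * clock[1]; pages between the two hands are those not yet visited in
         * the current lap.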
1213          */
1214         max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
1215 act_scan:
1216         vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
1217         while ((m = vm_pageout_next(&ss, false)) != NULL) {
1218                 if (__predict_false(m == &vmd->vmd_clock[1])) {
1219                         vm_pagequeue_lock(pq);
1220                         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1221                         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
1222                         TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
1223                             plinks.q);
1224                         TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
1225                             plinks.q);
1226                         max_scan -= ss.scanned;
1227                         vm_pageout_end_scan(&ss);
1228                         goto act_scan;
1229                 }
1230                 if (__predict_false((m->flags & PG_MARKER) != 0))
1231                         continue;
1232
1233                 /*
1234                  * Don't touch a page that was removed from the queue after the
1235                  * page queue lock was released.  Otherwise, ensure that any
1236                  * pending queue operations, such as dequeues for wired pages,
1237                  * are handled.
1238                  */
1239                 if (vm_pageout_defer(m, PQ_ACTIVE, true))
1240                         continue;
1241
1242                 /*
1243                  * A page's object pointer may be set to NULL before
1244                  * the object lock is acquired.
1245                  */
1246                 object = atomic_load_ptr(&m->object);
1247                 if (__predict_false(object == NULL))
1248                         /*
1249                          * The page has been removed from its object.
1250                          */
1251                         continue;
1252
1253                 /* Deferred free of swap space. */
1254                 if ((m->a.flags & PGA_SWAP_FREE) != 0 &&
1255                     VM_OBJECT_TRYWLOCK(object)) {
1256                         if (m->object == object)
1257                                 vm_pager_page_unswapped(m);
1258                         VM_OBJECT_WUNLOCK(object);
1259                 }
1260
1261                 /*
1262                  * Check to see "how much" the page has been used.
1263                  *
1264                  * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1265                  * that a reference from a concurrently destroyed mapping is
1266                  * observed here and now.
1267                  *
1268                  * Perform an unsynchronized object ref count check.  While
1269                  * the page lock ensures that the page is not reallocated to
1270                  * another object, in particular, one with unmanaged mappings
1271                  * that cannot support pmap_ts_referenced(), two races are,
1272                  * nonetheless, possible:
1273                  * 1) The count was transitioning to zero, but we saw a non-
1274                  *    zero value.  pmap_ts_referenced() will return zero
1275                  *    because the page is not mapped.
1276                  * 2) The count was transitioning to one, but we saw zero.
1277                  *    This race delays the detection of a new reference.  At
1278                  *    worst, we will deactivate and reactivate the page.
1279                  */
1280                 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1281
1282                 old = vm_page_astate_load(m);
1283                 do {
1284                         /*
1285                          * Check to see if the page has been removed from the
1286                          * queue since the first such check.  Leave it alone if
1287                          * so, discarding any references collected by
1288                          * pmap_ts_referenced().
1289                          */
1290                         if (__predict_false(_vm_page_queue(old) == PQ_NONE))
1291                                 break;
1292
1293                         /*
1294                          * Advance or decay the act_count based on recent usage.
1295                          */
1296                         new = old;
1297                         act_delta = refs;
1298                         if ((old.flags & PGA_REFERENCED) != 0) {
1299                                 new.flags &= ~PGA_REFERENCED;
1300                                 act_delta++;
1301                         }
1302                         if (act_delta != 0) {
1303                                 new.act_count += ACT_ADVANCE + act_delta;
1304                                 if (new.act_count > ACT_MAX)
1305                                         new.act_count = ACT_MAX;
1306                         } else {
1307                                 new.act_count -= min(new.act_count,
1308                                     ACT_DECLINE);
1309                         }
1310
1311                         if (new.act_count > 0) {
1312                                 /*
1313                                  * Adjust the activation count and keep the page
1314                                  * in the active queue.  The count might be left
1315                                  * unchanged if it is saturated.  The page may
1316                                  * have been moved to a different queue since we
1317                                  * started the scan, in which case we move it
1318                                  * back.
1319                                  */
1320                                 ps_delta = 0;
1321                                 if (old.queue != PQ_ACTIVE) {
1322                                         new.flags &= ~PGA_QUEUE_OP_MASK;
1323                                         new.flags |= PGA_REQUEUE;
1324                                         new.queue = PQ_ACTIVE;
1325                                 }
1326                         } else {
1327                                 /*
1328                                  * When not short for inactive pages, let dirty
1329                                  * pages go through the inactive queue before
1330                                  * moving to the laundry queue.  This gives them
1331                                  * some extra time to be reactivated,
1332                                  * potentially avoiding an expensive pageout.
1333                                  * However, during a page shortage, the inactive
1334                                  * queue is necessarily small, and so dirty
1335                                  * pages would only spend a trivial amount of
1336                                  * time in the inactive queue.  Therefore, we
1337                                  * might as well place them directly in the
1338                                  * laundry queue to reduce queuing overhead.
1339                                  *
1340                                  * Calling vm_page_test_dirty() here would
1341                                  * require acquisition of the object's write
1342                                  * lock.  However, during a page shortage,
1343                                  * directing dirty pages into the laundry queue
1344                                  * is only an optimization and not a
1345                                  * requirement.  Therefore, we simply rely on
1346                                  * the opportunistic updates to the page's dirty
1347                                  * field by the pmap.
1348                                  */
1349                                 if (page_shortage <= 0) {
1350                                         nqueue = PQ_INACTIVE;
1351                                         ps_delta = 0;
1352                                 } else if (m->dirty == 0) {
1353                                         nqueue = PQ_INACTIVE;
1354                                         ps_delta = act_scan_laundry_weight;
1355                                 } else {
1356                                         nqueue = PQ_LAUNDRY;
1357                                         ps_delta = 1;
1358                                 }
1359
1360                                 new.flags &= ~PGA_QUEUE_OP_MASK;
1361                                 new.flags |= PGA_REQUEUE;
1362                                 new.queue = nqueue;
1363                         }
1364                 } while (!vm_page_pqstate_commit(m, &old, new));
1365
1366                 page_shortage -= ps_delta;
1367         }
1368         vm_pagequeue_lock(pq);
1369         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1370         TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
1371         vm_pageout_end_scan(&ss);
1372         vm_pagequeue_unlock(pq);
1373 }
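
/*
 * As a worked example of the accounting in the scan loop above, assuming the
 * conventional vm_page.h values ACT_ADVANCE == 3, ACT_DECLINE == 1 and
 * ACT_MAX == 64 (an assumption; see vm_page.h for the authoritative values):
 *
 *  - refs == 1 with PGA_REFERENCED set gives act_delta == 2, so act_count
 *    grows by 3 + 2 == 5 (clamped at 64); the page stays in, or is requeued
 *    to, PQ_ACTIVE and ps_delta == 0, leaving the shortage untouched.
 *  - refs == 0 with PGA_REFERENCED clear decays act_count by 1.  Once it
 *    reaches zero while a shortage persists, a clean page moves to PQ_INACTIVE
 *    and credits ps_delta == act_scan_laundry_weight, while a dirty page moves
 *    straight to PQ_LAUNDRY and credits ps_delta == 1; with no shortage, every
 *    such page goes to PQ_INACTIVE and ps_delta == 0.
 */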
1374
1375 static int
1376 vm_pageout_reinsert_inactive_page(struct vm_pagequeue *pq, vm_page_t marker,
1377     vm_page_t m)
1378 {
1379         vm_page_astate_t as;
1380
1381         vm_pagequeue_assert_locked(pq);
1382
1383         as = vm_page_astate_load(m);
1384         if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0)
1385                 return (0);
1386         vm_page_aflag_set(m, PGA_ENQUEUED);
1387         TAILQ_INSERT_BEFORE(marker, m, plinks.q);
1388         return (1);
1389 }
1390
1391 /*
1392  * Re-add stuck pages to the inactive queue.  We will examine them again
1393  * during the next scan.  If the queue state of a page has changed since
1394  * it was physically removed from the page queue in
1395  * vm_pageout_collect_batch(), don't do anything with that page.
1396  */
1397 static void
1398 vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
1399     vm_page_t m)
1400 {
1401         struct vm_pagequeue *pq;
1402         vm_page_t marker;
1403         int delta;
1404
1405         delta = 0;
1406         marker = ss->marker;
1407         pq = ss->pq;
1408
1409         if (m != NULL) {
1410                 if (vm_batchqueue_insert(bq, m))
1411                         return;
1412                 vm_pagequeue_lock(pq);
1413                 delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
1414         } else
1415                 vm_pagequeue_lock(pq);
1416         while ((m = vm_batchqueue_pop(bq)) != NULL)
1417                 delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
1418         vm_pagequeue_cnt_add(pq, delta);
1419         vm_pagequeue_unlock(pq);
1420         vm_batchqueue_init(bq);
1421 }
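
/*
 * A sketch of the intended calling pattern, as used by the inactive scan
 * below: pages that could not be processed are accumulated in a local
 * vm_batchqueue so that the page queue lock is taken once per batch rather
 * than once per page.
 *
 *	vm_pageout_reinsert_inactive(&ss, &rq, m);	deferred; flushed if full
 *	...
 *	vm_pageout_reinsert_inactive(&ss, &rq, NULL);	flush the remainder
 */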
1422
1423 static void
1424 vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
1425 {
1426         struct timeval start, end;
1427         struct scan_state ss;
1428         struct vm_batchqueue rq;
1429         struct vm_page marker_page;
1430         vm_page_t m, marker;
1431         struct vm_pagequeue *pq;
1432         vm_object_t object;
1433         vm_page_astate_t old, new;
1434         int act_delta, addl_page_shortage, starting_page_shortage, refs;
1435
1436         object = NULL;
1437         vm_batchqueue_init(&rq);
1438         getmicrouptime(&start);
1439
1440         /*
1441          * The addl_page_shortage is an estimate of the number of temporarily
1442          * stuck pages in the inactive queue.  In other words, the
1443          * number of pages from the inactive count that should be
1444          * discounted in setting the target for the active queue scan.
1445          */
1446         addl_page_shortage = 0;
1447
1448         /*
1449          * Start scanning the inactive queue for pages that we can free.  The
1450          * scan will stop when we reach the target or we have scanned the
1451          * entire queue.  (Note that m->a.act_count is not used to make
1452          * decisions for the inactive queue, only for the active queue.)
1453          */
1454         starting_page_shortage = page_shortage;
1455         marker = &marker_page;
1456         vm_page_init_marker(marker, PQ_INACTIVE, 0);
1457         pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1458         vm_pagequeue_lock(pq);
1459         vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
1460         while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) {
1461                 KASSERT((m->flags & PG_MARKER) == 0,
1462                     ("marker page %p was dequeued", m));
1463
1464                 /*
1465                  * Don't touch a page that was removed from the queue after the
1466                  * page queue lock was released.  Otherwise, ensure that any
1467                  * pending queue operations, such as dequeues for wired pages,
1468                  * are handled.
1469                  */
1470                 if (vm_pageout_defer(m, PQ_INACTIVE, false))
1471                         continue;
1472
1473                 /*
1474                  * Lock the page's object.
1475                  */
1476                 if (object == NULL || object != m->object) {
1477                         if (object != NULL)
1478                                 VM_OBJECT_WUNLOCK(object);
1479                         object = atomic_load_ptr(&m->object);
1480                         if (__predict_false(object == NULL))
1481                                 /* The page is being freed by another thread. */
1482                                 continue;
1483
1484                         /* Depends on type-stability. */
1485                         VM_OBJECT_WLOCK(object);
1486                         if (__predict_false(m->object != object)) {
1487                                 VM_OBJECT_WUNLOCK(object);
1488                                 object = NULL;
1489                                 goto reinsert;
1490                         }
1491                 }
1492
1493                 if (vm_page_tryxbusy(m) == 0) {
1494                         /*
1495                          * Don't mess with busy pages.  Leave them at
1496                          * the front of the queue.  Most likely, they
1497                          * are being paged out and will leave the
1498                          * queue shortly after the scan finishes.  So,
1499                          * they ought to be discounted from the
1500                          * inactive count.
1501                          */
1502                         addl_page_shortage++;
1503                         goto reinsert;
1504                 }
1505
1506                 /* Deferred free of swap space. */
1507                 if ((m->a.flags & PGA_SWAP_FREE) != 0)
1508                         vm_pager_page_unswapped(m);
1509
1510                 /*
1511                  * Check for wirings now that we hold the object lock and have
1512                  * exclusively busied the page.  If the page is mapped, it may
1513                  * still be wired by pmap lookups.  The call to
1514                  * vm_page_try_remove_all() below atomically checks for such
1515                  * wirings and removes mappings.  If the page is unmapped, the
1516                  * wire count is guaranteed not to increase after this check.
1517                  */
1518                 if (__predict_false(vm_page_wired(m)))
1519                         goto skip_page;
1520
1521                 /*
1522                  * Invalid pages can be easily freed.  They cannot be
1523                  * mapped; vm_page_free() asserts this.
1524                  */
1525                 if (vm_page_none_valid(m))
1526                         goto free_page;
1527
1528                 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1529
1530                 for (old = vm_page_astate_load(m);;) {
1531                         /*
1532                          * Check to see if the page has been removed from the
1533                          * queue since the first such check.  Leave it alone if
1534                          * so, discarding any references collected by
1535                          * pmap_ts_referenced().
1536                          */
1537                         if (__predict_false(_vm_page_queue(old) == PQ_NONE))
1538                                 goto skip_page;
1539
1540                         new = old;
1541                         act_delta = refs;
1542                         if ((old.flags & PGA_REFERENCED) != 0) {
1543                                 new.flags &= ~PGA_REFERENCED;
1544                                 act_delta++;
1545                         }
1546                         if (act_delta == 0) {
1547                                 ;
1548                         } else if (object->ref_count != 0) {
1549                                 /*
1550                                  * Increase the activation count if the
1551                                  * page was referenced while in the
1552                                  * inactive queue.  This makes it less
1553                                  * likely that the page will be returned
1554                                  * prematurely to the inactive queue.
1555                                  */
1556                                 new.act_count += ACT_ADVANCE +
1557                                     act_delta;
1558                                 if (new.act_count > ACT_MAX)
1559                                         new.act_count = ACT_MAX;
1560
1561                                 new.flags &= ~PGA_QUEUE_OP_MASK;
1562                                 new.flags |= PGA_REQUEUE;
1563                                 new.queue = PQ_ACTIVE;
1564                                 if (!vm_page_pqstate_commit(m, &old, new))
1565                                         continue;
1566
1567                                 VM_CNT_INC(v_reactivated);
1568                                 goto skip_page;
1569                         } else if ((object->flags & OBJ_DEAD) == 0) {
1570                                 new.queue = PQ_INACTIVE;
1571                                 new.flags |= PGA_REQUEUE;
1572                                 if (!vm_page_pqstate_commit(m, &old, new))
1573                                         continue;
1574                                 goto skip_page;
1575                         }
1576                         break;
1577                 }
1578
1579                 /*
1580                  * If the page appears to be clean at the machine-independent
1581                  * layer, then remove all of its mappings from the pmap in
1582                  * anticipation of freeing it.  If, however, any of the page's
1583                  * mappings allow write access, then the page may still be
1584                  * modified until the last of those mappings are removed.
1585                  */
1586                 if (object->ref_count != 0) {
1587                         vm_page_test_dirty(m);
1588                         if (m->dirty == 0 && !vm_page_try_remove_all(m))
1589                                 goto skip_page;
1590                 }
1591
1592                 /*
1593                  * Clean pages can be freed, but dirty pages must be sent back
1594                  * to the laundry, unless they belong to a dead object.
1595                  * Requeueing dirty pages from dead objects is pointless, as
1596                  * they are being paged out and freed by the thread that
1597                  * destroyed the object.
1598                  */
1599                 if (m->dirty == 0) {
1600 free_page:
1601                         /*
1602                          * Now we are guaranteed that no other threads are
1603                          * manipulating the page, check for a last-second
1604                          * reference that would save it from doom.
1605                          */
1606                         if (vm_pageout_defer(m, PQ_INACTIVE, false))
1607                                 goto skip_page;
1608
1609                         /*
1610                          * Because we dequeued the page and have already checked
1611                          * for pending dequeue and enqueue requests, we can
1612                          * safely disassociate the page from the inactive queue
1613                          * without holding the queue lock.
1614                          */
1615                         m->a.queue = PQ_NONE;
1616                         vm_page_free(m);
1617                         page_shortage--;
1618                         continue;
1619                 }
1620                 if ((object->flags & OBJ_DEAD) == 0)
1621                         vm_page_launder(m);
1622 skip_page:
1623                 vm_page_xunbusy(m);
1624                 continue;
1625 reinsert:
1626                 vm_pageout_reinsert_inactive(&ss, &rq, m);
1627         }
1628         if (object != NULL)
1629                 VM_OBJECT_WUNLOCK(object);
1630         vm_pageout_reinsert_inactive(&ss, &rq, NULL);
1631         vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
1632         vm_pagequeue_lock(pq);
1633         vm_pageout_end_scan(&ss);
1634         vm_pagequeue_unlock(pq);
1635
1636         /*
1637          * Record the additional shortage, the elapsed scan time, and the pages freed.
1638          */
1639         atomic_add_int(&vmd->vmd_addl_shortage, addl_page_shortage);
1640         getmicrouptime(&end);
1641         timevalsub(&end, &start);
1642         atomic_add_int(&vmd->vmd_inactive_us,
1643             end.tv_sec * 1000000 + end.tv_usec);
1644         atomic_add_int(&vmd->vmd_inactive_freed,
1645             starting_page_shortage - page_shortage);
1646 }
1647
1648 /*
1649  * Dispatch a number of inactive threads according to load and collect the
1650  * results to present a coherent view of paging activity on
1651  * this domain.
1652  */
1653 static int
1654 vm_pageout_inactive_dispatch(struct vm_domain *vmd, int shortage)
1655 {
1656         u_int freed, pps, threads, us;
1657
1658         vmd->vmd_inactive_shortage = shortage;
1659
1660         /*
1661          * If we have more work than we can do in a quarter of our interval, we
1662          * fire off multiple threads to process it.
1663          */
1664         if (vmd->vmd_inactive_threads > 1 && vmd->vmd_inactive_pps != 0 &&
1665             shortage > vmd->vmd_inactive_pps / VM_INACT_SCAN_RATE / 4) {
1666                 threads = vmd->vmd_inactive_threads;
1667                 vm_domain_pageout_lock(vmd);
1668                 vmd->vmd_inactive_shortage /= threads;
1669                 blockcount_acquire(&vmd->vmd_inactive_starting, threads - 1);
1670                 blockcount_acquire(&vmd->vmd_inactive_running, threads - 1);
1671                 wakeup(&vmd->vmd_inactive_shortage);
1672                 vm_domain_pageout_unlock(vmd);
1673         }
1674
1675         /* Run the local thread scan. */
1676         vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage);
1677
1678         /*
1679          * Block until helper threads report results and then accumulate
1680          * totals.
1681          */
1682         blockcount_wait(&vmd->vmd_inactive_running, NULL, "vmpoid", PVM);
1683         freed = atomic_readandclear_int(&vmd->vmd_inactive_freed);
1684         VM_CNT_ADD(v_dfree, freed);
1685
1686         /*
1687          * Calculate the per-thread paging rate with an exponential decay of
1688          * prior results.  Careful to avoid integer rounding errors with large
1689          * us values.
1690          */
1691         us = max(atomic_readandclear_int(&vmd->vmd_inactive_us), 1);
1692         if (us > 1000000)
1693                 /* Keep rounding to tenths */
1694                 pps = (freed * 10) / ((us * 10) / 1000000);
1695         else
1696                 pps = (1000000 / us) * freed;
1697         vmd->vmd_inactive_pps = (vmd->vmd_inactive_pps / 2) + (pps / 2);
1698
1699         return (shortage - freed);
1700 }
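
/*
 * As a worked example of the rate computation above: if the scans freed 4000
 * pages in a combined 200000 us, then pps = (1000000 / 200000) * 4000 = 20000
 * and the decayed estimate becomes old_pps / 2 + 20000 / 2.  Assuming the
 * usual VM_INACT_SCAN_RATE of 10 scans per second (an assumption), helper
 * threads are then dispatched only when a single scan's shortage exceeds
 * 20000 / 10 / 4 = 500 pages, i.e. more than a quarter interval's worth of
 * work.  The sketch below (not compiled) restates the update in isolation.
 */
#if 0
static u_int
inactive_pps_update(u_int prev_pps, u_int freed, u_int us)
{
	u_int pps;

	us = max(us, 1);		/* Avoid division by zero. */
	if (us > 1000000)
		/* Keep rounding to tenths. */
		pps = (freed * 10) / ((us * 10) / 1000000);
	else
		pps = (1000000 / us) * freed;
	return (prev_pps / 2 + pps / 2);	/* Decay prior results. */
}
#endif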
1701
1702 /*
1703  * Attempt to reclaim the requested number of pages from the inactive queue.
1704  * Returns true if the shortage was addressed.
1705  */
1706 static int
1707 vm_pageout_inactive(struct vm_domain *vmd, int shortage, int *addl_shortage)
1708 {
1709         struct vm_pagequeue *pq;
1710         u_int addl_page_shortage, deficit, page_shortage;
1711         u_int starting_page_shortage;
1712
1713         /*
1714          * vmd_pageout_deficit counts the number of pages requested in
1715          * allocations that failed because of a free page shortage.  We assume
1716          * that the allocations will be reattempted and thus include the deficit
1717          * in our scan target.
1718          */
1719         deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
1720         starting_page_shortage = shortage + deficit;
1721
1722         /*
1723          * Run the inactive scan on as many threads as is necessary.
1724          */
1725         page_shortage = vm_pageout_inactive_dispatch(vmd, starting_page_shortage);
1726         addl_page_shortage = atomic_readandclear_int(&vmd->vmd_addl_shortage);
1727
1728         /*
1729          * Wake up the laundry thread so that it can perform any needed
1730          * laundering.  If we didn't meet our target, we're in shortfall and
1731          * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1732          * swap devices are configured, the laundry thread has no work to do, so
1733          * don't bother waking it up.
1734          *
1735          * The laundry thread uses the number of inactive queue scans elapsed
1736          * since the last laundering to determine whether to launder again, so
1737          * keep count.
1738          */
1739         if (starting_page_shortage > 0) {
1740                 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1741                 vm_pagequeue_lock(pq);
1742                 if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1743                     (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1744                         if (page_shortage > 0) {
1745                                 vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
1746                                 VM_CNT_INC(v_pdshortfalls);
1747                         } else if (vmd->vmd_laundry_request !=
1748                             VM_LAUNDRY_SHORTFALL)
1749                                 vmd->vmd_laundry_request =
1750                                     VM_LAUNDRY_BACKGROUND;
1751                         wakeup(&vmd->vmd_laundry_request);
1752                 }
1753                 vmd->vmd_clean_pages_freed +=
1754                     starting_page_shortage - page_shortage;
1755                 vm_pagequeue_unlock(pq);
1756         }
1757
1758         /*
1759          * Wakeup the swapout daemon if we didn't free the targeted number of
1760          * pages.
1761          */
1762         if (page_shortage > 0)
1763                 vm_swapout_run();
1764
1765         /*
1766          * If the inactive queue scan fails repeatedly to meet its
1767          * target, kill the largest process.
1768          */
1769         vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1770
1771         /*
1772          * Reclaim pages by swapping out idle processes, if configured to do so.
1773          */
1774         vm_swapout_run_idle();
1775
1776         /*
1777          * See the description of addl_page_shortage above.
1778          */
1779         *addl_shortage = addl_page_shortage + deficit;
1780
1781         return (page_shortage <= 0);
1782 }
1783
1784 static int vm_pageout_oom_vote;
1785
1786 /*
1787  * Each pagedaemon thread casts a vote, and the last thread to vote
1788  * performs the OOM kill.  Trying to kill processes before all pagedaemons
1789  * have failed to reach the free page target is premature.
1790  */
1791 static void
1792 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1793     int starting_page_shortage)
1794 {
1795         int old_vote;
1796
1797         if (starting_page_shortage <= 0 || starting_page_shortage !=
1798             page_shortage)
1799                 vmd->vmd_oom_seq = 0;
1800         else
1801                 vmd->vmd_oom_seq++;
1802         if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1803                 if (vmd->vmd_oom) {
1804                         vmd->vmd_oom = FALSE;
1805                         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1806                 }
1807                 return;
1808         }
1809
1810         /*
1811          * Do not follow the call sequence until the OOM condition is
1812          * cleared.
1813          */
1814         vmd->vmd_oom_seq = 0;
1815
1816         if (vmd->vmd_oom)
1817                 return;
1818
1819         vmd->vmd_oom = TRUE;
1820         old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1821         if (old_vote != vm_ndomains - 1)
1822                 return;
1823
1824         /*
1825          * The current pagedaemon thread is the last in the quorum to
1826          * start OOM.  Initiate the selection and signaling of the
1827          * victim.
1828          */
1829         vm_pageout_oom(VM_OOM_MEM);
1830
1831         /*
1832          * After one round of OOM terror, recall our vote.  On the
1833          * next pass, the current pagedaemon will vote again if the low
1834          * memory condition persists, because vmd_oom is now
1835          * false.
1836          */
1837         vmd->vmd_oom = FALSE;
1838         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1839 }
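
/*
 * As a worked example of the vote above: on a hypothetical two-domain system,
 * each domain's pagedaemon increments vm_pageout_oom_vote only after
 * vm_pageout_oom_seq consecutive scans have failed to free any pages.  The
 * thread whose atomic_fetchadd_int() returns vm_ndomains - 1 (here, 1) is the
 * last voter and is the one that calls vm_pageout_oom(VM_OOM_MEM); the other
 * domain's vote simply remains in place until its own shortage clears.
 */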
1840
1841 /*
1842  * The OOM killer is the page daemon's action of last resort when
1843  * memory allocation requests have been stalled for a prolonged period
1844  * of time because it cannot reclaim memory.  This function computes
1845  * the approximate number of physical pages that could be reclaimed if
1846  * the specified address space is destroyed.
1847  *
1848  * Private, anonymous memory owned by the address space is the
1849  * principal resource that we expect to recover after an OOM kill.
1850  * Since the physical pages mapped by the address space's COW entries
1851  * are typically shared pages, they are unlikely to be released and so
1852  * they are not counted.
1853  *
1854  * To get to the point where the page daemon runs the OOM killer, its
1855  * efforts to write-back vnode-backed pages may have stalled.  This
1856  * could be caused by a memory allocation deadlock in the write path
1857  * that might be resolved by an OOM kill.  Therefore, physical pages
1858  * belonging to vnode-backed objects are counted, because they might
1859  * be freed without being written out first if the address space holds
1860  * the last reference to an unlinked vnode.
1861  *
1862  * Similarly, physical pages belonging to OBJT_PHYS objects are
1863  * counted because the address space might hold the last reference to
1864  * the object.
1865  */
1866 static long
1867 vm_pageout_oom_pagecount(struct vmspace *vmspace)
1868 {
1869         vm_map_t map;
1870         vm_map_entry_t entry;
1871         vm_object_t obj;
1872         long res;
1873
1874         map = &vmspace->vm_map;
1875         KASSERT(!map->system_map, ("system map"));
1876         sx_assert(&map->lock, SA_LOCKED);
1877         res = 0;
1878         VM_MAP_ENTRY_FOREACH(entry, map) {
1879                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1880                         continue;
1881                 obj = entry->object.vm_object;
1882                 if (obj == NULL)
1883                         continue;
1884                 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
1885                     obj->ref_count != 1)
1886                         continue;
1887                 switch (obj->type) {
1888                 case OBJT_DEFAULT:
1889                 case OBJT_SWAP:
1890                 case OBJT_PHYS:
1891                 case OBJT_VNODE:
1892                         res += obj->resident_page_count;
1893                         break;
1894                 }
1895         }
1896         return (res);
1897 }
1898
1899 static int vm_oom_ratelim_last;
1900 static int vm_oom_pf_secs = 10;
1901 SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0,
1902     "");
1903 static struct mtx vm_oom_ratelim_mtx;
1904
1905 void
1906 vm_pageout_oom(int shortage)
1907 {
1908         struct proc *p, *bigproc;
1909         vm_offset_t size, bigsize;
1910         struct thread *td;
1911         struct vmspace *vm;
1912         int now;
1913         bool breakout;
1914
1915         /*
1916          * For OOM requests originating from vm_fault(), there is a high
1917          * chance that a single large process faults simultaneously in
1918          * several threads.  Also, on an active system running many
1919          * medium-sized processes, such as a buildworld, all of them
1920          * could fault almost simultaneously as well.
1921          *
1922          * To avoid killing too many processes, rate-limit OOMs
1923          * initiated by vm_fault() time-outs on the waits for free
1924          * pages.
1925          */
1926         mtx_lock(&vm_oom_ratelim_mtx);
1927         now = ticks;
1928         if (shortage == VM_OOM_MEM_PF &&
1929             (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) {
1930                 mtx_unlock(&vm_oom_ratelim_mtx);
1931                 return;
1932         }
1933         vm_oom_ratelim_last = now;
1934         mtx_unlock(&vm_oom_ratelim_mtx);
1935
1936         /*
1937          * We keep the process bigproc locked once we find it to keep anyone
1938          * from messing with it; however, there is a possibility of
1939          * deadlock if process B is bigproc and one of its child processes
1940          * attempts to propagate a signal to B while we are waiting for the
1941          * lock of another process on the list.  To avoid this, we don't block on
1942          * the process lock but just skip a process if it is already locked.
1943          */
1944         bigproc = NULL;
1945         bigsize = 0;
1946         sx_slock(&allproc_lock);
1947         FOREACH_PROC_IN_SYSTEM(p) {
1948                 PROC_LOCK(p);
1949
1950                 /*
1951                  * If this is a system, protected or killed process, skip it.
1952                  */
1953                 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1954                     P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1955                     p->p_pid == 1 || P_KILLED(p) ||
1956                     (p->p_pid < 48 && swap_pager_avail != 0)) {
1957                         PROC_UNLOCK(p);
1958                         continue;
1959                 }
1960                 /*
1961                  * If the process is in a non-running type state,
1962                  * don't touch it.  Check all the threads individually.
1963                  */
1964                 breakout = false;
1965                 FOREACH_THREAD_IN_PROC(p, td) {
1966                         thread_lock(td);
1967                         if (!TD_ON_RUNQ(td) &&
1968                             !TD_IS_RUNNING(td) &&
1969                             !TD_IS_SLEEPING(td) &&
1970                             !TD_IS_SUSPENDED(td) &&
1971                             !TD_IS_SWAPPED(td)) {
1972                                 thread_unlock(td);
1973                                 breakout = true;
1974                                 break;
1975                         }
1976                         thread_unlock(td);
1977                 }
1978                 if (breakout) {
1979                         PROC_UNLOCK(p);
1980                         continue;
1981                 }
1982                 /*
1983                  * get the process size
1984                  */
1985                 vm = vmspace_acquire_ref(p);
1986                 if (vm == NULL) {
1987                         PROC_UNLOCK(p);
1988                         continue;
1989                 }
1990                 _PHOLD_LITE(p);
1991                 PROC_UNLOCK(p);
1992                 sx_sunlock(&allproc_lock);
1993                 if (!vm_map_trylock_read(&vm->vm_map)) {
1994                         vmspace_free(vm);
1995                         sx_slock(&allproc_lock);
1996                         PRELE(p);
1997                         continue;
1998                 }
1999                 size = vmspace_swap_count(vm);
2000                 if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF)
2001                         size += vm_pageout_oom_pagecount(vm);
2002                 vm_map_unlock_read(&vm->vm_map);
2003                 vmspace_free(vm);
2004                 sx_slock(&allproc_lock);
2005
2006                 /*
2007                  * If this process is bigger than the biggest one,
2008                  * remember it.
2009                  */
2010                 if (size > bigsize) {
2011                         if (bigproc != NULL)
2012                                 PRELE(bigproc);
2013                         bigproc = p;
2014                         bigsize = size;
2015                 } else {
2016                         PRELE(p);
2017                 }
2018         }
2019         sx_sunlock(&allproc_lock);
2020         if (bigproc != NULL) {
2021                 if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0)
2022                         panic("out of swap space");
2023                 PROC_LOCK(bigproc);
2024                 killproc(bigproc, "out of swap space");
2025                 sched_nice(bigproc, PRIO_MIN);
2026                 _PRELE(bigproc);
2027                 PROC_UNLOCK(bigproc);
2028         }
2029 }
2030
2031 /*
2032  * Signal a free page shortage to subsystems that have registered an event
2033  * handler.  Reclaim memory from UMA in the event of a severe shortage.
2034  * Return true if the free page count should be re-evaluated.
2035  */
2036 static bool
2037 vm_pageout_lowmem(void)
2038 {
2039         static int lowmem_ticks = 0;
2040         int last;
2041         bool ret;
2042
2043         ret = false;
2044
2045         last = atomic_load_int(&lowmem_ticks);
2046         while ((u_int)(ticks - last) / hz >= lowmem_period) {
2047                 if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
2048                         continue;
2049
2050                 /*
2051                  * Decrease registered cache sizes.
2052                  */
2053                 SDT_PROBE0(vm, , , vm__lowmem_scan);
2054                 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
2055
2056                 /*
2057                  * We do this explicitly after the caches have been
2058                  * drained above.
2059                  */
2060                 uma_reclaim(UMA_RECLAIM_TRIM);
2061                 ret = true;
2062                 break;
2063         }
2064
2065         /*
2066          * Kick off an asynchronous reclaim of cached memory if one of the
2067          * page daemons is failing to keep up with demand.  Use the "severe"
2068          * threshold instead of "min" to ensure that we do not blow away the
2069          * caches if a subset of the NUMA domains are depleted by kernel memory
2070          * allocations; the domainset iterators automatically skip domains
2071          * below the "min" threshold on the first pass.
2072          *
2073          * UMA reclaim worker has its own rate-limiting mechanism, so don't
2074          * worry about kicking it too often.
2075          */
2076         if (vm_page_count_severe())
2077                 uma_reclaim_wakeup();
2078
2079         return (ret);
2080 }
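
/*
 * The loop above is a simple rate limit: assuming the default lowmem_period
 * of 10 seconds (a tunable), the vm_lowmem eventhandlers and
 * uma_reclaim(UMA_RECLAIM_TRIM) run at most once per period no matter how
 * often the page daemon wakes, and the atomic_fcmpset_int() ensures that only
 * one of several racing threads performs the work for a given period.
 */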
2081
2082 static void
2083 vm_pageout_worker(void *arg)
2084 {
2085         struct vm_domain *vmd;
2086         u_int ofree;
2087         int addl_shortage, domain, shortage;
2088         bool target_met;
2089
2090         domain = (uintptr_t)arg;
2091         vmd = VM_DOMAIN(domain);
2092         shortage = 0;
2093         target_met = true;
2094
2095         /*
2096          * XXXKIB It could be useful to bind pageout daemon threads to
2097          * the cores belonging to the domain, from which vm_page_array
2098          * is allocated.
2099          */
2100
2101         KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
2102         vmd->vmd_last_active_scan = ticks;
2103
2104         /*
2105          * The pageout daemon worker is never done, so loop forever.
2106          */
2107         while (TRUE) {
2108                 vm_domain_pageout_lock(vmd);
2109
2110                 /*
2111                  * We need to clear wanted before we check the limits.  This
2112                  * prevents races with wakers who will check wanted after they
2113                  * reach the limit.
2114                  */
2115                 atomic_store_int(&vmd->vmd_pageout_wanted, 0);
2116
2117                 /*
2118                  * Might the page daemon need to run again?
2119                  */
2120                 if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
2121                         /*
2122                          * Yes.  If the scan failed to produce enough free
2123                          * pages, sleep uninterruptibly for some time in the
2124                          * hope that the laundry thread will clean some pages.
2125                          */
2126                         vm_domain_pageout_unlock(vmd);
2127                         if (!target_met)
2128                                 pause("pwait", hz / VM_INACT_SCAN_RATE);
2129                 } else {
2130                         /*
2131                          * No, sleep until the next wakeup or until pages
2132                          * need to have their reference stats updated.
2133                          */
2134                         if (mtx_sleep(&vmd->vmd_pageout_wanted,
2135                             vm_domain_pageout_lockptr(vmd), PDROP | PVM,
2136                             "psleep", hz / VM_INACT_SCAN_RATE) == 0)
2137                                 VM_CNT_INC(v_pdwakeups);
2138                 }
2139
2140                 /* Prevent spurious wakeups by ensuring that wanted is set. */
2141                 atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2142
2143                 /*
2144                  * Use the controller to calculate how many pages to free in
2145                  * this interval, and scan the inactive queue.  If the lowmem
2146                  * handlers appear to have freed up some pages, subtract the
2147                  * difference from the inactive queue scan target.
2148                  */
2149                 shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
2150                 if (shortage > 0) {
2151                         ofree = vmd->vmd_free_count;
2152                         if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
2153                                 shortage -= min(vmd->vmd_free_count - ofree,
2154                                     (u_int)shortage);
2155                         target_met = vm_pageout_inactive(vmd, shortage,
2156                             &addl_shortage);
2157                 } else
2158                         addl_shortage = 0;
2159
2160                 /*
2161                  * Scan the active queue.  A positive value for shortage
2162                  * indicates that we must aggressively deactivate pages to avoid
2163                  * a shortfall.
2164                  */
2165                 shortage = vm_pageout_active_target(vmd) + addl_shortage;
2166                 vm_pageout_scan_active(vmd, shortage);
2167         }
2168 }
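
/*
 * As a short example of the shortage adjustment above: if the PID controller
 * asks for 1000 pages but the lowmem handlers raised vmd_free_count by 300,
 * the inactive scan is asked to reclaim only the remaining 700; if the
 * handlers freed more than the shortage, the scan target drops to zero for
 * this interval.
 */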
2169
2170 /*
2171  * vm_pageout_helper runs additional pageout daemons in times of high paging
2172  * activity.
2173  */
2174 static void
2175 vm_pageout_helper(void *arg)
2176 {
2177         struct vm_domain *vmd;
2178         int domain;
2179
2180         domain = (uintptr_t)arg;
2181         vmd = VM_DOMAIN(domain);
2182
2183         vm_domain_pageout_lock(vmd);
2184         for (;;) {
2185                 msleep(&vmd->vmd_inactive_shortage,
2186                     vm_domain_pageout_lockptr(vmd), PVM, "psleep", 0);
2187                 blockcount_release(&vmd->vmd_inactive_starting, 1);
2188
2189                 vm_domain_pageout_unlock(vmd);
2190                 vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage);
2191                 vm_domain_pageout_lock(vmd);
2192
2193                 /*
2194                  * Release the running count while the pageout lock is held to
2195                  * prevent wakeup races.
2196                  */
2197                 blockcount_release(&vmd->vmd_inactive_running, 1);
2198         }
2199 }
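
/*
 * A sketch of the handshake with vm_pageout_inactive_dispatch() above: the
 * dispatcher acquires both blockcounts for (threads - 1) helpers and wakes
 * them via vmd_inactive_shortage; each helper releases vmd_inactive_starting
 * once it has picked up the per-thread shortage and releases
 * vmd_inactive_running after its scan completes, which is what the
 * dispatcher's blockcount_wait() waits for before summing the results.
 */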
2200
2201 static int
2202 get_pageout_threads_per_domain(const struct vm_domain *vmd)
2203 {
2204         unsigned total_pageout_threads, eligible_cpus, domain_cpus;
2205
2206         if (VM_DOMAIN_EMPTY(vmd->vmd_domain))
2207                 return (0);
2208
2209         /*
2210          * Semi-arbitrarily constrain pagedaemon threads to less than half the
2211          * total number of CPUs in the system as an upper limit.
2212          */
2213         if (pageout_cpus_per_thread < 2)
2214                 pageout_cpus_per_thread = 2;
2215         else if (pageout_cpus_per_thread > mp_ncpus)
2216                 pageout_cpus_per_thread = mp_ncpus;
2217
2218         total_pageout_threads = howmany(mp_ncpus, pageout_cpus_per_thread);
2219         domain_cpus = CPU_COUNT(&cpuset_domain[vmd->vmd_domain]);
2220
2221         /* Pagedaemons are not run in empty domains. */
2222         eligible_cpus = mp_ncpus;
2223         for (unsigned i = 0; i < vm_ndomains; i++)
2224                 if (VM_DOMAIN_EMPTY(i))
2225                         eligible_cpus -= CPU_COUNT(&cpuset_domain[i]);
2226
2227         /*
2228          * Assign a portion of the total pageout threads to this domain
2229          * corresponding to the fraction of pagedaemon-eligible CPUs in the
2230          * domain.  In asymmetric NUMA systems, domains with more CPUs may be
2231          * allocated more threads than domains with fewer CPUs.
2232          */
2233         return (howmany(total_pageout_threads * domain_cpus, eligible_cpus));
2234 }
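
/*
 * As a worked example of the split above: on a hypothetical two-domain
 * machine with 64 CPUs (32 per domain) and pageout_cpus_per_thread left at a
 * default of 16 (an assumption), total_pageout_threads = howmany(64, 16) = 4
 * and each domain receives howmany(4 * 32, 64) = 2 threads.  If one domain
 * were empty, eligible_cpus would drop to 32 and the populated domain would
 * receive howmany(4 * 32, 32) = 4 threads.
 */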
2235
2236 /*
2237  * Initialize basic pageout daemon settings.  See the comment above the
2238  * definition of vm_domain for some explanation of how these thresholds are
2239  * used.
2240  */
2241 static void
2242 vm_pageout_init_domain(int domain)
2243 {
2244         struct vm_domain *vmd;
2245         struct sysctl_oid *oid;
2246
2247         vmd = VM_DOMAIN(domain);
2248         vmd->vmd_interrupt_free_min = 2;
2249
2250         /*
2251          * v_free_reserved needs to include enough for the largest
2252          * swap pager structures plus enough for any pv_entry structs
2253          * when paging.
2254          */
2255         vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
2256             vmd->vmd_interrupt_free_min;
2257         vmd->vmd_free_reserved = vm_pageout_page_count +
2258             vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768;
2259         vmd->vmd_free_min = vmd->vmd_page_count / 200;
2260         vmd->vmd_free_severe = vmd->vmd_free_min / 2;
2261         vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
2262         vmd->vmd_free_min += vmd->vmd_free_reserved;
2263         vmd->vmd_free_severe += vmd->vmd_free_reserved;
2264         vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
2265         if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
2266                 vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
2267
2268         /*
2269          * Set the default wakeup threshold to be 10% below the paging
2270          * target.  This keeps the steady state out of shortfall.
2271          */
2272         vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
2273
2274         /*
2275          * Target amount of memory to move out of the laundry queue during a
2276          * background laundering.  This is proportional to the amount of system
2277          * memory.
2278          */
2279         vmd->vmd_background_launder_target = (vmd->vmd_free_target -
2280             vmd->vmd_free_min) / 10;
2281
2282         /* Initialize the pageout daemon pid controller. */
2283         pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
2284             vmd->vmd_free_target, PIDCTRL_BOUND,
2285             PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
2286         oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
2287             "pidctrl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2288         pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
2289
2290         vmd->vmd_inactive_threads = get_pageout_threads_per_domain(vmd);
2291 }
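
/*
 * As a worked example of the thresholds above, for a hypothetical domain of
 * 1048576 pages (4 GiB of 4 KiB pages) and assuming the defaults
 * vm_pageout_page_count == 32 and MAXBSIZE == 65536 (both assumptions):
 *
 *	vmd_pageout_free_min          = 2 * 65536 / 4096 + 2      = 34
 *	vmd_free_reserved             = 32 + 34 + 1048576 / 768   = 1431
 *	vmd_free_min                  = 1048576 / 200 + 1431      = 6673
 *	vmd_free_severe               = 5242 / 2 + 1431           = 4052
 *	vmd_free_target               = 4 * 5242 + 1431           = 22399
 *	vmd_inactive_target           = 3 * 22399 / 2             = 33598
 *	                                (capped at vmd_free_count / 3)
 *	vmd_pageout_wakeup_thresh     = (22399 / 10) * 9          = 20151
 *	vmd_background_launder_target = (22399 - 6673) / 10       = 1572
 */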
2292
2293 static void
2294 vm_pageout_init(void)
2295 {
2296         u_int freecount;
2297         int i;
2298
2299         /*
2300          * Initialize some paging parameters.
2301          */
2302         if (vm_cnt.v_page_count < 2000)
2303                 vm_pageout_page_count = 8;
2304
2305         freecount = 0;
2306         for (i = 0; i < vm_ndomains; i++) {
2307                 struct vm_domain *vmd;
2308
2309                 vm_pageout_init_domain(i);
2310                 vmd = VM_DOMAIN(i);
2311                 vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
2312                 vm_cnt.v_free_target += vmd->vmd_free_target;
2313                 vm_cnt.v_free_min += vmd->vmd_free_min;
2314                 vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
2315                 vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
2316                 vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
2317                 vm_cnt.v_free_severe += vmd->vmd_free_severe;
2318                 freecount += vmd->vmd_free_count;
2319         }
2320
2321         /*
2322          * Set the interval, in seconds, for the active scan.  We want to
2323          * visit each page at least once every ten minutes.  This prevents
2324          * worst-case paging behavior caused by a stale active LRU.
2325          */
2326         if (vm_pageout_update_period == 0)
2327                 vm_pageout_update_period = 600;
2328
2329         if (vm_page_max_user_wired == 0)
2330                 vm_page_max_user_wired = freecount / 3;
2331 }
2332
2333 /*
2334  *     vm_pageout is the high level pageout daemon.
2335  */
2336 static void
2337 vm_pageout(void)
2338 {
2339         struct proc *p;
2340         struct thread *td;
2341         int error, first, i, j, pageout_threads;
2342
2343         p = curproc;
2344         td = curthread;
2345
2346         mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
2347         swap_pager_swap_init();
2348         for (first = -1, i = 0; i < vm_ndomains; i++) {
2349                 if (VM_DOMAIN_EMPTY(i)) {
2350                         if (bootverbose)
2351                                 printf("domain %d empty; skipping pageout\n",
2352                                     i);
2353                         continue;
2354                 }
2355                 if (first == -1)
2356                         first = i;
2357                 else {
2358                         error = kthread_add(vm_pageout_worker,
2359                             (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
2360                         if (error != 0)
2361                                 panic("starting pageout for domain %d: %d\n",
2362                                     i, error);
2363                 }
2364                 pageout_threads = VM_DOMAIN(i)->vmd_inactive_threads;
2365                 for (j = 0; j < pageout_threads - 1; j++) {
2366                         error = kthread_add(vm_pageout_helper,
2367                             (void *)(uintptr_t)i, p, NULL, 0, 0,
2368                             "dom%d helper%d", i, j);
2369                         if (error != 0)
2370                                 panic("starting pageout helper %d for domain "
2371                                     "%d: %d\n", j, i, error);
2372                 }
2373                 error = kthread_add(vm_pageout_laundry_worker,
2374                     (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
2375                 if (error != 0)
2376                         panic("starting laundry for domain %d: %d", i, error);
2377         }
2378         error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
2379         if (error != 0)
2380                 panic("starting uma_reclaim helper, error %d\n", error);
2381
2382         snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
2383         vm_pageout_worker((void *)(uintptr_t)first);
2384 }
2385
2386 /*
2387  * Perform an advisory wakeup of the page daemon.
2388  */
2389 void
2390 pagedaemon_wakeup(int domain)
2391 {
2392         struct vm_domain *vmd;
2393
2394         vmd = VM_DOMAIN(domain);
2395         vm_domain_pageout_assert_unlocked(vmd);
2396         if (curproc == pageproc)
2397                 return;
2398
2399         if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
2400                 vm_domain_pageout_lock(vmd);
2401                 atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2402                 wakeup(&vmd->vmd_pageout_wanted);
2403                 vm_domain_pageout_unlock(vmd);
2404         }
2405 }
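
/*
 * A sketch of the handshake with vm_pageout_worker() above: the worker clears
 * vmd_pageout_wanted before checking the paging limits, so a waker whose
 * atomic_fetchadd_int() returns zero knows the daemon may be about to sleep
 * and must take the pageout lock and issue a wakeup; wakers that see a
 * nonzero value can skip the lock, since a wakeup is already pending or the
 * daemon is already running.
 */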