1 /*-
2  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2005 Yahoo! Technologies Norway AS
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to Berkeley by
14  * The Mach Operating System project at Carnegie-Mellon University.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *      This product includes software developed by the University of
27  *      California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
45  *
46  *
47  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
48  * All rights reserved.
49  *
50  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
51  *
52  * Permission to use, copy, modify and distribute this software and
53  * its documentation is hereby granted, provided that both the copyright
54  * notice and this permission notice appear in all copies of the
55  * software, derivative works or modified versions, and any portions
56  * thereof, and that both notices appear in supporting documentation.
57  *
58  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61  *
62  * Carnegie Mellon requests users of this software to return to
63  *
64  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65  *  School of Computer Science
66  *  Carnegie Mellon University
67  *  Pittsburgh PA 15213-3890
68  *
69  * any improvements or extensions that they make and grant Carnegie the
70  * rights to redistribute these changes.
71  */
72
73 /*
74  *      The proverbial page-out daemon.
75  */
76
77 #include <sys/cdefs.h>
78 #include "opt_vm.h"
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/blockcount.h>
84 #include <sys/eventhandler.h>
85 #include <sys/lock.h>
86 #include <sys/mutex.h>
87 #include <sys/proc.h>
88 #include <sys/kthread.h>
89 #include <sys/ktr.h>
90 #include <sys/mount.h>
91 #include <sys/racct.h>
92 #include <sys/resourcevar.h>
93 #include <sys/sched.h>
94 #include <sys/sdt.h>
95 #include <sys/signalvar.h>
96 #include <sys/smp.h>
97 #include <sys/time.h>
98 #include <sys/vnode.h>
99 #include <sys/vmmeter.h>
100 #include <sys/rwlock.h>
101 #include <sys/sx.h>
102 #include <sys/sysctl.h>
103
104 #include <vm/vm.h>
105 #include <vm/vm_param.h>
106 #include <vm/vm_object.h>
107 #include <vm/vm_page.h>
108 #include <vm/vm_map.h>
109 #include <vm/vm_pageout.h>
110 #include <vm/vm_pager.h>
111 #include <vm/vm_phys.h>
112 #include <vm/vm_pagequeue.h>
113 #include <vm/swap_pager.h>
114 #include <vm/vm_extern.h>
115 #include <vm/uma.h>
116
117 /*
118  * System initialization
119  */
120
121 /* The kernel process "vm_pageout". */
122 static void vm_pageout(void);
123 static void vm_pageout_init(void);
124 static int vm_pageout_clean(vm_page_t m, int *numpagedout);
125 static int vm_pageout_cluster(vm_page_t m);
126 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
127     int starting_page_shortage);
128
129 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
130     NULL);
131
132 struct proc *pageproc;
133
134 static struct kproc_desc page_kp = {
135         "pagedaemon",
136         vm_pageout,
137         &pageproc
138 };
139 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
140     &page_kp);
141
142 SDT_PROVIDER_DEFINE(vm);
143 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
144
145 /* Pagedaemon activity rates, in subdivisions of one second. */
146 #define VM_LAUNDER_RATE         10
147 #define VM_INACT_SCAN_RATE      10
148
149 static int swapdev_enabled;
150 int vm_pageout_page_count = 32;
151
152 static int vm_panic_on_oom = 0;
153 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
154     CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
155     "Panic on the given number of out-of-memory errors instead of "
156     "killing the largest process");
157
158 static int vm_pageout_update_period;
159 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
160     CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
161     "Maximum active LRU update period");
162
163 static int pageout_cpus_per_thread = 16;
164 SYSCTL_INT(_vm, OID_AUTO, pageout_cpus_per_thread, CTLFLAG_RDTUN,
165     &pageout_cpus_per_thread, 0,
166     "Number of CPUs per pagedaemon worker thread");
167
168 static int lowmem_period = 10;
169 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
170     "Low memory callback period");
171
172 static int disable_swap_pageouts;
173 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
174     CTLFLAG_RWTUN, &disable_swap_pageouts, 0,
175     "Disallow swapout of dirty pages");
176
177 static int pageout_lock_miss;
178 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
179     CTLFLAG_RD, &pageout_lock_miss, 0,
180     "vget() lock misses during pageout");
181
182 static int vm_pageout_oom_seq = 12;
183 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
184     CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
185     "back-to-back calls to oom detector to start OOM");
186
187 static int act_scan_laundry_weight = 3;
188 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
189     &act_scan_laundry_weight, 0,
190     "weight given to clean vs. dirty pages in active queue scans");
191
192 static u_int vm_background_launder_rate = 4096;
193 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
194     &vm_background_launder_rate, 0,
195     "background laundering rate, in kilobytes per second");
196
197 static u_int vm_background_launder_max = 20 * 1024;
198 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
199     &vm_background_launder_max, 0,
200     "background laundering cap, in kilobytes");
201
202 u_long vm_page_max_user_wired;
203 SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
204     &vm_page_max_user_wired, 0,
205     "system-wide limit to user-wired page count");
206
207 static u_int isqrt(u_int num);
208 static int vm_pageout_launder(struct vm_domain *vmd, int launder,
209     bool in_shortfall);
210 static void vm_pageout_laundry_worker(void *arg);
211
212 struct scan_state {
213         struct vm_batchqueue bq;
214         struct vm_pagequeue *pq;
215         vm_page_t       marker;
216         int             maxscan;
217         int             scanned;
218 };
219
220 static void
221 vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
222     vm_page_t marker, vm_page_t after, int maxscan)
223 {
224
225         vm_pagequeue_assert_locked(pq);
226         KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
227             ("marker %p already enqueued", marker));
228
229         if (after == NULL)
230                 TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
231         else
232                 TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
233         vm_page_aflag_set(marker, PGA_ENQUEUED);
234
235         vm_batchqueue_init(&ss->bq);
236         ss->pq = pq;
237         ss->marker = marker;
238         ss->maxscan = maxscan;
239         ss->scanned = 0;
240         vm_pagequeue_unlock(pq);
241 }
242
243 static void
244 vm_pageout_end_scan(struct scan_state *ss)
245 {
246         struct vm_pagequeue *pq;
247
248         pq = ss->pq;
249         vm_pagequeue_assert_locked(pq);
250         KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
251             ("marker %p not enqueued", ss->marker));
252
253         TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
254         vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
255         pq->pq_pdpages += ss->scanned;
256 }
257
258 /*
259  * Add a small number of queued pages to a batch queue for later processing
260  * without the corresponding queue lock held.  The caller must have enqueued a
261  * marker page at the desired start point for the scan.  Pages will be
262  * physically dequeued if the caller so requests.  Otherwise, the returned
263  * batch may contain marker pages, and it is up to the caller to handle them.
264  *
265  * When processing the batch queue, vm_pageout_defer() must be used to
266  * determine whether the page has been logically dequeued since the batch was
267  * collected.
268  */
269 static __always_inline void
270 vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
271 {
272         struct vm_pagequeue *pq;
273         vm_page_t m, marker, n;
274
275         marker = ss->marker;
276         pq = ss->pq;
277
278         KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
279             ("marker %p not enqueued", ss->marker));
280
281         vm_pagequeue_lock(pq);
282         for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
283             ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
284             m = n, ss->scanned++) {
285                 n = TAILQ_NEXT(m, plinks.q);
286                 if ((m->flags & PG_MARKER) == 0) {
287                         KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
288                             ("page %p not enqueued", m));
289                         KASSERT((m->flags & PG_FICTITIOUS) == 0,
290                             ("Fictitious page %p cannot be in page queue", m));
291                         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
292                             ("Unmanaged page %p cannot be in page queue", m));
293                 } else if (dequeue)
294                         continue;
295
296                 (void)vm_batchqueue_insert(&ss->bq, m);
297                 if (dequeue) {
298                         TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
299                         vm_page_aflag_clear(m, PGA_ENQUEUED);
300                 }
301         }
302         TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
303         if (__predict_true(m != NULL))
304                 TAILQ_INSERT_BEFORE(m, marker, plinks.q);
305         else
306                 TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
307         if (dequeue)
308                 vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
309         vm_pagequeue_unlock(pq);
310 }
311
312 /*
313  * Return the next page to be scanned, or NULL if the scan is complete.
314  */
315 static __always_inline vm_page_t
316 vm_pageout_next(struct scan_state *ss, const bool dequeue)
317 {
318
319         if (ss->bq.bq_cnt == 0)
320                 vm_pageout_collect_batch(ss, dequeue);
321         return (vm_batchqueue_pop(&ss->bq));
322 }
323
324 /*
325  * Determine whether processing of a page should be deferred and ensure that any
326  * outstanding queue operations are processed.
327  */
328 static __always_inline bool
329 vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
330 {
331         vm_page_astate_t as;
332
333         as = vm_page_astate_load(m);
334         if (__predict_false(as.queue != queue ||
335             ((as.flags & PGA_ENQUEUED) != 0) != enqueued))
336                 return (true);
337         if ((as.flags & PGA_QUEUE_OP_MASK) != 0) {
338                 vm_page_pqbatch_submit(m, queue);
339                 return (true);
340         }
341         return (false);
342 }
343
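/*
 * Taken together, the scan helpers above are driven in a common pattern;
 * a sketch of the loops in vm_pageout_launder() and vm_pageout_scan_active()
 * below:
 *
 *      vm_pagequeue_lock(pq);
 *      vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 *      while ((m = vm_pageout_next(&ss, false)) != NULL) {
 *              if ((m->flags & PG_MARKER) != 0)
 *                      continue;
 *              if (vm_pageout_defer(m, queue, true))
 *                      continue;
 *              ... examine and possibly reclaim m ...
 *      }
 *      vm_pagequeue_lock(pq);
 *      vm_pageout_end_scan(&ss);
 *      vm_pagequeue_unlock(pq);
 */
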
344 /*
345  * Scan for pages at adjacent offsets within the given page's object that are
346  * eligible for laundering, form a cluster of these pages and the given page,
347  * and launder that cluster.
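 *
 * A sketch of the alignment behavior, assuming the default
 * vm_pageout_page_count of 32: for a dirty page at pindex 70, the reverse
 * scan below collects pages 69 down to 64 and stops at the 32-page
 * boundary, after which the forward scan may extend the cluster up to
 * pindex 95, yielding a single aligned run of 32 pages.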
348  */
349 static int
350 vm_pageout_cluster(vm_page_t m)
351 {
352         vm_object_t object;
353         vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
354         vm_pindex_t pindex;
355         int ib, is, page_base, pageout_count;
356
357         object = m->object;
358         VM_OBJECT_ASSERT_WLOCKED(object);
359         pindex = m->pindex;
360
361         vm_page_assert_xbusied(m);
362
363         mc[vm_pageout_page_count] = pb = ps = m;
364         pageout_count = 1;
365         page_base = vm_pageout_page_count;
366         ib = 1;
367         is = 1;
368
369         /*
370          * We can cluster only if the page is not clean, busy, or held, and
371          * the page is in the laundry queue.
372          *
373          * During heavy mmap/modification loads the pageout
374          * daemon can really fragment the underlying file
375          * due to flushing pages out of order and not trying to
376          * align the clusters (which leaves sporadic out-of-order
377          * holes).  To solve this problem we do the reverse scan
378          * first and attempt to align our cluster, then do a
379          * forward scan if room remains.
380          */
381 more:
382         while (ib != 0 && pageout_count < vm_pageout_page_count) {
383                 if (ib > pindex) {
384                         ib = 0;
385                         break;
386                 }
387                 if ((p = vm_page_prev(pb)) == NULL ||
388                     vm_page_tryxbusy(p) == 0) {
389                         ib = 0;
390                         break;
391                 }
392                 if (vm_page_wired(p)) {
393                         ib = 0;
394                         vm_page_xunbusy(p);
395                         break;
396                 }
397                 vm_page_test_dirty(p);
398                 if (p->dirty == 0) {
399                         ib = 0;
400                         vm_page_xunbusy(p);
401                         break;
402                 }
403                 if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
404                         vm_page_xunbusy(p);
405                         ib = 0;
406                         break;
407                 }
408                 mc[--page_base] = pb = p;
409                 ++pageout_count;
410                 ++ib;
411
412                 /*
413                  * We are at an alignment boundary.  Stop here, and switch
414                  * directions.  Do not clear ib.
415                  */
416                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
417                         break;
418         }
419         while (pageout_count < vm_pageout_page_count &&
420             pindex + is < object->size) {
421                 if ((p = vm_page_next(ps)) == NULL ||
422                     vm_page_tryxbusy(p) == 0)
423                         break;
424                 if (vm_page_wired(p)) {
425                         vm_page_xunbusy(p);
426                         break;
427                 }
428                 vm_page_test_dirty(p);
429                 if (p->dirty == 0) {
430                         vm_page_xunbusy(p);
431                         break;
432                 }
433                 if (!vm_page_in_laundry(p) || !vm_page_try_remove_write(p)) {
434                         vm_page_xunbusy(p);
435                         break;
436                 }
437                 mc[page_base + pageout_count] = ps = p;
438                 ++pageout_count;
439                 ++is;
440         }
441
442         /*
443          * If we exhausted our forward scan, continue with the reverse scan
444          * when possible, even past an alignment boundary.  This catches
445          * boundary conditions.
446          */
447         if (ib != 0 && pageout_count < vm_pageout_page_count)
448                 goto more;
449
450         return (vm_pageout_flush(&mc[page_base], pageout_count,
451             VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
452 }
453
454 /*
455  * vm_pageout_flush() - launder the given pages
456  *
457  *      The given pages are laundered.  Note that we set up for the start
458  *      of I/O (i.e., busy the page), mark it read-only, and bump the
459  *      object reference count all in here rather than in the parent.  If
460  *      we want the parent to do more sophisticated things we may have to
461  *      change the ordering.
462  *
463  *      The returned runlen is the count of pages between mreq and the
464  *      first page after mreq with status VM_PAGER_AGAIN.
465  *      *eio is set to TRUE if the pager returned VM_PAGER_ERROR or
466  *      VM_PAGER_FAIL for any page in the run.
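 *
 *      As a sketch of the bookkeeping only: for count = 5, mreq = 0, and
 *      per-page pager status { OK, OK, PEND, AGAIN, OK }, the function
 *      returns 4 (the OK and PEND pages) and stores 3 in *prunlen, since
 *      the first VM_PAGER_AGAIN page ends the run that starts at mreq.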
467  */
468 int
469 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
470     boolean_t *eio)
471 {
472         vm_object_t object = mc[0]->object;
473         int pageout_status[count];
474         int numpagedout = 0;
475         int i, runlen;
476
477         VM_OBJECT_ASSERT_WLOCKED(object);
478
479         /*
480          * Initiate I/O.  Mark the pages shared busy and verify that they're
481          * valid and read-only.
482          *
483          * We do not have to fix up the clean/dirty bits here... we can
484          * allow the pager to do it after the I/O completes.
485          *
486          * NOTE! mc[i]->dirty may be partial or fragmented due to an
487          * edge case with file fragments.
488          */
489         for (i = 0; i < count; i++) {
490                 KASSERT(vm_page_all_valid(mc[i]),
491                     ("vm_pageout_flush: partially invalid page %p index %d/%d",
492                         mc[i], i, count));
493                 KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
494                     ("vm_pageout_flush: writeable page %p", mc[i]));
495                 vm_page_busy_downgrade(mc[i]);
496         }
497         vm_object_pip_add(object, count);
498
499         vm_pager_put_pages(object, mc, count, flags, pageout_status);
500
501         runlen = count - mreq;
502         if (eio != NULL)
503                 *eio = FALSE;
504         for (i = 0; i < count; i++) {
505                 vm_page_t mt = mc[i];
506
507                 KASSERT(pageout_status[i] == VM_PAGER_PEND ||
508                     !pmap_page_is_write_mapped(mt),
509                     ("vm_pageout_flush: page %p is not write protected", mt));
510                 switch (pageout_status[i]) {
511                 case VM_PAGER_OK:
512                         /*
513                          * The page may have moved since laundering started, in
514                          * which case it should be left alone.
515                          */
516                         if (vm_page_in_laundry(mt))
517                                 vm_page_deactivate_noreuse(mt);
518                         /* FALLTHROUGH */
519                 case VM_PAGER_PEND:
520                         numpagedout++;
521                         break;
522                 case VM_PAGER_BAD:
523                         /*
524                          * The page is outside the object's range.  We pretend
525                          * that the page out worked and clean the page, so the
526                          * changes will be lost if the page is reclaimed by
527                          * the page daemon.
528                          */
529                         vm_page_undirty(mt);
530                         if (vm_page_in_laundry(mt))
531                                 vm_page_deactivate_noreuse(mt);
532                         break;
533                 case VM_PAGER_ERROR:
534                 case VM_PAGER_FAIL:
535                         /*
536                          * If the page couldn't be paged out to swap because the
537                          * pager wasn't able to find space, place the page in
538                          * the PQ_UNSWAPPABLE holding queue.  This is an
539                          * optimization that prevents the page daemon from
540                          * wasting CPU cycles on pages that cannot be reclaimed
541                          * because no swap device is configured.
542                          *
543                          * Otherwise, reactivate the page so that it doesn't
544                          * clog the laundry and inactive queues.  (We will try
545                          * paging it out again later.)
546                          */
547                         if ((object->flags & OBJ_SWAP) != 0 &&
548                             pageout_status[i] == VM_PAGER_FAIL) {
549                                 vm_page_unswappable(mt);
550                                 numpagedout++;
551                         } else
552                                 vm_page_activate(mt);
553                         if (eio != NULL && i >= mreq && i - mreq < runlen)
554                                 *eio = TRUE;
555                         break;
556                 case VM_PAGER_AGAIN:
557                         if (i >= mreq && i - mreq < runlen)
558                                 runlen = i - mreq;
559                         break;
560                 }
561
562                 /*
563                  * If the operation is still going, leave the page busy to
564                  * block all other accesses. Also, leave the paging in
565                  * progress indicator set so that we don't attempt an object
566                  * collapse.
567                  */
568                 if (pageout_status[i] != VM_PAGER_PEND) {
569                         vm_object_pip_wakeup(object);
570                         vm_page_sunbusy(mt);
571                 }
572         }
573         if (prunlen != NULL)
574                 *prunlen = runlen;
575         return (numpagedout);
576 }
577
578 static void
579 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
580 {
581
582         atomic_store_rel_int(&swapdev_enabled, 1);
583 }
584
585 static void
586 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
587 {
588
589         if (swap_pager_nswapdev() == 1)
590                 atomic_store_rel_int(&swapdev_enabled, 0);
591 }
592
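/*
 * Note that swapdev_enabled, toggled by the handlers above, also gates the
 * PQ_UNSWAPPABLE scan in vm_pageout_launder() below: pages parked there by
 * vm_pageout_flush() are reconsidered for laundering only once a swap
 * device is configured.
 */
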
593 /*
594  * Attempt to acquire all of the necessary locks to launder a page and
595  * then call through the clustering layer to PUTPAGES.  Wait a short
596  * time for a vnode lock.
597  *
598  * Requires the page and object lock on entry, releases both before return.
599  * Returns 0 on success and an errno otherwise.
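 *
 * For a vnode-backed object the ordering below is, roughly: unbusy the
 * page, vn_start_write(), take an object reference, drop the object lock,
 * vget() the vnode with a bounded wait, re-lock the object, and then
 * re-validate the object/vnode association and the page itself before
 * exclusive-busying the page again.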
600  */
601 static int
602 vm_pageout_clean(vm_page_t m, int *numpagedout)
603 {
604         struct vnode *vp;
605         struct mount *mp;
606         vm_object_t object;
607         vm_pindex_t pindex;
608         int error;
609
610         object = m->object;
611         VM_OBJECT_ASSERT_WLOCKED(object);
612         error = 0;
613         vp = NULL;
614         mp = NULL;
615
616         /*
617          * The object is already known NOT to be dead.  It
618          * is possible for the vget() to block the whole
619          * pageout daemon, but the new low-memory handling
620          * code should prevent it.
621          *
622          * We can't wait forever for the vnode lock; we might
623          * deadlock due to a vn_read() getting stuck in
624          * vm_wait while holding this vnode.  We skip the
625          * vnode if we can't get it in a reasonable amount
626          * of time.
627          */
628         if (object->type == OBJT_VNODE) {
629                 vm_page_xunbusy(m);
630                 vp = object->handle;
631                 if (vp->v_type == VREG &&
632                     vn_start_write(vp, &mp, V_NOWAIT) != 0) {
633                         mp = NULL;
634                         error = EDEADLK;
635                         goto unlock_all;
636                 }
637                 KASSERT(mp != NULL,
638                     ("vp %p with NULL v_mount", vp));
639                 vm_object_reference_locked(object);
640                 pindex = m->pindex;
641                 VM_OBJECT_WUNLOCK(object);
642                 if (vget(vp, vn_lktype_write(NULL, vp) | LK_TIMELOCK) != 0) {
643                         vp = NULL;
644                         error = EDEADLK;
645                         goto unlock_mp;
646                 }
647                 VM_OBJECT_WLOCK(object);
648
649                 /*
650                  * Ensure that the object and vnode were not disassociated
651                  * while locks were dropped.
652                  */
653                 if (vp->v_object != object) {
654                         error = ENOENT;
655                         goto unlock_all;
656                 }
657
658                 /*
659                  * While the object was unlocked, the page may have been:
660                  * (1) moved to a different queue,
661                  * (2) reallocated to a different object,
662                  * (3) reallocated to a different offset, or
663                  * (4) cleaned.
664                  */
665                 if (!vm_page_in_laundry(m) || m->object != object ||
666                     m->pindex != pindex || m->dirty == 0) {
667                         error = ENXIO;
668                         goto unlock_all;
669                 }
670
671                 /*
672                  * The page may have been busied while the object lock was
673                  * released.
674                  */
675                 if (vm_page_tryxbusy(m) == 0) {
676                         error = EBUSY;
677                         goto unlock_all;
678                 }
679         }
680
681         /*
682          * Remove all writeable mappings, failing if the page is wired.
683          */
684         if (!vm_page_try_remove_write(m)) {
685                 vm_page_xunbusy(m);
686                 error = EBUSY;
687                 goto unlock_all;
688         }
689
690         /*
691          * If a page is dirty, then it is either being washed
692          * (but not yet cleaned) or it is still in the
693          * laundry.  If it is still in the laundry, then we
694          * start the cleaning operation.
695          */
696         if ((*numpagedout = vm_pageout_cluster(m)) == 0)
697                 error = EIO;
698
699 unlock_all:
700         VM_OBJECT_WUNLOCK(object);
701
702 unlock_mp:
703         if (mp != NULL) {
704                 if (vp != NULL)
705                         vput(vp);
706                 vm_object_deallocate(object);
707                 vn_finished_write(mp);
708         }
709
710         return (error);
711 }
712
713 /*
714  * Attempt to launder the specified number of pages.
715  *
716  * Returns the number of pages successfully laundered.
717  */
718 static int
719 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
720 {
721         struct scan_state ss;
722         struct vm_pagequeue *pq;
723         vm_object_t object;
724         vm_page_t m, marker;
725         vm_page_astate_t new, old;
726         int act_delta, error, numpagedout, queue, refs, starting_target;
727         int vnodes_skipped;
728         bool pageout_ok;
729
730         object = NULL;
731         starting_target = launder;
732         vnodes_skipped = 0;
733
734         /*
735          * Scan the laundry queues for pages eligible to be laundered.  We stop
736          * once the target number of dirty pages have been laundered, or once
737          * we've reached the end of the queue.  A single iteration of this loop
738          * may cause more than one page to be laundered because of clustering.
739          *
740          * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
741          * swap devices are configured.
742          */
743         if (atomic_load_acq_int(&swapdev_enabled))
744                 queue = PQ_UNSWAPPABLE;
745         else
746                 queue = PQ_LAUNDRY;
747
748 scan:
749         marker = &vmd->vmd_markers[queue];
750         pq = &vmd->vmd_pagequeues[queue];
751         vm_pagequeue_lock(pq);
752         vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
753         while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
754                 if (__predict_false((m->flags & PG_MARKER) != 0))
755                         continue;
756
757                 /*
758                  * Don't touch a page that was removed from the queue after the
759                  * page queue lock was released.  Otherwise, ensure that any
760                  * pending queue operations, such as dequeues for wired pages,
761                  * are handled.
762                  */
763                 if (vm_pageout_defer(m, queue, true))
764                         continue;
765
766                 /*
767                  * Lock the page's object.
768                  */
769                 if (object == NULL || object != m->object) {
770                         if (object != NULL)
771                                 VM_OBJECT_WUNLOCK(object);
772                         object = atomic_load_ptr(&m->object);
773                         if (__predict_false(object == NULL))
774                                 /* The page is being freed by another thread. */
775                                 continue;
776
777                         /* Depends on type-stability. */
778                         VM_OBJECT_WLOCK(object);
779                         if (__predict_false(m->object != object)) {
780                                 VM_OBJECT_WUNLOCK(object);
781                                 object = NULL;
782                                 continue;
783                         }
784                 }
785
786                 if (vm_page_tryxbusy(m) == 0)
787                         continue;
788
789                 /*
790                  * Check for wirings now that we hold the object lock and have
791                  * exclusively busied the page.  If the page is mapped, it may
792                  * still be wired by pmap lookups.  The call to
793                  * vm_page_try_remove_all() below atomically checks for such
794                  * wirings and removes mappings.  If the page is unmapped, the
795                  * wire count is guaranteed not to increase after this check.
796                  */
797                 if (__predict_false(vm_page_wired(m)))
798                         goto skip_page;
799
800                 /*
801                  * Invalid pages can be easily freed.  They cannot be
802                  * mapped; vm_page_free() asserts this.
803                  */
804                 if (vm_page_none_valid(m))
805                         goto free_page;
806
807                 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
808
809                 for (old = vm_page_astate_load(m);;) {
810                         /*
811                          * Check to see if the page has been removed from the
812                          * queue since the first such check.  Leave it alone if
813                          * so, discarding any references collected by
814                          * pmap_ts_referenced().
815                          */
816                         if (__predict_false(_vm_page_queue(old) == PQ_NONE))
817                                 goto skip_page;
818
819                         new = old;
820                         act_delta = refs;
821                         if ((old.flags & PGA_REFERENCED) != 0) {
822                                 new.flags &= ~PGA_REFERENCED;
823                                 act_delta++;
824                         }
825                         if (act_delta == 0) {
826                                 ;
827                         } else if (object->ref_count != 0) {
828                                 /*
829                                  * Increase the activation count if the page was
830                                  * referenced while in the laundry queue.  This
831                                  * makes it less likely that the page will be
832                                  * returned prematurely to the laundry queue.
833                                  */
834                                 new.act_count += ACT_ADVANCE +
835                                     act_delta;
836                                 if (new.act_count > ACT_MAX)
837                                         new.act_count = ACT_MAX;
838
839                                 new.flags &= ~PGA_QUEUE_OP_MASK;
840                                 new.flags |= PGA_REQUEUE;
841                                 new.queue = PQ_ACTIVE;
842                                 if (!vm_page_pqstate_commit(m, &old, new))
843                                         continue;
844
845                                 /*
846                                  * If this was a background laundering, count
847                                  * activated pages towards our target.  The
848                                  * purpose of background laundering is to ensure
849                                  * that pages are eventually cycled through the
850                                  * laundry queue, and an activation is a valid
851                                  * way out.
852                                  */
853                                 if (!in_shortfall)
854                                         launder--;
855                                 VM_CNT_INC(v_reactivated);
856                                 goto skip_page;
857                         } else if ((object->flags & OBJ_DEAD) == 0) {
858                                 new.flags |= PGA_REQUEUE;
859                                 if (!vm_page_pqstate_commit(m, &old, new))
860                                         continue;
861                                 goto skip_page;
862                         }
863                         break;
864                 }
865
866                 /*
867                  * If the page appears to be clean at the machine-independent
868                  * layer, then remove all of its mappings from the pmap in
869                  * anticipation of freeing it.  If, however, any of the page's
870                  * mappings allow write access, then the page may still be
871                  * modified until the last of those mappings are removed.
872                  */
873                 if (object->ref_count != 0) {
874                         vm_page_test_dirty(m);
875                         if (m->dirty == 0 && !vm_page_try_remove_all(m))
876                                 goto skip_page;
877                 }
878
879                 /*
880                  * Clean pages are freed, and dirty pages are paged out unless
881                  * they belong to a dead object.  Requeueing dirty pages from
882                  * dead objects is pointless, as they are being paged out and
883                  * freed by the thread that destroyed the object.
884                  */
885                 if (m->dirty == 0) {
886 free_page:
887                         /*
888                          * Now we are guaranteed that no other threads are
889                          * manipulating the page, check for a last-second
890                          * reference.
891                          */
892                         if (vm_pageout_defer(m, queue, true))
893                                 goto skip_page;
894                         vm_page_free(m);
895                         VM_CNT_INC(v_dfree);
896                 } else if ((object->flags & OBJ_DEAD) == 0) {
897                         if ((object->flags & OBJ_SWAP) == 0 &&
898                             object->type != OBJT_DEFAULT)
899                                 pageout_ok = true;
900                         else if (disable_swap_pageouts)
901                                 pageout_ok = false;
902                         else
903                                 pageout_ok = true;
904                         if (!pageout_ok) {
905                                 vm_page_launder(m);
906                                 goto skip_page;
907                         }
908
909                         /*
910                          * Form a cluster with adjacent, dirty pages from the
911                          * same object, and page out that entire cluster.
912                          *
913                          * The adjacent, dirty pages must also be in the
914                          * laundry.  However, their mappings are not checked
915                          * for new references.  Consequently, a recently
916                          * referenced page may be paged out.  However, that
917                          * page will not be prematurely reclaimed.  After page
918                          * out, the page will be placed in the inactive queue,
919                          * where any new references will be detected and the
920                          * page reactivated.
921                          */
922                         error = vm_pageout_clean(m, &numpagedout);
923                         if (error == 0) {
924                                 launder -= numpagedout;
925                                 ss.scanned += numpagedout;
926                         } else if (error == EDEADLK) {
927                                 pageout_lock_miss++;
928                                 vnodes_skipped++;
929                         }
930                         object = NULL;
931                 } else {
932 skip_page:
933                         vm_page_xunbusy(m);
934                 }
935         }
936         if (object != NULL) {
937                 VM_OBJECT_WUNLOCK(object);
938                 object = NULL;
939         }
940         vm_pagequeue_lock(pq);
941         vm_pageout_end_scan(&ss);
942         vm_pagequeue_unlock(pq);
943
944         if (launder > 0 && queue == PQ_UNSWAPPABLE) {
945                 queue = PQ_LAUNDRY;
946                 goto scan;
947         }
948
949         /*
950          * Wakeup the sync daemon if we skipped a vnode in a writeable object
951          * and we didn't launder enough pages.
952          */
953         if (vnodes_skipped > 0 && launder > 0)
954                 (void)speedup_syncer();
955
956         return (starting_target - launder);
957 }
958
959 /*
960  * Compute the integer square root.
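 *
 * This is the usual bit-by-bit method: "bit" starts at the largest power of
 * four not exceeding num, two bits of the argument are consumed per
 * iteration, and the root is accumulated one bit at a time.  As a quick
 * check of the expected results, isqrt(27) == 5 and isqrt(1024) == 32.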
961  */
962 static u_int
963 isqrt(u_int num)
964 {
965         u_int bit, root, tmp;
966
967         bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
968         root = 0;
969         while (bit != 0) {
970                 tmp = root + bit;
971                 root >>= 1;
972                 if (num >= tmp) {
973                         num -= tmp;
974                         root += bit;
975                 }
976                 bit >>= 2;
977         }
978         return (root);
979 }
980
981 /*
982  * Perform the work of the laundry thread: periodically wake up and determine
983  * whether any pages need to be laundered.  If so, determine the number of pages
984  * that need to be laundered, and launder them.
985  */
986 static void
987 vm_pageout_laundry_worker(void *arg)
988 {
989         struct vm_domain *vmd;
990         struct vm_pagequeue *pq;
991         uint64_t nclean, ndirty, nfreed;
992         int domain, last_target, launder, shortfall, shortfall_cycle, target;
993         bool in_shortfall;
994
995         domain = (uintptr_t)arg;
996         vmd = VM_DOMAIN(domain);
997         pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
998         KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
999
1000         shortfall = 0;
1001         in_shortfall = false;
1002         shortfall_cycle = 0;
1003         last_target = target = 0;
1004         nfreed = 0;
1005
1006         /*
1007          * Calls to these handlers are serialized by the swap syscall lock.
1008          */
1009         (void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
1010             EVENTHANDLER_PRI_ANY);
1011         (void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
1012             EVENTHANDLER_PRI_ANY);
1013
1014         /*
1015          * The pageout laundry worker is never done, so loop forever.
1016          */
1017         for (;;) {
1018                 KASSERT(target >= 0, ("negative target %d", target));
1019                 KASSERT(shortfall_cycle >= 0,
1020                     ("negative cycle %d", shortfall_cycle));
1021                 launder = 0;
1022
1023                 /*
1024                  * First determine whether we need to launder pages to meet a
1025                  * shortage of free pages.
1026                  */
1027                 if (shortfall > 0) {
1028                         in_shortfall = true;
1029                         shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
1030                         target = shortfall;
1031                 } else if (!in_shortfall)
1032                         goto trybackground;
1033                 else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
1034                         /*
1035                          * We recently entered shortfall and began laundering
1036                          * pages.  If we have completed that laundering run
1037                          * (and we are no longer in shortfall) or we have met
1038                          * our laundry target through other activity, then we
1039                          * can stop laundering pages.
1040                          */
1041                         in_shortfall = false;
1042                         target = 0;
1043                         goto trybackground;
1044                 }
1045                 launder = target / shortfall_cycle--;
1046                 goto dolaundry;
1047
1048                 /*
1049                  * There's no immediate need to launder any pages; see if we
1050                  * meet the conditions to perform background laundering:
1051                  *
1052                  * 1. The ratio of dirty to clean inactive pages exceeds the
1053                  *    background laundering threshold, or
1054                  * 2. we haven't yet reached the target of the current
1055                  *    background laundering run.
1056                  *
1057                  * The background laundering threshold is not a constant.
1058                  * Instead, it is a slowly growing function of the number of
1059                  * clean pages freed by the page daemon since the last
1060                  * background laundering.  Thus, as the ratio of dirty to
1061                  * clean inactive pages grows, the amount of memory pressure
1062                  * required to trigger laundering decreases.  We ensure
1063                  * that the threshold is non-zero after an inactive queue
1064                  * scan, even if that scan failed to free a single clean page.
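                 *
                 * As a sketch of the trigger below: while nfreed is still
                 * below (vmd_free_target - vmd_free_min), the isqrt()
                 * factor is 1 and a background run starts only once the
                 * laundry queue is at least as long as the pool of clean
                 * pages; after several times that many clean pages have
                 * been freed, the factor grows to 2 and laundering starts
                 * once the laundry queue reaches roughly half the size of
                 * the clean pool.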
1065                  */
1066 trybackground:
1067                 nclean = vmd->vmd_free_count +
1068                     vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
1069                 ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
1070                 if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
1071                     vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
1072                         target = vmd->vmd_background_launder_target;
1073                 }
1074
1075                 /*
1076                  * We have a non-zero background laundering target.  If we've
1077                  * laundered up to our maximum without observing a page daemon
1078                  * request, just stop.  This is a safety belt that ensures we
1079                  * don't launder an excessive amount if memory pressure is low
1080                  * and the ratio of dirty to clean pages is large.  Otherwise,
1081                  * proceed at the background laundering rate.
1082                  */
1083                 if (target > 0) {
1084                         if (nfreed > 0) {
1085                                 nfreed = 0;
1086                                 last_target = target;
1087                         } else if (last_target - target >=
1088                             vm_background_launder_max * 1024 / PAGE_SIZE) {
1089                                 target = 0;
1090                         }
1091                         launder = vm_background_launder_rate * 1024 / PAGE_SIZE;
1092                         launder /= VM_LAUNDER_RATE;
1093                         if (launder > target)
1094                                 launder = target;
1095                 }
1096
1097 dolaundry:
1098                 if (launder > 0) {
1099                         /*
1100                          * Because of I/O clustering, the number of laundered
1101                          * pages could exceed "target" by the maximum size of
1102                          * a cluster minus one.
1103                          */
1104                         target -= min(vm_pageout_launder(vmd, launder,
1105                             in_shortfall), target);
1106                         pause("laundp", hz / VM_LAUNDER_RATE);
1107                 }
1108
1109                 /*
1110                  * If we're not currently laundering pages and the page daemon
1111                  * hasn't posted a new request, sleep until the page daemon
1112                  * kicks us.
1113                  */
1114                 vm_pagequeue_lock(pq);
1115                 if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
1116                         (void)mtx_sleep(&vmd->vmd_laundry_request,
1117                             vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1118
1119                 /*
1120                  * If the pagedaemon has indicated that it's in shortfall, start
1121                  * a shortfall laundering unless we're already in the middle of
1122                  * one.  This may preempt a background laundering.
1123                  */
1124                 if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
1125                     (!in_shortfall || shortfall_cycle == 0)) {
1126                         shortfall = vm_laundry_target(vmd) +
1127                             vmd->vmd_pageout_deficit;
1128                         target = 0;
1129                 } else
1130                         shortfall = 0;
1131
1132                 if (target == 0)
1133                         vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
1134                 nfreed += vmd->vmd_clean_pages_freed;
1135                 vmd->vmd_clean_pages_freed = 0;
1136                 vm_pagequeue_unlock(pq);
1137         }
1138 }
1139
1140 /*
1141  * Compute the number of pages we want to try to move from the
1142  * active queue to either the inactive or laundry queue.
1143  *
1144  * When scanning active pages during a shortage, we make clean pages
1145  * count more heavily towards the page shortage than dirty pages.
1146  * This is because dirty pages must be laundered before they can be
1147  * reused and thus have less utility when attempting to quickly
1148  * alleviate a free page shortage.  However, this weighting also
1149  * causes the scan to deactivate dirty pages more aggressively,
1150  * improving the effectiveness of clustering.
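 *
 * As a worked example with the default act_scan_laundry_weight of 3: if
 * vmd_inactive_target plus the paging target exceeds the inactive queue
 * length by 90 pages while the laundry queue holds 60 pages, the function
 * returns (90 - 60 / 3) * 3 == 210.  The active scan then credits each
 * clean deactivation as 3 and each deactivation to the laundry queue as 1
 * against that total (see ps_delta below), so the shortage is met by about
 * 70 clean pages or a correspondingly larger number of dirty ones.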
1151  */
1152 static int
1153 vm_pageout_active_target(struct vm_domain *vmd)
1154 {
1155         int shortage;
1156
1157         shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
1158             (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
1159             vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
1160         shortage *= act_scan_laundry_weight;
1161         return (shortage);
1162 }
1163
1164 /*
1165  * Scan the active queue.  If there is no shortage of inactive pages, scan a
1166  * small portion of the queue in order to maintain quasi-LRU.
1167  */
1168 static void
1169 vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
1170 {
1171         struct scan_state ss;
1172         vm_object_t object;
1173         vm_page_t m, marker;
1174         struct vm_pagequeue *pq;
1175         vm_page_astate_t old, new;
1176         long min_scan;
1177         int act_delta, max_scan, ps_delta, refs, scan_tick;
1178         uint8_t nqueue;
1179
1180         marker = &vmd->vmd_markers[PQ_ACTIVE];
1181         pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1182         vm_pagequeue_lock(pq);
1183
1184         /*
1185          * If we're just idle polling, attempt to visit every
1186          * active page within 'update_period' seconds.
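         *
         * As a rough example: with 100000 active pages, an update period
         * of, say, 600 seconds, and one second elapsed since the last
         * scan, min_scan is 100000 * hz / (hz * 600), i.e. about 166
         * pages for this pass.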
1187          */
1188         scan_tick = ticks;
1189         if (vm_pageout_update_period != 0) {
1190                 min_scan = pq->pq_cnt;
1191                 min_scan *= scan_tick - vmd->vmd_last_active_scan;
1192                 min_scan /= hz * vm_pageout_update_period;
1193         } else
1194                 min_scan = 0;
1195         if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
1196                 vmd->vmd_last_active_scan = scan_tick;
1197
1198         /*
1199          * Scan the active queue for pages that can be deactivated.  Update
1200          * the per-page activity counter and use it to identify deactivation
1201          * candidates.  Held pages may be deactivated.
1202          *
1203          * To avoid requeuing each page that remains in the active queue, we
1204          * implement the CLOCK algorithm.  To keep the implementation of the
1205          * enqueue operation consistent for all page queues, we use two hands,
1206          * represented by marker pages. Scans begin at the first hand, which
1207          * precedes the second hand in the queue.  When the two hands meet,
1208          * they are moved back to the head and tail of the queue, respectively,
1209          * and scanning resumes.
1210          */
1211         max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
1212 act_scan:
1213         vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
1214         while ((m = vm_pageout_next(&ss, false)) != NULL) {
1215                 if (__predict_false(m == &vmd->vmd_clock[1])) {
1216                         vm_pagequeue_lock(pq);
1217                         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1218                         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
1219                         TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
1220                             plinks.q);
1221                         TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
1222                             plinks.q);
1223                         max_scan -= ss.scanned;
1224                         vm_pageout_end_scan(&ss);
1225                         goto act_scan;
1226                 }
1227                 if (__predict_false((m->flags & PG_MARKER) != 0))
1228                         continue;
1229
1230                 /*
1231                  * Don't touch a page that was removed from the queue after the
1232                  * page queue lock was released.  Otherwise, ensure that any
1233                  * pending queue operations, such as dequeues for wired pages,
1234                  * are handled.
1235                  */
1236                 if (vm_pageout_defer(m, PQ_ACTIVE, true))
1237                         continue;
1238
1239                 /*
1240                  * A page's object pointer may be set to NULL before
1241                  * the object lock is acquired.
1242                  */
1243                 object = atomic_load_ptr(&m->object);
1244                 if (__predict_false(object == NULL))
1245                         /*
1246                          * The page has been removed from its object.
1247                          */
1248                         continue;
1249
1250                 /* Deferred free of swap space. */
1251                 if ((m->a.flags & PGA_SWAP_FREE) != 0 &&
1252                     VM_OBJECT_TRYWLOCK(object)) {
1253                         if (m->object == object)
1254                                 vm_pager_page_unswapped(m);
1255                         VM_OBJECT_WUNLOCK(object);
1256                 }
1257
1258                 /*
1259                  * Check to see "how much" the page has been used.
1260                  *
1261                  * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1262                  * that a reference from a concurrently destroyed mapping is
1263                  * observed here and now.
1264                  *
1265                  * Perform an unsynchronized object ref count check.  While
1266                  * the page lock ensures that the page is not reallocated to
1267                  * another object, in particular, one with unmanaged mappings
1268                  * that cannot support pmap_ts_referenced(), two races are,
1269                  * nonetheless, possible:
1270                  * 1) The count was transitioning to zero, but we saw a non-
1271                  *    zero value.  pmap_ts_referenced() will return zero
1272                  *    because the page is not mapped.
1273                  * 2) The count was transitioning to one, but we saw zero.
1274                  *    This race delays the detection of a new reference.  At
1275                  *    worst, we will deactivate and reactivate the page.
1276                  */
1277                 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1278
1279                 old = vm_page_astate_load(m);
1280                 do {
1281                         /*
1282                          * Check to see if the page has been removed from the
1283                          * queue since the first such check.  Leave it alone if
1284                          * so, discarding any references collected by
1285                          * pmap_ts_referenced().
1286                          */
1287                         if (__predict_false(_vm_page_queue(old) == PQ_NONE)) {
1288                                 ps_delta = 0;
1289                                 break;
1290                         }
1291
1292                         /*
1293                          * Advance or decay the act_count based on recent usage.
1294                          */
1295                         new = old;
1296                         act_delta = refs;
1297                         if ((old.flags & PGA_REFERENCED) != 0) {
1298                                 new.flags &= ~PGA_REFERENCED;
1299                                 act_delta++;
1300                         }
1301                         if (act_delta != 0) {
1302                                 new.act_count += ACT_ADVANCE + act_delta;
1303                                 if (new.act_count > ACT_MAX)
1304                                         new.act_count = ACT_MAX;
1305                         } else {
1306                                 new.act_count -= min(new.act_count,
1307                                     ACT_DECLINE);
1308                         }
1309
1310                         if (new.act_count > 0) {
1311                                 /*
1312                                  * Adjust the activation count and keep the page
1313                                  * in the active queue.  The count might be left
1314                                  * unchanged if it is saturated.  The page may
1315                                  * have been moved to a different queue since we
1316                                  * started the scan, in which case we move it
1317                                  * back.
1318                                  */
1319                                 ps_delta = 0;
1320                                 if (old.queue != PQ_ACTIVE) {
1321                                         new.flags &= ~PGA_QUEUE_OP_MASK;
1322                                         new.flags |= PGA_REQUEUE;
1323                                         new.queue = PQ_ACTIVE;
1324                                 }
1325                         } else {
1326                                 /*
1327                                  * When not short for inactive pages, let dirty
1328                                  * pages go through the inactive queue before
1329                                  * moving to the laundry queue.  This gives them
1330                                  * some extra time to be reactivated,
1331                                  * potentially avoiding an expensive pageout.
1332                                  * However, during a page shortage, the inactive
1333                                  * queue is necessarily small, and so dirty
1334                                  * pages would only spend a trivial amount of
1335                                  * time in the inactive queue.  Therefore, we
1336                                  * might as well place them directly in the
1337                                  * laundry queue to reduce queuing overhead.
1338                                  *
1339                                  * Calling vm_page_test_dirty() here would
1340                                  * require acquisition of the object's write
1341                                  * lock.  However, during a page shortage,
1342                                  * directing dirty pages into the laundry queue
1343                                  * is only an optimization and not a
1344                                  * requirement.  Therefore, we simply rely on
1345                                  * the opportunistic updates to the page's dirty
1346                                  * field by the pmap.
1347                                  */
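                                     /*
                                      * Note on the progress accounting below
                                      * (an editorial illustration of the code
                                      * that follows): a clean page moved to
                                      * PQ_INACTIVE is credited
                                      * act_scan_laundry_weight units against
                                      * the page shortage, while a dirty page
                                      * headed for PQ_LAUNDRY is credited only
                                      * one unit, since it must still be
                                      * laundered before it can be freed.
                                      */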
1348                                 if (page_shortage <= 0) {
1349                                         nqueue = PQ_INACTIVE;
1350                                         ps_delta = 0;
1351                                 } else if (m->dirty == 0) {
1352                                         nqueue = PQ_INACTIVE;
1353                                         ps_delta = act_scan_laundry_weight;
1354                                 } else {
1355                                         nqueue = PQ_LAUNDRY;
1356                                         ps_delta = 1;
1357                                 }
1358
1359                                 new.flags &= ~PGA_QUEUE_OP_MASK;
1360                                 new.flags |= PGA_REQUEUE;
1361                                 new.queue = nqueue;
1362                         }
1363                 } while (!vm_page_pqstate_commit(m, &old, new));
1364
1365                 page_shortage -= ps_delta;
1366         }
1367         vm_pagequeue_lock(pq);
1368         TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1369         TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
1370         vm_pageout_end_scan(&ss);
1371         vm_pagequeue_unlock(pq);
1372 }
1373
1374 static int
1375 vm_pageout_reinsert_inactive_page(struct vm_pagequeue *pq, vm_page_t marker,
1376     vm_page_t m)
1377 {
1378         vm_page_astate_t as;
1379
1380         vm_pagequeue_assert_locked(pq);
1381
1382         as = vm_page_astate_load(m);
1383         if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0)
1384                 return (0);
1385         vm_page_aflag_set(m, PGA_ENQUEUED);
1386         TAILQ_INSERT_BEFORE(marker, m, plinks.q);
1387         return (1);
1388 }
1389
1390 /*
1391  * Re-add stuck pages to the inactive queue.  We will examine them again
1392  * during the next scan.  If the queue state of a page has changed since
1393  * it was physically removed from the page queue in
1394  * vm_pageout_collect_batch(), don't do anything with that page.
1395  */
1396 static void
1397 vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
1398     vm_page_t m)
1399 {
1400         struct vm_pagequeue *pq;
1401         vm_page_t marker;
1402         int delta;
1403
1404         delta = 0;
1405         marker = ss->marker;
1406         pq = ss->pq;
1407
1408         if (m != NULL) {
1409                 if (vm_batchqueue_insert(bq, m))
1410                         return;
1411                 vm_pagequeue_lock(pq);
1412                 delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
1413         } else
1414                 vm_pagequeue_lock(pq);
1415         while ((m = vm_batchqueue_pop(bq)) != NULL)
1416                 delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
1417         vm_pagequeue_cnt_add(pq, delta);
1418         vm_pagequeue_unlock(pq);
1419         vm_batchqueue_init(bq);
1420 }
1421
1422 static void
1423 vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
1424 {
1425         struct timeval start, end;
1426         struct scan_state ss;
1427         struct vm_batchqueue rq;
1428         struct vm_page marker_page;
1429         vm_page_t m, marker;
1430         struct vm_pagequeue *pq;
1431         vm_object_t object;
1432         vm_page_astate_t old, new;
1433         int act_delta, addl_page_shortage, starting_page_shortage, refs;
1434
1435         object = NULL;
1436         vm_batchqueue_init(&rq);
1437         getmicrouptime(&start);
1438
1439         /*
1440          * The addl_page_shortage is an estimate of the number of temporarily
1441          * stuck pages in the inactive queue.  In other words, the
1442          * number of pages from the inactive count that should be
1443          * discounted in setting the target for the active queue scan.
1444          */
1445         addl_page_shortage = 0;
1446
1447         /*
1448          * Start scanning the inactive queue for pages that we can free.  The
1449          * scan will stop when we reach the target or we have scanned the
1450          * entire queue.  (Note that m->a.act_count is not used to make
1451          * decisions for the inactive queue, only for the active queue.)
1452          */
1453         starting_page_shortage = page_shortage;
1454         marker = &marker_page;
1455         vm_page_init_marker(marker, PQ_INACTIVE, 0);
1456         pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1457         vm_pagequeue_lock(pq);
1458         vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
1459         while (page_shortage > 0 && (m = vm_pageout_next(&ss, true)) != NULL) {
1460                 KASSERT((m->flags & PG_MARKER) == 0,
1461                     ("marker page %p was dequeued", m));
1462
1463                 /*
1464                  * Don't touch a page that was removed from the queue after the
1465                  * page queue lock was released.  Otherwise, ensure that any
1466                  * pending queue operations, such as dequeues for wired pages,
1467                  * are handled.
1468                  */
1469                 if (vm_pageout_defer(m, PQ_INACTIVE, false))
1470                         continue;
1471
1472                 /*
1473                  * Lock the page's object.
1474                  */
1475                 if (object == NULL || object != m->object) {
1476                         if (object != NULL)
1477                                 VM_OBJECT_WUNLOCK(object);
1478                         object = atomic_load_ptr(&m->object);
1479                         if (__predict_false(object == NULL))
1480                                 /* The page is being freed by another thread. */
1481                                 continue;
1482
1483                         /* Depends on type-stability. */
1484                         VM_OBJECT_WLOCK(object);
1485                         if (__predict_false(m->object != object)) {
1486                                 VM_OBJECT_WUNLOCK(object);
1487                                 object = NULL;
1488                                 goto reinsert;
1489                         }
1490                 }
1491
1492                 if (vm_page_tryxbusy(m) == 0) {
1493                         /*
1494                          * Don't mess with busy pages.  Leave them at
1495                          * the front of the queue.  Most likely, they
1496                          * are being paged out and will leave the
1497                          * queue shortly after the scan finishes.  So,
1498                          * they ought to be discounted from the
1499                          * inactive count.
1500                          */
1501                         addl_page_shortage++;
1502                         goto reinsert;
1503                 }
1504
1505                 /* Deferred free of swap space. */
1506                 if ((m->a.flags & PGA_SWAP_FREE) != 0)
1507                         vm_pager_page_unswapped(m);
1508
1509                 /*
1510                  * Check for wirings now that we hold the object lock and have
1511                  * exclusively busied the page.  If the page is mapped, it may
1512                  * still be wired by pmap lookups.  The call to
1513                  * vm_page_try_remove_all() below atomically checks for such
1514                  * wirings and removes mappings.  If the page is unmapped, the
1515                  * wire count is guaranteed not to increase after this check.
1516                  */
1517                 if (__predict_false(vm_page_wired(m)))
1518                         goto skip_page;
1519
1520                 /*
1521                  * Invalid pages can be easily freed. They cannot be
1522                  * mapped, vm_page_free() asserts this.
1523                  */
1524                 if (vm_page_none_valid(m))
1525                         goto free_page;
1526
1527                 refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1528
1529                 for (old = vm_page_astate_load(m);;) {
1530                         /*
1531                          * Check to see if the page has been removed from the
1532                          * queue since the first such check.  Leave it alone if
1533                          * so, discarding any references collected by
1534                          * pmap_ts_referenced().
1535                          */
1536                         if (__predict_false(_vm_page_queue(old) == PQ_NONE))
1537                                 goto skip_page;
1538
1539                         new = old;
1540                         act_delta = refs;
1541                         if ((old.flags & PGA_REFERENCED) != 0) {
1542                                 new.flags &= ~PGA_REFERENCED;
1543                                 act_delta++;
1544                         }
1545                         if (act_delta == 0) {
1546                                 ;
1547                         } else if (object->ref_count != 0) {
1548                                 /*
1549                                  * Increase the activation count if the
1550                                  * page was referenced while in the
1551                                  * inactive queue.  This makes it less
1552                                  * likely that the page will be returned
1553                                  * prematurely to the inactive queue.
1554                                  */
1555                                 new.act_count += ACT_ADVANCE +
1556                                     act_delta;
1557                                 if (new.act_count > ACT_MAX)
1558                                         new.act_count = ACT_MAX;
1559
1560                                 new.flags &= ~PGA_QUEUE_OP_MASK;
1561                                 new.flags |= PGA_REQUEUE;
1562                                 new.queue = PQ_ACTIVE;
1563                                 if (!vm_page_pqstate_commit(m, &old, new))
1564                                         continue;
1565
1566                                 VM_CNT_INC(v_reactivated);
1567                                 goto skip_page;
1568                         } else if ((object->flags & OBJ_DEAD) == 0) {
1569                                 new.queue = PQ_INACTIVE;
1570                                 new.flags |= PGA_REQUEUE;
1571                                 if (!vm_page_pqstate_commit(m, &old, new))
1572                                         continue;
1573                                 goto skip_page;
1574                         }
1575                         break;
1576                 }
1577
1578                 /*
1579                  * If the page appears to be clean at the machine-independent
1580                  * layer, then remove all of its mappings from the pmap in
1581                  * anticipation of freeing it.  If, however, any of the page's
1582                  * mappings allow write access, then the page may still be
1583                  * modified until the last of those mappings are removed.
1584                  */
1585                 if (object->ref_count != 0) {
1586                         vm_page_test_dirty(m);
1587                         if (m->dirty == 0 && !vm_page_try_remove_all(m))
1588                                 goto skip_page;
1589                 }
1590
1591                 /*
1592                  * Clean pages can be freed, but dirty pages must be sent back
1593                  * to the laundry, unless they belong to a dead object.
1594                  * Requeueing dirty pages from dead objects is pointless, as
1595                  * they are being paged out and freed by the thread that
1596                  * destroyed the object.
1597                  */
1598                 if (m->dirty == 0) {
1599 free_page:
1600                         /*
1601                          * Now we are guaranteed that no other threads are
1602                          * manipulating the page, check for a last-second
1603                          * reference that would save it from doom.
1604                          */
1605                         if (vm_pageout_defer(m, PQ_INACTIVE, false))
1606                                 goto skip_page;
1607
1608                         /*
1609                          * Because we dequeued the page and have already checked
1610                          * for pending dequeue and enqueue requests, we can
1611                          * safely disassociate the page from the inactive queue
1612                          * without holding the queue lock.
1613                          */
1614                         m->a.queue = PQ_NONE;
1615                         vm_page_free(m);
1616                         page_shortage--;
1617                         continue;
1618                 }
1619                 if ((object->flags & OBJ_DEAD) == 0)
1620                         vm_page_launder(m);
1621 skip_page:
1622                 vm_page_xunbusy(m);
1623                 continue;
1624 reinsert:
1625                 vm_pageout_reinsert_inactive(&ss, &rq, m);
1626         }
1627         if (object != NULL)
1628                 VM_OBJECT_WUNLOCK(object);
1629         vm_pageout_reinsert_inactive(&ss, &rq, NULL);
1630         vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
1631         vm_pagequeue_lock(pq);
1632         vm_pageout_end_scan(&ss);
1633         vm_pagequeue_unlock(pq);
1634
1635         /*
1636          * Record the remaining shortage and the progress made, along with its rate.
1637          */
1638         atomic_add_int(&vmd->vmd_addl_shortage, addl_page_shortage);
1639         getmicrouptime(&end);
1640         timevalsub(&end, &start);
1641         atomic_add_int(&vmd->vmd_inactive_us,
1642             end.tv_sec * 1000000 + end.tv_usec);
1643         atomic_add_int(&vmd->vmd_inactive_freed,
1644             starting_page_shortage - page_shortage);
1645 }
1646
1647 /*
1648  * Dispatch a number of inactive-scan threads according to load and collect the
1649  * results to present a coherent view of paging activity on this domain.
1650  */
1651 static int
1652 vm_pageout_inactive_dispatch(struct vm_domain *vmd, int shortage)
1653 {
1654         u_int freed, pps, slop, threads, us;
1655
1656         vmd->vmd_inactive_shortage = shortage;
1657         slop = 0;
1658
1659         /*
1660          * If we have more work than we can do in a quarter of our interval, we
1661          * fire off multiple threads to process it.
1662          */
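     	/*
     	 * Illustrative example (assuming VM_INACT_SCAN_RATE of, say, 10 scans
     	 * per second): if vmd_inactive_pps is 40000 pages/sec, one thread can
     	 * cover roughly 4000 pages per scan interval, so helpers are woken
     	 * only when the shortage exceeds a quarter of that, about 1000 pages.
     	 */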
1663         threads = vmd->vmd_inactive_threads;
1664         if (threads > 1 && vmd->vmd_inactive_pps != 0 &&
1665             shortage > vmd->vmd_inactive_pps / VM_INACT_SCAN_RATE / 4) {
1666                 vmd->vmd_inactive_shortage /= threads;
1667                 slop = shortage % threads;
1668                 vm_domain_pageout_lock(vmd);
1669                 blockcount_acquire(&vmd->vmd_inactive_starting, threads - 1);
1670                 blockcount_acquire(&vmd->vmd_inactive_running, threads - 1);
1671                 wakeup(&vmd->vmd_inactive_shortage);
1672                 vm_domain_pageout_unlock(vmd);
1673         }
1674
1675         /* Run the local thread scan. */
1676         vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage + slop);
1677
1678         /*
1679          * Block until helper threads report results and then accumulate
1680          * totals.
1681          */
1682         blockcount_wait(&vmd->vmd_inactive_running, NULL, "vmpoid", PVM);
1683         freed = atomic_readandclear_int(&vmd->vmd_inactive_freed);
1684         VM_CNT_ADD(v_dfree, freed);
1685
1686         /*
1687          * Calculate the per-thread paging rate with an exponential decay of
1688          * prior results.  Careful to avoid integer rounding errors with large
1689          * us values.
1690          */
1691         us = max(atomic_readandclear_int(&vmd->vmd_inactive_us), 1);
1692         if (us > 1000000)
1693                 /* Keep rounding to tenths */
1694                 pps = (freed * 10) / ((us * 10) / 1000000);
1695         else
1696                 pps = (1000000 / us) * freed;
1697         vmd->vmd_inactive_pps = (vmd->vmd_inactive_pps / 2) + (pps / 2);
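     	/*
     	 * For example, freeing 5000 pages in 250000us yields an instantaneous
     	 * rate of (1000000 / 250000) * 5000 = 20000 pages/sec, which is then
     	 * averaged 50/50 with the previous vmd_inactive_pps value.
     	 */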
1698
1699         return (shortage - freed);
1700 }
1701
1702 /*
1703  * Attempt to reclaim the requested number of pages from the inactive queue.
1704  * Returns true if the shortage was addressed.
1705  */
1706 static int
1707 vm_pageout_inactive(struct vm_domain *vmd, int shortage, int *addl_shortage)
1708 {
1709         struct vm_pagequeue *pq;
1710         u_int addl_page_shortage, deficit, page_shortage;
1711         u_int starting_page_shortage;
1712
1713         /*
1714          * vmd_pageout_deficit counts the number of pages requested in
1715          * allocations that failed because of a free page shortage.  We assume
1716          * that the allocations will be reattempted and thus include the deficit
1717          * in our scan target.
1718          */
1719         deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
1720         starting_page_shortage = shortage + deficit;
1721
1722         /*
1723          * Run the inactive scan on as many threads as is necessary.
1724          */
1725         page_shortage = vm_pageout_inactive_dispatch(vmd, starting_page_shortage);
1726         addl_page_shortage = atomic_readandclear_int(&vmd->vmd_addl_shortage);
1727
1728         /*
1729          * Wake up the laundry thread so that it can perform any needed
1730          * laundering.  If we didn't meet our target, we're in shortfall and
1731          * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1732          * swap devices are configured, the laundry thread has no work to do, so
1733          * don't bother waking it up.
1734          *
1735          * The laundry thread uses the number of inactive queue scans elapsed
1736          * since the last laundering to determine whether to launder again, so
1737          * keep count.
1738          */
1739         if (starting_page_shortage > 0) {
1740                 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1741                 vm_pagequeue_lock(pq);
1742                 if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1743                     (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1744                         if (page_shortage > 0) {
1745                                 vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
1746                                 VM_CNT_INC(v_pdshortfalls);
1747                         } else if (vmd->vmd_laundry_request !=
1748                             VM_LAUNDRY_SHORTFALL)
1749                                 vmd->vmd_laundry_request =
1750                                     VM_LAUNDRY_BACKGROUND;
1751                         wakeup(&vmd->vmd_laundry_request);
1752                 }
1753                 vmd->vmd_clean_pages_freed +=
1754                     starting_page_shortage - page_shortage;
1755                 vm_pagequeue_unlock(pq);
1756         }
1757
1758         /*
1759          * Wakeup the swapout daemon if we didn't free the targeted number of
1760          * pages.
1761          */
1762         if (page_shortage > 0)
1763                 vm_swapout_run();
1764
1765         /*
1766          * If the inactive queue scan fails repeatedly to meet its
1767          * target, kill the largest process.
1768          */
1769         vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1770
1771         /*
1772          * Reclaim pages by swapping out idle processes, if configured to do so.
1773          */
1774         vm_swapout_run_idle();
1775
1776         /*
1777          * See the description of addl_page_shortage above.
1778          */
1779         *addl_shortage = addl_page_shortage + deficit;
1780
1781         return (page_shortage <= 0);
1782 }
1783
1784 static int vm_pageout_oom_vote;
1785
1786 /*
1787  * The pagedaemon threads randomly select one to perform the
1788  * OOM kill.  Trying to kill processes before all pagedaemons
1789  * have failed to reach the free target is premature.
1790  */
1791 static void
1792 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1793     int starting_page_shortage)
1794 {
1795         int old_vote;
1796
1797         if (starting_page_shortage <= 0 || starting_page_shortage !=
1798             page_shortage)
1799                 vmd->vmd_oom_seq = 0;
1800         else
1801                 vmd->vmd_oom_seq++;
1802         if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1803                 if (vmd->vmd_oom) {
1804                         vmd->vmd_oom = FALSE;
1805                         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1806                 }
1807                 return;
1808         }
1809
1810         /*
1811          * Do not repeat this voting sequence until the OOM
1812          * condition has been cleared.
1813          */
1814         vmd->vmd_oom_seq = 0;
1815
1816         if (vmd->vmd_oom)
1817                 return;
1818
1819         vmd->vmd_oom = TRUE;
1820         old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1821         if (old_vote != vm_ndomains - 1)
1822                 return;
1823
1824         /*
1825          * The current pagedaemon thread is the last in the quorum to
1826          * start OOM.  Initiate the selection and signaling of the
1827          * victim.
1828          */
1829         vm_pageout_oom(VM_OOM_MEM);
1830
1831         /*
1832          * After one round of OOM terror, recall our vote.  On the
1833          * next pass, this pagedaemon will vote again if the low
1834          * memory condition persists, since vmd_oom is once again
1835          * false.
1836          */
1837         vmd->vmd_oom = FALSE;
1838         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1839 }
1840
1841 /*
1842  * The OOM killer is the page daemon's action of last resort when
1843  * memory allocation requests have been stalled for a prolonged period
1844  * of time because it cannot reclaim memory.  This function computes
1845  * the approximate number of physical pages that could be reclaimed if
1846  * the specified address space is destroyed.
1847  *
1848  * Private, anonymous memory owned by the address space is the
1849  * principal resource that we expect to recover after an OOM kill.
1850  * Since the physical pages mapped by the address space's COW entries
1851  * are typically shared pages, they are unlikely to be released and so
1852  * they are not counted.
1853  *
1854  * To get to the point where the page daemon runs the OOM killer, its
1855  * efforts to write-back vnode-backed pages may have stalled.  This
1856  * could be caused by a memory allocation deadlock in the write path
1857  * that might be resolved by an OOM kill.  Therefore, physical pages
1858  * belonging to vnode-backed objects are counted, because they might
1859  * be freed without being written out first if the address space holds
1860  * the last reference to an unlinked vnode.
1861  *
1862  * Similarly, physical pages belonging to OBJT_PHYS objects are
1863  * counted because the address space might hold the last reference to
1864  * the object.
1865  */
1866 static long
1867 vm_pageout_oom_pagecount(struct vmspace *vmspace)
1868 {
1869         vm_map_t map;
1870         vm_map_entry_t entry;
1871         vm_object_t obj;
1872         long res;
1873
1874         map = &vmspace->vm_map;
1875         KASSERT(!map->system_map, ("system map"));
1876         sx_assert(&map->lock, SA_LOCKED);
1877         res = 0;
1878         VM_MAP_ENTRY_FOREACH(entry, map) {
1879                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1880                         continue;
1881                 obj = entry->object.vm_object;
1882                 if (obj == NULL)
1883                         continue;
1884                 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
1885                     obj->ref_count != 1)
1886                         continue;
1887                 if (obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP ||
1888                     obj->type == OBJT_PHYS || obj->type == OBJT_VNODE ||
1889                     (obj->flags & OBJ_SWAP) != 0)
1890                         res += obj->resident_page_count;
1891         }
1892         return (res);
1893 }
1894
1895 static int vm_oom_ratelim_last;
1896 static int vm_oom_pf_secs = 10;
1897 SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0,
1898     "Minimum interval in seconds between OOM kills initiated by page faults");
1899 static struct mtx vm_oom_ratelim_mtx;
1900
1901 void
1902 vm_pageout_oom(int shortage)
1903 {
1904         const char *reason;
1905         struct proc *p, *bigproc;
1906         vm_offset_t size, bigsize;
1907         struct thread *td;
1908         struct vmspace *vm;
1909         int now;
1910         bool breakout;
1911
1912         /*
1913          * For OOM requests originating from vm_fault(), there is a high
1914          * chance that a single large process faults simultaneously in
1915          * several threads.  Also, on an active system running many
1916          * medium-sized processes, such as a buildworld, all of them
1917          * could fault almost simultaneously as well.
1918          *
1919          * To avoid killing too many processes, rate-limit OOMs
1920          * initiated by vm_fault() time-outs on the waits for free
1921          * pages.
1922          */
1923         mtx_lock(&vm_oom_ratelim_mtx);
1924         now = ticks;
1925         if (shortage == VM_OOM_MEM_PF &&
1926             (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) {
1927                 mtx_unlock(&vm_oom_ratelim_mtx);
1928                 return;
1929         }
1930         vm_oom_ratelim_last = now;
1931         mtx_unlock(&vm_oom_ratelim_mtx);
1932
1933         /*
1934          * We keep the process bigproc locked once we find it to keep anyone
1935          * from messing with it; however, there is a possibility of
1936          * deadlock if process B is bigproc and one of its child processes
1937          * attempts to propagate a signal to B while we are waiting for the
1938          * lock of the process being examined.  To avoid this, we don't
1939          * block on the process lock but just skip a process that is locked.
1940          */
1941         bigproc = NULL;
1942         bigsize = 0;
1943         sx_slock(&allproc_lock);
1944         FOREACH_PROC_IN_SYSTEM(p) {
1945                 PROC_LOCK(p);
1946
1947                 /*
1948                  * If this is a system, protected or killed process, skip it.
1949                  */
1950                 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1951                     P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1952                     p->p_pid == 1 || P_KILLED(p) ||
1953                     (p->p_pid < 48 && swap_pager_avail != 0)) {
1954                         PROC_UNLOCK(p);
1955                         continue;
1956                 }
1957                 /*
1958                  * If the process is in a non-running type state,
1959                  * don't touch it.  Check all the threads individually.
1960                  */
1961                 breakout = false;
1962                 FOREACH_THREAD_IN_PROC(p, td) {
1963                         thread_lock(td);
1964                         if (!TD_ON_RUNQ(td) &&
1965                             !TD_IS_RUNNING(td) &&
1966                             !TD_IS_SLEEPING(td) &&
1967                             !TD_IS_SUSPENDED(td) &&
1968                             !TD_IS_SWAPPED(td)) {
1969                                 thread_unlock(td);
1970                                 breakout = true;
1971                                 break;
1972                         }
1973                         thread_unlock(td);
1974                 }
1975                 if (breakout) {
1976                         PROC_UNLOCK(p);
1977                         continue;
1978                 }
1979                 /*
1980                  * get the process size
1981                  */
1982                 vm = vmspace_acquire_ref(p);
1983                 if (vm == NULL) {
1984                         PROC_UNLOCK(p);
1985                         continue;
1986                 }
1987                 _PHOLD_LITE(p);
1988                 PROC_UNLOCK(p);
1989                 sx_sunlock(&allproc_lock);
1990                 if (!vm_map_trylock_read(&vm->vm_map)) {
1991                         vmspace_free(vm);
1992                         sx_slock(&allproc_lock);
1993                         PRELE(p);
1994                         continue;
1995                 }
1996                 size = vmspace_swap_count(vm);
1997                 if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF)
1998                         size += vm_pageout_oom_pagecount(vm);
1999                 vm_map_unlock_read(&vm->vm_map);
2000                 vmspace_free(vm);
2001                 sx_slock(&allproc_lock);
2002
2003                 /*
2004                  * If this process is bigger than the biggest one,
2005                  * remember it.
2006                  */
2007                 if (size > bigsize) {
2008                         if (bigproc != NULL)
2009                                 PRELE(bigproc);
2010                         bigproc = p;
2011                         bigsize = size;
2012                 } else {
2013                         PRELE(p);
2014                 }
2015         }
2016         sx_sunlock(&allproc_lock);
2017
2018         if (bigproc != NULL) {
2019                 switch (shortage) {
2020                 case VM_OOM_MEM:
2021                         reason = "failed to reclaim memory";
2022                         break;
2023                 case VM_OOM_MEM_PF:
2024                         reason = "a thread waited too long to allocate a page";
2025                         break;
2026                 case VM_OOM_SWAPZ:
2027                         reason = "out of swap space";
2028                         break;
2029                 default:
2030                         panic("unknown OOM reason %d", shortage);
2031                 }
2032                 if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0)
2033                         panic("%s", reason);
2034                 PROC_LOCK(bigproc);
2035                 killproc(bigproc, reason);
2036                 sched_nice(bigproc, PRIO_MIN);
2037                 _PRELE(bigproc);
2038                 PROC_UNLOCK(bigproc);
2039         }
2040 }
2041
2042 /*
2043  * Signal a free page shortage to subsystems that have registered an event
2044  * handler.  Reclaim memory from UMA in the event of a severe shortage.
2045  * Return true if the free page count should be re-evaluated.
2046  */
2047 static bool
2048 vm_pageout_lowmem(void)
2049 {
2050         static int lowmem_ticks = 0;
2051         int last;
2052         bool ret;
2053
2054         ret = false;
2055
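     	/*
     	 * Generally, at most one thread per lowmem_period advances
     	 * lowmem_ticks via the CAS below and runs the lowmem handlers;
     	 * racing threads re-check the elapsed time and typically fall
     	 * through without doing the work.
     	 */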
2056         last = atomic_load_int(&lowmem_ticks);
2057         while ((u_int)(ticks - last) / hz >= lowmem_period) {
2058                 if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
2059                         continue;
2060
2061                 /*
2062                  * Decrease registered cache sizes.
2063                  */
2064                 SDT_PROBE0(vm, , , vm__lowmem_scan);
2065                 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
2066
2067                 /*
2068                  * We do this explicitly after the caches have been
2069                  * drained above.
2070                  */
2071                 uma_reclaim(UMA_RECLAIM_TRIM);
2072                 ret = true;
2073                 break;
2074         }
2075
2076         /*
2077          * Kick off an asynchronous reclaim of cached memory if one of the
2078          * page daemons is failing to keep up with demand.  Use the "severe"
2079          * threshold instead of "min" to ensure that we do not blow away the
2080          * caches if a subset of the NUMA domains are depleted by kernel memory
2081          * allocations; the domainset iterators automatically skip domains
2082          * below the "min" threshold on the first pass.
2083          *
2084          * UMA reclaim worker has its own rate-limiting mechanism, so don't
2085          * worry about kicking it too often.
2086          */
2087         if (vm_page_count_severe())
2088                 uma_reclaim_wakeup();
2089
2090         return (ret);
2091 }
2092
2093 static void
2094 vm_pageout_worker(void *arg)
2095 {
2096         struct vm_domain *vmd;
2097         u_int ofree;
2098         int addl_shortage, domain, shortage;
2099         bool target_met;
2100
2101         domain = (uintptr_t)arg;
2102         vmd = VM_DOMAIN(domain);
2103         shortage = 0;
2104         target_met = true;
2105
2106         /*
2107          * XXXKIB It could be useful to bind pageout daemon threads to
2108          * the cores belonging to the domain, from which vm_page_array
2109          * is allocated.
2110          */
2111
2112         KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
2113         vmd->vmd_last_active_scan = ticks;
2114
2115         /*
2116          * The pageout daemon worker is never done, so loop forever.
2117          */
2118         while (TRUE) {
2119                 vm_domain_pageout_lock(vmd);
2120
2121                 /*
2122                  * We need to clear wanted before we check the limits.  This
2123                  * prevents races with wakers who will check wanted after they
2124                  * reach the limit.
2125                  */
2126                 atomic_store_int(&vmd->vmd_pageout_wanted, 0);
2127
2128                 /*
2129                  * Might the page daemon need to run again?
2130                  */
2131                 if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
2132                         /*
2133                          * Yes.  If the scan failed to produce enough free
2134                          * pages, sleep uninterruptibly for some time in the
2135                          * hope that the laundry thread will clean some pages.
2136                          */
2137                         vm_domain_pageout_unlock(vmd);
2138                         if (!target_met)
2139                                 pause("pwait", hz / VM_INACT_SCAN_RATE);
2140                 } else {
2141                         /*
2142                          * No, sleep until the next wakeup or until pages
2143                          * need to have their reference stats updated.
2144                          */
2145                         if (mtx_sleep(&vmd->vmd_pageout_wanted,
2146                             vm_domain_pageout_lockptr(vmd), PDROP | PVM,
2147                             "psleep", hz / VM_INACT_SCAN_RATE) == 0)
2148                                 VM_CNT_INC(v_pdwakeups);
2149                 }
2150
2151                 /* Prevent spurious wakeups by ensuring that wanted is set. */
2152                 atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2153
2154                 /*
2155                  * Use the controller to calculate how many pages to free in
2156                  * this interval, and scan the inactive queue.  If the lowmem
2157                  * handlers appear to have freed up some pages, subtract the
2158                  * difference from the inactive queue scan target.
2159                  */
2160                 shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
2161                 if (shortage > 0) {
2162                         ofree = vmd->vmd_free_count;
2163                         if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
2164                                 shortage -= min(vmd->vmd_free_count - ofree,
2165                                     (u_int)shortage);
2166                         target_met = vm_pageout_inactive(vmd, shortage,
2167                             &addl_shortage);
2168                 } else
2169                         addl_shortage = 0;
2170
2171                 /*
2172                  * Scan the active queue.  A positive value for shortage
2173                  * indicates that we must aggressively deactivate pages to avoid
2174                  * a shortfall.
2175                  */
2176                 shortage = vm_pageout_active_target(vmd) + addl_shortage;
2177                 vm_pageout_scan_active(vmd, shortage);
2178         }
2179 }
2180
2181 /*
2182  * vm_pageout_helper runs additional pageout daemons in times of high paging
2183  * activity.
2184  */
2185 static void
2186 vm_pageout_helper(void *arg)
2187 {
2188         struct vm_domain *vmd;
2189         int domain;
2190
2191         domain = (uintptr_t)arg;
2192         vmd = VM_DOMAIN(domain);
2193
2194         vm_domain_pageout_lock(vmd);
2195         for (;;) {
2196                 msleep(&vmd->vmd_inactive_shortage,
2197                     vm_domain_pageout_lockptr(vmd), PVM, "psleep", 0);
2198                 blockcount_release(&vmd->vmd_inactive_starting, 1);
2199
2200                 vm_domain_pageout_unlock(vmd);
2201                 vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage);
2202                 vm_domain_pageout_lock(vmd);
2203
2204                 /*
2205                  * Release the running count while the pageout lock is held to
2206                  * prevent wakeup races.
2207                  */
2208                 blockcount_release(&vmd->vmd_inactive_running, 1);
2209         }
2210 }
2211
2212 static int
2213 get_pageout_threads_per_domain(const struct vm_domain *vmd)
2214 {
2215         unsigned total_pageout_threads, eligible_cpus, domain_cpus;
2216
2217         if (VM_DOMAIN_EMPTY(vmd->vmd_domain))
2218                 return (0);
2219
2220         /*
2221          * Semi-arbitrarily constrain the number of pagedaemon threads to
2222          * at most about half of the CPUs in the system.
2223          */
2224         if (pageout_cpus_per_thread < 2)
2225                 pageout_cpus_per_thread = 2;
2226         else if (pageout_cpus_per_thread > mp_ncpus)
2227                 pageout_cpus_per_thread = mp_ncpus;
2228
2229         total_pageout_threads = howmany(mp_ncpus, pageout_cpus_per_thread);
2230         domain_cpus = CPU_COUNT(&cpuset_domain[vmd->vmd_domain]);
2231
2232         /* Pagedaemons are not run in empty domains. */
2233         eligible_cpus = mp_ncpus;
2234         for (unsigned i = 0; i < vm_ndomains; i++)
2235                 if (VM_DOMAIN_EMPTY(i))
2236                         eligible_cpus -= CPU_COUNT(&cpuset_domain[i]);
2237
2238         /*
2239          * Assign a portion of the total pageout threads to this domain
2240          * corresponding to the fraction of pagedaemon-eligible CPUs in the
2241          * domain.  In asymmetric NUMA systems, domains with more CPUs may be
2242          * allocated more threads than domains with fewer CPUs.
2243          */
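     	/*
     	 * For example, with mp_ncpus = 32 and pageout_cpus_per_thread = 16,
     	 * total_pageout_threads = 2; a domain containing 16 of the 32
     	 * eligible CPUs is then assigned howmany(2 * 16, 32) = 1 thread.
     	 */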
2244         return (howmany(total_pageout_threads * domain_cpus, eligible_cpus));
2245 }
2246
2247 /*
2248  * Initialize basic pageout daemon settings.  See the comment above the
2249  * definition of vm_domain for some explanation of how these thresholds are
2250  * used.
2251  */
2252 static void
2253 vm_pageout_init_domain(int domain)
2254 {
2255         struct vm_domain *vmd;
2256         struct sysctl_oid *oid;
2257
2258         vmd = VM_DOMAIN(domain);
2259         vmd->vmd_interrupt_free_min = 2;
2260
2261         /*
2262          * v_free_reserved needs to include enough for the largest
2263          * swap pager structures plus enough for any pv_entry structs
2264          * when paging.
2265          */
2266         vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
2267             vmd->vmd_interrupt_free_min;
2268         vmd->vmd_free_reserved = vm_pageout_page_count +
2269             vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768;
2270         vmd->vmd_free_min = vmd->vmd_page_count / 200;
2271         vmd->vmd_free_severe = vmd->vmd_free_min / 2;
2272         vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
2273         vmd->vmd_free_min += vmd->vmd_free_reserved;
2274         vmd->vmd_free_severe += vmd->vmd_free_reserved;
2275         vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
2276         if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
2277                 vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
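     	/*
     	 * Rough illustrative numbers, assuming 4KB pages, a 64KB MAXBSIZE and
     	 * a vm_pageout_page_count of 32, for a domain of one million pages
     	 * (about 4GB): pageout_free_min = 34, free_reserved = 1368,
     	 * free_min = 6368, free_severe = 3868, free_target = 21368 and
     	 * inactive_target = 32052 (subject to the free_count / 3 clamp).
     	 */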
2278
2279         /*
2280          * Set the default wakeup threshold to be 10% below the paging
2281          * target.  This keeps the steady state out of shortfall.
2282          */
2283         vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
2284
2285         /*
2286          * Target amount of memory to move out of the laundry queue during a
2287          * background laundering.  This is proportional to the amount of system
2288          * memory.
2289          */
2290         vmd->vmd_background_launder_target = (vmd->vmd_free_target -
2291             vmd->vmd_free_min) / 10;
2292
2293         /* Initialize the pageout daemon pid controller. */
2294         pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
2295             vmd->vmd_free_target, PIDCTRL_BOUND,
2296             PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
2297         oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
2298             "pidctrl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2299         pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
2300
2301         vmd->vmd_inactive_threads = get_pageout_threads_per_domain(vmd);
2302 }
2303
2304 static void
2305 vm_pageout_init(void)
2306 {
2307         u_long freecount;
2308         int i;
2309
2310         /*
2311          * Initialize some paging parameters.
2312          */
2313         if (vm_cnt.v_page_count < 2000)
2314                 vm_pageout_page_count = 8;
2315
2316         freecount = 0;
2317         for (i = 0; i < vm_ndomains; i++) {
2318                 struct vm_domain *vmd;
2319
2320                 vm_pageout_init_domain(i);
2321                 vmd = VM_DOMAIN(i);
2322                 vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
2323                 vm_cnt.v_free_target += vmd->vmd_free_target;
2324                 vm_cnt.v_free_min += vmd->vmd_free_min;
2325                 vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
2326                 vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
2327                 vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
2328                 vm_cnt.v_free_severe += vmd->vmd_free_severe;
2329                 freecount += vmd->vmd_free_count;
2330         }
2331
2332         /*
2333          * Set interval in seconds for active scan.  We want to visit each
2334          * page at least once every ten minutes.  This is to prevent worst
2335          * case paging behaviors with stale active LRU.
2336          */
2337         if (vm_pageout_update_period == 0)
2338                 vm_pageout_update_period = 600;
2339
2340         /*
2341          * Set the maximum number of user-wired virtual pages.  Historically the
2342          * main source of such pages was mlock(2) and mlockall(2).  Hypervisors
2343          * may also request user-wired memory.
2344          */
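     	/*
     	 * The default works out to 4/5, i.e. 80%, of the pages that were free
     	 * across all domains at initialization time.
     	 */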
2345         if (vm_page_max_user_wired == 0)
2346                 vm_page_max_user_wired = 4 * freecount / 5;
2347 }
2348
2349 /*
2350  *     vm_pageout is the high level pageout daemon.
2351  */
2352 static void
2353 vm_pageout(void)
2354 {
2355         struct proc *p;
2356         struct thread *td;
2357         int error, first, i, j, pageout_threads;
2358
2359         p = curproc;
2360         td = curthread;
2361
2362         mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
2363         swap_pager_swap_init();
2364         for (first = -1, i = 0; i < vm_ndomains; i++) {
2365                 if (VM_DOMAIN_EMPTY(i)) {
2366                         if (bootverbose)
2367                                 printf("domain %d empty; skipping pageout\n",
2368                                     i);
2369                         continue;
2370                 }
2371                 if (first == -1)
2372                         first = i;
2373                 else {
2374                         error = kthread_add(vm_pageout_worker,
2375                             (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
2376                         if (error != 0)
2377                                 panic("starting pageout for domain %d: %d\n",
2378                                     i, error);
2379                 }
2380                 pageout_threads = VM_DOMAIN(i)->vmd_inactive_threads;
2381                 for (j = 0; j < pageout_threads - 1; j++) {
2382                         error = kthread_add(vm_pageout_helper,
2383                             (void *)(uintptr_t)i, p, NULL, 0, 0,
2384                             "dom%d helper%d", i, j);
2385                         if (error != 0)
2386                                 panic("starting pageout helper %d for domain "
2387                                     "%d: %d\n", j, i, error);
2388                 }
2389                 error = kthread_add(vm_pageout_laundry_worker,
2390                     (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
2391                 if (error != 0)
2392                         panic("starting laundry for domain %d: %d", i, error);
2393         }
2394         error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
2395         if (error != 0)
2396                 panic("starting uma_reclaim helper, error %d\n", error);
2397
2398         snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
2399         vm_pageout_worker((void *)(uintptr_t)first);
2400 }
2401
2402 /*
2403  * Perform an advisory wakeup of the page daemon.
2404  */
2405 void
2406 pagedaemon_wakeup(int domain)
2407 {
2408         struct vm_domain *vmd;
2409
2410         vmd = VM_DOMAIN(domain);
2411         vm_domain_pageout_assert_unlocked(vmd);
2412         if (curproc == pageproc)
2413                 return;
2414
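     	/*
     	 * Only the caller that transitions vmd_pageout_wanted from zero to
     	 * nonzero takes the pageout lock and issues the wakeup; concurrent
     	 * callers simply increment the counter.
     	 */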
2415         if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
2416                 vm_domain_pageout_lock(vmd);
2417                 atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2418                 wakeup(&vmd->vmd_pageout_wanted);
2419                 vm_domain_pageout_unlock(vmd);
2420         }
2421 }