1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2005 Yahoo! Technologies Norway AS
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to Berkeley by
14  * The Mach Operating System project at Carnegie-Mellon University.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *      This product includes software developed by the University of
27  *      California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
45  *
46  *
47  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
48  * All rights reserved.
49  *
50  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
51  *
52  * Permission to use, copy, modify and distribute this software and
53  * its documentation is hereby granted, provided that both the copyright
54  * notice and this permission notice appear in all copies of the
55  * software, derivative works or modified versions, and any portions
56  * thereof, and that both notices appear in supporting documentation.
57  *
58  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61  *
62  * Carnegie Mellon requests users of this software to return to
63  *
64  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65  *  School of Computer Science
66  *  Carnegie Mellon University
67  *  Pittsburgh PA 15213-3890
68  *
69  * any improvements or extensions that they make and grant Carnegie the
70  * rights to redistribute these changes.
71  */
72
73 /*
74  *      The proverbial page-out daemon.
75  */
76
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
79
80 #include "opt_vm.h"
81
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/kernel.h>
85 #include <sys/eventhandler.h>
86 #include <sys/lock.h>
87 #include <sys/mutex.h>
88 #include <sys/proc.h>
89 #include <sys/kthread.h>
90 #include <sys/ktr.h>
91 #include <sys/mount.h>
92 #include <sys/racct.h>
93 #include <sys/resourcevar.h>
94 #include <sys/sched.h>
95 #include <sys/sdt.h>
96 #include <sys/signalvar.h>
97 #include <sys/smp.h>
98 #include <sys/time.h>
99 #include <sys/vnode.h>
100 #include <sys/vmmeter.h>
101 #include <sys/rwlock.h>
102 #include <sys/sx.h>
103 #include <sys/sysctl.h>
104
105 #include <vm/vm.h>
106 #include <vm/vm_param.h>
107 #include <vm/vm_object.h>
108 #include <vm/vm_page.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_pageout.h>
111 #include <vm/vm_pager.h>
112 #include <vm/vm_phys.h>
113 #include <vm/swap_pager.h>
114 #include <vm/vm_extern.h>
115 #include <vm/uma.h>
116
117 /*
118  * System initialization
119  */
120
121 /* the kernel process "vm_pageout" */
122 static void vm_pageout(void);
123 static void vm_pageout_init(void);
124 static int vm_pageout_clean(vm_page_t m, int *numpagedout);
125 static int vm_pageout_cluster(vm_page_t m);
126 static bool vm_pageout_scan(struct vm_domain *vmd, int pass);
127 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
128     int starting_page_shortage);
129
130 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
131     NULL);
132
133 struct proc *pageproc;
134
135 static struct kproc_desc page_kp = {
136         "pagedaemon",
137         vm_pageout,
138         &pageproc
139 };
140 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
141     &page_kp);
142
143 SDT_PROVIDER_DEFINE(vm);
144 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
145
146 /* Pagedaemon activity rates, in subdivisions of one second. */
147 #define VM_LAUNDER_RATE         10
148 #define VM_INACT_SCAN_RATE      2
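/*
 * Illustration: with VM_LAUNDER_RATE 10 the laundry worker pauses for
 * hz / 10 ticks between passes, i.e. it runs roughly ten times per second,
 * and a shortfall run is budgeted VM_LAUNDER_RATE / VM_INACT_SCAN_RATE = 5
 * laundering passes; see vm_pageout_laundry_worker() below.
 */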
149
150 int vm_pageout_deficit;         /* Estimated number of pages deficit */
151 u_int vm_pageout_wakeup_thresh;
152 static int vm_pageout_oom_seq = 12;
153 bool vm_pageout_wanted;         /* Event on which pageout daemon sleeps */
154 bool vm_pages_needed;           /* Are threads waiting for free pages? */
155
156 /* Pending request for dirty page laundering. */
157 static enum {
158         VM_LAUNDRY_IDLE,
159         VM_LAUNDRY_BACKGROUND,
160         VM_LAUNDRY_SHORTFALL
161 } vm_laundry_request = VM_LAUNDRY_IDLE;
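/*
 * Summary of the protocol used later in this file: vm_pageout_scan() posts
 * VM_LAUNDRY_BACKGROUND or VM_LAUNDRY_SHORTFALL when it wants dirty pages
 * laundered, and the laundry worker resets the state to VM_LAUNDRY_IDLE
 * once it no longer has a laundering target.
 */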
162
163 static int vm_pageout_update_period;
164 static int disable_swap_pageouts;
165 static int lowmem_period = 10;
166 static time_t lowmem_uptime;
167 static int swapdev_enabled;
168
169 static int vm_panic_on_oom = 0;
170
171 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
172         CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
173         "panic on out of memory instead of killing the largest process");
174
175 SYSCTL_INT(_vm, OID_AUTO, pageout_wakeup_thresh,
176         CTLFLAG_RWTUN, &vm_pageout_wakeup_thresh, 0,
177         "free page threshold for waking up the pageout daemon");
178
179 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
180         CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
181         "Maximum active LRU update period");
182   
183 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
184         "Low memory callback period");
185
186 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
187         CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
188
189 static int pageout_lock_miss;
190 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
191         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
192
193 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
194         CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
195         "back-to-back calls to oom detector to start OOM");
196
197 static int act_scan_laundry_weight = 3;
198 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
199     &act_scan_laundry_weight, 0,
200     "weight given to clean vs. dirty pages in active queue scans");
201
202 static u_int vm_background_launder_target;
203 SYSCTL_UINT(_vm, OID_AUTO, background_launder_target, CTLFLAG_RWTUN,
204     &vm_background_launder_target, 0,
205     "background laundering target, in pages");
206
207 static u_int vm_background_launder_rate = 4096;
208 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
209     &vm_background_launder_rate, 0,
210     "background laundering rate, in kilobytes per second");
211
212 static u_int vm_background_launder_max = 20 * 1024;
213 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
214     &vm_background_launder_max, 0, "background laundering cap, in kilobytes");
215
216 int vm_pageout_page_count = 32;
217
218 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
219 SYSCTL_INT(_vm, OID_AUTO, max_wired,
220         CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
221
222 static u_int isqrt(u_int num);
223 static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
224 static int vm_pageout_launder(struct vm_domain *vmd, int launder,
225     bool in_shortfall);
226 static void vm_pageout_laundry_worker(void *arg);
227 static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
228
229 /*
230  * Initialize a dummy page for marking the caller's place in the specified
231  * paging queue.  In principle, this function only needs to set the flag
232  * PG_MARKER.  Nonetheless, it write busies and initializes the hold count
233  * to one as safety precautions.
234  */ 
235 static void
236 vm_pageout_init_marker(vm_page_t marker, u_short queue)
237 {
238
239         bzero(marker, sizeof(*marker));
240         marker->flags = PG_MARKER;
241         marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
242         marker->queue = queue;
243         marker->hold_count = 1;
244 }
245
246 /*
247  * vm_pageout_fallback_object_lock:
248  * 
249  * Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
250  * known to have failed and page queue must be either PQ_ACTIVE or
251  * PQ_INACTIVE.  To avoid lock order violation, unlock the page queue
252  * while locking the vm object.  Use marker page to detect page queue
253  * changes and maintain notion of next page on page queue.  Return
254  * TRUE if no changes were detected, FALSE otherwise.  vm object is
255  * locked on return.
256  * 
257  * This function depends on both the lock portion of struct vm_object
258  * and normal struct vm_page being type stable.
259  */
260 static boolean_t
261 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
262 {
263         struct vm_page marker;
264         struct vm_pagequeue *pq;
265         boolean_t unchanged;
266         u_short queue;
267         vm_object_t object;
268
269         queue = m->queue;
270         vm_pageout_init_marker(&marker, queue);
271         pq = vm_page_pagequeue(m);
272         object = m->object;
273         
274         TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
275         vm_pagequeue_unlock(pq);
276         vm_page_unlock(m);
277         VM_OBJECT_WLOCK(object);
278         vm_page_lock(m);
279         vm_pagequeue_lock(pq);
280
281         /*
282          * The page's object might have changed, and/or the page might
283          * have moved from its original position in the queue.  If the
284          * page's object has changed, then the caller should abandon
285          * processing the page because the wrong object lock was
286          * acquired.  Use the marker's plinks.q, not the page's, to
287          * determine if the page has been moved.  The state of the
288          * page's plinks.q can be indeterminate; whereas, the marker's
289          * plinks.q must be valid.
290          */
291         *next = TAILQ_NEXT(&marker, plinks.q);
292         unchanged = m->object == object &&
293             m == TAILQ_PREV(&marker, pglist, plinks.q);
294         KASSERT(!unchanged || m->queue == queue,
295             ("page %p queue %d %d", m, queue, m->queue));
296         TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
297         return (unchanged);
298 }
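/*
 * Illustrative caller pattern (it matches the laundry and inactive queue
 * scans later in this file): the fallback is attempted only after a failed
 * trylock, and the page is rechecked once both locks are finally held.
 *
 *	if ((!VM_OBJECT_TRYWLOCK(object) &&
 *	    (!vm_pageout_fallback_object_lock(m, &next) ||
 *	    m->hold_count != 0)) || vm_page_busied(m)) {
 *		VM_OBJECT_WUNLOCK(object);
 *		vm_page_unlock(m);
 *		continue;
 *	}
 */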
299
300 /*
301  * Lock the page while holding the page queue lock.  Use marker page
302  * to detect page queue changes and maintain notion of next page on
303  * page queue.  Return TRUE if no changes were detected, FALSE
304  * otherwise.  The page is locked on return. The page queue lock might
305  * be dropped and reacquired.
306  *
307  * This function depends on normal struct vm_page being type stable.
308  */
309 static boolean_t
310 vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
311 {
312         struct vm_page marker;
313         struct vm_pagequeue *pq;
314         boolean_t unchanged;
315         u_short queue;
316
317         vm_page_lock_assert(m, MA_NOTOWNED);
318         if (vm_page_trylock(m))
319                 return (TRUE);
320
321         queue = m->queue;
322         vm_pageout_init_marker(&marker, queue);
323         pq = vm_page_pagequeue(m);
324
325         TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
326         vm_pagequeue_unlock(pq);
327         vm_page_lock(m);
328         vm_pagequeue_lock(pq);
329
330         /* Page queue might have changed. */
331         *next = TAILQ_NEXT(&marker, plinks.q);
332         unchanged = m == TAILQ_PREV(&marker, pglist, plinks.q);
333         KASSERT(!unchanged || m->queue == queue,
334             ("page %p queue %d %d", m, queue, m->queue));
335         TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
336         return (unchanged);
337 }
338
339 /*
340  * Scan for pages at adjacent offsets within the given page's object that are
341  * eligible for laundering, form a cluster of these pages and the given page,
342  * and launder that cluster.
343  */
344 static int
345 vm_pageout_cluster(vm_page_t m)
346 {
347         vm_object_t object;
348         vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
349         vm_pindex_t pindex;
350         int ib, is, page_base, pageout_count;
351
352         vm_page_assert_locked(m);
353         object = m->object;
354         VM_OBJECT_ASSERT_WLOCKED(object);
355         pindex = m->pindex;
356
357         /*
358          * We can't clean the page if it is busy or held.
359          */
360         vm_page_assert_unbusied(m);
361         KASSERT(m->hold_count == 0, ("page %p is held", m));
362
363         pmap_remove_write(m);
364         vm_page_unlock(m);
365
366         mc[vm_pageout_page_count] = pb = ps = m;
367         pageout_count = 1;
368         page_base = vm_pageout_page_count;
369         ib = 1;
370         is = 1;
371
372         /*
373          * We can cluster only if the page is not clean, busy, or held, and
374          * the page is in the laundry queue.
375          *
376          * During heavy mmap/modification loads the pageout
377          * daemon can really fragment the underlying file
378          * due to flushing pages out of order and not trying to
379          * align the clusters (which leaves sporadic out-of-order
380          * holes).  To solve this problem we do the reverse scan
381          * first and attempt to align our cluster, then do a 
382          * forward scan if room remains.
383          */
384 more:
385         while (ib != 0 && pageout_count < vm_pageout_page_count) {
386                 if (ib > pindex) {
387                         ib = 0;
388                         break;
389                 }
390                 if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
391                         ib = 0;
392                         break;
393                 }
394                 vm_page_test_dirty(p);
395                 if (p->dirty == 0) {
396                         ib = 0;
397                         break;
398                 }
399                 vm_page_lock(p);
400                 if (!vm_page_in_laundry(p) ||
401                     p->hold_count != 0) {       /* may be undergoing I/O */
402                         vm_page_unlock(p);
403                         ib = 0;
404                         break;
405                 }
406                 pmap_remove_write(p);
407                 vm_page_unlock(p);
408                 mc[--page_base] = pb = p;
409                 ++pageout_count;
410                 ++ib;
411
412                 /*
413                  * We are at an alignment boundary.  Stop here, and switch
414                  * directions.  Do not clear ib.
415                  */
416                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
417                         break;
418         }
419         while (pageout_count < vm_pageout_page_count && 
420             pindex + is < object->size) {
421                 if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
422                         break;
423                 vm_page_test_dirty(p);
424                 if (p->dirty == 0)
425                         break;
426                 vm_page_lock(p);
427                 if (!vm_page_in_laundry(p) ||
428                     p->hold_count != 0) {       /* may be undergoing I/O */
429                         vm_page_unlock(p);
430                         break;
431                 }
432                 pmap_remove_write(p);
433                 vm_page_unlock(p);
434                 mc[page_base + pageout_count] = ps = p;
435                 ++pageout_count;
436                 ++is;
437         }
438
439         /*
440          * If we exhausted our forward scan, continue with the reverse scan
441          * when possible, even past an alignment boundary.  This catches
442          * boundary conditions.
443          */
444         if (ib != 0 && pageout_count < vm_pageout_page_count)
445                 goto more;
446
447         return (vm_pageout_flush(&mc[page_base], pageout_count,
448             VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
449 }
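/*
 * Worked example, assuming the default vm_pageout_page_count of 32 and that
 * the neighboring pages are all dirty, unbusied, unheld, in the laundry, and
 * within the object: laundering the page at pindex 70 scans backwards until
 * it adds pindex 64, an alignment boundary, then scans forward and fills the
 * remaining slots with pindexes 71-95, yielding one naturally aligned
 * 32-page cluster covering pindexes 64-95.
 */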
450
451 /*
452  * vm_pageout_flush() - launder the given pages
453  *
 454  *      The given pages are laundered.  Note that we set up for the start of
 455  *      I/O (i.e., busy the page), mark it read-only, and bump the object
 456  *      reference count all in here rather than in the parent.  If we want
457  *      the parent to do more sophisticated things we may have to change
458  *      the ordering.
459  *
 460  *      Returned runlen is the count of pages between mreq and the first
 461  *      page after mreq with status VM_PAGER_AGAIN.
 462  *      *eio is set to TRUE if the pager returned VM_PAGER_ERROR or
 463  *      VM_PAGER_FAIL for any page in that run.
464  */
465 int
466 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
467     boolean_t *eio)
468 {
469         vm_object_t object = mc[0]->object;
470         int pageout_status[count];
471         int numpagedout = 0;
472         int i, runlen;
473
474         VM_OBJECT_ASSERT_WLOCKED(object);
475
476         /*
477          * Initiate I/O.  Mark the pages busy and verify that they're valid
478          * and read-only.
479          *
480          * We do not have to fixup the clean/dirty bits here... we can
481          * allow the pager to do it after the I/O completes.
482          *
483          * NOTE! mc[i]->dirty may be partial or fragmented due to an
484          * edge case with file fragments.
485          */
486         for (i = 0; i < count; i++) {
487                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
488                     ("vm_pageout_flush: partially invalid page %p index %d/%d",
489                         mc[i], i, count));
490                 KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
491                     ("vm_pageout_flush: writeable page %p", mc[i]));
492                 vm_page_sbusy(mc[i]);
493         }
494         vm_object_pip_add(object, count);
495
496         vm_pager_put_pages(object, mc, count, flags, pageout_status);
497
498         runlen = count - mreq;
499         if (eio != NULL)
500                 *eio = FALSE;
501         for (i = 0; i < count; i++) {
502                 vm_page_t mt = mc[i];
503
504                 KASSERT(pageout_status[i] == VM_PAGER_PEND ||
505                     !pmap_page_is_write_mapped(mt),
506                     ("vm_pageout_flush: page %p is not write protected", mt));
507                 switch (pageout_status[i]) {
508                 case VM_PAGER_OK:
509                         vm_page_lock(mt);
510                         if (vm_page_in_laundry(mt))
511                                 vm_page_deactivate_noreuse(mt);
512                         vm_page_unlock(mt);
513                         /* FALLTHROUGH */
514                 case VM_PAGER_PEND:
515                         numpagedout++;
516                         break;
517                 case VM_PAGER_BAD:
518                         /*
519                          * The page is outside the object's range.  We pretend
520                          * that the page out worked and clean the page, so the
521                          * changes will be lost if the page is reclaimed by
522                          * the page daemon.
523                          */
524                         vm_page_undirty(mt);
525                         vm_page_lock(mt);
526                         if (vm_page_in_laundry(mt))
527                                 vm_page_deactivate_noreuse(mt);
528                         vm_page_unlock(mt);
529                         break;
530                 case VM_PAGER_ERROR:
531                 case VM_PAGER_FAIL:
532                         /*
533                          * If the page couldn't be paged out to swap because the
534                          * pager wasn't able to find space, place the page in
535                          * the PQ_UNSWAPPABLE holding queue.  This is an
536                          * optimization that prevents the page daemon from
537                          * wasting CPU cycles on pages that cannot be reclaimed
538                          * because no swap device is configured.
539                          *
540                          * Otherwise, reactivate the page so that it doesn't
541                          * clog the laundry and inactive queues.  (We will try
542                          * paging it out again later.)
543                          */
544                         vm_page_lock(mt);
545                         if (object->type == OBJT_SWAP &&
546                             pageout_status[i] == VM_PAGER_FAIL) {
547                                 vm_page_unswappable(mt);
548                                 numpagedout++;
549                         } else
550                                 vm_page_activate(mt);
551                         vm_page_unlock(mt);
552                         if (eio != NULL && i >= mreq && i - mreq < runlen)
553                                 *eio = TRUE;
554                         break;
555                 case VM_PAGER_AGAIN:
556                         if (i >= mreq && i - mreq < runlen)
557                                 runlen = i - mreq;
558                         break;
559                 }
560
561                 /*
562                  * If the operation is still going, leave the page busy to
563                  * block all other accesses. Also, leave the paging in
564                  * progress indicator set so that we don't attempt an object
565                  * collapse.
566                  */
567                 if (pageout_status[i] != VM_PAGER_PEND) {
568                         vm_object_pip_wakeup(object);
569                         vm_page_sunbusy(mt);
570                 }
571         }
572         if (prunlen != NULL)
573                 *prunlen = runlen;
574         return (numpagedout);
575 }
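/*
 * Example of the runlen accounting above: with mreq = 0 and five pages whose
 * statuses come back OK, OK, AGAIN, OK, PEND, the function returns
 * numpagedout = 4 (the OK and PEND pages) and sets *prunlen = 2, the length
 * of the run preceding the first VM_PAGER_AGAIN page.
 */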
576
577 static void
578 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
579 {
580
581         atomic_store_rel_int(&swapdev_enabled, 1);
582 }
583
584 static void
585 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
586 {
587
588         if (swap_pager_nswapdev() == 1)
589                 atomic_store_rel_int(&swapdev_enabled, 0);
590 }
591
592 /*
593  * Attempt to acquire all of the necessary locks to launder a page and
594  * then call through the clustering layer to PUTPAGES.  Wait a short
595  * time for a vnode lock.
596  *
597  * Requires the page and object lock on entry, releases both before return.
598  * Returns 0 on success and an errno otherwise.
599  */
600 static int
601 vm_pageout_clean(vm_page_t m, int *numpagedout)
602 {
603         struct vnode *vp;
604         struct mount *mp;
605         vm_object_t object;
606         vm_pindex_t pindex;
607         int error, lockmode;
608
609         vm_page_assert_locked(m);
610         object = m->object;
611         VM_OBJECT_ASSERT_WLOCKED(object);
612         error = 0;
613         vp = NULL;
614         mp = NULL;
615
616         /*
617          * The object is already known NOT to be dead.   It
618          * is possible for the vget() to block the whole
619          * pageout daemon, but the new low-memory handling
620          * code should prevent it.
621          *
622          * We can't wait forever for the vnode lock, we might
623          * deadlock due to a vn_read() getting stuck in
624          * vm_wait while holding this vnode.  We skip the 
625          * vnode if we can't get it in a reasonable amount
626          * of time.
627          */
628         if (object->type == OBJT_VNODE) {
629                 vm_page_unlock(m);
630                 vp = object->handle;
631                 if (vp->v_type == VREG &&
632                     vn_start_write(vp, &mp, V_NOWAIT) != 0) {
633                         mp = NULL;
634                         error = EDEADLK;
635                         goto unlock_all;
636                 }
637                 KASSERT(mp != NULL,
638                     ("vp %p with NULL v_mount", vp));
639                 vm_object_reference_locked(object);
640                 pindex = m->pindex;
641                 VM_OBJECT_WUNLOCK(object);
642                 lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
643                     LK_SHARED : LK_EXCLUSIVE;
644                 if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
645                         vp = NULL;
646                         error = EDEADLK;
647                         goto unlock_mp;
648                 }
649                 VM_OBJECT_WLOCK(object);
650                 vm_page_lock(m);
651                 /*
652                  * While the object and page were unlocked, the page
653                  * may have been:
654                  * (1) moved to a different queue,
655                  * (2) reallocated to a different object,
656                  * (3) reallocated to a different offset, or
657                  * (4) cleaned.
658                  */
659                 if (!vm_page_in_laundry(m) || m->object != object ||
660                     m->pindex != pindex || m->dirty == 0) {
661                         vm_page_unlock(m);
662                         error = ENXIO;
663                         goto unlock_all;
664                 }
665
666                 /*
667                  * The page may have been busied or held while the object
668                  * and page locks were released.
669                  */
670                 if (vm_page_busied(m) || m->hold_count != 0) {
671                         vm_page_unlock(m);
672                         error = EBUSY;
673                         goto unlock_all;
674                 }
675         }
676
677         /*
678          * If a page is dirty, then it is either being washed
679          * (but not yet cleaned) or it is still in the
680          * laundry.  If it is still in the laundry, then we
681          * start the cleaning operation. 
682          */
683         if ((*numpagedout = vm_pageout_cluster(m)) == 0)
684                 error = EIO;
685
686 unlock_all:
687         VM_OBJECT_WUNLOCK(object);
688
689 unlock_mp:
690         vm_page_lock_assert(m, MA_NOTOWNED);
691         if (mp != NULL) {
692                 if (vp != NULL)
693                         vput(vp);
694                 vm_object_deallocate(object);
695                 vn_finished_write(mp);
696         }
697
698         return (error);
699 }
700
701 /*
702  * Attempt to launder the specified number of pages.
703  *
704  * Returns the number of pages successfully laundered.
705  */
706 static int
707 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
708 {
709         struct vm_pagequeue *pq;
710         vm_object_t object;
711         vm_page_t m, next;
712         int act_delta, error, maxscan, numpagedout, starting_target;
713         int vnodes_skipped;
714         bool pageout_ok, queue_locked;
715
716         starting_target = launder;
717         vnodes_skipped = 0;
718
719         /*
720          * Scan the laundry queues for pages eligible to be laundered.  We stop
721          * once the target number of dirty pages have been laundered, or once
722          * we've reached the end of the queue.  A single iteration of this loop
723          * may cause more than one page to be laundered because of clustering.
724          *
725          * maxscan ensures that we don't re-examine requeued pages.  Any
726          * additional pages written as part of a cluster are subtracted from
727          * maxscan since they must be taken from the laundry queue.
728          *
729          * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
730          * swap devices are configured.
731          */
732         if (atomic_load_acq_int(&swapdev_enabled))
733                 pq = &vmd->vmd_pagequeues[PQ_UNSWAPPABLE];
734         else
735                 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
736
737 scan:
738         vm_pagequeue_lock(pq);
739         maxscan = pq->pq_cnt;
740         queue_locked = true;
741         for (m = TAILQ_FIRST(&pq->pq_pl);
742             m != NULL && maxscan-- > 0 && launder > 0;
743             m = next) {
744                 vm_pagequeue_assert_locked(pq);
745                 KASSERT(queue_locked, ("unlocked laundry queue"));
746                 KASSERT(vm_page_in_laundry(m),
747                     ("page %p has an inconsistent queue", m));
748                 next = TAILQ_NEXT(m, plinks.q);
749                 if ((m->flags & PG_MARKER) != 0)
750                         continue;
751                 KASSERT((m->flags & PG_FICTITIOUS) == 0,
752                     ("PG_FICTITIOUS page %p cannot be in laundry queue", m));
753                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
754                     ("VPO_UNMANAGED page %p cannot be in laundry queue", m));
755                 if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
756                         vm_page_unlock(m);
757                         continue;
758                 }
759                 object = m->object;
760                 if ((!VM_OBJECT_TRYWLOCK(object) &&
761                     (!vm_pageout_fallback_object_lock(m, &next) ||
762                     m->hold_count != 0)) || vm_page_busied(m)) {
763                         VM_OBJECT_WUNLOCK(object);
764                         vm_page_unlock(m);
765                         continue;
766                 }
767
768                 /*
769                  * Unlock the laundry queue, invalidating the 'next' pointer.
770                  * Use a marker to remember our place in the laundry queue.
771                  */
772                 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_laundry_marker,
773                     plinks.q);
774                 vm_pagequeue_unlock(pq);
775                 queue_locked = false;
776
777                 /*
778                  * Invalid pages can be easily freed.  They cannot be
779                  * mapped; vm_page_free() asserts this.
780                  */
781                 if (m->valid == 0)
782                         goto free_page;
783
784                 /*
785                  * If the page has been referenced and the object is not dead,
786                  * reactivate or requeue the page depending on whether the
787                  * object is mapped.
788                  */
789                 if ((m->aflags & PGA_REFERENCED) != 0) {
790                         vm_page_aflag_clear(m, PGA_REFERENCED);
791                         act_delta = 1;
792                 } else
793                         act_delta = 0;
794                 if (object->ref_count != 0)
795                         act_delta += pmap_ts_referenced(m);
796                 else {
797                         KASSERT(!pmap_page_is_mapped(m),
798                             ("page %p is mapped", m));
799                 }
800                 if (act_delta != 0) {
801                         if (object->ref_count != 0) {
802                                 VM_CNT_INC(v_reactivated);
803                                 vm_page_activate(m);
804
805                                 /*
806                                  * Increase the activation count if the page
807                                  * was referenced while in the laundry queue.
808                                  * This makes it less likely that the page will
809                                  * be returned prematurely to the inactive
810                                  * queue.
811                                  */
812                                 m->act_count += act_delta + ACT_ADVANCE;
813
814                                 /*
815                                  * If this was a background laundering, count
816                                  * activated pages towards our target.  The
817                                  * purpose of background laundering is to ensure
818                                  * that pages are eventually cycled through the
819                                  * laundry queue, and an activation is a valid
820                                  * way out.
821                                  */
822                                 if (!in_shortfall)
823                                         launder--;
824                                 goto drop_page;
825                         } else if ((object->flags & OBJ_DEAD) == 0)
826                                 goto requeue_page;
827                 }
828
829                 /*
830                  * If the page appears to be clean at the machine-independent
831                  * layer, then remove all of its mappings from the pmap in
832                  * anticipation of freeing it.  If, however, any of the page's
833                  * mappings allow write access, then the page may still be
834                  * modified until the last of those mappings are removed.
835                  */
836                 if (object->ref_count != 0) {
837                         vm_page_test_dirty(m);
838                         if (m->dirty == 0)
839                                 pmap_remove_all(m);
840                 }
841
842                 /*
843                  * Clean pages are freed, and dirty pages are paged out unless
844                  * they belong to a dead object.  Requeueing dirty pages from
845                  * dead objects is pointless, as they are being paged out and
846                  * freed by the thread that destroyed the object.
847                  */
848                 if (m->dirty == 0) {
849 free_page:
850                         vm_page_free(m);
851                         VM_CNT_INC(v_dfree);
852                 } else if ((object->flags & OBJ_DEAD) == 0) {
853                         if (object->type != OBJT_SWAP &&
854                             object->type != OBJT_DEFAULT)
855                                 pageout_ok = true;
856                         else if (disable_swap_pageouts)
857                                 pageout_ok = false;
858                         else
859                                 pageout_ok = true;
860                         if (!pageout_ok) {
861 requeue_page:
862                                 vm_pagequeue_lock(pq);
863                                 queue_locked = true;
864                                 vm_page_requeue_locked(m);
865                                 goto drop_page;
866                         }
867
868                         /*
869                          * Form a cluster with adjacent, dirty pages from the
870                          * same object, and page out that entire cluster.
871                          *
872                          * The adjacent, dirty pages must also be in the
873                          * laundry.  However, their mappings are not checked
874                          * for new references.  Consequently, a recently
875                          * referenced page may be paged out.  However, that
876                          * page will not be prematurely reclaimed.  After page
877                          * out, the page will be placed in the inactive queue,
878                          * where any new references will be detected and the
879                          * page reactivated.
880                          */
881                         error = vm_pageout_clean(m, &numpagedout);
882                         if (error == 0) {
883                                 launder -= numpagedout;
884                                 maxscan -= numpagedout - 1;
885                         } else if (error == EDEADLK) {
886                                 pageout_lock_miss++;
887                                 vnodes_skipped++;
888                         }
889                         goto relock_queue;
890                 }
891 drop_page:
892                 vm_page_unlock(m);
893                 VM_OBJECT_WUNLOCK(object);
894 relock_queue:
895                 if (!queue_locked) {
896                         vm_pagequeue_lock(pq);
897                         queue_locked = true;
898                 }
899                 next = TAILQ_NEXT(&vmd->vmd_laundry_marker, plinks.q);
900                 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_laundry_marker, plinks.q);
901         }
902         vm_pagequeue_unlock(pq);
903
904         if (launder > 0 && pq == &vmd->vmd_pagequeues[PQ_UNSWAPPABLE]) {
905                 pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
906                 goto scan;
907         }
908
909         /*
910          * Wake up the sync daemon if we skipped a vnode in a writeable object
911          * and we didn't launder enough pages.
912          */
913         if (vnodes_skipped > 0 && launder > 0)
914                 (void)speedup_syncer();
915
916         return (starting_target - launder);
917 }
918
919 /*
920  * Compute the integer square root.
921  */
922 static u_int
923 isqrt(u_int num)
924 {
925         u_int bit, root, tmp;
926
927         bit = 1u << ((NBBY * sizeof(u_int)) - 2);
928         while (bit > num)
929                 bit >>= 2;
930         root = 0;
931         while (bit != 0) {
932                 tmp = root + bit;
933                 root >>= 1;
934                 if (num >= tmp) {
935                         num -= tmp;
936                         root += bit;
937                 }
938                 bit >>= 2;
939         }
940         return (root);
941 }
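/*
 * Example: isqrt(27).  "bit" settles at 16; the loop accepts 16
 * (27 - 16 = 11), rejects 20, then accepts 9 (11 - 9 = 2), leaving
 * root = 5, the floor of sqrt(27).
 */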
942
943 /*
944  * Perform the work of the laundry thread: periodically wake up and determine
945  * whether any pages need to be laundered.  If so, determine the number of pages
946  * that need to be laundered, and launder them.
947  */
948 static void
949 vm_pageout_laundry_worker(void *arg)
950 {
951         struct vm_domain *domain;
952         struct vm_pagequeue *pq;
953         uint64_t nclean, ndirty;
954         u_int last_launder, wakeups;
955         int domidx, last_target, launder, shortfall, shortfall_cycle, target;
956         bool in_shortfall;
957
958         domidx = (uintptr_t)arg;
959         domain = &vm_dom[domidx];
960         pq = &domain->vmd_pagequeues[PQ_LAUNDRY];
961         KASSERT(domain->vmd_segs != 0, ("domain without segments"));
962         vm_pageout_init_marker(&domain->vmd_laundry_marker, PQ_LAUNDRY);
963
964         shortfall = 0;
965         in_shortfall = false;
966         shortfall_cycle = 0;
967         target = 0;
968         last_launder = 0;
969
970         /*
971          * Calls to these handlers are serialized by the swap syscall lock.
972          */
973         (void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, domain,
974             EVENTHANDLER_PRI_ANY);
975         (void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, domain,
976             EVENTHANDLER_PRI_ANY);
977
978         /*
979          * The pageout laundry worker is never done, so loop forever.
980          */
981         for (;;) {
982                 KASSERT(target >= 0, ("negative target %d", target));
983                 KASSERT(shortfall_cycle >= 0,
984                     ("negative cycle %d", shortfall_cycle));
985                 launder = 0;
986                 wakeups = VM_CNT_FETCH(v_pdwakeups);
987
988                 /*
989                  * First determine whether we need to launder pages to meet a
990                  * shortage of free pages.
991                  */
992                 if (shortfall > 0) {
993                         in_shortfall = true;
994                         shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
995                         target = shortfall;
996                 } else if (!in_shortfall)
997                         goto trybackground;
998                 else if (shortfall_cycle == 0 || vm_laundry_target() <= 0) {
999                         /*
1000                          * We recently entered shortfall and began laundering
1001                          * pages.  If we have completed that laundering run
1002                          * (and we are no longer in shortfall) or we have met
1003                          * our laundry target through other activity, then we
1004                          * can stop laundering pages.
1005                          */
1006                         in_shortfall = false;
1007                         target = 0;
1008                         goto trybackground;
1009                 }
1010                 last_launder = wakeups;
1011                 launder = target / shortfall_cycle--;
1012                 goto dolaundry;
1013
1014                 /*
1015                  * There's no immediate need to launder any pages; see if we
1016                  * meet the conditions to perform background laundering:
1017                  *
1018                  * 1. The ratio of dirty to clean inactive pages exceeds the
1019                  *    background laundering threshold and the pagedaemon has
1020                  *    been woken up to reclaim pages since our last
1021                  *    laundering, or
1022                  * 2. we haven't yet reached the target of the current
1023                  *    background laundering run.
1024                  *
1025                  * The background laundering threshold is not a constant.
1026                  * Instead, it is a slowly growing function of the number of
1027                  * page daemon wakeups since the last laundering.  Thus, as the
1028                  * ratio of dirty to clean inactive pages grows, the amount of
1029                  * memory pressure required to trigger laundering decreases.
1030                  */
1031 trybackground:
1032                 nclean = vm_cnt.v_inactive_count + vm_cnt.v_free_count;
1033                 ndirty = vm_cnt.v_laundry_count;
1034                 if (target == 0 && wakeups != last_launder &&
1035                     ndirty * isqrt(wakeups - last_launder) >= nclean) {
1036                         target = vm_background_launder_target;
1037                 }
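                /*
                 * For instance, if clean pages (inactive + free) outnumber
                 * dirty (laundry) pages four to one, the test above starts
                 * a background run once isqrt(wakeups - last_launder)
                 * reaches 4, i.e. after 16 page daemon wakeups without any
                 * laundering.
                 */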
1038
1039                 /*
1040                  * We have a non-zero background laundering target.  If we've
1041                  * laundered up to our maximum without observing a page daemon
1042                  * wakeup, just stop.  This is a safety belt that ensures we
1043                  * don't launder an excessive amount if memory pressure is low
1044                  * and the ratio of dirty to clean pages is large.  Otherwise,
1045                  * proceed at the background laundering rate.
1046                  */
1047                 if (target > 0) {
1048                         if (wakeups != last_launder) {
1049                                 last_launder = wakeups;
1050                                 last_target = target;
1051                         } else if (last_target - target >=
1052                             vm_background_launder_max * PAGE_SIZE / 1024) {
1053                                 target = 0;
1054                         }
1055                         launder = vm_background_launder_rate * PAGE_SIZE / 1024;
1056                         launder /= VM_LAUNDER_RATE;
1057                         if (launder > target)
1058                                 launder = target;
1059                 }
1060
1061 dolaundry:
1062                 if (launder > 0) {
1063                         /*
1064                          * Because of I/O clustering, the number of laundered
1065                          * pages could exceed "target" by the maximum size of
1066                          * a cluster minus one. 
1067                          */
1068                         target -= min(vm_pageout_launder(domain, launder,
1069                             in_shortfall), target);
1070                         pause("laundp", hz / VM_LAUNDER_RATE);
1071                 }
1072
1073                 /*
1074                  * If we're not currently laundering pages and the page daemon
1075                  * hasn't posted a new request, sleep until the page daemon
1076                  * kicks us.
1077                  */
1078                 vm_pagequeue_lock(pq);
1079                 if (target == 0 && vm_laundry_request == VM_LAUNDRY_IDLE)
1080                         (void)mtx_sleep(&vm_laundry_request,
1081                             vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1082
1083                 /*
1084                  * If the pagedaemon has indicated that it's in shortfall, start
1085                  * a shortfall laundering unless we're already in the middle of
1086                  * one.  This may preempt a background laundering.
1087                  */
1088                 if (vm_laundry_request == VM_LAUNDRY_SHORTFALL &&
1089                     (!in_shortfall || shortfall_cycle == 0)) {
1090                         shortfall = vm_laundry_target() + vm_pageout_deficit;
1091                         target = 0;
1092                 } else
1093                         shortfall = 0;
1094
1095                 if (target == 0)
1096                         vm_laundry_request = VM_LAUNDRY_IDLE;
1097                 vm_pagequeue_unlock(pq);
1098         }
1099 }
1100
1101 /*
1102  *      vm_pageout_scan does the dirty work for the pageout daemon.
1103  *
1104  *      pass == 0: Update active LRU/deactivate pages
1105  *      pass >= 1: Free inactive pages
1106  *
1107  * Returns true if pass was zero or enough pages were freed by the inactive
1108  * queue scan to meet the target.
1109  */
1110 static bool
1111 vm_pageout_scan(struct vm_domain *vmd, int pass)
1112 {
1113         vm_page_t m, next;
1114         struct vm_pagequeue *pq;
1115         vm_object_t object;
1116         long min_scan;
1117         int act_delta, addl_page_shortage, deficit, inactq_shortage, maxscan;
1118         int page_shortage, scan_tick, scanned, starting_page_shortage;
1119         boolean_t queue_locked;
1120
1121         /*
1122          * If we need to reclaim memory, ask kernel caches to return
1123          * some.  We rate limit to avoid thrashing.
1124          */
1125         if (vmd == &vm_dom[0] && pass > 0 &&
1126             (time_uptime - lowmem_uptime) >= lowmem_period) {
1127                 /*
1128                  * Decrease registered cache sizes.
1129                  */
1130                 SDT_PROBE0(vm, , , vm__lowmem_scan);
1131                 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
1132                 /*
1133                  * We do this explicitly after the caches have been
1134                  * drained above.
1135                  */
1136                 uma_reclaim();
1137                 lowmem_uptime = time_uptime;
1138         }
1139
1140         /*
1141          * The addl_page_shortage is the number of temporarily
1142          * stuck pages in the inactive queue.  In other words, the
1143          * number of pages from the inactive count that should be
1144          * discounted in setting the target for the active queue scan.
1145          */
1146         addl_page_shortage = 0;
1147
1148         /*
1149          * Calculate the number of pages that we want to free.  This number
1150          * can be negative if many pages are freed between the wakeup call to
1151          * the page daemon and this calculation.
1152          */
1153         if (pass > 0) {
1154                 deficit = atomic_readandclear_int(&vm_pageout_deficit);
1155                 page_shortage = vm_paging_target() + deficit;
1156         } else
1157                 page_shortage = deficit = 0;
1158         starting_page_shortage = page_shortage;
1159
1160         /*
1161          * Start scanning the inactive queue for pages that we can free.  The
1162          * scan will stop when we reach the target or we have scanned the
1163          * entire queue.  (Note that m->act_count is not used to make
1164          * decisions for the inactive queue, only for the active queue.)
1165          */
1166         pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1167         maxscan = pq->pq_cnt;
1168         vm_pagequeue_lock(pq);
1169         queue_locked = TRUE;
1170         for (m = TAILQ_FIRST(&pq->pq_pl);
1171              m != NULL && maxscan-- > 0 && page_shortage > 0;
1172              m = next) {
1173                 vm_pagequeue_assert_locked(pq);
1174                 KASSERT(queue_locked, ("unlocked inactive queue"));
1175                 KASSERT(vm_page_inactive(m), ("Inactive queue %p", m));
1176
1177                 VM_CNT_INC(v_pdpages);
1178                 next = TAILQ_NEXT(m, plinks.q);
1179
1180                 /*
1181                  * skip marker pages
1182                  */
1183                 if (m->flags & PG_MARKER)
1184                         continue;
1185
1186                 KASSERT((m->flags & PG_FICTITIOUS) == 0,
1187                     ("Fictitious page %p cannot be in inactive queue", m));
1188                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1189                     ("Unmanaged page %p cannot be in inactive queue", m));
1190
1191                 /*
1192                  * The page or object lock acquisitions fail if the
1193                  * page was removed from the queue or moved to a
1194                  * different position within the queue.  In either
1195                  * case, addl_page_shortage should not be incremented.
1196                  */
1197                 if (!vm_pageout_page_lock(m, &next))
1198                         goto unlock_page;
1199                 else if (m->hold_count != 0) {
1200                         /*
1201                          * Held pages are essentially stuck in the
1202                          * queue.  So, they ought to be discounted
1203                          * from the inactive count.  See the
1204                          * calculation of inactq_shortage before the
1205                          * loop over the active queue below.
1206                          */
1207                         addl_page_shortage++;
1208                         goto unlock_page;
1209                 }
1210                 object = m->object;
1211                 if (!VM_OBJECT_TRYWLOCK(object)) {
1212                         if (!vm_pageout_fallback_object_lock(m, &next))
1213                                 goto unlock_object;
1214                         else if (m->hold_count != 0) {
1215                                 addl_page_shortage++;
1216                                 goto unlock_object;
1217                         }
1218                 }
1219                 if (vm_page_busied(m)) {
1220                         /*
1221                          * Don't mess with busy pages.  Leave them at
1222                          * the front of the queue.  Most likely, they
1223                          * are being paged out and will leave the
1224                          * queue shortly after the scan finishes.  So,
1225                          * they ought to be discounted from the
1226                          * inactive count.
1227                          */
1228                         addl_page_shortage++;
1229 unlock_object:
1230                         VM_OBJECT_WUNLOCK(object);
1231 unlock_page:
1232                         vm_page_unlock(m);
1233                         continue;
1234                 }
1235                 KASSERT(m->hold_count == 0, ("Held page %p", m));
1236
1237                 /*
1238                  * Dequeue the inactive page and unlock the inactive page
1239                  * queue, invalidating the 'next' pointer.  Dequeueing the
1240                  * page here avoids a later reacquisition (and release) of
1241                  * the inactive page queue lock when vm_page_activate(),
1242                  * vm_page_free(), or vm_page_launder() is called.  Use a
1243                  * marker to remember our place in the inactive queue.
1244                  */
1245                 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
1246                 vm_page_dequeue_locked(m);
1247                 vm_pagequeue_unlock(pq);
1248                 queue_locked = FALSE;
1249
1250                 /*
1251                  * Invalid pages can be easily freed. They cannot be
1252                  * mapped; vm_page_free() asserts this.
1253                  */
1254                 if (m->valid == 0)
1255                         goto free_page;
1256
1257                 /*
1258                  * If the page has been referenced and the object is not dead,
1259                  * reactivate or requeue the page depending on whether the
1260                  * object is mapped.
1261                  */
1262                 if ((m->aflags & PGA_REFERENCED) != 0) {
1263                         vm_page_aflag_clear(m, PGA_REFERENCED);
1264                         act_delta = 1;
1265                 } else
1266                         act_delta = 0;
1267                 if (object->ref_count != 0) {
1268                         act_delta += pmap_ts_referenced(m);
1269                 } else {
1270                         KASSERT(!pmap_page_is_mapped(m),
1271                             ("vm_pageout_scan: page %p is mapped", m));
1272                 }
1273                 if (act_delta != 0) {
1274                         if (object->ref_count != 0) {
1275                                 VM_CNT_INC(v_reactivated);
1276                                 vm_page_activate(m);
1277
1278                                 /*
1279                                  * Increase the activation count if the page
1280                                  * was referenced while in the inactive queue.
1281                                  * This makes it less likely that the page will
1282                                  * be returned prematurely to the inactive
1283                                  * queue.
1284                                  */
1285                                 m->act_count += act_delta + ACT_ADVANCE;
1286                                 goto drop_page;
1287                         } else if ((object->flags & OBJ_DEAD) == 0) {
1288                                 vm_pagequeue_lock(pq);
1289                                 queue_locked = TRUE;
1290                                 m->queue = PQ_INACTIVE;
1291                                 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
1292                                 vm_pagequeue_cnt_inc(pq);
1293                                 goto drop_page;
1294                         }
1295                 }
1296
1297                 /*
1298                  * If the page appears to be clean at the machine-independent
1299                  * layer, then remove all of its mappings from the pmap in
1300                  * anticipation of freeing it.  If, however, any of the page's
1301                  * mappings allow write access, then the page may still be
1302                  * modified until the last of those mappings are removed.
1303                  */
1304                 if (object->ref_count != 0) {
1305                         vm_page_test_dirty(m);
1306                         if (m->dirty == 0)
1307                                 pmap_remove_all(m);
1308                 }
1309
1310                 /*
1311                  * Clean pages can be freed, but dirty pages must be sent back
1312                  * to the laundry, unless they belong to a dead object.
1313                  * Requeueing dirty pages from dead objects is pointless, as
1314                  * they are being paged out and freed by the thread that
1315                  * destroyed the object.
1316                  */
1317                 if (m->dirty == 0) {
1318 free_page:
1319                         vm_page_free(m);
1320                         VM_CNT_INC(v_dfree);
1321                         --page_shortage;
1322                 } else if ((object->flags & OBJ_DEAD) == 0)
1323                         vm_page_launder(m);
1324 drop_page:
1325                 vm_page_unlock(m);
1326                 VM_OBJECT_WUNLOCK(object);
1327                 if (!queue_locked) {
1328                         vm_pagequeue_lock(pq);
1329                         queue_locked = TRUE;
1330                 }
1331                 next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
1332                 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
1333         }
1334         vm_pagequeue_unlock(pq);
1335
1336         /*
1337          * Wake up the laundry thread so that it can perform any needed
1338          * laundering.  If we didn't meet our target, we're in shortfall and
1339          * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1340          * swap devices are configured, the laundry thread has no work to do, so
1341          * don't bother waking it up.
1342          */
1343         if (vm_laundry_request == VM_LAUNDRY_IDLE &&
1344             starting_page_shortage > 0) {
1345                 pq = &vm_dom[0].vmd_pagequeues[PQ_LAUNDRY];
1346                 vm_pagequeue_lock(pq);
1347                 if (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled)) {
1348                         if (page_shortage > 0) {
1349                                 vm_laundry_request = VM_LAUNDRY_SHORTFALL;
1350                                 VM_CNT_INC(v_pdshortfalls);
1351                         } else if (vm_laundry_request != VM_LAUNDRY_SHORTFALL)
1352                                 vm_laundry_request = VM_LAUNDRY_BACKGROUND;
1353                         wakeup(&vm_laundry_request);
1354                 }
1355                 vm_pagequeue_unlock(pq);
1356         }
1357
1358         /*
1359          * Wake up the swapout daemon if we didn't free the targeted number of
1360          * pages.
1361          */
1362         if (page_shortage > 0)
1363                 vm_swapout_run();
1364
1365         /*
1366          * If the inactive queue scan fails repeatedly to meet its
1367          * target, kill the largest process.
1368          */
1369         vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1370
1371         /*
1372          * Compute the number of pages we want to try to move from the
1373          * active queue to either the inactive or laundry queue.
1374          *
1375          * When scanning active pages, we make clean pages count more heavily
1376          * towards the page shortage than dirty pages.  This is because dirty
1377          * pages must be laundered before they can be reused and thus have less
1378          * utility when attempting to quickly alleviate a shortage.  However,
1379          * this weighting also causes the scan to deactivate dirty pages
1380          * more aggressively, improving the effectiveness of clustering and
1381          * ensuring that they can eventually be reused.
1382          */
1383         inactq_shortage = vm_cnt.v_inactive_target - (vm_cnt.v_inactive_count +
1384             vm_cnt.v_laundry_count / act_scan_laundry_weight) +
1385             vm_paging_target() + deficit + addl_page_shortage;
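             /*
              * Scale the target into the weighted units used by the active
              * queue scan below, where moving a clean page to the inactive
              * queue counts for act_scan_laundry_weight and moving a dirty
              * page to the laundry queue counts for one.
              */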
1386         inactq_shortage *= act_scan_laundry_weight;
1387
1388         pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1389         vm_pagequeue_lock(pq);
1390         maxscan = pq->pq_cnt;
1391
1392         /*
1393          * If we're just idle polling, attempt to visit every
1394          * active page within 'update_period' seconds.
1395          */
1396         scan_tick = ticks;
1397         if (vm_pageout_update_period != 0) {
1398                 min_scan = pq->pq_cnt;
1399                 min_scan *= scan_tick - vmd->vmd_last_active_scan;
1400                 min_scan /= hz * vm_pageout_update_period;
1401         } else
1402                 min_scan = 0;
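             /*
              * min_scan prorates the queue length by the fraction of the
              * update period that has elapsed since the last active scan.
              * For example, with the default 600 second period, a pass that
              * runs 60 seconds after the previous one visits at least a
              * tenth of the active queue.
              */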
1403         if (min_scan > 0 || (inactq_shortage > 0 && maxscan > 0))
1404                 vmd->vmd_last_active_scan = scan_tick;
1405
1406         /*
1407          * Scan the active queue for pages that can be deactivated.  Update
1408          * the per-page activity counter and use it to identify deactivation
1409          * candidates.  Held pages may be deactivated.
1410          */
1411         for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned <
1412             min_scan || (inactq_shortage > 0 && scanned < maxscan)); m = next,
1413             scanned++) {
1414                 KASSERT(m->queue == PQ_ACTIVE,
1415                     ("vm_pageout_scan: page %p isn't active", m));
1416                 next = TAILQ_NEXT(m, plinks.q);
1417                 if ((m->flags & PG_MARKER) != 0)
1418                         continue;
1419                 KASSERT((m->flags & PG_FICTITIOUS) == 0,
1420                     ("Fictitious page %p cannot be in active queue", m));
1421                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1422                     ("Unmanaged page %p cannot be in active queue", m));
1423                 if (!vm_pageout_page_lock(m, &next)) {
1424                         vm_page_unlock(m);
1425                         continue;
1426                 }
1427
1428                 /*
1429                  * The count for page daemon pages is updated after checking
1430                  * the page for eligibility.
1431                  */
1432                 VM_CNT_INC(v_pdpages);
1433
1434                 /*
1435                  * Check to see "how much" the page has been used.
1436                  */
1437                 if ((m->aflags & PGA_REFERENCED) != 0) {
1438                         vm_page_aflag_clear(m, PGA_REFERENCED);
1439                         act_delta = 1;
1440                 } else
1441                         act_delta = 0;
1442
1443                 /*
1444                  * Perform an unsynchronized object ref count check.  While
1445                  * the page lock ensures that the page is not reallocated to
1446                  * another object, in particular, one with unmanaged mappings
1447                  * that cannot support pmap_ts_referenced(), two races are,
1448                  * nonetheless, possible:
1449                  * 1) The count was transitioning to zero, but we saw a non-
1450                  *    zero value.  pmap_ts_referenced() will return zero
1451                  *    because the page is not mapped.
1452                  * 2) The count was transitioning to one, but we saw zero. 
1453                  *    This race delays the detection of a new reference.  At
1454                  *    worst, we will deactivate and reactivate the page.
1455                  */
1456                 if (m->object->ref_count != 0)
1457                         act_delta += pmap_ts_referenced(m);
1458
1459                 /*
1460                  * Advance or decay the act_count based on recent usage.
1461                  */
1462                 if (act_delta != 0) {
1463                         m->act_count += ACT_ADVANCE + act_delta;
1464                         if (m->act_count > ACT_MAX)
1465                                 m->act_count = ACT_MAX;
1466                 } else
1467                         m->act_count -= min(m->act_count, ACT_DECLINE);
1468
1469                 /*
1470                  * Move this page to the tail of the active, inactive or laundry
1471                  * queue depending on usage.
1472                  */
1473                 if (m->act_count == 0) {
1474                         /* Dequeue to avoid later lock recursion. */
1475                         vm_page_dequeue_locked(m);
1476
1477                         /*
1478                          * When not short for inactive pages, let dirty pages go
1479                          * through the inactive queue before moving to the
1480                          * laundry queues.  This gives them some extra time to
1481                          * be reactivated, potentially avoiding an expensive
1482                          * pageout.  During a page shortage, the inactive queue
1483                          * is necessarily small, so we may move dirty pages
1484                          * directly to the laundry queue.
1485                          */
1486                         if (inactq_shortage <= 0)
1487                                 vm_page_deactivate(m);
1488                         else {
1489                                 /*
1490                                  * Calling vm_page_test_dirty() here would
1491                                  * require acquisition of the object's write
1492                                  * lock.  However, during a page shortage,
1493                                  * directing dirty pages into the laundry
1494                                  * queue is only an optimization and not a
1495                                  * requirement.  Therefore, we simply rely on
1496                                  * the opportunistic updates to the page's
1497                                  * dirty field by the pmap.
1498                                  */
1499                                 if (m->dirty == 0) {
1500                                         vm_page_deactivate(m);
1501                                         inactq_shortage -=
1502                                             act_scan_laundry_weight;
1503                                 } else {
1504                                         vm_page_launder(m);
1505                                         inactq_shortage--;
1506                                 }
1507                         }
1508                 } else
1509                         vm_page_requeue_locked(m);
1510                 vm_page_unlock(m);
1511         }
1512         vm_pagequeue_unlock(pq);
1513         if (pass > 0)
1514                 vm_swapout_run_idle();
1515         return (page_shortage <= 0);
1516 }
1517
1518 static int vm_pageout_oom_vote;
1519
1520 /*
1521  * Each pagedaemon thread casts a vote; the last thread to vote
1522  * performs the OOM kill.  Killing processes before all pagedaemons
1523  * have failed to reach the free page target would be premature.
1524  */
1525 static void
1526 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1527     int starting_page_shortage)
1528 {
1529         int old_vote;
1530
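             /*
              * If there was no shortage to begin with, or the scan freed at
              * least one page, reset the count of consecutive fruitless
              * scans.  A domain votes for an OOM kill only after
              * vm_pageout_oom_seq consecutive scans fail to make progress.
              */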
1531         if (starting_page_shortage <= 0 || starting_page_shortage !=
1532             page_shortage)
1533                 vmd->vmd_oom_seq = 0;
1534         else
1535                 vmd->vmd_oom_seq++;
1536         if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1537                 if (vmd->vmd_oom) {
1538                         vmd->vmd_oom = FALSE;
1539                         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1540                 }
1541                 return;
1542         }
1543
1544         /*
1545          * Reset the count of failed scans so that the OOM sequence is
1546          * not immediately restarted on the next pass.
1547          */
1548         vmd->vmd_oom_seq = 0;
1549
1550         if (vmd->vmd_oom)
1551                 return;
1552
1553         vmd->vmd_oom = TRUE;
1554         old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1555         if (old_vote != vm_ndomains - 1)
1556                 return;
1557
1558         /*
1559          * The current pagedaemon thread is the last in the quorum to
1560          * start OOM.  Initiate the selection and signaling of the
1561          * victim.
1562          */
1563         vm_pageout_oom(VM_OOM_MEM);
1564
1565         /*
1566          * After one round of OOM terror, recall our vote.  On the
1567          * next pass, current pagedaemon would vote again if the low
1568          * memory condition is still there, due to vmd_oom being
1569          * false.
1570          */
1571         vmd->vmd_oom = FALSE;
1572         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1573 }
1574
1575 /*
1576  * The OOM killer is the page daemon's action of last resort when
1577  * memory allocation requests have been stalled for a prolonged period
1578  * of time because it cannot reclaim memory.  This function computes
1579  * the approximate number of physical pages that could be reclaimed if
1580  * the specified address space is destroyed.
1581  *
1582  * Private, anonymous memory owned by the address space is the
1583  * principal resource that we expect to recover after an OOM kill.
1584  * Since the physical pages mapped by the address space's COW entries
1585  * are typically shared pages, they are unlikely to be released and so
1586  * they are not counted.
1587  *
1588  * To get to the point where the page daemon runs the OOM killer, its
1589  * efforts to write-back vnode-backed pages may have stalled.  This
1590  * could be caused by a memory allocation deadlock in the write path
1591  * that might be resolved by an OOM kill.  Therefore, physical pages
1592  * belonging to vnode-backed objects are counted, because they might
1593  * be freed without being written out first if the address space holds
1594  * the last reference to an unlinked vnode.
1595  *
1596  * Similarly, physical pages belonging to OBJT_PHYS objects are
1597  * counted because the address space might hold the last reference to
1598  * the object.
1599  */
1600 static long
1601 vm_pageout_oom_pagecount(struct vmspace *vmspace)
1602 {
1603         vm_map_t map;
1604         vm_map_entry_t entry;
1605         vm_object_t obj;
1606         long res;
1607
1608         map = &vmspace->vm_map;
1609         KASSERT(!map->system_map, ("system map"));
1610         sx_assert(&map->lock, SA_LOCKED);
1611         res = 0;
1612         for (entry = map->header.next; entry != &map->header;
1613             entry = entry->next) {
1614                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1615                         continue;
1616                 obj = entry->object.vm_object;
1617                 if (obj == NULL)
1618                         continue;
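                     /*
                      * A copy-on-write object that is still shared with another
                      * reference holder is unlikely to yield free pages if this
                      * process is killed, so do not count its pages (see the
                      * comment above this function).
                      */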
1619                 if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
1620                     obj->ref_count != 1)
1621                         continue;
1622                 switch (obj->type) {
1623                 case OBJT_DEFAULT:
1624                 case OBJT_SWAP:
1625                 case OBJT_PHYS:
1626                 case OBJT_VNODE:
1627                         res += obj->resident_page_count;
1628                         break;
1629                 }
1630         }
1631         return (res);
1632 }
1633
1634 void
1635 vm_pageout_oom(int shortage)
1636 {
1637         struct proc *p, *bigproc;
1638         vm_offset_t size, bigsize;
1639         struct thread *td;
1640         struct vmspace *vm;
1641         bool breakout;
1642
1643         /*
1644          * We keep the process bigproc locked once we find it to keep anyone
1645          * from messing with it; however, there is a possibility of
1646          * deadlock if process B is bigproc and one of its child processes
1647          * attempts to propagate a signal to B while we are waiting for the
1648          * child's lock while walking this list.  To avoid this, we don't block on
1649          * the process lock but just skip a process if it is already locked.
1650          */
1651         bigproc = NULL;
1652         bigsize = 0;
1653         sx_slock(&allproc_lock);
1654         FOREACH_PROC_IN_SYSTEM(p) {
1655                 PROC_LOCK(p);
1656
1657                 /*
1658                  * If this is a system, protected or killed process, skip it.
1659                  */
1660                 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1661                     P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1662                     p->p_pid == 1 || P_KILLED(p) ||
1663                     (p->p_pid < 48 && swap_pager_avail != 0)) {
1664                         PROC_UNLOCK(p);
1665                         continue;
1666                 }
1667                 /*
1668                  * If the process is in a non-running type state,
1669                  * don't touch it.  Check all the threads individually.
1670                  */
1671                 breakout = false;
1672                 FOREACH_THREAD_IN_PROC(p, td) {
1673                         thread_lock(td);
1674                         if (!TD_ON_RUNQ(td) &&
1675                             !TD_IS_RUNNING(td) &&
1676                             !TD_IS_SLEEPING(td) &&
1677                             !TD_IS_SUSPENDED(td) &&
1678                             !TD_IS_SWAPPED(td)) {
1679                                 thread_unlock(td);
1680                                 breakout = true;
1681                                 break;
1682                         }
1683                         thread_unlock(td);
1684                 }
1685                 if (breakout) {
1686                         PROC_UNLOCK(p);
1687                         continue;
1688                 }
1689                 /*
1690                  * get the process size
1691                  */
1692                 vm = vmspace_acquire_ref(p);
1693                 if (vm == NULL) {
1694                         PROC_UNLOCK(p);
1695                         continue;
1696                 }
1697                 _PHOLD_LITE(p);
1698                 PROC_UNLOCK(p);
1699                 sx_sunlock(&allproc_lock);
1700                 if (!vm_map_trylock_read(&vm->vm_map)) {
1701                         vmspace_free(vm);
1702                         sx_slock(&allproc_lock);
1703                         PRELE(p);
1704                         continue;
1705                 }
1706                 size = vmspace_swap_count(vm);
1707                 if (shortage == VM_OOM_MEM)
1708                         size += vm_pageout_oom_pagecount(vm);
1709                 vm_map_unlock_read(&vm->vm_map);
1710                 vmspace_free(vm);
1711                 sx_slock(&allproc_lock);
1712
1713                 /*
1714                  * If this process is bigger than the biggest one,
1715                  * remember it.
1716                  */
1717                 if (size > bigsize) {
1718                         if (bigproc != NULL)
1719                                 PRELE(bigproc);
1720                         bigproc = p;
1721                         bigsize = size;
1722                 } else {
1723                         PRELE(p);
1724                 }
1725         }
1726         sx_sunlock(&allproc_lock);
1727         if (bigproc != NULL) {
1728                 if (vm_panic_on_oom != 0)
1729                         panic("out of swap space");
1730                 PROC_LOCK(bigproc);
1731                 killproc(bigproc, "out of swap space");
1732                 sched_nice(bigproc, PRIO_MIN);
1733                 _PRELE(bigproc);
1734                 PROC_UNLOCK(bigproc);
1735                 wakeup(&vm_cnt.v_free_count);
1736         }
1737 }
1738
1739 static void
1740 vm_pageout_worker(void *arg)
1741 {
1742         struct vm_domain *domain;
1743         int domidx, pass;
1744         bool target_met;
1745
1746         domidx = (uintptr_t)arg;
1747         domain = &vm_dom[domidx];
1748         pass = 0;
1749         target_met = true;
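             /*
              * "pass" selects the scan level: a level 0 scan only updates
              * page reference statistics via the active queue scan, while a
              * level >= 1 scan also reclaims pages to meet the free page
              * target.
              */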
1750
1751         /*
1752          * XXXKIB It could be useful to bind pageout daemon threads to
1753          * the cores belonging to the domain, from which vm_page_array
1754          * is allocated.
1755          */
1756
1757         KASSERT(domain->vmd_segs != 0, ("domain without segments"));
1758         domain->vmd_last_active_scan = ticks;
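             /*
              * vmd_marker records the scan position in the inactive queue
              * while the queue lock is dropped during a scan; vmd_inacthead
              * is kept at the head of the inactive queue so that
              * vm_page_deactivate_noreuse() can insert pages near the front
              * of the queue, where they will be reclaimed sooner.
              */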
1759         vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE);
1760         vm_pageout_init_marker(&domain->vmd_inacthead, PQ_INACTIVE);
1761         TAILQ_INSERT_HEAD(&domain->vmd_pagequeues[PQ_INACTIVE].pq_pl,
1762             &domain->vmd_inacthead, plinks.q);
1763
1764         /*
1765          * The pageout daemon worker is never done, so loop forever.
1766          */
1767         while (TRUE) {
1768                 mtx_lock(&vm_page_queue_free_mtx);
1769
1770                 /*
1771                  * Generally, after a level >= 1 scan, if there are enough
1772                  * free pages to wakeup the waiters, then they are already
1773                  * awake.  A call to vm_page_free() during the scan awakened
1774                  * them.  However, in the following case, this wakeup serves
1775                  * to bound the amount of time that a thread might wait.
1776                  * Suppose a thread's call to vm_page_alloc() fails, but
1777                  * before that thread calls VM_WAIT, enough pages are freed by
1778                  * other threads to alleviate the free page shortage.  The
1779                  * thread will, nonetheless, wait until another page is freed
1780                  * or this wakeup is performed.
1781                  */
1782                 if (vm_pages_needed && !vm_page_count_min()) {
1783                         vm_pages_needed = false;
1784                         wakeup(&vm_cnt.v_free_count);
1785                 }
1786
1787                 /*
1788                  * Do not clear vm_pageout_wanted until we reach our free page
1789                  * target.  Otherwise, we may be awakened over and over again,
1790                  * wasting CPU time.
1791                  */
1792                 if (vm_pageout_wanted && target_met)
1793                         vm_pageout_wanted = false;
1794
1795                 /*
1796                  * Might the page daemon receive a wakeup call?
1797                  */
1798                 if (vm_pageout_wanted) {
1799                         /*
1800                          * No.  Either vm_pageout_wanted was set by another
1801                          * thread during the previous scan, which must have
1802                          * been a level 0 scan, or vm_pageout_wanted was
1803                          * already set and the scan failed to free enough
1804                          * pages.  If we haven't yet performed a level >= 1
1805                          * (page reclamation) scan, then increase the level
1806                          * and scan again now.  Otherwise, sleep a bit and
1807                          * try again later.
1808                          */
1809                         mtx_unlock(&vm_page_queue_free_mtx);
1810                         if (pass >= 1)
1811                                 pause("psleep", hz / VM_INACT_SCAN_RATE);
1812                         pass++;
1813                 } else {
1814                         /*
1815                          * Yes.  Sleep until pages need to be reclaimed or
1816                          * have their reference stats updated.
1817                          */
1818                         if (mtx_sleep(&vm_pageout_wanted,
1819                             &vm_page_queue_free_mtx, PDROP | PVM, "psleep",
1820                             hz) == 0) {
1821                                 VM_CNT_INC(v_pdwakeups);
1822                                 pass = 1;
1823                         } else
1824                                 pass = 0;
1825                 }
1826
1827                 target_met = vm_pageout_scan(domain, pass);
1828         }
1829 }
1830
1831 /*
1832  *      vm_pageout_init initialises basic pageout daemon settings.
1833  */
1834 static void
1835 vm_pageout_init(void)
1836 {
1837         /*
1838          * Initialize some paging parameters.
1839          */
1840         vm_cnt.v_interrupt_free_min = 2;
1841         if (vm_cnt.v_page_count < 2000)
1842                 vm_pageout_page_count = 8;
1843
1844         /*
1845          * v_free_reserved needs to include enough for the largest
1846          * swap pager structures plus enough for any pv_entry structs
1847          * when paging. 
1848          */
1849         if (vm_cnt.v_page_count > 1024)
1850                 vm_cnt.v_free_min = 4 + (vm_cnt.v_page_count - 1024) / 200;
1851         else
1852                 vm_cnt.v_free_min = 4;
1853         vm_cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1854             vm_cnt.v_interrupt_free_min;
1855         vm_cnt.v_free_reserved = vm_pageout_page_count +
1856             vm_cnt.v_pageout_free_min + (vm_cnt.v_page_count / 768);
1857         vm_cnt.v_free_severe = vm_cnt.v_free_min / 2;
1858         vm_cnt.v_free_target = 4 * vm_cnt.v_free_min + vm_cnt.v_free_reserved;
1859         vm_cnt.v_free_min += vm_cnt.v_free_reserved;
1860         vm_cnt.v_free_severe += vm_cnt.v_free_reserved;
1861         vm_cnt.v_inactive_target = (3 * vm_cnt.v_free_target) / 2;
1862         if (vm_cnt.v_inactive_target > vm_cnt.v_free_count / 3)
1863                 vm_cnt.v_inactive_target = vm_cnt.v_free_count / 3;
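             /*
              * As a rough illustration only: on a hypothetical machine with
              * about 1,000,000 managed 4 KB pages (roughly 4 GB of RAM), and
              * assuming the default vm_pageout_page_count of 32 and a 64 KB
              * MAXBSIZE, v_free_min starts near 5,000 pages, v_free_reserved
              * comes to roughly 1,400 pages, v_free_target to roughly 21,000
              * pages (about 85 MB), and v_inactive_target to 1.5 times the
              * free target.
              */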
1864
1865         /*
1866          * Set the default wakeup threshold to be 10% above the minimum
1867          * page limit.  This keeps the steady state out of shortfall.
1868          */
1869         vm_pageout_wakeup_thresh = (vm_cnt.v_free_min / 10) * 11;
1870
1871         /*
1872          * Set interval in seconds for active scan.  We want to visit each
1873          * page at least once every ten minutes.  This is to prevent worst
1874          * case paging behaviors with stale active LRU.
1875          */
1876         if (vm_pageout_update_period == 0)
1877                 vm_pageout_update_period = 600;
1878
1879         /* XXX does not really belong here */
1880         if (vm_page_max_wired == 0)
1881                 vm_page_max_wired = vm_cnt.v_free_count / 3;
1882
1883         /*
1884          * Target amount of memory to move out of the laundry queue during a
1885          * background laundering.  This is proportional to the amount of system
1886          * memory.
1887          */
1888         vm_background_launder_target = (vm_cnt.v_free_target -
1889             vm_cnt.v_free_min) / 10;
1890 }
1891
1892 /*
1893  *     vm_pageout is the high level pageout daemon.
1894  */
1895 static void
1896 vm_pageout(void)
1897 {
1898         int error;
1899 #ifdef VM_NUMA_ALLOC
1900         int i;
1901 #endif
1902
1903         swap_pager_swap_init();
1904         error = kthread_add(vm_pageout_laundry_worker, NULL, curproc, NULL,
1905             0, 0, "laundry: dom0");
1906         if (error != 0)
1907                 panic("starting laundry for domain 0, error %d", error);
1908 #ifdef VM_NUMA_ALLOC
1909         for (i = 1; i < vm_ndomains; i++) {
1910                 error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i,
1911                     curproc, NULL, 0, 0, "dom%d", i);
1912                 if (error != 0) {
1913                         panic("starting pageout for domain %d, error %d\n",
1914                             i, error);
1915                 }
1916         }
1917 #endif
1918         error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL,
1919             0, 0, "uma");
1920         if (error != 0)
1921                 panic("starting uma_reclaim helper, error %d\n", error);
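             /*
              * The current thread becomes the pageout worker for domain 0.
              */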
1922         vm_pageout_worker((void *)(uintptr_t)0);
1923 }
1924
1925 /*
1926  * Unless the free page queue lock is held by the caller, this function
1927  * should be regarded as advisory.  Specifically, the caller should
1928  * not msleep() on &vm_cnt.v_free_count following this function unless
1929  * the free page queue lock is held until the msleep() is performed.
1930  */
1931 void
1932 pagedaemon_wakeup(void)
1933 {
1934
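             /*
              * Avoid waking the page daemon from within itself and avoid
              * redundant wakeups when a request is already pending.
              */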
1935         if (!vm_pageout_wanted && curthread->td_proc != pageproc) {
1936                 vm_pageout_wanted = true;
1937                 wakeup(&vm_pageout_wanted);
1938         }
1939 }