1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  * Copyright (c) 2005 Yahoo! Technologies Norway AS
9  * All rights reserved.
10  *
11  * This code is derived from software contributed to Berkeley by
12  * The Mach Operating System project at Carnegie-Mellon University.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. All advertising materials mentioning features or use of this software
23  *    must display the following acknowledgement:
24  *      This product includes software developed by the University of
25  *      California, Berkeley and its contributors.
26  * 4. Neither the name of the University nor the names of its contributors
27  *    may be used to endorse or promote products derived from this software
28  *    without specific prior written permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40  * SUCH DAMAGE.
41  *
42  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
43  *
44  *
45  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46  * All rights reserved.
47  *
48  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
49  *
50  * Permission to use, copy, modify and distribute this software and
51  * its documentation is hereby granted, provided that both the copyright
52  * notice and this permission notice appear in all copies of the
53  * software, derivative works or modified versions, and any portions
54  * thereof, and that both notices appear in supporting documentation.
55  *
56  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
57  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
58  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
59  *
60  * Carnegie Mellon requests users of this software to return to
61  *
62  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
63  *  School of Computer Science
64  *  Carnegie Mellon University
65  *  Pittsburgh PA 15213-3890
66  *
67  * any improvements or extensions that they make and grant Carnegie the
68  * rights to redistribute these changes.
69  */
70
71 /*
72  *      The proverbial page-out daemon.
73  */
74
75 #include <sys/cdefs.h>
76 __FBSDID("$FreeBSD$");
77
78 #include "opt_vm.h"
79 #include "opt_kdtrace.h"
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/eventhandler.h>
84 #include <sys/lock.h>
85 #include <sys/mutex.h>
86 #include <sys/proc.h>
87 #include <sys/kthread.h>
88 #include <sys/ktr.h>
89 #include <sys/mount.h>
90 #include <sys/racct.h>
91 #include <sys/resourcevar.h>
92 #include <sys/sched.h>
93 #include <sys/sdt.h>
94 #include <sys/signalvar.h>
95 #include <sys/smp.h>
96 #include <sys/time.h>
97 #include <sys/vnode.h>
98 #include <sys/vmmeter.h>
99 #include <sys/rwlock.h>
100 #include <sys/sx.h>
101 #include <sys/sysctl.h>
102
103 #include <vm/vm.h>
104 #include <vm/vm_param.h>
105 #include <vm/vm_object.h>
106 #include <vm/vm_page.h>
107 #include <vm/vm_map.h>
108 #include <vm/vm_pageout.h>
109 #include <vm/vm_pager.h>
110 #include <vm/vm_phys.h>
111 #include <vm/swap_pager.h>
112 #include <vm/vm_extern.h>
113 #include <vm/uma.h>
114
115 /*
116  * System initialization
117  */
118
119 /* the kernel process "vm_pageout"*/
120 static void vm_pageout(void);
121 static void vm_pageout_init(void);
122 static int vm_pageout_clean(vm_page_t m);
123 static int vm_pageout_cluster(vm_page_t m);
124 static void vm_pageout_scan(struct vm_domain *vmd, int pass);
125 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass);
126
127 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
128     NULL);
129
130 struct proc *pageproc;
131
132 static struct kproc_desc page_kp = {
133         "pagedaemon",
134         vm_pageout,
135         &pageproc
136 };
137 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
138     &page_kp);
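/*
 * The two pagedaemon SYSINITs above run within the same subsystem,
 * SI_SUB_KTHREAD_PAGE: vm_pageout_init() runs at SI_ORDER_FIRST, before
 * kproc_start() creates the "pagedaemon" process at SI_ORDER_SECOND.
 */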
139
140 SDT_PROVIDER_DEFINE(vm);
141 SDT_PROBE_DEFINE(vm, , , vm__lowmem_cache);
142 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
143
144 #if !defined(NO_SWAPPING)
145 /* the kernel process "vm_daemon"*/
146 static void vm_daemon(void);
147 static struct   proc *vmproc;
148
149 static struct kproc_desc vm_kp = {
150         "vmdaemon",
151         vm_daemon,
152         &vmproc
153 };
154 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
155 #endif
156
157
158 int vm_pages_needed;            /* Event on which pageout daemon sleeps */
159 int vm_pageout_deficit;         /* Estimated number of pages deficit */
160 int vm_pageout_pages_needed;    /* flag saying that the pageout daemon needs pages */
161 int vm_pageout_wakeup_thresh;
162
163 #if !defined(NO_SWAPPING)
164 static int vm_pageout_req_swapout;      /* XXX */
165 static int vm_daemon_needed;
166 static struct mtx vm_daemon_mtx;
167 /* Allow for use by vm_pageout before vm_daemon is initialized. */
168 MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
169 #endif
170 static int vm_max_launder = 32;
171 static int vm_pageout_update_period;
172 static int defer_swap_pageouts;
173 static int disable_swap_pageouts;
174 static int lowmem_period = 10;
175 static time_t lowmem_uptime;
176
177 #if defined(NO_SWAPPING)
178 static int vm_swap_enabled = 0;
179 static int vm_swap_idle_enabled = 0;
180 #else
181 static int vm_swap_enabled = 1;
182 static int vm_swap_idle_enabled = 0;
183 #endif
184
185 static int vm_panic_on_oom = 0;
186
187 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
188         CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
189         "panic on out of memory instead of killing the largest process");
190
191 SYSCTL_INT(_vm, OID_AUTO, pageout_wakeup_thresh,
192         CTLFLAG_RW, &vm_pageout_wakeup_thresh, 0,
193         "free page threshold for waking up the pageout daemon");
194
195 SYSCTL_INT(_vm, OID_AUTO, max_launder,
196         CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
197
198 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
199         CTLFLAG_RW, &vm_pageout_update_period, 0,
200         "Maximum active LRU update period");
201   
202 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RW, &lowmem_period, 0,
203         "Low memory callback period");
204
205 #if defined(NO_SWAPPING)
206 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
207         CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
208 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
209         CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
210 #else
211 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
212         CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
213 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
214         CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
215 #endif
216
217 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
218         CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
219
220 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
221         CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
222
223 static int pageout_lock_miss;
224 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
225         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
226
227 #define VM_PAGEOUT_PAGE_COUNT 16
228 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
229
230 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
231 SYSCTL_INT(_vm, OID_AUTO, max_wired,
232         CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
233
234 static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
235 static boolean_t vm_pageout_launder(struct vm_pagequeue *pq, int, vm_paddr_t,
236     vm_paddr_t);
237 #if !defined(NO_SWAPPING)
238 static void vm_pageout_map_deactivate_pages(vm_map_t, long);
239 static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
240 static void vm_req_vmdaemon(int req);
241 #endif
242 static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
243
244 /*
245  * Initialize a dummy page for marking the caller's place in the specified
246  * paging queue.  In principle, this function only needs to set the flag
247  * PG_MARKER.  Nonetheless, it write busies and initializes the hold count
248  * to one as safety precautions.
249  */ 
250 static void
251 vm_pageout_init_marker(vm_page_t marker, u_short queue)
252 {
253
254         bzero(marker, sizeof(*marker));
255         marker->flags = PG_MARKER;
256         marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
257         marker->queue = queue;
258         marker->hold_count = 1;
259 }
260
261 /*
262  * vm_pageout_fallback_object_lock:
263  * 
264  * Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
265  * known to have failed and page queue must be either PQ_ACTIVE or
266  * PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
267  * while locking the vm object.  Use marker page to detect page queue
268  * changes and maintain notion of next page on page queue.  Return
269  * TRUE if no changes were detected, FALSE otherwise.  vm object is
270  * locked on return.
271  * 
272  * This function depends on both the lock portion of struct vm_object
273  * and normal struct vm_page being type stable.
274  */
275 static boolean_t
276 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
277 {
278         struct vm_page marker;
279         struct vm_pagequeue *pq;
280         boolean_t unchanged;
281         u_short queue;
282         vm_object_t object;
283
284         queue = m->queue;
285         vm_pageout_init_marker(&marker, queue);
286         pq = vm_page_pagequeue(m);
287         object = m->object;
288         
289         TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
290         vm_pagequeue_unlock(pq);
291         vm_page_unlock(m);
292         VM_OBJECT_WLOCK(object);
293         vm_page_lock(m);
294         vm_pagequeue_lock(pq);
295
296         /* Page queue might have changed. */
297         *next = TAILQ_NEXT(&marker, plinks.q);
298         unchanged = (m->queue == queue &&
299                      m->object == object &&
300                      &marker == TAILQ_NEXT(m, plinks.q));
301         TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
302         return (unchanged);
303 }
304
305 /*
306  * Lock the page while holding the page queue lock.  Use marker page
307  * to detect page queue changes and maintain notion of next page on
308  * page queue.  Return TRUE if no changes were detected, FALSE
309  * otherwise.  The page is locked on return. The page queue lock might
310  * be dropped and reacquired.
311  *
312  * This function depends on normal struct vm_page being type stable.
313  */
314 static boolean_t
315 vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
316 {
317         struct vm_page marker;
318         struct vm_pagequeue *pq;
319         boolean_t unchanged;
320         u_short queue;
321
322         vm_page_lock_assert(m, MA_NOTOWNED);
323         if (vm_page_trylock(m))
324                 return (TRUE);
325
326         queue = m->queue;
327         vm_pageout_init_marker(&marker, queue);
328         pq = vm_page_pagequeue(m);
329
330         TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
331         vm_pagequeue_unlock(pq);
332         vm_page_lock(m);
333         vm_pagequeue_lock(pq);
334
335         /* Page queue might have changed. */
336         *next = TAILQ_NEXT(&marker, plinks.q);
337         unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, plinks.q));
338         TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
339         return (unchanged);
340 }
341
342 /*
343  * vm_pageout_cluster:
344  *
345  * Clean the page and remove it from the laundry.
346  * 
347  * We set the busy bit to cause potential page faults on this page to
348  * block.  Note the careful timing, however: the busy bit isn't set until
349  * late and we cannot do anything that will mess with the page.
350  */
351 static int
352 vm_pageout_cluster(vm_page_t m)
353 {
354         vm_object_t object;
355         vm_page_t mc[2*vm_pageout_page_count], pb, ps;
356         int pageout_count;
357         int ib, is, page_base;
358         vm_pindex_t pindex = m->pindex;
359
360         vm_page_lock_assert(m, MA_OWNED);
361         object = m->object;
362         VM_OBJECT_ASSERT_WLOCKED(object);
363
364         /*
365          * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
366          * with the new swapper, but we could have serious problems paging
367          * out other object types if there is insufficient memory.  
368          *
369          * Unfortunately, checking free memory here is far too late, so the
370          * check has been moved up a procedural level.
371          */
372
373         /*
374          * Can't clean the page if it's busy or held.
375          */
376         vm_page_assert_unbusied(m);
377         KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
378         vm_page_unlock(m);
379
380         mc[vm_pageout_page_count] = pb = ps = m;
381         pageout_count = 1;
382         page_base = vm_pageout_page_count;
383         ib = 1;
384         is = 1;
385
386         /*
387          * Scan object for clusterable pages.
388          *
389          * We can cluster ONLY if: ->> the page is NOT
390          * clean, wired, busy, held, or mapped into a
391          * buffer, and one of the following:
392          * 1) The page is inactive, or a seldom used
393          *    active page.
394          * -or-
395          * 2) we force the issue.
396          *
397          * During heavy mmap/modification loads the pageout
398          * daemon can really fragment the underlying file
399          * due to flushing pages out of order and not trying to
400          * align the clusters (which leaves sporadic out-of-order
401          * holes).  To solve this problem we do the reverse scan
402          * first and attempt to align our cluster, then do a 
403          * forward scan if room remains.
404          */
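        /*
         * A rough sketch of the cluster array built below (assuming the
         * default vm_pageout_page_count of 16): "m" is placed at mc[16],
         * the reverse scan grows the run downward through mc[15], mc[14],
         * ..., and the forward scan grows it upward through mc[17],
         * mc[18], ....  The flush at the end always operates on the
         * contiguous slice mc[page_base .. page_base + pageout_count - 1].
         */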
405 more:
406         while (ib && pageout_count < vm_pageout_page_count) {
407                 vm_page_t p;
408
409                 if (ib > pindex) {
410                         ib = 0;
411                         break;
412                 }
413
414                 if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
415                         ib = 0;
416                         break;
417                 }
418                 vm_page_test_dirty(p);
419                 if (p->dirty == 0) {
420                         ib = 0;
421                         break;
422                 }
423                 vm_page_lock(p);
424                 if (p->queue != PQ_INACTIVE ||
425                     p->hold_count != 0) {       /* may be undergoing I/O */
426                         vm_page_unlock(p);
427                         ib = 0;
428                         break;
429                 }
430                 vm_page_unlock(p);
431                 mc[--page_base] = pb = p;
432                 ++pageout_count;
433                 ++ib;
434                 /*
435                  * alignment boundary, stop here and switch directions.  Do
436                  * not clear ib.
437                  */
438                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
439                         break;
440         }
441
442         while (pageout_count < vm_pageout_page_count && 
443             pindex + is < object->size) {
444                 vm_page_t p;
445
446                 if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
447                         break;
448                 vm_page_test_dirty(p);
449                 if (p->dirty == 0)
450                         break;
451                 vm_page_lock(p);
452                 if (p->queue != PQ_INACTIVE ||
453                     p->hold_count != 0) {       /* may be undergoing I/O */
454                         vm_page_unlock(p);
455                         break;
456                 }
457                 vm_page_unlock(p);
458                 mc[page_base + pageout_count] = ps = p;
459                 ++pageout_count;
460                 ++is;
461         }
462
463         /*
464          * If we exhausted our forward scan, continue with the reverse scan
465          * when possible, even past a page boundary.  This catches boundary
466          * conditions.
467          */
468         if (ib && pageout_count < vm_pageout_page_count)
469                 goto more;
470
471         /*
472          * we allow reads during pageouts...
473          */
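        /*
         * (Reads remain possible because vm_pageout_flush() only shared
         * busies the pages and write-protects their mappings; it does not
         * exclusive busy them.)
         */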
474         return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL,
475             NULL));
476 }
477
478 /*
479  * vm_pageout_flush() - launder the given pages
480  *
481  *      The given pages are laundered.  Note that we set up for the start of
482  *      I/O (i.e., busy the page), mark it read-only, and bump the object's
483  *      paging-in-progress count in here rather than in the parent.  If we want
484  *      the parent to do more sophisticated things we may have to change
485  *      the ordering.
486  *
487  *      The returned runlen is the count of pages between mreq and the first
488  *      page after mreq with status VM_PAGER_AGAIN.
489  *      *eio is set to TRUE if the pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
490  *      for any page in the runlen set.
491  */
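/*
 * As a concrete (hypothetical) example of the runlen accounting: with
 * count = 5, mreq = 2 and pageout_status[] = { OK, OK, OK, AGAIN, OK },
 * the reported runlen is 1, because only one page at or after mreq
 * completed before the first VM_PAGER_AGAIN.
 */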
492 int
493 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
494     boolean_t *eio)
495 {
496         vm_object_t object = mc[0]->object;
497         int pageout_status[count];
498         int numpagedout = 0;
499         int i, runlen;
500
501         VM_OBJECT_ASSERT_WLOCKED(object);
502
503         /*
504          * Initiate I/O.  Shared busy the pages and mark them
505          * read-only.
506          *
507          * We do not have to fix up the clean/dirty bits here... we can
508          * allow the pager to do it after the I/O completes.
509          *
510          * NOTE! mc[i]->dirty may be partial or fragmented due to an
511          * edge case with file fragments.
512          */
513         for (i = 0; i < count; i++) {
514                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
515                     ("vm_pageout_flush: partially invalid page %p index %d/%d",
516                         mc[i], i, count));
517                 vm_page_sbusy(mc[i]);
518                 pmap_remove_write(mc[i]);
519         }
520         vm_object_pip_add(object, count);
521
522         vm_pager_put_pages(object, mc, count, flags, pageout_status);
523
524         runlen = count - mreq;
525         if (eio != NULL)
526                 *eio = FALSE;
527         for (i = 0; i < count; i++) {
528                 vm_page_t mt = mc[i];
529
530                 KASSERT(pageout_status[i] == VM_PAGER_PEND ||
531                     !pmap_page_is_write_mapped(mt),
532                     ("vm_pageout_flush: page %p is not write protected", mt));
533                 switch (pageout_status[i]) {
534                 case VM_PAGER_OK:
535                 case VM_PAGER_PEND:
536                         numpagedout++;
537                         break;
538                 case VM_PAGER_BAD:
539                         /*
540                          * Page outside of range of object. Right now we
541                          * essentially lose the changes by pretending it
542                          * worked.
543                          */
544                         vm_page_undirty(mt);
545                         break;
546                 case VM_PAGER_ERROR:
547                 case VM_PAGER_FAIL:
548                         /*
549                          * If the page couldn't be paged out, then reactivate
550                          * it so it doesn't clog the inactive list.  (We will
551                          * try paging it out again later.)
552                          */
553                         vm_page_lock(mt);
554                         vm_page_activate(mt);
555                         vm_page_unlock(mt);
556                         if (eio != NULL && i >= mreq && i - mreq < runlen)
557                                 *eio = TRUE;
558                         break;
559                 case VM_PAGER_AGAIN:
560                         if (i >= mreq && i - mreq < runlen)
561                                 runlen = i - mreq;
562                         break;
563                 }
564
565                 /*
566                  * If the operation is still going, leave the page busy to
567                  * block all other accesses. Also, leave the paging in
568                  * progress indicator set so that we don't attempt an object
569                  * collapse.
570                  */
571                 if (pageout_status[i] != VM_PAGER_PEND) {
572                         vm_object_pip_wakeup(object);
573                         vm_page_sunbusy(mt);
574                 }
575         }
576         if (prunlen != NULL)
577                 *prunlen = runlen;
578         return (numpagedout);
579 }
580
581 static boolean_t
582 vm_pageout_launder(struct vm_pagequeue *pq, int tries, vm_paddr_t low,
583     vm_paddr_t high)
584 {
585         struct mount *mp;
586         struct vnode *vp;
587         vm_object_t object;
588         vm_paddr_t pa;
589         vm_page_t m, m_tmp, next;
590         int lockmode;
591
592         vm_pagequeue_lock(pq);
593         TAILQ_FOREACH_SAFE(m, &pq->pq_pl, plinks.q, next) {
594                 if ((m->flags & PG_MARKER) != 0)
595                         continue;
596                 pa = VM_PAGE_TO_PHYS(m);
597                 if (pa < low || pa + PAGE_SIZE > high)
598                         continue;
599                 if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
600                         vm_page_unlock(m);
601                         continue;
602                 }
603                 object = m->object;
604                 if ((!VM_OBJECT_TRYWLOCK(object) &&
605                     (!vm_pageout_fallback_object_lock(m, &next) ||
606                     m->hold_count != 0)) || vm_page_busied(m)) {
607                         vm_page_unlock(m);
608                         VM_OBJECT_WUNLOCK(object);
609                         continue;
610                 }
611                 vm_page_test_dirty(m);
612                 if (m->dirty == 0 && object->ref_count != 0)
613                         pmap_remove_all(m);
614                 if (m->dirty != 0) {
615                         vm_page_unlock(m);
616                         if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
617                                 VM_OBJECT_WUNLOCK(object);
618                                 continue;
619                         }
620                         if (object->type == OBJT_VNODE) {
621                                 vm_pagequeue_unlock(pq);
622                                 vp = object->handle;
623                                 vm_object_reference_locked(object);
624                                 VM_OBJECT_WUNLOCK(object);
625                                 (void)vn_start_write(vp, &mp, V_WAIT);
626                                 lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
627                                     LK_SHARED : LK_EXCLUSIVE;
628                                 vn_lock(vp, lockmode | LK_RETRY);
629                                 VM_OBJECT_WLOCK(object);
630                                 vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
631                                 VM_OBJECT_WUNLOCK(object);
632                                 VOP_UNLOCK(vp, 0);
633                                 vm_object_deallocate(object);
634                                 vn_finished_write(mp);
635                                 return (TRUE);
636                         } else if (object->type == OBJT_SWAP ||
637                             object->type == OBJT_DEFAULT) {
638                                 vm_pagequeue_unlock(pq);
639                                 m_tmp = m;
640                                 vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
641                                     0, NULL, NULL);
642                                 VM_OBJECT_WUNLOCK(object);
643                                 return (TRUE);
644                         }
645                 } else {
646                         /*
647                          * Dequeue here to prevent lock recursion in
648                          * vm_page_cache().
649                          */
650                         vm_page_dequeue_locked(m);
651                         vm_page_cache(m);
652                         vm_page_unlock(m);
653                 }
654                 VM_OBJECT_WUNLOCK(object);
655         }
656         vm_pagequeue_unlock(pq);
657         return (FALSE);
658 }
659
660 /*
661  * Increase the number of cached pages.  The specified value, "tries",
662  * determines which categories of pages are cached:
663  *
664  *  0: All clean, inactive pages within the specified physical address range
665  *     are cached.  Will not sleep.
666  *  1: The vm_lowmem handlers are called.  All inactive pages within
667  *     the specified physical address range are cached.  May sleep.
668  *  2: The vm_lowmem handlers are called.  All inactive and active pages
669  *     within the specified physical address range are cached.  May sleep.
670  */
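/*
 * Callers needing contiguous physical memory typically start with
 * tries == 0 and retry with larger values only if the cheaper attempts
 * fail, accepting the extra cost (and possible sleeping) of the higher
 * levels for a better chance of success.
 */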
671 void
672 vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
673 {
674         int actl, actmax, inactl, inactmax, dom, initial_dom;
675         static int start_dom = 0;
676
677         if (tries > 0) {
678                 /*
679                  * Decrease registered cache sizes.  The vm_lowmem handlers
680                  * may acquire locks and/or sleep, so they can only be invoked
681                  * when "tries" is greater than zero.
682                  */
683                 SDT_PROBE0(vm, , , vm__lowmem_cache);
684                 EVENTHANDLER_INVOKE(vm_lowmem, 0);
685
686                 /*
687                  * We do this explicitly after the caches have been drained
688                  * above.
689                  */
690                 uma_reclaim();
691         }
692
693         /*
694          * Make the next scan start on the next domain.
695          */
696         initial_dom = atomic_fetchadd_int(&start_dom, 1) % vm_ndomains;
697
698         inactl = 0;
699         inactmax = vm_cnt.v_inactive_count;
700         actl = 0;
701         actmax = tries < 2 ? 0 : vm_cnt.v_active_count;
702         dom = initial_dom;
703
704         /*
705          * Scan domains in round-robin order, first inactive queues,
706          * then active.  Since a domain usually owns a large physically
707          * contiguous chunk of memory, it makes sense to completely
708          * exhaust one domain before switching to the next, while growing
709          * the pool of contiguous physical pages.
710          *
711          * Do not even start laundering a domain that cannot contain
712          * the specified address range, as indicated by the segments
713          * constituting the domain.
714          */
715 again:
716         if (inactl < inactmax) {
717                 if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs,
718                     low, high) &&
719                     vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_INACTIVE],
720                     tries, low, high)) {
721                         inactl++;
722                         goto again;
723                 }
724                 if (++dom == vm_ndomains)
725                         dom = 0;
726                 if (dom != initial_dom)
727                         goto again;
728         }
729         if (actl < actmax) {
730                 if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs,
731                     low, high) &&
732                     vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_ACTIVE],
733                       tries, low, high)) {
734                         actl++;
735                         goto again;
736                 }
737                 if (++dom == vm_ndomains)
738                         dom = 0;
739                 if (dom != initial_dom)
740                         goto again;
741         }
742 }
743
744 #if !defined(NO_SWAPPING)
745 /*
746  *      vm_pageout_object_deactivate_pages
747  *
748  *      Deactivate enough pages to satisfy the inactive target
749  *      requirements.
750  *
751  *      The object and map must be locked.
752  */
753 static void
754 vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
755     long desired)
756 {
757         vm_object_t backing_object, object;
758         vm_page_t p;
759         int act_delta, remove_mode;
760
761         VM_OBJECT_ASSERT_LOCKED(first_object);
762         if ((first_object->flags & OBJ_FICTITIOUS) != 0)
763                 return;
764         for (object = first_object;; object = backing_object) {
765                 if (pmap_resident_count(pmap) <= desired)
766                         goto unlock_return;
767                 VM_OBJECT_ASSERT_LOCKED(object);
768                 if ((object->flags & OBJ_UNMANAGED) != 0 ||
769                     object->paging_in_progress != 0)
770                         goto unlock_return;
771
772                 remove_mode = 0;
773                 if (object->shadow_count > 1)
774                         remove_mode = 1;
775                 /*
776                  * Scan the object's entire memory queue.
777                  */
778                 TAILQ_FOREACH(p, &object->memq, listq) {
779                         if (pmap_resident_count(pmap) <= desired)
780                                 goto unlock_return;
781                         if (vm_page_busied(p))
782                                 continue;
783                         PCPU_INC(cnt.v_pdpages);
784                         vm_page_lock(p);
785                         if (p->wire_count != 0 || p->hold_count != 0 ||
786                             !pmap_page_exists_quick(pmap, p)) {
787                                 vm_page_unlock(p);
788                                 continue;
789                         }
790                         act_delta = pmap_ts_referenced(p);
791                         if ((p->aflags & PGA_REFERENCED) != 0) {
792                                 if (act_delta == 0)
793                                         act_delta = 1;
794                                 vm_page_aflag_clear(p, PGA_REFERENCED);
795                         }
796                         if (p->queue != PQ_ACTIVE && act_delta != 0) {
797                                 vm_page_activate(p);
798                                 p->act_count += act_delta;
799                         } else if (p->queue == PQ_ACTIVE) {
800                                 if (act_delta == 0) {
801                                         p->act_count -= min(p->act_count,
802                                             ACT_DECLINE);
803                                         if (!remove_mode && p->act_count == 0) {
804                                                 pmap_remove_all(p);
805                                                 vm_page_deactivate(p);
806                                         } else
807                                                 vm_page_requeue(p);
808                                 } else {
809                                         vm_page_activate(p);
810                                         if (p->act_count < ACT_MAX -
811                                             ACT_ADVANCE)
812                                                 p->act_count += ACT_ADVANCE;
813                                         vm_page_requeue(p);
814                                 }
815                         } else if (p->queue == PQ_INACTIVE)
816                                 pmap_remove_all(p);
817                         vm_page_unlock(p);
818                 }
819                 if ((backing_object = object->backing_object) == NULL)
820                         goto unlock_return;
821                 VM_OBJECT_RLOCK(backing_object);
822                 if (object != first_object)
823                         VM_OBJECT_RUNLOCK(object);
824         }
825 unlock_return:
826         if (object != first_object)
827                 VM_OBJECT_RUNLOCK(object);
828 }
829
830 /*
831  * Deactivate some number of pages in a map; try to do it fairly, but
832  * that is really hard to do.
833  */
834 static void
835 vm_pageout_map_deactivate_pages(map, desired)
836         vm_map_t map;
837         long desired;
838 {
839         vm_map_entry_t tmpe;
840         vm_object_t obj, bigobj;
841         int nothingwired;
842
843         if (!vm_map_trylock(map))
844                 return;
845
846         bigobj = NULL;
847         nothingwired = TRUE;
848
849         /*
850          * first, search out the biggest object, and try to free pages from
851          * that.
852          */
853         tmpe = map->header.next;
854         while (tmpe != &map->header) {
855                 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
856                         obj = tmpe->object.vm_object;
857                         if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
858                                 if (obj->shadow_count <= 1 &&
859                                     (bigobj == NULL ||
860                                      bigobj->resident_page_count < obj->resident_page_count)) {
861                                         if (bigobj != NULL)
862                                                 VM_OBJECT_RUNLOCK(bigobj);
863                                         bigobj = obj;
864                                 } else
865                                         VM_OBJECT_RUNLOCK(obj);
866                         }
867                 }
868                 if (tmpe->wired_count > 0)
869                         nothingwired = FALSE;
870                 tmpe = tmpe->next;
871         }
872
873         if (bigobj != NULL) {
874                 vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
875                 VM_OBJECT_RUNLOCK(bigobj);
876         }
877         /*
878          * Next, hunt around for other pages to deactivate.  We actually
879          * do this search sort of wrong -- .text first is not the best idea.
880          */
881         tmpe = map->header.next;
882         while (tmpe != &map->header) {
883                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
884                         break;
885                 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
886                         obj = tmpe->object.vm_object;
887                         if (obj != NULL) {
888                                 VM_OBJECT_RLOCK(obj);
889                                 vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
890                                 VM_OBJECT_RUNLOCK(obj);
891                         }
892                 }
893                 tmpe = tmpe->next;
894         }
895
896         /*
897          * Remove all mappings if a process is swapped out; this will free
898          * the page table pages.
899          */
900         if (desired == 0 && nothingwired) {
901                 pmap_remove(vm_map_pmap(map), vm_map_min(map),
902                     vm_map_max(map));
903         }
904
905         vm_map_unlock(map);
906 }
907 #endif          /* !defined(NO_SWAPPING) */
908
909 /*
910  * Attempt to acquire all of the necessary locks to launder a page and
911  * then call through the clustering layer to PUTPAGES.  Wait a short
912  * time for a vnode lock.
913  *
914  * Requires the page and object lock on entry, releases both before return.
915  * Returns 0 on success and an errno otherwise.
916  */
917 static int
918 vm_pageout_clean(vm_page_t m)
919 {
920         struct vnode *vp;
921         struct mount *mp;
922         vm_object_t object;
923         vm_pindex_t pindex;
924         int error, lockmode;
925
926         vm_page_assert_locked(m);
927         object = m->object;
928         VM_OBJECT_ASSERT_WLOCKED(object);
929         error = 0;
930         vp = NULL;
931         mp = NULL;
932
933         /*
934          * The object is already known NOT to be dead.   It
935          * is possible for the vget() to block the whole
936          * pageout daemon, but the new low-memory handling
937          * code should prevent it.
938          *
939          * We can't wait forever for the vnode lock; we might
940          * deadlock due to a vn_read() getting stuck in
941          * vm_wait while holding this vnode.  We skip the 
942          * vnode if we can't get it in a reasonable amount
943          * of time.
944          */
945         if (object->type == OBJT_VNODE) {
946                 vm_page_unlock(m);
947                 vp = object->handle;
948                 if (vp->v_type == VREG &&
949                     vn_start_write(vp, &mp, V_NOWAIT) != 0) {
950                         mp = NULL;
951                         error = EDEADLK;
952                         goto unlock_all;
953                 }
954                 KASSERT(mp != NULL,
955                     ("vp %p with NULL v_mount", vp));
956                 vm_object_reference_locked(object);
957                 pindex = m->pindex;
958                 VM_OBJECT_WUNLOCK(object);
959                 lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
960                     LK_SHARED : LK_EXCLUSIVE;
961                 if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
962                         vp = NULL;
963                         error = EDEADLK;
964                         goto unlock_mp;
965                 }
966                 VM_OBJECT_WLOCK(object);
967                 vm_page_lock(m);
968                 /*
969                  * While the object and page were unlocked, the page
970                  * may have been:
971                  * (1) moved to a different queue,
972                  * (2) reallocated to a different object,
973                  * (3) reallocated to a different offset, or
974                  * (4) cleaned.
975                  */
976                 if (m->queue != PQ_INACTIVE || m->object != object ||
977                     m->pindex != pindex || m->dirty == 0) {
978                         vm_page_unlock(m);
979                         error = ENXIO;
980                         goto unlock_all;
981                 }
982
983                 /*
984                  * The page may have been busied or held while the object
985                  * and page locks were released.
986                  */
987                 if (vm_page_busied(m) || m->hold_count != 0) {
988                         vm_page_unlock(m);
989                         error = EBUSY;
990                         goto unlock_all;
991                 }
992         }
993
994         /*
995          * If a page is dirty, then it is either being washed
996          * (but not yet cleaned) or it is still in the
997          * laundry.  If it is still in the laundry, then we
998          * start the cleaning operation. 
999          */
1000         if (vm_pageout_cluster(m) == 0)
1001                 error = EIO;
1002
1003 unlock_all:
1004         VM_OBJECT_WUNLOCK(object);
1005
1006 unlock_mp:
1007         vm_page_lock_assert(m, MA_NOTOWNED);
1008         if (mp != NULL) {
1009                 if (vp != NULL)
1010                         vput(vp);
1011                 vm_object_deallocate(object);
1012                 vn_finished_write(mp);
1013         }
1014
1015         return (error);
1016 }
1017
1018 /*
1019  *      vm_pageout_scan does the dirty work for the pageout daemon.
1020  *
1021  *      pass 0 - Update active LRU/deactivate pages
1022  *      pass 1 - Move inactive to cache or free
1023  *      pass 2 - Launder dirty pages
1024  */
1025 static void
1026 vm_pageout_scan(struct vm_domain *vmd, int pass)
1027 {
1028         vm_page_t m, next;
1029         struct vm_pagequeue *pq;
1030         vm_object_t object;
1031         long min_scan;
1032         int act_delta, addl_page_shortage, deficit, maxscan, page_shortage;
1033         int vnodes_skipped = 0;
1034         int maxlaunder, scan_tick, scanned;
1035         boolean_t queues_locked;
1036
1037         /*
1038          * If we need to reclaim memory ask kernel caches to return
1039          * some.  We rate limit to avoid thrashing.
1040          */
1041         if (vmd == &vm_dom[0] && pass > 0 &&
1042             (time_uptime - lowmem_uptime) >= lowmem_period) {
1043                 /*
1044                  * Decrease registered cache sizes.
1045                  */
1046                 SDT_PROBE0(vm, , , vm__lowmem_scan);
1047                 EVENTHANDLER_INVOKE(vm_lowmem, 0);
1048                 /*
1049                  * We do this explicitly after the caches have been
1050                  * drained above.
1051                  */
1052                 uma_reclaim();
1053                 lowmem_uptime = time_uptime;
1054         }
1055
1056         /*
1057          * The addl_page_shortage is the number of temporarily
1058          * stuck pages in the inactive queue.  In other words, the
1059          * number of pages from the inactive count that should be
1060          * discounted in setting the target for the active queue scan.
1061          */
1062         addl_page_shortage = 0;
1063
1064         /*
1065          * Calculate the number of pages we want to either free or move
1066          * to the cache.
1067          */
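        /*
         * (Roughly, vm_paging_target() below is the current shortfall
         * relative to the free page target, while vm_pageout_deficit
         * accumulates pages that recent allocations wanted but could not
         * obtain.)
         */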
1068         if (pass > 0) {
1069                 deficit = atomic_readandclear_int(&vm_pageout_deficit);
1070                 page_shortage = vm_paging_target() + deficit;
1071         } else
1072                 page_shortage = deficit = 0;
1073
1074         /*
1075          * maxlaunder limits the number of dirty pages we flush per scan.
1076          * For most systems a smaller value (16 or 32) is more robust under
1077          * extreme memory and disk pressure because any unnecessary writes
1078          * to disk can result in extreme performance degradation.  However,
1079          * systems with excessive dirty pages (especially when MAP_NOSYNC is
1080          * used) will die horribly with limited laundering.  If the pageout
1081          * daemon cannot clean enough pages in the first pass, we let it go
1082          * all out in succeeding passes.
1083          */
1084         if ((maxlaunder = vm_max_launder) <= 1)
1085                 maxlaunder = 1;
1086         if (pass > 1)
1087                 maxlaunder = 10000;
1088
1089         /*
1090          * Start scanning the inactive queue for pages we can move to the
1091          * cache or free.  The scan will stop when the target is reached or
1092          * we have scanned the entire inactive queue.  Note that m->act_count
1093          * is not used to form decisions for the inactive queue, only for the
1094          * active queue.
1095          */
1096         pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1097         maxscan = pq->pq_cnt;
1098         vm_pagequeue_lock(pq);
1099         queues_locked = TRUE;
1100         for (m = TAILQ_FIRST(&pq->pq_pl);
1101              m != NULL && maxscan-- > 0 && page_shortage > 0;
1102              m = next) {
1103                 vm_pagequeue_assert_locked(pq);
1104                 KASSERT(queues_locked, ("unlocked queues"));
1105                 KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m));
1106
1107                 PCPU_INC(cnt.v_pdpages);
1108                 next = TAILQ_NEXT(m, plinks.q);
1109
1110                 /*
1111                  * skip marker pages
1112                  */
1113                 if (m->flags & PG_MARKER)
1114                         continue;
1115
1116                 KASSERT((m->flags & PG_FICTITIOUS) == 0,
1117                     ("Fictitious page %p cannot be in inactive queue", m));
1118                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1119                     ("Unmanaged page %p cannot be in inactive queue", m));
1120
1121                 /*
1122                  * The page or object lock acquisitions fail if the
1123                  * page was removed from the queue or moved to a
1124                  * different position within the queue.  In either
1125                  * case, addl_page_shortage should not be incremented.
1126                  */
1127                 if (!vm_pageout_page_lock(m, &next)) {
1128                         vm_page_unlock(m);
1129                         continue;
1130                 }
1131                 object = m->object;
1132                 if (!VM_OBJECT_TRYWLOCK(object) &&
1133                     !vm_pageout_fallback_object_lock(m, &next)) {
1134                         vm_page_unlock(m);
1135                         VM_OBJECT_WUNLOCK(object);
1136                         continue;
1137                 }
1138
1139                 /*
1140                  * Don't mess with busy pages; keep them at the
1141                  * front of the queue, since most likely they are being
1142                  * paged out.  Increment addl_page_shortage for busy
1143                  * pages, because they may leave the inactive queue
1144                  * shortly after page scan is finished.
1145                  */
1146                 if (vm_page_busied(m)) {
1147                         vm_page_unlock(m);
1148                         VM_OBJECT_WUNLOCK(object);
1149                         addl_page_shortage++;
1150                         continue;
1151                 }
1152
1153                 /*
1154                  * We unlock the inactive page queue, invalidating the
1155                  * 'next' pointer.  Use our marker to remember our
1156                  * place.
1157                  */
1158                 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
1159                 vm_pagequeue_unlock(pq);
1160                 queues_locked = FALSE;
1161
1162                 /*
1163                  * Invalid pages can be easily freed. They cannot be
1164                  * mapped, vm_page_free() asserts this.
1165                  */
1166                 if (m->valid == 0 && m->hold_count == 0) {
1167                         vm_page_free(m);
1168                         PCPU_INC(cnt.v_dfree);
1169                         --page_shortage;
1170                         goto drop_page;
1171                 }
1172
1173                 /*
1174                  * We bump the activation count if the page has been
1175                  * referenced while in the inactive queue.  This makes
1176                  * it less likely that the page will be added back to the
1177                  * inactive queue prematurely again.  Here we check the
1178                  * page tables (or emulated bits, if any), since the upper
1179                  * level VM system knows nothing about existing
1180                  * references.
1181                  */
1182                 if ((m->aflags & PGA_REFERENCED) != 0) {
1183                         vm_page_aflag_clear(m, PGA_REFERENCED);
1184                         act_delta = 1;
1185                 } else
1186                         act_delta = 0;
1187                 if (object->ref_count != 0) {
1188                         act_delta += pmap_ts_referenced(m);
1189                 } else {
1190                         KASSERT(!pmap_page_is_mapped(m),
1191                             ("vm_pageout_scan: page %p is mapped", m));
1192                 }
1193
1194                 /*
1195                  * If the upper level VM system knows about any page 
1196                  * references, we reactivate the page or requeue it.
1197                  */
1198                 if (act_delta != 0) {
1199                         if (object->ref_count != 0) {
1200                                 vm_page_activate(m);
1201                                 m->act_count += act_delta + ACT_ADVANCE;
1202                         } else {
1203                                 vm_pagequeue_lock(pq);
1204                                 queues_locked = TRUE;
1205                                 vm_page_requeue_locked(m);
1206                         }
1207                         goto drop_page;
1208                 }
1209
1210                 if (m->hold_count != 0) {
1211                         /*
1212                          * Held pages are essentially stuck in the
1213                          * queue.  So, they ought to be discounted
1214                          * from the inactive count.  See the
1215                          * calculation of the page_shortage for the
1216                          * loop over the active queue below.
1217                          */
1218                         addl_page_shortage++;
1219                         goto drop_page;
1220                 }
1221
1222                 /*
1223                  * If the page appears to be clean at the machine-independent
1224                  * layer, then remove all of its mappings from the pmap in
1225                  * anticipation of placing it onto the cache queue.  If,
1226                  * however, any of the page's mappings allow write access,
1227                  * then the page may still be modified until the last of those
1228                  * mappings are removed.
1229                  */
1230                 if (object->ref_count != 0) {
1231                         vm_page_test_dirty(m);
1232                         if (m->dirty == 0)
1233                                 pmap_remove_all(m);
1234                 }
1235
1236                 if (m->dirty == 0) {
1237                         /*
1238                          * Clean pages can be freed.
1239                          */
1240                         vm_page_free(m);
1241                         PCPU_INC(cnt.v_dfree);
1242                         --page_shortage;
1243                 } else if ((m->flags & PG_WINATCFLS) == 0 && pass < 2) {
1244                         /*
1245                          * Dirty pages need to be paged out, but flushing
1246                          * a page is extremely expensive versus freeing
1247                          * a clean page.  Rather than artificially limiting
1248                          * the number of pages we can flush, we instead give
1249                          * dirty pages extra priority on the inactive queue
1250                          * by forcing them to be cycled through the queue
1251                          * twice before being flushed, after which the
1252                          * (now clean) page will cycle through once more
1253                          * before being freed.  This significantly extends
1254                          * the thrash point for a heavily loaded machine.
1255                          */
1256                         m->flags |= PG_WINATCFLS;
1257                         vm_pagequeue_lock(pq);
1258                         queues_locked = TRUE;
1259                         vm_page_requeue_locked(m);
1260                 } else if (maxlaunder > 0) {
1261                         /*
1262                          * We always want to try to flush some dirty pages if
1263                          * we encounter them, to keep the system stable.
1264                          * Normally this number is small, but under extreme
1265                          * pressure where there are insufficient clean pages
1266                          * on the inactive queue, we may have to go all out.
1267                          */
1268                         int swap_pageouts_ok;
1269                         int error;
1270
1271                         if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
1272                                 swap_pageouts_ok = 1;
1273                         } else {
1274                                 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
1275                                 swap_pageouts_ok |= (!disable_swap_pageouts &&
1276                                     defer_swap_pageouts && vm_page_count_min());
1277
1278                         }
1279
1280                         /*
1281                          * We don't bother paging objects that are "dead".  
1282                          * Those objects are in a "rundown" state.
1283                          */
1284                         if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
1285                                 vm_pagequeue_lock(pq);
1286                                 vm_page_unlock(m);
1287                                 VM_OBJECT_WUNLOCK(object);
1288                                 queues_locked = TRUE;
1289                                 vm_page_requeue_locked(m);
1290                                 goto relock_queues;
1291                         }
1292                         error = vm_pageout_clean(m);
1293                         /*
1294                          * Decrement page_shortage on success to account for
1295                          * the (future) cleaned page.  Otherwise we could wind
1296                          * up laundering or cleaning too many pages.
1297                          */
1298                         if (error == 0) {
1299                                 page_shortage--;
1300                                 maxlaunder--;
1301                         } else if (error == EDEADLK) {
1302                                 pageout_lock_miss++;
1303                                 vnodes_skipped++;
1304                         } else if (error == EBUSY) {
1305                                 addl_page_shortage++;
1306                         }
1307                         vm_page_lock_assert(m, MA_NOTOWNED);
1308                         goto relock_queues;
1309                 }
1310 drop_page:
1311                 vm_page_unlock(m);
1312                 VM_OBJECT_WUNLOCK(object);
1313 relock_queues:
1314                 if (!queues_locked) {
1315                         vm_pagequeue_lock(pq);
1316                         queues_locked = TRUE;
1317                 }
1318                 next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
1319                 TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
1320         }
1321         vm_pagequeue_unlock(pq);
1322
1323 #if !defined(NO_SWAPPING)
1324         /*
1325          * Wakeup the swapout daemon if we didn't cache or free the targeted
1326          * number of pages. 
1327          */
1328         if (vm_swap_enabled && page_shortage > 0)
1329                 vm_req_vmdaemon(VM_SWAP_NORMAL);
1330 #endif
1331
1332         /*
1333          * Wakeup the sync daemon if we skipped a vnode in a writeable object
1334          * and we didn't cache or free enough pages.
1335          */
1336         if (vnodes_skipped > 0 && page_shortage > vm_cnt.v_free_target -
1337             vm_cnt.v_free_min)
1338                 (void)speedup_syncer();
1339
1340         /*
1341          * Compute the number of pages we want to try to move from the
1342          * active queue to the inactive queue.
1343          */
1344         page_shortage = vm_cnt.v_inactive_target - vm_cnt.v_inactive_count +
1345             vm_paging_target() + deficit + addl_page_shortage;
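        /*
         * Illustrative example (hypothetical numbers): with
         * v_inactive_target = 30000, v_inactive_count = 20000, a paging
         * target of 5000 and no deficit or additional shortage, the active
         * queue scan below tries to deactivate about 15000 pages.
         */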
1346
1347         pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1348         vm_pagequeue_lock(pq);
1349         maxscan = pq->pq_cnt;
1350
1351         /*
1352          * If we're just idle polling, attempt to visit every
1353          * active page within 'update_period' seconds.
1354          */
1355         scan_tick = ticks;
1356         if (vm_pageout_update_period != 0) {
1357                 min_scan = pq->pq_cnt;
1358                 min_scan *= scan_tick - vmd->vmd_last_active_scan;
1359                 min_scan /= hz * vm_pageout_update_period;
1360         } else
1361                 min_scan = 0;
1362         if (min_scan > 0 || (page_shortage > 0 && maxscan > 0))
1363                 vmd->vmd_last_active_scan = scan_tick;
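        /*
         * Illustrative example (hypothetical numbers): with hz = 1000,
         * vm_pageout_update_period = 600 seconds, 120000 active pages and
         * 5000 ticks (5 seconds) since the last scan, min_scan is
         * 120000 * 5000 / (1000 * 600) = 1000 pages for this pass.
         */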
1364
1365         /*
1366          * Scan the active queue for pages that can be deactivated.  Update
1367          * the per-page activity counter and use it to identify deactivation
1368          * candidates.
1369          */
1370         for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned <
1371             min_scan || (page_shortage > 0 && scanned < maxscan)); m = next,
1372             scanned++) {
1373
1374                 KASSERT(m->queue == PQ_ACTIVE,
1375                     ("vm_pageout_scan: page %p isn't active", m));
1376
1377                 next = TAILQ_NEXT(m, plinks.q);
1378                 if ((m->flags & PG_MARKER) != 0)
1379                         continue;
1380                 KASSERT((m->flags & PG_FICTITIOUS) == 0,
1381                     ("Fictitious page %p cannot be in active queue", m));
1382                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1383                     ("Unmanaged page %p cannot be in active queue", m));
1384                 if (!vm_pageout_page_lock(m, &next)) {
1385                         vm_page_unlock(m);
1386                         continue;
1387                 }
1388
1389                 /*
1390                  * The count for pagedaemon pages is done after checking the
1391                  * page for eligibility...
1392                  */
1393                 PCPU_INC(cnt.v_pdpages);
1394
1395                 /*
1396                  * Check to see "how much" the page has been used.
1397                  */
1398                 if ((m->aflags & PGA_REFERENCED) != 0) {
1399                         vm_page_aflag_clear(m, PGA_REFERENCED);
1400                         act_delta = 1;
1401                 } else
1402                         act_delta = 0;
1403
1404                 /*
1405                  * Unlocked object ref count check.  Two races are possible.
1406                  * 1) The ref was transitioning to zero and we saw non-zero,
1407                  *    the pmap bits will be checked unnecessarily.
1408                  * 2) The ref was transitioning to one and we saw zero. 
1409                  *    The page lock prevents a new reference to this page so
1410                  *    we need not check the reference bits.
1411                  */
1412                 if (m->object->ref_count != 0)
1413                         act_delta += pmap_ts_referenced(m);
1414
1415                 /*
1416                  * Advance or decay the act_count based on recent usage.
1417                  */
1418                 if (act_delta != 0) {
1419                         m->act_count += ACT_ADVANCE + act_delta;
1420                         if (m->act_count > ACT_MAX)
1421                                 m->act_count = ACT_MAX;
1422                 } else
1423                         m->act_count -= min(m->act_count, ACT_DECLINE);
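                /*
                 * Illustrative example, assuming the stock vm_page.h values
                 * (ACT_ADVANCE = 3, ACT_DECLINE = 1, ACT_MAX = 64): a page
                 * referenced on every scan climbs to and stays at 64, while
                 * an idle page decays by 1 per scan, so a formerly hot page
                 * survives roughly 64 idle scans before act_count reaches 0
                 * and it is deactivated below.
                 */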
1424
1425                 /*
1426                  * Move this page to the tail of the active or inactive
1427                  * queue depending on usage.
1428                  */
1429                 if (m->act_count == 0) {
1430                         /* Dequeue to avoid later lock recursion. */
1431                         vm_page_dequeue_locked(m);
1432                         vm_page_deactivate(m);
1433                         page_shortage--;
1434                 } else
1435                         vm_page_requeue_locked(m);
1436                 vm_page_unlock(m);
1437         }
1438         vm_pagequeue_unlock(pq);
1439 #if !defined(NO_SWAPPING)
1440         /*
1441          * Idle process swapout -- run once per second.
1442          */
1443         if (vm_swap_idle_enabled) {
1444                 static long lsec;
1445                 if (time_second != lsec) {
1446                         vm_req_vmdaemon(VM_SWAP_IDLE);
1447                         lsec = time_second;
1448                 }
1449         }
1450 #endif
1451
1452         /*
1453          * If we are critically low on one of RAM or swap and low on
1454          * the other, kill the largest process.  However, we avoid
1455          * doing this on the first pass in order to give ourselves a
1456          * chance to flush out dirty vnode-backed pages and to allow
1457          * active pages to be moved to the inactive queue and reclaimed.
1458          */
1459         vm_pageout_mightbe_oom(vmd, pass);
1460 }
1461
1462 static int vm_pageout_oom_vote;
1463
1464 /*
1465  * The pagedaemon threads randomly select one to perform the
1466  * OOM.  Trying to kill processes before all pagedaemons
1467  * have failed to reach the free page target is premature.
1468  */
1469 static void
1470 vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass)
1471 {
1472         int old_vote;
1473
1474         if (pass <= 1 || !((swap_pager_avail < 64 && vm_page_count_min()) ||
1475             (swap_pager_full && vm_paging_target() > 0))) {
1476                 if (vmd->vmd_oom) {
1477                         vmd->vmd_oom = FALSE;
1478                         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1479                 }
1480                 return;
1481         }
1482
1483         if (vmd->vmd_oom)
1484                 return;
1485
1486         vmd->vmd_oom = TRUE;
1487         old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1488         if (old_vote != vm_ndomains - 1)
1489                 return;
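        /*
         * Illustrative example: with vm_ndomains = 2, the first domain to
         * vote sees old_vote == 0 and returns; only the second voter
         * (old_vote == 1) proceeds to start the OOM kill below.
         */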
1490
1491         /*
1492          * The current pagedaemon thread is the last in the quorum to
1493          * start OOM.  Initiate the selection and signaling of the
1494          * victim.
1495          */
1496         vm_pageout_oom(VM_OOM_MEM);
1497
1498         /*
1499          * After one round of OOM terror, recall our vote.  On the
1500          * next pass, the current pagedaemon will vote again if the
1501          * low-memory condition persists, since vmd_oom is now
1502          * false.
1503          */
1504         vmd->vmd_oom = FALSE;
1505         atomic_subtract_int(&vm_pageout_oom_vote, 1);
1506 }
1507
1508 void
1509 vm_pageout_oom(int shortage)
1510 {
1511         struct proc *p, *bigproc;
1512         vm_offset_t size, bigsize;
1513         struct thread *td;
1514         struct vmspace *vm;
1515
1516         /*
1517          * We keep the process bigproc locked once we find it to keep anyone
1518          * from messing with it; however, there is a possibility of
1519          * deadlock if process B is bigproc and one of its child processes
1520          * attempts to propagate a signal to B while we are waiting for A's
1521          * lock while walking this list.  To avoid this, we don't block on
1522          * the process lock but just skip a process if it is already locked.
1523          */
1524         bigproc = NULL;
1525         bigsize = 0;
1526         sx_slock(&allproc_lock);
1527         FOREACH_PROC_IN_SYSTEM(p) {
1528                 int breakout;
1529
1530                 PROC_LOCK(p);
1531
1532                 /*
1533                  * If this is a system, protected or killed process, skip it.
1534                  */
1535                 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1536                     P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1537                     p->p_pid == 1 || P_KILLED(p) ||
1538                     (p->p_pid < 48 && swap_pager_avail != 0)) {
1539                         PROC_UNLOCK(p);
1540                         continue;
1541                 }
1542                 /*
1543                  * If the process is in a non-running type state,
1544                  * don't touch it.  Check all the threads individually.
1545                  */
1546                 breakout = 0;
1547                 FOREACH_THREAD_IN_PROC(p, td) {
1548                         thread_lock(td);
1549                         if (!TD_ON_RUNQ(td) &&
1550                             !TD_IS_RUNNING(td) &&
1551                             !TD_IS_SLEEPING(td) &&
1552                             !TD_IS_SUSPENDED(td)) {
1553                                 thread_unlock(td);
1554                                 breakout = 1;
1555                                 break;
1556                         }
1557                         thread_unlock(td);
1558                 }
1559                 if (breakout) {
1560                         PROC_UNLOCK(p);
1561                         continue;
1562                 }
1563                 /*
1564                  * get the process size
1565                  */
1566                 vm = vmspace_acquire_ref(p);
1567                 if (vm == NULL) {
1568                         PROC_UNLOCK(p);
1569                         continue;
1570                 }
1571                 _PHOLD(p);
1572                 if (!vm_map_trylock_read(&vm->vm_map)) {
1573                         _PRELE(p);
1574                         PROC_UNLOCK(p);
1575                         vmspace_free(vm);
1576                         continue;
1577                 }
1578                 PROC_UNLOCK(p);
1579                 size = vmspace_swap_count(vm);
1580                 vm_map_unlock_read(&vm->vm_map);
1581                 if (shortage == VM_OOM_MEM)
1582                         size += vmspace_resident_count(vm);
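                /*
                 * Illustrative note: under VM_OOM_MEM the score is swap usage
                 * plus resident pages, while under a pure swap shortage only
                 * swapped-out pages count, so a large but entirely resident
                 * process scores zero in the latter case.
                 */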
1583                 vmspace_free(vm);
1584                 /*
1585                  * If this process is bigger than the biggest one so far,
1586                  * remember it.
1587                  */
1588                 if (size > bigsize) {
1589                         if (bigproc != NULL)
1590                                 PRELE(bigproc);
1591                         bigproc = p;
1592                         bigsize = size;
1593                 } else {
1594                         PRELE(p);
1595                 }
1596         }
1597         sx_sunlock(&allproc_lock);
1598         if (bigproc != NULL) {
1599                 if (vm_panic_on_oom != 0)
1600                         panic("out of swap space");
1601                 PROC_LOCK(bigproc);
1602                 killproc(bigproc, "out of swap space");
1603                 sched_nice(bigproc, PRIO_MIN);
1604                 _PRELE(bigproc);
1605                 PROC_UNLOCK(bigproc);
1606                 wakeup(&vm_cnt.v_free_count);
1607         }
1608 }
1609
1610 static void
1611 vm_pageout_worker(void *arg)
1612 {
1613         struct vm_domain *domain;
1614         int domidx;
1615
1616         domidx = (uintptr_t)arg;
1617         domain = &vm_dom[domidx];
1618
1619         /*
1620          * XXXKIB It could be useful to bind pageout daemon threads to
1621          * the cores belonging to the domain from which vm_page_array
1622          * is allocated.
1623          */
1624
1625         KASSERT(domain->vmd_segs != 0, ("domain without segments"));
1626         domain->vmd_last_active_scan = ticks;
1627         vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE);
1628
1629         /*
1630          * The pageout daemon worker is never done, so loop forever.
1631          */
1632         while (TRUE) {
1633                 /*
1634                  * If we have enough free memory, wakeup waiters.  Do
1635                  * not clear vm_pages_needed until we reach our target,
1636                  * otherwise we may be woken up over and over again and
1637                  * waste a lot of cpu.
1638                  */
1639                 mtx_lock(&vm_page_queue_free_mtx);
1640                 if (vm_pages_needed && !vm_page_count_min()) {
1641                         if (!vm_paging_needed())
1642                                 vm_pages_needed = 0;
1643                         wakeup(&vm_cnt.v_free_count);
1644                 }
1645                 if (vm_pages_needed) {
1646                         /*
1647                          * Still not done.  Take a second pass without waiting
1648                          * (unlimited dirty cleaning); otherwise sleep a bit
1649                          * and try again.
1650                          */
1651                         if (domain->vmd_pass > 1)
1652                                 msleep(&vm_pages_needed,
1653                                     &vm_page_queue_free_mtx, PVM, "psleep",
1654                                     hz / 2);
1655                 } else {
1656                         /*
1657                          * Good enough, sleep until required to refresh
1658                          * stats.
1659                          */
1660                         domain->vmd_pass = 0;
1661                         msleep(&vm_pages_needed, &vm_page_queue_free_mtx,
1662                             PVM, "psleep", hz);
1663
1664                 }
1665                 if (vm_pages_needed) {
1666                         vm_cnt.v_pdwakeups++;
1667                         domain->vmd_pass++;
1668                 }
1669                 mtx_unlock(&vm_page_queue_free_mtx);
1670                 vm_pageout_scan(domain, domain->vmd_pass);
1671         }
1672 }
1673
1674 /*
1675  *      vm_pageout_init initialises basic pageout daemon settings.
1676  */
1677 static void
1678 vm_pageout_init(void)
1679 {
1680         /*
1681          * Initialize some paging parameters.
1682          */
1683         vm_cnt.v_interrupt_free_min = 2;
1684         if (vm_cnt.v_page_count < 2000)
1685                 vm_pageout_page_count = 8;
1686
1687         /*
1688          * v_free_reserved needs to include enough for the largest
1689          * swap pager structures plus enough for any pv_entry structs
1690          * when paging. 
1691          */
1692         if (vm_cnt.v_page_count > 1024)
1693                 vm_cnt.v_free_min = 4 + (vm_cnt.v_page_count - 1024) / 200;
1694         else
1695                 vm_cnt.v_free_min = 4;
1696         vm_cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1697             vm_cnt.v_interrupt_free_min;
1698         vm_cnt.v_free_reserved = vm_pageout_page_count +
1699             vm_cnt.v_pageout_free_min + (vm_cnt.v_page_count / 768);
1700         vm_cnt.v_free_severe = vm_cnt.v_free_min / 2;
1701         vm_cnt.v_free_target = 4 * vm_cnt.v_free_min + vm_cnt.v_free_reserved;
1702         vm_cnt.v_free_min += vm_cnt.v_free_reserved;
1703         vm_cnt.v_free_severe += vm_cnt.v_free_reserved;
1704         vm_cnt.v_inactive_target = (3 * vm_cnt.v_free_target) / 2;
1705         if (vm_cnt.v_inactive_target > vm_cnt.v_free_count / 3)
1706                 vm_cnt.v_inactive_target = vm_cnt.v_free_count / 3;
1707
1708         /*
1709          * Set the default wakeup threshold to be 10% above the minimum
1710          * page limit.  This keeps the steady state out of shortfall.
1711          */
1712         vm_pageout_wakeup_thresh = (vm_cnt.v_free_min / 10) * 11;
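        /*
         * Illustrative sizing example (hypothetical, assuming 4 KB pages,
         * a 64 KB MAXBSIZE and the default vm_pageout_page_count): with
         * about one million pages (~4 GB), v_free_min works out to roughly
         * 6600 pages (~26 MB), v_free_target to roughly 22000 pages
         * (~87 MB), v_inactive_target to roughly 34000 pages (~131 MB) and
         * the wakeup threshold to about 10% above v_free_min, i.e. roughly
         * 7300 pages.
         */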
1713
1714         /*
1715          * Set interval in seconds for active scan.  We want to visit each
1716          * page at least once every ten minutes.  This is to prevent worst
1717          * case paging behaviors with stale active LRU.
1718          */
1719         if (vm_pageout_update_period == 0)
1720                 vm_pageout_update_period = 600;
1721
1722         /* XXX does not really belong here */
1723         if (vm_page_max_wired == 0)
1724                 vm_page_max_wired = vm_cnt.v_free_count / 3;
1725 }
1726
1727 /*
1728  *     vm_pageout is the high level pageout daemon.
1729  */
1730 static void
1731 vm_pageout(void)
1732 {
1733         int error;
1734 #if MAXMEMDOM > 1
1735         int i;
1736 #endif
1737
1738         swap_pager_swap_init();
1739 #if MAXMEMDOM > 1
1740         for (i = 1; i < vm_ndomains; i++) {
1741                 error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i,
1742                     curproc, NULL, 0, 0, "dom%d", i);
1743                 if (error != 0) {
1744                         panic("starting pageout for domain %d, error %d\n",
1745                             i, error);
1746                 }
1747         }
1748 #endif
1749         error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL,
1750             0, 0, "uma");
1751         if (error != 0)
1752                 panic("starting uma_reclaim helper, error %d\n", error);
1753         vm_pageout_worker((void *)(uintptr_t)0);
1754 }
1755
1756 /*
1757  * Unless the free page queue lock is held by the caller, this function
1758  * should be regarded as advisory.  Specifically, the caller should
1759  * not msleep() on &vm_cnt.v_free_count following this function unless
1760  * the free page queue lock is held until the msleep() is performed.
1761  */
1762 void
1763 pagedaemon_wakeup(void)
1764 {
1765
1766         if (!vm_pages_needed && curthread->td_proc != pageproc) {
1767                 vm_pages_needed = 1;
1768                 wakeup(&vm_pages_needed);
1769         }
1770 }
1771
1772 #if !defined(NO_SWAPPING)
1773 static void
1774 vm_req_vmdaemon(int req)
1775 {
1776         static int lastrun = 0;
1777
1778         mtx_lock(&vm_daemon_mtx);
1779         vm_pageout_req_swapout |= req;
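        /*
         * Rate limit the wakeups: with, e.g., hz = 1000 the vm daemon is
         * woken at most about once per second; the ticks < lastrun test
         * also fires after the ticks counter wraps around.
         */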
1780         if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
1781                 wakeup(&vm_daemon_needed);
1782                 lastrun = ticks;
1783         }
1784         mtx_unlock(&vm_daemon_mtx);
1785 }
1786
1787 static void
1788 vm_daemon(void)
1789 {
1790         struct rlimit rsslim;
1791         struct proc *p;
1792         struct thread *td;
1793         struct vmspace *vm;
1794         int breakout, swapout_flags, tryagain, attempts;
1795 #ifdef RACCT
1796         uint64_t rsize, ravailable;
1797 #endif
1798
1799         while (TRUE) {
1800                 mtx_lock(&vm_daemon_mtx);
1801                 msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep",
1802 #ifdef RACCT
1803                     racct_enable ? hz : 0
1804 #else
1805                     0
1806 #endif
1807                 );
1808                 swapout_flags = vm_pageout_req_swapout;
1809                 vm_pageout_req_swapout = 0;
1810                 mtx_unlock(&vm_daemon_mtx);
1811                 if (swapout_flags)
1812                         swapout_procs(swapout_flags);
1813
1814                 /*
1815                  * Scan the processes: if a process exceeds its RSS rlimit or
1816                  * is swapped out, deactivate its pages.
1817                  */
1818                 tryagain = 0;
1819                 attempts = 0;
1820 again:
1821                 attempts++;
1822                 sx_slock(&allproc_lock);
1823                 FOREACH_PROC_IN_SYSTEM(p) {
1824                         vm_pindex_t limit, size;
1825
1826                         /*
1827                          * if this is a system process or if we have already
1828                          * looked at this process, skip it.
1829                          */
1830                         PROC_LOCK(p);
1831                         if (p->p_state != PRS_NORMAL ||
1832                             p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
1833                                 PROC_UNLOCK(p);
1834                                 continue;
1835                         }
1836                         /*
1837                          * if the process is in a non-running type state,
1838                          * don't touch it.
1839                          */
1840                         breakout = 0;
1841                         FOREACH_THREAD_IN_PROC(p, td) {
1842                                 thread_lock(td);
1843                                 if (!TD_ON_RUNQ(td) &&
1844                                     !TD_IS_RUNNING(td) &&
1845                                     !TD_IS_SLEEPING(td) &&
1846                                     !TD_IS_SUSPENDED(td)) {
1847                                         thread_unlock(td);
1848                                         breakout = 1;
1849                                         break;
1850                                 }
1851                                 thread_unlock(td);
1852                         }
1853                         if (breakout) {
1854                                 PROC_UNLOCK(p);
1855                                 continue;
1856                         }
1857                         /*
1858                          * get a limit
1859                          */
1860                         lim_rlimit_proc(p, RLIMIT_RSS, &rsslim);
1861                         limit = OFF_TO_IDX(
1862                             qmin(rsslim.rlim_cur, rsslim.rlim_max));
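                        /*
                         * Illustrative example (assuming 4 KB pages): an RSS
                         * rlimit of 512 MB becomes a limit of 131072 pages.
                         */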
1863
1864                         /*
1865                          * Let processes that are swapped out really be
1866                          * swapped out: set the limit to nothing (this will
1867                          * force a swap-out).
1868                          */
1869                         if ((p->p_flag & P_INMEM) == 0)
1870                                 limit = 0;      /* XXX */
1871                         vm = vmspace_acquire_ref(p);
1872                         PROC_UNLOCK(p);
1873                         if (vm == NULL)
1874                                 continue;
1875
1876                         size = vmspace_resident_count(vm);
1877                         if (size >= limit) {
1878                                 vm_pageout_map_deactivate_pages(
1879                                     &vm->vm_map, limit);
1880                         }
1881 #ifdef RACCT
1882                         if (racct_enable) {
1883                                 rsize = IDX_TO_OFF(size);
1884                                 PROC_LOCK(p);
1885                                 racct_set(p, RACCT_RSS, rsize);
1886                                 ravailable = racct_get_available(p, RACCT_RSS);
1887                                 PROC_UNLOCK(p);
1888                                 if (rsize > ravailable) {
1889                                         /*
1890                                          * Don't be overly aggressive; this
1891                                          * might be an innocent process,
1892                                          * and the limit could've been exceeded
1893                                          * by some memory hog.  Don't try
1894                                          * to deactivate more than 1/4th
1895                                          * of the process' resident set size.
1896                                          */
1897                                         if (attempts <= 8) {
1898                                                 if (ravailable < rsize -
1899                                                     (rsize / 4)) {
1900                                                         ravailable = rsize -
1901                                                             (rsize / 4);
1902                                                 }
1903                                         }
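                                        /*
                                         * Illustrative example (hypothetical
                                         * numbers): with a 400 MB RSS and
                                         * only 100 MB available under the
                                         * RACCT limit, ravailable is clamped
                                         * to 300 MB, so at most one quarter
                                         * of the resident set is deactivated
                                         * in this attempt.
                                         */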
1904                                         vm_pageout_map_deactivate_pages(
1905                                             &vm->vm_map,
1906                                             OFF_TO_IDX(ravailable));
1907                                         /* Update RSS usage after paging out. */
1908                                         size = vmspace_resident_count(vm);
1909                                         rsize = IDX_TO_OFF(size);
1910                                         PROC_LOCK(p);
1911                                         racct_set(p, RACCT_RSS, rsize);
1912                                         PROC_UNLOCK(p);
1913                                         if (rsize > ravailable)
1914                                                 tryagain = 1;
1915                                 }
1916                         }
1917 #endif
1918                         vmspace_free(vm);
1919                 }
1920                 sx_sunlock(&allproc_lock);
1921                 if (tryagain != 0 && attempts <= 10)
1922                         goto again;
1923         }
1924 }
1925 #endif                  /* !defined(NO_SWAPPING) */