[FreeBSD/FreeBSD.git] / sys / vm / vm_pageout.c
1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  * Copyright (c) 2005 Yahoo! Technologies Norway AS
9  * All rights reserved.
10  *
11  * This code is derived from software contributed to Berkeley by
12  * The Mach Operating System project at Carnegie-Mellon University.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. All advertising materials mentioning features or use of this software
23  *    must display the following acknowledgement:
24  *      This product includes software developed by the University of
25  *      California, Berkeley and its contributors.
26  * 4. Neither the name of the University nor the names of its contributors
27  *    may be used to endorse or promote products derived from this software
28  *    without specific prior written permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40  * SUCH DAMAGE.
41  *
42  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
43  *
44  *
45  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46  * All rights reserved.
47  *
48  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
49  *
50  * Permission to use, copy, modify and distribute this software and
51  * its documentation is hereby granted, provided that both the copyright
52  * notice and this permission notice appear in all copies of the
53  * software, derivative works or modified versions, and any portions
54  * thereof, and that both notices appear in supporting documentation.
55  *
56  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
57  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
58  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
59  *
60  * Carnegie Mellon requests users of this software to return to
61  *
62  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
63  *  School of Computer Science
64  *  Carnegie Mellon University
65  *  Pittsburgh PA 15213-3890
66  *
67  * any improvements or extensions that they make and grant Carnegie the
68  * rights to redistribute these changes.
69  */
70
71 /*
72  *      The proverbial page-out daemon.
73  */
74
75 #include <sys/cdefs.h>
76 __FBSDID("$FreeBSD$");
77
78 #include "opt_vm.h"
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/eventhandler.h>
83 #include <sys/lock.h>
84 #include <sys/mutex.h>
85 #include <sys/proc.h>
86 #include <sys/kthread.h>
87 #include <sys/ktr.h>
88 #include <sys/mount.h>
89 #include <sys/racct.h>
90 #include <sys/resourcevar.h>
91 #include <sys/sched.h>
92 #include <sys/signalvar.h>
93 #include <sys/vnode.h>
94 #include <sys/vmmeter.h>
95 #include <sys/rwlock.h>
96 #include <sys/sx.h>
97 #include <sys/sysctl.h>
98
99 #include <vm/vm.h>
100 #include <vm/vm_param.h>
101 #include <vm/vm_object.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_map.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_pager.h>
106 #include <vm/swap_pager.h>
107 #include <vm/vm_extern.h>
108 #include <vm/uma.h>
109
110 /*
111  * System initialization
112  */
113
114 /* the kernel process "vm_pageout"*/
115 static void vm_pageout(void);
116 static int vm_pageout_clean(vm_page_t);
117 static void vm_pageout_scan(int pass);
118
119 struct proc *pageproc;
120
121 static struct kproc_desc page_kp = {
122         "pagedaemon",
123         vm_pageout,
124         &pageproc
125 };
126 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
127     &page_kp);
128
129 #if !defined(NO_SWAPPING)
130 /* the kernel process "vm_daemon"*/
131 static void vm_daemon(void);
132 static struct   proc *vmproc;
133
134 static struct kproc_desc vm_kp = {
135         "vmdaemon",
136         vm_daemon,
137         &vmproc
138 };
139 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
140 #endif
141
142
143 int vm_pages_needed;            /* Event on which pageout daemon sleeps */
144 int vm_pageout_deficit;         /* Estimated number of pages deficit */
145 int vm_pageout_pages_needed;    /* flag saying that the pageout daemon needs pages */
146
147 #if !defined(NO_SWAPPING)
148 static int vm_pageout_req_swapout;      /* XXX */
149 static int vm_daemon_needed;
150 static struct mtx vm_daemon_mtx;
151 /* Allow for use by vm_pageout before vm_daemon is initialized. */
152 MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
153 #endif
154 static int vm_max_launder = 32;
155 static int vm_pageout_stats_max;
156 static int vm_pageout_stats;
157 static int vm_pageout_stats_interval;
158 static int vm_pageout_full_stats;
159 static int vm_pageout_full_stats_interval;
160 static int vm_pageout_algorithm;
161 static int defer_swap_pageouts;
162 static int disable_swap_pageouts;
163
164 #if defined(NO_SWAPPING)
165 static int vm_swap_enabled = 0;
166 static int vm_swap_idle_enabled = 0;
167 #else
168 static int vm_swap_enabled = 1;
169 static int vm_swap_idle_enabled = 0;
170 #endif
171
172 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
173         CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
174
175 SYSCTL_INT(_vm, OID_AUTO, max_launder,
176         CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
177
178 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
179         CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
180
181 SYSCTL_INT(_vm, OID_AUTO, pageout_stats,
182         CTLFLAG_RD, &vm_pageout_stats, 0, "Number of partial stats scans");
183
184 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
185         CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
186
187 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats,
188         CTLFLAG_RD, &vm_pageout_full_stats, 0, "Number of full stats scans");
189
190 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
191         CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
192
193 #if defined(NO_SWAPPING)
194 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
195         CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
196 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
197         CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
198 #else
199 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
200         CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
201 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
202         CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
203 #endif
204
205 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
206         CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
207
208 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
209         CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
210
211 static int pageout_lock_miss;
212 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
213         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
214
215 #define VM_PAGEOUT_PAGE_COUNT 16
216 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
217
218 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
219 SYSCTL_INT(_vm, OID_AUTO, max_wired,
220         CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
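/*
 * These knobs live under the "vm" sysctl tree; for example, the dirty page
 * flush limit declared above can be inspected or tuned at run time with
 *
 *      sysctl vm.max_launder
 *      sysctl vm.max_launder=64
 *
 * (the value 64 is purely illustrative).
 */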
221
222 static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
223 static boolean_t vm_pageout_launder(int, int, vm_paddr_t, vm_paddr_t);
224 #if !defined(NO_SWAPPING)
225 static void vm_pageout_map_deactivate_pages(vm_map_t, long);
226 static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
227 static void vm_req_vmdaemon(int req);
228 #endif
229 static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
230 static void vm_pageout_page_stats(void);
231
232 /*
233  * Initialize a dummy page for marking the caller's place in the specified
234  * paging queue.  In principle, this function only needs to set the flag
235  * PG_MARKER.  Nonetheless, it sets the flag VPO_BUSY and initializes the hold
236  * count to one as safety precautions.
237  */ 
238 static void
239 vm_pageout_init_marker(vm_page_t marker, u_short queue)
240 {
241
242         bzero(marker, sizeof(*marker));
243         marker->flags = PG_MARKER;
244         marker->oflags = VPO_BUSY;
245         marker->queue = queue;
246         marker->hold_count = 1;
247 }
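/*
 * A marker's typical lifecycle, as used by the two helpers below and by
 * vm_pageout_scan(): insert it after the current page, drop the page queue
 * lock, do the blocking work, reacquire the lock, read
 * TAILQ_NEXT(&marker, pageq) to recover the scan position, and finally
 * remove the marker.  The marker itself lives on the caller's stack.
 */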
248
249 /*
250  * vm_pageout_fallback_object_lock:
251  * 
252  * Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
253  * known to have failed and page queue must be either PQ_ACTIVE or
254  * PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
255  * while locking the vm object.  Use marker page to detect page queue
256  * changes and maintain notion of next page on page queue.  Return
257  * TRUE if no changes were detected, FALSE otherwise.  vm object is
258  * locked on return.
259  * 
260  * This function depends on both the lock portion of struct vm_object
261  * and normal struct vm_page being type stable.
262  */
263 static boolean_t
264 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
265 {
266         struct vm_page marker;
267         struct vm_pagequeue *pq;
268         boolean_t unchanged;
269         u_short queue;
270         vm_object_t object;
271
272         queue = m->queue;
273         vm_pageout_init_marker(&marker, queue);
274         pq = &vm_pagequeues[queue];
275         object = m->object;
276         
277         TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
278         vm_pagequeue_unlock(pq);
279         vm_page_unlock(m);
280         VM_OBJECT_WLOCK(object);
281         vm_page_lock(m);
282         vm_pagequeue_lock(pq);
283
284         /* Page queue might have changed. */
285         *next = TAILQ_NEXT(&marker, pageq);
286         unchanged = (m->queue == queue &&
287                      m->object == object &&
288                      &marker == TAILQ_NEXT(m, pageq));
289         TAILQ_REMOVE(&pq->pq_pl, &marker, pageq);
290         return (unchanged);
291 }
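/*
 * The usual caller idiom (see vm_pageout_launder() and vm_pageout_scan()
 * below) tries the object lock first and only falls back on failure:
 *
 *      object = m->object;
 *      if (!VM_OBJECT_TRYWLOCK(object) &&
 *          !vm_pageout_fallback_object_lock(m, &next)) {
 *              vm_page_unlock(m);
 *              VM_OBJECT_WUNLOCK(object);
 *              continue;
 *      }
 */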
292
293 /*
294  * Lock the page while holding the page queue lock.  Use marker page
295  * to detect page queue changes and maintain notion of next page on
296  * page queue.  Return TRUE if no changes were detected, FALSE
297  * otherwise.  The page is locked on return. The page queue lock might
298  * be dropped and reacquired.
299  *
300  * This function depends on normal struct vm_page being type stable.
301  */
302 static boolean_t
303 vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
304 {
305         struct vm_page marker;
306         struct vm_pagequeue *pq;
307         boolean_t unchanged;
308         u_short queue;
309
310         vm_page_lock_assert(m, MA_NOTOWNED);
311         if (vm_page_trylock(m))
312                 return (TRUE);
313
314         queue = m->queue;
315         vm_pageout_init_marker(&marker, queue);
316         pq = &vm_pagequeues[queue];
317
318         TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
319         vm_pagequeue_unlock(pq);
320         vm_page_lock(m);
321         vm_pagequeue_lock(pq);
322
323         /* Page queue might have changed. */
324         *next = TAILQ_NEXT(&marker, pageq);
325         unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
326         TAILQ_REMOVE(&pq->pq_pl, &marker, pageq);
327         return (unchanged);
328 }
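/*
 * Callers walking a page queue use this as, for example:
 *
 *      if (!vm_pageout_page_lock(m, &next)) {
 *              vm_page_unlock(m);
 *              continue;
 *      }
 *
 * Note that the page lock is held on return even when the function reports
 * a queue change, hence the unconditional vm_page_unlock().
 */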
329
330 /*
331  * vm_pageout_clean:
332  *
333  * Clean the page and remove it from the laundry.
334  * 
335  * We set the busy bit to cause potential page faults on this page to
336  * block.  Note the careful timing, however, the busy bit isn't set till
337  * late and we cannot do anything that will mess with the page.
338  */
339 static int
340 vm_pageout_clean(vm_page_t m)
341 {
342         vm_object_t object;
343         vm_page_t mc[2*vm_pageout_page_count], pb, ps;
344         int pageout_count;
345         int ib, is, page_base;
346         vm_pindex_t pindex = m->pindex;
347
348         vm_page_lock_assert(m, MA_OWNED);
349         object = m->object;
350         VM_OBJECT_ASSERT_WLOCKED(object);
351
352         /*
353          * It doesn't cost us anything to page out OBJT_DEFAULT or OBJT_SWAP
354          * with the new swapper, but we could have serious problems paging
355          * out other object types if there is insufficient memory.  
356          *
357          * Unfortunately, checking free memory here is far too late, so the
358          * check has been moved up a procedural level.
359          */
360
361         /*
362          * Can't clean the page if it's busy or held.
363          */
364         KASSERT(m->busy == 0 && (m->oflags & VPO_BUSY) == 0,
365             ("vm_pageout_clean: page %p is busy", m));
366         KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
367         vm_page_unlock(m);
368
369         mc[vm_pageout_page_count] = pb = ps = m;
370         pageout_count = 1;
371         page_base = vm_pageout_page_count;
372         ib = 1;
373         is = 1;
374
375         /*
376          * Scan object for clusterable pages.
377          *
378          * We can cluster ONLY if: ->> the page is NOT
379          * clean, wired, busy, held, or mapped into a
380          * buffer, and one of the following:
381          * 1) The page is inactive, or a seldom used
382          *    active page.
383          * -or-
384          * 2) we force the issue.
385          *
386          * During heavy mmap/modification loads the pageout
387          * daemon can really fragment the underlying file
388          * due to flushing pages out of order and not trying to
389          * align the clusters (which leaves sporadic out-of-order
390          * holes).  To solve this problem we do the reverse scan
391          * first and attempt to align our cluster, then do a 
392          * forward scan if room remains.
393          */
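        /*
         * The cluster is assembled in mc[], with the starting page at index
         * vm_pageout_page_count.  The backward scan below grows the cluster
         * toward lower indices (tracked by page_base), while the forward
         * scan grows it toward higher indices, so the run finally passed to
         * vm_pageout_flush() is mc[page_base .. page_base + pageout_count - 1].
         */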
394 more:
395         while (ib && pageout_count < vm_pageout_page_count) {
396                 vm_page_t p;
397
398                 if (ib > pindex) {
399                         ib = 0;
400                         break;
401                 }
402
403                 if ((p = vm_page_prev(pb)) == NULL ||
404                     (p->oflags & VPO_BUSY) != 0 || p->busy != 0) {
405                         ib = 0;
406                         break;
407                 }
408                 vm_page_lock(p);
409                 vm_page_test_dirty(p);
410                 if (p->dirty == 0 ||
411                     p->queue != PQ_INACTIVE ||
412                     p->hold_count != 0) {       /* may be undergoing I/O */
413                         vm_page_unlock(p);
414                         ib = 0;
415                         break;
416                 }
417                 vm_page_unlock(p);
418                 mc[--page_base] = pb = p;
419                 ++pageout_count;
420                 ++ib;
421                 /*
422                  * Alignment boundary; stop here and switch directions.  Do
423                  * not clear ib.
424                  */
425                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
426                         break;
427         }
428
429         while (pageout_count < vm_pageout_page_count && 
430             pindex + is < object->size) {
431                 vm_page_t p;
432
433                 if ((p = vm_page_next(ps)) == NULL ||
434                     (p->oflags & VPO_BUSY) != 0 || p->busy != 0)
435                         break;
436                 vm_page_lock(p);
437                 vm_page_test_dirty(p);
438                 if (p->dirty == 0 ||
439                     p->queue != PQ_INACTIVE ||
440                     p->hold_count != 0) {       /* may be undergoing I/O */
441                         vm_page_unlock(p);
442                         break;
443                 }
444                 vm_page_unlock(p);
445                 mc[page_base + pageout_count] = ps = p;
446                 ++pageout_count;
447                 ++is;
448         }
449
450         /*
451          * If we exhausted our forward scan, continue with the reverse scan
452          * when possible, even past a page boundary.  This catches boundary
453          * conditions.
454          */
455         if (ib && pageout_count < vm_pageout_page_count)
456                 goto more;
457
458         /*
459          * we allow reads during pageouts...
460          */
461         return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL,
462             NULL));
463 }
464
465 /*
466  * vm_pageout_flush() - launder the given pages
467  *
468  *      The given pages are laundered.  Note that we set up for the start of
469  *      I/O (i.e., busy the page), mark it read-only, and bump the object
470  *      reference count all in here rather than in the parent.  If we want
471  *      the parent to do more sophisticated things we may have to change
472  *      the ordering.
473  *
474  *      The returned runlen is the count of pages between mreq and the first
475  *      page after mreq with status VM_PAGER_AGAIN.
476  *      *eio is set to TRUE if the pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
477  *      for any page in that run.
478  */
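/*
 * For example, vm_pageout_launder() below writes a single page synchronously
 * with
 *
 *      vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC, 0, NULL, NULL);
 *
 * while vm_pageout_clean() hands over a whole cluster and lets the pager
 * complete the I/O asynchronously.
 */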
479 int
480 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
481     boolean_t *eio)
482 {
483         vm_object_t object = mc[0]->object;
484         int pageout_status[count];
485         int numpagedout = 0;
486         int i, runlen;
487
488         VM_OBJECT_ASSERT_WLOCKED(object);
489
490         /*
491          * Initiate I/O.  Bump the vm_page_t->busy counter and
492          * mark the pages read-only.
493          *
494          * We do not have to fixup the clean/dirty bits here... we can
495          * allow the pager to do it after the I/O completes.
496          *
497          * NOTE! mc[i]->dirty may be partial or fragmented due to an
498          * edge case with file fragments.
499          */
500         for (i = 0; i < count; i++) {
501                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
502                     ("vm_pageout_flush: partially invalid page %p index %d/%d",
503                         mc[i], i, count));
504                 vm_page_io_start(mc[i]);
505                 pmap_remove_write(mc[i]);
506         }
507         vm_object_pip_add(object, count);
508
509         vm_pager_put_pages(object, mc, count, flags, pageout_status);
510
511         runlen = count - mreq;
512         if (eio != NULL)
513                 *eio = FALSE;
514         for (i = 0; i < count; i++) {
515                 vm_page_t mt = mc[i];
516
517                 KASSERT(pageout_status[i] == VM_PAGER_PEND ||
518                     !pmap_page_is_write_mapped(mt),
519                     ("vm_pageout_flush: page %p is not write protected", mt));
520                 switch (pageout_status[i]) {
521                 case VM_PAGER_OK:
522                 case VM_PAGER_PEND:
523                         numpagedout++;
524                         break;
525                 case VM_PAGER_BAD:
526                         /*
527                          * Page outside of range of object. Right now we
528                          * essentially lose the changes by pretending it
529                          * worked.
530                          */
531                         vm_page_undirty(mt);
532                         break;
533                 case VM_PAGER_ERROR:
534                 case VM_PAGER_FAIL:
535                         /*
536                          * If the page couldn't be paged out, then reactivate it
537                          * so that it doesn't clog the inactive list.  (We will
538                          * try paging it out again later.)
539                          */
540                         vm_page_lock(mt);
541                         vm_page_activate(mt);
542                         vm_page_unlock(mt);
543                         if (eio != NULL && i >= mreq && i - mreq < runlen)
544                                 *eio = TRUE;
545                         break;
546                 case VM_PAGER_AGAIN:
547                         if (i >= mreq && i - mreq < runlen)
548                                 runlen = i - mreq;
549                         break;
550                 }
551
552                 /*
553                  * If the operation is still going, leave the page busy to
554                  * block all other accesses. Also, leave the paging in
555                  * progress indicator set so that we don't attempt an object
556                  * collapse.
557                  */
558                 if (pageout_status[i] != VM_PAGER_PEND) {
559                         vm_object_pip_wakeup(object);
560                         vm_page_io_finish(mt);
561                         if (vm_page_count_severe()) {
562                                 vm_page_lock(mt);
563                                 vm_page_try_to_cache(mt);
564                                 vm_page_unlock(mt);
565                         }
566                 }
567         }
568         if (prunlen != NULL)
569                 *prunlen = runlen;
570         return (numpagedout);
571 }
572
573 static boolean_t
574 vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
575 {
576         struct mount *mp;
577         struct vm_pagequeue *pq;
578         struct vnode *vp;
579         vm_object_t object;
580         vm_paddr_t pa;
581         vm_page_t m, m_tmp, next;
582
583         pq = &vm_pagequeues[queue];
584         vm_pagequeue_lock(pq);
585         TAILQ_FOREACH_SAFE(m, &pq->pq_pl, pageq, next) {
586                 KASSERT(m->queue == queue,
587                     ("vm_pageout_launder: page %p's queue is not %d", m,
588                     queue));
589                 if ((m->flags & PG_MARKER) != 0)
590                         continue;
591                 pa = VM_PAGE_TO_PHYS(m);
592                 if (pa < low || pa + PAGE_SIZE > high)
593                         continue;
594                 if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
595                         vm_page_unlock(m);
596                         continue;
597                 }
598                 object = m->object;
599                 if ((!VM_OBJECT_TRYWLOCK(object) &&
600                     (!vm_pageout_fallback_object_lock(m, &next) ||
601                     m->hold_count != 0)) || (m->oflags & VPO_BUSY) != 0 ||
602                     m->busy != 0) {
603                         vm_page_unlock(m);
604                         VM_OBJECT_WUNLOCK(object);
605                         continue;
606                 }
607                 vm_page_test_dirty(m);
608                 if (m->dirty == 0 && object->ref_count != 0)
609                         pmap_remove_all(m);
610                 if (m->dirty != 0) {
611                         vm_page_unlock(m);
612                         if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
613                                 VM_OBJECT_WUNLOCK(object);
614                                 continue;
615                         }
616                         if (object->type == OBJT_VNODE) {
617                                 vm_pagequeue_unlock(pq);
618                                 vp = object->handle;
619                                 vm_object_reference_locked(object);
620                                 VM_OBJECT_WUNLOCK(object);
621                                 (void)vn_start_write(vp, &mp, V_WAIT);
622                                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
623                                 VM_OBJECT_WLOCK(object);
624                                 vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
625                                 VM_OBJECT_WUNLOCK(object);
626                                 VOP_UNLOCK(vp, 0);
627                                 vm_object_deallocate(object);
628                                 vn_finished_write(mp);
629                                 return (TRUE);
630                         } else if (object->type == OBJT_SWAP ||
631                             object->type == OBJT_DEFAULT) {
632                                 vm_pagequeue_unlock(pq);
633                                 m_tmp = m;
634                                 vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
635                                     0, NULL, NULL);
636                                 VM_OBJECT_WUNLOCK(object);
637                                 return (TRUE);
638                         }
639                 } else {
640                         /*
641                          * Dequeue here to prevent lock recursion in
642                          * vm_page_cache().
643                          */
644                         vm_page_dequeue_locked(m);
645                         vm_page_cache(m);
646                         vm_page_unlock(m);
647                 }
648                 VM_OBJECT_WUNLOCK(object);
649         }
650         vm_pagequeue_unlock(pq);
651         return (FALSE);
652 }
653
654 /*
655  * Increase the number of cached pages.  The specified value, "tries",
656  * determines which categories of pages are cached:
657  *
658  *  0: All clean, inactive pages within the specified physical address range
659  *     are cached.  Will not sleep.
660  *  1: The vm_lowmem handlers are called.  All inactive pages within
661  *     the specified physical address range are cached.  May sleep.
662  *  2: The vm_lowmem handlers are called.  All inactive and active pages
663  *     within the specified physical address range are cached.  May sleep.
664  */
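/*
 * Callers that fail to allocate pages in the range [low, high) are expected
 * to retry with increasing values of "tries", accepting more expensive (and
 * possibly sleeping) reclamation on each attempt.
 */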
665 void
666 vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
667 {
668         int actl, actmax, inactl, inactmax;
669
670         if (tries > 0) {
671                 /*
672                  * Decrease registered cache sizes.  The vm_lowmem handlers
673                  * may acquire locks and/or sleep, so they can only be invoked
674                  * when "tries" is greater than zero.
675                  */
676                 EVENTHANDLER_INVOKE(vm_lowmem, 0);
677
678                 /*
679                  * We do this explicitly after the caches have been drained
680                  * above.
681                  */
682                 uma_reclaim();
683         }
684         inactl = 0;
685         inactmax = cnt.v_inactive_count;
686         actl = 0;
687         actmax = tries < 2 ? 0 : cnt.v_active_count;
688 again:
689         if (inactl < inactmax && vm_pageout_launder(PQ_INACTIVE, tries, low,
690             high)) {
691                 inactl++;
692                 goto again;
693         }
694         if (actl < actmax && vm_pageout_launder(PQ_ACTIVE, tries, low, high)) {
695                 actl++;
696                 goto again;
697         }
698 }
699
700 #if !defined(NO_SWAPPING)
701 /*
702  *      vm_pageout_object_deactivate_pages
703  *
704  *      Deactivate enough pages to satisfy the inactive target
705  *      requirements.
706  *
707  *      The object and map must be locked.
708  */
709 static void
710 vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
711     long desired)
712 {
713         vm_object_t backing_object, object;
714         vm_page_t p;
715         int actcount, remove_mode;
716
717         VM_OBJECT_ASSERT_WLOCKED(first_object);
718         if ((first_object->flags & OBJ_FICTITIOUS) != 0)
719                 return;
720         for (object = first_object;; object = backing_object) {
721                 if (pmap_resident_count(pmap) <= desired)
722                         goto unlock_return;
723                 VM_OBJECT_ASSERT_WLOCKED(object);
724                 if ((object->flags & OBJ_UNMANAGED) != 0 ||
725                     object->paging_in_progress != 0)
726                         goto unlock_return;
727
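                /*
                 * An object with more than one shadow may have pages that
                 * are still mapped by other address spaces through those
                 * shadows, so in that case (remove_mode below) active pages
                 * are merely requeued rather than having all of their
                 * mappings removed.
                 */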
728                 remove_mode = 0;
729                 if (object->shadow_count > 1)
730                         remove_mode = 1;
731                 /*
732                  * Scan the object's entire memory queue.
733                  */
734                 TAILQ_FOREACH(p, &object->memq, listq) {
735                         if (pmap_resident_count(pmap) <= desired)
736                                 goto unlock_return;
737                         if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0)
738                                 continue;
739                         PCPU_INC(cnt.v_pdpages);
740                         vm_page_lock(p);
741                         if (p->wire_count != 0 || p->hold_count != 0 ||
742                             !pmap_page_exists_quick(pmap, p)) {
743                                 vm_page_unlock(p);
744                                 continue;
745                         }
746                         actcount = pmap_ts_referenced(p);
747                         if ((p->aflags & PGA_REFERENCED) != 0) {
748                                 if (actcount == 0)
749                                         actcount = 1;
750                                 vm_page_aflag_clear(p, PGA_REFERENCED);
751                         }
752                         if (p->queue != PQ_ACTIVE && actcount != 0) {
753                                 vm_page_activate(p);
754                                 p->act_count += actcount;
755                         } else if (p->queue == PQ_ACTIVE) {
756                                 if (actcount == 0) {
757                                         p->act_count -= min(p->act_count,
758                                             ACT_DECLINE);
759                                         if (!remove_mode &&
760                                             (vm_pageout_algorithm ||
761                                             p->act_count == 0)) {
762                                                 pmap_remove_all(p);
763                                                 vm_page_deactivate(p);
764                                         } else
765                                                 vm_page_requeue(p);
766                                 } else {
767                                         vm_page_activate(p);
768                                         if (p->act_count < ACT_MAX -
769                                             ACT_ADVANCE)
770                                                 p->act_count += ACT_ADVANCE;
771                                         vm_page_requeue(p);
772                                 }
773                         } else if (p->queue == PQ_INACTIVE)
774                                 pmap_remove_all(p);
775                         vm_page_unlock(p);
776                 }
777                 if ((backing_object = object->backing_object) == NULL)
778                         goto unlock_return;
779                 VM_OBJECT_WLOCK(backing_object);
780                 if (object != first_object)
781                         VM_OBJECT_WUNLOCK(object);
782         }
783 unlock_return:
784         if (object != first_object)
785                 VM_OBJECT_WUNLOCK(object);
786 }
787
788 /*
789  * Deactivate some number of pages in a map; try to do it fairly, but
790  * that is really hard to do.
791  */
792 static void
793 vm_pageout_map_deactivate_pages(vm_map_t map, long desired)
794 {
797         vm_map_entry_t tmpe;
798         vm_object_t obj, bigobj;
799         int nothingwired;
800
801         if (!vm_map_trylock(map))
802                 return;
803
804         bigobj = NULL;
805         nothingwired = TRUE;
806
807         /*
808          * first, search out the biggest object, and try to free pages from
809          * that.
810          */
811         tmpe = map->header.next;
812         while (tmpe != &map->header) {
813                 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
814                         obj = tmpe->object.vm_object;
815                         if (obj != NULL && VM_OBJECT_TRYWLOCK(obj)) {
816                                 if (obj->shadow_count <= 1 &&
817                                     (bigobj == NULL ||
818                                      bigobj->resident_page_count < obj->resident_page_count)) {
819                                         if (bigobj != NULL)
820                                                 VM_OBJECT_WUNLOCK(bigobj);
821                                         bigobj = obj;
822                                 } else
823                                         VM_OBJECT_WUNLOCK(obj);
824                         }
825                 }
826                 if (tmpe->wired_count > 0)
827                         nothingwired = FALSE;
828                 tmpe = tmpe->next;
829         }
830
831         if (bigobj != NULL) {
832                 vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
833                 VM_OBJECT_WUNLOCK(bigobj);
834         }
835         /*
836          * Next, hunt around for other pages to deactivate.  We actually
837          * do this search sort of wrong -- .text first is not the best idea.
838          */
839         tmpe = map->header.next;
840         while (tmpe != &map->header) {
841                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
842                         break;
843                 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
844                         obj = tmpe->object.vm_object;
845                         if (obj != NULL) {
846                                 VM_OBJECT_WLOCK(obj);
847                                 vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
848                                 VM_OBJECT_WUNLOCK(obj);
849                         }
850                 }
851                 tmpe = tmpe->next;
852         }
853
854         /*
855          * Remove all mappings if a process is swapped out; this will free page
856          * table pages.
857          */
858         if (desired == 0 && nothingwired) {
859                 pmap_remove(vm_map_pmap(map), vm_map_min(map),
860                     vm_map_max(map));
861         }
862         vm_map_unlock(map);
863 }
864 #endif          /* !defined(NO_SWAPPING) */
865
866 /*
867  *      vm_pageout_scan does the dirty work for the pageout daemon.
868  */
869 static void
870 vm_pageout_scan(int pass)
871 {
872         vm_page_t m, next;
873         struct vm_page marker;
874         struct vm_pagequeue *pq;
875         int page_shortage, maxscan, pcount;
876         int addl_page_shortage;
877         vm_object_t object;
878         int actcount;
879         int vnodes_skipped = 0;
880         int maxlaunder;
881         boolean_t queues_locked;
882
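        /*
         * The stack-allocated marker records our place in the inactive queue
         * whenever the queue lock has to be dropped below, and queues_locked
         * tracks whether that lock is currently held.
         */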
883         vm_pageout_init_marker(&marker, PQ_INACTIVE);
884
885         /*
886          * Decrease registered cache sizes.
887          */
888         EVENTHANDLER_INVOKE(vm_lowmem, 0);
889         /*
890          * We do this explicitly after the caches have been drained above.
891          */
892         uma_reclaim();
893
894         /*
895          * The addl_page_shortage is the number of temporarily
896          * stuck pages in the inactive queue.  In other words, the
897          * number of pages from cnt.v_inactive_count that should be
898          * discounted in setting the target for the active queue scan.
899          */
900         addl_page_shortage = atomic_readandclear_int(&vm_pageout_deficit);
901
902         /*
903          * Calculate the number of pages we want to either free or move
904          * to the cache.
905          */
906         page_shortage = vm_paging_target() + addl_page_shortage;
907
908         /*
909          * maxlaunder limits the number of dirty pages we flush per scan.
910          * For most systems a smaller value (16 or 32) is more robust under
911          * extreme memory and disk pressure because any unnecessary writes
912          * to disk can result in extreme performance degradation.  However,
913          * systems with excessive dirty pages (especially when MAP_NOSYNC is
914          * used) will die horribly with limited laundering.  If the pageout
915          * daemon cannot clean enough pages in the first pass, we let it go
916          * all out in succeeding passes.
917          */
918         if ((maxlaunder = vm_max_launder) <= 1)
919                 maxlaunder = 1;
920         if (pass)
921                 maxlaunder = 10000;
922
923         maxscan = cnt.v_inactive_count;
924
925         /*
926          * Start scanning the inactive queue for pages we can move to the
927          * cache or free.  The scan will stop when the target is reached or
928          * we have scanned the entire inactive queue.  Note that m->act_count
929          * is not used to form decisions for the inactive queue, only for the
930          * active queue.
931          */
932         pq = &vm_pagequeues[PQ_INACTIVE];
933         vm_pagequeue_lock(pq);
934         queues_locked = TRUE;
935         for (m = TAILQ_FIRST(&pq->pq_pl);
936              m != NULL && maxscan-- > 0 && page_shortage > 0;
937              m = next) {
938                 vm_pagequeue_assert_locked(pq);
939                 KASSERT(queues_locked, ("unlocked queues"));
940                 KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m));
941
942                 PCPU_INC(cnt.v_pdpages);
943                 next = TAILQ_NEXT(m, pageq);
944
945                 /*
946                  * skip marker pages
947                  */
948                 if (m->flags & PG_MARKER)
949                         continue;
950
951                 KASSERT((m->flags & PG_FICTITIOUS) == 0,
952                     ("Fictitious page %p cannot be in inactive queue", m));
953                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
954                     ("Unmanaged page %p cannot be in inactive queue", m));
955
956                 /*
957                  * The page or object lock acquisitions fail if the
958                  * page was removed from the queue or moved to a
959                  * different position within the queue.  In either
960                  * case, addl_page_shortage should not be incremented.
961                  */
962                 if (!vm_pageout_page_lock(m, &next)) {
963                         vm_page_unlock(m);
964                         continue;
965                 }
966                 object = m->object;
967                 if (!VM_OBJECT_TRYWLOCK(object) &&
968                     !vm_pageout_fallback_object_lock(m, &next)) {
969                         vm_page_unlock(m);
970                         VM_OBJECT_WUNLOCK(object);
971                         continue;
972                 }
973
974                 /*
975                  * Don't mess with busy pages; keep them at the front of
976                  * the queue, since most likely they are being paged out.
977                  * Increment addl_page_shortage for busy pages, because
978                  * they may leave the inactive queue shortly after the
979                  * page scan is finished.
980                  */
981                 if (m->busy != 0 || (m->oflags & VPO_BUSY) != 0) {
982                         vm_page_unlock(m);
983                         VM_OBJECT_WUNLOCK(object);
984                         addl_page_shortage++;
985                         continue;
986                 }
987
988                 /*
989                  * We unlock the inactive page queue, invalidating the
990                  * 'next' pointer.  Use our marker to remember our
991                  * place.
992                  */
993                 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
994                 vm_pagequeue_unlock(pq);
995                 queues_locked = FALSE;
996
997                 /*
998                  * If the object is not being used, we ignore previous 
999                  * references.
1000                  */
1001                 if (object->ref_count == 0) {
1002                         vm_page_aflag_clear(m, PGA_REFERENCED);
1003                         KASSERT(!pmap_page_is_mapped(m),
1004                             ("vm_pageout_scan: page %p is mapped", m));
1005
1006                 /*
1007                  * Otherwise, if the page has been referenced while in the 
1008                  * inactive queue, we bump the "activation count" upwards, 
1009                  * making it less likely that the page will be added back to 
1010                  * the inactive queue prematurely again.  Here we check the
1011                  * page tables (or emulated bits, if any), since the upper
1012                  * level VM system does not know anything about existing
1013                  * references.
1014                  */
1015                 } else if ((m->aflags & PGA_REFERENCED) == 0 &&
1016                     (actcount = pmap_ts_referenced(m)) != 0) {
1017                         vm_page_activate(m);
1018                         VM_OBJECT_WUNLOCK(object);
1019                         m->act_count += actcount + ACT_ADVANCE;
1020                         vm_page_unlock(m);
1021                         goto relock_queues;
1022                 }
1023
1024                 /*
1025                  * If the upper level VM system knows about any page 
1026                  * references, we activate the page.  We also set the 
1027                  * "activation count" higher than normal so that we are less
1028                  * likely to place the page back onto the inactive queue again.
1029                  */
1030                 if ((m->aflags & PGA_REFERENCED) != 0) {
1031                         vm_page_aflag_clear(m, PGA_REFERENCED);
1032                         actcount = pmap_ts_referenced(m);
1033                         vm_page_activate(m);
1034                         VM_OBJECT_WUNLOCK(object);
1035                         m->act_count += actcount + ACT_ADVANCE + 1;
1036                         vm_page_unlock(m);
1037                         goto relock_queues;
1038                 }
1039
1040                 if (m->hold_count != 0) {
1041                         vm_page_unlock(m);
1042                         VM_OBJECT_WUNLOCK(object);
1043
1044                         /*
1045                          * Held pages are essentially stuck in the
1046                          * queue.  So, they ought to be discounted
1047                          * from cnt.v_inactive_count.  See the
1048                          * calculation of the page_shortage for the
1049                          * loop over the active queue below.
1050                          */
1051                         addl_page_shortage++;
1052                         goto relock_queues;
1053                 }
1054
1055                 /*
1056                  * If the page appears to be clean at the machine-independent
1057                  * layer, then remove all of its mappings from the pmap in
1058                  * anticipation of placing it onto the cache queue.  If,
1059                  * however, any of the page's mappings allow write access,
1060                  * then the page may still be modified until the last of those
1061                  * mappings are removed.
1062                  */
1063                 vm_page_test_dirty(m);
1064                 if (m->dirty == 0 && object->ref_count != 0)
1065                         pmap_remove_all(m);
1066
1067                 if (m->valid == 0) {
1068                         /*
1069                          * Invalid pages can be easily freed
1070                          */
1071                         vm_page_free(m);
1072                         PCPU_INC(cnt.v_dfree);
1073                         --page_shortage;
1074                 } else if (m->dirty == 0) {
1075                         /*
1076                          * Clean pages can be placed onto the cache queue.
1077                          * This effectively frees them.
1078                          */
1079                         vm_page_cache(m);
1080                         --page_shortage;
1081                 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
1082                         /*
1083                          * Dirty pages need to be paged out, but flushing
1084                          * a page is extremely expensive versus freeing
1085                          * a clean page.  Rather than artificially limiting
1086                          * the number of pages we can flush, we instead give
1087                          * dirty pages extra priority on the inactive queue
1088                          * by forcing them to be cycled through the queue
1089                          * twice before being flushed, after which the
1090                          * (now clean) page will cycle through once more
1091                          * before being freed.  This significantly extends
1092                          * the thrash point for a heavily loaded machine.
1093                          */
1094                         m->flags |= PG_WINATCFLS;
1095                         vm_pagequeue_lock(pq);
1096                         queues_locked = TRUE;
1097                         vm_page_requeue_locked(m);
1098                 } else if (maxlaunder > 0) {
1099                         /*
1100                          * We always want to try to flush some dirty pages if
1101                          * we encounter them, to keep the system stable.
1102                          * Normally this number is small, but under extreme
1103                          * pressure where there are insufficient clean pages
1104                          * on the inactive queue, we may have to go all out.
1105                          */
1106                         int swap_pageouts_ok;
1107                         struct vnode *vp = NULL;
1108                         struct mount *mp = NULL;
1109
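                        /*
                         * swap_pageouts_ok ends up false only for anonymous
                         * (OBJT_SWAP or OBJT_DEFAULT) objects when swap
                         * pageouts are disabled, or deferred while free
                         * memory is not yet critically low.
                         */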
1110                         if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
1111                                 swap_pageouts_ok = 1;
1112                         } else {
1113                                 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
1114                                 swap_pageouts_ok |= (!disable_swap_pageouts &&
1115                                     defer_swap_pageouts && vm_page_count_min());
1116
1117                         }
1118
1119                         /*
1120                          * We don't bother paging objects that are "dead".  
1121                          * Those objects are in a "rundown" state.
1122                          */
1123                         if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
1124                                 vm_pagequeue_lock(pq);
1125                                 vm_page_unlock(m);
1126                                 VM_OBJECT_WUNLOCK(object);
1127                                 queues_locked = TRUE;
1128                                 vm_page_requeue_locked(m);
1129                                 goto relock_queues;
1130                         }
1131
1132                         /*
1133                          * The object is already known NOT to be dead.   It
1134                          * is possible for the vget() to block the whole
1135                          * pageout daemon, but the new low-memory handling
1136                          * code should prevent it.
1137                          *
1138                          * The previous code skipped locked vnodes and, worse,
1139                          * reordered pages in the queue.  This results in
1140                          * completely non-deterministic operation and, on a
1141                          * busy system, can lead to extremely non-optimal
1142                          * pageouts.  For example, it can cause clean pages
1143                          * to be freed and dirty pages to be moved to the end
1144                          * of the queue.  Since dirty pages are also moved to
1145                          * the end of the queue once-cleaned, this gives
1146                          * way too large a weighting to deferring the freeing
1147                          * of dirty pages.
1148                          *
1149                          * We can't wait forever for the vnode lock, we might
1150                          * deadlock due to a vn_read() getting stuck in
1151                          * vm_wait while holding this vnode.  We skip the 
1152                          * vnode if we can't get it in a reasonable amount
1153                          * of time.
1154                          */
1155                         if (object->type == OBJT_VNODE) {
1156                                 vm_page_unlock(m);
1157                                 vp = object->handle;
1158                                 if (vp->v_type == VREG &&
1159                                     vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1160                                         mp = NULL;
1161                                         ++pageout_lock_miss;
1162                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1163                                                 vnodes_skipped++;
1164                                         goto unlock_and_continue;
1165                                 }
1166                                 KASSERT(mp != NULL,
1167                                     ("vp %p with NULL v_mount", vp));
1168                                 vm_object_reference_locked(object);
1169                                 VM_OBJECT_WUNLOCK(object);
1170                                 if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
1171                                     curthread)) {
1172                                         VM_OBJECT_WLOCK(object);
1173                                         ++pageout_lock_miss;
1174                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1175                                                 vnodes_skipped++;
1176                                         vp = NULL;
1177                                         goto unlock_and_continue;
1178                                 }
1179                                 VM_OBJECT_WLOCK(object);
1180                                 vm_page_lock(m);
1181                                 vm_pagequeue_lock(pq);
1182                                 queues_locked = TRUE;
1183                                 /*
1184                                  * The page might have been moved to another
1185                                  * queue during potential blocking in vget()
1186                                  * above.  The page might have been freed and
1187                                  * reused for another vnode.
1188                                  */
1189                                 if (m->queue != PQ_INACTIVE ||
1190                                     m->object != object ||
1191                                     TAILQ_NEXT(m, pageq) != &marker) {
1192                                         vm_page_unlock(m);
1193                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1194                                                 vnodes_skipped++;
1195                                         goto unlock_and_continue;
1196                                 }
1197         
1198                                 /*
1199                                  * The page may have been busied during the
1200                                  * blocking in vget().  We don't move the
1201                                  * page back onto the end of the queue, so that
1202                                  * the statistics remain more accurate.
1203                                  */
1204                                 if (m->busy || (m->oflags & VPO_BUSY)) {
1205                                         vm_page_unlock(m);
1206                                         goto unlock_and_continue;
1207                                 }
1208
1209                                 /*
1210                                  * If the page has become held it might
1211                                  * be undergoing I/O, so skip it
1212                                  */
1213                                 if (m->hold_count) {
1214                                         vm_page_unlock(m);
1215                                         vm_page_requeue_locked(m);
1216                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1217                                                 vnodes_skipped++;
1218                                         goto unlock_and_continue;
1219                                 }
1220                                 vm_pagequeue_unlock(pq);
1221                                 queues_locked = FALSE;
1222                         }
1223
1224                         /*
1225                          * If a page is dirty, then it is either being washed
1226                          * (but not yet cleaned) or it is still in the
1227                          * laundry.  If it is still in the laundry, then we
1228                          * start the cleaning operation. 
1229                          *
1230                          * Decrement page_shortage on success to account for
1231                          * the (future) cleaned page.  Otherwise we could wind
1232                          * up laundering or cleaning too many pages.
1233                          */
1234                         if (vm_pageout_clean(m) != 0) {
1235                                 --page_shortage;
1236                                 --maxlaunder;
1237                         }
1238 unlock_and_continue:
1239                         vm_page_lock_assert(m, MA_NOTOWNED);
1240                         VM_OBJECT_WUNLOCK(object);
1241                         if (mp != NULL) {
1242                                 if (queues_locked) {
1243                                         vm_pagequeue_unlock(pq);
1244                                         queues_locked = FALSE;
1245                                 }
1246                                 if (vp != NULL)
1247                                         vput(vp);
1248                                 vm_object_deallocate(object);
1249                                 vn_finished_write(mp);
1250                         }
1251                         vm_page_lock_assert(m, MA_NOTOWNED);
1252                         goto relock_queues;
1253                 }
1254                 vm_page_unlock(m);
1255                 VM_OBJECT_WUNLOCK(object);
1256 relock_queues:
1257                 if (!queues_locked) {
1258                         vm_pagequeue_lock(pq);
1259                         queues_locked = TRUE;
1260                 }
1261                 next = TAILQ_NEXT(&marker, pageq);
1262                 TAILQ_REMOVE(&pq->pq_pl, &marker, pageq);
1263         }
1264         vm_pagequeue_unlock(pq);
1265
1266         /*
1267          * Compute the number of pages we want to try to move from the
1268          * active queue to the inactive queue.
1269          */
1270         page_shortage = vm_paging_target() +
1271                 cnt.v_inactive_target - cnt.v_inactive_count;
1272         page_shortage += addl_page_shortage;
1273
1274         /*
1275          * Scan the active queue for things we can deactivate. We nominally
1276          * track the per-page activity counter and use it to locate
1277          * deactivation candidates.
1278          */
1279         pcount = cnt.v_active_count;
1280         pq = &vm_pagequeues[PQ_ACTIVE];
1281         vm_pagequeue_lock(pq);
1282         m = TAILQ_FIRST(&pq->pq_pl);
1283         while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
1284
1285                 KASSERT(m->queue == PQ_ACTIVE,
1286                     ("vm_pageout_scan: page %p isn't active", m));
1287
1288                 next = TAILQ_NEXT(m, pageq);
1289                 if ((m->flags & PG_MARKER) != 0) {
1290                         m = next;
1291                         continue;
1292                 }
1293                 KASSERT((m->flags & PG_FICTITIOUS) == 0,
1294                     ("Fictitious page %p cannot be in active queue", m));
1295                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1296                     ("Unmanaged page %p cannot be in active queue", m));
1297                 if (!vm_pageout_page_lock(m, &next)) {
1298                         vm_page_unlock(m);
1299                         m = next;
1300                         continue;
1301                 }
1302                 object = m->object;
1303                 if (!VM_OBJECT_TRYWLOCK(object) &&
1304                     !vm_pageout_fallback_object_lock(m, &next)) {
1305                         VM_OBJECT_WUNLOCK(object);
1306                         vm_page_unlock(m);
1307                         m = next;
1308                         continue;
1309                 }
1310
1311                 /*
1312                  * Don't deactivate pages that are busy.
1313                  */
1314                 if ((m->busy != 0) ||
1315                     (m->oflags & VPO_BUSY) ||
1316                     (m->hold_count != 0)) {
1317                         vm_page_unlock(m);
1318                         VM_OBJECT_WUNLOCK(object);
1319                         vm_page_requeue_locked(m);
1320                         m = next;
1321                         continue;
1322                 }
1323
1324                 /*
1325                  * The pagedaemon page count is only updated after the
1326                  * page has passed the eligibility checks above.
1327                  */
1328                 PCPU_INC(cnt.v_pdpages);
1329
1330                 /*
1331                  * Check to see "how much" the page has been used.
1332                  */
1333                 actcount = 0;
1334                 if (object->ref_count != 0) {
1335                         if (m->aflags & PGA_REFERENCED) {
1336                                 actcount += 1;
1337                         }
1338                         actcount += pmap_ts_referenced(m);
1339                         if (actcount) {
1340                                 m->act_count += ACT_ADVANCE + actcount;
1341                                 if (m->act_count > ACT_MAX)
1342                                         m->act_count = ACT_MAX;
1343                         }
1344                 }
1345
1346                 /*
1347                  * Since we have "tested" this bit, we need to clear it now.
1348                  */
1349                 vm_page_aflag_clear(m, PGA_REFERENCED);
1350
1351                 /*
1352                  * Only use the page activation count statistics if the
1353                  * object is currently in use (referenced).
1354                  */
1355                 if (actcount != 0 && object->ref_count != 0)
1356                         vm_page_requeue_locked(m);
1357                 else {
1358                         m->act_count -= min(m->act_count, ACT_DECLINE);
1359                         if (vm_pageout_algorithm ||
1360                             object->ref_count == 0 ||
1361                             m->act_count == 0) {
1362                                 page_shortage--;
1363                                 /* Dequeue to avoid later lock recursion. */
1364                                 vm_page_dequeue_locked(m);
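                                     /*
                                      * With the object unreferenced, a clean page
                                      * can go straight to the cache; dirty pages
                                      * are deactivated so the inactive scan can
                                      * launder them.
                                      */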
1365                                 if (object->ref_count == 0) {
1366                                         KASSERT(!pmap_page_is_mapped(m),
1367                                     ("vm_pageout_scan: page %p is mapped", m));
1368                                         if (m->dirty == 0)
1369                                                 vm_page_cache(m);
1370                                         else
1371                                                 vm_page_deactivate(m);
1372                                 } else {
1373                                         vm_page_deactivate(m);
1374                                 }
1375                         } else
1376                                 vm_page_requeue_locked(m);
1377                 }
1378                 vm_page_unlock(m);
1379                 VM_OBJECT_WUNLOCK(object);
1380                 m = next;
1381         }
1382         vm_pagequeue_unlock(pq);
1383 #if !defined(NO_SWAPPING)
1384         /*
1385          * Idle process swapout -- run once per second.
1386          */
1387         if (vm_swap_idle_enabled) {
1388                 static long lsec;
1389                 if (time_second != lsec) {
1390                         vm_req_vmdaemon(VM_SWAP_IDLE);
1391                         lsec = time_second;
1392                 }
1393         }
1394 #endif
1395
1396         /*
1397          * If we did not get enough free pages and we skipped a vnode
1398          * in a writeable object, wake up the sync daemon.  Also kick
1399          * off swapout if we did not get enough free pages.
1400          */
1401         if (vm_paging_target() > 0) {
1402                 if (vnodes_skipped && vm_page_count_min())
1403                         (void) speedup_syncer();
1404 #if !defined(NO_SWAPPING)
1405                 if (vm_swap_enabled && vm_page_count_target())
1406                         vm_req_vmdaemon(VM_SWAP_NORMAL);
1407 #endif
1408         }
1409
1410         /*
1411          * If we are critically low on one of RAM or swap and low on
1412          * the other, kill the largest process.  However, we avoid
1413          * doing this on the first pass in order to give ourselves a
1414          * chance to flush out dirty vnode-backed pages and to allow
1415          * active pages to be moved to the inactive queue and reclaimed.
1416          */
1417         if (pass != 0 &&
1418             ((swap_pager_avail < 64 && vm_page_count_min()) ||
1419              (swap_pager_full && vm_paging_target() > 0)))
1420                 vm_pageout_oom(VM_OOM_MEM);
1421 }
1422
1423
1424 void
1425 vm_pageout_oom(int shortage)
1426 {
1427         struct proc *p, *bigproc;
1428         vm_offset_t size, bigsize;
1429         struct thread *td;
1430         struct vmspace *vm;
1431
1432         /*
1433          * We keep the process bigproc locked once we find it to keep anyone
1434          * from messing with it; however, there is a possibility of
1435          * deadlock if process B is bigproc and one of its child processes
1436          * attempts to propagate a signal to B while we are waiting for the
1437          * lock of another process on the list.  To avoid this, we don't block on
1438          * the process lock but just skip a process if it is already locked.
1439          */
1440         bigproc = NULL;
1441         bigsize = 0;
1442         sx_slock(&allproc_lock);
1443         FOREACH_PROC_IN_SYSTEM(p) {
1444                 int breakout;
1445
1446                 if (PROC_TRYLOCK(p) == 0)
1447                         continue;
1448                 /*
1449                  * If this is a system, protected or killed process, skip it.
1450                  */
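                     /*
                      * Processes with a low pid (< 48) are also skipped as
                      * long as some swap space remains.
                      */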
1451                 if (p->p_state != PRS_NORMAL ||
1452                     (p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
1453                     (p->p_pid == 1) || P_KILLED(p) ||
1454                     ((p->p_pid < 48) && (swap_pager_avail != 0))) {
1455                         PROC_UNLOCK(p);
1456                         continue;
1457                 }
1458                 /*
1459                  * Skip the process if any of its threads is not in a
1460                  * normal run queue, running, sleeping, or suspended state.
1461                  */
1462                 breakout = 0;
1463                 FOREACH_THREAD_IN_PROC(p, td) {
1464                         thread_lock(td);
1465                         if (!TD_ON_RUNQ(td) &&
1466                             !TD_IS_RUNNING(td) &&
1467                             !TD_IS_SLEEPING(td) &&
1468                             !TD_IS_SUSPENDED(td)) {
1469                                 thread_unlock(td);
1470                                 breakout = 1;
1471                                 break;
1472                         }
1473                         thread_unlock(td);
1474                 }
1475                 if (breakout) {
1476                         PROC_UNLOCK(p);
1477                         continue;
1478                 }
1479                 /*
1480                  * get the process size
1481                  */
1482                 vm = vmspace_acquire_ref(p);
1483                 if (vm == NULL) {
1484                         PROC_UNLOCK(p);
1485                         continue;
1486                 }
1487                 if (!vm_map_trylock_read(&vm->vm_map)) {
1488                         vmspace_free(vm);
1489                         PROC_UNLOCK(p);
1490                         continue;
1491                 }
1492                 size = vmspace_swap_count(vm);
1493                 vm_map_unlock_read(&vm->vm_map);
1494                 if (shortage == VM_OOM_MEM)
1495                         size += vmspace_resident_count(vm);
1496                 vmspace_free(vm);
1497                 /*
1498                  * If this process is bigger than the biggest one so far,
1499                  * remember it.
1500                  */
1501                 if (size > bigsize) {
1502                         if (bigproc != NULL)
1503                                 PROC_UNLOCK(bigproc);
1504                         bigproc = p;
1505                         bigsize = size;
1506                 } else
1507                         PROC_UNLOCK(p);
1508         }
1509         sx_sunlock(&allproc_lock);
1510         if (bigproc != NULL) {
1511                 killproc(bigproc, "out of swap space");
1512                 sched_nice(bigproc, PRIO_MIN);
1513                 PROC_UNLOCK(bigproc);
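                     /* Wake up any threads sleeping on the free page count. */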
1514                 wakeup(&cnt.v_free_count);
1515         }
1516 }
1517
1518 /*
1519  * This routine tries to maintain the pseudo-LRU active queue so that
1520  * some statistics accumulation still occurs during long periods with
1521  * no paging.  This helps the situation when paging first starts to
1522  * occur.
1523  */
1524 static void
1525 vm_pageout_page_stats(void)
1526 {
1527         struct vm_pagequeue *pq;
1528         vm_object_t object;
1529         vm_page_t m, next;
1530         int pcount, tpcount;            /* Number of pages to check */
1531         static int fullintervalcount = 0;
1532         int page_shortage;
1533
1534         page_shortage = 
1535             (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
1536             (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
1537
1538         if (page_shortage <= 0)
1539                 return;
1540
1541         pcount = cnt.v_active_count;
1542         fullintervalcount += vm_pageout_stats_interval;
1543         if (fullintervalcount < vm_pageout_full_stats_interval) {
1544                 vm_pageout_stats++;
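                     /*
                      * On a partial scan, check at most vm_pageout_stats_max
                      * pages, scaled down by the active queue's share of all
                      * pages.
                      */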
1545                 tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count /
1546                     cnt.v_page_count;
1547                 if (pcount > tpcount)
1548                         pcount = tpcount;
1549         } else {
1550                 vm_pageout_full_stats++;
1551                 fullintervalcount = 0;
1552         }
1553
1554         pq = &vm_pagequeues[PQ_ACTIVE];
1555         vm_pagequeue_lock(pq);
1556         m = TAILQ_FIRST(&pq->pq_pl);
1557         while ((m != NULL) && (pcount-- > 0)) {
1558                 int actcount;
1559
1560                 KASSERT(m->queue == PQ_ACTIVE,
1561                     ("vm_pageout_page_stats: page %p isn't active", m));
1562
1563                 next = TAILQ_NEXT(m, pageq);
1564                 if ((m->flags & PG_MARKER) != 0) {
1565                         m = next;
1566                         continue;
1567                 }
1568                 vm_page_lock_assert(m, MA_NOTOWNED);
1569                 if (!vm_pageout_page_lock(m, &next)) {
1570                         vm_page_unlock(m);
1571                         m = next;
1572                         continue;
1573                 }
1574                 object = m->object;
1575                 if (!VM_OBJECT_TRYWLOCK(object) &&
1576                     !vm_pageout_fallback_object_lock(m, &next)) {
1577                         VM_OBJECT_WUNLOCK(object);
1578                         vm_page_unlock(m);
1579                         m = next;
1580                         continue;
1581                 }
1582
1583                 /*
1584                  * Don't deactivate pages that are busy.
1585                  */
1586                 if ((m->busy != 0) ||
1587                     (m->oflags & VPO_BUSY) ||
1588                     (m->hold_count != 0)) {
1589                         vm_page_unlock(m);
1590                         VM_OBJECT_WUNLOCK(object);
1591                         vm_page_requeue_locked(m);
1592                         m = next;
1593                         continue;
1594                 }
1595
1596                 actcount = 0;
1597                 if (m->aflags & PGA_REFERENCED) {
1598                         vm_page_aflag_clear(m, PGA_REFERENCED);
1599                         actcount += 1;
1600                 }
1601
1602                 actcount += pmap_ts_referenced(m);
1603                 if (actcount) {
1604                         m->act_count += ACT_ADVANCE + actcount;
1605                         if (m->act_count > ACT_MAX)
1606                                 m->act_count = ACT_MAX;
1607                         vm_page_requeue_locked(m);
1608                 } else {
1609                         if (m->act_count == 0) {
1610                                 /*
1611                                  * We turn off page access so that we have
1612                                  * more accurate RSS stats.  We don't do this
1613                                  * during normal page deactivation when the
1614                                  * system is under VM load, because the cost
1615                                  * of the large number of page-protect
1616                                  * operations would outweigh the benefit of
1617                                  * doing them.
1618                                  */
1619                                 pmap_remove_all(m);
1620                                 /* Dequeue to avoid later lock recursion. */
1621                                 vm_page_dequeue_locked(m);
1622                                 vm_page_deactivate(m);
1623                         } else {
1624                                 m->act_count -= min(m->act_count, ACT_DECLINE);
1625                                 vm_page_requeue_locked(m);
1626                         }
1627                 }
1628                 vm_page_unlock(m);
1629                 VM_OBJECT_WUNLOCK(object);
1630                 m = next;
1631         }
1632         vm_pagequeue_unlock(pq);
1633 }
1634
1635 /*
1636  *      vm_pageout is the high level pageout daemon.
1637  */
1638 static void
1639 vm_pageout(void)
1640 {
1641         int error, pass;
1642
1643         /*
1644          * Initialize some paging parameters.
1645          */
1646         cnt.v_interrupt_free_min = 2;
1647         if (cnt.v_page_count < 2000)
1648                 vm_pageout_page_count = 8;
1649
1650         /*
1651          * v_free_reserved needs to include enough for the largest
1652          * swap pager structures plus enough for any pv_entry structs
1653          * when paging. 
1654          */
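             /*
              * v_free_min: 4 pages plus roughly 0.5% of the pages beyond
              * the first 1024.
              */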
1655         if (cnt.v_page_count > 1024)
1656                 cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1657         else
1658                 cnt.v_free_min = 4;
1659         cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1660             cnt.v_interrupt_free_min;
1661         cnt.v_free_reserved = vm_pageout_page_count +
1662             cnt.v_pageout_free_min + (cnt.v_page_count / 768);
1663         cnt.v_free_severe = cnt.v_free_min / 2;
1664         cnt.v_free_min += cnt.v_free_reserved;
1665         cnt.v_free_severe += cnt.v_free_reserved;
1666
1667         /*
1668          * v_free_target and v_cache_min control pageout hysteresis.  Note
1669          * that these are more a measure of the VM cache queue hysteresis
1670          * than the VM free queue.  Specifically, v_free_target is the
1671          * high water mark (free+cache pages).
1672          *
1673          * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1674          * low water mark, while v_free_min is the stop.  v_cache_min must
1675          * be big enough to handle memory needs while the pageout daemon
1676          * is signalled and run to free more pages.
1677          */
1678         if (cnt.v_free_count > 6144)
1679                 cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
1680         else
1681                 cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
1682
1683         if (cnt.v_free_count > 2048) {
1684                 cnt.v_cache_min = cnt.v_free_target;
1685                 cnt.v_cache_max = 2 * cnt.v_cache_min;
1686                 cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
1687         } else {
1688                 cnt.v_cache_min = 0;
1689                 cnt.v_cache_max = 0;
1690                 cnt.v_inactive_target = cnt.v_free_count / 4;
1691         }
1692         if (cnt.v_inactive_target > cnt.v_free_count / 3)
1693                 cnt.v_inactive_target = cnt.v_free_count / 3;
1694
1695         /* XXX does not really belong here */
1696         if (vm_page_max_wired == 0)
1697                 vm_page_max_wired = cnt.v_free_count / 3;
1698
1699         if (vm_pageout_stats_max == 0)
1700                 vm_pageout_stats_max = cnt.v_free_target;
1701
1702         /*
1703          * Set interval in seconds for stats scan.
1704          */
1705         if (vm_pageout_stats_interval == 0)
1706                 vm_pageout_stats_interval = 5;
1707         if (vm_pageout_full_stats_interval == 0)
1708                 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1709
1710         swap_pager_swap_init();
1711         pass = 0;
1712         /*
1713          * The pageout daemon is never done, so loop forever.
1714          */
1715         while (TRUE) {
1716                 /*
1717                  * If we have enough free memory, wakeup waiters.  Do
1718                  * not clear vm_pages_needed until we reach our target,
1719                  * otherwise we may be woken up over and over again and
1720                  * waste a lot of cpu.
1721                  */
1722                 mtx_lock(&vm_page_queue_free_mtx);
1723                 if (vm_pages_needed && !vm_page_count_min()) {
1724                         if (!vm_paging_needed())
1725                                 vm_pages_needed = 0;
1726                         wakeup(&cnt.v_free_count);
1727                 }
1728                 if (vm_pages_needed) {
1729                         /*
1730                          * Still not done, take a second pass without waiting
1731                          * (unlimited dirty cleaning), otherwise sleep a bit
1732                          * and try again.
1733                          */
1734                         ++pass;
1735                         if (pass > 1)
1736                                 msleep(&vm_pages_needed,
1737                                     &vm_page_queue_free_mtx, PVM, "psleep",
1738                                     hz / 2);
1739                 } else {
1740                         /*
1741                          * Good enough, sleep & handle stats.  Prime the pass
1742                          * for the next run.
1743                          */
1744                         if (pass > 1)
1745                                 pass = 1;
1746                         else
1747                                 pass = 0;
1748                         error = msleep(&vm_pages_needed,
1749                             &vm_page_queue_free_mtx, PVM, "psleep",
1750                             vm_pageout_stats_interval * hz);
1751                         if (error && !vm_pages_needed) {
1752                                 mtx_unlock(&vm_page_queue_free_mtx);
1753                                 pass = 0;
1754                                 vm_pageout_page_stats();
1755                                 continue;
1756                         }
1757                 }
1758                 if (vm_pages_needed)
1759                         cnt.v_pdwakeups++;
1760                 mtx_unlock(&vm_page_queue_free_mtx);
1761                 vm_pageout_scan(pass);
1762         }
1763 }
1764
1765 /*
1766  * Unless the free page queue lock is held by the caller, this function
1767  * should be regarded as advisory.  Specifically, the caller should
1768  * not msleep() on &cnt.v_free_count following this function unless
1769  * the free page queue lock is held until the msleep() is performed.
1770  */
1771 void
1772 pagedaemon_wakeup(void)
1773 {
1774
1775         if (!vm_pages_needed && curthread->td_proc != pageproc) {
1776                 vm_pages_needed = 1;
1777                 wakeup(&vm_pages_needed);
1778         }
1779 }
1780
1781 #if !defined(NO_SWAPPING)
1782 static void
1783 vm_req_vmdaemon(int req)
1784 {
1785         static int lastrun = 0;
1786
1787         mtx_lock(&vm_daemon_mtx);
1788         vm_pageout_req_swapout |= req;
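             /*
              * Wake the vm daemon at most once per second; the second
              * test handles wraparound of the ticks counter.
              */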
1789         if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
1790                 wakeup(&vm_daemon_needed);
1791                 lastrun = ticks;
1792         }
1793         mtx_unlock(&vm_daemon_mtx);
1794 }
1795
1796 static void
1797 vm_daemon(void)
1798 {
1799         struct rlimit rsslim;
1800         struct proc *p;
1801         struct thread *td;
1802         struct vmspace *vm;
1803         int breakout, swapout_flags, tryagain, attempts;
1804 #ifdef RACCT
1805         uint64_t rsize, ravailable;
1806 #endif
1807
1808         while (TRUE) {
1809                 mtx_lock(&vm_daemon_mtx);
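                     /*
                      * With RACCT the sleep times out after a second so that
                      * the RSS accounting below runs periodically; otherwise
                      * the daemon sleeps until it is explicitly woken.
                      */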
1810 #ifdef RACCT
1811                 msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", hz);
1812 #else
1813                 msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 0);
1814 #endif
1815                 swapout_flags = vm_pageout_req_swapout;
1816                 vm_pageout_req_swapout = 0;
1817                 mtx_unlock(&vm_daemon_mtx);
1818                 if (swapout_flags)
1819                         swapout_procs(swapout_flags);
1820
1821                 /*
1822                  * Scan the processes for those exceeding their RSS rlimit
1823                  * or that are swapped out, and deactivate their pages.
1824                  */
1825                 tryagain = 0;
1826                 attempts = 0;
1827 again:
1828                 attempts++;
1829                 sx_slock(&allproc_lock);
1830                 FOREACH_PROC_IN_SYSTEM(p) {
1831                         vm_pindex_t limit, size;
1832
1833                         /*
1834                          * if this is a system process or if we have already
1835                          * looked at this process, skip it.
1836                          */
1837                         PROC_LOCK(p);
1838                         if (p->p_state != PRS_NORMAL ||
1839                             p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
1840                                 PROC_UNLOCK(p);
1841                                 continue;
1842                         }
1843                         /*
1844                          * Skip the process if any of its threads is not in a
1845                          * normal run queue, running, sleeping, or suspended state.
1846                          */
1847                         breakout = 0;
1848                         FOREACH_THREAD_IN_PROC(p, td) {
1849                                 thread_lock(td);
1850                                 if (!TD_ON_RUNQ(td) &&
1851                                     !TD_IS_RUNNING(td) &&
1852                                     !TD_IS_SLEEPING(td) &&
1853                                     !TD_IS_SUSPENDED(td)) {
1854                                         thread_unlock(td);
1855                                         breakout = 1;
1856                                         break;
1857                                 }
1858                                 thread_unlock(td);
1859                         }
1860                         if (breakout) {
1861                                 PROC_UNLOCK(p);
1862                                 continue;
1863                         }
1864                         /*
1865                          * get a limit
1866                          */
1867                         lim_rlimit(p, RLIMIT_RSS, &rsslim);
1868                         limit = OFF_TO_IDX(
1869                             qmin(rsslim.rlim_cur, rsslim.rlim_max));
1870
1871                         /*
1872                          * Let processes that are swapped out really be
1873                          * swapped out: set the limit to nothing (this
1874                          * will force a swap-out).
1875                          */
1876                         if ((p->p_flag & P_INMEM) == 0)
1877                                 limit = 0;      /* XXX */
1878                         vm = vmspace_acquire_ref(p);
1879                         PROC_UNLOCK(p);
1880                         if (vm == NULL)
1881                                 continue;
1882
1883                         size = vmspace_resident_count(vm);
1884                         if (size >= limit) {
1885                                 vm_pageout_map_deactivate_pages(
1886                                     &vm->vm_map, limit);
1887                         }
1888 #ifdef RACCT
1889                         rsize = IDX_TO_OFF(size);
1890                         PROC_LOCK(p);
1891                         racct_set(p, RACCT_RSS, rsize);
1892                         ravailable = racct_get_available(p, RACCT_RSS);
1893                         PROC_UNLOCK(p);
1894                         if (rsize > ravailable) {
1895                                 /*
1896                                  * Don't be overly aggressive; this might be
1897                                  * an innocent process, and the limit could've
1898                                  * been exceeded by some memory hog.  Don't
1899                                  * try to deactivate more than a quarter of
1900                                  * the process's resident set size.
1901                                  */
1902                                 if (attempts <= 8) {
1903                                         if (ravailable < rsize - (rsize / 4))
1904                                                 ravailable = rsize - (rsize / 4);
1905                                 }
1906                                 vm_pageout_map_deactivate_pages(
1907                                     &vm->vm_map, OFF_TO_IDX(ravailable));
1908                                 /* Update RSS usage after paging out. */
1909                                 size = vmspace_resident_count(vm);
1910                                 rsize = IDX_TO_OFF(size);
1911                                 PROC_LOCK(p);
1912                                 racct_set(p, RACCT_RSS, rsize);
1913                                 PROC_UNLOCK(p);
1914                                 if (rsize > ravailable)
1915                                         tryagain = 1;
1916                         }
1917 #endif
1918                         vmspace_free(vm);
1919                 }
1920                 sx_sunlock(&allproc_lock);
1921                 if (tryagain != 0 && attempts <= 10)
1922                         goto again;
1923         }
1924 }
1925 #endif                  /* !defined(NO_SWAPPING) */