1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  * Copyright (c) 2005 Yahoo! Technologies Norway AS
9  * All rights reserved.
10  *
11  * This code is derived from software contributed to Berkeley by
12  * The Mach Operating System project at Carnegie-Mellon University.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. All advertising materials mentioning features or use of this software
23  *    must display the following acknowledgement:
24  *      This product includes software developed by the University of
25  *      California, Berkeley and its contributors.
26  * 4. Neither the name of the University nor the names of its contributors
27  *    may be used to endorse or promote products derived from this software
28  *    without specific prior written permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40  * SUCH DAMAGE.
41  *
42  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
43  *
44  *
45  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46  * All rights reserved.
47  *
48  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
49  *
50  * Permission to use, copy, modify and distribute this software and
51  * its documentation is hereby granted, provided that both the copyright
52  * notice and this permission notice appear in all copies of the
53  * software, derivative works or modified versions, and any portions
54  * thereof, and that both notices appear in supporting documentation.
55  *
56  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
57  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
58  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
59  *
60  * Carnegie Mellon requests users of this software to return to
61  *
62  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
63  *  School of Computer Science
64  *  Carnegie Mellon University
65  *  Pittsburgh PA 15213-3890
66  *
67  * any improvements or extensions that they make and grant Carnegie the
68  * rights to redistribute these changes.
69  */
70
71 /*
72  *      The proverbial page-out daemon.
73  */
74
75 #include <sys/cdefs.h>
76 __FBSDID("$FreeBSD$");
77
78 #include "opt_vm.h"
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/eventhandler.h>
83 #include <sys/lock.h>
84 #include <sys/mutex.h>
85 #include <sys/proc.h>
86 #include <sys/kthread.h>
87 #include <sys/ktr.h>
88 #include <sys/mount.h>
89 #include <sys/racct.h>
90 #include <sys/resourcevar.h>
91 #include <sys/sched.h>
92 #include <sys/signalvar.h>
93 #include <sys/vnode.h>
94 #include <sys/vmmeter.h>
95 #include <sys/sx.h>
96 #include <sys/sysctl.h>
97
98 #include <vm/vm.h>
99 #include <vm/vm_param.h>
100 #include <vm/vm_object.h>
101 #include <vm/vm_page.h>
102 #include <vm/vm_map.h>
103 #include <vm/vm_pageout.h>
104 #include <vm/vm_pager.h>
105 #include <vm/swap_pager.h>
106 #include <vm/vm_extern.h>
107 #include <vm/uma.h>
108
109 /*
110  * System initialization
111  */
112
113 /* the kernel process "vm_pageout"*/
114 static void vm_pageout(void);
115 static int vm_pageout_clean(vm_page_t);
116 static void vm_pageout_scan(int pass);
117
118 struct proc *pageproc;
119
120 static struct kproc_desc page_kp = {
121         "pagedaemon",
122         vm_pageout,
123         &pageproc
124 };
125 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
126     &page_kp);
127
128 #if !defined(NO_SWAPPING)
129 /* the kernel process "vm_daemon"*/
130 static void vm_daemon(void);
131 static struct   proc *vmproc;
132
133 static struct kproc_desc vm_kp = {
134         "vmdaemon",
135         vm_daemon,
136         &vmproc
137 };
138 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
139 #endif
140
141
142 int vm_pages_needed;            /* Event on which pageout daemon sleeps */
143 int vm_pageout_deficit;         /* Estimated number of pages deficit */
144 int vm_pageout_pages_needed;    /* flag saying that the pageout daemon needs pages */
145
146 #if !defined(NO_SWAPPING)
147 static int vm_pageout_req_swapout;      /* XXX */
148 static int vm_daemon_needed;
149 static struct mtx vm_daemon_mtx;
150 /* Allow for use by vm_pageout before vm_daemon is initialized. */
151 MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
152 #endif
153 static int vm_max_launder = 32;
154 static int vm_pageout_stats_max;
155 static int vm_pageout_stats_interval;
156 static int vm_pageout_full_stats_interval;
157 static int vm_pageout_algorithm;
158 static int defer_swap_pageouts;
159 static int disable_swap_pageouts;
160
161 #if defined(NO_SWAPPING)
162 static int vm_swap_enabled = 0;
163 static int vm_swap_idle_enabled = 0;
164 #else
165 static int vm_swap_enabled = 1;
166 static int vm_swap_idle_enabled = 0;
167 #endif
168
169 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
170         CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
171
172 SYSCTL_INT(_vm, OID_AUTO, max_launder,
173         CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
174
175 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
176         CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
177
178 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
179         CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
180
181 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
182         CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
183
184 #if defined(NO_SWAPPING)
185 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
186         CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
187 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
188         CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
189 #else
190 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
191         CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
192 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
193         CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
194 #endif
195
196 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
197         CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
198
199 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
200         CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
201
202 static int pageout_lock_miss;
203 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
204         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
205
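/*
 * Maximum number of pages that vm_pageout_clean() will gather into a
 * single cluster and hand to vm_pageout_flush().
 */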
206 #define VM_PAGEOUT_PAGE_COUNT 16
207 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
208
209 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
210 SYSCTL_INT(_vm, OID_AUTO, max_wired,
211         CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
212
213 static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
214 static boolean_t vm_pageout_launder(int, int, vm_paddr_t, vm_paddr_t);
215 #if !defined(NO_SWAPPING)
216 static void vm_pageout_map_deactivate_pages(vm_map_t, long);
217 static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
218 static void vm_req_vmdaemon(int req);
219 #endif
220 static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
221 static void vm_pageout_page_stats(void);
222
223 /*
224  * Initialize a dummy page for marking the caller's place in the specified
225  * paging queue.  In principle, this function only needs to set the flag
226  * PG_MARKER.  Nonetheless, it sets the flag VPO_BUSY and initializes the hold
227  * count to one as safety precautions.
228  */ 
229 static void
230 vm_pageout_init_marker(vm_page_t marker, u_short queue)
231 {
232
233         bzero(marker, sizeof(*marker));
234         marker->flags = PG_MARKER;
235         marker->oflags = VPO_BUSY;
236         marker->queue = queue;
237         marker->hold_count = 1;
238 }
239
240 /*
241  * vm_pageout_fallback_object_lock:
242  * 
243  * Lock vm object currently associated with `m'. VM_OBJECT_TRYLOCK is
244  * known to have failed and page queue must be either PQ_ACTIVE or
245  * PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
246  * while locking the vm object.  Use marker page to detect page queue
247  * changes and maintain notion of next page on page queue.  Return
248  * TRUE if no changes were detected, FALSE otherwise.  vm object is
249  * locked on return.
250  * 
251  * This function depends on both the lock portion of struct vm_object
252  * and normal struct vm_page being type stable.
253  */
254 static boolean_t
255 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
256 {
257         struct vm_page marker;
258         struct vm_pagequeue *pq;
259         boolean_t unchanged;
260         u_short queue;
261         vm_object_t object;
262
263         queue = m->queue;
264         vm_pageout_init_marker(&marker, queue);
265         pq = &vm_pagequeues[queue];
266         object = m->object;
267         
268         TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
269         vm_pagequeue_unlock(pq);
270         vm_page_unlock(m);
271         VM_OBJECT_LOCK(object);
272         vm_page_lock(m);
273         vm_pagequeue_lock(pq);
274
275         /* Page queue might have changed. */
276         *next = TAILQ_NEXT(&marker, pageq);
277         unchanged = (m->queue == queue &&
278                      m->object == object &&
279                      &marker == TAILQ_NEXT(m, pageq));
280         TAILQ_REMOVE(&pq->pq_pl, &marker, pageq);
281         return (unchanged);
282 }
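/*
 * A typical caller pattern (see vm_pageout_scan() below) tries the cheap
 * lock first and falls back to this routine only when that fails, skipping
 * the page if the queue changed underneath it:
 *
 *	object = m->object;
 *	if (!VM_OBJECT_TRYLOCK(object) &&
 *	    !vm_pageout_fallback_object_lock(m, &next)) {
 *		vm_page_unlock(m);
 *		VM_OBJECT_UNLOCK(object);
 *		continue;
 *	}
 */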
283
284 /*
285  * Lock the page while holding the page queue lock.  Use marker page
286  * to detect page queue changes and maintain notion of next page on
287  * page queue.  Return TRUE if no changes were detected, FALSE
288  * otherwise.  The page is locked on return. The page queue lock might
289  * be dropped and reacquired.
290  *
291  * This function depends on normal struct vm_page being type stable.
292  */
293 static boolean_t
294 vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
295 {
296         struct vm_page marker;
297         struct vm_pagequeue *pq;
298         boolean_t unchanged;
299         u_short queue;
300
301         vm_page_lock_assert(m, MA_NOTOWNED);
302         if (vm_page_trylock(m))
303                 return (TRUE);
304
305         queue = m->queue;
306         vm_pageout_init_marker(&marker, queue);
307         pq = &vm_pagequeues[queue];
308
309         TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
310         vm_pagequeue_unlock(pq);
311         vm_page_lock(m);
312         vm_pagequeue_lock(pq);
313
314         /* Page queue might have changed. */
315         *next = TAILQ_NEXT(&marker, pageq);
316         unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
317         TAILQ_REMOVE(&pq->pq_pl, &marker, pageq);
318         return (unchanged);
319 }
320
321 /*
322  * vm_pageout_clean:
323  *
324  * Clean the page and remove it from the laundry.
325  * 
326  * We set the busy bit to cause potential page faults on this page to
327  * block.  Note the careful timing, however: the busy bit isn't set until
328  * late, and until then we cannot do anything that will mess with the page.
329  */
330 static int
331 vm_pageout_clean(vm_page_t m)
332 {
333         vm_object_t object;
334         vm_page_t mc[2*vm_pageout_page_count], pb, ps;
335         int pageout_count;
336         int ib, is, page_base;
337         vm_pindex_t pindex = m->pindex;
338
339         vm_page_lock_assert(m, MA_OWNED);
340         object = m->object;
341         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
342
343         /*
344  * It doesn't cost us anything to page out OBJT_DEFAULT or OBJT_SWAP
345          * with the new swapper, but we could have serious problems paging
346          * out other object types if there is insufficient memory.  
347          *
348          * Unfortunately, checking free memory here is far too late, so the
349          * check has been moved up a procedural level.
350          */
351
352         /*
353          * Can't clean the page if it's busy or held.
354          */
355         KASSERT(m->busy == 0 && (m->oflags & VPO_BUSY) == 0,
356             ("vm_pageout_clean: page %p is busy", m));
357         KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
358         vm_page_unlock(m);
359
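	/*
	 * mc[] holds 2 * vm_pageout_page_count entries with the target page
	 * placed at the midpoint, so the cluster can be grown backwards
	 * (pb/ib) and forwards (ps/is) independently before being handed to
	 * vm_pageout_flush().
	 */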
360         mc[vm_pageout_page_count] = pb = ps = m;
361         pageout_count = 1;
362         page_base = vm_pageout_page_count;
363         ib = 1;
364         is = 1;
365
366         /*
367          * Scan object for clusterable pages.
368          *
369          * We can cluster ONLY if: ->> the page is NOT
370          * clean, wired, busy, held, or mapped into a
371          * buffer, and one of the following:
372          * 1) The page is inactive, or a seldom used
373          *    active page.
374          * -or-
375          * 2) we force the issue.
376          *
377          * During heavy mmap/modification loads the pageout
378          * daemon can really fragment the underlying file
379          * due to flushing pages out of order and not trying
380          * due to flushing pages out of order and not trying to
381          * align the clusters (which leaves sporadic out-of-order
382          * holes).  To solve this problem we do the reverse scan
383          * first and attempt to align our cluster, then do a
384          */
385 more:
386         while (ib && pageout_count < vm_pageout_page_count) {
387                 vm_page_t p;
388
389                 if (ib > pindex) {
390                         ib = 0;
391                         break;
392                 }
393
394                 if ((p = vm_page_prev(pb)) == NULL ||
395                     (p->oflags & VPO_BUSY) != 0 || p->busy != 0) {
396                         ib = 0;
397                         break;
398                 }
399                 vm_page_lock(p);
400                 vm_page_test_dirty(p);
401                 if (p->dirty == 0 ||
402                     p->queue != PQ_INACTIVE ||
403                     p->hold_count != 0) {       /* may be undergoing I/O */
404                         vm_page_unlock(p);
405                         ib = 0;
406                         break;
407                 }
408                 vm_page_unlock(p);
409                 mc[--page_base] = pb = p;
410                 ++pageout_count;
411                 ++ib;
412                 /*
413                  * alignment boundary, stop here and switch directions.  Do
414                  * not clear ib.
415                  */
416                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
417                         break;
418         }
419
420         while (pageout_count < vm_pageout_page_count && 
421             pindex + is < object->size) {
422                 vm_page_t p;
423
424                 if ((p = vm_page_next(ps)) == NULL ||
425                     (p->oflags & VPO_BUSY) != 0 || p->busy != 0)
426                         break;
427                 vm_page_lock(p);
428                 vm_page_test_dirty(p);
429                 if (p->dirty == 0 ||
430                     p->queue != PQ_INACTIVE ||
431                     p->hold_count != 0) {       /* may be undergoing I/O */
432                         vm_page_unlock(p);
433                         break;
434                 }
435                 vm_page_unlock(p);
436                 mc[page_base + pageout_count] = ps = p;
437                 ++pageout_count;
438                 ++is;
439         }
440
441         /*
442          * If we exhausted our forward scan, continue with the reverse scan
443          * when possible, even past a page boundary.  This catches boundary
444          * conditions.
445          */
446         if (ib && pageout_count < vm_pageout_page_count)
447                 goto more;
448
449         /*
450          * we allow reads during pageouts...
451          */
452         return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL,
453             NULL));
454 }
455
456 /*
457  * vm_pageout_flush() - launder the given pages
458  *
459  *      The given pages are laundered.  Note that we set up for the start of
460  *      I/O (i.e., busy the page), mark it read-only, and bump the object
461  *      reference count all in here rather than in the parent.  If we want
462  *      the parent to do more sophisticated things we may have to change
463  *      the ordering.
464  *
465  *      The returned runlen is the count of pages between mreq and the first
466  *      page after mreq with status VM_PAGER_AGAIN.
467  *      *eio is set to TRUE if the pager returned VM_PAGER_ERROR or
468  *      VM_PAGER_FAIL for any page within that run.
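 *
 *      For example (hypothetical values): with count = 6, mreq = 2, and
 *      pageout_status[] = {OK, OK, PEND, AGAIN, OK, OK}, runlen ends up
 *      as 3 - 2 = 1, covering only mc[2], because mc[3] is the first page
 *      at or after mreq that returned VM_PAGER_AGAIN.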
469  */
470 int
471 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
472     boolean_t *eio)
473 {
474         vm_object_t object = mc[0]->object;
475         int pageout_status[count];
476         int numpagedout = 0;
477         int i, runlen;
478
479         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
480
481         /*
482          * Initiate I/O.  Bump the vm_page_t->busy counter and
483          * mark the pages read-only.
484          *
485          * We do not have to fixup the clean/dirty bits here... we can
486          * allow the pager to do it after the I/O completes.
487          *
488          * NOTE! mc[i]->dirty may be partial or fragmented due to an
489          * edge case with file fragments.
490          */
491         for (i = 0; i < count; i++) {
492                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
493                     ("vm_pageout_flush: partially invalid page %p index %d/%d",
494                         mc[i], i, count));
495                 vm_page_io_start(mc[i]);
496                 pmap_remove_write(mc[i]);
497         }
498         vm_object_pip_add(object, count);
499
500         vm_pager_put_pages(object, mc, count, flags, pageout_status);
501
502         runlen = count - mreq;
503         if (eio != NULL)
504                 *eio = FALSE;
505         for (i = 0; i < count; i++) {
506                 vm_page_t mt = mc[i];
507
508                 KASSERT(pageout_status[i] == VM_PAGER_PEND ||
509                     !pmap_page_is_write_mapped(mt),
510                     ("vm_pageout_flush: page %p is not write protected", mt));
511                 switch (pageout_status[i]) {
512                 case VM_PAGER_OK:
513                 case VM_PAGER_PEND:
514                         numpagedout++;
515                         break;
516                 case VM_PAGER_BAD:
517                         /*
518                          * Page outside of range of object. Right now we
519                          * essentially lose the changes by pretending it
520                          * worked.
521                          */
522                         vm_page_undirty(mt);
523                         break;
524                 case VM_PAGER_ERROR:
525                 case VM_PAGER_FAIL:
526                         /*
527                          * If page couldn't be paged out, then reactivate the
528                          * page so it doesn't clog the inactive list.  (We
529                          * will try paging it out again later.)
530                          */
531                         vm_page_lock(mt);
532                         vm_page_activate(mt);
533                         vm_page_unlock(mt);
534                         if (eio != NULL && i >= mreq && i - mreq < runlen)
535                                 *eio = TRUE;
536                         break;
537                 case VM_PAGER_AGAIN:
538                         if (i >= mreq && i - mreq < runlen)
539                                 runlen = i - mreq;
540                         break;
541                 }
542
543                 /*
544                  * If the operation is still going, leave the page busy to
545                  * block all other accesses. Also, leave the paging in
546                  * progress indicator set so that we don't attempt an object
547                  * collapse.
548                  */
549                 if (pageout_status[i] != VM_PAGER_PEND) {
550                         vm_object_pip_wakeup(object);
551                         vm_page_io_finish(mt);
552                         if (vm_page_count_severe()) {
553                                 vm_page_lock(mt);
554                                 vm_page_try_to_cache(mt);
555                                 vm_page_unlock(mt);
556                         }
557                 }
558         }
559         if (prunlen != NULL)
560                 *prunlen = runlen;
561         return (numpagedout);
562 }
563
564 static boolean_t
565 vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
566 {
567         struct mount *mp;
568         struct vm_pagequeue *pq;
569         struct vnode *vp;
570         vm_object_t object;
571         vm_paddr_t pa;
572         vm_page_t m, m_tmp, next;
573
574         pq = &vm_pagequeues[queue];
575         vm_pagequeue_lock(pq);
576         TAILQ_FOREACH_SAFE(m, &pq->pq_pl, pageq, next) {
577                 KASSERT(m->queue == queue,
578                     ("vm_pageout_launder: page %p's queue is not %d", m,
579                     queue));
580                 if ((m->flags & PG_MARKER) != 0)
581                         continue;
582                 pa = VM_PAGE_TO_PHYS(m);
583                 if (pa < low || pa + PAGE_SIZE > high)
584                         continue;
585                 if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
586                         vm_page_unlock(m);
587                         continue;
588                 }
589                 object = m->object;
590                 if ((!VM_OBJECT_TRYLOCK(object) &&
591                     (!vm_pageout_fallback_object_lock(m, &next) ||
592                     m->hold_count != 0)) || (m->oflags & VPO_BUSY) != 0 ||
593                     m->busy != 0) {
594                         vm_page_unlock(m);
595                         VM_OBJECT_UNLOCK(object);
596                         continue;
597                 }
598                 vm_page_test_dirty(m);
599                 if (m->dirty == 0 && object->ref_count != 0)
600                         pmap_remove_all(m);
601                 if (m->dirty != 0) {
602                         vm_page_unlock(m);
603                         if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
604                                 VM_OBJECT_UNLOCK(object);
605                                 continue;
606                         }
607                         if (object->type == OBJT_VNODE) {
608                                 vm_pagequeue_unlock(pq);
609                                 vp = object->handle;
610                                 vm_object_reference_locked(object);
611                                 VM_OBJECT_UNLOCK(object);
612                                 (void)vn_start_write(vp, &mp, V_WAIT);
613                                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
614                                 VM_OBJECT_LOCK(object);
615                                 vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
616                                 VM_OBJECT_UNLOCK(object);
617                                 VOP_UNLOCK(vp, 0);
618                                 vm_object_deallocate(object);
619                                 vn_finished_write(mp);
620                                 return (TRUE);
621                         } else if (object->type == OBJT_SWAP ||
622                             object->type == OBJT_DEFAULT) {
623                                 vm_pagequeue_unlock(pq);
624                                 m_tmp = m;
625                                 vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
626                                     0, NULL, NULL);
627                                 VM_OBJECT_UNLOCK(object);
628                                 return (TRUE);
629                         }
630                 } else {
631                         /*
632                          * Dequeue here to prevent lock recursion in
633                          * vm_page_cache().
634                          */
635                         vm_page_dequeue_locked(m);
636                         vm_page_cache(m);
637                         vm_page_unlock(m);
638                 }
639                 VM_OBJECT_UNLOCK(object);
640         }
641         vm_pagequeue_unlock(pq);
642         return (FALSE);
643 }
644
645 /*
646  * Increase the number of cached pages.  The specified value, "tries",
647  * determines which categories of pages are cached:
648  *
649  *  0: All clean, inactive pages within the specified physical address range
650  *     are cached.  Will not sleep.
651  *  1: The vm_lowmem handlers are called.  All inactive pages within
652  *     the specified physical address range are cached.  May sleep.
653  *  2: The vm_lowmem handlers are called.  All inactive and active pages
654  *     within the specified physical address range are cached.  May sleep.
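 *
 * Callers typically retry with increasing values of "tries" when a request
 * for contiguous physical memory cannot otherwise be satisfied.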
655  */
656 void
657 vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
658 {
659         int actl, actmax, inactl, inactmax;
660
661         if (tries > 0) {
662                 /*
663                  * Decrease registered cache sizes.  The vm_lowmem handlers
664                  * may acquire locks and/or sleep, so they can only be invoked
665                  * when "tries" is greater than zero.
666                  */
667                 EVENTHANDLER_INVOKE(vm_lowmem, 0);
668
669                 /*
670                  * We do this explicitly after the caches have been drained
671                  * above.
672                  */
673                 uma_reclaim();
674         }
675         inactl = 0;
676         inactmax = cnt.v_inactive_count;
677         actl = 0;
678         actmax = tries < 2 ? 0 : cnt.v_active_count;
679 again:
680         if (inactl < inactmax && vm_pageout_launder(PQ_INACTIVE, tries, low,
681             high)) {
682                 inactl++;
683                 goto again;
684         }
685         if (actl < actmax && vm_pageout_launder(PQ_ACTIVE, tries, low, high)) {
686                 actl++;
687                 goto again;
688         }
689 }
690
691 #if !defined(NO_SWAPPING)
692 /*
693  *      vm_pageout_object_deactivate_pages
694  *
695  *      Deactivate enough pages to satisfy the inactive target
696  *      requirements.
697  *
698  *      The object and map must be locked.
699  */
700 static void
701 vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
702     long desired)
703 {
704         vm_object_t backing_object, object;
705         vm_page_t p;
706         int actcount, remove_mode;
707
708         VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
709         if ((first_object->flags & OBJ_FICTITIOUS) != 0)
710                 return;
711         for (object = first_object;; object = backing_object) {
712                 if (pmap_resident_count(pmap) <= desired)
713                         goto unlock_return;
714                 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
715                 if ((object->flags & OBJ_UNMANAGED) != 0 ||
716                     object->paging_in_progress != 0)
717                         goto unlock_return;
718
719                 remove_mode = 0;
720                 if (object->shadow_count > 1)
721                         remove_mode = 1;
722                 /*
723                  * Scan the object's entire memory queue.
724                  */
725                 TAILQ_FOREACH(p, &object->memq, listq) {
726                         if (pmap_resident_count(pmap) <= desired)
727                                 goto unlock_return;
728                         if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0)
729                                 continue;
730                         PCPU_INC(cnt.v_pdpages);
731                         vm_page_lock(p);
732                         if (p->wire_count != 0 || p->hold_count != 0 ||
733                             !pmap_page_exists_quick(pmap, p)) {
734                                 vm_page_unlock(p);
735                                 continue;
736                         }
737                         actcount = pmap_ts_referenced(p);
738                         if ((p->aflags & PGA_REFERENCED) != 0) {
739                                 if (actcount == 0)
740                                         actcount = 1;
741                                 vm_page_aflag_clear(p, PGA_REFERENCED);
742                         }
743                         if (p->queue != PQ_ACTIVE && actcount != 0) {
744                                 vm_page_activate(p);
745                                 p->act_count += actcount;
746                         } else if (p->queue == PQ_ACTIVE) {
747                                 if (actcount == 0) {
748                                         p->act_count -= min(p->act_count,
749                                             ACT_DECLINE);
750                                         if (!remove_mode &&
751                                             (vm_pageout_algorithm ||
752                                             p->act_count == 0)) {
753                                                 pmap_remove_all(p);
754                                                 vm_page_deactivate(p);
755                                         } else
756                                                 vm_page_requeue(p);
757                                 } else {
758                                         vm_page_activate(p);
759                                         if (p->act_count < ACT_MAX -
760                                             ACT_ADVANCE)
761                                                 p->act_count += ACT_ADVANCE;
762                                         vm_page_requeue(p);
763                                 }
764                         } else if (p->queue == PQ_INACTIVE)
765                                 pmap_remove_all(p);
766                         vm_page_unlock(p);
767                 }
768                 if ((backing_object = object->backing_object) == NULL)
769                         goto unlock_return;
770                 VM_OBJECT_LOCK(backing_object);
771                 if (object != first_object)
772                         VM_OBJECT_UNLOCK(object);
773         }
774 unlock_return:
775         if (object != first_object)
776                 VM_OBJECT_UNLOCK(object);
777 }
778
779 /*
780  * deactivate some number of pages in a map, try to do it fairly, but
781  * Deactivate some number of pages in a map; try to do it fairly, but
782  */
783 static void
784 vm_pageout_map_deactivate_pages(vm_map_t map, long desired)
787 {
788         vm_map_entry_t tmpe;
789         vm_object_t obj, bigobj;
790         int nothingwired;
791
792         if (!vm_map_trylock(map))
793                 return;
794
795         bigobj = NULL;
796         nothingwired = TRUE;
797
798         /*
799          * first, search out the biggest object, and try to free pages from
800          * that.
801          */
802         tmpe = map->header.next;
803         while (tmpe != &map->header) {
804                 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
805                         obj = tmpe->object.vm_object;
806                         if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
807                                 if (obj->shadow_count <= 1 &&
808                                     (bigobj == NULL ||
809                                      bigobj->resident_page_count < obj->resident_page_count)) {
810                                         if (bigobj != NULL)
811                                                 VM_OBJECT_UNLOCK(bigobj);
812                                         bigobj = obj;
813                                 } else
814                                         VM_OBJECT_UNLOCK(obj);
815                         }
816                 }
817                 if (tmpe->wired_count > 0)
818                         nothingwired = FALSE;
819                 tmpe = tmpe->next;
820         }
821
822         if (bigobj != NULL) {
823                 vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
824                 VM_OBJECT_UNLOCK(bigobj);
825         }
826         /*
827          * Next, hunt around for other pages to deactivate.  We actually
828          * do this search sort of wrong -- .text first is not the best idea.
829          */
830         tmpe = map->header.next;
831         while (tmpe != &map->header) {
832                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
833                         break;
834                 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
835                         obj = tmpe->object.vm_object;
836                         if (obj != NULL) {
837                                 VM_OBJECT_LOCK(obj);
838                                 vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
839                                 VM_OBJECT_UNLOCK(obj);
840                         }
841                 }
842                 tmpe = tmpe->next;
843         }
844
845         /*
846          * Remove all mappings if a process is swapped out; this will free page
847          * table pages.
848          */
849         if (desired == 0 && nothingwired) {
850                 pmap_remove(vm_map_pmap(map), vm_map_min(map),
851                     vm_map_max(map));
852         }
853         vm_map_unlock(map);
854 }
855 #endif          /* !defined(NO_SWAPPING) */
856
857 /*
858  *      vm_pageout_scan does the dirty work for the pageout daemon.
859  */
860 static void
861 vm_pageout_scan(int pass)
862 {
863         vm_page_t m, next;
864         struct vm_page marker;
865         struct vm_pagequeue *pq;
866         int page_shortage, maxscan, pcount;
867         int addl_page_shortage;
868         vm_object_t object;
869         int actcount;
870         int vnodes_skipped = 0;
871         int maxlaunder;
872         boolean_t queues_locked;
873
874         vm_pageout_init_marker(&marker, PQ_INACTIVE);
875
876         /*
877          * Decrease registered cache sizes.
878          */
879         EVENTHANDLER_INVOKE(vm_lowmem, 0);
880         /*
881          * We do this explicitly after the caches have been drained above.
882          */
883         uma_reclaim();
884
885         /*
886          * The addl_page_shortage is the number of temporarily
887          * stuck pages in the inactive queue.  In other words, the
888          * number of pages from cnt.v_inactive_count that should be
889          * discounted in setting the target for the active queue scan.
890          */
891         addl_page_shortage = atomic_readandclear_int(&vm_pageout_deficit);
892
893         /*
894          * Calculate the number of pages we want to either free or move
895          * to the cache.
896          */
897         page_shortage = vm_paging_target() + addl_page_shortage;
898
899         /*
900          * maxlaunder limits the number of dirty pages we flush per scan.
901          * For most systems a smaller value (16 or 32) is more robust under
902          * extreme memory and disk pressure because any unnecessary writes
903          * to disk can result in extreme performance degradation.  However,
904          * systems with excessive dirty pages (especially when MAP_NOSYNC is
905          * used) will die horribly with limited laundering.  If the pageout
906          * daemon cannot clean enough pages in the first pass, we let it go
907          * all out in succeeding passes.
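         *
         * The limit can be tuned at run time through the vm.max_launder
         * sysctl declared above.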
908          */
909         if ((maxlaunder = vm_max_launder) <= 1)
910                 maxlaunder = 1;
911         if (pass)
912                 maxlaunder = 10000;
913
914         maxscan = cnt.v_inactive_count;
915
916         /*
917          * Start scanning the inactive queue for pages we can move to the
918          * cache or free.  The scan will stop when the target is reached or
919          * we have scanned the entire inactive queue.  Note that m->act_count
920          * is not used to form decisions for the inactive queue, only for the
921          * active queue.
922          */
923         pq = &vm_pagequeues[PQ_INACTIVE];
924         vm_pagequeue_lock(pq);
925         queues_locked = TRUE;
926         for (m = TAILQ_FIRST(&pq->pq_pl);
927              m != NULL && maxscan-- > 0 && page_shortage > 0;
928              m = next) {
929                 vm_pagequeue_assert_locked(pq);
930                 KASSERT(queues_locked, ("unlocked queues"));
931                 KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m));
932
933                 PCPU_INC(cnt.v_pdpages);
934                 next = TAILQ_NEXT(m, pageq);
935
936                 /*
937                  * skip marker pages
938                  */
939                 if (m->flags & PG_MARKER)
940                         continue;
941
942                 KASSERT((m->flags & PG_FICTITIOUS) == 0,
943                     ("Fictitious page %p cannot be in inactive queue", m));
944                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
945                     ("Unmanaged page %p cannot be in inactive queue", m));
946
947                 /*
948                  * The page or object lock acquisitions fail if the
949                  * page was removed from the queue or moved to a
950                  * different position within the queue.  In either
951                  * case, addl_page_shortage should not be incremented.
952                  */
953                 if (!vm_pageout_page_lock(m, &next)) {
954                         vm_page_unlock(m);
955                         continue;
956                 }
957                 object = m->object;
958                 if (!VM_OBJECT_TRYLOCK(object) &&
959                     !vm_pageout_fallback_object_lock(m, &next)) {
960                         vm_page_unlock(m);
961                         VM_OBJECT_UNLOCK(object);
962                         continue;
963                 }
964
965                 /*
966                  * Don't mess with busy pages; keep them at the
967                  * front of the queue, since most likely they are being
968                  * paged out.  Increment addl_page_shortage for busy
969                  * pages, because they may leave the inactive queue
970                  * shortly after the page scan is finished.
971                  */
972                 if (m->busy != 0 || (m->oflags & VPO_BUSY) != 0) {
973                         vm_page_unlock(m);
974                         VM_OBJECT_UNLOCK(object);
975                         addl_page_shortage++;
976                         continue;
977                 }
978
979                 /*
980                  * We unlock the inactive page queue, invalidating the
981                  * 'next' pointer.  Use our marker to remember our
982                  * place.
983                  */
984                 TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
985                 vm_pagequeue_unlock(pq);
986                 queues_locked = FALSE;
987
988                 /*
989                  * If the object is not being used, we ignore previous 
990                  * references.
991                  */
992                 if (object->ref_count == 0) {
993                         vm_page_aflag_clear(m, PGA_REFERENCED);
994                         KASSERT(!pmap_page_is_mapped(m),
995                             ("vm_pageout_scan: page %p is mapped", m));
996
997                 /*
998                  * Otherwise, if the page has been referenced while in the 
999                  * inactive queue, we bump the "activation count" upwards, 
1000                  * making it less likely that the page will be added back to 
1001                  * the inactive queue prematurely again.  Here we check the 
1002                  * page tables (or emulated bits, if any), since the upper
1003                  * level VM system knows nothing about existing
1004                  * references.
1005                  */
1006                 } else if ((m->aflags & PGA_REFERENCED) == 0 &&
1007                     (actcount = pmap_ts_referenced(m)) != 0) {
1008                         vm_page_activate(m);
1009                         vm_page_unlock(m);
1010                         m->act_count += actcount + ACT_ADVANCE;
1011                         VM_OBJECT_UNLOCK(object);
1012                         goto relock_queues;
1013                 }
1014
1015                 /*
1016                  * If the upper level VM system knows about any page 
1017                  * references, we activate the page.  We also set the 
1018                  * "activation count" higher than normal so that we are less
1019                  * likely to place the page back onto the inactive queue again.
1020                  */
1021                 if ((m->aflags & PGA_REFERENCED) != 0) {
1022                         vm_page_aflag_clear(m, PGA_REFERENCED);
1023                         actcount = pmap_ts_referenced(m);
1024                         vm_page_activate(m);
1025                         vm_page_unlock(m);
1026                         m->act_count += actcount + ACT_ADVANCE + 1;
1027                         VM_OBJECT_UNLOCK(object);
1028                         goto relock_queues;
1029                 }
1030
1031                 if (m->hold_count != 0) {
1032                         vm_page_unlock(m);
1033                         VM_OBJECT_UNLOCK(object);
1034
1035                         /*
1036                          * Held pages are essentially stuck in the
1037                          * queue.  So, they ought to be discounted
1038                          * from cnt.v_inactive_count.  See the
1039                          * calculation of the page_shortage for the
1040                          * loop over the active queue below.
1041                          */
1042                         addl_page_shortage++;
1043                         goto relock_queues;
1044                 }
1045
1046                 /*
1047                  * If the page appears to be clean at the machine-independent
1048                  * layer, then remove all of its mappings from the pmap in
1049                  * anticipation of placing it onto the cache queue.  If,
1050                  * however, any of the page's mappings allow write access,
1051                  * then the page may still be modified until the last of those
1052                  * mappings are removed.
1053                  */
1054                 vm_page_test_dirty(m);
1055                 if (m->dirty == 0 && object->ref_count != 0)
1056                         pmap_remove_all(m);
1057
1058                 if (m->valid == 0) {
1059                         /*
1060                          * Invalid pages can be easily freed
1061                          */
1062                         vm_page_free(m);
1063                         PCPU_INC(cnt.v_dfree);
1064                         --page_shortage;
1065                 } else if (m->dirty == 0) {
1066                         /*
1067                          * Clean pages can be placed onto the cache queue.
1068                          * This effectively frees them.
1069                          */
1070                         vm_page_cache(m);
1071                         --page_shortage;
1072                 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
1073                         /*
1074                          * Dirty pages need to be paged out, but flushing
1075                          * a page is extremely expensive versus freeing
1076                          * a clean page.  Rather than artificially limiting
1077                          * the number of pages we can flush, we instead give
1078                          * dirty pages extra priority on the inactive queue
1079                          * by forcing them to be cycled through the queue
1080                          * twice before being flushed, after which the
1081                          * (now clean) page will cycle through once more
1082                          * before being freed.  This significantly extends
1083                          * the thrash point for a heavily loaded machine.
1084                          */
1085                         m->flags |= PG_WINATCFLS;
1086                         vm_pagequeue_lock(pq);
1087                         queues_locked = TRUE;
1088                         vm_page_requeue_locked(m);
1089                 } else if (maxlaunder > 0) {
1090                         /*
1091                          * We always want to try to flush some dirty pages if
1092                          * we encounter them, to keep the system stable.
1093                          * Normally this number is small, but under extreme
1094                          * pressure where there are insufficient clean pages
1095                          * on the inactive queue, we may have to go all out.
1096                          */
1097                         int swap_pageouts_ok;
1098                         struct vnode *vp = NULL;
1099                         struct mount *mp = NULL;
1100
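                        /*
                         * Swap-backed (OBJT_SWAP/OBJT_DEFAULT) pages are never
                         * laundered while swap pageouts are disabled; when they
                         * are merely deferred, laundering is still permitted
                         * once the free page count falls to the minimum.
                         */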
1101                         if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
1102                                 swap_pageouts_ok = 1;
1103                         } else {
1104                                 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
1105                                 swap_pageouts_ok |= (!disable_swap_pageouts &&
1106                                     defer_swap_pageouts && vm_page_count_min());
1108                         }
1109
1110                         /*
1111                          * We don't bother paging out objects that are "dead".
1112                          * Those objects are in a "rundown" state.
1113                          */
1114                         if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
1115                                 vm_pagequeue_lock(pq);
1116                                 vm_page_unlock(m);
1117                                 VM_OBJECT_UNLOCK(object);
1118                                 queues_locked = TRUE;
1119                                 vm_page_requeue_locked(m);
1120                                 goto relock_queues;
1121                         }
1122
1123                         /*
1124                          * The object is already known NOT to be dead.   It
1125                          * is possible for the vget() to block the whole
1126                          * pageout daemon, but the new low-memory handling
1127                          * code should prevent it.
1128                          *
1129                          * The previous code skipped locked vnodes and, worse,
1130                          * reordered pages in the queue.  This results in
1131                          * completely non-deterministic operation and, on a
1132                          * busy system, can lead to extremely non-optimal
1133                          * pageouts.  For example, it can cause clean pages
1134                          * to be freed and dirty pages to be moved to the end
1135                          * of the queue.  Since dirty pages are also moved to
1136                          * the end of the queue once-cleaned, this gives
1137                          * way too large a weighting to deferring the freeing
1138                          * of dirty pages.
1139                          *
1140                          * We can't wait forever for the vnode lock; we might
1141                          * deadlock due to a vn_read() getting stuck in
1142                          * vm_wait while holding this vnode.  We skip the 
1143                          * vnode if we can't get it in a reasonable amount
1144                          * of time.
1145                          */
1146                         if (object->type == OBJT_VNODE) {
1147                                 vm_page_unlock(m);
1148                                 vp = object->handle;
1149                                 if (vp->v_type == VREG &&
1150                                     vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1151                                         mp = NULL;
1152                                         ++pageout_lock_miss;
1153                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1154                                                 vnodes_skipped++;
1155                                         goto unlock_and_continue;
1156                                 }
1157                                 KASSERT(mp != NULL,
1158                                     ("vp %p with NULL v_mount", vp));
1159                                 vm_object_reference_locked(object);
1160                                 VM_OBJECT_UNLOCK(object);
1161                                 if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
1162                                     curthread)) {
1163                                         VM_OBJECT_LOCK(object);
1164                                         ++pageout_lock_miss;
1165                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1166                                                 vnodes_skipped++;
1167                                         vp = NULL;
1168                                         goto unlock_and_continue;
1169                                 }
1170                                 VM_OBJECT_LOCK(object);
1171                                 vm_page_lock(m);
1172                                 vm_pagequeue_lock(pq);
1173                                 queues_locked = TRUE;
1174                                 /*
1175                                  * The page might have been moved to another
1176                                  * queue during potential blocking in vget()
1177                                  * above.  The page might have been freed and
1178                                  * reused for another vnode.
1179                                  */
1180                                 if (m->queue != PQ_INACTIVE ||
1181                                     m->object != object ||
1182                                     TAILQ_NEXT(m, pageq) != &marker) {
1183                                         vm_page_unlock(m);
1184                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1185                                                 vnodes_skipped++;
1186                                         goto unlock_and_continue;
1187                                 }
1188         
1189                                 /*
1190                                  * The page may have been busied during the
1191                                  * blocking in vget().  We don't move the
1192                                  * page back onto the end of the queue;
1193                                  * statistics are more correct if we don't.
1194                                  */
1195                                 if (m->busy || (m->oflags & VPO_BUSY)) {
1196                                         vm_page_unlock(m);
1197                                         goto unlock_and_continue;
1198                                 }
1199
1200                                 /*
1201                                  * If the page has become held it might
1202                                  * be undergoing I/O, so skip it
1203                                  */
1204                                 if (m->hold_count) {
1205                                         vm_page_unlock(m);
1206                                         vm_page_requeue_locked(m);
1207                                         if (object->flags & OBJ_MIGHTBEDIRTY)
1208                                                 vnodes_skipped++;
1209                                         goto unlock_and_continue;
1210                                 }
1211                                 vm_pagequeue_unlock(pq);
1212                                 queues_locked = FALSE;
1213                         }
1214
1215                         /*
1216                          * If a page is dirty, then it is either being washed
1217                          * (but not yet cleaned) or it is still in the
1218                          * laundry.  If it is still in the laundry, then we
1219                          * start the cleaning operation. 
1220                          *
1221                          * decrement page_shortage on success to account for
1222                          * the (future) cleaned page.  Otherwise we could wind
1223                          * up laundering or cleaning too many pages.
1224                          */
1225                         if (vm_pageout_clean(m) != 0) {
1226                                 --page_shortage;
1227                                 --maxlaunder;
1228                         }
1229 unlock_and_continue:
1230                         vm_page_lock_assert(m, MA_NOTOWNED);
1231                         VM_OBJECT_UNLOCK(object);
1232                         if (mp != NULL) {
1233                                 if (queues_locked) {
1234                                         vm_pagequeue_unlock(pq);
1235                                         queues_locked = FALSE;
1236                                 }
1237                                 if (vp != NULL)
1238                                         vput(vp);
1239                                 vm_object_deallocate(object);
1240                                 vn_finished_write(mp);
1241                         }
1242                         vm_page_lock_assert(m, MA_NOTOWNED);
1243                         goto relock_queues;
1244                 }
1245                 vm_page_unlock(m);
1246                 VM_OBJECT_UNLOCK(object);
1247 relock_queues:
1248                 if (!queues_locked) {
1249                         vm_pagequeue_lock(pq);
1250                         queues_locked = TRUE;
1251                 }
1252                 next = TAILQ_NEXT(&marker, pageq);
1253                 TAILQ_REMOVE(&pq->pq_pl, &marker, pageq);
1254         }
1255         vm_pagequeue_unlock(pq);
1256
1257         /*
1258          * Compute the number of pages we want to try to move from the
1259          * active queue to the inactive queue.
1260          */
1261         page_shortage = vm_paging_target() +
1262                 cnt.v_inactive_target - cnt.v_inactive_count;
1263         page_shortage += addl_page_shortage;
1264
1265         /*
1266          * Scan the active queue for things we can deactivate. We nominally
1267          * track the per-page activity counter and use it to locate
1268          * deactivation candidates.
1269          */
1270         pcount = cnt.v_active_count;
1271         pq = &vm_pagequeues[PQ_ACTIVE];
1272         vm_pagequeue_lock(pq);
1273         m = TAILQ_FIRST(&pq->pq_pl);
1274         while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
1275
1276                 KASSERT(m->queue == PQ_ACTIVE,
1277                     ("vm_pageout_scan: page %p isn't active", m));
1278
1279                 next = TAILQ_NEXT(m, pageq);
1280                 if ((m->flags & PG_MARKER) != 0) {
1281                         m = next;
1282                         continue;
1283                 }
1284                 KASSERT((m->flags & PG_FICTITIOUS) == 0,
1285                     ("Fictitious page %p cannot be in active queue", m));
1286                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1287                     ("Unmanaged page %p cannot be in active queue", m));
1288                 if (!vm_pageout_page_lock(m, &next)) {
1289                         vm_page_unlock(m);
1290                         m = next;
1291                         continue;
1292                 }
1293                 object = m->object;
1294                 if (!VM_OBJECT_TRYLOCK(object) &&
1295                     !vm_pageout_fallback_object_lock(m, &next)) {
1296                         VM_OBJECT_UNLOCK(object);
1297                         vm_page_unlock(m);
1298                         m = next;
1299                         continue;
1300                 }
1301
1302                 /*
1303                  * Don't deactivate pages that are busy.
1304                  */
1305                 if ((m->busy != 0) ||
1306                     (m->oflags & VPO_BUSY) ||
1307                     (m->hold_count != 0)) {
1308                         vm_page_unlock(m);
1309                         VM_OBJECT_UNLOCK(object);
1310                         vm_page_requeue_locked(m);
1311                         m = next;
1312                         continue;
1313                 }
1314
1315                 /*
1316                  * The pagedaemon page count is only bumped after the page
1317                  * has passed the eligibility checks above.
1318                  */
1319                 PCPU_INC(cnt.v_pdpages);
1320
1321                 /*
1322                  * Check to see "how much" the page has been used.
1323                  */
1324                 actcount = 0;
1325                 if (object->ref_count != 0) {
1326                         if (m->aflags & PGA_REFERENCED) {
1327                                 actcount += 1;
1328                         }
1329                         actcount += pmap_ts_referenced(m);
1330                         if (actcount) {
1331                                 m->act_count += ACT_ADVANCE + actcount;
1332                                 if (m->act_count > ACT_MAX)
1333                                         m->act_count = ACT_MAX;
1334                         }
1335                 }
1336
1337                 /*
1338                  * Since we have "tested" this bit, we need to clear it now.
1339                  */
1340                 vm_page_aflag_clear(m, PGA_REFERENCED);
1341
1342                 /*
1343                  * Only if the object is currently referenced do we use the
1344                  * page activation count stats.
1345                  */
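                     /*
                      * A referenced page in a live object is simply requeued;
                      * otherwise its activity count decays and, once it reaches
                      * zero (or the object is dead, or vm_pageout_algorithm is
                      * set), the page is moved to the cache or inactive queue.
                      */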
1346                 if (actcount != 0 && object->ref_count != 0)
1347                         vm_page_requeue_locked(m);
1348                 else {
1349                         m->act_count -= min(m->act_count, ACT_DECLINE);
1350                         if (vm_pageout_algorithm ||
1351                             object->ref_count == 0 ||
1352                             m->act_count == 0) {
1353                                 page_shortage--;
1354                                 /* Dequeue to avoid later lock recursion. */
1355                                 vm_page_dequeue_locked(m);
1356                                 if (object->ref_count == 0) {
1357                                         KASSERT(!pmap_page_is_mapped(m),
1358                                     ("vm_pageout_scan: page %p is mapped", m));
1359                                         if (m->dirty == 0)
1360                                                 vm_page_cache(m);
1361                                         else
1362                                                 vm_page_deactivate(m);
1363                                 } else {
1364                                         vm_page_deactivate(m);
1365                                 }
1366                         } else
1367                                 vm_page_requeue_locked(m);
1368                 }
1369                 vm_page_unlock(m);
1370                 VM_OBJECT_UNLOCK(object);
1371                 m = next;
1372         }
1373         vm_pagequeue_unlock(pq);
1374 #if !defined(NO_SWAPPING)
1375         /*
1376          * Idle process swapout -- run once per second.
1377          */
1378         if (vm_swap_idle_enabled) {
1379                 static long lsec;
1380                 if (time_second != lsec) {
1381                         vm_req_vmdaemon(VM_SWAP_IDLE);
1382                         lsec = time_second;
1383                 }
1384         }
1385 #endif
1386
1387         /*
1388          * If we didn't get enough free pages and we have skipped a vnode
1389          * in a writeable object, wake up the sync daemon.  Also kick off
1390          * swapout if we are still below the free page target.
1391          */
1392         if (vm_paging_target() > 0) {
1393                 if (vnodes_skipped && vm_page_count_min())
1394                         (void) speedup_syncer();
1395 #if !defined(NO_SWAPPING)
1396                 if (vm_swap_enabled && vm_page_count_target())
1397                         vm_req_vmdaemon(VM_SWAP_NORMAL);
1398 #endif
1399         }
1400
1401         /*
1402          * If we are critically low on one of RAM or swap and low on
1403          * the other, kill the largest process.  However, we avoid
1404          * doing this on the first pass in order to give ourselves a
1405          * chance to flush out dirty vnode-backed pages and to allow
1406          * active pages to be moved to the inactive queue and reclaimed.
1407          */
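             /*
              * That is: either swap is nearly exhausted while free memory
              * is at its minimum, or swap is completely full and we are
              * still short of the paging target.
              */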
1408         if (pass != 0 &&
1409             ((swap_pager_avail < 64 && vm_page_count_min()) ||
1410              (swap_pager_full && vm_paging_target() > 0)))
1411                 vm_pageout_oom(VM_OOM_MEM);
1412 }
1413
1414
1415 void
1416 vm_pageout_oom(int shortage)
1417 {
1418         struct proc *p, *bigproc;
1419         vm_offset_t size, bigsize;
1420         struct thread *td;
1421         struct vmspace *vm;
1422
1423         /*
1424          * We keep the process bigproc locked once we find it to keep anyone
1425          * from messing with it; however, there is a possibility of
1426          * deadlock if process B is bigproc and one of its child processes
1427          * attempts to propagate a signal to B while we are waiting for A's
1428          * lock while walking this list.  To avoid this, we don't block on
1429          * the process lock but just skip a process if it is already locked.
1430          */
1431         bigproc = NULL;
1432         bigsize = 0;
1433         sx_slock(&allproc_lock);
1434         FOREACH_PROC_IN_SYSTEM(p) {
1435                 int breakout;
1436
1437                 if (PROC_TRYLOCK(p) == 0)
1438                         continue;
1439                 /*
1440                  * If this is a system, protected or killed process, skip it.
1441                  */
1442                 if (p->p_state != PRS_NORMAL ||
1443                     (p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
1444                     (p->p_pid == 1) || P_KILLED(p) ||
1445                     ((p->p_pid < 48) && (swap_pager_avail != 0))) {
1446                         PROC_UNLOCK(p);
1447                         continue;
1448                 }
1449                 /*
1450                  * If the process is in a non-running type state,
1451                  * don't touch it.  Check all the threads individually.
1452                  */
1453                 breakout = 0;
1454                 FOREACH_THREAD_IN_PROC(p, td) {
1455                         thread_lock(td);
1456                         if (!TD_ON_RUNQ(td) &&
1457                             !TD_IS_RUNNING(td) &&
1458                             !TD_IS_SLEEPING(td) &&
1459                             !TD_IS_SUSPENDED(td)) {
1460                                 thread_unlock(td);
1461                                 breakout = 1;
1462                                 break;
1463                         }
1464                         thread_unlock(td);
1465                 }
1466                 if (breakout) {
1467                         PROC_UNLOCK(p);
1468                         continue;
1469                 }
1470                 /*
1471                  * get the process size
1472                  */
1473                 vm = vmspace_acquire_ref(p);
1474                 if (vm == NULL) {
1475                         PROC_UNLOCK(p);
1476                         continue;
1477                 }
1478                 if (!vm_map_trylock_read(&vm->vm_map)) {
1479                         vmspace_free(vm);
1480                         PROC_UNLOCK(p);
1481                         continue;
1482                 }
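                     /*
                      * Size the process by its swap usage, and also by its
                      * resident set when the shortage is of memory rather
                      * than of swap space.
                      */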
1483                 size = vmspace_swap_count(vm);
1484                 vm_map_unlock_read(&vm->vm_map);
1485                 if (shortage == VM_OOM_MEM)
1486                         size += vmspace_resident_count(vm);
1487                 vmspace_free(vm);
1488                 /*
1489                  * If this process is bigger than the biggest one,
1490                  * remember it.
1491                  */
1492                 if (size > bigsize) {
1493                         if (bigproc != NULL)
1494                                 PROC_UNLOCK(bigproc);
1495                         bigproc = p;
1496                         bigsize = size;
1497                 } else
1498                         PROC_UNLOCK(p);
1499         }
1500         sx_sunlock(&allproc_lock);
1501         if (bigproc != NULL) {
1502                 killproc(bigproc, "out of swap space");
1503                 sched_nice(bigproc, PRIO_MIN);
1504                 PROC_UNLOCK(bigproc);
1505                 wakeup(&cnt.v_free_count);
1506         }
1507 }
1508
1509 /*
1510  * This routine tries to maintain the pseudo-LRU active queue so that
1511  * some statistics accumulation still occurs during long periods when
1512  * there is no paging.  This helps the transition when paging first
1513  * starts to occur.
1514  */
1515 static void
1516 vm_pageout_page_stats(void)
1517 {
1518         struct vm_pagequeue *pq;
1519         vm_object_t object;
1520         vm_page_t m, next;
1521         int pcount, tpcount;            /* Number of pages to check */
1522         static int fullintervalcount = 0;
1523         int page_shortage;
1524
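             /*
              * Only bother scanning when the free, inactive and cache page
              * counts have fallen below their combined targets; otherwise
              * there is nothing useful for the stats scan to do.
              */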
1525         page_shortage = 
1526             (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
1527             (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
1528
1529         if (page_shortage <= 0)
1530                 return;
1531
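             /*
              * pcount is the number of active pages to examine.  Between
              * full-interval scans it is capped at vm_pageout_stats_max
              * scaled by the fraction of pages that are active; once the
              * full interval has elapsed, the whole active queue is scanned.
              */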
1532         pcount = cnt.v_active_count;
1533         fullintervalcount += vm_pageout_stats_interval;
1534         if (fullintervalcount < vm_pageout_full_stats_interval) {
1535                 tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count /
1536                     cnt.v_page_count;
1537                 if (pcount > tpcount)
1538                         pcount = tpcount;
1539         } else {
1540                 fullintervalcount = 0;
1541         }
1542
1543         pq = &vm_pagequeues[PQ_ACTIVE];
1544         vm_pagequeue_lock(pq);
1545         m = TAILQ_FIRST(&pq->pq_pl);
1546         while ((m != NULL) && (pcount-- > 0)) {
1547                 int actcount;
1548
1549                 KASSERT(m->queue == PQ_ACTIVE,
1550                     ("vm_pageout_page_stats: page %p isn't active", m));
1551
1552                 next = TAILQ_NEXT(m, pageq);
1553                 if ((m->flags & PG_MARKER) != 0) {
1554                         m = next;
1555                         continue;
1556                 }
1557                 vm_page_lock_assert(m, MA_NOTOWNED);
1558                 if (!vm_pageout_page_lock(m, &next)) {
1559                         vm_page_unlock(m);
1560                         m = next;
1561                         continue;
1562                 }
1563                 object = m->object;
1564                 if (!VM_OBJECT_TRYLOCK(object) &&
1565                     !vm_pageout_fallback_object_lock(m, &next)) {
1566                         VM_OBJECT_UNLOCK(object);
1567                         vm_page_unlock(m);
1568                         m = next;
1569                         continue;
1570                 }
1571
1572                 /*
1573                  * Don't deactivate pages that are busy.
1574                  */
1575                 if ((m->busy != 0) ||
1576                     (m->oflags & VPO_BUSY) ||
1577                     (m->hold_count != 0)) {
1578                         vm_page_unlock(m);
1579                         VM_OBJECT_UNLOCK(object);
1580                         vm_page_requeue_locked(m);
1581                         m = next;
1582                         continue;
1583                 }
1584
1585                 actcount = 0;
1586                 if (m->aflags & PGA_REFERENCED) {
1587                         vm_page_aflag_clear(m, PGA_REFERENCED);
1588                         actcount += 1;
1589                 }
1590
1591                 actcount += pmap_ts_referenced(m);
1592                 if (actcount) {
1593                         m->act_count += ACT_ADVANCE + actcount;
1594                         if (m->act_count > ACT_MAX)
1595                                 m->act_count = ACT_MAX;
1596                         vm_page_requeue_locked(m);
1597                 } else {
1598                         if (m->act_count == 0) {
1599                                 /*
1600                                  * We remove all of the page's mappings so
1601                                  * that we get more accurate RSS stats.  We
1602                                  * don't do this during normal page
1603                                  * deactivation when the system is under VM
1604                                  * load, because the cost of the many page
1605                                  * protect operations would outweigh the
1606                                  * benefit.
1607                                  */
1608                                 pmap_remove_all(m);
1609                                 /* Dequeue to avoid later lock recursion. */
1610                                 vm_page_dequeue_locked(m);
1611                                 vm_page_deactivate(m);
1612                         } else {
1613                                 m->act_count -= min(m->act_count, ACT_DECLINE);
1614                                 vm_page_requeue_locked(m);
1615                         }
1616                 }
1617                 vm_page_unlock(m);
1618                 VM_OBJECT_UNLOCK(object);
1619                 m = next;
1620         }
1621         vm_pagequeue_unlock(pq);
1622 }
1623
1624 /*
1625  *      vm_pageout is the high level pageout daemon.
1626  */
1627 static void
1628 vm_pageout(void)
1629 {
1630         int error, pass;
1631
1632         /*
1633          * Initialize some paging parameters.
1634          */
1635         cnt.v_interrupt_free_min = 2;
1636         if (cnt.v_page_count < 2000)
1637                 vm_pageout_page_count = 8;
1638
1639         /*
1640          * v_free_reserved needs to include enough for the largest
1641          * swap pager structures plus enough for any pv_entry structs
1642          * when paging. 
1643          */
1644         if (cnt.v_page_count > 1024)
1645                 cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1646         else
1647                 cnt.v_free_min = 4;
1648         cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1649             cnt.v_interrupt_free_min;
1650         cnt.v_free_reserved = vm_pageout_page_count +
1651             cnt.v_pageout_free_min + (cnt.v_page_count / 768);
1652         cnt.v_free_severe = cnt.v_free_min / 2;
1653         cnt.v_free_min += cnt.v_free_reserved;
1654         cnt.v_free_severe += cnt.v_free_reserved;
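             /*
              * For example, with 262144 managed pages (1 GB of 4 KB pages)
              * the base v_free_min works out to 4 + (262144 - 1024) / 200 =
              * 1309 pages, roughly 5 MB, before the v_free_reserved
              * adjustment above.
              */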
1655
1656         /*
1657          * v_free_target and v_cache_min control pageout hysteresis.  Note
1658          * that these are more a measure of the VM cache queue hysteresis
1659          * than the VM free queue.  Specifically, v_free_target is the
1660          * high water mark (free+cache pages).
1661          *
1662          * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1663          * low water mark, while v_free_min is the stop.  v_cache_min must
1664          * be big enough to handle memory needs while the pageout daemon
1665          * is signalled and run to free more pages.
1666          */
1667         if (cnt.v_free_count > 6144)
1668                 cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
1669         else
1670                 cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
1671
1672         if (cnt.v_free_count > 2048) {
1673                 cnt.v_cache_min = cnt.v_free_target;
1674                 cnt.v_cache_max = 2 * cnt.v_cache_min;
1675                 cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
1676         } else {
1677                 cnt.v_cache_min = 0;
1678                 cnt.v_cache_max = 0;
1679                 cnt.v_inactive_target = cnt.v_free_count / 4;
1680         }
1681         if (cnt.v_inactive_target > cnt.v_free_count / 3)
1682                 cnt.v_inactive_target = cnt.v_free_count / 3;
1683
1684         /* XXX does not really belong here */
1685         if (vm_page_max_wired == 0)
1686                 vm_page_max_wired = cnt.v_free_count / 3;
1687
1688         if (vm_pageout_stats_max == 0)
1689                 vm_pageout_stats_max = cnt.v_free_target;
1690
1691         /*
1692          * Set interval in seconds for stats scan.
1693          */
1694         if (vm_pageout_stats_interval == 0)
1695                 vm_pageout_stats_interval = 5;
1696         if (vm_pageout_full_stats_interval == 0)
1697                 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
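             /*
              * With these defaults the partial stats scan runs roughly every
              * 5 seconds and the full scan every 20 seconds while the
              * pagedaemon is otherwise idle.
              */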
1698
1699         swap_pager_swap_init();
1700         pass = 0;
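             /*
              * "pass" counts consecutive scans that have failed to reach the
              * paging target; later passes launder dirty pages without the
              * usual limits and may eventually invoke the OOM killer.
              */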
1701         /*
1702          * The pageout daemon is never done, so loop forever.
1703          */
1704         while (TRUE) {
1705                 /*
1706                  * If we have enough free memory, wakeup waiters.  Do
1707                  * not clear vm_pages_needed until we reach our target,
1708                  * otherwise we may be woken up over and over again and
1709                  * waste a lot of cpu.
1710                  */
1711                 mtx_lock(&vm_page_queue_free_mtx);
1712                 if (vm_pages_needed && !vm_page_count_min()) {
1713                         if (!vm_paging_needed())
1714                                 vm_pages_needed = 0;
1715                         wakeup(&cnt.v_free_count);
1716                 }
1717                 if (vm_pages_needed) {
1718                         /*
1719                          * Still not done.  Take a second pass without waiting
1720                          * (unlimited dirty cleaning); otherwise sleep a bit
1721                          * and try again.
1722                          */
1723                         ++pass;
1724                         if (pass > 1)
1725                                 msleep(&vm_pages_needed,
1726                                     &vm_page_queue_free_mtx, PVM, "psleep",
1727                                     hz / 2);
1728                 } else {
1729                         /*
1730                          * Good enough, sleep & handle stats.  Prime the pass
1731                          * for the next run.
1732                          */
1733                         if (pass > 1)
1734                                 pass = 1;
1735                         else
1736                                 pass = 0;
1737                         error = msleep(&vm_pages_needed,
1738                             &vm_page_queue_free_mtx, PVM, "psleep",
1739                             vm_pageout_stats_interval * hz);
1740                         if (error && !vm_pages_needed) {
1741                                 mtx_unlock(&vm_page_queue_free_mtx);
1742                                 pass = 0;
1743                                 vm_pageout_page_stats();
1744                                 continue;
1745                         }
1746                 }
1747                 if (vm_pages_needed)
1748                         cnt.v_pdwakeups++;
1749                 mtx_unlock(&vm_page_queue_free_mtx);
1750                 vm_pageout_scan(pass);
1751         }
1752 }
1753
1754 /*
1755  * Unless the free page queue lock is held by the caller, this function
1756  * should be regarded as advisory.  Specifically, the caller should
1757  * not msleep() on &cnt.v_free_count following this function unless
1758  * the free page queue lock is held until the msleep() is performed.
1759  */
1760 void
1761 pagedaemon_wakeup(void)
1762 {
1763
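             /*
              * The pagedaemon never needs to wake itself up.
              */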
1764         if (!vm_pages_needed && curthread->td_proc != pageproc) {
1765                 vm_pages_needed = 1;
1766                 wakeup(&vm_pages_needed);
1767         }
1768 }
1769
1770 #if !defined(NO_SWAPPING)
1771 static void
1772 vm_req_vmdaemon(int req)
1773 {
1774         static int lastrun = 0;
1775
1776         mtx_lock(&vm_daemon_mtx);
1777         vm_pageout_req_swapout |= req;
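             /*
              * Wake the vmdaemon at most once per second; the second test
              * also copes with the ticks counter wrapping around.
              */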
1778         if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
1779                 wakeup(&vm_daemon_needed);
1780                 lastrun = ticks;
1781         }
1782         mtx_unlock(&vm_daemon_mtx);
1783 }
1784
1785 static void
1786 vm_daemon(void)
1787 {
1788         struct rlimit rsslim;
1789         struct proc *p;
1790         struct thread *td;
1791         struct vmspace *vm;
1792         int breakout, swapout_flags, tryagain, attempts;
1793 #ifdef RACCT
1794         uint64_t rsize, ravailable;
1795 #endif
1796
1797         while (TRUE) {
1798                 mtx_lock(&vm_daemon_mtx);
1799 #ifdef RACCT
1800                 msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", hz);
1801 #else
1802                 msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 0);
1803 #endif
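                     /*
                      * Collect and clear any pending swapout request while
                      * still holding the daemon mutex, then act on it after
                      * the lock has been dropped.
                      */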
1804                 swapout_flags = vm_pageout_req_swapout;
1805                 vm_pageout_req_swapout = 0;
1806                 mtx_unlock(&vm_daemon_mtx);
1807                 if (swapout_flags)
1808                         swapout_procs(swapout_flags);
1809
1810                 /*
1811                  * Scan for processes that exceed their RSS rlimit or that
1812                  * are swapped out, and deactivate their pages.
1813                  */
1814                 tryagain = 0;
1815                 attempts = 0;
1816 again:
1817                 attempts++;
1818                 sx_slock(&allproc_lock);
1819                 FOREACH_PROC_IN_SYSTEM(p) {
1820                         vm_pindex_t limit, size;
1821
1822                         /*
1823                          * If this is a system process, or the process is
1824                          * exiting or execing, skip it.
1825                          */
1826                         PROC_LOCK(p);
1827                         if (p->p_state != PRS_NORMAL ||
1828                             p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
1829                                 PROC_UNLOCK(p);
1830                                 continue;
1831                         }
1832                         /*
1833                          * If the process is in a non-running type state,
1834                          * don't touch it.
1835                          */
1836                         breakout = 0;
1837                         FOREACH_THREAD_IN_PROC(p, td) {
1838                                 thread_lock(td);
1839                                 if (!TD_ON_RUNQ(td) &&
1840                                     !TD_IS_RUNNING(td) &&
1841                                     !TD_IS_SLEEPING(td) &&
1842                                     !TD_IS_SUSPENDED(td)) {
1843                                         thread_unlock(td);
1844                                         breakout = 1;
1845                                         break;
1846                                 }
1847                                 thread_unlock(td);
1848                         }
1849                         if (breakout) {
1850                                 PROC_UNLOCK(p);
1851                                 continue;
1852                         }
1853                         /*
1854                          * get a limit
1855                          */
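                             /*
                              * The RSS limit is in bytes; convert it to pages
                              * for comparison with the resident page count.
                              */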
1856                         lim_rlimit(p, RLIMIT_RSS, &rsslim);
1857                         limit = OFF_TO_IDX(
1858                             qmin(rsslim.rlim_cur, rsslim.rlim_max));
1859
1860                         /*
1861                          * Let processes that are swapped out really be
1862                          * swapped out: set the limit to nothing (this will
1863                          * force a swap-out).
1864                          */
1865                         if ((p->p_flag & P_INMEM) == 0)
1866                                 limit = 0;      /* XXX */
1867                         vm = vmspace_acquire_ref(p);
1868                         PROC_UNLOCK(p);
1869                         if (vm == NULL)
1870                                 continue;
1871
1872                         size = vmspace_resident_count(vm);
1873                         if (size >= limit) {
1874                                 vm_pageout_map_deactivate_pages(
1875                                     &vm->vm_map, limit);
1876                         }
1877 #ifdef RACCT
1878                         rsize = IDX_TO_OFF(size);
1879                         PROC_LOCK(p);
1880                         racct_set(p, RACCT_RSS, rsize);
1881                         ravailable = racct_get_available(p, RACCT_RSS);
1882                         PROC_UNLOCK(p);
1883                         if (rsize > ravailable) {
1884                                 /*
1885                                  * Don't be overly aggressive; this might be
1886                                  * an innocent process, and the limit could've
1887                                  * been exceeded by some memory hog.  Don't
1888                                  * try to deactivate more than 1/4 of the process's
1889                                  * resident set size.
1890                                  */
1891                                 if (attempts <= 8) {
1892                                         if (ravailable < rsize - (rsize / 4))
1893                                                 ravailable = rsize - (rsize / 4);
1894                                 }
1895                                 vm_pageout_map_deactivate_pages(
1896                                     &vm->vm_map, OFF_TO_IDX(ravailable));
1897                                 /* Update RSS usage after paging out. */
1898                                 size = vmspace_resident_count(vm);
1899                                 rsize = IDX_TO_OFF(size);
1900                                 PROC_LOCK(p);
1901                                 racct_set(p, RACCT_RSS, rsize);
1902                                 PROC_UNLOCK(p);
1903                                 if (rsize > ravailable)
1904                                         tryagain = 1;
1905                         }
1906 #endif
1907                         vmspace_free(vm);
1908                 }
1909                 sx_sunlock(&allproc_lock);
1910                 if (tryagain != 0 && attempts <= 10)
1911                         goto again;
1912         }
1913 }
1914 #endif                  /* !defined(NO_SWAPPING) */