/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/unistd.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

static void swapout(struct proc *);

static volatile int proc0_rescan;
/*
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
kernacc(addr, len, rw)
	vm_offset_t saddr, eaddr;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)

	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
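
/*
 * Example (illustrative sketch only; the helper name is hypothetical and
 * not part of this file): validating a kernel virtual address range with
 * kernacc() before touching it.  As the warning above notes, this checks
 * only the vm_map_entry protections, so it is advisory rather than a
 * guarantee that the access will succeed.
 */
static int
example_check_kva(void *p, int len)
{

	if (!kernacc(p, len, VM_PROT_READ | VM_PROT_WRITE))
		return (EFAULT);
	return (0);
}
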
/*
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
useracc(addr, len, rw)

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));

	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {

	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
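
/*
 * Example (hypothetical driver-style usage, not taken from this file):
 * check a user buffer with useracc() and then fetch it with copyin().
 * Per the warning above, useracc() is only a protection check; copyin()
 * still performs, and reports the result of, the real access.
 */
static int
example_fetch_user_buf(void *ubuf, void *kbuf, size_t len)
{

	if (!useracc(ubuf, len, VM_PROT_READ))
		return (EFAULT);
	return (copyin(ubuf, kbuf, len));
}
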
vslock(void *addr, size_t len)
	vm_offset_t end, last, start;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)

	npages = atop(end - start);
	if (npages > vm_page_max_wired)

	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(curproc);

	PROC_UNLOCK(curproc);

	/*
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)

	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
vsunlock(void *addr, size_t len)

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
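
/*
 * Example (minimal sketch with hypothetical names): the wire/copy/unwire
 * pattern that the sysctl code relies on.  vslock() wires the user range
 * so that the subsequent copyout() cannot fault on a non-resident page;
 * as noted above, callers such as sysctl simply retry on EAGAIN.
 */
static int
example_copyout_wired(const void *kbuf, void *ubuf, size_t len)
{
	int error;

	error = vslock(ubuf, len);
	if (error != 0)
		return (error);
	error = copyout(kbuf, ubuf, len);
	vsunlock(ubuf, len);
	return (error);
}
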
/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)

	VM_OBJECT_LOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
		rv = vm_pager_get_pages(object, ma, 1, 0);
		m = vm_page_lookup(object, pindex);

		if (m->valid == 0 || rv != VM_PAGER_OK) {
			vm_page_lock_queues();

			vm_page_unlock_queues();

	vm_page_lock_queues();

	vm_page_unlock_queues();

	VM_OBJECT_UNLOCK(object);

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)

	m = vm_imgact_hold_page(object, offset);

	return (sf_buf_alloc(m, SFB_CPUPRIVATE));

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
vm_imgact_unmap_page(struct sf_buf *sf)

	vm_page_lock_queues();

	vm_page_unlock_queues();
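
/*
 * Example (sketch modelled on the ELF image activator; the helper name
 * and arguments are hypothetical): map one page of an executable's
 * object, copy a fragment of it out to the new process image, and drop
 * the mapping again.  "len" is assumed not to cross a page boundary.
 */
static int
example_copy_image_bytes(vm_object_t object, vm_ooffset_t offset,
    caddr_t uva, size_t len)
{
	struct sf_buf *sf;
	int error;

	sf = vm_imgact_map_page(object, offset);
	if (sf == NULL)
		return (EIO);
	error = copyout((caddr_t)sf_buf_kva(sf) + (offset & PAGE_MASK),
	    uva, len);
	vm_imgact_unmap_page(sf);
	return (error);
}
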
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork performance for a process and
 * the creation performance for a thread.
 */
vm_thread_new(struct thread *td, int pages)

	vm_page_t m, ma[KSTACK_MAX_PAGES];

		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);

		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * stack page.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);

		m->valid = VM_PAGE_BITS_ALL;
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
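
/*
 * Worked example of the layout vm_thread_new() produces (illustrative
 * figures only, assuming PAGE_SIZE = 4096, KSTACK_PAGES = 2 and
 * KSTACK_GUARD_PAGES = 1):
 *
 *	kmem_alloc_nofault() returns the base of a 3-page KVA range
 *	[base,         base +  4096)   guard page, mapping removed
 *	[base + 4096,  base + 12288)   the two wired kernel stack pages
 *
 * td_kstack therefore points at base + KSTACK_GUARD_PAGES * PAGE_SIZE,
 * and vm_thread_dispose() frees starting from td_kstack minus that same
 * guard offset.
 */
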
/*
 * Dispose of a thread's kernel stack.
 */
vm_thread_dispose(struct thread *td)

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;

	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);

			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_unwire(m, 0);

		vm_page_unlock_queues();

	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);

/*
 * Allow a thread's kernel stack to be paged out.
 */
vm_thread_swapout(struct thread *td)

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);

			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();

		vm_page_unwire(m, 0);
		vm_page_unlock_queues();

	VM_OBJECT_UNLOCK(ksobj);

/*
 * Bring the kernel stack for a specified thread back in.
 */
vm_thread_swapin(struct thread *td)

	vm_page_t m, ma[KSTACK_MAX_PAGES];

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;

		vm_page_lock_queues();

		vm_page_unlock_queues();

	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);

/*
 * Set up a variable-sized alternate kstack.
 */
vm_thread_new_altkstack(struct thread *td, int pages)

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	vm_thread_new(td, pages);

/*
 * Restore the original kstack.
 */
vm_thread_dispose_altkstack(struct thread *td)

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
vm_forkproc(td, p2, td2, flags)

	struct proc *p1 = td->td_proc;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {

		cpu_fork(td, p2, td2, flags);

		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);

	while (vm_page_count_severe()) {

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
		if (p1->p_vmspace->vm_shm)

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
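
/*
 * Userland view of the two paths above (illustrative sketch only; the
 * flag values are the standard rfork(2) flags from <sys/unistd.h>):
 *
 *	rfork(RFPROC | RFMEM);	child shares the parent's vmspace,
 *				only the reference count is bumped
 *	rfork(RFPROC);		child gets a vmspace_fork()ed
 *				copy-on-write copy of the parent
 */
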
/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */

	vmspace_exitfree(p);		/* and clean-out the vmspace */
/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
vm_init_limits(udata)

	struct proc *p = udata;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
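
/*
 * Worked example of the RSS floor applied above (illustrative only,
 * assuming 4 KB pages): rss_limit = max(cnt.v_free_count, 512) is a
 * page count, and ptoa(512) = 512 * 4096 = 2 MB, which is the
 * "no less than 2MB" floor mentioned in the comment.
 */
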
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);

		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);

		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {

		mtx_unlock_spin(&sched_lock);

		/* Allow other threads to swap p out now. */

#endif /* NO_SWAPPING */
/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - a process is ranked by its highest-priority thread.
 *
 * Giant is held on entry.
 */
	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);

	if (vm_page_count_min()) {

		mtx_lock_spin(&sched_lock);

		mtx_unlock_spin(&sched_lock);

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {

		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {

		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {

				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= p->p_nice * 8;
				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */

		mtx_unlock_spin(&sched_lock);
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		mtx_lock_spin(&sched_lock);

		TD_SET_IWAIT(&thread0);
		mi_switch(SW_VOL, NULL);

		mtx_unlock_spin(&sched_lock);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {

		mtx_lock_spin(&sched_lock);

		mtx_unlock_spin(&sched_lock);

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);
	/*
	 * We would like to bring someone in (only if there is space).
	 * [What checks the space?]
	 */

	mtx_lock_spin(&sched_lock);

	mtx_unlock_spin(&sched_lock);
void kick_proc0(void)
	struct thread *td = &thread0;

	if (TD_AWAITING_INTR(td)) {
		CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, 0);

		setrunqueue(td, SRQ_INTR);

		CTR2(KTR_INTR, "%s: state %d",
		    __func__, td->td_state);
/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
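
/*
 * Example (illustrative only): both thresholds are exported as
 * read-write sysctls, so they can be inspected or tuned from userland
 * with sysctl(8):
 *
 *	sysctl vm.swap_idle_threshold1		# show the guaranteed in-memory time
 *	sysctl vm.swap_idle_threshold2=20	# raise the idle-swap threshold
 */
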
/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
swapout_procs(action)

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {

		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);

		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */

		    ("swapout_procs: a process has no address space"));
		atomic_add_int(&vm->vm_refcnt, 1);

		if (!vm_map_trylock(&vm->vm_map))

		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)

		/*
		 * only aiod changes vmspace, however it will be
		 * skipped because of the if statement above checking
		 * P_SYSTEM.
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
		switch (p->p_state) {

			/* Don't swap out processes in any sort
			 * of 'special' state. */

			mtx_lock_spin(&sched_lock);
			/*
			 * do not swapout a realtime process
			 * Check all the thread groups.
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))

				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {

				mtx_unlock_spin(&sched_lock);

				vm_map_unlock(&vm->vm_map);

				sx_sunlock(&allproc_lock);

			mtx_unlock_spin(&sched_lock);

		vm_map_unlock(&vm->vm_map);

	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
	    ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
		    ("swapout: there is a thread not safe for swapout"));

#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;

	FOREACH_THREAD_IN_PROC(p, td)

	mtx_unlock_spin(&sched_lock);

	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;

#endif /* !NO_SWAPPING */