/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/mutex.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/unistd.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
/*
 * System initialization
 *
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);

static int swapout(struct proc *);
static void swapclear(struct proc *);
static void vm_thread_swapin(struct thread *td);
static void vm_thread_swapout(struct thread *td);
/*
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
kernacc(addr, len, rw)
        vm_offset_t saddr, eaddr;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));

        if ((vm_offset_t)addr + len > kernel_map->max_offset ||
            (vm_offset_t)addr + len < (vm_offset_t)addr)
                return (FALSE);

        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
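
/*
 * Example (sketch): a caller that only needs a cheap sanity check on a
 * kernel virtual range might do
 *
 *	if (!kernacc((void *)kva, size, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 *
 * where kva and size are the caller's own variables.  As the warning above
 * notes, this validates only the vm_map_entry protections; it does not
 * fault pages in or guarantee that every page is resident.
 */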
/*
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
useracc(addr, len, rw)

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));

        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
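
/*
 * Example (sketch): before touching a user buffer, a driver might combine
 * the two checks, e.g.
 *
 *	if (!useracc(uaddr, len, VM_PROT_WRITE) ||
 *	    copyout(kbuf, uaddr, len) != 0)
 *		return (EFAULT);
 *
 * uaddr, len and kbuf are the caller's own variables.  copyout() (or
 * vmapbuf()/vm_fault_quick()) is still required; useracc() alone does not
 * make the access safe.
 */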
vslock(void *addr, size_t len)

        vm_offset_t end, last, start;

        last = (vm_offset_t)addr + len;
        start = trunc_page((vm_offset_t)addr);
        end = round_page(last);
        if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
                return (EINVAL);
        npages = atop(end - start);
        if (npages > vm_page_max_wired)
                return (ENOMEM);

        nsize = ptoa(npages +
            pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map)));
        if (nsize > lim_cur(curproc, RLIMIT_MEMLOCK)) {
                PROC_UNLOCK(curproc);
                return (ENOMEM);
        }
        if (racct_set(curproc, RACCT_MEMLOCK, nsize)) {
                PROC_UNLOCK(curproc);
                return (ENOMEM);
        }
        PROC_UNLOCK(curproc);

        /*
         * The limit for transient usage of wired pages should be
         * larger than for "permanent" wired pages (mlock()).
         *
         * Also, the sysctl code, which is the only present user
         * of vslock(), does a hard loop on EAGAIN.
         */
        if (npages + cnt.v_wire_count > vm_page_max_wired)
                return (EAGAIN);
        error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);

        if (error != KERN_SUCCESS) {
                racct_set(curproc, RACCT_MEMLOCK,
                    ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
                PROC_UNLOCK(curproc);
        }

        /*
         * Return EFAULT on error to match copy{in,out}() behaviour
         * rather than returning ENOMEM like mlock() would.
         */
        return (error == KERN_SUCCESS ? 0 : EFAULT);
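
/*
 * Example (sketch): the sysctl pattern that motivates the EAGAIN comment
 * above looks roughly like
 *
 *	do {
 *		error = vslock(req->oldptr, req->oldlen);
 *	} while (error == EAGAIN);
 *	if (error == 0) {
 *		... copy data out to the now-wired user buffer ...
 *		vsunlock(req->oldptr, req->oldlen);
 *	}
 *
 * This illustrates only the calling convention; the real sysctl code lives
 * in kern_sysctl.c and differs in detail.
 */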
vsunlock(void *addr, size_t len)

        /* Rely on the parameter sanity checks performed by vslock(). */
        (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);

        racct_set(curproc, RACCT_MEMLOCK,
            ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
        PROC_UNLOCK(curproc);
/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)

        VM_OBJECT_LOCK(object);
        pindex = OFF_TO_IDX(offset);
        m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
        if (m->valid != VM_PAGE_BITS_ALL) {
                rv = vm_pager_get_pages(object, ma, 1, 0);
                m = vm_page_lookup(object, pindex);
                if (rv != VM_PAGER_OK) {

        VM_OBJECT_UNLOCK(object);
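
/*
 * Sketch of the expected flow (several steps are elided above): if the
 * grabbed page is not fully valid, the object's pager is asked for that one
 * page; on pager failure the page is freed and NULL is returned, and on
 * success the page is held before the object lock is dropped, so the caller
 * owns a pinned page that is later released by vm_imgact_unmap_page().
 */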
/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)

        m = vm_imgact_hold_page(object, offset);

        return (sf_buf_alloc(m, SFB_CPUPRIVATE));
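
/*
 * Example (sketch): image activators use the pair roughly as
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf != NULL) {
 *		bcopy((char *)sf_buf_kva(sf) + off, dst, len);
 *		vm_imgact_unmap_page(sf);
 *	}
 *
 * sf, off, dst and len are illustrative.  sf_buf_kva() yields the kernel
 * virtual address of the CPU-private mapping; the unmap call both frees the
 * sf_buf and unpins the underlying page.
 */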
/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
vm_imgact_unmap_page(struct sf_buf *sf)

vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)

        pmap_sync_icache(map->pmap, va, sz);
struct kstack_cache_entry {
        vm_object_t ksobj;
        struct kstack_cache_entry *next_ks_entry;
};

static struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "");

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
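
/*
 * Note: freed default-size kernel stacks are not returned to the VM system
 * immediately.  They are linked into the singly linked list headed by
 * kstack_cache (the cache entry lives in the stack memory itself), under
 * kstack_cache_mtx, so that vm_thread_new() can hand them out again without
 * allocating a new object and KVA.  The vm_lowmem eventhandler drains the
 * cache under memory pressure.
 */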
/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork performance for a process and
 * the creation performance for a thread.
 */
vm_thread_new(struct thread *td, int pages)

        vm_page_t m, ma[KSTACK_MAX_PAGES];
        struct kstack_cache_entry *ks_ce;

                pages = KSTACK_PAGES;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;

        if (pages == KSTACK_PAGES) {
                mtx_lock(&kstack_cache_mtx);
                if (kstack_cache != NULL) {
                        ks_ce = kstack_cache;
                        kstack_cache = ks_ce->next_ks_entry;
                        mtx_unlock(&kstack_cache_mtx);

                        td->td_kstack_obj = ks_ce->ksobj;
                        td->td_kstack = (vm_offset_t)ks_ce;
                        td->td_kstack_pages = KSTACK_PAGES;
                        return (1);
                }
                mtx_unlock(&kstack_cache_mtx);
        }

        /*
         * Allocate an object for the kstack.
         */
        ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

        /*
         * Get a kernel virtual address for this thread's kstack.
         */
#if defined(__mips__)
        /*
         * We need to align the kstack's mapped address to fit within
         * a single TLB entry.
         */
        ks = kmem_alloc_nofault_space(kernel_map,
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
#else
        ks = kmem_alloc_nofault(kernel_map,
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
        if (ks == 0) {
                printf("vm_thread_new: kstack allocation failed\n");
                vm_object_deallocate(ksobj);
                return (0);
        }

        atomic_add_int(&kstacks, 1);
        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        }
        td->td_kstack_obj = ksobj;
        /*
         * Knowing the number of pages allocated is useful when you
         * want to deallocate them.
         */
        td->td_kstack_pages = pages;
        /*
         * For the length of the stack, link in a real page of ram for each
         * page of stack.
         */
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                /*
                 * Get a kernel stack page.
                 */
                m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;
                m->valid = VM_PAGE_BITS_ALL;
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(ks, ma, pages);
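
/*
 * In short: a new stack is either recycled from kstack_cache (default size
 * only) or built from scratch: a pages-sized OBJT_DEFAULT object, a KVA
 * range with KSTACK_GUARD_PAGES of unmapped guard space below the stack,
 * wired pages grabbed into the object, and a pmap_qenter() that maps them
 * at the address recorded in td->td_kstack.
 */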
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)

        atomic_add_int(&kstacks, -1);
        pmap_qremove(ks, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_dispose: kstack already missing?");
                vm_page_unwire(m, 0);
        }
        VM_OBJECT_UNLOCK(ksobj);
        vm_object_deallocate(ksobj);
        kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
/*
 * Dispose of a thread's kernel stack.
 */
vm_thread_dispose(struct thread *td)

        struct kstack_cache_entry *ks_ce;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;

        td->td_kstack_pages = 0;
        if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
                ks_ce = (struct kstack_cache_entry *)ks;
                ks_ce->ksobj = ksobj;
                mtx_lock(&kstack_cache_mtx);
                ks_ce->next_ks_entry = kstack_cache;
                kstack_cache = ks_ce;
                mtx_unlock(&kstack_cache_mtx);
                return;
        }
        vm_thread_stack_dispose(ksobj, ks, pages);
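
/*
 * Only default-sized stacks are cached, and only while the total number of
 * stacks (kstacks) does not exceed kstack_cache_size; oversized or surplus
 * stacks go straight back to the VM system via vm_thread_stack_dispose().
 */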
vm_thread_stack_lowmem(void *nulll)

        struct kstack_cache_entry *ks_ce, *ks_ce1;

        mtx_lock(&kstack_cache_mtx);
        ks_ce = kstack_cache;
        mtx_unlock(&kstack_cache_mtx);

        while (ks_ce != NULL) {
                ks_ce1 = ks_ce;
                ks_ce = ks_ce->next_ks_entry;

                vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
                    KSTACK_PAGES);
        }

kstack_cache_init(void *nulll)

        EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
            EVENTHANDLER_PRI_ANY);

MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);
/*
 * Allow a thread's kernel stack to be paged out.
 */
vm_thread_swapout(struct thread *td)

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_unwire(m, 0);
        }
        VM_OBJECT_UNLOCK(ksobj);
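
/*
 * Once the wirings are dropped and the KVA mapping is removed by
 * pmap_qremove(), the stack pages become ordinary pageable pages backed by
 * the kstack object and can be paged out to swap by the pageout daemon like
 * any other dirty anonymous memory.
 */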
/*
 * Bring the kernel stack for a specified thread back in.
 */
vm_thread_swapin(struct thread *td)

        vm_page_t ma[KSTACK_MAX_PAGES];
        int i, j, k, pages, rv;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++)
                ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
                    VM_ALLOC_WIRED);
        for (i = 0; i < pages; i++) {
                if (ma[i]->valid != VM_PAGE_BITS_ALL) {
                        KASSERT(ma[i]->oflags & VPO_BUSY,
                            ("lost busy 1"));
                        vm_object_pip_add(ksobj, 1);
                        for (j = i + 1; j < pages; j++) {
                                KASSERT(ma[j]->valid == VM_PAGE_BITS_ALL ||
                                    (ma[j]->oflags & VPO_BUSY),
                                    ("lost busy 2"));
                                if (ma[j]->valid == VM_PAGE_BITS_ALL)
                                        break;
                        }
                        rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_thread_swapin: cannot get kstack for proc: %d",
                                    td->td_proc->p_pid);
                        vm_object_pip_wakeup(ksobj);
                        for (k = i; k < j; k++)
                                ma[k] = vm_page_lookup(ksobj, k);
                        vm_page_wakeup(ma[i]);
                } else if (ma[i]->oflags & VPO_BUSY)
                        vm_page_wakeup(ma[i]);
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
#endif /* !NO_SWAPPING */
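
/*
 * Note on the loop above: pages that are still fully valid are simply
 * re-wired and unbusied, while each run of invalid pages is clustered into
 * a single vm_pager_get_pages() request (the inner j loop finds the end of
 * the run), which pulls the stack contents back from swap before the
 * mapping is re-established with pmap_qenter().
 */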
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
vm_forkproc(td, p2, td2, vm2, flags)

        struct proc *p1 = td->td_proc;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared: essentially this
                 * changes memory shared amongst threads into COW locally.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                error = vmspace_unshare(p1);
                        }
                }
                cpu_fork(td, p2, td2, flags);
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
        }

        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
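
/*
 * Sketch of the cases handled above, keyed on the rfork(2) flags: with
 * RFPROC clear there is no new process, so at most the caller's own vmspace
 * is unshared; RFPROC|RFMEM shares the parent vmspace by bumping vm_refcnt;
 * RFPROC without RFMEM gives the child its own copy-on-write vmspace (vm2,
 * prepared by the caller) plus a shmfork() of any SysV shared memory.
 */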
/*
 * Called after the process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
vm_waitproc(p)

        vmspace_exitfree(p);    /* and clean out the vmspace */
faultin(p)

#ifdef NO_SWAPPING
        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((p->p_flag & P_INMEM) == 0)
                panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_flag & P_SWAPPINGIN) {
                while (p->p_flag & P_SWAPPINGIN)
                        msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
        }
        if ((p->p_flag & P_INMEM) == 0) {
                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                p->p_flag |= P_SWAPPINGIN;

                /*
                 * We hold no lock here because the list of threads
                 * can not change while all threads in the process are
                 * stopped.
                 */
                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td);

                /* Allow other threads to swap p out now. */
        }
#endif /* NO_SWAPPING */
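
/*
 * The P_SWAPPINGIN / P_SWAPPINGOUT / P_INMEM flags act as a small state
 * machine: faultin() sleeps if someone else is already swapping the process
 * in, marks the process P_SWAPPINGIN while its thread stacks are paged back
 * with vm_thread_swapin(), and swapclear() (below) finally re-marks the
 * process P_INMEM and makes its threads runnable again.
 */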
/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */

        mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);

        if (vm_page_count_min()) {

        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                if (p->p_state == PRS_NEW ||
                    p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {

                swtime = (ticks - p->p_swtick) / hz;
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                slptime = (ticks - td->td_slptick) / hz;
                                pri = swtime + slptime;
                                if ((td->td_flags & TDF_SWAPINREQ) == 0)
                                        pri -= p->p_nice * 8;
                                /*
                                 * if this thread is higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                tsleep(&proc0, PVM, "sched", MAXSLP * hz / 2);

        /*
         * Another process may be bringing or may have already
         * brought this process in while we traverse all threads.
         * Or, this process may even be being swapped out again.
         */
        if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {

        /*
         * We would like to bring someone in.  (only if there is space).
         * [What checks the space? ]
         */
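
/*
 * Selection sketch: for every swapped-out process the loop above computes
 * pri = "seconds swapped out" + "seconds the thread has slept", adjusted by
 * -p_nice * 8 unless a swap-in was explicitly requested (TDF_SWAPINREQ),
 * and the process with the largest value wins.  The winner is then faulted
 * back in via faultin() and the scheduler goes back to sleep.
 */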
/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
swapout_procs(action)

        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                int minslptime = 100000;

                /*
                 * Watch out for a process in
                 * creation.  It may have no
                 * address space or lock yet.
                 */
                if (p->p_state == PRS_NEW)
                        continue;
                /*
                 * An aio daemon switches its
                 * address space while running.
                 * Perform a quick check whether
                 * a process has P_SYSTEM.
                 */
                if ((p->p_flag & P_SYSTEM) != 0)
                        continue;
                /*
                 * Do not swapout a process that
                 * is waiting for VM data
                 * structures as there is a possible
                 * deadlock.  Test this first as
                 * this may block.
                 *
                 * Lock the map until swapout
                 * finishes, or a thread of this
                 * process may attempt to alter
                 * the map.
                 */
                vm = vmspace_acquire_ref(p);
                if (!vm_map_trylock(&vm->vm_map))

                if (p->p_lock != 0 ||
                    (p->p_flag & (P_STOPPED_SINGLE | P_TRACED | P_SYSTEM | P_WEXIT)

                /*
                 * only aiod changes vmspace, however it will be
                 * skipped because of the if statement above checking
                 * for P_SYSTEM.
                 */
                switch (p->p_state) {
                        /* Don't swap out processes in any sort
                         * of 'special' state. */

                /*
                 * do not swapout a realtime process
                 * Check all the thread groups..
                 */
                FOREACH_THREAD_IN_PROC(p, td) {
                        if (PRI_IS_REALTIME(td->td_pri_class)) {

                        slptime = (ticks - td->td_slptick) / hz;

                        /*
                         * Guarantee swap_idle_threshold1
                         * time in memory.
                         */
                        if (slptime < swap_idle_threshold1) {

                        /*
                         * Do not swapout a process if it is
                         * waiting on a critical event of some
                         * kind or there is a thread whose
                         * pageable memory may be accessed.
                         *
                         * This could be refined to support
                         * swapping out a thread.
                         */
                        if (!thread_safetoswapout(td)) {

                        /*
                         * If the system is under memory stress,
                         * or if we are swapping
                         * idle processes >= swap_idle_threshold2,
                         * then swap the process out.
                         */
                        if (((action & VM_SWAP_NORMAL) == 0) &&
                            (((action & VM_SWAP_IDLE) == 0) ||
                            (slptime < swap_idle_threshold2))) {

                        if (minslptime > slptime)
                                minslptime = slptime;

                /*
                 * If the pageout daemon didn't free enough pages,
                 * or if this process is idle and the system is
                 * configured to swap proactively, swap it out.
                 */
                if ((action & VM_SWAP_NORMAL) ||
                    ((action & VM_SWAP_IDLE) &&
                    (minslptime > swap_idle_threshold2))) {

                        vm_map_unlock(&vm->vm_map);
                        sx_sunlock(&allproc_lock);

                vm_map_unlock(&vm->vm_map);
        }
        sx_sunlock(&allproc_lock);
        /*
         * If we swapped something out, and another process needed memory,
         * then wakeup the sched process.
         */
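
/*
 * Usage sketch for the action argument: under memory shortage the pageout
 * daemon calls swapout_procs(VM_SWAP_NORMAL), which swaps out processes
 * whose threads have all slept for at least swap_idle_threshold1 seconds;
 * when idle swapping is enabled (vm.swap_idle_enabled) it may also call
 * swapout_procs(VM_SWAP_IDLE) to proactively swap out processes whose
 * shortest thread sleep time exceeds swap_idle_threshold2.
 */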
swapclear(p)

        PROC_LOCK_ASSERT(p, MA_OWNED);

        FOREACH_THREAD_IN_PROC(p, td) {
                td->td_flags |= TDF_INMEM;
                td->td_flags &= ~TDF_SWAPINREQ;
                if (setrunnable(td)) {
                        /*
                         * XXX: We just cleared TDI_SWAPPED
                         * above and set TDF_INMEM, so this
                         * should never happen.
                         */
                        panic("not waking up swapper");
                }
        }
        p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
        p->p_flag |= P_INMEM;
swapout(p)

        PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
            ("swapout: lost a swapout race?"));

        /*
         * remember the process resident count
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
        /*
         * Check and mark all threads before we proceed.
         */
        p->p_flag &= ~P_INMEM;
        p->p_flag |= P_SWAPPINGOUT;
        FOREACH_THREAD_IN_PROC(p, td) {
                if (!thread_safetoswapout(td)) {
                        swapclear(p);
                        return (EBUSY);
                }
                td->td_flags &= ~TDF_INMEM;
        }
        td = FIRST_THREAD_IN_PROC(p);
        ++td->td_ru.ru_nswap;

        /*
         * This list is stable because all threads are now prevented from
         * running.  The list is only modified in the context of a running
         * thread in this process.
         */
        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        p->p_flag &= ~P_SWAPPINGOUT;
        p->p_swtick = ticks;
#endif /* !NO_SWAPPING */