/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/unistd.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
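/*
 * The SYSINIT() below registers vm_init_limits() to run during boot at the
 * SI_SUB_VM_CONF stage with &proc0 as its argument; the limits it sets on
 * proc0 are then inherited by every other process.
 */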
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
/*
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
kernacc(addr, len, rw)
        vm_offset_t saddr, eaddr;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));

        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
/*
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
useracc(addr, len, rw)

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));

        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
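/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * typical consumer checks useracc() before touching user memory, but per
 * the warning above it must still use a faulting copy routine, because
 * only the vm_map_entry protection is inspected here.  Kept out of the
 * build with #if 0.
 */
#if 0
static int
example_fetch_user_buffer(void *uaddr, void *kbuf, size_t len)
{

        if (!useracc(uaddr, len, VM_PROT_READ))
                return (EFAULT);
        /* copyin() may still fault; the useracc() check is only advisory. */
        return (copyin(uaddr, kbuf, len));
}
#endif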
vslock(void *addr, size_t len)
        vm_offset_t end, last, start;

        last = (vm_offset_t)addr + len;
        start = trunc_page((vm_offset_t)addr);
        end = round_page(last);
        if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
        npages = atop(end - start);
        if (npages > vm_page_max_wired)
            pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
            lim_cur(curproc, RLIMIT_MEMLOCK)) {
                PROC_UNLOCK(curproc);
        PROC_UNLOCK(curproc);
        /*
         * The limit for transient usage of wired pages should be
         * larger than for "permanent" wired pages (mlock()).
         *
         * Also, the sysctl code, which is the only present user
         * of vslock(), does a hard loop on EAGAIN.
         */
        if (npages + cnt.v_wire_count > vm_page_max_wired)
        error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        /*
         * Return EFAULT on error to match copy{in,out}() behaviour
         * rather than returning ENOMEM like mlock() would.
         */
        return (error == KERN_SUCCESS ? 0 : EFAULT);
vsunlock(void *addr, size_t len)

        /* Rely on the parameter sanity checks performed by vslock(). */
        (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
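/*
 * Illustrative sketch (hypothetical, not part of this file): the pattern the
 * comments above describe, as used by the sysctl code, which wires a user
 * buffer around a copyout() so the copy cannot fault on a paged-out page.
 * Error handling is abbreviated; kept out of the build with #if 0.
 */
#if 0
static int
example_wired_copyout(const void *kbuf, void *uaddr, size_t len)
{
        int error;

        error = vslock(uaddr, len);     /* nonzero errno on failure */
        if (error != 0)
                return (error);
        error = copyout(kbuf, uaddr, len);
        vsunlock(uaddr, len);
        return (error);
}
#endif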
/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
vm_proc_new(struct proc *p)
        vm_page_t ma[UAREA_PAGES];

        /*
         * Get a kernel virtual address for the U area for this process.
         */
        up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
                panic("vm_proc_new: upage allocation failed");
        p->p_uarea = (struct user *)up;

        /*
         * Allocate object and page(s) for the U area.
         */
        upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
        p->p_upages_obj = upobj;
        VM_OBJECT_LOCK(upobj);
        for (i = 0; i < UAREA_PAGES; i++) {
                m = vm_page_grab(upobj, i,
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                vm_page_lock_queues();
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);

        /*
         * Enter the pages into the kernel address space.
         */
        pmap_qenter(up, ma, UAREA_PAGES);
/*
 * Dispose of the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 *
 * U areas of free proc structures are no longer freed and are never
 * swapped out.  Ideally we would free U areas lazily, when low on memory.
 */
vm_proc_dispose(struct proc *p)

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_dispose: incorrect number of pages in upobj");
        vm_page_lock_queues();
        while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
                vm_page_unwire(m, 0);
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qremove(up, UAREA_PAGES);
        kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
        vm_object_deallocate(upobj);
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
vm_proc_swapout(struct proc *p)

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_swapout: incorrect number of pages in upobj");
        vm_page_lock_queues();
        TAILQ_FOREACH(m, &upobj->memq, listq) {
                vm_page_unwire(m, 0);
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qremove(up, UAREA_PAGES);
/*
 * Bring the U area for a specified process back in.
 */
vm_proc_swapin(struct proc *p)
        vm_page_t ma[UAREA_PAGES];

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        for (i = 0; i < UAREA_PAGES; i++) {
                m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        rv = vm_pager_get_pages(upobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_proc_swapin: cannot get upage");
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_swapin: lost pages from upobj");
        vm_page_lock_queues();
        TAILQ_FOREACH(m, &upobj->memq, listq) {
                m->valid = VM_PAGE_BITS_ALL;
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qenter(up, ma, UAREA_PAGES);
/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
vm_proc_swapin_all(struct swdevt *devidx)

        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                object = p->p_upages_obj;
                if (object != NULL) {
                        VM_OBJECT_LOCK(object);
                        if (swap_pager_isswapped(object, devidx)) {
                                VM_OBJECT_UNLOCK(object);
                                sx_sunlock(&allproc_lock);
                                VM_OBJECT_LOCK(object);
                                vm_page_lock_queues();
                                TAILQ_FOREACH(m, &object->memq, listq)
                                vm_page_unlock_queues();
                                swap_pager_freespace(object, 0,
                                    object->un_pager.swp.swp_bcount);
                                VM_OBJECT_UNLOCK(object);
                        VM_OBJECT_UNLOCK(object);
        sx_sunlock(&allproc_lock);
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
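/*
 * KSTACK_MAX_PAGES bounds the per-thread kernel stack size; the 32-page
 * default here can be overridden with the corresponding kernel option
 * (see the opt_kstack_max_pages.h include above).
 */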
/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and the
 * creation performance for a thread.
 */
vm_thread_new(struct thread *td, int pages)
        vm_page_t m, ma[KSTACK_MAX_PAGES];

                pages = KSTACK_PAGES;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;
        /*
         * Allocate an object for the kstack.
         */
        ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
        td->td_kstack_obj = ksobj;
        /*
         * Get a kernel virtual address for this thread's kstack.
         */
        ks = kmem_alloc_nofault(kernel_map,
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
                panic("vm_thread_new: kstack allocation failed");
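        /*
         * The guard pages below the stack proper are left unmapped (see the
         * pmap_qremove() below), so a kernel stack overflow faults
         * immediately instead of silently overwriting adjacent memory.
         */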
        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        /*
         * Knowing the number of pages allocated is useful when you
         * want to deallocate them.
         */
        td->td_kstack_pages = pages;
        /*
         * For the length of the stack, link in a real page of ram for each
         * actual kstack page.
         */
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                /*
                 * Get a kernel stack page.
                 */
                m = vm_page_grab(ksobj, i,
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                vm_page_lock_queues();
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(ks, ma, pages);
/*
 * Dispose of a thread's kernel stack.
 */
vm_thread_dispose(struct thread *td)

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(ks, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                        panic("vm_thread_dispose: kstack already missing?");
                vm_page_lock_queues();
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(ksobj);
        vm_object_deallocate(ksobj);
        kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
/*
 * Allow a thread's kernel stack to be paged out.
 */
vm_thread_swapout(struct thread *td)

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_lock_queues();
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(ksobj);
/*
 * Bring the kernel stack for a specified thread back in.
 */
vm_thread_swapin(struct thread *td)
        vm_page_t m, ma[KSTACK_MAX_PAGES];

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        rv = vm_pager_get_pages(ksobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_thread_swapin: cannot get kstack for proc: %d",
                                    td->td_proc->p_pid);
                        m = vm_page_lookup(ksobj, i);
                        m->valid = VM_PAGE_BITS_ALL;
                vm_page_lock_queues();
                vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
/*
 * Set up a variable-sized alternate kstack.
 */
vm_thread_new_altkstack(struct thread *td, int pages)

        td->td_altkstack = td->td_kstack;
        td->td_altkstack_obj = td->td_kstack_obj;
        td->td_altkstack_pages = td->td_kstack_pages;

        vm_thread_new(td, pages);

/*
 * Restore the original kstack.
 */
vm_thread_dispose_altkstack(struct thread *td)

        vm_thread_dispose(td);

        td->td_kstack = td->td_altkstack;
        td->td_kstack_obj = td->td_altkstack_obj;
        td->td_kstack_pages = td->td_altkstack_pages;
        td->td_altkstack = 0;
        td->td_altkstack_obj = NULL;
        td->td_altkstack_pages = 0;
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
vm_forkproc(td, p2, td2, flags)
        struct proc *p1 = td->td_proc;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared, essentially
                 * this changes shared memory amongst threads, into
                 * COW locally.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                cpu_fork(td, p2, td2, flags);
                p2->p_vmspace = p1->p_vmspace;
                atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);

        while (vm_page_count_severe()) {
        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vmspace_fork(p1->p_vmspace);
                if (p1->p_vmspace->vm_shm)

        /*
         * p_stats currently points at fields in the user struct.
         * Copy parts of p_stats; zero the rest of p_stats (statistics).
         */
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
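/*
 * RANGEOF() gives the size in bytes of the span of members from "start" up
 * to (but not including) "end" within "type".  For example, with two
 * consecutive 4-byte members a and b followed by c, RANGEOF(type, a, c)
 * would be 8 (assuming no padding); the bzero()/bcopy() below use it to
 * clear the fork-zeroed statistics and copy the inherited ones.
 */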
        p2->p_stats = &p2->p_uarea->u_stats;
        bzero(&p2->p_stats->pstat_startzero,
            (unsigned) RANGEOF(struct pstats, pstat_startzero, pstat_endzero));
        bcopy(&p1->p_stats->pstat_startcopy, &p2->p_stats->pstat_startcopy,
            (unsigned) RANGEOF(struct pstats, pstat_startcopy, pstat_endcopy));

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
        vmspace_exitfree(p);            /* and clean-out the vmspace */

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
vm_init_limits(udata)
        struct proc *p = udata;

        /*
         * Set up the initial limits on process VM.  Set the maximum resident
         * set size to be half of (reasonably) available memory.  Since this
         * is a soft limit, it comes into effect only when the system is out
         * of memory - half of main memory helps to favor smaller processes,
         * and reduces thrashing of the object cache.
         */
        limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
        limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
        /* limit the limit to no less than 2MB */
        rss_limit = max(cnt.v_free_count, 512);
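        /* 512 pages is 2MB with the common 4KB PAGE_SIZE (512 * 4096 bytes). */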
        limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
        limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((p->p_sflag & PS_INMEM) == 0)
                panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */

        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_sflag & PS_SWAPPINGIN)
                msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
        else if ((p->p_sflag & PS_INMEM) == 0) {
                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                mtx_lock_spin(&sched_lock);
                p->p_sflag |= PS_SWAPPINGIN;
                mtx_unlock_spin(&sched_lock);

                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td);

                mtx_lock_spin(&sched_lock);
                p->p_sflag &= ~PS_SWAPPINGIN;
                p->p_sflag |= PS_INMEM;
                FOREACH_THREAD_IN_PROC(p, td) {
                mtx_unlock_spin(&sched_lock);

        /* Allow other threads to swap p out now. */
#endif /* NO_SWAPPING */
/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
        mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);

        if (vm_page_count_min()) {

        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                mtx_lock_spin(&sched_lock);
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                pri = p->p_swtime + kg->kg_slptime;
                                if ((p->p_sflag & PS_SWAPINREQ) == 0) {
                                        pri -= p->p_nice * 8;
                                /*
                                 * if this ksegrp is higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                mtx_unlock_spin(&sched_lock);
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                tsleep(&proc0, PVM, "sched", maxslp * hz / 2);

        /*
         * Another process may be bringing or may have already
         * brought this process in while we traverse all threads.
         * Or, this process may even be being swapped out again.
         */
        if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {

        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPINREQ;
        mtx_unlock_spin(&sched_lock);

        /*
         * We would like to bring someone in.  (only if there is space).
         * [What checks the space? ]
         */
        mtx_lock_spin(&sched_lock);
        mtx_unlock_spin(&sched_lock);
/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
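/*
 * Both thresholds are declared CTLFLAG_RW, so they may be tuned at run
 * time (e.g. via "sysctl vm.swap_idle_threshold2=20"); swapout_procs()
 * below compares them against each ksegrp's kg_slptime.
 */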
/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
swapout_procs(action)

        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                int minslptime = 100000;

                /*
                 * Watch out for a process in
                 * creation.  It may have no
                 * address space or lock yet.
                 */
                mtx_lock_spin(&sched_lock);
                if (p->p_state == PRS_NEW) {
                        mtx_unlock_spin(&sched_lock);
                mtx_unlock_spin(&sched_lock);
                /*
                 * An aio daemon switches its
                 * address space while running.
                 * Perform a quick check whether
                 * a process has P_SYSTEM.
                 */
                if ((p->p_flag & P_SYSTEM) != 0)
                /*
                 * Do not swapout a process that
                 * is waiting for VM data
                 * structures as there is a possible
                 * deadlock.  Test this first as
                 * this may block.
                 *
                 * Lock the map until swapout
                 * finishes, or a thread of this
                 * process may attempt to alter
                 * the map.
                 */
                    ("swapout_procs: a process has no address space"));
                atomic_add_int(&vm->vm_refcnt, 1);
                if (!vm_map_trylock(&vm->vm_map))
                if (p->p_lock != 0 ||
                    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
                /*
                 * only aiod changes vmspace, however it will be
                 * skipped because of the if statement above checking
                 * for P_SYSTEM.
                 */
                if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
                switch (p->p_state) {
                        /* Don't swap out processes in any sort
                         * of 'special' state. */
                        mtx_lock_spin(&sched_lock);
                        /*
                         * do not swapout a realtime process
                         * Check all the thread groups..
                         */
                        FOREACH_KSEGRP_IN_PROC(p, kg) {
                                if (PRI_IS_REALTIME(kg->kg_pri_class))
                                /*
                                 * Guarantee swap_idle_threshold1
                                 * time in memory.
                                 */
                                if (kg->kg_slptime < swap_idle_threshold1)
                                /*
                                 * Do not swapout a process if it is
                                 * waiting on a critical event of some
                                 * kind or there is a thread whose
                                 * pageable memory may be accessed.
                                 *
                                 * This could be refined to support
                                 * swapping out a thread.
                                 */
                                FOREACH_THREAD_IN_GROUP(kg, td) {
                                        if ((td->td_priority) < PSOCK ||
                                            !thread_safetoswapout(td))
                                /*
                                 * If the system is under memory stress,
                                 * or if we are swapping
                                 * idle processes >= swap_idle_threshold2,
                                 * then swap the process out.
                                 */
                                if (((action & VM_SWAP_NORMAL) == 0) &&
                                    (((action & VM_SWAP_IDLE) == 0) ||
                                    (kg->kg_slptime < swap_idle_threshold2)))
                                if (minslptime > kg->kg_slptime)
                                        minslptime = kg->kg_slptime;
                /*
                 * If the pageout daemon didn't free enough pages,
                 * or if this process is idle and the system is
                 * configured to swap proactively, swap it out.
                 */
                if ((action & VM_SWAP_NORMAL) ||
                    ((action & VM_SWAP_IDLE) &&
                    (minslptime > swap_idle_threshold2))) {
                        mtx_unlock_spin(&sched_lock);
                        vm_map_unlock(&vm->vm_map);
                        sx_sunlock(&allproc_lock);
                mtx_unlock_spin(&sched_lock);
                vm_map_unlock(&vm->vm_map);
        sx_sunlock(&allproc_lock);

        /*
         * If we swapped something out, and another process needed memory,
         * then wakeup the sched process.
         */
        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif
        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
            ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
        /*
         * Make sure that all threads are safe to be swapped out.
         *
         * Alternatively, we could swap out only safe threads.
         */
        FOREACH_THREAD_IN_PROC(p, td) {
                KASSERT(thread_safetoswapout(td),
                    ("swapout: there is a thread not safe for swapout"));
#endif /* INVARIANTS */

        ++p->p_stats->p_ru.ru_nswap;
        /*
         * remember the process resident count
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

        p->p_sflag &= ~PS_INMEM;
        p->p_sflag |= PS_SWAPPINGOUT;
        FOREACH_THREAD_IN_PROC(p, td)
        mtx_unlock_spin(&sched_lock);

        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPPINGOUT;
#endif /* !NO_SWAPPING */