/*
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_glue.c     8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

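/* Maximum seconds a process may sleep before it becomes eligible for swapout. */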
extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));
        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
        return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions should
 * be used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        map = &curproc->p_vmspace->vm_map;
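        /*
         * Reject address ranges that wrap around or extend beyond the
         * end of the user map.
         */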
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
        return (rv == TRUE);
}

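/*
 * Wire the pages spanning the given range of user addresses so that they
 * can be accessed without faulting, subject to the global wired-page limit
 * and the process's RLIMIT_MEMLOCK resource limit.
 */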
int
vslock(void *addr, size_t len)
{
        vm_offset_t end, last, start;
        vm_size_t npages;
        int error;

        last = (vm_offset_t)addr + len;
        start = trunc_page((vm_offset_t)addr);
        end = round_page(last);
        if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
                return (EINVAL);
        npages = atop(end - start);
        if (npages > vm_page_max_wired)
                return (ENOMEM);
        PROC_LOCK(curproc);
        if (ptoa(npages +
            pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
            lim_cur(curproc, RLIMIT_MEMLOCK)) {
                PROC_UNLOCK(curproc);
                return (ENOMEM);
        }
        PROC_UNLOCK(curproc);
#if 0
        /*
         * XXX - not yet
         *
         * The limit for transient usage of wired pages should be
         * larger than for "permanent" wired pages (mlock()).
         *
         * Also, the sysctl code, which is the only present user
         * of vslock(), does a hard loop on EAGAIN.
         */
        if (npages + cnt.v_wire_count > vm_page_max_wired)
                return (EAGAIN);
#endif
        error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        /*
         * Return EFAULT on error to match copy{in,out}() behaviour
         * rather than returning ENOMEM like mlock() would.
         */
        return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

        /* Rely on the parameter sanity checks performed by vslock(). */
        (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
        vm_page_t ma[UAREA_PAGES];
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;
        u_int i;

        /*
         * Get a kernel virtual address for the U area for this process.
         */
        up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
        if (up == 0)
                panic("vm_proc_new: upage allocation failed");
        p->p_uarea = (struct user *)up;

        /*
         * Allocate object and page(s) for the U area.
         */
        upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
        p->p_upages_obj = upobj;
        VM_OBJECT_LOCK(upobj);
        for (i = 0; i < UAREA_PAGES; i++) {
                m = vm_page_grab(upobj, i,
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;

                vm_page_lock_queues();
                vm_page_wakeup(m);
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(upobj);

        /*
         * Enter the pages into the kernel address space.
         */
        pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose of the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 *
 * XXX UNUSED
 * U areas of free proc structures are no longer freed and are never
 * swapped out.  Ideally we would free U areas lazily, when low on memory.
 */
void
vm_proc_dispose(struct proc *p)
{
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_dispose: incorrect number of pages in upobj");
        vm_page_lock_queues();
        while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
                vm_page_busy(m);
                vm_page_unwire(m, 0);
                vm_page_free(m);
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qremove(up, UAREA_PAGES);
        kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
        vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_swapout: incorrect number of pages in upobj");
        vm_page_lock_queues();
        TAILQ_FOREACH(m, &upobj->memq, listq) {
                vm_page_dirty(m);
                vm_page_unwire(m, 0);
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
        vm_page_t ma[UAREA_PAGES];
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;
        int rv;
        int i;

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        for (i = 0; i < UAREA_PAGES; i++) {
                m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        rv = vm_pager_get_pages(upobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_proc_swapin: cannot get upage");
                }
                ma[i] = m;
        }
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_swapin: lost pages from upobj");
        vm_page_lock_queues();
        TAILQ_FOREACH(m, &upobj->memq, listq) {
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_wire(m);
                vm_page_wakeup(m);
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the U areas of all processes swapped out to the given device.
 * The pages in the U area are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(struct swdevt *devidx)
{
        struct proc *p;
        vm_object_t object;
        vm_page_t m;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                PROC_LOCK(p);
                object = p->p_upages_obj;
                if (object != NULL) {
                        VM_OBJECT_LOCK(object);
                        if (swap_pager_isswapped(object, devidx)) {
                                VM_OBJECT_UNLOCK(object);
                                sx_sunlock(&allproc_lock);
                                faultin(p);
                                PROC_UNLOCK(p);
                                VM_OBJECT_LOCK(object);
                                vm_page_lock_queues();
                                TAILQ_FOREACH(m, &object->memq, listq)
                                        vm_page_dirty(m);
                                vm_page_unlock_queues();
                                swap_pager_freespace(object, 0,
                                    object->un_pager.swp.swp_bcount);
                                VM_OBJECT_UNLOCK(object);
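                                /*
                                 * The allproc lock was dropped while
                                 * faulting the process back in, so the
                                 * list may have changed; rescan it from
                                 * the beginning.
                                 */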
                                goto retry;
                        }
                        VM_OBJECT_UNLOCK(object);
                }
                PROC_UNLOCK(p);
        }
        sx_sunlock(&allproc_lock);
}
#endif

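/*
 * Upper bound on the size of a kernel stack, in pages; may be overridden
 * by the KSTACK_MAX_PAGES kernel option.
 */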
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
 * thread creation performance for a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i;

        /* Bounds check */
        if (pages <= 1)
                pages = KSTACK_PAGES;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;
        /*
         * Allocate an object for the kstack.
         */
        ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
        td->td_kstack_obj = ksobj;
        /*
         * Get a kernel virtual address for this thread's kstack.
         */
        ks = kmem_alloc_nofault(kernel_map,
           (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
        if (ks == 0)
                panic("vm_thread_new: kstack allocation failed");
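        /*
         * Leave the guard pages below the stack unmapped so that a
         * kernel stack overflow faults immediately instead of silently
         * corrupting adjacent memory.
         */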
        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        }
        td->td_kstack = ks;
        /*
         * Knowing the number of pages allocated is useful when you
         * want to deallocate them.
         */
        td->td_kstack_pages = pages;
        /*
         * For the length of the stack, link in a real page of ram for each
         * page of stack.
         */
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                /*
                 * Get a kernel stack page.
                 */
                m = vm_page_grab(ksobj, i,
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;
                vm_page_lock_queues();
                vm_page_wakeup(m);
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(ks, ma, pages);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m;
        int i, pages;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        ks = td->td_kstack;
        pmap_qremove(ks, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_dispose: kstack already missing?");
                vm_page_lock_queues();
                vm_page_busy(m);
                vm_page_unwire(m, 0);
                vm_page_free(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        vm_object_deallocate(ksobj);
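        /*
         * The KVA allocation included the guard pages below the stack,
         * so back up over them before freeing the range.
         */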
        kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m;
        int i, pages;

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_lock_queues();
                vm_page_dirty(m);
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i, pages, rv;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
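                        /*
                         * The page was paged out; read its contents
                         * back in from the swap pager.
                         */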
                        rv = vm_pager_get_pages(ksobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_thread_swapin: cannot get kstack for proc: %d",
                                    td->td_proc->p_pid);
                        m = vm_page_lookup(ksobj, i);
                        m->valid = VM_PAGE_BITS_ALL;
                }
                ma[i] = m;
                vm_page_lock_queues();
                vm_page_wire(m);
                vm_page_wakeup(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

        td->td_altkstack = td->td_kstack;
        td->td_altkstack_obj = td->td_kstack_obj;
        td->td_altkstack_pages = td->td_kstack_pages;

        vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

        vm_thread_dispose(td);

        td->td_kstack = td->td_altkstack;
        td->td_kstack_obj = td->td_altkstack_obj;
        td->td_kstack_pages = td->td_altkstack_pages;
        td->td_altkstack = 0;
        td->td_altkstack_obj = NULL;
        td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2, int flags)
{
        struct proc *p1 = td->td_proc;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared.  Essentially,
                 * this turns memory shared amongst the threads into
                 * memory that is copy-on-write locally.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                vmspace_unshare(p1);
                        }
                }
                cpu_fork(td, p2, td2, flags);
                return;
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
        }

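        /* Do not let the fork proceed while free memory is critically low. */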
        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vmspace_fork(p1->p_vmspace);
                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /*
         * p_stats currently points at fields in the user struct.
         * Copy parts of p_stats; zero the rest of p_stats (statistics).
         */
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

        p2->p_stats = &p2->p_uarea->u_stats;
        bzero(&p2->p_stats->pstat_startzero,
            (unsigned) RANGEOF(struct pstats, pstat_startzero, pstat_endzero));
        bcopy(&p1->p_stats->pstat_startcopy, &p2->p_stats->pstat_startcopy,
            (unsigned) RANGEOF(struct pstats, pstat_startcopy, pstat_endcopy));
#undef RANGEOF

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
}

/*
 * Called after a process has been wait(2)ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

        vmspace_exitfree(p);            /* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(void *udata)
{
        struct proc *p = udata;
        struct plimit *limp;
        int rss_limit;

        /*
         * Set up the initial limits on process VM. Set the maximum resident
         * set size to be half of (reasonably) available memory.  Since this
         * is a soft limit, it comes into effect only when the system is out
         * of memory - half of main memory helps to favor smaller processes,
         * and reduces thrashing of the object cache.
         */
        limp = p->p_limit;
        limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
        limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
        /* limit the limit to no less than 2MB */
        rss_limit = max(cnt.v_free_count, 512);
        limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
        limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

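/*
 * Bring a swapped-out process back into memory: swap in its U area and
 * the kernel stacks of all its threads, then allow its runnable threads
 * to be scheduled again.  Called with the proc lock held.
 */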
void
faultin(struct proc *p)
{
#ifdef NO_SWAPPING

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((p->p_sflag & PS_INMEM) == 0)
                panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
        struct thread *td;

        GIANT_REQUIRED;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_sflag & PS_SWAPPINGIN)
                msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
        else if ((p->p_sflag & PS_INMEM) == 0) {
                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                ++p->p_lock;
                mtx_lock_spin(&sched_lock);
                p->p_sflag |= PS_SWAPPINGIN;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);

                vm_proc_swapin(p);
                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td);

                PROC_LOCK(p);
                mtx_lock_spin(&sched_lock);
                p->p_sflag &= ~PS_SWAPPINGIN;
                p->p_sflag |= PS_INMEM;
                FOREACH_THREAD_IN_PROC(p, td) {
                        TD_CLR_SWAPPED(td);
                        if (TD_CAN_RUN(td))
                                setrunnable(td);
                }
                mtx_unlock_spin(&sched_lock);

                wakeup(&p->p_sflag);

                /* Allow other threads to swap p out now. */
                --p->p_lock;
        }
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - the process with the highest-priority thread counts.
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(void *dummy)
{
        struct proc *p;
        struct thread *td;
        int pri;
        struct proc *pp;
        int ppri;

        mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
        /* GIANT_REQUIRED */

loop:
        if (vm_page_count_min()) {
                VM_WAIT;
                goto loop;
        }

        pp = NULL;
        ppri = INT_MIN;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct ksegrp *kg;
                if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                        continue;
                }
                mtx_lock_spin(&sched_lock);
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                kg = td->td_ksegrp;
                                pri = p->p_swtime + kg->kg_slptime;
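                                /*
                                 * Bias the priority by the nice value
                                 * unless a swapin has been explicitly
                                 * requested for this process.
                                 */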
                                if ((p->p_sflag & PS_SWAPINREQ) == 0) {
                                        pri -= p->p_nice * 8;
                                }

                                /*
                                 * If this ksegrp is higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                                if (pri > ppri) {
                                        pp = p;
                                        ppri = pri;
                                }
                        }
                }
                mtx_unlock_spin(&sched_lock);
        }
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
                goto loop;
        }
        PROC_LOCK(p);

        /*
         * Another process may be bringing or may have already
         * brought this process in while we traverse all threads.
         * Or, this process may even be being swapped out again.
         */
        if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                PROC_UNLOCK(p);
                goto loop;
        }

        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPINREQ;
        mtx_unlock_spin(&sched_lock);

        /*
         * We would like to bring someone in (only if there is space).
         * [What checks the space?]
         */
        faultin(p);
        PROC_UNLOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_swtime = 0;
        mtx_unlock_spin(&sched_lock);
        goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Otherwise, we swap the longest-sleeping or stopped
 * process, if any, and failing that the longest-resident process.
 */
void
swapout_procs(int action)
{
        struct proc *p;
        struct thread *td;
        struct ksegrp *kg;
        int didswap = 0;

        GIANT_REQUIRED;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct vmspace *vm;
                int minslptime = 100000;

                /*
                 * Watch out for a process in
                 * creation.  It may have no
                 * address space or lock yet.
                 */
                mtx_lock_spin(&sched_lock);
                if (p->p_state == PRS_NEW) {
                        mtx_unlock_spin(&sched_lock);
                        continue;
                }
                mtx_unlock_spin(&sched_lock);

                /*
                 * An aio daemon switches its
                 * address space while running.
                 * Perform a quick check whether
                 * a process has P_SYSTEM.
                 */
                if ((p->p_flag & P_SYSTEM) != 0)
                        continue;

                /*
                 * Do not swapout a process that
                 * is waiting for VM data
                 * structures as there is a possible
                 * deadlock.  Test this first as
                 * this may block.
                 *
                 * Lock the map until swapout
                 * finishes, or a thread of this
                 * process may attempt to alter
                 * the map.
                 */
                PROC_LOCK(p);
                vm = p->p_vmspace;
                KASSERT(vm != NULL,
                        ("swapout_procs: a process has no address space"));
                atomic_add_int(&vm->vm_refcnt, 1);
                PROC_UNLOCK(p);
                if (!vm_map_trylock(&vm->vm_map))
                        goto nextproc1;

                PROC_LOCK(p);
                if (p->p_lock != 0 ||
                    (p->p_flag & (P_STOPPED_SINGLE | P_TRACED | P_SYSTEM |
                    P_WEXIT)) != 0) {
                        goto nextproc2;
                }
                /*
                 * Only aiod changes vmspace; however, it will be
                 * skipped because of the if statement above checking
                 * for P_SYSTEM.
                 */
                if ((p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT |
                    PS_SWAPPINGIN)) != PS_INMEM)
                        goto nextproc2;

                switch (p->p_state) {
                default:
                        /*
                         * Don't swap out processes in any sort
                         * of 'special' state.
                         */
                        break;

                case PRS_NORMAL:
                        mtx_lock_spin(&sched_lock);
                        /*
                         * Do not swapout a realtime process.
                         * Check all the thread groups.
                         */
                        FOREACH_KSEGRP_IN_PROC(p, kg) {
                                if (PRI_IS_REALTIME(kg->kg_pri_class))
                                        goto nextproc;

                                /*
                                 * Guarantee swap_idle_threshold1
                                 * time in memory.
                                 */
                                if (kg->kg_slptime < swap_idle_threshold1)
                                        goto nextproc;

                                /*
                                 * Do not swapout a process if it is
                                 * waiting on a critical event of some
                                 * kind or there is a thread whose
                                 * pageable memory may be accessed.
                                 *
                                 * This could be refined to support
                                 * swapping out a thread.
                                 */
                                FOREACH_THREAD_IN_GROUP(kg, td) {
                                        if (td->td_priority < PSOCK ||
                                            !thread_safetoswapout(td))
                                                goto nextproc;
                                }
                                /*
                                 * If the system is under memory stress,
                                 * or if we are swapping
                                 * idle processes >= swap_idle_threshold2,
                                 * then swap the process out.
                                 */
                                if (((action & VM_SWAP_NORMAL) == 0) &&
                                    (((action & VM_SWAP_IDLE) == 0) ||
                                    (kg->kg_slptime < swap_idle_threshold2)))
                                        goto nextproc;

                                if (minslptime > kg->kg_slptime)
                                        minslptime = kg->kg_slptime;
                        }

                        /*
                         * If the pageout daemon didn't free enough pages,
                         * or if this process is idle and the system is
                         * configured to swap proactively, swap it out.
                         */
                        if ((action & VM_SWAP_NORMAL) ||
                                ((action & VM_SWAP_IDLE) &&
                                 (minslptime > swap_idle_threshold2))) {
                                swapout(p);
                                didswap++;
                                mtx_unlock_spin(&sched_lock);
                                PROC_UNLOCK(p);
                                vm_map_unlock(&vm->vm_map);
                                vmspace_free(vm);
                                sx_sunlock(&allproc_lock);
                                goto retry;
                        }
nextproc:
                        mtx_unlock_spin(&sched_lock);
                }
nextproc2:
                PROC_UNLOCK(p);
                vm_map_unlock(&vm->vm_map);
nextproc1:
                vmspace_free(vm);
                continue;
        }
        sx_sunlock(&allproc_lock);
        /*
         * If we swapped something out, and another process needed memory,
         * then wakeup the sched process.
         */
        if (didswap)
                wakeup(&proc0);
}

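/*
 * Swap out a single process: unwire its U area and the kernel stacks of
 * all its threads.  Expects the proc lock and sched_lock to be held on
 * entry; both are dropped around the unwiring and reacquired before
 * returning.
 */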
static void
swapout(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) ==
            PS_INMEM, ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
        /*
         * Make sure that all threads are safe to be swapped out.
         *
         * Alternatively, we could swap out only safe threads.
         */
        FOREACH_THREAD_IN_PROC(p, td) {
                KASSERT(thread_safetoswapout(td),
                        ("swapout: there is a thread not safe for swapout"));
        }
#endif /* INVARIANTS */

        ++p->p_stats->p_ru.ru_nswap;
        /*
         * Remember the process's resident count.
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

        p->p_sflag &= ~PS_INMEM;
        p->p_sflag |= PS_SWAPPINGOUT;
        PROC_UNLOCK(p);
        FOREACH_THREAD_IN_PROC(p, td)
                TD_SET_SWAPPED(td);
        mtx_unlock_spin(&sched_lock);

        vm_proc_swapout(p);
        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPPINGOUT;
        p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */