/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/domainset.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/unistd.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_phys.h>

#include <machine/cpu.h>

#if VM_NRESERVLEVEL > 0
#define	KVA_KSTACK_QUANTUM_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
#else
#define	KVA_KSTACK_QUANTUM_SHIFT	(8 + PAGE_SHIFT)
#endif
#define	KVA_KSTACK_QUANTUM	(1ul << KVA_KSTACK_QUANTUM_SHIFT)

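/*
 * Example (illustrative values only): on a configuration where PAGE_SHIFT
 * is 12 and no superpage reservations are configured (VM_NRESERVLEVEL == 0),
 * the fallback shift is 8 + 12 = 20, so KVA_KSTACK_QUANTUM covers
 * 1ul << 20 bytes (1 MB) of kernel virtual address space per import.
 */
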
/*
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
kernacc(void *addr, int len, int rw)
        vm_offset_t saddr, eaddr;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));

        if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr)

        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);

/*
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
useracc(void *addr, int len, int rw)
        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));

        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {

        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);

vslock(void *addr, size_t len)
        vm_offset_t end, last, start;

        last = (vm_offset_t)addr + len;
        start = trunc_page((vm_offset_t)addr);
        end = round_page(last);
        if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)

        npages = atop(end - start);
        if (npages > vm_page_max_user_wired)

        error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        if (error == KERN_SUCCESS) {
                curthread->td_vslock_sz += len;

        /*
         * Return EFAULT on error to match copy{in,out}() behaviour
         * rather than returning ENOMEM like mlock() would.
         */

vsunlock(void *addr, size_t len)

        /* Rely on the parameter sanity checks performed by vslock(). */
        MPASS(curthread->td_vslock_sz >= len);
        curthread->td_vslock_sz -= len;
        (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
        pindex = OFF_TO_IDX(offset);
        (void)vm_page_grab_valid_unlocked(&m, object, pindex,
            VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
        m = vm_imgact_hold_page(object, offset);

        return (sf_buf_alloc(m, SFB_CPUPRIVATE));

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
vm_imgact_unmap_page(struct sf_buf *sf)
        vm_page_unwire(m, PQ_ACTIVE);

vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
        pmap_sync_icache(map->pmap, va, sz);

static vm_object_t kstack_object;
static vm_object_t kstack_alt_object;
static uma_zone_t kstack_cache;
static int kstack_cache_size;
static vmem_t *vmd_kstack_arena[MAXMEMDOM];

sysctl_kstack_cache_size(SYSCTL_HANDLER_ARGS)
        oldsize = kstack_cache_size;
        error = sysctl_handle_int(oidp, arg1, arg2, req);
        if (error == 0 && req->newptr && oldsize != kstack_cache_size)
                uma_zone_set_maxcache(kstack_cache, kstack_cache_size);

SYSCTL_PROC(_vm, OID_AUTO, kstack_cache_size,
    CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &kstack_cache_size, 0,
    sysctl_kstack_cache_size, "IU", "Maximum number of cached kernel stacks");

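/*
 * The knob above is exported as vm.kstack_cache_size; a successful write is
 * pushed into the UMA cache zone by the handler.  Typical tuning from
 * userland would look like the following (the value is illustrative only):
 *
 *	# sysctl vm.kstack_cache_size=512
 */
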
/*
 * Allocate a virtual address range from a domain kstack arena, following
 * the specified NUMA policy.
 */
vm_thread_alloc_kstack_kva(vm_size_t size, int domain)
        vm_offset_t addr = 0;

        size = round_page(size);
        /* Allocate from the kernel arena for non-standard kstack sizes. */
        if (size != ptoa(kstack_pages + KSTACK_GUARD_PAGES)) {
                arena = vm_dom[domain].vmd_kernel_arena;

                arena = vmd_kstack_arena[domain];

        rv = vmem_alloc(arena, size, M_BESTFIT | M_NOWAIT, &addr);

        KASSERT(atop(addr - VM_MIN_KERNEL_ADDRESS) %
            (kstack_pages + KSTACK_GUARD_PAGES) == 0,
            ("%s: allocated kstack KVA not aligned to multiple of kstack size",

        return (kva_alloc(size));

/*
 * Release a region of kernel virtual memory
 * allocated from the kstack arena.
 */
static __noinline void
vm_thread_free_kstack_kva(vm_offset_t addr, vm_size_t size, int domain)
        size = round_page(size);

        arena = kernel_arena;

        arena = vmd_kstack_arena[domain];
        if (size != ptoa(kstack_pages + KSTACK_GUARD_PAGES)) {
                arena = vm_dom[domain].vmd_kernel_arena;

        vmem_free(arena, addr, size);

vm_thread_kstack_import_quantum(void)
        /*
         * The kstack_quantum is larger than KVA_QUANTUM to account
         * for holes induced by guard pages.
         */
        return (KVA_KSTACK_QUANTUM * (kstack_pages + KSTACK_GUARD_PAGES));

        return (KVA_KSTACK_QUANTUM);

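/*
 * Sizing example with illustrative values: if kstack_pages == 4 and
 * KSTACK_GUARD_PAGES == 1, each import spans 5 * KVA_KSTACK_QUANTUM bytes.
 * Carving that span into 5-page (stack + guard) slots yields exactly
 * KVA_KSTACK_QUANTUM / PAGE_SIZE slots, so no slot straddles an import
 * boundary.
 */
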
/*
 * Import KVA from a parent arena into the kstack arena.  Imports must be
 * a multiple of kernel stack pages + guard pages in size.
 *
 * Kstack VA allocations need to be aligned so that the linear KVA pindex
 * is divisible by the total number of kstack VA pages.  This is necessary to
 * make vm_kstack_pindex work properly.
 *
 * We import a multiple of KVA_KSTACK_QUANTUM-sized region from the parent
 * arena.  The actual size used by the kstack arena is one kstack smaller to
 * allow for the necessary alignment adjustments to be made.
 */
vm_thread_kstack_arena_import(void *arena, vmem_size_t size, int flags,
        size_t kpages = kstack_pages + KSTACK_GUARD_PAGES;

        KASSERT(atop(size) % kpages == 0,
            ("%s: Size %jd is not a multiple of kstack pages (%d)", __func__,
            (intmax_t)size, (int)kpages));

        error = vmem_xalloc(arena, vm_thread_kstack_import_quantum(),
            KVA_KSTACK_QUANTUM, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags,

        rem = atop(*addrp - VM_MIN_KERNEL_ADDRESS) % kpages;

        /* Bump addr to next aligned address */
        *addrp = *addrp + (kpages - rem) * PAGE_SIZE;

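/*
 * Alignment example with illustrative numbers: with kpages == 5, an import
 * whose start maps to linear pindex 1002 has rem == 1002 % 5 == 2, so the
 * address is bumped by (5 - 2) pages and the arena hands out VA starting at
 * pindex 1005, a multiple of the stack-plus-guard size.
 */
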
/*
 * Release KVA from a parent arena into the kstack arena.  Released imports must
 * be a multiple of kernel stack pages + guard pages in size.
 */
vm_thread_kstack_arena_release(void *arena, vmem_addr_t addr, vmem_size_t size)
        size_t kpages __diagused = kstack_pages + KSTACK_GUARD_PAGES;

        KASSERT(size % kpages == 0,
            ("%s: Size %jd is not a multiple of kstack pages (%d)", __func__,
            (intmax_t)size, (int)kpages));

        KASSERT((addr - VM_MIN_KERNEL_ADDRESS) % kpages == 0,
            ("%s: Address %p is not properly aligned (%p)", __func__,
            (void *)addr, (void *)VM_MIN_KERNEL_ADDRESS));

        /*
         * If the address is not KVA_KSTACK_QUANTUM-aligned we have to decrement
         * it to account for the shift in kva_import_kstack.
         */
        rem = addr % KVA_KSTACK_QUANTUM;

        KASSERT(rem <= ptoa(kpages),
            ("%s: rem > kpages (%d), (%d)", __func__, rem,

        vmem_xfree(arena, addr, vm_thread_kstack_import_quantum());

/*
 * Create the kernel stack for a new thread.
 */
vm_thread_stack_create(struct domainset *ds, int pages)
        vm_page_t ma[KSTACK_MAX_PAGES];
        struct vm_domainset_iter di;
        int req = VM_ALLOC_NORMAL;

        obj = vm_thread_kstack_size_to_obj(pages);

        obj->domain.dr_policy = ds;
        vm_domainset_iter_page_init(&di, obj, 0, &domain, &req);
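        /*
         * The loop below walks the domains permitted by the iterator set up
         * above: each pass tries to reserve KVA and then physical pages in
         * the current domain, moving on to the next candidate domain when
         * either allocation fails.
         */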
                /*
                 * Get a kernel virtual address for this thread's kstack.
                 */
                ks = vm_thread_alloc_kstack_kva(ptoa(pages + KSTACK_GUARD_PAGES),

                ks += ptoa(KSTACK_GUARD_PAGES);

                /*
                 * Allocate physical pages to back the stack.
                 */
                if (vm_thread_stack_back(ks, ma, pages, req, domain) != 0) {
                        vm_thread_free_kstack_kva(ks - ptoa(KSTACK_GUARD_PAGES),
                            ptoa(pages + KSTACK_GUARD_PAGES), domain);

                if (KSTACK_GUARD_PAGES != 0) {
                        pmap_qremove(ks - ptoa(KSTACK_GUARD_PAGES),

                for (i = 0; i < pages; i++)
                        vm_page_valid(ma[i]);
                pmap_qenter(ks, ma, pages);

        } while (vm_domainset_iter_page(&di, obj, &domain) == 0);

static __noinline void
vm_thread_stack_dispose(vm_offset_t ks, int pages)
        vm_object_t obj = vm_thread_kstack_size_to_obj(pages);

        pindex = vm_kstack_pindex(ks, pages);
        domain = vm_phys_domain(vtophys(ks));
        pmap_qremove(ks, pages);
        VM_OBJECT_WLOCK(obj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(obj, pindex + i);
                if (m == NULL)
                        panic("%s: kstack already missing?", __func__);
                KASSERT(vm_page_domain(m) == domain,
                    ("%s: page %p domain mismatch, expected %d got %d",
                    __func__, m, domain, vm_page_domain(m)));
                vm_page_xbusy_claim(m);
                vm_page_unwire_noq(m);

        VM_OBJECT_WUNLOCK(obj);
        kasan_mark((void *)ks, ptoa(pages), ptoa(pages), 0);
        vm_thread_free_kstack_kva(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            ptoa(pages + KSTACK_GUARD_PAGES), domain);

/*
 * Allocate the kernel stack for a new thread.
 */
vm_thread_new(struct thread *td, int pages)
                pages = kstack_pages;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;

        if (pages == kstack_pages && kstack_cache != NULL)
                ks = (vm_offset_t)uma_zalloc(kstack_cache, M_NOWAIT);

        /*
         * Ensure that kstack objects can draw pages from any memory
         * domain.  Otherwise a local memory shortage can block a process
         * swap-in.
         */
                ks = vm_thread_stack_create(DOMAINSET_PREF(PCPU_GET(domain)),

        ks_domain = vm_phys_domain(vtophys(ks));
        KASSERT(ks_domain >= 0 && ks_domain < vm_ndomains,
            ("%s: invalid domain for kstack %p", __func__, (void *)ks));

        td->td_kstack_pages = pages;
        td->td_kstack_domain = ks_domain;

/*
 * Dispose of a thread's kernel stack.
 */
vm_thread_dispose(struct thread *td)
        pages = td->td_kstack_pages;

        td->td_kstack_pages = 0;
        td->td_kstack_domain = MAXMEMDOM;
        if (pages == kstack_pages) {
                kasan_mark((void *)ks, 0, ptoa(pages), KASAN_KSTACK_FREED);
                uma_zfree(kstack_cache, (void *)ks);

                vm_thread_stack_dispose(ks, pages);

/*
 * Calculate kstack pindex.
 *
 * Uses a non-identity mapping if guard pages are
 * active to avoid pindex holes in the kstack object.
 */
vm_kstack_pindex(vm_offset_t ks, int kpages)
        vm_pindex_t pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);

        /*
         * Return the linear pindex if guard pages aren't active or if we are
         * allocating a non-standard kstack size.
         */
        if (KSTACK_GUARD_PAGES == 0 || kpages != kstack_pages) {

        KASSERT(pindex % (kpages + KSTACK_GUARD_PAGES) >= KSTACK_GUARD_PAGES,
            ("%s: Attempting to calculate kstack guard page pindex", __func__));

            (pindex / (kpages + KSTACK_GUARD_PAGES) + 1) * KSTACK_GUARD_PAGES);

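/*
 * Worked example with illustrative values: with kstack_pages == 4 and
 * KSTACK_GUARD_PAGES == 1, each stack occupies linear pindexes 5n+1..5n+4
 * (5n is the guard hole).  The mapping above sends those to object pindexes
 * 4n..4n+3, keeping the kstack object densely populated.
 */
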
/*
 * Allocate physical pages, following the specified NUMA policy, to back a
 * kernel stack.
 */
vm_thread_stack_back(vm_offset_t ks, vm_page_t ma[], int npages, int req_class,
        vm_object_t obj = vm_thread_kstack_size_to_obj(npages);

        pindex = vm_kstack_pindex(ks, npages);

        VM_OBJECT_WLOCK(obj);
        for (n = 0; n < npages;) {
                m = vm_page_grab(obj, pindex + n,
                    VM_ALLOC_NOCREAT | VM_ALLOC_WIRED);

                        m = vm_page_alloc_domain(obj, pindex + n, domain,
                            req_class | VM_ALLOC_WIRED);

        VM_OBJECT_WUNLOCK(obj);

        vm_object_page_remove(obj, pindex, pindex + n, 0);
        VM_OBJECT_WUNLOCK(obj);

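/*
 * Standard-sized stacks live in kstack_object, where the packed pindex
 * mapping from vm_kstack_pindex() keeps the object dense; stacks created
 * with a non-default page count are tracked separately in kstack_alt_object.
 */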
vm_thread_kstack_size_to_obj(int npages)
        return (npages == kstack_pages ? kstack_object : kstack_alt_object);

kstack_import(void *arg, void **store, int cnt, int domain, int flags)
        struct domainset *ds;

        if (domain == UMA_ANYDOMAIN)

                ds = DOMAINSET_PREF(domain);

        for (i = 0; i < cnt; i++) {
                store[i] = (void *)vm_thread_stack_create(ds, kstack_pages);
                if (store[i] == NULL)

kstack_release(void *arg, void **store, int cnt)
        for (i = 0; i < cnt; i++) {
                ks = (vm_offset_t)store[i];
                vm_thread_stack_dispose(ks, kstack_pages);

kstack_cache_init(void *null)
        vm_size_t kstack_quantum;

        kstack_object = vm_object_allocate(OBJT_SWAP,
            atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
        kstack_cache = uma_zcache_create("kstack_cache",
            kstack_pages * PAGE_SIZE, NULL, NULL, NULL, NULL,
            kstack_import, kstack_release, NULL,
            UMA_ZONE_FIRSTTOUCH);
        kstack_cache_size = imax(128, mp_ncpus * 4);
        uma_zone_set_maxcache(kstack_cache, kstack_cache_size);

        kstack_alt_object = vm_object_allocate(OBJT_SWAP,
            atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));

        kstack_quantum = vm_thread_kstack_import_quantum();
        /*
         * Reduce size used by the kstack arena to allow for
         * alignment adjustments in vm_thread_kstack_arena_import.
         */
        kstack_quantum -= (kstack_pages + KSTACK_GUARD_PAGES) * PAGE_SIZE;
        /*
         * Create the kstack_arena for each domain and set kernel_arena as
         * parent.
         */
        for (domain = 0; domain < vm_ndomains; domain++) {
                vmd_kstack_arena[domain] = vmem_create("kstack arena", 0, 0,
                    PAGE_SIZE, 0, M_WAITOK);
                KASSERT(vmd_kstack_arena[domain] != NULL,
                    ("%s: failed to create domain %d kstack_arena", __func__,

                vmem_set_import(vmd_kstack_arena[domain],
                    vm_thread_kstack_arena_import,
                    vm_thread_kstack_arena_release,
                    vm_dom[domain].vmd_kernel_arena, kstack_quantum);

SYSINIT(vm_kstacks, SI_SUB_KMEM, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifdef KSTACK_USAGE_PROF
/*
 * Track maximum stack used by a thread in kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,

    "Maximum stack depth used by a thread in kernel");

intr_prof_stack_use(struct thread *td, struct trapframe *frame)
        vm_offset_t stack_top;

        /*
         * Testing for interrupted kernel mode isn't strictly
         * needed.  It optimizes the execution, since interrupts from
         * usermode will have only the trap frame on the stack.
         */
        if (TRAPF_USERMODE(frame))

        stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
        current = (vm_offset_t)(uintptr_t)&stack_top;

        /*
         * Try to detect if interrupt is using kernel thread stack.
         * Hardware could use a dedicated stack for interrupt handling.
         */
        if (stack_top <= current || current < td->td_kstack)

        used = stack_top - current;

        prev_used = max_kstack_used;
        if (prev_used >= used)

        if (atomic_cmpset_int(&max_kstack_used, prev_used, used))

#endif /* KSTACK_USAGE_PROF */

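/*
 * When the kernel is built with KSTACK_USAGE_PROF, the high-water mark
 * recorded above is readable from userland, e.g.:
 *
 *	# sysctl debug.max_kstack_used
 */
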
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
        struct proc *p1 = td->td_proc;
        struct domainset *dset;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared, essentially
                 * this changes shared memory amongst threads, into
                 * COW locally.
                 */
                if ((flags & RFMEM) == 0) {
                        error = vmspace_unshare(p1);

                cpu_fork(td, p2, td2, flags);

                p2->p_vmspace = p1->p_vmspace;
                refcount_acquire(&p1->p_vmspace->vm_refcnt);

        dset = td2->td_domain.dr_policy;
        while (vm_page_count_severe_set(&dset->ds_mask)) {
                vm_wait_doms(&dset->ds_mask, 0);

        if ((flags & RFMEM) == 0) {

                if (p1->p_vmspace->vm_shm)

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
vm_waitproc(struct proc *p)

        vmspace_exitfree(p);    /* and clean-out the vmspace */