/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <machine/cpu.h>

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

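/*
 * Illustrative usage sketch (not part of this file's interface): a caller
 * that wants to probe whether a kernel buffer is writable before touching
 * it could do the following.  Only the map-entry protection is checked,
 * so the access itself may still fault on a non-resident page.
 *
 *	if (!kernacc(buf, len, VM_PROT_WRITE))
 *		return (EFAULT);
 */
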
/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

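/*
 * Sketch of the intended pairing (illustrative; uaddr/kbuf are
 * hypothetical): useracc() validates only the map entries, so the actual
 * data movement still goes through a fault-tolerant primitive such as
 * copyin():
 *
 *	if (!useracc(uaddr, len, VM_PROT_READ))
 *		return (EFAULT);
 *	error = copyin(uaddr, kbuf, len);
 */
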
int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	if (error == KERN_SUCCESS) {
		curthread->td_vslock_sz += len;
		return (0);
	}

	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	MPASS(curthread->td_vslock_sz >= len);
	curthread->td_vslock_sz -= len;
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

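/*
 * Illustrative calling pattern, modeled on the sysctl wired-buffer path
 * (the only present user of vslock()); names here are hypothetical:
 *
 *	error = vslock(uaddr, len);
 *	if (error == 0) {
 *		error = copyout(kbuf, uaddr, len);
 *		vsunlock(uaddr, len);
 *	}
 */
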
/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_WLOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(m);
		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
		if (rv != VM_PAGER_OK) {
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			m = NULL;
			goto out;
		}
		vm_page_xunbusy(m);
	}
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_activate(m);
	vm_page_unlock(m);
out:
	VM_OBJECT_WUNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

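/*
 * Typical usage by an image activator (illustrative sketch; the offset,
 * destination, and length are hypothetical):
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	error = copyout((caddr_t)sf_buf_kva(sf) + pageoff, uaddr, n);
 *	vm_imgact_unmap_page(sf);
 */
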
void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);

SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "Maximum number of cached kernel stacks");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "Number of kernel stacks currently allocated");

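/*
 * Note on the cache representation: a cached stack's own (still mapped)
 * memory is reused to hold its free-list entry, so caching costs no extra
 * allocation.  The entry, declared in <sys/_kstack_cache.h>, records the
 * backing VM object and the next link:
 *
 *	struct kstack_cache_entry {
 *		vm_object_t ksobj;
 *		struct kstack_cache_entry *next_ks_entry;
 *	};
 */
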
/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the performance of process fork and
 * of thread creation.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = kstack_pages;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == kstack_pages) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = kstack_pages;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
	    PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_BESTFIT | M_NOWAIT, &ks)) {
		ks = 0;
	}
#else
	ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_WLOCK(ksobj);
	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED, ma, pages);
	for (i = 0; i < pages; i++)
		ma[i]->valid = VM_PAGE_BITS_ALL;
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

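/*
 * Illustrative KVA layout of the allocation above, assuming
 * KSTACK_GUARD_PAGES = 1 and a downward-growing stack (details are
 * machine-dependent):
 *
 *	base                     guard page, unmapped by pmap_qremove()
 *	base + PAGE_SIZE         td_kstack (ks), lowest mapped stack page
 *	ks + pages * PAGE_SIZE   top of stack
 */
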
static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock(m);
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == kstack_pages && kstacks <= kstack_cache_size) {
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    kstack_pages);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifdef KSTACK_USAGE_PROF
/*
 * Track maximum stack used by a thread in kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
    &max_kstack_used, 0,
    "Maximum stack depth used by a thread in kernel");

void
intr_prof_stack_use(struct thread *td, struct trapframe *frame)
{
	vm_offset_t stack_top;
	vm_offset_t current;
	int used, prev_used;

	/*
	 * Testing for interrupted kernel mode isn't strictly
	 * needed.  It optimizes the execution, since interrupts from
	 * usermode will have only the trap frame on the stack.
	 */
	if (TRAPF_USERMODE(frame))
		return;

	stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
	current = (vm_offset_t)(uintptr_t)&stack_top;

	/*
	 * Try to detect if interrupt is using kernel thread stack.
	 * Hardware could use a dedicated stack for interrupt handling.
	 */
	if (stack_top <= current || current < td->td_kstack)
		return;

	used = stack_top - current;

	/*
	 * Lock-free update of the maximum: retry the compare-and-set
	 * until either our sample is no longer the largest or it lands.
	 */
	for (;;) {
		prev_used = max_kstack_used;
		if (prev_used >= used)
			break;
		if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
			break;
	}
}
#endif /* KSTACK_USAGE_PROF */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

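/*
 * For reference (informal summary, not an exhaustive list): the flag
 * combinations handled above correspond to the rfork(2) interface.
 * fork(2) passes RFFDG | RFPROC (copied vmspace via vm2); vfork(2) adds
 * RFMEM | RFPPWAIT (shared vmspace); RFPROC clear means "change this
 * process's own address space", optionally unsharing it first.
 */
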
/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}