/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <machine/cpu.h>
/*
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));

        if ((vm_offset_t)addr + len > kernel_map->max_offset ||
            (vm_offset_t)addr + len < (vm_offset_t)addr)
                return (FALSE);

        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
        return (rv == TRUE);
}
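
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * such as a /dev/mem-style read handler might validate a kernel address
 * range before touching it.  The surrounding variable names are
 * hypothetical.
 *
 *	if (!kernacc(kaddr, len, VM_PROT_READ))
 *		return (EFAULT);
 *	error = uiomove(kaddr, len, uio);
 */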
/*
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
        return (rv == TRUE);
}
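
/*
 * Example (illustrative sketch): useracc() only checks the map entries of
 * the current process, so callers typically pair it with an actual access
 * primitive.  The variable names below are hypothetical.
 *
 *	if (useracc(uaddr, len, VM_PROT_WRITE))
 *		error = copyout(kbuf, uaddr, len);
 */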
int
vslock(void *addr, size_t len)
{
        vm_offset_t end, last, start;
        vm_size_t npages;
        int error;

        last = (vm_offset_t)addr + len;
        start = trunc_page((vm_offset_t)addr);
        end = round_page(last);
        if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
                return (EINVAL);
        npages = atop(end - start);
        if (npages > vm_page_max_wired)
                return (ENOMEM);
#if 0
        /*
         * XXX - not yet
         *
         * The limit for transient usage of wired pages should be
         * larger than for "permanent" wired pages (mlock()).
         *
         * Also, the sysctl code, which is the only present user
         * of vslock(), does a hard loop on EAGAIN.
         */
        if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
                return (EAGAIN);
#endif
        error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        /*
         * Return EFAULT on error to match copy{in,out}() behaviour
         * rather than returning ENOMEM like mlock() would.
         */
        return (error == KERN_SUCCESS ? 0 : EFAULT);
}
void
vsunlock(void *addr, size_t len)
{

        /* Rely on the parameter sanity checks performed by vslock(). */
        (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
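
/*
 * Example (illustrative sketch): the sysctl code wires a user buffer for
 * the duration of a copy and unwires it afterwards, which mirrors the
 * only present use of vslock()/vsunlock().  The variable names below are
 * hypothetical.
 *
 *	error = vslock(uaddr, len);
 *	if (error == 0) {
 *		error = copyout(kbuf, uaddr, len);
 *		vsunlock(uaddr, len);
 *	}
 */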
/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
        vm_page_t m;
        vm_pindex_t pindex;
        int rv;

        VM_OBJECT_WLOCK(object);
        pindex = OFF_TO_IDX(offset);
        m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
        if (m->valid != VM_PAGE_BITS_ALL) {
                vm_page_xbusy(m);
                rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
                if (rv != VM_PAGER_OK) {
                        /* Paging in failed; discard the page and fail. */
                        vm_page_lock(m);
                        vm_page_free(m);
                        vm_page_unlock(m);
                        m = NULL;
                        goto out;
                }
                vm_page_xunbusy(m);
        }
        vm_page_lock(m);
        vm_page_hold(m);
        vm_page_unlock(m);
out:
        VM_OBJECT_WUNLOCK(object);
        return (m);
}
/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
        vm_page_t m;

        m = vm_imgact_hold_page(object, offset);
        if (m == NULL)
                return (NULL);
        sched_pin();
        return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}
/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
        vm_page_t m;

        m = sf_buf_page(sf);
        sf_buf_free(sf);
        sched_unpin();
        vm_page_lock(m);
        vm_page_unhold(m);
        vm_page_unlock(m);
}
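
/*
 * Example (illustrative sketch): image activators map a page of the
 * executable, copy or inspect it, and then drop the mapping.  The
 * variable names below are hypothetical.
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy((void *)sf_buf_kva(sf), buf, PAGE_SIZE);
 *	vm_imgact_unmap_page(sf);
 */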
void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

        pmap_sync_icache(map->pmap, va, sz);
}
struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);

SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "Maximum number of cached kernel stacks");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "Number of kernel stacks currently allocated");
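
/*
 * Note on the cache layout: cached stacks are threaded through the stack
 * memory itself; a struct kstack_cache_entry (see sys/_kstack_cache.h) is
 * overlaid on the base of each free stack, so the cache needs no separate
 * allocations.  The cache size can be tuned at run time, e.g.
 * "sysctl vm.kstack_cache_size=256".
 */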
/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
 * thread create performance for a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t ma[KSTACK_MAX_PAGES];
        struct kstack_cache_entry *ks_ce;
        int i;

        /* Bounds check the requested stack size. */
        if (pages <= 1)
                pages = kstack_pages;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;

        /* Reuse a cached stack if one of the default size is available. */
        if (pages == kstack_pages) {
                mtx_lock(&kstack_cache_mtx);
                if (kstack_cache != NULL) {
                        ks_ce = kstack_cache;
                        kstack_cache = ks_ce->next_ks_entry;
                        mtx_unlock(&kstack_cache_mtx);

                        td->td_kstack_obj = ks_ce->ksobj;
                        td->td_kstack = (vm_offset_t)ks_ce;
                        td->td_kstack_pages = kstack_pages;
                        return (1);
                }
                mtx_unlock(&kstack_cache_mtx);
        }

        /*
         * Allocate an object for the kstack.
         */
        ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

        /*
         * Get a kernel virtual address for this thread's kstack.
         */
#if defined(__mips__)
        /*
         * We need to align the kstack's mapped address to fit within
         * a single TLB entry.
         */
        if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
            PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
            M_BESTFIT | M_NOWAIT, &ks)) {
                ks = 0;
        }
#else
        ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
        if (ks == 0) {
                printf("vm_thread_new: kstack allocation failed\n");
                vm_object_deallocate(ksobj);
                return (0);
        }

        atomic_add_int(&kstacks, 1);
        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        }

        td->td_kstack_obj = ksobj;
        td->td_kstack = ks;
        /*
         * Knowing the number of pages allocated is useful when you
         * want to deallocate them.
         */
        td->td_kstack_pages = pages;
        /*
         * For the length of the stack, link in a real page of ram for each
         * page of stack.
         */
        VM_OBJECT_WLOCK(ksobj);
        (void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
            VM_ALLOC_WIRED, ma, pages);
        for (i = 0; i < pages; i++)
                ma[i]->valid = VM_PAGE_BITS_ALL;
        VM_OBJECT_WUNLOCK(ksobj);
        pmap_qenter(ks, ma, pages);
        return (1);
}
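
/*
 * Worked example (illustrative): with kstack_pages = 4 and
 * KSTACK_GUARD_PAGES = 1, kva_alloc() returns 5 pages of KVA.  The first
 * page is left unmapped by pmap_qremove() so that a stack overflow faults
 * instead of corrupting adjacent memory, and td_kstack points one page
 * past the allocation start; the usable stack is 4 * PAGE_SIZE bytes
 * backed by wired pages of ksobj.
 */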
static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
        vm_page_t m;
        int i;

        atomic_add_int(&kstacks, -1);
        pmap_qremove(ks, pages);
        VM_OBJECT_WLOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_dispose: kstack already missing?");
                vm_page_lock(m);
                vm_page_unwire(m, PQ_NONE);
                vm_page_free(m);
                vm_page_unlock(m);
        }
        VM_OBJECT_WUNLOCK(ksobj);
        vm_object_deallocate(ksobj);
        kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}
/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        struct kstack_cache_entry *ks_ce;
        int pages;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        ks = td->td_kstack;
        td->td_kstack = 0;
        td->td_kstack_pages = 0;
        if (pages == kstack_pages && kstacks <= kstack_cache_size) {
                ks_ce = (struct kstack_cache_entry *)ks;
                ks_ce->ksobj = ksobj;
                mtx_lock(&kstack_cache_mtx);
                ks_ce->next_ks_entry = kstack_cache;
                kstack_cache = ks_ce;
                mtx_unlock(&kstack_cache_mtx);
                return;
        }
        vm_thread_stack_dispose(ksobj, ks, pages);
}
static void
vm_thread_stack_lowmem(void *nulll)
{
        struct kstack_cache_entry *ks_ce, *ks_ce1;

        /* Detach the whole cache list, then free each cached stack. */
        mtx_lock(&kstack_cache_mtx);
        ks_ce = kstack_cache;
        kstack_cache = NULL;
        mtx_unlock(&kstack_cache_mtx);

        while (ks_ce != NULL) {
                ks_ce1 = ks_ce;
                ks_ce = ks_ce->next_ks_entry;

                vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
                    kstack_pages);
        }
}
static void
kstack_cache_init(void *nulll)
{

        EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
            EVENTHANDLER_PRI_ANY);
}

SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);
#ifdef KSTACK_USAGE_PROF
/*
 * Track maximum stack used by a thread in kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
    &max_kstack_used, 0,
    "Maximum stack depth used by a thread in kernel");

void
intr_prof_stack_use(struct thread *td, struct trapframe *frame)
{
        vm_offset_t stack_top;
        vm_offset_t current;
        int used, prev_used;

        /*
         * Testing for interrupted kernel mode isn't strictly
         * needed.  It optimizes the execution, since interrupts from
         * usermode will have only the trap frame on the stack.
         */
        if (TRAPF_USERMODE(frame))
                return;

        stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
        current = (vm_offset_t)(uintptr_t)&stack_top;

        /*
         * Try to detect if interrupt is using kernel thread stack.
         * Hardware could use a dedicated stack for interrupt handling.
         */
        if (stack_top <= current || current < td->td_kstack)
                return;

        used = stack_top - current;
        for (;;) {
                prev_used = max_kstack_used;
                if (prev_used >= used)
                        break;
                if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
                        break;
        }
}
#endif /* KSTACK_USAGE_PROF */
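
/*
 * Example (illustrative): on a kernel built with options KSTACK_USAGE_PROF,
 * the observed stack high-water mark can be read with
 * "sysctl debug.max_kstack_used".
 */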
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
        struct proc *p1 = td->td_proc;
        int error;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared, essentially
                 * this changes shared memory amongst threads, into
                 * COW locally.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                error = vmspace_unshare(p1);
                                if (error)
                                        return (error);
                        }
                }
                cpu_fork(td, p2, td2, flags);
                return (0);
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
        }

        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vm2;
                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
        return (0);
}
/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

        vmspace_exitfree(p);            /* and clean-out the vmspace */
}