/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <machine/cpu.h>
/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}
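
/*
 * Illustrative sketch (not part of the original file): a caller might use
 * kernacc() to validate a kernel address range before dereferencing it.
 * The helper name below is hypothetical.
 */
#if 0
static int
example_peek_kernel(void *p, int *valuep)
{

	if (!kernacc(p, sizeof(int), VM_PROT_READ))
		return (EFAULT);
	*valuep = *(int *)p;
	return (0);
}
#endif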

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}
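
/*
 * Illustrative sketch (not part of the original file): useracc() only
 * validates the vm_map_entry protections, so the actual access through
 * copyin() is still required and may still fault.  The helper name is
 * hypothetical.
 */
#if 0
static int
example_check_and_copyin(void *uaddr, void *kbuf, size_t len)
{

	if (!useracc(uaddr, len, VM_PROT_READ))
		return (EFAULT);
	return (copyin(uaddr, kbuf, len));
}
#endif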

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_user_wired)
		return (ENOMEM);
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	if (error == KERN_SUCCESS) {
		curthread->td_vslock_sz += len;
		return (0);
	}

	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	MPASS(curthread->td_vslock_sz >= len);
	curthread->td_vslock_sz -= len;
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
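
/*
 * Illustrative sketch (not part of the original file): wiring a user
 * buffer around a copy so its pages cannot be paged out in between, the
 * pattern used by callers such as sysctl handlers.  The helper name is
 * hypothetical.
 */
#if 0
static int
example_copyin_wired(void *uaddr, void *kbuf, size_t len)
{
	int error;

	error = vslock(uaddr, len);
	if (error != 0)
		return (error);
	error = copyin(uaddr, kbuf, len);
	vsunlock(uaddr, len);
	return (error);
}
#endif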

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_WLOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED);
	if (m->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(m);
		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
		if (rv != VM_PAGER_OK) {
			vm_page_lock(m);
			vm_page_unwire(m, PQ_NONE);
			vm_page_free(m);
			vm_page_unlock(m);
			m = NULL;
			goto out;
		}
		vm_page_xunbusy(m);
	}
out:
	VM_OBJECT_WUNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock(m);
	vm_page_unwire(m, PQ_ACTIVE);
	vm_page_unlock(m);
}
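
/*
 * Illustrative sketch (not part of the original file): the typical
 * map/copy/unmap sequence an image activator might perform.  The helper
 * is hypothetical; sf_buf_kva() yields the base of the mapped page, so
 * len must not exceed PAGE_SIZE.
 */
#if 0
static int
example_copy_from_object(vm_object_t obj, vm_ooffset_t off, void *dst,
    size_t len)
{
	struct sf_buf *sf;

	sf = vm_imgact_map_page(obj, off);
	if (sf == NULL)
		return (EIO);
	bcopy((void *)sf_buf_kva(sf), dst, len);
	vm_imgact_unmap_page(sf);
	return (0);
}
#endif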

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

static uma_zone_t kstack_cache;
static int kstack_cache_size = 128;
static int kstack_domain_iter;

static int
sysctl_kstack_cache_size(SYSCTL_HANDLER_ARGS)
{
	int error, newsize;

	newsize = kstack_cache_size;
	error = sysctl_handle_int(oidp, &newsize, 0, req);
	if (error == 0 && req->newptr && newsize != kstack_cache_size) {
		kstack_cache_size = newsize;
		uma_zone_set_maxcache(kstack_cache, newsize);
	}
	return (error);
}

SYSCTL_PROC(_vm, OID_AUTO, kstack_cache_size, CTLTYPE_INT|CTLFLAG_RW,
    &kstack_cache_size, 0, sysctl_kstack_cache_size, "IU",
    "Maximum number of cached kernel stacks");

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the performance of process fork and
 * thread creation.
 */
static vm_offset_t
vm_thread_stack_create(struct domainset *ds, vm_object_t *ksobjp, int pages)
{
	vm_page_t ma[KSTACK_MAX_PAGES];
	vm_object_t ksobj;
	vm_offset_t ks;
	int i;

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
	    PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_BESTFIT | M_NOWAIT, &ks)) {
		ks = 0;
	}
#else
	ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}
	if (vm_ndomains > 1) {
		ksobj->domain.dr_policy = ds;
		ksobj->domain.dr_iter =
		    atomic_fetchadd_int(&kstack_domain_iter, 1);
	}

	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}

	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_WLOCK(ksobj);
	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED, ma, pages);
	for (i = 0; i < pages; i++)
		ma[i]->valid = VM_PAGE_BITS_ALL;
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	*ksobjp = ksobj;
	return (ks);
}

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock(m);
		vm_page_unwire_noq(m);
		vm_page_free(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allocate the kernel stack for a new thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;

	/* Bounds check */
	if (pages <= 1)
		pages = kstack_pages;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	ksobj = NULL;
	ks = 0;
	if (pages == kstack_pages && kstack_cache != NULL) {
		ks = (vm_offset_t)uma_zalloc(kstack_cache, M_NOWAIT);
		if (ks != 0)
			ksobj = PHYS_TO_VM_PAGE(pmap_kextract(ks))->object;
	}

	/*
	 * Ensure that kstack objects can draw pages from any memory
	 * domain.  Otherwise a local memory shortage can block a process
	 * swap-in.
	 */
	if (ks == 0)
		ks = vm_thread_stack_create(DOMAINSET_PREF(PCPU_GET(domain)),
		    &ksobj, pages);
	if (ks == 0)
		return (0);
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	td->td_kstack_pages = pages;
	return (1);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == kstack_pages)
		uma_zfree(kstack_cache, (void *)ks);
	else
		vm_thread_stack_dispose(ksobj, ks, pages);
}
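
/*
 * Illustrative sketch (not part of the original file): the intended
 * pairing of the two routines above.  Stacks of the default size are
 * recycled through kstack_cache; other sizes are freed outright.
 */
#if 0
static int
example_thread_stack_lifecycle(struct thread *td)
{

	if (vm_thread_new(td, 0) == 0)	/* 0 selects kstack_pages. */
		return (ENOMEM);
	/* ... the thread runs on td->td_kstack ... */
	vm_thread_dispose(td);
	return (0);
}
#endif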

static int
kstack_import(void *arg, void **store, int cnt, int domain, int flags)
{
	vm_object_t ksobj;
	int i;

	for (i = 0; i < cnt; i++) {
		store[i] = (void *)vm_thread_stack_create(
		    DOMAINSET_PREF(domain), &ksobj, kstack_pages);
		if (store[i] == NULL)
			break;
	}
	return (i);
}

static void
kstack_release(void *arg, void **store, int cnt)
{
	vm_offset_t ks;
	int i;

	for (i = 0; i < cnt; i++) {
		ks = (vm_offset_t)store[i];
		vm_thread_stack_dispose(
		    PHYS_TO_VM_PAGE(pmap_kextract(ks))->object,
		    ks, kstack_pages);
	}
}

static void
kstack_cache_init(void *null)
{
	kstack_cache = uma_zcache_create("kstack_cache",
	    kstack_pages * PAGE_SIZE, NULL, NULL, NULL, NULL,
	    kstack_import, kstack_release, NULL,
	    UMA_ZONE_NUMA|UMA_ZONE_MINBUCKET);
	uma_zone_set_maxcache(kstack_cache, kstack_cache_size);
}

SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifdef KSTACK_USAGE_PROF
/*
 * Track maximum stack used by a thread in kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
    &max_kstack_used, 0,
    "Maximum stack depth used by a thread in kernel");

void
intr_prof_stack_use(struct thread *td, struct trapframe *frame)
{
	vm_offset_t stack_top;
	vm_offset_t current;
	int used, prev_used;

	/*
	 * Testing for interrupted kernel mode isn't strictly
	 * needed.  It optimizes the execution, since interrupts from
	 * usermode will have only the trap frame on the stack.
	 */
	if (TRAPF_USERMODE(frame))
		return;

	stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
	current = (vm_offset_t)(uintptr_t)&stack_top;

	/*
	 * Try to detect if the interrupt is using the kernel thread stack.
	 * Hardware could use a dedicated stack for interrupt handling.
	 */
	if (stack_top <= current || current < td->td_kstack)
		return;

	used = stack_top - current;
	for (;;) {
		prev_used = max_kstack_used;
		if (prev_used >= used)
			break;
		if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
			break;
	}
}
#endif /* KSTACK_USAGE_PROF */
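
/*
 * Example (illustrative, not in the original file): with "options
 * KSTACK_USAGE_PROF" compiled into the kernel, the recorded high-water
 * mark can be read with "sysctl debug.max_kstack_used".
 */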

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	struct domainset *dset;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}
	dset = td2->td_domain.dr_policy;
	while (vm_page_count_severe_set(&dset->ds_mask)) {
		vm_wait_doms(&dset->ds_mask);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);	/* and clean-out the vmspace */
}