/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_reset.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

_Static_assert(__OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "__OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");
union savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
	KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
	return ((union savefpu *)p);
}

union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
	vm_offset_t p;

	p = (vm_offset_t)(pcb + 1);
	return ((union savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
	    sizeof(struct pcb);
	return ((struct pcb *)p);
}

void *
alloc_fpusave(int flags)
{
	void *res;
	struct savefpu_ymm *sf;

	res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
	if (use_xsave) {
		sf = (struct savefpu_ymm *)res;
		bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
		sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
	}
	return (res);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt, *pldt1;

			mtx_lock_spin(&dt_lock);
			if ((pldt1 = mdp1->md_ldt) != NULL &&
			    pldt1->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_deref(pldt1);
			} else
				mtx_unlock_spin(&dt_lock);
		}
		return;
	}

	/* Ensure that td1's pcb is up to date. */
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
	critical_enter();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(td1->td_pcb->pcb_save);
	critical_exit();

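	/*
	 * Writing the FPU state back here matters: the child's user save
	 * area is filled below by copying td1's save area, so any state
	 * still live in the FPU registers would otherwise be missed.
	 */
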
	/* Point the pcb to the top of the stack */
	pcb2 = get_pcb_td(td2);
	td2->td_pcb = pcb2;

	/* Copy td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Properly initialize pcb_save */
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
	    cpu_max_ext_state_size);

	/* Point mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -VM86_STACK_SPACE (-16) is so we can expand the trapframe
	 * if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb -
	    VM86_STACK_SPACE) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
	td2->td_frame->tf_edx = 1;

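	/*
	 * The eax/edx pair follows the historical two-register fork()
	 * return convention: the child sees (0, 1), while the parent's
	 * frame receives the child pid and 0 via the normal syscall
	 * return path.
	 */
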
	/*
	 * If the parent process has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame unless the debugger had set PF_FORK
	 * on the parent.  Otherwise, the child will receive a (likely
	 * unexpected) SIGTRAP when it executes the first instruction after
	 * returning to userland.
	 */
	if ((p1->p_pfsflags & PF_FORK) == 0)
		td2->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_cr3 = pmap_get_cr3(vmspace_pmap(p2->p_vmspace));
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline + setidt_disp;
	/*
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = NULL;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&dt_lock);
	if (mdp2->md_ldt != NULL) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&dt_lock);

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch().
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}

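/*
 * In outline, the child's first run is expected to pass through
 * cpu_switch() -> fork_trampoline() -> fork_exit(), which calls
 * fork_return(td2, td2->td_frame) before the return to user mode;
 * fork_trampoline() picks the callout and its argument out of the
 * pcb_esi and pcb_ebx values set up above.
 */
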
/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int) func;	/* function */
	td->td_pcb->pcb_ebx = (int) arg;	/* first arg */
}

void
cpu_exit(struct thread *td)
{

	/*
	 * If this process has a custom LDT, release it.  Reset pcb_gs
	 * and %gs before we free it in case they refer to an LDT entry.
	 */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	} else
		mtx_unlock_spin(&dt_lock);
}

void
cpu_thread_exit(struct thread *td)
{

	critical_enter();
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
	critical_exit();

	/* Disable any hardware breakpoints. */
	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

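/*
 * pcb_ext, when present, holds the per-thread TSS and I/O permission
 * bitmap installed by i386_extend_pcb() for ioperm(); ctob(IOPAGES + 1)
 * below matches the size of that allocation.
 */
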
void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != NULL) {
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		pmap_trm_free(pcb->pcb_ext, ctob(IOPAGES + 1));
		pcb->pcb_ext = NULL;
	}
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;
	struct xstate_hdr *xhdr;

	td->td_pcb = pcb = get_pcb_td(td);
	td->td_frame = (struct trapframe *)((caddr_t)pcb -
	    VM86_STACK_SPACE) - 1;
	pcb->pcb_ext = NULL;
	pcb->pcb_save = get_pcb_user_save_pcb(pcb);
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
		bzero(xhdr, sizeof(*xhdr));
		xhdr->xstate_bv = xsave_mask;
	}
}

void
cpu_thread_free(struct thread *td)
{

	cpu_thread_clean(td);
}

bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{

	return (true);
}

int
cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
    int com __unused, void *data __unused)
{

	return (EINVAL);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{

	switch (error) {
	case 0:
		td->td_frame->tf_eax = td->td_retval[0];
		td->td_frame->tf_edx = td->td_retval[1];
		td->td_frame->tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
		 * 0x80 is 2 bytes.  We saved this in tf_err.
		 */
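		/*
		 * Concretely: a syscall entered through "int $0x80" records
		 * 2 in tf_err, so backing tf_eip up by 2 re-executes the int
		 * instruction on return to user mode; the 7-byte lcall gate
		 * records 7 instead.
		 */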
		td->td_frame->tf_eip -= td->td_frame->tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
		td->td_frame->tf_eax = SV_ABI_ERRNO(td->td_proc, error);
		td->td_frame->tf_eflags |= PSL_C;
		break;
	}
}

/*
 * Initialize machine state, mostly pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to the fork_return(), which
 * finalizes the thread state and handles peculiarities of the first
 * return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
	    PCB_KERNNPX);
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
	    cpu_max_ext_state_size);

	/*
	 * Create a new fresh stack for the new thread.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/* If the current thread has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame. Otherwise, the new thread will
	 * receive a (likely unexpected) SIGTRAP when it executes the first
	 * instruction after returning to userland.
	 */
	td->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;		    /* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;			    /* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline + setidt_disp;
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_cr3:	cloned above.
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it
	 * look as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the entry
	 * function.
	 */
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
	td->td_frame->tf_eip = (int)entry;
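
	/*
	 * Worked example of the tf_esp computation above, assuming
	 * ss_sp == 0x1000 and ss_size == 0x10000: 0x1000 + 0x10000 - 4
	 * = 0x10ffc, aligned down to 16 bytes gives 0x10ff0, minus 4
	 * gives tf_esp = 0x10fec.  The word at tf_esp gets the 0 sentinel
	 * below and the word at tf_esp + 4 gets the argument, mimicking
	 * a normal call of entry(arg).
	 */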

	/* Return address sentinel value to stop stack unwinding. */
	suword((void *)td->td_frame->tf_esp, 0);

	/* Pass the argument to the entry point. */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)arg);
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct segment_descriptor sd;
	uint32_t base;

	/*
	 * Construct a descriptor and store it in the pcb for
	 * the next context switch.  Also store it in the gdt
	 * so that the reload of the %gs selector will activate it
	 * at return to userland.
	 */
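	/*
	 * For example, with tls_base == 0xbfbfe000 the split below gives
	 * sd_lobase = 0xbfe000 (low 24 bits) and sd_hibase = 0xbf (high
	 * 8 bits); the limit fields (see the "4GB limit, wraps around"
	 * comments) make the segment cover the whole 4GB address space,
	 * so %gs-relative accesses wrap rather than fault.
	 */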
	base = (uint32_t)tls_base;
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sd.sd_hilimit = 0xf;	/* 4GB limit, wraps around */
	sd.sd_type  = SDT_MEMRWA;
	sd.sd_dpl   = SEL_UPL;
	sd.sd_p     = 1;
	sd.sd_xx    = 0;
	sd.sd_def32 = 1;
	sd.sd_gran  = 1;
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	if (td == curthread) {
		PCPU_GET(fsgs_gdt)[1] = sd;
		load_gs(GSEL(GUGS_SEL, SEL_UPL));
	}
	critical_exit();
	return (0);
}

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
void
sf_buf_map(struct sf_buf *sf, int flags)
{

	pmap_qenter(sf->kva, &sf->m, 1);
	sf_buf_shootdown(sf, flags);
}

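/*
 * sf->cpumask tracks which CPUs already have a TLB entry consistent with
 * the current mapping of the sf_buf KVA.  The local CPU flushes any stale
 * entry with invlpg(); unless SFB_CPUPRIVATE restricts the mapping to the
 * local CPU, the remaining CPUs are caught up with a masked
 * smp_masked_invlpg() shootdown.
 */
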
void
sf_buf_shootdown(struct sf_buf *sf, int flags)
{
	cpuset_t other_cpus;
	u_int cpuid;

	sched_pin();
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
		CPU_SET(cpuid, &sf->cpumask);
		invlpg(sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_ANDNOT(&other_cpus, &sf->cpumask);
		if (!CPU_EMPTY(&other_cpus)) {
			CPU_OR(&sf->cpumask, &other_cpus);
			smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap);
		}
	}
	sched_unpin();
}

/*
 * MD part of sf_buf_free().
 */
int
sf_buf_unmap(struct sf_buf *sf)
{

	return (0);
}

static void
sf_buf_invalidate(struct sf_buf *sf)
{
	vm_page_t m = sf->m;

	/*
	 * Use pmap_qenter to update the pte for
	 * existing mapping, in particular, the PAT
	 * settings are recalculated.
	 */
	pmap_qenter(sf->kva, &m, 1);
	pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE);
}

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly old) mapping of the page by an sf buffer exists.  Returns
 * TRUE when the mapping was found and the cache invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{

	return (sf_buf_process_page(m, sf_buf_invalidate));
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(vm_paddr_t addr)
{

	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return (0);

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)