/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_isa.h"
#include "opt_cpu.h"
#include "opt_reset.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#ifdef CPU_ELAN
#include <machine/elan_mmcr.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#include <isa/isareg.h>

#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif
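
/*
 * NSFBUFS is the default number of sf_buf virtual-address slots (consumed,
 * e.g., by sendfile(2)); it scales with kern.maxusers and may be overridden
 * with the NSFBUFS kernel option.
 */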
_Static_assert(OFFSETOF_CURTHREAD == offsetof(struct pcpu, pc_curthread),
    "OFFSETOF_CURTHREAD does not correspond with offset of pc_curthread.");
_Static_assert(OFFSETOF_CURPCB == offsetof(struct pcpu, pc_curpcb),
    "OFFSETOF_CURPCB does not correspond with offset of pc_curpcb.");
_Static_assert(__OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "__OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");
static void	cpu_reset_real(void);
#ifdef SMP
static void	cpu_reset_proxy(void);
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif
union savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
	KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
	return ((union savefpu *)p);
}
union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
	vm_offset_t p;

	p = (vm_offset_t)(pcb + 1);
	return ((union savefpu *)p);
}
struct pcb *
get_pcb_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
	    sizeof(struct pcb);
	return ((struct pcb *)p);
}
void *
alloc_fpusave(int flags)
{
	void *res;
	struct savefpu_ymm *sf;

	res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
	if (use_xsave) {
		sf = (struct savefpu_ymm *)res;
		bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
		sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
	}
	return (res);
}
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child
 * is ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt, *pldt1;

			mtx_lock_spin(&dt_lock);
			if ((pldt1 = mdp1->md_ldt) != NULL &&
			    pldt1->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_deref(pldt1);
			} else
				mtx_unlock_spin(&dt_lock);
		}
		return;
	}
	/* Ensure that td1's pcb is up to date. */
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
	critical_enter();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(td1->td_pcb->pcb_save);
	critical_exit();

	/* Point the pcb to the top of the stack */
	pcb2 = get_pcb_td(td2);
	td2->td_pcb = pcb2;

	/* Copy td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Properly initialize pcb_save */
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
	    cpu_max_ext_state_size);

	/* Point mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));
	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
	td2->td_frame->tf_edx = 1;
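	/*
	 * tf_edx follows the old BSD fork(2) convention: %edx is 0 in the
	 * parent and 1 in the child, complementing the zero child return
	 * value placed in %eax above.
	 */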
	/*
	 * If the parent process has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame unless the debugger had set PF_FORK
	 * on the parent.  Otherwise, the child will receive a (likely
	 * unexpected) SIGTRAP when it executes the first instruction after
	 * returning to userland.
	 */
	if ((p1->p_pfsflags & PF_FORK) == 0)
		td2->td_frame->tf_eflags &= ~PSL_T;
	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#if defined(PAE) || defined(PAE_TABLES)
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	/*-
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = NULL;
	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&dt_lock);
	if (mdp2->md_ldt != NULL) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&dt_lock);
	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}
/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
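	/*
	 * Overwriting the pcb_esi/pcb_ebx values set up by cpu_fork() makes
	 * fork_trampoline() call func(arg, frame) instead of
	 * fork_return(td, frame).
	 */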
	td->td_pcb->pcb_esi = (int) func;	/* function */
	td->td_pcb->pcb_ebx = (int) arg;	/* first arg */
}
void
cpu_exit(struct thread *td)
{

	/*
	 * If this process has a custom LDT, release it.  Reset pcb->pcb_gs
	 * and %gs before we free it in case they refer to an LDT entry.
	 */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	} else
		mtx_unlock_spin(&dt_lock);
}
void
cpu_thread_exit(struct thread *td)
{

	critical_enter();
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
	critical_exit();

	/* Disable any hardware breakpoints. */
	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
	}
}
void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != NULL) {
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_arena, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = NULL;
	}
}
void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}
void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;
	struct xstate_hdr *xhdr;

	td->td_pcb = pcb = get_pcb_td(td);
	td->td_frame = (struct trapframe *)((caddr_t)pcb - 16) - 1;
	pcb->pcb_ext = NULL;
	pcb->pcb_save = get_pcb_user_save_pcb(pcb);
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
		bzero(xhdr, sizeof(*xhdr));
		xhdr->xstate_bv = xsave_mask;
	}
}
void
cpu_thread_free(struct thread *td)
{

	cpu_thread_clean(td);
}
void
cpu_set_syscall_retval(struct thread *td, int error)
{

	switch (error) {
	case 0:
		td->td_frame->tf_eax = td->td_retval[0];
		td->td_frame->tf_edx = td->td_retval[1];
		td->td_frame->tf_eflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
		 * 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		td->td_frame->tf_eip -= td->td_frame->tf_err;
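		/*
		 * For example, a syscall entered via int $0x80 saved 2 in
		 * tf_err, so %eip backs up over the instruction and the
		 * syscall is re-executed after the return to userland.
		 */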
		break;

	case EJUSTRETURN:
		break;

	default:
		td->td_frame->tf_eax = SV_ABI_ERRNO(td->td_proc, error);
		td->td_frame->tf_eflags |= PSL_C;
		break;
	}
}
/*
 * Initialize machine state, mostly pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to the fork_return(), which
 * finalizes the thread state and handles peculiarities of the first
 * return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;
	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
	    PCB_KERNNPX);
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
	    cpu_max_ext_state_size);
	/*
	 * Create a new fresh stack for the new thread.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * If the current thread has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame.  Otherwise, the new thread will
	 * receive a (likely unexpected) SIGTRAP when it executes the first
	 * instruction after returning to userland.
	 */
	td->td_frame->tf_eflags &= ~PSL_T;
	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;		    /* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;			    /* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_cr3:	cloned above.
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;
	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}
/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);
	/*
	 * Set the trap frame to point at the beginning of the entry
	 * function.
	 */
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
	td->td_frame->tf_eip = (int)entry;
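	/*
	 * With this layout the thread enters with %esp pointing at the
	 * sentinel return address stored below and the argument at 4(%esp),
	 * which is what a cdecl function expects on entry.
	 */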
	/* Return address sentinel value to stop stack unwinding. */
	suword((void *)td->td_frame->tf_esp, 0);

	/* Pass the argument to the entry point. */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)arg);
}
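
/*
 * Install a user TLS segment: build a %gs descriptor for the given base.
 * Used, e.g., when a new thread is created with an explicit TLS pointer
 * (thr_new(2)).
 */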
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct segment_descriptor sd;
	uint32_t base;

	/*
	 * Construct a descriptor and store it in the pcb for
	 * the next context switch.  Also store it in the gdt
	 * so that the reload of %gs will activate it at return
	 * to userland.
	 */
	base = (uint32_t)tls_base;
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sd.sd_hilimit = 0xf;	/* 4GB limit, wraps around */
	sd.sd_type  = SDT_MEMRWA;
	sd.sd_dpl   = SEL_UPL;
	sd.sd_p     = 1;
	sd.sd_xx    = 0;
	sd.sd_def32 = 1;
	sd.sd_gran  = 1;
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	if (td == curthread) {
		PCPU_GET(fsgs_gdt)[1] = sd;
		load_gs(GSEL(GUGS_SEL, SEL_UPL));
	}
	critical_exit();
	return (0);
}
/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}
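
/*
 * Handshake used by the reset code below: cpu_reset() running on an AP sets
 * cpu_reset_proxy_active to 0 and restarts the BSP with cpu_reset_proxy()
 * as the restart function; the BSP sets it to 1 to announce that it is
 * running; the AP then sets it to 2, telling the BSP to stop the AP and
 * perform the actual reset.
 */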
#ifdef SMP
static void
cpu_reset_proxy(void)
{
	cpuset_t tcrp;

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to see that we've started */

	CPU_SETOF(cpu_reset_proxyid, &tcrp);
	stop_cpus(tcrp);
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif
void
cpu_reset(void)
{
#ifdef SMP
	cpuset_t map;
	u_int cnt;

	if (smp_started) {
		map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &map);
		CPU_NAND(&map, &stopped_cpus);
		if (!CPU_EMPTY(&map)) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);
		}

		if (PCPU_GET(cpuid) != 0) {
			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");

			/* Restart CPU #0. */
			/* XXX: restart_cpus(1 << 0); */
			CPU_SETOF(0, &started_cpus);
			wmb();

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			enable_intr();
			cpu_reset_proxy_active = 2;

			while (1)
				;	/* NOTREACHED */
		}

		DELAY(1000000);
	}
#endif
	cpu_reset_real();
	/* NOTREACHED */
}
static void
cpu_reset_real(void)
{
	struct region_descriptor null_idt;
	int b;

	disable_intr();
#ifdef CPU_ELAN
	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
#endif

	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
		outl(0xcfc, 0xf);
	}

#if !defined(BROKEN_KEYBOARD_RESET)
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
#endif
	/*
	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
	 * transitions from 0 to 1.  Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset.  We try a "hard" reset.  The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2.  The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * a reset.
	 */
	outb(0xcf9, 0x2);
	outb(0xcf9, 0x6);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	/*
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
	 * preserve bit 1 while setting bit 0.  We also must clear bit
	 * 0 before setting it if it isn't already clear.
	 */
	b = inb(0x92);
	if (b != 0xff) {
		if ((b & 0x1) != 0)
			outb(0x92, b & 0xfe);
		outb(0x92, b | 0x1);
	}
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("No known reset method worked, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */

	/* Wipe the IDT. */
	null_idt.rd_limit = 0;
	null_idt.rd_base = 0;
	lidt(&null_idt);

	/* "good night, sweet prince .... <THUNK!>" */
	breakpoint();

	/* NOTREACHED */
	while (1)
		;
}
/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
void
sf_buf_map(struct sf_buf *sf, int flags)
{
	pt_entry_t opte, *ptep;

	/*
	 * Update the sf_buf's virtual-to-physical mapping, flushing the
	 * virtual address from the TLB.  Since the reference count for
	 * the sf_buf's old mapping was zero, that mapping is not
	 * currently in use.  Consequently, there is no need to exchange
	 * the old and new PTEs atomically, even under PAE.
	 */
	ptep = vtopte(sf->kva);
	opte = *ptep;
	*ptep = VM_PAGE_TO_PHYS(sf->m) | pgeflag | PG_RW | PG_V |
	    pmap_cache_bits(sf->m->md.pat_mode, 0);
	/*
	 * Avoid unnecessary TLB invalidations: If the sf_buf's old
	 * virtual-to-physical mapping was not used, then any processor
	 * that has invalidated the sf_buf's virtual address from its TLB
	 * since the last used mapping need not invalidate again.
	 */
#ifdef SMP
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		CPU_ZERO(&sf->cpumask);

	sf_buf_shootdown(sf, flags);
#else
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		pmap_invalidate_page(kernel_pmap, sf->kva);
#endif
}
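
/*
 * Lazy TLB shootdown for sf_buf mappings: the local CPU invalidates
 * immediately, CPUs already recorded in sf->cpumask are assumed current,
 * and the remaining CPUs receive an invlpg IPI unless the caller asked
 * for a CPU-private mapping (SFB_CPUPRIVATE).
 */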
#ifdef SMP
void
sf_buf_shootdown(struct sf_buf *sf, int flags)
{
	cpuset_t other_cpus;
	u_int cpuid;

	sched_pin();
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
		CPU_SET(cpuid, &sf->cpumask);
		pmap_invalidate_page(kernel_pmap, sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_NAND(&other_cpus, &sf->cpumask);
		if (!CPU_EMPTY(&other_cpus)) {
			CPU_OR(&sf->cpumask, &other_cpus);
			smp_masked_invlpg(other_cpus, sf->kva);
		}
	}
	sched_unpin();
}
#endif
/*
 * MD part of sf_buf_free().
 */
int
sf_buf_unmap(struct sf_buf *sf)
{

	return (0);
}
static void
sf_buf_invalidate(struct sf_buf *sf)
{
	vm_page_t m = sf->m;

	/*
	 * Use pmap_qenter to update the pte for
	 * existing mapping, in particular, the PAT
	 * settings are recalculated.
	 */
	pmap_qenter(sf->kva, &m, 1);
	pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE, FALSE);
}
/*
 * Invalidate the cache lines that may belong to the page, if
 * (possibly old) mapping of the page by sf buffer exists.  Returns
 * TRUE when mapping was found and cache invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{

	return (sf_buf_process_page(m, sf_buf_invalidate));
}
/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending != 0)
		busdma_swi();
}
/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return (0);
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return (1);
}