2 * Copyright (c) 1989, 1990 William F. Jolitz.
3 * Copyright (c) 1990 The Regents of the University of California.
4 * Copyright (c) 2007, 2018 The FreeBSD Foundation
7 * Portions of this software were developed by A. Joseph Koshy under
8 * sponsorship from the FreeBSD Foundation and Google, Inc.
9 * Portions of this software were developed by Konstantin Belousov
10 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 #include "opt_atpic.h"
41 #include "opt_hwpmc_hooks.h"
42 #include "opt_hyperv.h"
46 #include <machine/psl.h>
47 #include <machine/asmacros.h>
48 #include <machine/trap.h>
/*
 * DTrace FBT hooks: two 4-byte pointer slots patched at runtime.
 * dtrace_invop_jump_addr is checked and jumped through by the
 * invalid-opcode fault path below ("cmpl $0, dtrace_invop_jump_addr" /
 * "jmpl *dtrace_invop_jump_addr"); dtrace_invop_calltrap_addr is loaded
 * with the fallback target ($norm_ill) so the DTrace handler can return
 * control when the fault was not caused by one of its probes.
 * NOTE(review): the ".long 0" storage initializers for both objects
 * appear to be elided from this excerpt -- confirm against full source.
 */
52 .globl dtrace_invop_jump_addr
54 .type dtrace_invop_jump_addr, @object
55 .size dtrace_invop_jump_addr, 4
56 dtrace_invop_jump_addr:
58 .globl dtrace_invop_calltrap_addr
60 .type dtrace_invop_calltrap_addr, @object
61 .size dtrace_invop_calltrap_addr, 4
62 dtrace_invop_calltrap_addr:
/*
 * start_exceptions marks the beginning of the region that is copied to
 * the trampoline area (paired with ENTRY(end_exceptions) at the bottom
 * of this file); see the "not executed at the linked address" comment
 * below for why all code in between must be position-independent.
 */
66 ENTRY(start_exceptions)
/*
 * Trampoline-resident copy of the idle page-table directory pointer.
 * Loaded via "(tramp_idleptd - 1b)(%ebx)" in the fault paths below when
 * switching off a user page table -- presumably written into %cr3 by
 * elided instructions; confirm against the full source.
 */
68 tramp_idleptd: .long 0
70 /*****************************************************************************/
72 /*****************************************************************************/
74 * Trap and fault vector routines.
76 * All traps are 'interrupt gates', SDT_SYS386IGT. Interrupts are disabled
77 * by hardware to not allow interrupts until code switched to the kernel
78 * address space and the kernel thread stack.
80 * The cpu will push a certain amount of state onto the kernel stack for
81 * the current process. The amount of state depends on the type of trap
82 * and whether the trap crossed rings or not. See i386/include/frame.h.
83 * At the very least the current EFLAGS (status register, which includes
84 * the interrupt disable state prior to the trap), the code segment register,
85 * and the return instruction pointer are pushed by the cpu. The cpu
86 * will also push an 'error' code for certain traps. We push a dummy
87 * error code for those traps where the cpu doesn't push one, in order to maintain
88 * a consistent frame. We also push a contrived 'trap number'.
90 * The cpu does not push the general registers, we must do that, and we
91 * must restore them prior to calling 'iret'. The cpu adjusts the %cs and
92 * %ss segment registers, but does not mess with %ds, %es, or %fs. Thus we
93 * must load them with appropriate values for supervisor mode operation.
95 * This code is not executed at the linked address, it is copied to the
96 * trampoline area. As a consequence, all code there and in included files
/* Push the trap number and join the common trap-handling path (alltraps). */
100 #define TRAP(a) pushl $(a) ; jmp alltraps
103 pushl $0; TRAP(T_DIVIDE)
105 pushl $0; TRAP(T_BPTFLT)
107 pushl $0; TRAP(T_DTRACE_RET)
109 pushl $0; TRAP(T_OFLOW)
111 pushl $0; TRAP(T_BOUND)
112 #ifndef KDTRACE_HOOKS
114 pushl $0; TRAP(T_PRIVINFLT)
117 pushl $0; TRAP(T_DNA)
119 pushl $0; TRAP(T_FPOPFLT)
132 testl $PSL_VM, TF_EFLAGS-TF_ERR(%esp)
134 testb $SEL_RPL_MASK, TF_CS-TF_ERR(%esp)
136 cmpl $PMAP_TRM_MIN_ADDRESS, TF_EIP-TF_ERR(%esp)
140 * This is a handshake between copyout_fast.s and page fault
141 * handler. We check for page fault occurring at the special
142 * places in the copyout fast path, where page fault can
143 * legitimately happen while accessing either user space or
144 * kernel pageable memory, and return control to *%edx.
145 * We switch to the idleptd page table from a user page table,
149 movl TF_EIP-TF_ERR+4(%esp), %eax
173 2: movl $tramp_idleptd, %eax
179 movl %edx, TF_EIP-TF_ERR(%esp)
188 upf: pushl $T_PAGEFLT
192 pushl $0; TRAP(T_RESERVED)
194 pushl $0; TRAP(T_ARITHTRAP)
198 pushl $0; TRAP(T_XMMFLT)
201 * All traps except ones for syscalls or invalid segment,
202 * jump to alltraps. If
203 * interrupts were enabled when the trap occurred, then interrupts
204 * are enabled now if the trap was through a trap gate, else
205 * disabled if the trap was through an interrupt gate. Note that
206 * int0x80_syscall is a trap gate. Interrupt gates are used by
207 * page faults, non-maskable interrupts, debug and breakpoint
212 .type alltraps,@function
215 alltraps_with_regs_pushed:
226 * Return via doreti to handle ASTs.
231 .type irettraps,@function
233 testl $PSL_VM, TF_EFLAGS-TF_TRAPNO(%esp)
235 testb $SEL_RPL_MASK, TF_CS-TF_TRAPNO(%esp)
240 * The special case there is the kernel mode with user %cr3 and
241 * trampoline stack. We need to copy both current frame and the
242 * hardware portion of the frame we tried to return to, to the
243 * normal stack. This logic must follow the stack unwind order
251 leal (doreti_iret - 1b)(%ebx), %edx
252 cmpl %edx, TF_EIP(%esp)
254 /* -8 because exception did not switch ring */
255 movl $(2 * TF_SZ - TF_EIP - 8), %ecx
257 2: leal (doreti_popl_ds - 1b)(%ebx), %edx
258 cmpl %edx, TF_EIP(%esp)
260 movl $(2 * TF_SZ - TF_DS - 8), %ecx
262 3: leal (doreti_popl_es - 1b)(%ebx), %edx
263 cmpl %edx, TF_EIP(%esp)
265 movl $(2 * TF_SZ - TF_ES - 8), %ecx
267 4: leal (doreti_popl_fs - 1b)(%ebx), %edx
268 cmpl %edx, TF_EIP(%esp)
270 movl $(2 * TF_SZ - TF_FS - 8), %ecx
271 5: cmpl $PMAP_TRM_MIN_ADDRESS, %esp /* trampoline stack ? */
272 jb calltrap /* if not, no need to change stacks */
273 movl (tramp_idleptd - 1b)(%ebx), %eax
275 movl PCPU(KESP0), %edx
281 /* kernel mode, normal */
285 * Privileged instruction fault.
291 * Check if this is a user fault. If so, just handle it as a normal
294 testl $PSL_VM, 8(%esp) /* and vm86 mode. */
296 cmpl $GSEL_KPL, 4(%esp) /* Check the code segment */
300 * Check if a DTrace hook is registered. The trampoline cannot
303 cmpl $0, dtrace_invop_jump_addr
307 * This is a kernel instruction fault that might have been caused
308 * by a DTrace provider.
314 * Set our jump address for the jump back in the event that
315 * the exception wasn't caused by DTrace at all.
317 movl $norm_ill, dtrace_invop_calltrap_addr
319 /* Jump to the code hooked in by DTrace. */
320 jmpl *dtrace_invop_jump_addr
323 * Process the instruction fault in the normal way.
332 * See comment in the handler for the kernel case T_TRCTRAP in trap.c.
333 * The exception handler must be ready to execute with wrong %cr3.
334 * We save original %cr3 in frame->tf_err, similarly to NMI and MCE
344 movl %eax, TF_ERR(%esp)
347 movl (tramp_idleptd - 1b)(%eax), %eax
349 testl $PSL_VM, TF_EFLAGS(%esp)
351 testb $SEL_RPL_MASK,TF_CS(%esp)
355 movl $handle_ibrs_entry,%eax
361 movl $T_RESERVED, TF_TRAPNO(%esp)
377 * Save %cr3 into tf_err. There is no good place to put it.
378 * Always reload %cr3, since we might have interrupted the
379 * kernel entry or exit.
380 * Do not switch to the thread kernel stack, otherwise we might
381 * obliterate the previous context partially copied from the
383 * Do not re-enable IBRS, there is no good place to store
384 * previous state if we come from the kernel.
387 movl %eax, TF_ERR(%esp)
390 movl (tramp_idleptd - 1b)(%eax), %eax
395 * Trap gate entry for syscalls (int 0x80).
396 * This is used by FreeBSD ELF executables, "new" a.out executables, and all
399 * Even though the name says 'int0x80', this is actually a trap gate, not an
400 * interrupt gate. Thus interrupts are enabled on entry just as they are for
404 IDTVEC(int0x80_syscall)
405 pushl $2 /* sizeof "int 0x80" */
406 pushl $0 /* tf_trapno */
411 movl $handle_ibrs_entry,%eax
/*
 * First code run by a newly forked thread: build the C argument frame
 * (trapframe pointer, callout argument, callout function) and hand off
 * to fork_exit().
 * NOTE(review): the transfer through %eax (call/jmp to fork_exit) and
 * the subsequent return path appear to be elided from this excerpt --
 * confirm against the full source.
 */
420 ENTRY(fork_trampoline)
421 pushl %esp /* trapframe pointer */
422 pushl %ebx /* arg1 */
423 pushl %esi /* function */
424 movl $fork_exit, %eax
427 /* cut from syscall */
430 * Return via doreti to handle ASTs.
441 #include <i386/i386/atpic_vector.S>
444 #if defined(DEV_APIC) && defined(DEV_ATPIC)
452 #include <i386/i386/apic_vector.S>
460 #include <dev/hyperv/vmbus/i386/vmbus_vector.S>
467 #include <i386/i386/vm86bios.S>
471 #include <i386/i386/copyout_fast.S>
474 * void doreti(struct trapframe)
476 * Handle return from interrupts, traps and syscalls.
480 .type doreti,@function
485 * Check if ASTs can be handled now. ASTs cannot be safely
486 * processed when returning from an NMI.
488 cmpb $T_NMI,TF_TRAPNO(%esp)
495 * PSL_VM must be checked first since segment registers only
496 * have an RPL in non-VM86 mode.
497 * ASTs can not be handled now if we are in a vm86 call.
499 testl $PSL_VM,TF_EFLAGS(%esp)
501 movl PCPU(CURPCB),%ecx
502 testl $PCB_VM86CALL,PCB_FLAGS(%ecx)
507 testb $SEL_RPL_MASK,TF_CS(%esp) /* are we returning to user mode? */
508 jz doreti_exit /* can't handle ASTs now if not */
512 * Check for ASTs atomically with returning. Disabling CPU
513 * interrupts provides sufficient locking even in the SMP case,
514 * since we will be informed of any new ASTs by an IPI.
517 movl PCPU(CURTHREAD),%eax
521 pushl %esp /* pass a pointer to the trapframe */
528 * doreti_exit: pop registers, iret.
530 * The segment register pop is a special case, since it may
531 * fault if (for example) a sigreturn specifies bad segment
532 * registers. The fault is handled in trap.c.
535 cmpl $T_NMI, TF_TRAPNO(%esp)
537 cmpl $T_MCHK, TF_TRAPNO(%esp)
539 cmpl $T_TRCTRAP, TF_TRAPNO(%esp)
541 testl $PSL_VM,TF_EFLAGS(%esp)
542 jnz 1f /* PCB_VM86CALL is not set */
543 testl $SEL_RPL_MASK, TF_CS(%esp)
545 1: movl $handle_ibrs_exit,%eax
547 movl mds_handler,%eax
550 movl PCPU(TRAMPSTK), %edx
552 testl $PSL_VM,TF_EFLAGS(%esp)
553 jz 2f /* PCB_VM86CALL is not set */
554 addl $VM86_STACK_SPACE, %ecx
559 movl PCPU(CURPCB),%eax
560 movl PCB_CR3(%eax), %eax
563 .globl doreti_popl_fs
566 .globl doreti_popl_es
569 .globl doreti_popl_ds
579 movl TF_ERR(%esp), %eax
584 * doreti_iret_fault and friends. Alternative return code for
585 * the case where we get a fault in the doreti_exit code
586 * above. trap() (i386/i386/trap.c) catches this specific
587 * case, and continues in the corresponding place in the code
590 * If the fault occurred during return to usermode, we recreate
591 * the trap frame and call trap() to send a signal. Otherwise
592 * the kernel was tricked into fault by attempt to restore invalid
593 * usermode segment selectors on return from nested fault or
594 * interrupt, where interrupted kernel entry code not yet loaded
595 * kernel selectors. In the latter case, emulate iret and zero
596 * the invalid selector.
599 .globl doreti_iret_fault
601 pushl $0 /* tf_err */
602 pushl $0 /* tf_trapno XXXKIB: provide more useful value ? */
606 .globl doreti_popl_ds_fault
607 doreti_popl_ds_fault:
608 testb $SEL_RPL_MASK,TF_CS-TF_DS(%esp)
609 jz doreti_popl_ds_kfault
612 .globl doreti_popl_es_fault
613 doreti_popl_es_fault:
614 testb $SEL_RPL_MASK,TF_CS-TF_ES(%esp)
615 jz doreti_popl_es_kfault
618 .globl doreti_popl_fs_fault
619 doreti_popl_fs_fault:
620 testb $SEL_RPL_MASK,TF_CS-TF_FS(%esp)
621 jz doreti_popl_fs_kfault
622 movl $0,TF_ERR(%esp) /* XXX should be the error code */
623 movl $T_PROTFLT,TF_TRAPNO(%esp)
627 doreti_popl_ds_kfault:
630 doreti_popl_es_kfault:
633 doreti_popl_fs_kfault:
640 * Since we are returning from an NMI, check if the current trap
641 * was from user mode and if so whether the current thread
642 * needs a user call chain capture.
644 testl $PSL_VM, TF_EFLAGS(%esp)
646 testb $SEL_RPL_MASK,TF_CS(%esp)
648 movl PCPU(CURTHREAD),%eax /* curthread present? */
651 testl $TDP_CALLCHAIN,TD_PFLAGS(%eax) /* flagged for capture? */
654 * Switch to thread stack. Reset tf_trapno to not indicate NMI,
655 * to cause normal userspace exit.
657 movl $T_RESERVED, TF_TRAPNO(%esp)
660 * Take the processor out of NMI mode by executing a fake "iret".
666 leal (outofnmi-1b)(%eax),%eax
671 * Call the callchain capture hook after turning interrupts back on.
676 pushl %esp /* frame pointer */
677 pushl $PMC_FN_USER_CALLCHAIN /* command */
678 movl PCPU(CURTHREAD),%eax
679 pushl %eax /* curthread */
686 ENTRY(end_exceptions)