1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (C) 1994, David Greenman
5  * Copyright (c) 1990, 1993
6  *      The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the University of Utah, and William Jolitz.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *      This product includes software developed by the University of
22  *      California, Berkeley and its contributors.
23  * 4. Neither the name of the University nor the names of its contributors
24  *    may be used to endorse or promote products derived from this software
25  *    without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  *      from: @(#)trap.c        7.4 (Berkeley) 5/13/91
40  */
41
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
44
45 /*
46  * AMD64 Trap and System call handling
47  */
48
49 #include "opt_clock.h"
50 #include "opt_cpu.h"
51 #include "opt_hwpmc_hooks.h"
52 #include "opt_isa.h"
53 #include "opt_kdb.h"
54 #include "opt_stack.h"
55
56 #include <sys/param.h>
57 #include <sys/bus.h>
58 #include <sys/systm.h>
59 #include <sys/proc.h>
60 #include <sys/pioctl.h>
61 #include <sys/ptrace.h>
62 #include <sys/kdb.h>
63 #include <sys/kernel.h>
64 #include <sys/ktr.h>
65 #include <sys/lock.h>
66 #include <sys/mutex.h>
67 #include <sys/resourcevar.h>
68 #include <sys/signalvar.h>
69 #include <sys/syscall.h>
70 #include <sys/sysctl.h>
71 #include <sys/sysent.h>
72 #include <sys/uio.h>
73 #include <sys/vmmeter.h>
74 #ifdef HWPMC_HOOKS
75 #include <sys/pmckern.h>
76 PMC_SOFT_DEFINE( , , page_fault, all);
77 PMC_SOFT_DEFINE( , , page_fault, read);
78 PMC_SOFT_DEFINE( , , page_fault, write);
79 #endif
80
81 #include <vm/vm.h>
82 #include <vm/vm_param.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_kern.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_extern.h>
88
89 #include <machine/cpu.h>
90 #include <machine/intr_machdep.h>
91 #include <x86/mca.h>
92 #include <machine/md_var.h>
93 #include <machine/pcb.h>
94 #ifdef SMP
95 #include <machine/smp.h>
96 #endif
97 #include <machine/stack.h>
98 #include <machine/tss.h>
99
100 #ifdef KDTRACE_HOOKS
101 #include <sys/dtrace_bsd.h>
102 #endif
103
104 void __noinline trap(struct trapframe *frame);
105 void trap_check(struct trapframe *frame);
106 void dblfault_handler(struct trapframe *frame);
107
108 static int trap_pfault(struct trapframe *, int);
109 static void trap_fatal(struct trapframe *, vm_offset_t);
110
111 #define MAX_TRAP_MSG            32
112 static char *trap_msg[] = {
113         "",                                     /*  0 unused */
114         "privileged instruction fault",         /*  1 T_PRIVINFLT */
115         "",                                     /*  2 unused */
116         "breakpoint instruction fault",         /*  3 T_BPTFLT */
117         "",                                     /*  4 unused */
118         "",                                     /*  5 unused */
119         "arithmetic trap",                      /*  6 T_ARITHTRAP */
120         "",                                     /*  7 unused */
121         "",                                     /*  8 unused */
122         "general protection fault",             /*  9 T_PROTFLT */
123         "trace trap",                           /* 10 T_TRCTRAP */
124         "",                                     /* 11 unused */
125         "page fault",                           /* 12 T_PAGEFLT */
126         "",                                     /* 13 unused */
127         "alignment fault",                      /* 14 T_ALIGNFLT */
128         "",                                     /* 15 unused */
129         "",                                     /* 16 unused */
130         "",                                     /* 17 unused */
131         "integer divide fault",                 /* 18 T_DIVIDE */
132         "non-maskable interrupt trap",          /* 19 T_NMI */
133         "overflow trap",                        /* 20 T_OFLOW */
134         "FPU bounds check fault",               /* 21 T_BOUND */
135         "FPU device not available",             /* 22 T_DNA */
136         "double fault",                         /* 23 T_DOUBLEFLT */
137         "FPU operand fetch fault",              /* 24 T_FPOPFLT */
138         "invalid TSS fault",                    /* 25 T_TSSFLT */
139         "segment not present fault",            /* 26 T_SEGNPFLT */
140         "stack fault",                          /* 27 T_STKFLT */
141         "machine check trap",                   /* 28 T_MCHK */
142         "SIMD floating-point exception",        /* 29 T_XMMFLT */
143         "reserved (unknown) fault",             /* 30 T_RESERVED */
144         "",                                     /* 31 unused (reserved) */
145         "DTrace pid return trap",               /* 32 T_DTRACE_RET */
146 };
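/*
 * The table above is indexed by the T_* trap numbers from <machine/trap.h>;
 * keep it and MAX_TRAP_MSG in sync when trap types are added.
 */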
147
148 static int prot_fault_translation;
149 SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
150     &prot_fault_translation, 0,
151     "Select signal to deliver on protection fault");
152 static int uprintf_signal;
153 SYSCTL_INT(_machdep, OID_AUTO, uprintf_signal, CTLFLAG_RWTUN,
154     &uprintf_signal, 0,
155     "Print debugging information on trap signal to ctty");
156
157 /*
158  * Exception, fault, and trap interface to the FreeBSD kernel.
159  * This common code is called from assembly language IDT gate entry
160  * routines that prepare a suitable stack frame, and restore this
161  * frame after the exception has been processed.
162  */
163
164 void
165 trap(struct trapframe *frame)
166 {
167         ksiginfo_t ksi;
168         struct thread *td;
169         struct proc *p;
170         register_t addr;
171 #ifdef KDB
172         register_t dr6;
173 #endif
174         int signo, ucode;
175         u_int type;
176
177         td = curthread;
178         p = td->td_proc;
179         signo = 0;
180         ucode = 0;
181         addr = 0;
182
183         VM_CNT_INC(v_trap);
184         type = frame->tf_trapno;
185
186 #ifdef SMP
187         /* Handler for NMI IPIs used for stopping CPUs. */
188         if (type == T_NMI && ipi_nmi_handler() == 0)
189                 return;
190 #endif
191
192 #ifdef KDB
193         if (kdb_active) {
194                 kdb_reenter();
195                 return;
196         }
197 #endif
198
199         if (type == T_RESERVED) {
200                 trap_fatal(frame, 0);
201                 return;
202         }
203
204         if (type == T_NMI) {
205 #ifdef HWPMC_HOOKS
206                 /*
207                  * CPU PMCs interrupt using an NMI.  If the PMC module is
208                  * active, pass the 'rip' value to the PMC module's interrupt
209                  * handler.  A non-zero return value from the handler means that
210                  * the NMI was consumed by it and we can return immediately.
211                  */
212                 if (pmc_intr != NULL &&
213                     (*pmc_intr)(PCPU_GET(cpuid), frame) != 0)
214                         return;
215 #endif
216
217 #ifdef STACK
218                 if (stack_nmi_handler(frame) != 0)
219                         return;
220 #endif
221         }
222
223         if (type == T_MCHK) {
224                 mca_intr();
225                 return;
226         }
227
228         if ((frame->tf_rflags & PSL_I) == 0) {
229                 /*
230                  * Buggy application or kernel code has disabled
231                  * interrupts and then trapped.  Enabling interrupts
232                  * now is wrong, but it is better than running with
233                  * interrupts disabled until they are accidentally
234                  * enabled later.
235                  */
236                 if (TRAPF_USERMODE(frame))
237                         uprintf(
238                             "pid %ld (%s): trap %d with interrupts disabled\n",
239                             (long)curproc->p_pid, curthread->td_name, type);
240                 else if (type != T_NMI && type != T_BPTFLT &&
241                     type != T_TRCTRAP) {
242                         /*
243                          * XXX not quite right, since this may be for a
244                          * multiple fault in user mode.
245                          */
246                         printf("kernel trap %d with interrupts disabled\n",
247                             type);
248
249                         /*
250                          * We shouldn't enable interrupts while holding a
251                          * spin lock.
252                          */
253                         if (td->td_md.md_spinlock_count == 0)
254                                 enable_intr();
255                 }
256         }
257
258         if (TRAPF_USERMODE(frame)) {
259                 /* user trap */
260
261                 td->td_pticks = 0;
262                 td->td_frame = frame;
263                 addr = frame->tf_rip;
264                 if (td->td_cowgen != p->p_cowgen)
265                         thread_cow_update(td);
266
267                 switch (type) {
268                 case T_PRIVINFLT:       /* privileged instruction fault */
269                         signo = SIGILL;
270                         ucode = ILL_PRVOPC;
271                         break;
272
273                 case T_BPTFLT:          /* bpt instruction fault */
274                 case T_TRCTRAP:         /* trace trap */
275                         enable_intr();
276 #ifdef KDTRACE_HOOKS
277                         if (type == T_BPTFLT) {
278                                 if (dtrace_pid_probe_ptr != NULL &&
279                                     dtrace_pid_probe_ptr(frame) == 0)
280                                         return;
281                         }
282 #endif
283                         frame->tf_rflags &= ~PSL_T;
284                         signo = SIGTRAP;
285                         ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
286                         break;
287
288                 case T_ARITHTRAP:       /* arithmetic trap */
289                         ucode = fputrap_x87();
290                         if (ucode == -1)
291                                 return;
292                         signo = SIGFPE;
293                         break;
294
295                 case T_PROTFLT:         /* general protection fault */
296                         signo = SIGBUS;
297                         ucode = BUS_OBJERR;
298                         break;
299                 case T_STKFLT:          /* stack fault */
300                 case T_SEGNPFLT:        /* segment not present fault */
301                         signo = SIGBUS;
302                         ucode = BUS_ADRERR;
303                         break;
304                 case T_TSSFLT:          /* invalid TSS fault */
305                         signo = SIGBUS;
306                         ucode = BUS_OBJERR;
307                         break;
308                 case T_ALIGNFLT:
309                         signo = SIGBUS;
310                         ucode = BUS_ADRALN;
311                         break;
312                 case T_DOUBLEFLT:       /* double fault */
313                 default:
314                         signo = SIGBUS;
315                         ucode = BUS_OBJERR;
316                         break;
317
318                 case T_PAGEFLT:         /* page fault */
319                         /*
320                          * Can the emulator take care of this trap?
321                          */
322                         if (*p->p_sysent->sv_trap != NULL &&
323                             (*p->p_sysent->sv_trap)(td) == 0)
324                                 return;
325
326                         addr = frame->tf_addr;
327                         signo = trap_pfault(frame, TRUE);
328                         if (signo == -1)
329                                 return;
330                         if (signo == 0)
331                                 goto userret;
332                         if (signo == SIGSEGV) {
333                                 ucode = SEGV_MAPERR;
334                         } else if (prot_fault_translation == 0) {
335                                 /*
336                                  * Autodetect.  This check also covers
337                                  * the images without the ABI-tag ELF
338                                  * note.
339                                  */
340                                 if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
341                                     p->p_osrel >= P_OSREL_SIGSEGV) {
342                                         signo = SIGSEGV;
343                                         ucode = SEGV_ACCERR;
344                                 } else {
345                                         signo = SIGBUS;
346                                         ucode = BUS_PAGE_FAULT;
347                                 }
348                         } else if (prot_fault_translation == 1) {
349                                 /*
350                                  * Always compat mode.
351                                  */
352                                 signo = SIGBUS;
353                                 ucode = BUS_PAGE_FAULT;
354                         } else {
355                                 /*
356                                  * Always SIGSEGV mode.
357                                  */
358                                 signo = SIGSEGV;
359                                 ucode = SEGV_ACCERR;
360                         }
361                         break;
362
363                 case T_DIVIDE:          /* integer divide fault */
364                         ucode = FPE_INTDIV;
365                         signo = SIGFPE;
366                         break;
367
368 #ifdef DEV_ISA
369                 case T_NMI:
370                         nmi_handle_intr(type, frame);
371                         return;
372 #endif
373
374                 case T_OFLOW:           /* integer overflow fault */
375                         ucode = FPE_INTOVF;
376                         signo = SIGFPE;
377                         break;
378
379                 case T_BOUND:           /* bounds check fault */
380                         ucode = FPE_FLTSUB;
381                         signo = SIGFPE;
382                         break;
383
384                 case T_DNA:
385                         /* transparent fault (due to context switch "late") */
386                         KASSERT(PCB_USER_FPU(td->td_pcb),
387                             ("kernel FPU ctx has leaked"));
388                         fpudna();
389                         return;
390
391                 case T_FPOPFLT:         /* FPU operand fetch fault */
392                         ucode = ILL_COPROC;
393                         signo = SIGILL;
394                         break;
395
396                 case T_XMMFLT:          /* SIMD floating-point exception */
397                         ucode = fputrap_sse();
398                         if (ucode == -1)
399                                 return;
400                         signo = SIGFPE;
401                         break;
402 #ifdef KDTRACE_HOOKS
403                 case T_DTRACE_RET:
404                         enable_intr();
405                         if (dtrace_return_probe_ptr != NULL)
406                                 dtrace_return_probe_ptr(frame);
407                         return;
408 #endif
409                 }
410         } else {
411                 /* kernel trap */
412
413                 KASSERT(cold || td->td_ucred != NULL,
414                     ("kernel trap doesn't have ucred"));
415                 switch (type) {
416                 case T_PAGEFLT:                 /* page fault */
417                         (void) trap_pfault(frame, FALSE);
418                         return;
419
420                 case T_DNA:
421                         if (PCB_USER_FPU(td->td_pcb))
422                                 panic("Unregistered use of FPU in kernel");
423                         fpudna();
424                         return;
425
426                 case T_ARITHTRAP:       /* arithmetic trap */
427                 case T_XMMFLT:          /* SIMD floating-point exception */
428                 case T_FPOPFLT:         /* FPU operand fetch fault */
429                         /*
430                          * For now, supporting kernel handler
431                          * registration for FPU traps is overkill.
432                          */
433                         trap_fatal(frame, 0);
434                         return;
435
436                 case T_STKFLT:          /* stack fault */
437                 case T_PROTFLT:         /* general protection fault */
438                 case T_SEGNPFLT:        /* segment not present fault */
439                         if (td->td_intr_nesting_level != 0)
440                                 break;
441
442                         /*
443                          * Invalid segment selectors and out of bounds
444                          * %rip's and %rsp's can be set up in user mode.
445                          * This causes a fault in kernel mode when the
446                          * kernel tries to return to user mode.  We want
447                          * to get this fault so that we can fix the
448                          * problem here and not have to check all the
449                          * selectors and pointers when the user changes
450                          * them.
451                          */
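			/*
			 * Each label checked below names the single
			 * instruction that can fault while consuming
			 * user-controlled state (the final iretq, the segment
			 * register loads, and the %fs/%gs base loads); the
			 * matching *_fault label is the recovery stub the
			 * fault is redirected to, so the problem is reported
			 * to the process rather than panicking the kernel.
			 */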
452                         if (frame->tf_rip == (long)doreti_iret) {
453                                 frame->tf_rip = (long)doreti_iret_fault;
454                                 return;
455                         }
456                         if (frame->tf_rip == (long)ld_ds) {
457                                 frame->tf_rip = (long)ds_load_fault;
458                                 return;
459                         }
460                         if (frame->tf_rip == (long)ld_es) {
461                                 frame->tf_rip = (long)es_load_fault;
462                                 return;
463                         }
464                         if (frame->tf_rip == (long)ld_fs) {
465                                 frame->tf_rip = (long)fs_load_fault;
466                                 return;
467                         }
468                         if (frame->tf_rip == (long)ld_gs) {
469                                 frame->tf_rip = (long)gs_load_fault;
470                                 return;
471                         }
472                         if (frame->tf_rip == (long)ld_gsbase) {
473                                 frame->tf_rip = (long)gsbase_load_fault;
474                                 return;
475                         }
476                         if (frame->tf_rip == (long)ld_fsbase) {
477                                 frame->tf_rip = (long)fsbase_load_fault;
478                                 return;
479                         }
480                         if (curpcb->pcb_onfault != NULL) {
481                                 frame->tf_rip = (long)curpcb->pcb_onfault;
482                                 return;
483                         }
484                         break;
485
486                 case T_TSSFLT:
487                         /*
488                          * PSL_NT can be set in user mode and isn't cleared
489                          * automatically when the kernel is entered.  This
490                          * causes a TSS fault when the kernel attempts to
491                          * `iret' because the TSS link is uninitialized.  We
492                          * want to get this fault so that we can fix the
493                          * problem here and not every time the kernel is
494                          * entered.
495                          */
496                         if (frame->tf_rflags & PSL_NT) {
497                                 frame->tf_rflags &= ~PSL_NT;
498                                 return;
499                         }
500                         break;
501
502                 case T_TRCTRAP:  /* trace trap */
503                         /*
504                          * Ignore debug register trace traps due to
505                          * accesses in the user's address space, which
506                          * can happen under several conditions such as
507                          * if a user sets a watchpoint on a buffer and
508                          * then passes that buffer to a system call.
509                          * We still want to get TRCTRAPS for addresses
510                          * in kernel space because that is useful when
511                          * debugging the kernel.
512                          */
513                         if (user_dbreg_trap()) {
514                                 /*
515                                  * Reset breakpoint bits because the
516                                  * processor doesn't clear them for us.
517                                  */
518                                 load_dr6(rdr6() & ~0xf);
519                                 return;
520                         }
521                         /*
522                          * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
523                          */
524                 case T_BPTFLT:
525                         /*
526                          * If KDB is enabled, let it handle the debugger trap.
527                          * Otherwise, debugger traps "can't happen".
528                          */
529 #ifdef KDB
530                         /* XXX %dr6 is not quite reentrant. */
531                         dr6 = rdr6();
532                         load_dr6(dr6 & ~0x4000);
533                         if (kdb_trap(type, dr6, frame))
534                                 return;
535 #endif
536                         break;
537
538 #ifdef DEV_ISA
539                 case T_NMI:
540                         nmi_handle_intr(type, frame);
541                         return;
542 #endif
543                 }
544
545                 trap_fatal(frame, 0);
546                 return;
547         }
548
549         /* Translate fault for emulators (e.g. Linux) */
550         if (*p->p_sysent->sv_transtrap != NULL)
551                 signo = (*p->p_sysent->sv_transtrap)(signo, type);
552
553         ksiginfo_init_trap(&ksi);
554         ksi.ksi_signo = signo;
555         ksi.ksi_code = ucode;
556         ksi.ksi_trapno = type;
557         ksi.ksi_addr = (void *)addr;
558         if (uprintf_signal) {
559                 uprintf("pid %d comm %s: signal %d err %lx code %d type %d "
560                     "addr 0x%lx rsp 0x%lx rip 0x%lx "
561                     "<%02x %02x %02x %02x %02x %02x %02x %02x>\n",
562                     p->p_pid, p->p_comm, signo, frame->tf_err, ucode, type,
563                     addr, frame->tf_rsp, frame->tf_rip,
564                     fubyte((void *)(frame->tf_rip + 0)),
565                     fubyte((void *)(frame->tf_rip + 1)),
566                     fubyte((void *)(frame->tf_rip + 2)),
567                     fubyte((void *)(frame->tf_rip + 3)),
568                     fubyte((void *)(frame->tf_rip + 4)),
569                     fubyte((void *)(frame->tf_rip + 5)),
570                     fubyte((void *)(frame->tf_rip + 6)),
571                     fubyte((void *)(frame->tf_rip + 7)));
572         }
573         KASSERT((read_rflags() & PSL_I) != 0, ("interrupts disabled"));
574         trapsignal(td, &ksi);
575 userret:
576         userret(td, frame);
577         KASSERT(PCB_USER_FPU(td->td_pcb),
578             ("Return from trap with kernel FPU ctx leaked"));
579 }
580
581 /*
582  * Ensure that we ignore any DTrace-induced faults. This function cannot
583  * be instrumented, so it cannot generate such faults itself.
584  */
585 void
586 trap_check(struct trapframe *frame)
587 {
588
589 #ifdef KDTRACE_HOOKS
590         if (dtrace_trap_func != NULL &&
591             (*dtrace_trap_func)(frame, frame->tf_trapno) != 0)
592                 return;
593 #endif
594         trap(frame);
595 }
596
597 static int
598 trap_pfault(struct trapframe *frame, int usermode)
599 {
600         struct thread *td;
601         struct proc *p;
602         vm_map_t map;
603         vm_offset_t va;
604         int rv;
605         vm_prot_t ftype;
606         vm_offset_t eva;
607
608         td = curthread;
609         p = td->td_proc;
610         eva = frame->tf_addr;
611
612         if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
613                 /*
614                  * Due to both processor errata and lazy TLB invalidation when
615                  * access restrictions are removed from virtual pages, memory
616                  * accesses that are allowed by the physical mapping layer may
617                  * nonetheless cause one spurious page fault per virtual page. 
618                  * When the thread is executing a "no faulting" section that
619                  * is bracketed by vm_fault_{disable,enable}_pagefaults(),
620                  * every page fault is treated as a spurious page fault,
621                  * unless it accesses the same virtual address as the most
622                  * recent page fault within the same "no faulting" section.
623                  */
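		/*
		 * A minimal sketch of such a section;
		 * vm_fault_disable_pagefaults() and
		 * vm_fault_enable_pagefaults() set and clear TDP_NOFAULTING
		 * around the access:
		 *
		 *	int save;
		 *
		 *	save = vm_fault_disable_pagefaults();
		 *	... access that may fault spuriously ...
		 *	vm_fault_enable_pagefaults(save);
		 */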
624                 if (td->td_md.md_spurflt_addr != eva ||
625                     (td->td_pflags & TDP_RESETSPUR) != 0) {
626                         /*
627                          * Do nothing to the TLB.  A stale TLB entry is
628                          * flushed automatically by a page fault.
629                          */
630                         td->td_md.md_spurflt_addr = eva;
631                         td->td_pflags &= ~TDP_RESETSPUR;
632                         return (0);
633                 }
634         } else {
635                 /*
636                  * If we get a page fault while in a critical section, then
637                  * it is most likely a fatal kernel page fault.  The kernel
638                  * is already going to panic trying to get a sleep lock to
639                  * do the VM lookup, so just consider it a fatal trap so the
640                  * kernel can print out a useful trap message and even get
641                  * to the debugger.
642                  *
643                  * If we get a page fault while holding a non-sleepable
644                  * lock, then it is most likely a fatal kernel page fault.
645                  * If WITNESS is enabled, then it's going to whine about
646                  * bogus LORs with various VM locks, so just skip to the
647                  * fatal trap handling directly.
648                  */
649                 if (td->td_critnest != 0 ||
650                     WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
651                     "Kernel page fault") != 0) {
652                         trap_fatal(frame, eva);
653                         return (-1);
654                 }
655         }
656         va = trunc_page(eva);
657         if (va >= VM_MIN_KERNEL_ADDRESS) {
658                 /*
659                  * Don't allow user-mode faults in kernel address space.
660                  */
661                 if (usermode)
662                         return (SIGSEGV);
663
664                 map = kernel_map;
665         } else {
666                 map = &p->p_vmspace->vm_map;
667
668                 /*
669                  * When accessing a usermode address, the kernel must be
670                  * ready to accept the page fault and provide a
671                  * handling routine.  Since accessing the address
672                  * without a handler is a bug, do not try to handle
673                  * it normally; panic immediately instead.
674                  */
675                 if (!usermode && (td->td_intr_nesting_level != 0 ||
676                     curpcb->pcb_onfault == NULL)) {
677                         trap_fatal(frame, eva);
678                         return (-1);
679                 }
680         }
681
682         /*
683          * If the trap was caused by errant bits in the PTE then panic.
684          */
685         if (frame->tf_err & PGEX_RSV) {
686                 trap_fatal(frame, eva);
687                 return (-1);
688         }
689
690         /*
691          * PGEX_I is defined only if the execute disable bit capability is
692          * supported and enabled.
693          */
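	/*
	 * Hardware page-fault error code bits consumed here and in
	 * trap_fatal():
	 *	PGEX_P   - protection violation (clear means page not present)
	 *	PGEX_W   - the faulting access was a write
	 *	PGEX_U   - the access originated in user mode
	 *	PGEX_I   - instruction fetch (reported only with NX enabled)
	 *	PGEX_RSV - reserved PTE bits were set; treated as fatal above
	 */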
694         if (frame->tf_err & PGEX_W)
695                 ftype = VM_PROT_WRITE;
696         else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
697                 ftype = VM_PROT_EXECUTE;
698         else
699                 ftype = VM_PROT_READ;
700
701         /* Fault in the page. */
702         rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
703         if (rv == KERN_SUCCESS) {
704 #ifdef HWPMC_HOOKS
705                 if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
706                         PMC_SOFT_CALL_TF( , , page_fault, all, frame);
707                         if (ftype == VM_PROT_READ)
708                                 PMC_SOFT_CALL_TF( , , page_fault, read,
709                                     frame);
710                         else
711                                 PMC_SOFT_CALL_TF( , , page_fault, write,
712                                     frame);
713                 }
714 #endif
715                 return (0);
716         }
717         if (!usermode) {
718                 if (td->td_intr_nesting_level == 0 &&
719                     curpcb->pcb_onfault != NULL) {
720                         frame->tf_rip = (long)curpcb->pcb_onfault;
721                         return (0);
722                 }
723                 trap_fatal(frame, eva);
724                 return (-1);
725         }
726         return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
727 }
728
729 static void
730 trap_fatal(struct trapframe *frame, vm_offset_t eva)
731 {
734         int code, ss;
735         u_int type;
736         struct soft_segment_descriptor softseg;
737         char *msg;
738
739         code = frame->tf_err;
740         type = frame->tf_trapno;
741         sdtossd(&gdt[NGDT * PCPU_GET(cpuid) + IDXSEL(frame->tf_cs & 0xffff)],
742             &softseg);
743
744         if (type <= MAX_TRAP_MSG)
745                 msg = trap_msg[type];
746         else
747                 msg = "UNKNOWN";
748         printf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
749             TRAPF_USERMODE(frame) ? "user" : "kernel");
750 #ifdef SMP
751         /* two separate prints in case of a trap on an unmapped page */
752         printf("cpuid = %d; ", PCPU_GET(cpuid));
753         printf("apic id = %02x\n", PCPU_GET(apic_id));
754 #endif
755         if (type == T_PAGEFLT) {
756                 printf("fault virtual address   = 0x%lx\n", eva);
757                 printf("fault code              = %s %s %s, %s\n",
758                         code & PGEX_U ? "user" : "supervisor",
759                         code & PGEX_W ? "write" : "read",
760                         code & PGEX_I ? "instruction" : "data",
761                         code & PGEX_RSV ? "reserved bits in PTE" :
762                         code & PGEX_P ? "protection violation" : "page not present");
763         }
764         printf("instruction pointer     = 0x%lx:0x%lx\n",
765                frame->tf_cs & 0xffff, frame->tf_rip);
766         ss = frame->tf_ss & 0xffff;
767         printf("stack pointer           = 0x%x:0x%lx\n", ss, frame->tf_rsp);
768         printf("frame pointer           = 0x%x:0x%lx\n", ss, frame->tf_rbp);
769         printf("code segment            = base 0x%lx, limit 0x%lx, type 0x%x\n",
770                softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
771         printf("                        = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
772                softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
773                softseg.ssd_gran);
774         printf("processor eflags        = ");
775         if (frame->tf_rflags & PSL_T)
776                 printf("trace trap, ");
777         if (frame->tf_rflags & PSL_I)
778                 printf("interrupt enabled, ");
779         if (frame->tf_rflags & PSL_NT)
780                 printf("nested task, ");
781         if (frame->tf_rflags & PSL_RF)
782                 printf("resume, ");
783         printf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
784         printf("current process         = %d (%s)\n",
785             curproc->p_pid, curthread->td_name);
786
787 #ifdef KDB
788         if (debugger_on_panic || kdb_active)
789                 if (kdb_trap(type, 0, frame))
790                         return;
791 #endif
792         printf("trap number             = %d\n", type);
793         if (type <= MAX_TRAP_MSG)
794                 panic("%s", trap_msg[type]);
795         else
796                 panic("unknown/reserved trap");
797 }
798
799 /*
800  * Double fault handler. Called when a fault occurs while writing
801  * a frame for a trap/exception onto the stack. This usually occurs
802  * when the stack overflows (as is the case with infinite recursion,
803  * for example).
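 * The handler runs on a dedicated stack supplied through the TSS IST
 * mechanism, so the fault can still be reported when the normal kernel
 * stack is no longer usable.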
804  */
805 void
806 dblfault_handler(struct trapframe *frame)
807 {
808 #ifdef KDTRACE_HOOKS
809         if (dtrace_doubletrap_func != NULL)
810                 (*dtrace_doubletrap_func)();
811 #endif
812         printf("\nFatal double fault\n"
813             "rip %#lx rsp %#lx rbp %#lx\n"
814             "rax %#lx rdx %#lx rbx %#lx\n"
815             "rcx %#lx rsi %#lx rdi %#lx\n"
816             "r8 %#lx r9 %#lx r10 %#lx\n"
817             "r11 %#lx r12 %#lx r13 %#lx\n"
818             "r14 %#lx r15 %#lx rflags %#lx\n"
819             "cs %#lx ss %#lx ds %#hx es %#hx fs %#hx gs %#hx\n"
820             "fsbase %#lx gsbase %#lx kgsbase %#lx\n",
821             frame->tf_rip, frame->tf_rsp, frame->tf_rbp,
822             frame->tf_rax, frame->tf_rdx, frame->tf_rbx,
823             frame->tf_rcx, frame->tf_rsi, frame->tf_rdi,
824             frame->tf_r8, frame->tf_r9, frame->tf_r10,
825             frame->tf_r11, frame->tf_r12, frame->tf_r13,
826             frame->tf_r14, frame->tf_r15, frame->tf_rflags,
827             frame->tf_cs, frame->tf_ss, frame->tf_ds, frame->tf_es,
828             frame->tf_fs, frame->tf_gs,
829             rdmsr(MSR_FSBASE), rdmsr(MSR_GSBASE), rdmsr(MSR_KGSBASE));
830 #ifdef SMP
831         /* two separate prints in case of a trap on an unmapped page */
832         printf("cpuid = %d; ", PCPU_GET(cpuid));
833         printf("apic id = %02x\n", PCPU_GET(apic_id));
834 #endif
835         panic("double fault");
836 }
837
838 int
839 cpu_fetch_syscall_args(struct thread *td)
840 {
841         struct proc *p;
842         struct trapframe *frame;
843         register_t *argp;
844         struct syscall_args *sa;
845         caddr_t params;
846         int reg, regcnt, error;
847
848         p = td->td_proc;
849         frame = td->td_frame;
850         sa = &td->td_sa;
851         reg = 0;
852         regcnt = 6;
853
854         params = (caddr_t)frame->tf_rsp + sizeof(register_t);
855         sa->code = frame->tf_rax;
856
857         if (sa->code == SYS_syscall || sa->code == SYS___syscall) {
858                 sa->code = frame->tf_rdi;
859                 reg++;
860                 regcnt--;
861         }
862         if (p->p_sysent->sv_mask)
863                 sa->code &= p->p_sysent->sv_mask;
864
865         if (sa->code >= p->p_sysent->sv_size)
866                 sa->callp = &p->p_sysent->sv_table[0];
867         else
868                 sa->callp = &p->p_sysent->sv_table[sa->code];
869
870         sa->narg = sa->callp->sy_narg;
871         KASSERT(sa->narg <= sizeof(sa->args) / sizeof(sa->args[0]),
872             ("Too many syscall arguments!"));
873         error = 0;
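	/*
	 * The first six members of struct trapframe (tf_rdi, tf_rsi, tf_rdx,
	 * tf_rcx, tf_r8, tf_r9) mirror the in-register syscall arguments;
	 * the syscall entry stub saves the fourth argument (%r10, since the
	 * SYSCALL instruction clobbers %rcx) in the tf_rcx slot.  The
	 * register arguments can therefore be copied out as one contiguous
	 * array.
	 */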
874         argp = &frame->tf_rdi;
875         argp += reg;
876         bcopy(argp, sa->args, sizeof(sa->args[0]) * regcnt);
877         if (sa->narg > regcnt) {
878                 KASSERT(params != NULL, ("copyin args with no params!"));
879                 error = copyin(params, &sa->args[regcnt],
880                     (sa->narg - regcnt) * sizeof(sa->args[0]));
881         }
882
883         if (error == 0) {
884                 td->td_retval[0] = 0;
885                 td->td_retval[1] = frame->tf_rdx;
886         }
887
888         return (error);
889 }
890
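/*
 * subr_syscall.c provides the MI syscallenter() and syscallret() helpers
 * used by amd64_syscall() below; it is included textually so that they can
 * be inlined into this MD handler.
 */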
891 #include "../../kern/subr_syscall.c"
892
893 /*
894  * System call handler for native binaries.  The trap frame is already
895  * set up by the assembler trampoline and a pointer to it is saved in
896  * td_frame.
897  */
898 void
899 amd64_syscall(struct thread *td, int traced)
900 {
901         int error;
902         ksiginfo_t ksi;
903
904 #ifdef DIAGNOSTIC
905         if (!TRAPF_USERMODE(td->td_frame)) {
906                 panic("syscall");
907                 /* NOT REACHED */
908         }
909 #endif
910         error = syscallenter(td);
911
912         /*
913          * Traced syscall.
914          */
915         if (__predict_false(traced)) {
916                 td->td_frame->tf_rflags &= ~PSL_T;
917                 ksiginfo_init_trap(&ksi);
918                 ksi.ksi_signo = SIGTRAP;
919                 ksi.ksi_code = TRAP_TRACE;
920                 ksi.ksi_addr = (void *)td->td_frame->tf_rip;
921                 trapsignal(td, &ksi);
922         }
923
924         KASSERT(PCB_USER_FPU(td->td_pcb),
925             ("System call %s returning with kernel FPU ctx leaked",
926              syscallname(td->td_proc, td->td_sa.code)));
927         KASSERT(td->td_pcb->pcb_save == get_pcb_user_save_td(td),
928             ("System call %s returning with mangled pcb_save",
929              syscallname(td->td_proc, td->td_sa.code)));
930         KASSERT(td->td_md.md_invl_gen.gen == 0,
931             ("System call %s returning with leaked invl_gen %lu",
932             syscallname(td->td_proc, td->td_sa.code),
933             td->td_md.md_invl_gen.gen));
934
935         syscallret(td, error);
936
937         /*
938          * If the user-supplied value of %rip is not a canonical
939          * address, then some CPUs will trigger a ring 0 #GP during
940          * the sysret instruction.  However, the fault handler would
941          * execute in ring 0 with the user's %gs and %rsp which would
942          * not be safe.  Instead, use the full return path which
943          * catches the problem safely.
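	 * (Setting PCB_FULL_IRET forces the return through the full iretq
	 * path rather than sysret.)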
944          */
945         if (__predict_false(td->td_frame->tf_rip >= VM_MAXUSER_ADDRESS))
946                 set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
947 }