/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)trap.c        7.4 (Berkeley) 5/13/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * AMD64 Trap and System call handling
 */

#include "opt_clock.h"
#include "opt_cpu.h"
#include "opt_hwpmc_hooks.h"
#include "opt_isa.h"
#include "opt_kdb.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , page_fault, all);
PMC_SOFT_DEFINE( , , page_fault, read);
PMC_SOFT_DEFINE( , , page_fault, write);
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/stack.h>
#include <machine/tss.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
#endif

extern void __noinline trap(struct trapframe *frame);
extern void trap_check(struct trapframe *frame);
extern void syscall(struct trapframe *frame);
void dblfault_handler(struct trapframe *frame);

static int trap_pfault(struct trapframe *, int);
static void trap_fatal(struct trapframe *, vm_offset_t);

#define MAX_TRAP_MSG            32
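/*
 * Human-readable descriptions of the T_* trap codes, indexed by trap
 * number; trap_fatal() looks messages up here for types up to
 * MAX_TRAP_MSG.
 */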
static char *trap_msg[] = {
        "",                                     /*  0 unused */
        "privileged instruction fault",         /*  1 T_PRIVINFLT */
        "",                                     /*  2 unused */
        "breakpoint instruction fault",         /*  3 T_BPTFLT */
        "",                                     /*  4 unused */
        "",                                     /*  5 unused */
        "arithmetic trap",                      /*  6 T_ARITHTRAP */
        "",                                     /*  7 unused */
        "",                                     /*  8 unused */
        "general protection fault",             /*  9 T_PROTFLT */
        "trace trap",                           /* 10 T_TRCTRAP */
        "",                                     /* 11 unused */
        "page fault",                           /* 12 T_PAGEFLT */
        "",                                     /* 13 unused */
        "alignment fault",                      /* 14 T_ALIGNFLT */
        "",                                     /* 15 unused */
        "",                                     /* 16 unused */
        "",                                     /* 17 unused */
        "integer divide fault",                 /* 18 T_DIVIDE */
        "non-maskable interrupt trap",          /* 19 T_NMI */
        "overflow trap",                        /* 20 T_OFLOW */
        "FPU bounds check fault",               /* 21 T_BOUND */
        "FPU device not available",             /* 22 T_DNA */
        "double fault",                         /* 23 T_DOUBLEFLT */
        "FPU operand fetch fault",              /* 24 T_FPOPFLT */
        "invalid TSS fault",                    /* 25 T_TSSFLT */
        "segment not present fault",            /* 26 T_SEGNPFLT */
        "stack fault",                          /* 27 T_STKFLT */
        "machine check trap",                   /* 28 T_MCHK */
        "SIMD floating-point exception",        /* 29 T_XMMFLT */
        "reserved (unknown) fault",             /* 30 T_RESERVED */
        "",                                     /* 31 unused (reserved) */
        "DTrace pid return trap",               /* 32 T_DTRACE_RET */
};

#ifdef KDB
static int kdb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, kdb_on_nmi, CTLFLAG_RWTUN,
        &kdb_on_nmi, 0, "Go to KDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
        &panic_on_nmi, 0, "Panic on NMI");
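/*
 * prot_fault_translation selects the signal delivered for a
 * protection-type page fault in user mode (see the T_PAGEFLT case
 * below): 0 autodetects from the process ABI and osrel, 1 always
 * delivers the historical SIGBUS, and any other value always
 * delivers SIGSEGV.
 */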
static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Select signal to deliver on protection fault");
static int uprintf_signal;
SYSCTL_INT(_machdep, OID_AUTO, uprintf_signal, CTLFLAG_RWTUN,
    &uprintf_signal, 0,
    "Print debugging information on trap signal to ctty");

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */

void
trap(struct trapframe *frame)
{
#ifdef KDTRACE_HOOKS
        struct reg regs;
#endif
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int i = 0, ucode = 0, code;
        u_int type;
        register_t addr = 0;
        ksiginfo_t ksi;

        PCPU_INC(cnt.v_trap);
        type = frame->tf_trapno;

#ifdef SMP
        /* Handler for NMI IPIs used for stopping CPUs. */
        if (type == T_NMI) {
                if (ipi_nmi_handler() == 0)
                        goto out;
        }
#endif /* SMP */

#ifdef KDB
        if (kdb_active) {
                kdb_reenter();
                goto out;
        }
#endif

        if (type == T_RESERVED) {
                trap_fatal(frame, 0);
                goto out;
        }

        if (type == T_NMI) {
#ifdef HWPMC_HOOKS
                /*
                 * CPU PMCs interrupt using an NMI.  If the PMC module is
                 * active, pass the 'rip' value to the PMC module's interrupt
                 * handler.  A non-zero return value from the handler means that
                 * the NMI was consumed by it and we can return immediately.
                 */
                if (pmc_intr != NULL &&
                    (*pmc_intr)(PCPU_GET(cpuid), frame) != 0)
                        goto out;
#endif

#ifdef STACK
                if (stack_nmi_handler(frame) != 0)
                        goto out;
#endif
        }

        if (type == T_MCHK) {
                mca_intr();
                goto out;
        }

        if ((frame->tf_rflags & PSL_I) == 0) {
                /*
                 * Buggy application or kernel code has disabled
                 * interrupts and then trapped.  Enabling interrupts
                 * now is wrong, but it is better than running with
                 * interrupts disabled until they are accidentally
                 * enabled later.
                 */
                if (ISPL(frame->tf_cs) == SEL_UPL)
                        uprintf(
                            "pid %ld (%s): trap %d with interrupts disabled\n",
                            (long)curproc->p_pid, curthread->td_name, type);
                else if (type != T_NMI && type != T_BPTFLT &&
                    type != T_TRCTRAP) {
                        /*
                         * XXX not quite right, since this may be for a
                         * multiple fault in user mode.
                         */
                        printf("kernel trap %d with interrupts disabled\n",
                            type);

                        /*
                         * We shouldn't enable interrupts while holding a
                         * spin lock.
                         */
                        if (td->td_md.md_spinlock_count == 0)
                                enable_intr();
                }
        }

        code = frame->tf_err;

        if (ISPL(frame->tf_cs) == SEL_UPL) {
                /* user trap */

                td->td_pticks = 0;
                td->td_frame = frame;
                addr = frame->tf_rip;
                if (td->td_cowgen != p->p_cowgen)
                        thread_cow_update(td);

                switch (type) {
                case T_PRIVINFLT:       /* privileged instruction fault */
                        i = SIGILL;
                        ucode = ILL_PRVOPC;
                        break;

                case T_BPTFLT:          /* bpt instruction fault */
                case T_TRCTRAP:         /* trace trap */
                        enable_intr();
#ifdef KDTRACE_HOOKS
                        if (type == T_BPTFLT) {
                                fill_frame_regs(frame, &regs);
                                if (dtrace_pid_probe_ptr != NULL &&
                                    dtrace_pid_probe_ptr(&regs) == 0)
                                        goto out;
                        }
#endif
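                        /*
                         * Clear the single-step flag from the saved flags
                         * so the thread does not keep single-stepping after
                         * the SIGTRAP is delivered.
                         */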
                        frame->tf_rflags &= ~PSL_T;
                        i = SIGTRAP;
                        ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
                        break;

                case T_ARITHTRAP:       /* arithmetic trap */
                        ucode = fputrap_x87();
                        if (ucode == -1)
                                goto userout;
                        i = SIGFPE;
                        break;

                case T_PROTFLT:         /* general protection fault */
                        i = SIGBUS;
                        ucode = BUS_OBJERR;
                        break;
                case T_STKFLT:          /* stack fault */
                case T_SEGNPFLT:        /* segment not present fault */
                        i = SIGBUS;
                        ucode = BUS_ADRERR;
                        break;
                case T_TSSFLT:          /* invalid TSS fault */
                        i = SIGBUS;
                        ucode = BUS_OBJERR;
                        break;
                case T_ALIGNFLT:
                        i = SIGBUS;
                        ucode = BUS_ADRALN;
                        break;
                case T_DOUBLEFLT:       /* double fault */
                default:
                        i = SIGBUS;
                        ucode = BUS_OBJERR;
                        break;

                case T_PAGEFLT:         /* page fault */
                        /*
                         * Can the emulator take care of this trap?
                         */
                        if (*p->p_sysent->sv_trap != NULL &&
                            (*p->p_sysent->sv_trap)(td) == 0)
                                goto userout;

                        addr = frame->tf_addr;
                        i = trap_pfault(frame, TRUE);
                        if (i == -1)
                                goto userout;
                        if (i == 0)
                                goto user;

                        if (i == SIGSEGV)
                                ucode = SEGV_MAPERR;
                        else {
                                if (prot_fault_translation == 0) {
                                        /*
                                         * Autodetect.
                                         * This check also covers the images
                                         * without the ABI-tag ELF note.
                                         */
                                        if (SV_CURPROC_ABI() == SV_ABI_FREEBSD
                                            && p->p_osrel >= P_OSREL_SIGSEGV) {
                                                i = SIGSEGV;
                                                ucode = SEGV_ACCERR;
                                        } else {
                                                i = SIGBUS;
                                                ucode = BUS_PAGE_FAULT;
                                        }
                                } else if (prot_fault_translation == 1) {
                                        /*
                                         * Always compat mode.
                                         */
                                        i = SIGBUS;
                                        ucode = BUS_PAGE_FAULT;
                                } else {
                                        /*
                                         * Always SIGSEGV mode.
                                         */
                                        i = SIGSEGV;
                                        ucode = SEGV_ACCERR;
                                }
                        }
                        break;

                case T_DIVIDE:          /* integer divide fault */
                        ucode = FPE_INTDIV;
                        i = SIGFPE;
                        break;

#ifdef DEV_ISA
                case T_NMI:
                        /* machine/parity/power fail/"kitchen sink" faults */
                        if (isa_nmi(code) == 0) {
#ifdef KDB
                                /*
                                 * NMI can be hooked up to a pushbutton
                                 * for debugging.
                                 */
                                if (kdb_on_nmi) {
                                        printf ("NMI ... going to debugger\n");
                                        kdb_trap(type, 0, frame);
                                }
#endif /* KDB */
                                goto userout;
                        } else if (panic_on_nmi)
                                panic("NMI indicates hardware failure");
                        break;
#endif /* DEV_ISA */

                case T_OFLOW:           /* integer overflow fault */
                        ucode = FPE_INTOVF;
                        i = SIGFPE;
                        break;

                case T_BOUND:           /* bounds check fault */
                        ucode = FPE_FLTSUB;
                        i = SIGFPE;
                        break;

                case T_DNA:
                        /* transparent fault (due to context switch "late") */
                        KASSERT(PCB_USER_FPU(td->td_pcb),
                            ("kernel FPU ctx has leaked"));
                        fpudna();
                        goto userout;

                case T_FPOPFLT:         /* FPU operand fetch fault */
                        ucode = ILL_COPROC;
                        i = SIGILL;
                        break;

                case T_XMMFLT:          /* SIMD floating-point exception */
                        ucode = fputrap_sse();
                        if (ucode == -1)
                                goto userout;
                        i = SIGFPE;
                        break;
#ifdef KDTRACE_HOOKS
                case T_DTRACE_RET:
                        enable_intr();
                        fill_frame_regs(frame, &regs);
                        if (dtrace_return_probe_ptr != NULL &&
                            dtrace_return_probe_ptr(&regs) == 0)
                                goto out;
                        break;
#endif
                }
        } else {
                /* kernel trap */

                KASSERT(cold || td->td_ucred != NULL,
                    ("kernel trap doesn't have ucred"));
                switch (type) {
                case T_PAGEFLT:                 /* page fault */
                        (void) trap_pfault(frame, FALSE);
                        goto out;

                case T_DNA:
                        KASSERT(!PCB_USER_FPU(td->td_pcb),
                            ("Unregistered use of FPU in kernel"));
                        fpudna();
                        goto out;

                case T_ARITHTRAP:       /* arithmetic trap */
                case T_XMMFLT:          /* SIMD floating-point exception */
                case T_FPOPFLT:         /* FPU operand fetch fault */
                        /*
                         * For now, supporting kernel handler
                         * registration for FPU traps is overkill.
                         */
                        trap_fatal(frame, 0);
                        goto out;

                case T_STKFLT:          /* stack fault */
                case T_PROTFLT:         /* general protection fault */
                case T_SEGNPFLT:        /* segment not present fault */
                        if (td->td_intr_nesting_level != 0)
                                break;

                        /*
                         * Invalid segment selectors and out of bounds
                         * %rip's and %rsp's can be set up in user mode.
                         * This causes a fault in kernel mode when the
                         * kernel tries to return to user mode.  We want
                         * to get this fault so that we can fix the
                         * problem here and not have to check all the
                         * selectors and pointers when the user changes
                         * them.
                         */
                        if (frame->tf_rip == (long)doreti_iret) {
                                frame->tf_rip = (long)doreti_iret_fault;
                                goto out;
                        }
                        if (frame->tf_rip == (long)ld_ds) {
                                frame->tf_rip = (long)ds_load_fault;
                                goto out;
                        }
                        if (frame->tf_rip == (long)ld_es) {
                                frame->tf_rip = (long)es_load_fault;
                                goto out;
                        }
                        if (frame->tf_rip == (long)ld_fs) {
                                frame->tf_rip = (long)fs_load_fault;
                                goto out;
                        }
                        if (frame->tf_rip == (long)ld_gs) {
                                frame->tf_rip = (long)gs_load_fault;
                                goto out;
                        }
                        if (frame->tf_rip == (long)ld_gsbase) {
                                frame->tf_rip = (long)gsbase_load_fault;
                                goto out;
                        }
                        if (frame->tf_rip == (long)ld_fsbase) {
                                frame->tf_rip = (long)fsbase_load_fault;
                                goto out;
                        }
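                        /*
                         * pcb_onfault is the recovery address installed by
                         * copyin()/copyout() and friends; redirecting %rip
                         * there turns a faulting access to user memory into
                         * an error return instead of a panic.
                         */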
                        if (curpcb->pcb_onfault != NULL) {
                                frame->tf_rip = (long)curpcb->pcb_onfault;
                                goto out;
                        }
                        break;

                case T_TSSFLT:
                        /*
                         * PSL_NT can be set in user mode and isn't cleared
                         * automatically when the kernel is entered.  This
                         * causes a TSS fault when the kernel attempts to
                         * `iret' because the TSS link is uninitialized.  We
                         * want to get this fault so that we can fix the
                         * problem here and not every time the kernel is
                         * entered.
                         */
                        if (frame->tf_rflags & PSL_NT) {
                                frame->tf_rflags &= ~PSL_NT;
                                goto out;
                        }
                        break;

                case T_TRCTRAP:  /* trace trap */
                        /*
                         * Ignore debug register trace traps due to
                         * accesses in the user's address space, which
                         * can happen under several conditions such as
                         * if a user sets a watchpoint on a buffer and
                         * then passes that buffer to a system call.
                         * We still want to get TRCTRAPS for addresses
                         * in kernel space because that is useful when
                         * debugging the kernel.
                         */
                        if (user_dbreg_trap()) {
                                /*
                                 * Reset the breakpoint bits because the
                                 * processor does not clear them itself.
                                 */
                                /* XXX check upper bits here */
                                load_dr6(rdr6() & 0xfffffff0);
                                goto out;
                        }
                        /*
                         * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
                         */
                case T_BPTFLT:
                        /*
                         * If KDB is enabled, let it handle the debugger trap.
                         * Otherwise, debugger traps "can't happen".
                         */
#ifdef KDB
                        if (kdb_trap(type, 0, frame))
                                goto out;
#endif
                        break;

#ifdef DEV_ISA
                case T_NMI:
                        /* machine/parity/power fail/"kitchen sink" faults */
                        if (isa_nmi(code) == 0) {
#ifdef KDB
                                /*
                                 * NMI can be hooked up to a pushbutton
                                 * for debugging.
                                 */
                                if (kdb_on_nmi) {
                                        printf ("NMI ... going to debugger\n");
                                        kdb_trap(type, 0, frame);
                                }
#endif /* KDB */
                                goto out;
                        } else if (panic_on_nmi == 0)
                                goto out;
                        /* FALLTHROUGH */
#endif /* DEV_ISA */
                }

                trap_fatal(frame, 0);
                goto out;
        }

        /* Translate fault for emulators (e.g. Linux) */
        if (*p->p_sysent->sv_transtrap)
                i = (*p->p_sysent->sv_transtrap)(i, type);

        ksiginfo_init_trap(&ksi);
        ksi.ksi_signo = i;
        ksi.ksi_code = ucode;
        ksi.ksi_trapno = type;
        ksi.ksi_addr = (void *)addr;
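        /*
         * Optionally log the trap details and the first eight opcode
         * bytes at the faulting instruction; fubyte() is used so that
         * reading through a bad %rip cannot fault again.
         */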
        if (uprintf_signal) {
                uprintf("pid %d comm %s: signal %d err %lx code %d type %d "
                    "addr 0x%lx rsp 0x%lx rip 0x%lx "
                    "<%02x %02x %02x %02x %02x %02x %02x %02x>\n",
                    p->p_pid, p->p_comm, i, frame->tf_err, ucode, type, addr,
                    frame->tf_rsp, frame->tf_rip,
                    fubyte((void *)(frame->tf_rip + 0)),
                    fubyte((void *)(frame->tf_rip + 1)),
                    fubyte((void *)(frame->tf_rip + 2)),
                    fubyte((void *)(frame->tf_rip + 3)),
                    fubyte((void *)(frame->tf_rip + 4)),
                    fubyte((void *)(frame->tf_rip + 5)),
                    fubyte((void *)(frame->tf_rip + 6)),
                    fubyte((void *)(frame->tf_rip + 7)));
        }
        KASSERT((read_rflags() & PSL_I) != 0, ("interrupts disabled"));
        trapsignal(td, &ksi);

user:
        userret(td, frame);
        KASSERT(PCB_USER_FPU(td->td_pcb),
            ("Return from trap with kernel FPU ctx leaked"));
userout:
out:
        return;
}

/*
 * Ensure that we ignore any DTrace-induced faults. This function cannot
 * be instrumented, so it cannot generate such faults itself.
 */
void
trap_check(struct trapframe *frame)
{

#ifdef KDTRACE_HOOKS
        if (dtrace_trap_func != NULL &&
            (*dtrace_trap_func)(frame, frame->tf_trapno) != 0)
                return;
#endif
        trap(frame);
}

static int
trap_pfault(frame, usermode)
        struct trapframe *frame;
        int usermode;
{
        vm_offset_t va;
        vm_map_t map;
        int rv = 0;
        vm_prot_t ftype;
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        vm_offset_t eva = frame->tf_addr;

        if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
                /*
                 * Due to both processor errata and lazy TLB invalidation when
                 * access restrictions are removed from virtual pages, memory
                 * accesses that are allowed by the physical mapping layer may
                 * nonetheless cause one spurious page fault per virtual page.
                 * When the thread is executing a "no faulting" section that
                 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
                 * every page fault is treated as a spurious page fault,
                 * unless it accesses the same virtual address as the most
                 * recent page fault within the same "no faulting" section.
                 */
                if (td->td_md.md_spurflt_addr != eva ||
                    (td->td_pflags & TDP_RESETSPUR) != 0) {
                        /*
                         * Do nothing to the TLB.  A stale TLB entry is
                         * flushed automatically by a page fault.
                         */
                        td->td_md.md_spurflt_addr = eva;
                        td->td_pflags &= ~TDP_RESETSPUR;
                        return (0);
                }
        } else {
                /*
                 * If we get a page fault while in a critical section, then
                 * it is most likely a fatal kernel page fault.  The kernel
                 * is already going to panic trying to get a sleep lock to
                 * do the VM lookup, so just consider it a fatal trap so the
                 * kernel can print out a useful trap message and even get
                 * to the debugger.
                 *
                 * If we get a page fault while holding a non-sleepable
                 * lock, then it is most likely a fatal kernel page fault.
                 * If WITNESS is enabled, then it's going to whine about
                 * bogus LORs with various VM locks, so just skip to the
                 * fatal trap handling directly.
                 */
                if (td->td_critnest != 0 ||
                    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
                    "Kernel page fault") != 0) {
                        trap_fatal(frame, eva);
                        return (-1);
                }
        }
        va = trunc_page(eva);
        if (va >= VM_MIN_KERNEL_ADDRESS) {
                /*
                 * Don't allow user-mode faults in kernel address space.
                 */
                if (usermode)
                        goto nogo;

                map = kernel_map;
        } else {
                map = &p->p_vmspace->vm_map;

                /*
                 * When accessing a usermode address, kernel must be
                 * ready to accept the page fault, and provide a
                 * handling routine.  Since accessing the address
                 * without the handler is a bug, do not try to handle
                 * it normally, and panic immediately.
                 */
                if (!usermode && (td->td_intr_nesting_level != 0 ||
                    curpcb->pcb_onfault == NULL)) {
                        trap_fatal(frame, eva);
                        return (-1);
                }
        }

        /*
         * If the trap was caused by errant bits in the PTE then panic.
         */
        if (frame->tf_err & PGEX_RSV) {
                trap_fatal(frame, eva);
                return (-1);
        }

        /*
         * PGEX_I is defined only if the execute disable bit capability is
         * supported and enabled.
         */
        if (frame->tf_err & PGEX_W)
                ftype = VM_PROT_WRITE;
        else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
                ftype = VM_PROT_EXECUTE;
        else
                ftype = VM_PROT_READ;

        /* Fault in the page. */
        rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
        if (rv == KERN_SUCCESS) {
#ifdef HWPMC_HOOKS
                if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
                        PMC_SOFT_CALL_TF( , , page_fault, all, frame);
                        if (ftype == VM_PROT_READ)
                                PMC_SOFT_CALL_TF( , , page_fault, read,
                                    frame);
                        else
                                PMC_SOFT_CALL_TF( , , page_fault, write,
                                    frame);
                }
#endif
                return (0);
        }
nogo:
        if (!usermode) {
                if (td->td_intr_nesting_level == 0 &&
                    curpcb->pcb_onfault != NULL) {
                        frame->tf_rip = (long)curpcb->pcb_onfault;
                        return (0);
                }
                trap_fatal(frame, eva);
                return (-1);
        }
        return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(frame, eva)
        struct trapframe *frame;
        vm_offset_t eva;
{
        int code, ss;
        u_int type;
        long esp;
        struct soft_segment_descriptor softseg;
        char *msg;

        code = frame->tf_err;
        type = frame->tf_trapno;
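        /*
         * Decode the faulting code segment's descriptor from this CPU's
         * GDT so its base, limit, and attributes can be printed below.
         */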
        sdtossd(&gdt[NGDT * PCPU_GET(cpuid) + IDXSEL(frame->tf_cs & 0xffff)],
            &softseg);

        if (type <= MAX_TRAP_MSG)
                msg = trap_msg[type];
        else
                msg = "UNKNOWN";
        printf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
            ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
        /* two separate prints in case of a trap on an unmapped page */
        printf("cpuid = %d; ", PCPU_GET(cpuid));
        printf("apic id = %02x\n", PCPU_GET(apic_id));
#endif
        if (type == T_PAGEFLT) {
                printf("fault virtual address   = 0x%lx\n", eva);
                printf("fault code              = %s %s %s%s, %s\n",
                        code & PGEX_U ? "user" : "supervisor",
                        code & PGEX_W ? "write" : "read",
                        code & PGEX_I ? "instruction" : "data",
                        code & PGEX_RSV ? " rsv" : "",
                        code & PGEX_P ? "protection violation" : "page not present");
        }
        printf("instruction pointer     = 0x%lx:0x%lx\n",
               frame->tf_cs & 0xffff, frame->tf_rip);
        if (ISPL(frame->tf_cs) == SEL_UPL) {
                ss = frame->tf_ss & 0xffff;
                esp = frame->tf_rsp;
        } else {
                ss = GSEL(GDATA_SEL, SEL_KPL);
                esp = (long)&frame->tf_rsp;
        }
        printf("stack pointer           = 0x%x:0x%lx\n", ss, esp);
        printf("frame pointer           = 0x%x:0x%lx\n", ss, frame->tf_rbp);
        printf("code segment            = base 0x%lx, limit 0x%lx, type 0x%x\n",
               softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
        printf("                        = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
               softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
               softseg.ssd_gran);
        printf("processor eflags        = ");
        if (frame->tf_rflags & PSL_T)
                printf("trace trap, ");
        if (frame->tf_rflags & PSL_I)
                printf("interrupt enabled, ");
        if (frame->tf_rflags & PSL_NT)
                printf("nested task, ");
        if (frame->tf_rflags & PSL_RF)
                printf("resume, ");
        printf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
        printf("current process         = %d (%s)\n",
            curproc->p_pid, curthread->td_name);

#ifdef KDB
        if (debugger_on_panic || kdb_active)
                if (kdb_trap(type, 0, frame))
                        return;
#endif
        printf("trap number             = %d\n", type);
        if (type <= MAX_TRAP_MSG)
                panic("%s", trap_msg[type]);
        else
                panic("unknown/reserved trap");
}

/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 */
void
dblfault_handler(struct trapframe *frame)
{
#ifdef KDTRACE_HOOKS
        if (dtrace_doubletrap_func != NULL)
                (*dtrace_doubletrap_func)();
#endif
        printf("\nFatal double fault\n");
        printf("rip = 0x%lx\n", frame->tf_rip);
        printf("rsp = 0x%lx\n", frame->tf_rsp);
        printf("rbp = 0x%lx\n", frame->tf_rbp);
#ifdef SMP
        /* two separate prints in case of a trap on an unmapped page */
        printf("cpuid = %d; ", PCPU_GET(cpuid));
        printf("apic id = %02x\n", PCPU_GET(apic_id));
#endif
        panic("double fault");
}

int
cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
{
        struct proc *p;
        struct trapframe *frame;
        register_t *argp;
        caddr_t params;
        int reg, regcnt, error;

        p = td->td_proc;
        frame = td->td_frame;
        reg = 0;
        regcnt = 6;

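        /*
         * Native amd64 system calls pass the system call number in %rax
         * and up to six arguments in registers; the assembly entry point
         * has already saved them as consecutive words starting at tf_rdi
         * (with the userland %r10 argument stored in the tf_rcx slot), so
         * they can be copied out with a single bcopy() below.  Any further
         * arguments are read from the user stack, just above the return
         * address that %rsp points at.
         */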
        params = (caddr_t)frame->tf_rsp + sizeof(register_t);
        sa->code = frame->tf_rax;

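        /*
         * For the indirect syscall(2) and __syscall(2) entry points the
         * real system call number is the first argument, so shift the
         * register argument window right by one.
         */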
        if (sa->code == SYS_syscall || sa->code == SYS___syscall) {
                sa->code = frame->tf_rdi;
                reg++;
                regcnt--;
        }
        if (p->p_sysent->sv_mask)
                sa->code &= p->p_sysent->sv_mask;

        if (sa->code >= p->p_sysent->sv_size)
                sa->callp = &p->p_sysent->sv_table[0];
        else
                sa->callp = &p->p_sysent->sv_table[sa->code];

        sa->narg = sa->callp->sy_narg;
        KASSERT(sa->narg <= sizeof(sa->args) / sizeof(sa->args[0]),
            ("Too many syscall arguments!"));
        error = 0;
        argp = &frame->tf_rdi;
        argp += reg;
        bcopy(argp, sa->args, sizeof(sa->args[0]) * regcnt);
        if (sa->narg > regcnt) {
                KASSERT(params != NULL, ("copyin args with no params!"));
                error = copyin(params, &sa->args[regcnt],
                    (sa->narg - regcnt) * sizeof(sa->args[0]));
        }

        if (error == 0) {
                td->td_retval[0] = 0;
                td->td_retval[1] = frame->tf_rdx;
        }

        return (error);
}

#include "../../kern/subr_syscall.c"

/*
 * System call handler for native binaries.  The trap frame is already
 * set up by the assembler trampoline and a pointer to it is saved in
 * td_frame.
 */
void
amd64_syscall(struct thread *td, int traced)
{
        struct syscall_args sa;
        int error;
        ksiginfo_t ksi;

#ifdef DIAGNOSTIC
        if (ISPL(td->td_frame->tf_cs) != SEL_UPL) {
                panic("syscall");
                /* NOT REACHED */
        }
#endif
        error = syscallenter(td, &sa);

        /*
         * Traced syscall.
         */
        if (__predict_false(traced)) {
                td->td_frame->tf_rflags &= ~PSL_T;
                ksiginfo_init_trap(&ksi);
                ksi.ksi_signo = SIGTRAP;
                ksi.ksi_code = TRAP_TRACE;
                ksi.ksi_addr = (void *)td->td_frame->tf_rip;
                trapsignal(td, &ksi);
        }

        KASSERT(PCB_USER_FPU(td->td_pcb),
            ("System call %s returning with kernel FPU ctx leaked",
             syscallname(td->td_proc, sa.code)));
        KASSERT(td->td_pcb->pcb_save == get_pcb_user_save_td(td),
            ("System call %s returning with mangled pcb_save",
             syscallname(td->td_proc, sa.code)));

        syscallret(td, error, &sa);

        /*
         * If the user-supplied value of %rip is not a canonical
         * address, then some CPUs will trigger a ring 0 #GP during
         * the sysret instruction.  However, the fault handler would
         * execute in ring 0 with the user's %gs and %rsp which would
         * not be safe.  Instead, use the full return path which
         * catches the problem safely.
         */
        if (td->td_frame->tf_rip >= VM_MAXUSER_ADDRESS)
                set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
}