1 /*-
2  * Copyright (C) 1994, David Greenman
3  * Copyright (c) 1990, 1993
4  *      The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the University of Utah, and William Jolitz.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by the University of
20  *      California, Berkeley and its contributors.
21  * 4. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *      from: @(#)trap.c        7.4 (Berkeley) 5/13/91
38  */
39
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42
43 /*
44  * 386 Trap and System call handling
45  */
46
47 #include "opt_clock.h"
48 #include "opt_cpu.h"
49 #include "opt_hwpmc_hooks.h"
50 #include "opt_isa.h"
51 #include "opt_kdb.h"
52 #include "opt_kdtrace.h"
53 #include "opt_npx.h"
54 #include "opt_trap.h"
55
56 #include <sys/param.h>
57 #include <sys/bus.h>
58 #include <sys/systm.h>
59 #include <sys/proc.h>
60 #include <sys/pioctl.h>
61 #include <sys/ptrace.h>
62 #include <sys/kdb.h>
63 #include <sys/kernel.h>
64 #include <sys/ktr.h>
65 #include <sys/lock.h>
66 #include <sys/mutex.h>
67 #include <sys/resourcevar.h>
68 #include <sys/signalvar.h>
69 #include <sys/syscall.h>
70 #include <sys/sysctl.h>
71 #include <sys/sysent.h>
72 #include <sys/uio.h>
73 #include <sys/vmmeter.h>
74 #ifdef HWPMC_HOOKS
75 #include <sys/pmckern.h>
76 PMC_SOFT_DEFINE( , , page_fault, all);
77 PMC_SOFT_DEFINE( , , page_fault, read);
78 PMC_SOFT_DEFINE( , , page_fault, write);
79 #endif
80 #include <security/audit/audit.h>
81
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <vm/pmap.h>
85 #include <vm/vm_kern.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_extern.h>
89
90 #include <machine/cpu.h>
91 #include <machine/intr_machdep.h>
92 #include <x86/mca.h>
93 #include <machine/md_var.h>
94 #include <machine/pcb.h>
95 #ifdef SMP
96 #include <machine/smp.h>
97 #endif
98 #include <machine/tss.h>
99 #include <machine/vm86.h>
100
101 #ifdef POWERFAIL_NMI
102 #include <sys/syslog.h>
103 #include <machine/clock.h>
104 #endif
105
106 #ifdef KDTRACE_HOOKS
107 #include <sys/dtrace_bsd.h>
108
109 /*
110  * This is a hook which is initialised by the dtrace module
111  * to handle traps which might occur during DTrace probe
112  * execution.
113  */
114 dtrace_trap_func_t      dtrace_trap_func;
115
116 dtrace_doubletrap_func_t        dtrace_doubletrap_func;
117
118 /*
119  * This is a hook which is initialised by the systrace module
120  * when it is loaded. This keeps the DTrace syscall provider
121  * implementation opaque. 
122  */
123 systrace_probe_func_t   systrace_probe_func;
124
125 /*
126  * These hooks are necessary for the pid and usdt providers.
127  */
128 dtrace_pid_probe_ptr_t          dtrace_pid_probe_ptr;
129 dtrace_return_probe_ptr_t       dtrace_return_probe_ptr;
130 #endif
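/*
 * A minimal sketch of how a module installs the trap hook above (the
 * handler name is hypothetical and not part of this file); a nonzero
 * return from dtrace_trap_func tells trap() that the trap has already
 * been handled:
 *
 *	static int
 *	my_trap_hook(struct trapframe *frame, u_int type)
 *	{
 *
 *		return (0);
 *	}
 *
 * On MOD_LOAD the module sets "dtrace_trap_func = my_trap_hook;" and on
 * MOD_UNLOAD it resets the pointer to NULL before the handler goes away.
 */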
131
132 extern void trap(struct trapframe *frame);
133 extern void syscall(struct trapframe *frame);
134
135 static int trap_pfault(struct trapframe *, int, vm_offset_t);
136 static void trap_fatal(struct trapframe *, vm_offset_t);
137 void dblfault_handler(void);
138
139 extern inthand_t IDTVEC(lcall_syscall);
140
141 #define MAX_TRAP_MSG            32
142 static char *trap_msg[] = {
143         "",                                     /*  0 unused */
144         "privileged instruction fault",         /*  1 T_PRIVINFLT */
145         "",                                     /*  2 unused */
146         "breakpoint instruction fault",         /*  3 T_BPTFLT */
147         "",                                     /*  4 unused */
148         "",                                     /*  5 unused */
149         "arithmetic trap",                      /*  6 T_ARITHTRAP */
150         "",                                     /*  7 unused */
151         "",                                     /*  8 unused */
152         "general protection fault",             /*  9 T_PROTFLT */
153         "trace trap",                           /* 10 T_TRCTRAP */
154         "",                                     /* 11 unused */
155         "page fault",                           /* 12 T_PAGEFLT */
156         "",                                     /* 13 unused */
157         "alignment fault",                      /* 14 T_ALIGNFLT */
158         "",                                     /* 15 unused */
159         "",                                     /* 16 unused */
160         "",                                     /* 17 unused */
161         "integer divide fault",                 /* 18 T_DIVIDE */
162         "non-maskable interrupt trap",          /* 19 T_NMI */
163         "overflow trap",                        /* 20 T_OFLOW */
164         "FPU bounds check fault",               /* 21 T_BOUND */
165         "FPU device not available",             /* 22 T_DNA */
166         "double fault",                         /* 23 T_DOUBLEFLT */
167         "FPU operand fetch fault",              /* 24 T_FPOPFLT */
168         "invalid TSS fault",                    /* 25 T_TSSFLT */
169         "segment not present fault",            /* 26 T_SEGNPFLT */
170         "stack fault",                          /* 27 T_STKFLT */
171         "machine check trap",                   /* 28 T_MCHK */
172         "SIMD floating-point exception",        /* 29 T_XMMFLT */
173         "reserved (unknown) fault",             /* 30 T_RESERVED */
174         "",                                     /* 31 unused (reserved) */
175         "DTrace pid return trap",               /* 32 T_DTRACE_RET */
176 };
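/*
 * The table above is indexed by the T_* trap numbers; an empty string
 * marks values that are unused on i386, and MAX_TRAP_MSG must stay in
 * sync with the last entry (T_DTRACE_RET).
 */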
177
178 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
179 extern int has_f00f_bug;
180 #endif
181
182 #ifdef KDB
183 static int kdb_on_nmi = 1;
184 SYSCTL_INT(_machdep, OID_AUTO, kdb_on_nmi, CTLFLAG_RW,
185         &kdb_on_nmi, 0, "Go to KDB on NMI");
186 TUNABLE_INT("machdep.kdb_on_nmi", &kdb_on_nmi);
187 #endif
188 static int panic_on_nmi = 1;
189 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
190         &panic_on_nmi, 0, "Panic on NMI");
191 TUNABLE_INT("machdep.panic_on_nmi", &panic_on_nmi);
192 static int prot_fault_translation = 0;
193 SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RW,
194         &prot_fault_translation, 0, "Select signal to deliver on protection fault");
195 static int uprintf_signal;
196 SYSCTL_INT(_machdep, OID_AUTO, uprintf_signal, CTLFLAG_RW,
197     &uprintf_signal, 0,
198     "Print debugging information on trap signal to ctty");
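/*
 * These knobs can be changed at run time with sysctl(8), for example:
 *
 *	sysctl machdep.panic_on_nmi=0
 *	sysctl machdep.uprintf_signal=1
 *
 * The two that are also registered as tunables above (machdep.kdb_on_nmi
 * and machdep.panic_on_nmi) may likewise be set from loader.conf(5).
 */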
199
200 /*
201  * Exception, fault, and trap interface to the FreeBSD kernel.
202  * This common code is called from assembly language IDT gate entry
203  * routines that prepare a suitable stack frame, and restore this
204  * frame after the exception has been processed.
205  */
206
207 void
208 trap(struct trapframe *frame)
209 {
210         struct thread *td = curthread;
211         struct proc *p = td->td_proc;
212         int i = 0, ucode = 0, code;
213         u_int type;
214         register_t addr = 0;
215         vm_offset_t eva;
216         ksiginfo_t ksi;
217 #ifdef POWERFAIL_NMI
218         static int lastalert = 0;
219 #endif
220
221         PCPU_INC(cnt.v_trap);
222         type = frame->tf_trapno;
223
224 #ifdef SMP
225         /* Handler for NMI IPIs used for stopping CPUs. */
226         if (type == T_NMI) {
227                  if (ipi_nmi_handler() == 0)
228                            goto out;
229         }
230 #endif /* SMP */
231
232 #ifdef KDB
233         if (kdb_active) {
234                 kdb_reenter();
235                 goto out;
236         }
237 #endif
238
239         if (type == T_RESERVED) {
240                 trap_fatal(frame, 0);
241                 goto out;
242         }
243
244 #ifdef  HWPMC_HOOKS
245         /*
246          * CPU PMCs interrupt using an NMI so we check for that first.
247          * If the HWPMC module is active, 'pmc_hook' will point to
248          * the function to be called.  A return value of '1' from the
249          * hook means that the NMI was handled by it and that we can
250          * return immediately.
251          */
252         if (type == T_NMI && pmc_intr &&
253             (*pmc_intr)(PCPU_GET(cpuid), frame))
254             goto out;
255 #endif
256
257         if (type == T_MCHK) {
258                 mca_intr();
259                 goto out;
260         }
261
262 #ifdef KDTRACE_HOOKS
263         /*
264          * A trap can occur while DTrace executes a probe. Before
265          * executing the probe, DTrace blocks re-scheduling and sets
266          * a flag in its per-cpu flags to indicate that it doesn't
267          * want to fault. On returning from the probe, the no-fault
268          * flag is cleared and finally re-scheduling is enabled.
269          *
270          * If the DTrace kernel module has registered a trap handler,
271          * call it and if it returns non-zero, assume that it has
272          * handled the trap and modified the trap frame so that this
273          * function can return normally.
274          */
275         if (type == T_DTRACE_RET || type == T_BPTFLT) {
276                 struct reg regs;
277
278                 fill_frame_regs(frame, &regs);
279                 if (type == T_BPTFLT &&
280                     dtrace_pid_probe_ptr != NULL &&
281                     dtrace_pid_probe_ptr(&regs) == 0)
282                         goto out;
283                 if (type == T_DTRACE_RET &&
284                     dtrace_return_probe_ptr != NULL &&
285                     dtrace_return_probe_ptr(&regs) == 0)
286                         goto out;
287         }
288         if ((type == T_PROTFLT || type == T_PAGEFLT) &&
289             dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type))
290                 goto out;
291 #endif
292
293         if ((frame->tf_eflags & PSL_I) == 0) {
294                 /*
295                  * Buggy application or kernel code has disabled
296                  * interrupts and then trapped.  Enabling interrupts
297                  * now is wrong, but it is better than running with
298                  * interrupts disabled until they are accidentally
299                  * enabled later.
300                  */
301                 if (ISPL(frame->tf_cs) == SEL_UPL || (frame->tf_eflags & PSL_VM))
302                         uprintf(
303                             "pid %ld (%s): trap %d with interrupts disabled\n",
304                             (long)curproc->p_pid, curthread->td_name, type);
305                 else if (type != T_BPTFLT && type != T_TRCTRAP &&
306                          frame->tf_eip != (int)cpu_switch_load_gs) {
307                         /*
308                          * XXX not quite right, since this may be for a
309                          * multiple fault in user mode.
310                          */
311                         printf("kernel trap %d with interrupts disabled\n",
312                             type);
313                         /*
314                          * Page faults need interrupts disabled until later,
315                          * and we shouldn't enable interrupts while holding
316                          * a spin lock or if servicing an NMI.
317                          */
318                         if (type != T_NMI && type != T_PAGEFLT &&
319                             td->td_md.md_spinlock_count == 0)
320                                 enable_intr();
321                 }
322         }
323         eva = 0;
324         code = frame->tf_err;
325         if (type == T_PAGEFLT) {
326                 /*
327                  * For some Cyrix CPUs, %cr2 is clobbered by
328                  * interrupts.  This problem is worked around by using
329                  * an interrupt gate for the pagefault handler.  We
330                  * are finally ready to read %cr2 and conditionally
331                  * reenable interrupts.  If we hold a spin lock, then
332                  * we must not reenable interrupts.  This might be a
333                  * spurious page fault.
334                  */
335                 eva = rcr2();
336                 if (td->td_md.md_spinlock_count == 0)
337                         enable_intr();
338         }
339
340         if ((ISPL(frame->tf_cs) == SEL_UPL) ||
341             ((frame->tf_eflags & PSL_VM) && 
342                 !(curpcb->pcb_flags & PCB_VM86CALL))) {
343                 /* user trap */
344
345                 td->td_pticks = 0;
346                 td->td_frame = frame;
347                 addr = frame->tf_eip;
348                 if (td->td_ucred != p->p_ucred) 
349                         cred_update_thread(td);
350
351                 switch (type) {
352                 case T_PRIVINFLT:       /* privileged instruction fault */
353                         i = SIGILL;
354                         ucode = ILL_PRVOPC;
355                         break;
356
357                 case T_BPTFLT:          /* bpt instruction fault */
358                 case T_TRCTRAP:         /* trace trap */
359                         enable_intr();
360                         frame->tf_eflags &= ~PSL_T;
361                         i = SIGTRAP;
362                         ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
363                         break;
364
365                 case T_ARITHTRAP:       /* arithmetic trap */
366 #ifdef DEV_NPX
367                         ucode = npxtrap_x87();
368                         if (ucode == -1)
369                                 goto userout;
370 #else
371                         ucode = 0;
372 #endif
373                         i = SIGFPE;
374                         break;
375
376                         /*
377                          * The following two traps can happen in
378                          * vm86 mode, and, if so, we want to handle
379                          * them specially.
380                          */
381                 case T_PROTFLT:         /* general protection fault */
382                 case T_STKFLT:          /* stack fault */
383                         if (frame->tf_eflags & PSL_VM) {
384                                 i = vm86_emulate((struct vm86frame *)frame);
385                                 if (i == 0)
386                                         goto user;
387                                 break;
388                         }
389                         i = SIGBUS;
390                         ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;
391                         break;
392                 case T_SEGNPFLT:        /* segment not present fault */
393                         i = SIGBUS;
394                         ucode = BUS_ADRERR;
395                         break;
396                 case T_TSSFLT:          /* invalid TSS fault */
397                         i = SIGBUS;
398                         ucode = BUS_OBJERR;
399                         break;
400                 case T_DOUBLEFLT:       /* double fault */
401                 default:
402                         i = SIGBUS;
403                         ucode = BUS_OBJERR;
404                         break;
405
406                 case T_PAGEFLT:         /* page fault */
407
408                         i = trap_pfault(frame, TRUE, eva);
409 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
410                         if (i == -2) {
411                                 /*
412                                  * The f00f hack workaround has triggered, so
413                                  * treat the fault as an illegal instruction 
414                                  * (T_PRIVINFLT) instead of a page fault.
415                                  */
416                                 type = frame->tf_trapno = T_PRIVINFLT;
417
418                                 /* Proceed as in that case. */
419                                 ucode = ILL_PRVOPC;
420                                 i = SIGILL;
421                                 break;
422                         }
423 #endif
424                         if (i == -1)
425                                 goto userout;
426                         if (i == 0)
427                                 goto user;
428
429                         if (i == SIGSEGV)
430                                 ucode = SEGV_MAPERR;
431                         else {
432                                 if (prot_fault_translation == 0) {
433                                         /*
434                                          * Autodetect.
435                                          * This check also covers the images
436                                          * without the ABI-tag ELF note.
437                                          */
438                                         if (SV_CURPROC_ABI() == SV_ABI_FREEBSD
439                                             && p->p_osrel >= P_OSREL_SIGSEGV) {
440                                                 i = SIGSEGV;
441                                                 ucode = SEGV_ACCERR;
442                                         } else {
443                                                 i = SIGBUS;
444                                                 ucode = BUS_PAGE_FAULT;
445                                         }
446                                 } else if (prot_fault_translation == 1) {
447                                         /*
448                                          * Always compat mode.
449                                          */
450                                         i = SIGBUS;
451                                         ucode = BUS_PAGE_FAULT;
452                                 } else {
453                                         /*
454                                          * Always SIGSEGV mode.
455                                          */
456                                         i = SIGSEGV;
457                                         ucode = SEGV_ACCERR;
458                                 }
459                         }
460                         addr = eva;
461                         break;
462
463                 case T_DIVIDE:          /* integer divide fault */
464                         ucode = FPE_INTDIV;
465                         i = SIGFPE;
466                         break;
467
468 #ifdef DEV_ISA
469                 case T_NMI:
470 #ifdef POWERFAIL_NMI
471 #ifndef TIMER_FREQ
472 #  define TIMER_FREQ 1193182
473 #endif
474                         if (time_second - lastalert > 10) {
475                                 log(LOG_WARNING, "NMI: power fail\n");
476                                 sysbeep(880, hz);
477                                 lastalert = time_second;
478                         }
479                         goto userout;
480 #else /* !POWERFAIL_NMI */
481                         /* machine/parity/power fail/"kitchen sink" faults */
482                         if (isa_nmi(code) == 0) {
483 #ifdef KDB
484                                 /*
485                                  * NMI can be hooked up to a pushbutton
486                                  * for debugging.
487                                  */
488                                 if (kdb_on_nmi) {
489                                         printf ("NMI ... going to debugger\n");
490                                         kdb_trap(type, 0, frame);
491                                 }
492 #endif /* KDB */
493                                 goto userout;
494                         } else if (panic_on_nmi)
495                                 panic("NMI indicates hardware failure");
496                         break;
497 #endif /* POWERFAIL_NMI */
498 #endif /* DEV_ISA */
499
500                 case T_OFLOW:           /* integer overflow fault */
501                         ucode = FPE_INTOVF;
502                         i = SIGFPE;
503                         break;
504
505                 case T_BOUND:           /* bounds check fault */
506                         ucode = FPE_FLTSUB;
507                         i = SIGFPE;
508                         break;
509
510                 case T_DNA:
511 #ifdef DEV_NPX
512                         KASSERT(PCB_USER_FPU(td->td_pcb),
513                             ("kernel FPU ctx has leaked"));
514                         /* transparent fault (due to context switch "late") */
515                         if (npxdna())
516                                 goto userout;
517 #endif
518                         uprintf("pid %d killed due to lack of floating point\n",
519                                 p->p_pid);
520                         i = SIGKILL;
521                         ucode = 0;
522                         break;
523
524                 case T_FPOPFLT:         /* FPU operand fetch fault */
525                         ucode = ILL_COPROC;
526                         i = SIGILL;
527                         break;
528
529                 case T_XMMFLT:          /* SIMD floating-point exception */
530 #if defined(DEV_NPX) && !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
531                         ucode = npxtrap_sse();
532                         if (ucode == -1)
533                                 goto userout;
534 #else
535                         ucode = 0;
536 #endif
537                         i = SIGFPE;
538                         break;
539                 }
540         } else {
541                 /* kernel trap */
542
543                 KASSERT(cold || td->td_ucred != NULL,
544                     ("kernel trap doesn't have ucred"));
545                 switch (type) {
546                 case T_PAGEFLT:                 /* page fault */
547                         (void) trap_pfault(frame, FALSE, eva);
548                         goto out;
549
550                 case T_DNA:
551 #ifdef DEV_NPX
552                         KASSERT(!PCB_USER_FPU(td->td_pcb),
553                             ("Unregistered use of FPU in kernel"));
554                         if (npxdna())
555                                 goto out;
556 #endif
557                         break;
558
559                 case T_ARITHTRAP:       /* arithmetic trap */
560                 case T_XMMFLT:          /* SIMD floating-point exception */
561                 case T_FPOPFLT:         /* FPU operand fetch fault */
562                         /*
563                          * XXXKIB for now disable any FPU traps in kernel;
564                          * handler registration seems to be overkill
565                          */
566                         trap_fatal(frame, 0);
567                         goto out;
568
569                         /*
570                          * The following two traps can happen in
571                          * vm86 mode, and, if so, we want to handle
572                          * them specially.
573                          */
574                 case T_PROTFLT:         /* general protection fault */
575                 case T_STKFLT:          /* stack fault */
576                         if (frame->tf_eflags & PSL_VM) {
577                                 i = vm86_emulate((struct vm86frame *)frame);
578                                 if (i != 0)
579                                         /*
580                                          * returns to original process
581                                          */
582                                         vm86_trap((struct vm86frame *)frame);
583                                 goto out;
584                         }
585                         if (type == T_STKFLT)
586                                 break;
587
588                         /* FALL THROUGH */
589
590                 case T_SEGNPFLT:        /* segment not present fault */
591                         if (curpcb->pcb_flags & PCB_VM86CALL)
592                                 break;
593
594                         /*
595                          * Invalid %fs's and %gs's can be created using
596                          * procfs or PT_SETREGS or by invalidating the
597                          * underlying LDT entry.  This causes a fault
598                          * in kernel mode when the kernel attempts to
599                          * switch contexts.  Lose the bad context
600                          * (XXX) so that we can continue, and generate
601                          * a signal.
602                          */
603                         if (frame->tf_eip == (int)cpu_switch_load_gs) {
604                                 curpcb->pcb_gs = 0;
605 #if 0                           
606                                 PROC_LOCK(p);
607                                 kern_psignal(p, SIGBUS);
608                                 PROC_UNLOCK(p);
609 #endif                          
610                                 goto out;
611                         }
612
613                         if (td->td_intr_nesting_level != 0)
614                                 break;
615
616                         /*
617                          * Invalid segment selectors and out of bounds
618                          * %eip's and %esp's can be set up in user mode.
619                          * This causes a fault in kernel mode when the
620                          * kernel tries to return to user mode.  We want
621                          * to get this fault so that we can fix the
622                          * problem here and not have to check all the
623                          * selectors and pointers when the user changes
624                          * them.
625                          */
626                         if (frame->tf_eip == (int)doreti_iret) {
627                                 frame->tf_eip = (int)doreti_iret_fault;
628                                 goto out;
629                         }
630                         if (frame->tf_eip == (int)doreti_popl_ds) {
631                                 frame->tf_eip = (int)doreti_popl_ds_fault;
632                                 goto out;
633                         }
634                         if (frame->tf_eip == (int)doreti_popl_es) {
635                                 frame->tf_eip = (int)doreti_popl_es_fault;
636                                 goto out;
637                         }
638                         if (frame->tf_eip == (int)doreti_popl_fs) {
639                                 frame->tf_eip = (int)doreti_popl_fs_fault;
640                                 goto out;
641                         }
642                         if (curpcb->pcb_onfault != NULL) {
643                                 frame->tf_eip =
644                                     (int)curpcb->pcb_onfault;
645                                 goto out;
646                         }
647                         break;
648
649                 case T_TSSFLT:
650                         /*
651                          * PSL_NT can be set in user mode and isn't cleared
652                          * automatically when the kernel is entered.  This
653                          * causes a TSS fault when the kernel attempts to
654                          * `iret' because the TSS link is uninitialized.  We
655                          * want to get this fault so that we can fix the
656                          * problem here and not every time the kernel is
657                          * entered.
658                          */
659                         if (frame->tf_eflags & PSL_NT) {
660                                 frame->tf_eflags &= ~PSL_NT;
661                                 goto out;
662                         }
663                         break;
664
665                 case T_TRCTRAP:  /* trace trap */
666                         if (frame->tf_eip == (int)IDTVEC(lcall_syscall)) {
667                                 /*
668                                  * We've just entered system mode via the
669                                  * syscall lcall.  Continue single stepping
670                                  * silently until the syscall handler has
671                                  * saved the flags.
672                                  */
673                                 goto out;
674                         }
675                         if (frame->tf_eip == (int)IDTVEC(lcall_syscall) + 1) {
676                                 /*
677                                  * The syscall handler has now saved the
678                                  * flags.  Stop single stepping it.
679                                  */
680                                 frame->tf_eflags &= ~PSL_T;
681                                 goto out;
682                         }
683                         /*
684                          * Ignore debug register trace traps due to
685                          * accesses in the user's address space, which
686                          * can happen under several conditions such as
687                          * if a user sets a watchpoint on a buffer and
688                          * then passes that buffer to a system call.
689                          * We still want to get TRCTRAPS for addresses
690                          * in kernel space because that is useful when
691                          * debugging the kernel.
692                          */
693                         if (user_dbreg_trap() && 
694                            !(curpcb->pcb_flags & PCB_VM86CALL)) {
695                                 /*
696                                  * Reset breakpoint bits because the
697                                  * processor doesn't clear them by itself.
698                                  */
699                                 load_dr6(rdr6() & 0xfffffff0);
700                                 goto out;
701                         }
702                         /*
703                          * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
704                          */
705                 case T_BPTFLT:
706                         /*
707                          * If KDB is enabled, let it handle the debugger trap.
708                          * Otherwise, debugger traps "can't happen".
709                          */
710 #ifdef KDB
711                         if (kdb_trap(type, 0, frame))
712                                 goto out;
713 #endif
714                         break;
715
716 #ifdef DEV_ISA
717                 case T_NMI:
718 #ifdef POWERFAIL_NMI
719                         if (time_second - lastalert > 10) {
720                                 log(LOG_WARNING, "NMI: power fail\n");
721                                 sysbeep(880, hz);
722                                 lastalert = time_second;
723                         }
724                         goto out;
725 #else /* !POWERFAIL_NMI */
726                         /* machine/parity/power fail/"kitchen sink" faults */
727                         if (isa_nmi(code) == 0) {
728 #ifdef KDB
729                                 /*
730                                  * NMI can be hooked up to a pushbutton
731                                  * for debugging.
732                                  */
733                                 if (kdb_on_nmi) {
734                                         printf ("NMI ... going to debugger\n");
735                                         kdb_trap(type, 0, frame);
736                                 }
737 #endif /* KDB */
738                                 goto out;
739                         } else if (panic_on_nmi == 0)
740                                 goto out;
741                         /* FALLTHROUGH */
742 #endif /* POWERFAIL_NMI */
743 #endif /* DEV_ISA */
744                 }
745
746                 trap_fatal(frame, eva);
747                 goto out;
748         }
749
750         /* Translate fault for emulators (e.g. Linux) */
751         if (*p->p_sysent->sv_transtrap)
752                 i = (*p->p_sysent->sv_transtrap)(i, type);
753
754         ksiginfo_init_trap(&ksi);
755         ksi.ksi_signo = i;
756         ksi.ksi_code = ucode;
757         ksi.ksi_addr = (void *)addr;
758         ksi.ksi_trapno = type;
759         if (uprintf_signal) {
760                 uprintf("pid %d comm %s: signal %d err %x code %d type %d "
761                     "addr 0x%x esp 0x%08x eip 0x%08x "
762                     "<%02x %02x %02x %02x %02x %02x %02x %02x>\n",
763                     p->p_pid, p->p_comm, i, frame->tf_err, ucode, type, addr,
764                     frame->tf_esp, frame->tf_eip,
765                     fubyte((void *)(frame->tf_eip + 0)),
766                     fubyte((void *)(frame->tf_eip + 1)),
767                     fubyte((void *)(frame->tf_eip + 2)),
768                     fubyte((void *)(frame->tf_eip + 3)),
769                     fubyte((void *)(frame->tf_eip + 4)),
770                     fubyte((void *)(frame->tf_eip + 5)),
771                     fubyte((void *)(frame->tf_eip + 6)),
772                     fubyte((void *)(frame->tf_eip + 7)));
773         }
774         KASSERT((read_eflags() & PSL_I) != 0, ("interrupts disabled"));
775         trapsignal(td, &ksi);
776
777 #ifdef DEBUG
778         if (type <= MAX_TRAP_MSG) {
779                 uprintf("fatal process exception: %s",
780                         trap_msg[type]);
781                 if ((type == T_PAGEFLT) || (type == T_PROTFLT))
782                         uprintf(", fault VA = 0x%lx", (u_long)eva);
783                 uprintf("\n");
784         }
785 #endif
786
787 user:
788         userret(td, frame);
789         mtx_assert(&Giant, MA_NOTOWNED);
790         KASSERT(PCB_USER_FPU(td->td_pcb),
791             ("Return from trap with kernel FPU ctx leaked"));
792 userout:
793 out:
794         return;
795 }
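/*
 * The user-visible effect of the page-fault handling above can be seen
 * from a small userland test program (an illustrative sketch only): a
 * fault on an unmapped page is delivered as SIGSEGV with si_code set to
 * SEGV_MAPERR and si_addr set to the faulting address (eva).  Note that
 * printf() is used in the handler purely for brevity.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	static void
 *	handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *
 *		printf("signal %d code %d addr %p\n",
 *		    sig, si->si_code, si->si_addr);
 *		exit(0);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		struct sigaction sa;
 *
 *		sa.sa_sigaction = handler;
 *		sa.sa_flags = SA_SIGINFO;
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *		*(volatile int *)0 = 0;
 *		return (1);
 *	}
 */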
796
797 static int
798 trap_pfault(frame, usermode, eva)
799         struct trapframe *frame;
800         int usermode;
801         vm_offset_t eva;
802 {
803         vm_offset_t va;
804         struct vmspace *vm = NULL;
805         vm_map_t map;
806         int rv = 0;
807         vm_prot_t ftype;
808         struct thread *td = curthread;
809         struct proc *p = td->td_proc;
810
811         if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
812                 /*
813                  * Due to both processor errata and lazy TLB invalidation when
814                  * access restrictions are removed from virtual pages, memory
815                  * accesses that are allowed by the physical mapping layer may
816                  * nonetheless cause one spurious page fault per virtual page. 
817                  * When the thread is executing a "no faulting" section that
818                  * is bracketed by vm_fault_{disable,enable}_pagefaults(),
819                  * every page fault is treated as a spurious page fault,
820                  * unless it accesses the same virtual address as the most
821                  * recent page fault within the same "no faulting" section.
822                  */
823                 if (td->td_md.md_spurflt_addr != eva ||
824                     (td->td_pflags & TDP_RESETSPUR) != 0) {
825                         /*
826                          * Do nothing to the TLB.  A stale TLB entry is
827                          * flushed automatically by a page fault.
828                          */
829                         td->td_md.md_spurflt_addr = eva;
830                         td->td_pflags &= ~TDP_RESETSPUR;
831                         return (0);
832                 }
833         } else {
834                 /*
835                  * If we get a page fault while in a critical section, then
836                  * it is most likely a fatal kernel page fault.  The kernel
837                  * is already going to panic trying to get a sleep lock to
838                  * do the VM lookup, so just consider it a fatal trap so the
839                  * kernel can print out a useful trap message and even get
840                  * to the debugger.
841                  *
842                  * If we get a page fault while holding a non-sleepable
843                  * lock, then it is most likely a fatal kernel page fault.
844                  * If WITNESS is enabled, then it's going to whine about
845                  * bogus LORs with various VM locks, so just skip to the
846                  * fatal trap handling directly.
847                  */
848                 if (td->td_critnest != 0 ||
849                     WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
850                     "Kernel page fault") != 0) {
851                         trap_fatal(frame, eva);
852                         return (-1);
853                 }
854         }
855         va = trunc_page(eva);
856         if (va >= KERNBASE) {
857                 /*
858                  * Don't allow user-mode faults in kernel address space.
859                  * An exception:  if the faulting address is the invalid
860                  * instruction entry in the IDT, then the Intel Pentium
861                  * F00F bug workaround was triggered, and we need to
862          * treat it as an illegal instruction, and not a page
863                  * fault.
864                  */
865 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
866                 if ((eva == (unsigned int)&idt[6]) && has_f00f_bug)
867                         return -2;
868 #endif
869                 if (usermode)
870                         goto nogo;
871
872                 map = kernel_map;
873         } else {
874                 /*
875                  * This is a fault on non-kernel virtual memory.
876                  * vm is initialized above to NULL. If curproc is NULL
877                  * or curproc->p_vmspace is NULL the fault is fatal.
878                  */
879                 if (p != NULL)
880                         vm = p->p_vmspace;
881
882                 if (vm == NULL)
883                         goto nogo;
884
885                 map = &vm->vm_map;
886                 if (!usermode && (td->td_intr_nesting_level != 0 ||
887                     curpcb->pcb_onfault == NULL)) {
888                         trap_fatal(frame, eva);
889                         return (-1);
890                 }
891         }
892
893         /*
894          * PGEX_I is defined only if the execute disable bit capability is
895          * supported and enabled.
896          */
897         if (frame->tf_err & PGEX_W)
898                 ftype = VM_PROT_WRITE;
899 #ifdef PAE
900         else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
901                 ftype = VM_PROT_EXECUTE;
902 #endif
903         else
904                 ftype = VM_PROT_READ;
905
906         if (map != kernel_map) {
907                 /*
908                  * Keep swapout from messing with us during this
909                  *      critical time.
910                  */
911                 PROC_LOCK(p);
912                 ++p->p_lock;
913                 PROC_UNLOCK(p);
914
915                 /* Fault in the user page: */
916                 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
917
918                 PROC_LOCK(p);
919                 --p->p_lock;
920                 PROC_UNLOCK(p);
921         } else {
922                 /*
923                  * Don't have to worry about process locking or stacks in the
924                  * kernel.
925                  */
926                 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
927         }
928         if (rv == KERN_SUCCESS) {
929 #ifdef HWPMC_HOOKS
930                 if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
931                         PMC_SOFT_CALL_TF( , , page_fault, all, frame);
932                         if (ftype == VM_PROT_READ)
933                                 PMC_SOFT_CALL_TF( , , page_fault, read,
934                                     frame);
935                         else
936                                 PMC_SOFT_CALL_TF( , , page_fault, write,
937                                     frame);
938                 }
939 #endif
940                 return (0);
941         }
942 nogo:
943         if (!usermode) {
944                 if (td->td_intr_nesting_level == 0 &&
945                     curpcb->pcb_onfault != NULL) {
946                         frame->tf_eip = (int)curpcb->pcb_onfault;
947                         return (0);
948                 }
949                 trap_fatal(frame, eva);
950                 return (-1);
951         }
952
953         return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
954 }
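/*
 * A sketch of the "no faulting" bracket that trap_pfault() checks for
 * via TDP_NOFAULTING; the copyin() call and its arguments are only an
 * illustration:
 *
 *	int save, error;
 *
 *	save = vm_fault_disable_pagefaults();
 *	error = copyin(uaddr, kbuf, len);
 *	vm_fault_enable_pagefaults(save);
 *
 * Inside such a bracket the handler above writes off one fault per
 * virtual page as spurious and retries the access instead of entering
 * the VM system.
 */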
955
956 static void
957 trap_fatal(frame, eva)
958         struct trapframe *frame;
959         vm_offset_t eva;
960 {
961         int code, ss, esp;
962         u_int type;
963         struct soft_segment_descriptor softseg;
964         char *msg;
965
966         code = frame->tf_err;
967         type = frame->tf_trapno;
968         sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);
969
970         if (type <= MAX_TRAP_MSG)
971                 msg = trap_msg[type];
972         else
973                 msg = "UNKNOWN";
974         printf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
975             frame->tf_eflags & PSL_VM ? "vm86" :
976             ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
977 #ifdef SMP
978         /* two separate prints in case of a trap on an unmapped page */
979         printf("cpuid = %d; ", PCPU_GET(cpuid));
980         printf("apic id = %02x\n", PCPU_GET(apic_id));
981 #endif
982         if (type == T_PAGEFLT) {
983                 printf("fault virtual address   = 0x%x\n", eva);
984                 printf("fault code              = %s %s, %s\n",
985                         code & PGEX_U ? "user" : "supervisor",
986                         code & PGEX_W ? "write" : "read",
987                         code & PGEX_P ? "protection violation" : "page not present");
988         }
989         printf("instruction pointer     = 0x%x:0x%x\n",
990                frame->tf_cs & 0xffff, frame->tf_eip);
991         if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
992                 ss = frame->tf_ss & 0xffff;
993                 esp = frame->tf_esp;
994         } else {
995                 ss = GSEL(GDATA_SEL, SEL_KPL);
996                 esp = (int)&frame->tf_esp;
997         }
998         printf("stack pointer           = 0x%x:0x%x\n", ss, esp);
999         printf("frame pointer           = 0x%x:0x%x\n", ss, frame->tf_ebp);
1000         printf("code segment            = base 0x%x, limit 0x%x, type 0x%x\n",
1001                softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
1002         printf("                        = DPL %d, pres %d, def32 %d, gran %d\n",
1003                softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
1004                softseg.ssd_gran);
1005         printf("processor eflags        = ");
1006         if (frame->tf_eflags & PSL_T)
1007                 printf("trace trap, ");
1008         if (frame->tf_eflags & PSL_I)
1009                 printf("interrupt enabled, ");
1010         if (frame->tf_eflags & PSL_NT)
1011                 printf("nested task, ");
1012         if (frame->tf_eflags & PSL_RF)
1013                 printf("resume, ");
1014         if (frame->tf_eflags & PSL_VM)
1015                 printf("vm86, ");
1016         printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
1017         printf("current process         = ");
1018         if (curproc) {
1019                 printf("%lu (%s)\n", (u_long)curproc->p_pid, curthread->td_name);
1020         } else {
1021                 printf("Idle\n");
1022         }
1023
1024 #ifdef KDB
1025         if (debugger_on_panic || kdb_active) {
1026                 frame->tf_err = eva;    /* smuggle fault address to ddb */
1027                 if (kdb_trap(type, 0, frame)) {
1028                         frame->tf_err = code;   /* restore error code */
1029                         return;
1030                 }
1031                 frame->tf_err = code;           /* restore error code */
1032         }
1033 #endif
1034         printf("trap number             = %d\n", type);
1035         if (type <= MAX_TRAP_MSG)
1036                 panic("%s", trap_msg[type]);
1037         else
1038                 panic("unknown/reserved trap");
1039 }
1040
1041 /*
1042  * Double fault handler. Called when a fault occurs while writing
1043  * a frame for a trap/exception onto the stack. This usually occurs
1044  * when the stack overflows (as is the case with infinite recursion,
1045  * for example).
1046  *
1047  * XXX Note that the current PTD gets replaced by IdlePTD when the
1048  * task switch occurs. This means that the stack that was active at
1049  * the time of the double fault is not available at <kstack> unless
1050  * the machine was idle when the double fault occurred. The downside
1051  * of this is that "trace <ebp>" in ddb won't work.
1052  */
1053 void
1054 dblfault_handler()
1055 {
1056 #ifdef KDTRACE_HOOKS
1057         if (dtrace_doubletrap_func != NULL)
1058                 (*dtrace_doubletrap_func)();
1059 #endif
1060         printf("\nFatal double fault:\n");
1061         printf("eip = 0x%x\n", PCPU_GET(common_tss.tss_eip));
1062         printf("esp = 0x%x\n", PCPU_GET(common_tss.tss_esp));
1063         printf("ebp = 0x%x\n", PCPU_GET(common_tss.tss_ebp));
1064 #ifdef SMP
1065         /* two separate prints in case of a trap on an unmapped page */
1066         printf("cpuid = %d; ", PCPU_GET(cpuid));
1067         printf("apic id = %02x\n", PCPU_GET(apic_id));
1068 #endif
1069         panic("double fault");
1070 }
1071
1072 int
1073 cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
1074 {
1075         struct proc *p;
1076         struct trapframe *frame;
1077         caddr_t params;
1078         int error;
1079
1080         p = td->td_proc;
1081         frame = td->td_frame;
1082
1083         params = (caddr_t)frame->tf_esp + sizeof(int);
1084         sa->code = frame->tf_eax;
1085
1086         /*
1087          * Need to check if this is a 32 bit or 64 bit syscall.
1088          */
1089         if (sa->code == SYS_syscall) {
1090                 /*
1091                  * Code is first argument, followed by actual args.
1092                  */
1093                 sa->code = fuword(params);
1094                 params += sizeof(int);
1095         } else if (sa->code == SYS___syscall) {
1096                 /*
1097                  * Like syscall, but code is a quad, so as to maintain
1098                  * quad alignment for the rest of the arguments.
1099                  */
1100                 sa->code = fuword(params);
1101                 params += sizeof(quad_t);
1102         }
1103
1104         if (p->p_sysent->sv_mask)
1105                 sa->code &= p->p_sysent->sv_mask;
1106         if (sa->code >= p->p_sysent->sv_size)
1107                 sa->callp = &p->p_sysent->sv_table[0];
1108         else
1109                 sa->callp = &p->p_sysent->sv_table[sa->code];
1110         sa->narg = sa->callp->sy_narg;
1111
1112         if (params != NULL && sa->narg != 0)
1113                 error = copyin(params, (caddr_t)sa->args,
1114                     (u_int)(sa->narg * sizeof(int)));
1115         else
1116                 error = 0;
1117
1118         if (error == 0) {
1119                 td->td_retval[0] = 0;
1120                 td->td_retval[1] = frame->tf_edx;
1121         }
1122                 
1123         return (error);
1124 }
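/*
 * For reference, the user-side convention decoded above: the syscall
 * number is passed in %eax and the arguments sit on the user stack above
 * a return address, which is why "params" starts at tf_esp + sizeof(int).
 * An assembler sketch, using write(2) only as an example:
 *
 *	movl	$4, %eax		# SYS_write
 *	pushl	$len			# third argument
 *	pushl	$buf			# second argument
 *	pushl	$1			# first argument (fd)
 *	call	do_int80		# pushes the return address
 *	addl	$12, %esp
 *	...
 * do_int80:
 *	int	$0x80
 *	ret
 *
 * On return the carry flag indicates an error with the errno value in
 * %eax; otherwise %eax (and %edx for 64-bit results) hold the return
 * value.
 */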
1125
1126 #include "../../kern/subr_syscall.c"
1127
1128 /*
1129  * syscall - system call request C handler.  A system call is
1130  * essentially treated as a trap by reusing the frame layout.
1131  */
1132 void
1133 syscall(struct trapframe *frame)
1134 {
1135         struct thread *td;
1136         struct syscall_args sa;
1137         register_t orig_tf_eflags;
1138         int error;
1139         ksiginfo_t ksi;
1140
1141 #ifdef DIAGNOSTIC
1142         if (ISPL(frame->tf_cs) != SEL_UPL) {
1143                 panic("syscall");
1144                 /* NOT REACHED */
1145         }
1146 #endif
1147         orig_tf_eflags = frame->tf_eflags;
1148
1149         td = curthread;
1150         td->td_frame = frame;
1151
1152         error = syscallenter(td, &sa);
1153
1154         /*
1155          * Traced syscall.
1156          */
1157         if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
1158                 frame->tf_eflags &= ~PSL_T;
1159                 ksiginfo_init_trap(&ksi);
1160                 ksi.ksi_signo = SIGTRAP;
1161                 ksi.ksi_code = TRAP_TRACE;
1162                 ksi.ksi_addr = (void *)frame->tf_eip;
1163                 trapsignal(td, &ksi);
1164         }
1165
1166         KASSERT(PCB_USER_FPU(td->td_pcb),
1167             ("System call %s returning with kernel FPU ctx leaked",
1168              syscallname(td->td_proc, sa.code)));
1169         KASSERT(td->td_pcb->pcb_save == &td->td_pcb->pcb_user_save,
1170             ("System call %s returning with mangled pcb_save",
1171              syscallname(td->td_proc, sa.code)));
1172
1173         syscallret(td, error, &sa);
1174 }