]> CyberLeo.Net >> Repos - FreeBSD/releng/8.2.git/blob - sys/ia64/ia64/trap.c
Copy stable/8 to releng/8.2 in preparation for FreeBSD-8.2 release.
[FreeBSD/releng/8.2.git] / sys / ia64 / ia64 / trap.c
1 /*-
2  * Copyright (c) 2005 Marcel Moolenaar
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include "opt_ddb.h"
31 #include "opt_ktrace.h"
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kdb.h>
36 #include <sys/ktr.h>
37 #include <sys/sysproto.h>
38 #include <sys/kernel.h>
39 #include <sys/proc.h>
40 #include <sys/exec.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/sched.h>
44 #include <sys/smp.h>
45 #include <sys/vmmeter.h>
46 #include <sys/sysent.h>
47 #include <sys/signalvar.h>
48 #include <sys/syscall.h>
49 #include <sys/pioctl.h>
50 #include <sys/ptrace.h>
51 #include <sys/sysctl.h>
52 #include <vm/vm.h>
53 #include <vm/vm_kern.h>
54 #include <vm/vm_page.h>
55 #include <vm/vm_map.h>
56 #include <vm/vm_extern.h>
57 #include <vm/vm_param.h>
58 #include <sys/ptrace.h>
59 #include <machine/cpu.h>
60 #include <machine/md_var.h>
61 #include <machine/reg.h>
62 #include <machine/pal.h>
63 #include <machine/fpu.h>
64 #include <machine/efi.h>
65 #include <machine/pcb.h>
66 #ifdef SMP
67 #include <machine/smp.h>
68 #endif
69
70 #ifdef KTRACE
71 #include <sys/uio.h>
72 #include <sys/ktrace.h>
73 #endif
74
75 #include <security/audit/audit.h>
76
77 #include <ia64/disasm/disasm.h>
78
/*
 * machdep.print_usertrap sysctl (read-write).  NOTE(review): name suggests
 * it enables printing of traps delivered to user processes; the consuming
 * code is not visible in this chunk — confirm against the rest of trap.c.
 */
static int print_usertrap = 0;
SYSCTL_INT(_machdep, OID_AUTO, print_usertrap,
    CTLFLAG_RW, &print_usertrap, 0, "");
82
/* Handler for the break-instruction syscall path; defined later in this file. */
static void break_syscall(struct trapframe *tf);

/*
 * EFI-Provided FPSWA interface (Floating Point SoftWare Assist).
 * Used by the floating-point fault/trap cases to emulate operations
 * the hardware cannot complete.
 */
extern struct fpswa_iface *fpswa_iface;
89
/*
 * Human-readable names for the ia64 interruption vectors, indexed by
 * vector number (0-67).  Used by printtrap() for diagnostic output.
 */
static const char *ia64_vector_names[] = {
	"VHPT Translation",			/* 0 */
	"Instruction TLB",			/* 1 */
	"Data TLB",				/* 2 */
	"Alternate Instruction TLB",		/* 3 */
	"Alternate Data TLB",			/* 4 */
	"Data Nested TLB",			/* 5 */
	"Instruction Key Miss",			/* 6 */
	"Data Key Miss",			/* 7 */
	"Dirty-Bit",				/* 8 */
	"Instruction Access-Bit",		/* 9 */
	"Data Access-Bit",			/* 10 */
	"Break Instruction",			/* 11 */
	"External Interrupt",			/* 12 */
	"Reserved 13",				/* 13 */
	"Reserved 14",				/* 14 */
	"Reserved 15",				/* 15 */
	"Reserved 16",				/* 16 */
	"Reserved 17",				/* 17 */
	"Reserved 18",				/* 18 */
	"Reserved 19",				/* 19 */
	"Page Not Present",			/* 20 */
	"Key Permission",			/* 21 */
	"Instruction Access Rights",		/* 22 */
	"Data Access Rights",			/* 23 */
	"General Exception",			/* 24 */
	"Disabled FP-Register",			/* 25 */
	"NaT Consumption",			/* 26 */
	"Speculation",				/* 27 */
	"Reserved 28",				/* 28 */
	"Debug",				/* 29 */
	"Unaligned Reference",			/* 30 */
	"Unsupported Data Reference",		/* 31 */
	"Floating-point Fault",			/* 32 */
	"Floating-point Trap",			/* 33 */
	"Lower-Privilege Transfer Trap",	/* 34 */
	"Taken Branch Trap",			/* 35 */
	"Single Step Trap",			/* 36 */
	"Reserved 37",				/* 37 */
	"Reserved 38",				/* 38 */
	"Reserved 39",				/* 39 */
	"Reserved 40",				/* 40 */
	"Reserved 41",				/* 41 */
	"Reserved 42",				/* 42 */
	"Reserved 43",				/* 43 */
	"Reserved 44",				/* 44 */
	"IA-32 Exception",			/* 45 */
	"IA-32 Intercept",			/* 46 */
	"IA-32 Interrupt",			/* 47 */
	"Reserved 48",				/* 48 */
	"Reserved 49",				/* 49 */
	"Reserved 50",				/* 50 */
	"Reserved 51",				/* 51 */
	"Reserved 52",				/* 52 */
	"Reserved 53",				/* 53 */
	"Reserved 54",				/* 54 */
	"Reserved 55",				/* 55 */
	"Reserved 56",				/* 56 */
	"Reserved 57",				/* 57 */
	"Reserved 58",				/* 58 */
	"Reserved 59",				/* 59 */
	"Reserved 60",				/* 60 */
	"Reserved 61",				/* 61 */
	"Reserved 62",				/* 62 */
	"Reserved 63",				/* 63 */
	"Reserved 64",				/* 64 */
	"Reserved 65",				/* 65 */
	"Reserved 66",				/* 66 */
	"Reserved 67",				/* 67 */
};
160
/*
 * One entry in a bit-decoding table (see printbits()): 'mask' selects a
 * single bit or a contiguous multi-bit field; 'name' is its symbolic name.
 */
struct bitname {
	uint64_t mask;
	const char* name;
};
165
166 static void
167 printbits(uint64_t mask, struct bitname *bn, int count)
168 {
169         int i, first = 1;
170         uint64_t bit;
171
172         for (i = 0; i < count; i++) {
173                 /*
174                  * Handle fields wider than one bit.
175                  */
176                 bit = bn[i].mask & ~(bn[i].mask - 1);
177                 if (bn[i].mask > bit) {
178                         if (first)
179                                 first = 0;
180                         else
181                                 printf(",");
182                         printf("%s=%ld", bn[i].name,
183                                (mask & bn[i].mask) / bit);
184                 } else if (mask & bit) {
185                         if (first)
186                                 first = 0;
187                         else
188                                 printf(",");
189                         printf("%s", bn[i].name);
190                 }
191         }
192 }
193
/*
 * Decoding table for the Processor Status Register (PSR), consumed by
 * printpsr().  Bit definitions come from <machine/...> IA64_PSR_* macros.
 */
struct bitname psr_bits[] = {
	{IA64_PSR_BE,	"be"},
	{IA64_PSR_UP,	"up"},
	{IA64_PSR_AC,	"ac"},
	{IA64_PSR_MFL,	"mfl"},
	{IA64_PSR_MFH,	"mfh"},
	{IA64_PSR_IC,	"ic"},
	{IA64_PSR_I,	"i"},
	{IA64_PSR_PK,	"pk"},
	{IA64_PSR_DT,	"dt"},
	{IA64_PSR_DFL,	"dfl"},
	{IA64_PSR_DFH,	"dfh"},
	{IA64_PSR_SP,	"sp"},
	{IA64_PSR_PP,	"pp"},
	{IA64_PSR_DI,	"di"},
	{IA64_PSR_SI,	"si"},
	{IA64_PSR_DB,	"db"},
	{IA64_PSR_LP,	"lp"},
	{IA64_PSR_TB,	"tb"},
	{IA64_PSR_RT,	"rt"},
	{IA64_PSR_CPL,	"cpl"},
	{IA64_PSR_IS,	"is"},
	{IA64_PSR_MC,	"mc"},
	{IA64_PSR_IT,	"it"},
	{IA64_PSR_ID,	"id"},
	{IA64_PSR_DA,	"da"},
	{IA64_PSR_DD,	"dd"},
	{IA64_PSR_SS,	"ss"},
	{IA64_PSR_RI,	"ri"},
	{IA64_PSR_ED,	"ed"},
	{IA64_PSR_BN,	"bn"},
	{IA64_PSR_IA,	"ia"},
};
227
228 static void
229 printpsr(uint64_t psr)
230 {
231         printbits(psr, psr_bits, sizeof(psr_bits)/sizeof(psr_bits[0]));
232 }
233
/*
 * Decoding table for the Interruption Status Register (ISR), consumed by
 * printisr().  Bit definitions come from <machine/...> IA64_ISR_* macros.
 */
struct bitname isr_bits[] = {
	{IA64_ISR_CODE,	"code"},
	{IA64_ISR_VECTOR, "vector"},
	{IA64_ISR_X,	"x"},
	{IA64_ISR_W,	"w"},
	{IA64_ISR_R,	"r"},
	{IA64_ISR_NA,	"na"},
	{IA64_ISR_SP,	"sp"},
	{IA64_ISR_RS,	"rs"},
	{IA64_ISR_IR,	"ir"},
	{IA64_ISR_NI,	"ni"},
	{IA64_ISR_SO,	"so"},
	{IA64_ISR_EI,	"ei"},
	{IA64_ISR_ED,	"ed"},
};
249
250 static void printisr(uint64_t isr)
251 {
252         printbits(isr, isr_bits, sizeof(isr_bits)/sizeof(isr_bits[0]));
253 }
254
/*
 * Dump a diagnostic description of a trap to the console: vector name,
 * cr.iip, decoded cr.ipsr and cr.isr, cr.ifa, and the current thread.
 * When the PSR indicates IA-32 mode (IA64_PSR_IS), also dump the IA-32
 * application registers ar.cflg/ar.csd/ar.ssd.
 */
static void
printtrap(int vector, struct trapframe *tf, int isfatal, int user)
{
	printf("\n");
	printf("%s %s trap (cpu %d):\n", isfatal? "fatal" : "handled",
	       user ? "user" : "kernel", PCPU_GET(cpuid));
	printf("\n");
	printf("    trap vector = 0x%x (%s)\n",
	       vector, ia64_vector_names[vector]);
	printf("    cr.iip      = 0x%lx\n", tf->tf_special.iip);
	printf("    cr.ipsr     = 0x%lx (", tf->tf_special.psr);
	printpsr(tf->tf_special.psr);
	printf(")\n");
	printf("    cr.isr      = 0x%lx (", tf->tf_special.isr);
	printisr(tf->tf_special.isr);
	printf(")\n");
	printf("    cr.ifa      = 0x%lx\n", tf->tf_special.ifa);
	if (tf->tf_special.psr & IA64_PSR_IS) {
		printf("    ar.cflg     = 0x%lx\n", ia64_get_cflg());
		printf("    ar.csd      = 0x%lx\n", ia64_get_csd());
		printf("    ar.ssd      = 0x%lx\n", ia64_get_ssd());
	}
	printf("    curthread   = %p\n", curthread);
	if (curthread != NULL)
		printf("        pid = %d, comm = %s\n",
		       curthread->td_proc->p_pid, curthread->td_name);
	printf("\n");
}
283
284 /*
285  * We got a trap caused by a break instruction and the immediate was 0.
286  * This indicates that we may have a break.b with some non-zero immediate.
287  * The break.b doesn't cause the immediate to be put in cr.iim.  Hence,
288  * we need to disassemble the bundle and return the immediate found there.
289  * This may be a 0 value anyway.  Return 0 for any error condition.  This
290  * will result in a SIGILL, which is pretty much the best thing to do.
291  */
292 static uint64_t
293 trap_decode_break(struct trapframe *tf)
294 {
295         struct asm_bundle bundle;
296         struct asm_inst *inst;
297         int slot;
298
299         if (!asm_decode(tf->tf_special.iip, &bundle))
300                 return (0);
301
302         slot = ((tf->tf_special.psr & IA64_PSR_RI) == IA64_PSR_RI_0) ? 0 :
303             ((tf->tf_special.psr & IA64_PSR_RI) == IA64_PSR_RI_1) ? 1 : 2;
304         inst = bundle.b_inst + slot;
305
306         /*
307          * Sanity checking: It must be a break instruction and the operand
308          * that has the break value must be an immediate.
309          */
310         if (inst->i_op != ASM_OP_BREAK ||
311             inst->i_oper[1].o_type != ASM_OPER_IMM)
312                 return (0);
313
314         return (inst->i_oper[1].o_value);
315 }
316
/*
 * Report a fatal trap, give the kernel debugger (if configured) a chance
 * to take over, and panic.  Does not return.
 */
void
trap_panic(int vector, struct trapframe *tf)
{

	printtrap(vector, tf, 1, TRAPF_USERMODE(tf));
#ifdef KDB
	kdb_trap(vector, 0, tf);
#endif
	panic("trap");
}
327
/*
 * Process any pending ASTs before returning to user mode.  Interrupts are
 * re-enabled around each ast() call (ast() may block) and re-checked with
 * interrupts disabled so no wakeup can slip in between the flag test and
 * the return — the enable/disable ordering here is deliberate.
 */
int
do_ast(struct trapframe *tf)
{

	ia64_disable_intr();
	while (curthread->td_flags & (TDF_ASTPENDING|TDF_NEEDRESCHED)) {
		ia64_enable_intr();
		ast(tf);
		ia64_disable_intr();
	}
	/*
	 * Keep interrupts disabled. We return r10 as a favor to the EPC
	 * syscall code so that it can quickly determine if the syscall
	 * needs to be restarted or not.
	 */
	return (tf->tf_scratch.gr10);
}
348
349 /*
350  * Trap is called from exception.s to handle most types of processor traps.
351  */
352 /*ARGSUSED*/
353 void
354 trap(int vector, struct trapframe *tf)
355 {
356         struct proc *p;
357         struct thread *td;
358         uint64_t ucode;
359         int error, sig, user;
360         ksiginfo_t ksi;
361
362         user = TRAPF_USERMODE(tf) ? 1 : 0;
363
364         PCPU_INC(cnt.v_trap);
365
366         td = curthread;
367         p = td->td_proc;
368         ucode = 0;
369
370         if (user) {
371                 ia64_set_fpsr(IA64_FPSR_DEFAULT);
372                 td->td_pticks = 0;
373                 td->td_frame = tf;
374                 if (td->td_ucred != p->p_ucred)
375                         cred_update_thread(td);
376         } else {
377                 KASSERT(cold || td->td_ucred != NULL,
378                     ("kernel trap doesn't have ucred"));
379 #ifdef KDB
380                 if (kdb_active)
381                         kdb_reenter();
382 #endif
383         }
384
385         sig = 0;
386         switch (vector) {
387         case IA64_VEC_VHPT:
388                 /*
389                  * This one is tricky. We should hardwire the VHPT, but
390                  * don't at this time. I think we're mostly lucky that
391                  * the VHPT is mapped.
392                  */
393                 trap_panic(vector, tf);
394                 break;
395
396         case IA64_VEC_ITLB:
397         case IA64_VEC_DTLB:
398         case IA64_VEC_EXT_INTR:
399                 /* We never call trap() with these vectors. */
400                 trap_panic(vector, tf);
401                 break;
402
403         case IA64_VEC_ALT_ITLB:
404         case IA64_VEC_ALT_DTLB:
405                 /*
406                  * These should never happen, because regions 0-4 use the
407                  * VHPT. If we get one of these it means we didn't program
408                  * the region registers correctly.
409                  */
410                 trap_panic(vector, tf);
411                 break;
412
413         case IA64_VEC_NESTED_DTLB:
414                 /*
415                  * When the nested TLB handler encounters an unexpected
416                  * condition, it'll switch to the backup stack and transfer
417                  * here. All we need to do is panic.
418                  */
419                 trap_panic(vector, tf);
420                 break;
421
422         case IA64_VEC_IKEY_MISS:
423         case IA64_VEC_DKEY_MISS:
424         case IA64_VEC_KEY_PERMISSION:
425                 /*
426                  * We don't use protection keys, so we should never get
427                  * these faults.
428                  */
429                 trap_panic(vector, tf);
430                 break;
431
432         case IA64_VEC_DIRTY_BIT:
433         case IA64_VEC_INST_ACCESS:
434         case IA64_VEC_DATA_ACCESS:
435                 /*
436                  * We get here if we read or write to a page of which the
437                  * PTE does not have the access bit or dirty bit set and
438                  * we can not find the PTE in our datastructures. This
439                  * either means we have a stale PTE in the TLB, or we lost
440                  * the PTE in our datastructures.
441                  */
442                 trap_panic(vector, tf);
443                 break;
444
445         case IA64_VEC_BREAK:
446                 if (user) {
447                         ucode = (int)tf->tf_special.ifa & 0x1FFFFF;
448                         if (ucode == 0) {
449                                 /*
450                                  * A break.b doesn't cause the immediate to be
451                                  * stored in cr.iim (and saved in the TF in
452                                  * tf_special.ifa).  We need to decode the
453                                  * instruction to find out what the immediate
454                                  * was.  Note that if the break instruction
455                                  * didn't happen to be a break.b, but any
456                                  * other break with an immediate of 0, we
457                                  * will do unnecessary work to get the value
458                                  * we already had.  Not an issue, because a
459                                  * break 0 is invalid.
460                                  */
461                                 ucode = trap_decode_break(tf);
462                         }
463                         if (ucode < 0x80000) {
464                                 /* Software interrupts. */
465                                 switch (ucode) {
466                                 case 0:         /* Unknown error. */
467                                         sig = SIGILL;
468                                         break;
469                                 case 1:         /* Integer divide by zero. */
470                                         sig = SIGFPE;
471                                         ucode = FPE_INTDIV;
472                                         break;
473                                 case 2:         /* Integer overflow. */
474                                         sig = SIGFPE;
475                                         ucode = FPE_INTOVF;
476                                         break;
477                                 case 3:         /* Range check/bounds check. */
478                                         sig = SIGFPE;
479                                         ucode = FPE_FLTSUB;
480                                         break;
481                                 case 6:         /* Decimal overflow. */
482                                 case 7:         /* Decimal divide by zero. */
483                                 case 8:         /* Packed decimal error. */
484                                 case 9:         /* Invalid ASCII digit. */
485                                 case 10:        /* Invalid decimal digit. */
486                                         sig = SIGFPE;
487                                         ucode = FPE_FLTINV;
488                                         break;
489                                 case 4:         /* Null pointer dereference. */
490                                 case 5:         /* Misaligned data. */
491                                 case 11:        /* Paragraph stack overflow. */
492                                         sig = SIGSEGV;
493                                         break;
494                                 default:
495                                         sig = SIGILL;
496                                         break;
497                                 }
498                         } else if (ucode < 0x100000) {
499                                 /* Debugger breakpoint. */
500                                 tf->tf_special.psr &= ~IA64_PSR_SS;
501                                 sig = SIGTRAP;
502                         } else if (ucode == 0x100000) {
503                                 break_syscall(tf);
504                                 return;         /* do_ast() already called. */
505                         } else if (ucode == 0x180000) {
506                                 mcontext_t mc;
507
508                                 error = copyin((void*)tf->tf_scratch.gr8,
509                                     &mc, sizeof(mc));
510                                 if (!error) {
511                                         set_mcontext(td, &mc);
512                                         return; /* Don't call do_ast()!!! */
513                                 }
514                                 sig = SIGSEGV;
515                                 ucode = tf->tf_scratch.gr8;
516                         } else
517                                 sig = SIGILL;
518                 } else {
519 #ifdef KDB
520                         if (kdb_trap(vector, 0, tf))
521                                 return;
522                         panic("trap");
523 #else
524                         trap_panic(vector, tf);
525 #endif
526                 }
527                 break;
528
529         case IA64_VEC_PAGE_NOT_PRESENT:
530         case IA64_VEC_INST_ACCESS_RIGHTS:
531         case IA64_VEC_DATA_ACCESS_RIGHTS: {
532                 vm_offset_t va;
533                 struct vmspace *vm;
534                 vm_map_t map;
535                 vm_prot_t ftype;
536                 int rv;
537
538                 rv = 0;
539                 va = trunc_page(tf->tf_special.ifa);
540
541                 if (va >= VM_MAX_ADDRESS) {
542                         /*
543                          * Don't allow user-mode faults for kernel virtual
544                          * addresses, including the gateway page.
545                          */
546                         if (user)
547                                 goto no_fault_in;
548                         map = kernel_map;
549                 } else {
550                         vm = (p != NULL) ? p->p_vmspace : NULL;
551                         if (vm == NULL)
552                                 goto no_fault_in;
553                         map = &vm->vm_map;
554                 }
555
556                 if (tf->tf_special.isr & IA64_ISR_X)
557                         ftype = VM_PROT_EXECUTE;
558                 else if (tf->tf_special.isr & IA64_ISR_W)
559                         ftype = VM_PROT_WRITE;
560                 else
561                         ftype = VM_PROT_READ;
562
563                 if (map != kernel_map) {
564                         /*
565                          * Keep swapout from messing with us during this
566                          * critical time.
567                          */
568                         PROC_LOCK(p);
569                         ++p->p_lock;
570                         PROC_UNLOCK(p);
571
572                         /* Fault in the user page: */
573                         rv = vm_fault(map, va, ftype, (ftype & VM_PROT_WRITE)
574                             ? VM_FAULT_DIRTY : VM_FAULT_NORMAL);
575
576                         PROC_LOCK(p);
577                         --p->p_lock;
578                         PROC_UNLOCK(p);
579                 } else {
580                         /*
581                          * Don't have to worry about process locking or
582                          * stacks in the kernel.
583                          */
584                         rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
585                 }
586
587                 if (rv == KERN_SUCCESS)
588                         goto out;
589
590         no_fault_in:
591                 if (!user) {
592                         /* Check for copyin/copyout fault. */
593                         if (td != NULL && td->td_pcb->pcb_onfault != 0) {
594                                 tf->tf_special.iip =
595                                     td->td_pcb->pcb_onfault;
596                                 tf->tf_special.psr &= ~IA64_PSR_RI;
597                                 td->td_pcb->pcb_onfault = 0;
598                                 goto out;
599                         }
600                         trap_panic(vector, tf);
601                 }
602                 ucode = va;
603                 sig = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
604                 break;
605         }
606
607         case IA64_VEC_GENERAL_EXCEPTION: {
608                 int code;
609
610                 if (!user)
611                         trap_panic(vector, tf);
612
613                 code = tf->tf_special.isr & (IA64_ISR_CODE & 0xf0ull);
614                 switch (code) {
615                 case 0x0:       /* Illegal Operation Fault. */
616                         sig = ia64_emulate(tf, td);
617                         break;
618                 default:
619                         sig = SIGILL;
620                         break;
621                 }
622                 if (sig == 0)
623                         goto out;
624                 ucode = vector;
625                 break;
626         }
627
628         case IA64_VEC_SPECULATION:
629                 /*
630                  * The branching behaviour of the chk instruction is not
631                  * implemented by the processor. All we need to do is
632                  * compute the target address of the branch and make sure
633                  * that control is transfered to that address.
634                  * We should do this in the IVT table and not by entring
635                  * the kernel...
636                  */
637                 tf->tf_special.iip += tf->tf_special.ifa << 4;
638                 tf->tf_special.psr &= ~IA64_PSR_RI;
639                 goto out;
640
641         case IA64_VEC_NAT_CONSUMPTION:
642         case IA64_VEC_UNSUPP_DATA_REFERENCE:
643                 if (user) {
644                         ucode = vector;
645                         sig = SIGILL;
646                 } else
647                         trap_panic(vector, tf);
648                 break;
649
650         case IA64_VEC_DISABLED_FP: {
651                 if (user)
652                         ia64_highfp_enable(td, tf);
653                 else
654                         trap_panic(vector, tf);
655                 goto out;
656         }
657
658         case IA64_VEC_DEBUG:
659         case IA64_VEC_SINGLE_STEP_TRAP:
660                 tf->tf_special.psr &= ~IA64_PSR_SS;
661                 if (!user) {
662 #ifdef KDB
663                         if (kdb_trap(vector, 0, tf))
664                                 return;
665                         panic("trap");
666 #else
667                         trap_panic(vector, tf);
668 #endif
669                 }
670                 sig = SIGTRAP;
671                 break;
672
673         case IA64_VEC_UNALIGNED_REFERENCE:
674                 /*
675                  * If user-land, do whatever fixups, printing, and
676                  * signalling is appropriate (based on system-wide
677                  * and per-process unaligned-access-handling flags).
678                  */
679                 if (user) {
680                         sig = unaligned_fixup(tf, td);
681                         if (sig == 0)
682                                 goto out;
683                         ucode = tf->tf_special.ifa;     /* VA */
684                 } else {
685                         /* Check for copyin/copyout fault. */
686                         if (td != NULL && td->td_pcb->pcb_onfault != 0) {
687                                 tf->tf_special.iip =
688                                     td->td_pcb->pcb_onfault;
689                                 tf->tf_special.psr &= ~IA64_PSR_RI;
690                                 td->td_pcb->pcb_onfault = 0;
691                                 goto out;
692                         }
693                         trap_panic(vector, tf);
694                 }
695                 break;
696
697         case IA64_VEC_FLOATING_POINT_FAULT:
698         case IA64_VEC_FLOATING_POINT_TRAP: {
699                 struct fpswa_bundle bundle;
700                 struct fpswa_fpctx fpctx;
701                 struct fpswa_ret ret;
702                 char *ip;
703                 u_long fault;
704
705                 /* Always fatal in kernel. Should never happen. */
706                 if (!user)
707                         trap_panic(vector, tf);
708
709                 if (fpswa_iface == NULL) {
710                         sig = SIGFPE;
711                         ucode = 0;
712                         break;
713                 }
714
715                 ip = (char *)tf->tf_special.iip;
716                 if (vector == IA64_VEC_FLOATING_POINT_TRAP &&
717                     (tf->tf_special.psr & IA64_PSR_RI) == 0)
718                         ip -= 16;
719                 error = copyin(ip, &bundle, sizeof(bundle));
720                 if (error) {
721                         sig = SIGBUS;   /* EFAULT, basically */
722                         ucode = 0;      /* exception summary */
723                         break;
724                 }
725
726                 /* f6-f15 are saved in exception_save */
727                 fpctx.mask_low = 0xffc0;                /* bits 6 - 15 */
728                 fpctx.mask_high = 0;
729                 fpctx.fp_low_preserved = NULL;
730                 fpctx.fp_low_volatile = &tf->tf_scratch_fp.fr6;
731                 fpctx.fp_high_preserved = NULL;
732                 fpctx.fp_high_volatile = NULL;
733
734                 fault = (vector == IA64_VEC_FLOATING_POINT_FAULT) ? 1 : 0;
735
736                 /*
737                  * We have the high FP registers disabled while in the
738                  * kernel. Enable them for the FPSWA handler only.
739                  */
740                 ia64_enable_highfp();
741
742                 /* The docs are unclear.  Is Fpswa reentrant? */
743                 ret = fpswa_iface->if_fpswa(fault, &bundle,
744                     &tf->tf_special.psr, &tf->tf_special.fpsr,
745                     &tf->tf_special.isr, &tf->tf_special.pr,
746                     &tf->tf_special.cfm, &fpctx);
747
748                 ia64_disable_highfp();
749
750                 /*
751                  * Update ipsr and iip to next instruction. We only
752                  * have to do that for faults.
753                  */
754                 if (fault && (ret.status == 0 || (ret.status & 2))) {
755                         int ei;
756
757                         ei = (tf->tf_special.isr >> 41) & 0x03;
758                         if (ei == 0) {          /* no template for this case */
759                                 tf->tf_special.psr &= ~IA64_ISR_EI;
760                                 tf->tf_special.psr |= IA64_ISR_EI_1;
761                         } else if (ei == 1) {   /* MFI or MFB */
762                                 tf->tf_special.psr &= ~IA64_ISR_EI;
763                                 tf->tf_special.psr |= IA64_ISR_EI_2;
764                         } else if (ei == 2) {   /* MMF */
765                                 tf->tf_special.psr &= ~IA64_ISR_EI;
766                                 tf->tf_special.iip += 0x10;
767                         }
768                 }
769
770                 if (ret.status == 0) {
771                         goto out;
772                 } else if (ret.status == -1) {
773                         printf("FATAL: FPSWA err1 %lx, err2 %lx, err3 %lx\n",
774                             ret.err1, ret.err2, ret.err3);
775                         panic("fpswa fatal error on fp fault");
776                 } else {
777                         sig = SIGFPE;
778                         ucode = 0;              /* XXX exception summary */
779                         break;
780                 }
781         }
782
783         case IA64_VEC_LOWER_PRIVILEGE_TRANSFER:
784                 /*
785                  * The lower-privilege transfer trap is used by the EPC
786                  * syscall code to trigger re-entry into the kernel when the
787                  * process should be single stepped. The problem is that
788                  * there's no way to set single stepping directly without
789                  * using the rfi instruction. So instead we enable the
790                  * lower-privilege transfer trap and when we get here we
791                  * know that the process is about to enter userland (and
792                  * has already lowered its privilege).
793                  * However, there's another gotcha. When the process has
		 * lowered its privilege it's still running in the gateway
795                  * page. If we enable single stepping, we'll be stepping
796                  * the code in the gateway page. In and by itself this is
797                  * not a problem, but it's an address debuggers won't know
798                  * anything about. Hence, it can only cause confusion.
799                  * We know that we need to branch to get out of the gateway
800                  * page, so what we do here is enable the taken branch
801                  * trap and just let the process continue. When we branch
802                  * out of the gateway page we'll get back into the kernel
803                  * and then we enable single stepping.
		 * Since this is a rather round-about way of enabling single
805                  * stepping, don't make things even more complicated by
806                  * calling userret() and do_ast(). We do that later...
807                  */
808                 tf->tf_special.psr &= ~IA64_PSR_LP;
809                 tf->tf_special.psr |= IA64_PSR_TB;
810                 return;
811
812         case IA64_VEC_TAKEN_BRANCH_TRAP:
813                 /*
814                  * Don't assume there aren't any branches other than the
815                  * branch that takes us out of the gateway page. Check the
		 * iip and enable single stepping only when it's a user
817                  * address.
818                  */
819                 if (tf->tf_special.iip >= VM_MAX_ADDRESS)
820                         return;
821                 tf->tf_special.psr &= ~IA64_PSR_TB;
822                 tf->tf_special.psr |= IA64_PSR_SS;
823                 return;
824
825         case IA64_VEC_IA32_EXCEPTION:
826         case IA64_VEC_IA32_INTERCEPT:
827         case IA64_VEC_IA32_INTERRUPT:
828                 sig = SIGEMT;
829                 ucode = tf->tf_special.iip;
830                 break;
831
832         default:
833                 /* Reserved vectors get here. Should never happen of course. */
834                 trap_panic(vector, tf);
835                 break;
836         }
837
838         KASSERT(sig != 0, ("foo"));
839
840         if (print_usertrap)
841                 printtrap(vector, tf, 1, user);
842
843         ksiginfo_init(&ksi);
844         ksi.ksi_signo = sig;
845         ksi.ksi_code = ucode;
846         trapsignal(td, &ksi);
847
848 out:
849         if (user) {
850                 userret(td, tf);
851                 mtx_assert(&Giant, MA_NOTOWNED);
852                 do_ast(tf);
853         }
854         return;
855 }
856
857 /*
858  * Handle break instruction based system calls.
859  */
void
break_syscall(struct trapframe *tf)
{
	uint64_t *bsp, *tfp;
	uint64_t iip, psr;
	int error, nargs;

	/*
	 * Save the bundle address (iip) and psr (which holds the slot
	 * index, psr.ri) of the break instruction, so the syscall can be
	 * re-executed from the same spot if it returns ERESTART.
	 */
	iip = tf->tf_special.iip;
	psr = tf->tf_special.psr;

	/*
	 * Advance past the break: bump psr.ri to the next instruction
	 * slot; if we moved beyond slot 2 (the last slot in a bundle),
	 * step iip to the next 16-byte bundle and reset the slot index.
	 */
	tf->tf_special.psr += IA64_PSR_RI_1;
	if ((tf->tf_special.psr & IA64_PSR_RI) > IA64_PSR_RI_2) {
		tf->tf_special.iip += 16;
		tf->tf_special.psr &= ~IA64_PSR_RI;
	}

	/*
	 * Copy the arguments on the register stack into the trapframe
	 * to avoid having interleaved NaT collections.
	 */
	/* Arguments are staged in the trapframe starting at gr16. */
	tfp = &tf->tf_scratch.gr16;
	/* Low 7 bits of cfm: size of the current register frame (sof). */
	nargs = tf->tf_special.cfm & 0x7f;
	/*
	 * Locate the end of the flushed frame on the kernel RSE backing
	 * store: kernel stack base + ndirty bytes + the sub-512-byte
	 * offset of the user's bspstore.
	 */
	bsp = (uint64_t*)(curthread->td_kstack + tf->tf_special.ndirty +
	    (tf->tf_special.bspstore & 0x1ffUL));
	/*
	 * Back up over the argument slots; back up one extra slot when a
	 * NaT collection (one per 512-byte/64-slot region, stored at
	 * offset 0x1f8 within it) falls inside the argument range.
	 */
	bsp -= (((uintptr_t)bsp & 0x1ff) < (nargs << 3)) ? (nargs + 1): nargs;
	while (nargs--) {
		*tfp++ = *bsp++;
		/* Skip the NaT collection slot in the backing store. */
		if (((uintptr_t)bsp & 0x1ff) == 0x1f8)
			bsp++;
	}
	error = syscall(tf);
	if (error == ERESTART) {
		/* Rewind iip/psr so the break instruction re-executes. */
		tf->tf_special.iip = iip;
		tf->tf_special.psr = psr;
	}

	/* Deliver any pending asynchronous system traps before return. */
	do_ast(tf);
}
900
901 int
902 cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
903 {
904         struct proc *p;
905         struct trapframe *tf;
906
907         p = td->td_proc;
908         tf = td->td_frame;
909
910         sa->code = tf->tf_scratch.gr15;
911         sa->args = &tf->tf_scratch.gr16;
912
913         /*
914          * syscall() and __syscall() are handled the same on
915          * the ia64, as everything is 64-bit aligned, anyway.
916          */
917         if (sa->code == SYS_syscall || sa->code == SYS___syscall) {
918                 /*
919                  * Code is first argument, followed by actual args.
920                  */
921                 sa->code = sa->args[0];
922                 sa->args++;
923         }
924
925         if (p->p_sysent->sv_mask)
926                 sa->code &= p->p_sysent->sv_mask;
927         if (sa->code >= p->p_sysent->sv_size)
928                 sa->callp = &p->p_sysent->sv_table[0];
929         else
930                 sa->callp = &p->p_sysent->sv_table[sa->code];
931         sa->narg = sa->callp->sy_narg;
932
933         td->td_retval[0] = 0;
934         td->td_retval[1] = 0;
935
936         return (0);
937 }
938
939 /*
940  * Process a system call.
941  *
942  * See syscall.s for details as to how we get here. In order to support
943  * the ERESTART case, we return the error to our caller. They deal with
944  * the hairy details.
945  */
946 int
947 syscall(struct trapframe *tf)
948 {
949         struct syscall_args sa;
950         struct thread *td;
951         int error;
952
953         td = curthread;
954         td->td_frame = tf;
955
956         ia64_set_fpsr(IA64_FPSR_DEFAULT);
957         tf->tf_scratch.gr10 = EJUSTRETURN;
958
959         error = syscallenter(td, &sa);
960         syscallret(td, error, &sa);
961
962         return (error);
963 }