/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/_inttypes.h>
#include <machine/altivec.h>
#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/spr.h>
#include <machine/sr.h>

/* Below matches setjmp.S */
#define FAULTBUF_LR     21
#define FAULTBUF_R1     1
#define FAULTBUF_R2     2
#define FAULTBUF_CR     22
#define FAULTBUF_R14    3

#define MOREARGS(sp)    ((caddr_t)((uintptr_t)(sp) + \
    sizeof(struct callframe) - 3*sizeof(register_t))) /* more args go here */

static void     trap_fatal(struct trapframe *frame);
static void     printtrap(u_int vector, struct trapframe *frame, int isfatal,
                    int user);
static int      trap_pfault(struct trapframe *frame, int user);
static int      fix_unaligned(struct thread *td, struct trapframe *frame);
static int      handle_onfault(struct trapframe *frame);
static void     syscall(struct trapframe *frame);

#if defined(__powerpc64__) && defined(AIM)
       void     handle_kernel_slb_spill(int, register_t, register_t);
static int      handle_user_slb_spill(pmap_t pm, vm_offset_t addr);
extern int      n_slbs;
static void     normalize_inputs(void);
#endif

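/*
 * printtrap() subtracts (__startkernel - KERNBASE) from srr0/lr so that the
 * link-time address is printed alongside the relocated one.
 */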
extern vm_offset_t __startkernel;

#ifdef KDB
int db_trap_glue(struct trapframe *);           /* Called from trap_subr.S */
#endif

struct powerpc_exception {
        u_int   vector;
        char    *name;
};

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

static struct powerpc_exception powerpc_exceptions[] = {
        { EXC_CRIT,     "critical input" },
        { EXC_RST,      "system reset" },
        { EXC_MCHK,     "machine check" },
        { EXC_DSI,      "data storage interrupt" },
        { EXC_DSE,      "data segment exception" },
        { EXC_ISI,      "instruction storage interrupt" },
        { EXC_ISE,      "instruction segment exception" },
        { EXC_EXI,      "external interrupt" },
        { EXC_ALI,      "alignment" },
        { EXC_PGM,      "program" },
        { EXC_HEA,      "hypervisor emulation assistance" },
        { EXC_FPU,      "floating-point unavailable" },
        { EXC_APU,      "auxiliary proc unavailable" },
        { EXC_DECR,     "decrementer" },
        { EXC_FIT,      "fixed-interval timer" },
        { EXC_WDOG,     "watchdog timer" },
        { EXC_SC,       "system call" },
        { EXC_TRC,      "trace" },
        { EXC_FPA,      "floating-point assist" },
        { EXC_DEBUG,    "debug" },
        { EXC_PERF,     "performance monitoring" },
        { EXC_VEC,      "altivec unavailable" },
        { EXC_VSX,      "vsx unavailable" },
        { EXC_FAC,      "facility unavailable" },
        { EXC_ITMISS,   "instruction tlb miss" },
        { EXC_DLMISS,   "data load tlb miss" },
        { EXC_DSMISS,   "data store tlb miss" },
        { EXC_BPT,      "instruction breakpoint" },
        { EXC_SMI,      "system management" },
        { EXC_VECAST_G4,        "altivec assist" },
        { EXC_THRM,     "thermal management" },
        { EXC_RUNMODETRC,       "run mode/trace" },
        { EXC_SOFT_PATCH, "soft patch exception" },
        { EXC_LAST,     NULL }
};

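/*
 * Bit-name strings in the kernel printf %b format, used by cpu_printtrap()
 * below to decode the ESR, MCSR and MSSSR0 registers.
 */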
#define ESR_BITMASK                                                     \
    "\20"                                                               \
    "\040b0\037b1\036b2\035b3\034PIL\033PRR\032PTR\031FP"               \
    "\030ST\027b9\026DLK\025ILK\024b12\023b13\022BO\021PIE"             \
    "\020b16\017b17\016b18\015b19\014b20\013b21\012b22\011b23"          \
    "\010SPE\007EPID\006b26\005b27\004b28\003b29\002b30\001b31"
#define MCSR_BITMASK                                                    \
    "\20"                                                               \
    "\040MCP\037ICERR\036DCERR\035TLBPERR\034L2MMU_MHIT\033b5\032b6\031b7"      \
    "\030b8\027b9\026b10\025NMI\024MAV\023MEA\022b14\021IF"             \
    "\020LD\017ST\016LDG\015b19\014b20\013b21\012b22\011b23"            \
    "\010b24\007b25\006b26\005b27\004b28\003b29\002TLBSYNC\001BSL2_ERR"
#define MSSSR_BITMASK                                                   \
    "\20"                                                               \
    "\040b0\037b1\036b2\035b3\034b4\033b5\032b6\031b7"                  \
    "\030b8\027b9\026b10\025b11\024b12\023L2TAG\022L2DAT\021L3TAG"      \
    "\020L3DAT\017APE\016DPE\015TEA\014b20\013b21\012b22\011b23"        \
    "\010b24\007b25\006b26\005b27\004b28\003b29\002b30\001b31"


static const char *
trapname(u_int vector)
{
        struct  powerpc_exception *pe;

        for (pe = powerpc_exceptions; pe->vector != EXC_LAST; pe++) {
                if (pe->vector == vector)
                        return (pe->name);
        }

        return ("unknown");
}

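/*
 * Returns true when the exception was raised by a trap instruction (used for
 * breakpoints and DTrace probes) rather than by some other program or debug
 * exception.
 */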
static inline bool
frame_is_trap_inst(struct trapframe *frame)
{
#ifdef AIM
        return (frame->exc == EXC_PGM && frame->srr1 & EXC_PGM_TRAP);
#else
        return ((frame->cpu.booke.esr & ESR_PTR) != 0);
#endif
}

void
trap(struct trapframe *frame)
{
        struct thread   *td;
        struct proc     *p;
#ifdef KDTRACE_HOOKS
        uint32_t inst;
#endif
        int             sig, type, user;
        u_int           ucode;
        ksiginfo_t      ksi;
        register_t      fscr;

        VM_CNT_INC(v_trap);

#ifdef KDB
        if (kdb_active) {
                kdb_reenter();
                return;
        }
#endif

        td = curthread;
        p = td->td_proc;

        type = ucode = frame->exc;
        sig = 0;
        user = frame->srr1 & PSL_PR;

        CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
            trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
        /*
         * A trap can occur while DTrace executes a probe. Before
         * executing the probe, DTrace blocks re-scheduling and sets
         * a flag in its per-cpu flags to indicate that it doesn't
         * want to fault. On returning from the probe, the no-fault
         * flag is cleared and finally re-scheduling is enabled.
         *
         * If the DTrace kernel module has registered a trap handler,
         * call it and if it returns non-zero, assume that it has
         * handled the trap and modified the trap frame so that this
         * function can return normally.
         */
        if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0)
                return;
#endif

        if (user) {
                td->td_pticks = 0;
                td->td_frame = frame;
                if (td->td_cowgen != p->p_cowgen)
                        thread_cow_update(td);

                /* User Mode Traps */
                switch (type) {
                case EXC_RUNMODETRC:
                case EXC_TRC:
                        frame->srr1 &= ~PSL_SE;
                        sig = SIGTRAP;
                        ucode = TRAP_TRACE;
                        break;

#if defined(__powerpc64__) && defined(AIM)
                case EXC_ISE:
                case EXC_DSE:
                        if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
                            (type == EXC_ISE) ? frame->srr0 : frame->dar) != 0){
                                sig = SIGSEGV;
                                ucode = SEGV_MAPERR;
                        }
                        break;
#endif
                case EXC_DSI:
                case EXC_ISI:
                        sig = trap_pfault(frame, 1);
                        if (sig == SIGSEGV)
                                ucode = SEGV_MAPERR;
                        break;

                case EXC_SC:
                        syscall(frame);
                        break;

                case EXC_FPU:
                        KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
                            ("FPU already enabled for thread"));
                        enable_fpu(td);
                        break;

                case EXC_VEC:
                        KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
                            ("Altivec already enabled for thread"));
                        enable_vec(td);
                        break;

                case EXC_VSX:
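                        /*
                         * VSX shares its register file with the FPU and
                         * Altivec, so make sure both units' state is set up
                         * before marking VSX enabled for the thread.
                         */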
                        KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX,
                            ("VSX already enabled for thread"));
                        if (!(td->td_pcb->pcb_flags & PCB_VEC))
                                enable_vec(td);
                        if (!(td->td_pcb->pcb_flags & PCB_FPU))
                                save_fpu(td);
                        td->td_pcb->pcb_flags |= PCB_VSX;
                        enable_fpu(td);
                        break;

                case EXC_FAC:
                        fscr = mfspr(SPR_FSCR);
                        if ((fscr & FSCR_IC_MASK) == FSCR_IC_HTM) {
                                CTR0(KTR_TRAP, "Hardware Transactional Memory subsystem disabled");
                        }
                        sig = SIGILL;
                        ucode = ILL_ILLOPC;
                        break;
                case EXC_HEA:
                        sig = SIGILL;
                        ucode = ILL_ILLOPC;
                        break;

                case EXC_VECAST_E:
                case EXC_VECAST_G4:
                case EXC_VECAST_G5:
                        /*
                         * We get a VPU assist exception for IEEE mode
                         * vector operations on denormalized floats.
                         * Emulating this is a giant pain, so for now,
                         * just switch off IEEE mode and treat them as
                         * zero.
                         */

                        save_vec(td);
                        td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
                        enable_vec(td);
                        break;

                case EXC_ALI:
                        if (fix_unaligned(td, frame) != 0) {
                                sig = SIGBUS;
                                ucode = BUS_ADRALN;
                        }
                        else
                                frame->srr0 += 4;
                        break;

                case EXC_DEBUG: /* Single stepping */
                        mtspr(SPR_DBSR, mfspr(SPR_DBSR));
                        frame->srr1 &= ~PSL_DE;
                        frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
                        sig = SIGTRAP;
                        ucode = TRAP_TRACE;
                        break;

                case EXC_PGM:
                        /* Identify the trap reason */
                        if (frame_is_trap_inst(frame)) {
#ifdef KDTRACE_HOOKS
                                inst = fuword32((const void *)frame->srr0);
                                if (inst == 0x0FFFDDDD &&
                                    dtrace_pid_probe_ptr != NULL) {
                                        (*dtrace_pid_probe_ptr)(frame);
                                        break;
                                }
#endif
                                sig = SIGTRAP;
                                ucode = TRAP_BRKPT;
                        } else {
                                sig = ppc_instr_emulate(frame, td->td_pcb);
                                if (sig == SIGILL) {
                                        if (frame->srr1 & EXC_PGM_PRIV)
                                                ucode = ILL_PRVOPC;
                                        else if (frame->srr1 & EXC_PGM_ILLEGAL)
                                                ucode = ILL_ILLOPC;
                                } else if (sig == SIGFPE)
                                        ucode = FPE_FLTINV;     /* Punt for now, invalid operation. */
                        }
                        break;

                case EXC_MCHK:
                        /*
                         * Note that this may not be recoverable for the user
                         * process, depending on the type of machine check,
                         * but it at least prevents the kernel from dying.
                         */
                        sig = SIGBUS;
                        ucode = BUS_OBJERR;
                        break;

#if defined(__powerpc64__) && defined(AIM)
                case EXC_SOFT_PATCH:
                        /*
                         * Point to the instruction that generated the exception to execute it again,
                         * and normalize the register values.
                         */
                        frame->srr0 -= 4;
                        normalize_inputs();
                        break;
#endif

                default:
                        trap_fatal(frame);
                }
        } else {
                /* Kernel Mode Traps */

                KASSERT(cold || td->td_ucred != NULL,
                    ("kernel trap doesn't have ucred"));
                switch (type) {
                case EXC_PGM:
#ifdef KDTRACE_HOOKS
                        if (frame_is_trap_inst(frame)) {
                                if (*(uint32_t *)frame->srr0 == EXC_DTRACE) {
                                        if (dtrace_invop_jump_addr != NULL) {
                                                dtrace_invop_jump_addr(frame);
                                                return;
                                        }
                                }
                        }
#endif
#ifdef KDB
                        if (db_trap_glue(frame))
                                return;
#endif
                        break;
#if defined(__powerpc64__) && defined(AIM)
                case EXC_DSE:
                        if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0 &&
                            (frame->dar & SEGMENT_MASK) == USER_ADDR) {
                                __asm __volatile ("slbmte %0, %1" ::
                                        "r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
                                        "r"(USER_SLB_SLBE));
                                return;
                        }
                        break;
#endif
                case EXC_DSI:
                        if (trap_pfault(frame, 0) == 0)
                                return;
                        break;
                case EXC_MCHK:
                        if (handle_onfault(frame))
                                return;
                        break;
                default:
                        break;
                }
                trap_fatal(frame);
        }

        if (sig != 0) {
                if (p->p_sysent->sv_transtrap != NULL)
                        sig = (p->p_sysent->sv_transtrap)(sig, type);
                ksiginfo_init_trap(&ksi);
                ksi.ksi_signo = sig;
                ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
                ksi.ksi_addr = (void *)frame->srr0;
                ksi.ksi_trapno = type;
                trapsignal(td, &ksi);
        }

        userret(td, frame);
}

static void
trap_fatal(struct trapframe *frame)
{
#ifdef KDB
        bool handled;
#endif

        printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef KDB
        if (debugger_on_trap) {
                kdb_why = KDB_WHY_TRAP;
                handled = kdb_trap(frame->exc, 0, frame);
                kdb_why = KDB_WHY_UNSET;
                if (handled)
                        return;
        }
#endif
        panic("%s trap", trapname(frame->exc));
}

static void
cpu_printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{
#ifdef AIM
        uint16_t ver;

        switch (vector) {
        case EXC_DSE:
        case EXC_DSI:
        case EXC_DTMISS:
                printf("   dsisr           = 0x%lx\n",
                    (u_long)frame->cpu.aim.dsisr);
                break;
        case EXC_MCHK:
                ver = mfpvr() >> 16;
                if (MPC745X_P(ver))
                        printf("    msssr0         = 0x%b\n",
                            (int)mfspr(SPR_MSSSR0), MSSSR_BITMASK);
                break;
        }
#elif defined(BOOKE)
        vm_paddr_t pa;

        switch (vector) {
        case EXC_MCHK:
                pa = mfspr(SPR_MCARU);
                pa = (pa << 32) | (u_register_t)mfspr(SPR_MCAR);
                printf("   mcsr            = 0x%b\n",
                    (int)mfspr(SPR_MCSR), MCSR_BITMASK);
                printf("   mcar            = 0x%jx\n", (uintmax_t)pa);
        }
        printf("   esr             = 0x%b\n",
            (int)frame->cpu.booke.esr, ESR_BITMASK);
#endif
}

static void
printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{

        printf("\n");
        printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
            user ? "user" : "kernel");
        printf("\n");
        printf("   exception       = 0x%x (%s)\n", vector, trapname(vector));
        switch (vector) {
        case EXC_DSE:
        case EXC_DSI:
        case EXC_DTMISS:
                printf("   virtual address = 0x%" PRIxPTR "\n", frame->dar);
                break;
        case EXC_ISE:
        case EXC_ISI:
        case EXC_ITMISS:
                printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
                break;
        case EXC_MCHK:
                break;
        }
        cpu_printtrap(vector, frame, isfatal, user);
        printf("   srr0            = 0x%" PRIxPTR " (0x%" PRIxPTR ")\n",
            frame->srr0, frame->srr0 - (register_t)(__startkernel - KERNBASE));
        printf("   srr1            = 0x%lx\n", (u_long)frame->srr1);
        printf("   current msr     = 0x%" PRIxPTR "\n", mfmsr());
        printf("   lr              = 0x%" PRIxPTR " (0x%" PRIxPTR ")\n",
            frame->lr, frame->lr - (register_t)(__startkernel - KERNBASE));
        printf("   curthread       = %p\n", curthread);
        if (curthread != NULL)
                printf("          pid = %d, comm = %s\n",
                    curthread->td_proc->p_pid, curthread->td_name);
        printf("\n");
}

/*
 * Handles a fatal fault when we have onfault state to recover.  Returns
 * non-zero if there was onfault recovery state available.
 */
static int
handle_onfault(struct trapframe *frame)
{
        struct          thread *td;
        jmp_buf         *fb;

        td = curthread;
        fb = td->td_pcb->pcb_onfault;
        if (fb != NULL) {
                frame->srr0 = (*fb)->_jb[FAULTBUF_LR];
                frame->fixreg[1] = (*fb)->_jb[FAULTBUF_R1];
                frame->fixreg[2] = (*fb)->_jb[FAULTBUF_R2];
                frame->fixreg[3] = 1;
                frame->cr = (*fb)->_jb[FAULTBUF_CR];
                bcopy(&(*fb)->_jb[FAULTBUF_R14], &frame->fixreg[14],
                    18 * sizeof(register_t));
                td->td_pcb->pcb_onfault = NULL; /* Returns twice, not thrice */
                return (1);
        }
        return (0);
}

int
cpu_fetch_syscall_args(struct thread *td)
{
        struct proc *p;
        struct trapframe *frame;
        struct syscall_args *sa;
        caddr_t params;
        size_t argsz;
        int error, n, i;

        p = td->td_proc;
        frame = td->td_frame;
        sa = &td->td_sa;

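        /*
         * The syscall number arrives in r0 (fixreg[0]); the first NARGREG
         * arguments are in the registers starting at fixreg[FIRSTARG], and
         * any remaining arguments sit on the caller's stack at MOREARGS(sp).
         */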
        sa->code = frame->fixreg[0];
        params = (caddr_t)(frame->fixreg + FIRSTARG);
        n = NARGREG;

        if (sa->code == SYS_syscall) {
                /*
                 * code is first argument,
                 * followed by actual args.
                 */
                sa->code = *(register_t *) params;
                params += sizeof(register_t);
                n -= 1;
        } else if (sa->code == SYS___syscall) {
                /*
                 * Like syscall, but code is a quad,
                 * so as to maintain quad alignment
                 * for the rest of the args.
                 */
                if (SV_PROC_FLAG(p, SV_ILP32)) {
                        params += sizeof(register_t);
                        sa->code = *(register_t *) params;
                        params += sizeof(register_t);
                        n -= 2;
                } else {
                        sa->code = *(register_t *) params;
                        params += sizeof(register_t);
                        n -= 1;
                }
        }

        if (sa->code >= p->p_sysent->sv_size)
                sa->callp = &p->p_sysent->sv_table[0];
        else
                sa->callp = &p->p_sysent->sv_table[sa->code];

        sa->narg = sa->callp->sy_narg;

        if (SV_PROC_FLAG(p, SV_ILP32)) {
                argsz = sizeof(uint32_t);

                for (i = 0; i < n; i++)
                        sa->args[i] = ((u_register_t *)(params))[i] &
                            0xffffffff;
        } else {
                argsz = sizeof(uint64_t);

                for (i = 0; i < n; i++)
                        sa->args[i] = ((u_register_t *)(params))[i];
        }

        if (sa->narg > n)
                error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
                               (sa->narg - n) * argsz);
        else
                error = 0;

#ifdef __powerpc64__
        if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) {
                /* Expand the size of arguments copied from the stack */

                for (i = sa->narg; i >= n; i--)
                        sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
        }
#endif

        if (error == 0) {
                td->td_retval[0] = 0;
                td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
        }
        return (error);
}

#include "../../kern/subr_syscall.c"

void
syscall(struct trapframe *frame)
{
        struct thread *td;
        int error;

        td = curthread;
        td->td_frame = frame;

#if defined(__powerpc64__) && defined(AIM)
        /*
         * Speculatively restore last user SLB segment, which we know is
         * invalid already, since we are likely to do copyin()/copyout().
         */
        if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0)
                __asm __volatile ("slbmte %0, %1; isync" ::
                    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#endif

        error = syscallenter(td);
        syscallret(td, error);
}

#if defined(__powerpc64__) && defined(AIM)
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
        struct slb *slbcache;
        uint64_t slbe, slbv;
        uint64_t esid, addr;
        int i;

        addr = (type == EXC_ISE) ? srr0 : dar;
        slbcache = PCPU_GET(aim.slb);
        esid = (uintptr_t)addr >> ADDR_SR_SHFT;
        slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

        /* See if the hardware flushed this somehow (can happen in LPARs) */
        for (i = 0; i < n_slbs; i++)
                if (slbcache[i].slbe == (slbe | (uint64_t)i))
                        return;

        /* Not in the map, needs to actually be added */
        slbv = kernel_va_to_slbv(addr);
        if (slbcache[USER_SLB_SLOT].slbe == 0) {
                for (i = 0; i < n_slbs; i++) {
                        if (i == USER_SLB_SLOT)
                                continue;
                        if (!(slbcache[i].slbe & SLBE_VALID))
                                goto fillkernslb;
                }

                if (i == n_slbs)
                        slbcache[USER_SLB_SLOT].slbe = 1;
        }

        /* Sacrifice a random SLB entry that is not the user entry */
        i = mftb() % n_slbs;
        if (i == USER_SLB_SLOT)
                i = (i+1) % n_slbs;

fillkernslb:
        /* Write new entry */
        slbcache[i].slbv = slbv;
        slbcache[i].slbe = slbe | (uint64_t)i;

        /* Trap handler will restore from cache on exit */
}

static int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
        struct slb *user_entry;
        uint64_t esid;
        int i;

        if (pm->pm_slb == NULL)
                return (-1);

        esid = (uintptr_t)addr >> ADDR_SR_SHFT;

        PMAP_LOCK(pm);
        user_entry = user_va_to_slb_entry(pm, addr);

        if (user_entry == NULL) {
                /* allocate_vsid auto-spills it */
                (void)allocate_user_vsid(pm, esid, 0);
        } else {
                /*
                 * Check that another CPU has not already mapped this.
                 * XXX: Per-thread SLB caches would be better.
                 */
                for (i = 0; i < pm->pm_slb_len; i++)
                        if (pm->pm_slb[i] == user_entry)
                                break;

                if (i == pm->pm_slb_len)
                        slb_insert_user(pm, user_entry);
        }
        PMAP_UNLOCK(pm);

        return (0);
}
#endif

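/*
 * Resolve a page fault described by the trap frame.  Returns 0 if the fault
 * was handled (including kernel faults recovered via onfault), otherwise
 * SIGSEGV for the caller to deliver.
 */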
static int
trap_pfault(struct trapframe *frame, int user)
{
        vm_offset_t     eva, va;
        struct          thread *td;
        struct          proc *p;
        vm_map_t        map;
        vm_prot_t       ftype;
        int             rv, is_user;

        td = curthread;
        p = td->td_proc;
        if (frame->exc == EXC_ISI) {
                eva = frame->srr0;
                ftype = VM_PROT_EXECUTE;
                if (frame->srr1 & SRR1_ISI_PFAULT)
                        ftype |= VM_PROT_READ;
        } else {
                eva = frame->dar;
#ifdef BOOKE
                if (frame->cpu.booke.esr & ESR_ST)
#else
                if (frame->cpu.aim.dsisr & DSISR_STORE)
#endif
                        ftype = VM_PROT_WRITE;
                else
                        ftype = VM_PROT_READ;
        }

        if (user) {
                KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace  NULL"));
                map = &p->p_vmspace->vm_map;
        } else {
                rv = pmap_decode_kernel_ptr(eva, &is_user, &eva);
                if (rv != 0)
                        return (SIGSEGV);

                if (is_user)
                        map = &p->p_vmspace->vm_map;
                else
                        map = kernel_map;
        }
        va = trunc_page(eva);

        /* Fault in the page. */
        rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
        /*
         * XXXDTRACE: add dtrace_doubletrap_func here?
         */

        if (rv == KERN_SUCCESS)
                return (0);

        if (!user && handle_onfault(frame))
                return (0);

        return (SIGSEGV);
}

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

static int
fix_unaligned(struct thread *td, struct trapframe *frame)
{
        struct thread   *fputhread;
#ifdef  __SPE__
        uint32_t        inst;
#endif
        int             indicator, reg;
        double          *fpr;

#ifdef __SPE__
        indicator = (frame->cpu.booke.esr & (ESR_ST|ESR_SPE));
        if (indicator & ESR_SPE) {
                if (copyin((void *)frame->srr0, &inst, sizeof(inst)) != 0)
                        return (-1);
                reg = EXC_ALI_SPE_REG(inst);
                fpr = (double *)td->td_pcb->pcb_vec.vr[reg];
                fputhread = PCPU_GET(vecthread);

                /* Juggle the SPE to ensure that we've initialized
                 * the registers, and that their current state is in
                 * the PCB.
                 */
                if (fputhread != td) {
                        if (fputhread)
                                save_vec(fputhread);
                        enable_vec(td);
                }
                save_vec(td);

                if (!(indicator & ESR_ST)) {
                        if (copyin((void *)frame->dar, fpr,
                            sizeof(double)) != 0)
                                return (-1);
                        frame->fixreg[reg] = td->td_pcb->pcb_vec.vr[reg][1];
                        enable_vec(td);
                } else {
                        td->td_pcb->pcb_vec.vr[reg][1] = frame->fixreg[reg];
                        if (copyout(fpr, (void *)frame->dar,
                            sizeof(double)) != 0)
                                return (-1);
                }
                return (0);
        }
#else
        indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);

        switch (indicator) {
        case EXC_ALI_LFD:
        case EXC_ALI_STFD:
                reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
                fpr = &td->td_pcb->pcb_fpu.fpr[reg].fpr;
                fputhread = PCPU_GET(fputhread);

                /* Juggle the FPU to ensure that we've initialized
                 * the FPRs, and that their current state is in
                 * the PCB.
                 */
                if (fputhread != td) {
                        if (fputhread)
                                save_fpu(fputhread);
                        enable_fpu(td);
                }
                save_fpu(td);

                if (indicator == EXC_ALI_LFD) {
                        if (copyin((void *)frame->dar, fpr,
                            sizeof(double)) != 0)
                                return (-1);
                        enable_fpu(td);
                } else {
                        if (copyout(fpr, (void *)frame->dar,
                            sizeof(double)) != 0)
                                return (-1);
                }
                return (0);
                break;
        }
#endif

        return (-1);
}

#if defined(__powerpc64__) && defined(AIM)
#define MSKNSHL(x, m, n) "(((" #x ") & " #m ") << " #n ")"
#define MSKNSHR(x, m, n) "(((" #x ") & " #m ") >> " #n ")"

/* xvcpsgndp instruction, built in opcode format.
 * This can be changed to use mnemonic after a toolchain update.
 */
#define XVCPSGNDP(xt, xa, xb) \
        __asm __volatile(".long (" \
                MSKNSHL(60, 0x3f, 26) " | " \
                MSKNSHL(xt, 0x1f, 21) " | " \
                MSKNSHL(xa, 0x1f, 16) " | " \
                MSKNSHL(xb, 0x1f, 11) " | " \
                MSKNSHL(240, 0xff, 3) " | " \
                MSKNSHR(xa,  0x20, 3) " | " \
                MSKNSHR(xa,  0x20, 4) " | " \
                MSKNSHR(xa,  0x20, 5) ")")

/* Macros to normalize 1 or 10 VSX registers */
#define NORM(x) XVCPSGNDP(x, x, x)
#define NORM10(x) \
        NORM(x ## 0); NORM(x ## 1); NORM(x ## 2); NORM(x ## 3); NORM(x ## 4); \
        NORM(x ## 5); NORM(x ## 6); NORM(x ## 7); NORM(x ## 8); NORM(x ## 9)

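/*
 * Rewrite all 64 VSX registers onto themselves with xvcpsgndp (via the NORM
 * macros above); the EXC_SOFT_PATCH case in trap() backs srr0 up so that the
 * faulting instruction re-executes after the registers are normalized.
 */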
static void
normalize_inputs(void)
{
        unsigned long msr;

        /* enable VSX */
        msr = mfmsr();
        mtmsr(msr | PSL_VSX);

        NORM(0);   NORM(1);   NORM(2);   NORM(3);   NORM(4);
        NORM(5);   NORM(6);   NORM(7);   NORM(8);   NORM(9);
        NORM10(1); NORM10(2); NORM10(3); NORM10(4); NORM10(5);
        NORM(60);  NORM(61);  NORM(62);  NORM(63);

        /* restore MSR */
        mtmsr(msr);
}
#endif

#ifdef KDB
int
db_trap_glue(struct trapframe *frame)
{

        if (!(frame->srr1 & PSL_PR)
            && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
                || frame_is_trap_inst(frame)
                || frame->exc == EXC_BPT
                || frame->exc == EXC_DEBUG
                || frame->exc == EXC_DSI)) {
                int type = frame->exc;

                /* Ignore DTrace traps. */
                if (*(uint32_t *)frame->srr0 == EXC_DTRACE)
                        return (0);
                if (frame_is_trap_inst(frame)) {
                        type = T_BREAKPOINT;
                }
                return (kdb_trap(type, 0, frame));
        }

        return (0);
}
#endif