/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/_inttypes.h>
#include <machine/altivec.h>
#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/spr.h>
#include <machine/sr.h>

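/*
 * Offsets into the faultbuf registered in pcb_onfault; handle_onfault()
 * uses them to restore the saved LR, r1, r2, CR and r13-r31 so that
 * execution resumes at the recovery point.
 */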
#define FAULTBUF_LR     0
#define FAULTBUF_R1     1
#define FAULTBUF_R2     2
#define FAULTBUF_CR     3
#define FAULTBUF_R13    4

static void     trap_fatal(struct trapframe *frame);
static void     printtrap(u_int vector, struct trapframe *frame, int isfatal,
                    int user);
static int      trap_pfault(struct trapframe *frame, int user);
static int      fix_unaligned(struct thread *td, struct trapframe *frame);
static int      handle_onfault(struct trapframe *frame);
static void     syscall(struct trapframe *frame);

#ifdef __powerpc64__
       void     handle_kernel_slb_spill(int, register_t, register_t);
static int      handle_user_slb_spill(pmap_t pm, vm_offset_t addr);
extern int      n_slbs;
#endif

struct powerpc_exception {
        u_int   vector;
        char    *name;
};

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

static struct powerpc_exception powerpc_exceptions[] = {
        { EXC_CRIT,     "critical input" },
        { EXC_RST,      "system reset" },
        { EXC_MCHK,     "machine check" },
        { EXC_DSI,      "data storage interrupt" },
        { EXC_DSE,      "data segment exception" },
        { EXC_ISI,      "instruction storage interrupt" },
        { EXC_ISE,      "instruction segment exception" },
        { EXC_EXI,      "external interrupt" },
        { EXC_ALI,      "alignment" },
        { EXC_PGM,      "program" },
        { EXC_FPU,      "floating-point unavailable" },
        { EXC_APU,      "auxiliary proc unavailable" },
        { EXC_DECR,     "decrementer" },
        { EXC_FIT,      "fixed-interval timer" },
        { EXC_WDOG,     "watchdog timer" },
        { EXC_SC,       "system call" },
        { EXC_TRC,      "trace" },
        { EXC_FPA,      "floating-point assist" },
        { EXC_DEBUG,    "debug" },
        { EXC_PERF,     "performance monitoring" },
        { EXC_VEC,      "altivec unavailable" },
        { EXC_VSX,      "vsx unavailable" },
        { EXC_ITMISS,   "instruction tlb miss" },
        { EXC_DLMISS,   "data load tlb miss" },
        { EXC_DSMISS,   "data store tlb miss" },
        { EXC_BPT,      "instruction breakpoint" },
        { EXC_SMI,      "system management" },
        { EXC_VECAST_G4,        "altivec assist" },
        { EXC_THRM,     "thermal management" },
        { EXC_RUNMODETRC,       "run mode/trace" },
        { EXC_LAST,     NULL }
};

static const char *
trapname(u_int vector)
{
        struct  powerpc_exception *pe;

        for (pe = powerpc_exceptions; pe->vector != EXC_LAST; pe++) {
                if (pe->vector == vector)
                        return (pe->name);
        }

        return ("unknown");
}

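/*
 * Common trap entry point.  Dispatches on the exception type: user-mode
 * traps are turned into signals, system calls, lazy FP/vector enablement,
 * or page/SLB fault handling; the few recoverable kernel-mode traps are
 * handled in place, and anything else is fatal.
 */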
void
trap(struct trapframe *frame)
{
        struct thread   *td;
        struct proc     *p;
#ifdef KDTRACE_HOOKS
        uint32_t inst;
#endif
        int             sig, type, user;
        u_int           ucode;
        ksiginfo_t      ksi;

        PCPU_INC(cnt.v_trap);

        td = curthread;
        p = td->td_proc;

        type = ucode = frame->exc;
        sig = 0;
        user = frame->srr1 & PSL_PR;

        CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
            trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
        /*
         * A trap can occur while DTrace executes a probe. Before
         * executing the probe, DTrace blocks re-scheduling and sets
         * a flag in its per-cpu flags to indicate that it doesn't
         * want to fault. On returning from the probe, the no-fault
         * flag is cleared and finally re-scheduling is enabled.
         *
         * If the DTrace kernel module has registered a trap handler,
         * call it and if it returns non-zero, assume that it has
         * handled the trap and modified the trap frame so that this
         * function can return normally.
         */
        if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0)
                return;
#endif

        if (user) {
                td->td_pticks = 0;
                td->td_frame = frame;
                if (td->td_cowgen != p->p_cowgen)
                        thread_cow_update(td);

                /* User Mode Traps */
                switch (type) {
                case EXC_RUNMODETRC:
                case EXC_TRC:
                        frame->srr1 &= ~PSL_SE;
                        sig = SIGTRAP;
                        ucode = TRAP_TRACE;
                        break;

#ifdef __powerpc64__
                case EXC_ISE:
                case EXC_DSE:
                        if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
                            (type == EXC_ISE) ? frame->srr0 : frame->dar) != 0){
                                sig = SIGSEGV;
                                ucode = SEGV_MAPERR;
                        }
                        break;
#endif
                case EXC_DSI:
                case EXC_ISI:
                        sig = trap_pfault(frame, 1);
                        if (sig == SIGSEGV)
                                ucode = SEGV_MAPERR;
                        break;

                case EXC_SC:
                        syscall(frame);
                        break;

                case EXC_FPU:
                        KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
                            ("FPU already enabled for thread"));
                        enable_fpu(td);
                        break;

                case EXC_VEC:
                        KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
                            ("Altivec already enabled for thread"));
                        enable_vec(td);
                        break;

                case EXC_VSX:
                        KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX,
                            ("VSX already enabled for thread"));
                        if (!(td->td_pcb->pcb_flags & PCB_VEC))
                                enable_vec(td);
                        if (!(td->td_pcb->pcb_flags & PCB_FPU))
                                save_fpu(td);
                        td->td_pcb->pcb_flags |= PCB_VSX;
                        enable_fpu(td);
                        break;

                case EXC_VECAST_G4:
                case EXC_VECAST_G5:
                        /*
                         * We get a VPU assist exception for IEEE mode
                         * vector operations on denormalized floats.
                         * Emulating this is a giant pain, so for now,
                         * just switch off IEEE mode and treat them as
                         * zero.
                         */

                        save_vec(td);
                        td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
                        enable_vec(td);
                        break;

                case EXC_ALI:
                        if (fix_unaligned(td, frame) != 0) {
                                sig = SIGBUS;
                                ucode = BUS_ADRALN;
                        }
                        else
                                frame->srr0 += 4;
                        break;

                case EXC_DEBUG: /* Single stepping */
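                        /*
                         * DBSR bits are write-one-to-clear; this acks any
                         * pending debug events before single-step is
                         * switched off below.
                         */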
                        mtspr(SPR_DBSR, mfspr(SPR_DBSR));
                        frame->srr1 &= ~PSL_DE;
                        frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
                        sig = SIGTRAP;
                        ucode = TRAP_TRACE;
                        break;

                case EXC_PGM:
                        /* Identify the trap reason */
#ifdef AIM
                        if (frame->srr1 & EXC_PGM_TRAP) {
#else
                        if (frame->cpu.booke.esr & ESR_PTR) {
#endif
#ifdef KDTRACE_HOOKS
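                                /*
                                 * A breakpoint here may be the DTrace pid
                                 * provider's trap instruction; if so, hand
                                 * it to the registered probe handler rather
                                 * than delivering SIGTRAP.
                                 */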
                                inst = fuword32((const void *)frame->srr0);
                                if (inst == 0x0FFFDDDD &&
                                    dtrace_pid_probe_ptr != NULL) {
                                        struct reg regs;
                                        fill_regs(td, &regs);
                                        (*dtrace_pid_probe_ptr)(&regs);
                                        break;
                                }
#endif
                                sig = SIGTRAP;
                                ucode = TRAP_BRKPT;
                        } else {
                                sig = ppc_instr_emulate(frame, td->td_pcb);
                                if (sig == SIGILL) {
                                        if (frame->srr1 & EXC_PGM_PRIV)
                                                ucode = ILL_PRVOPC;
                                        else if (frame->srr1 & EXC_PGM_ILLEGAL)
                                                ucode = ILL_ILLOPC;
                                } else if (sig == SIGFPE)
                                        ucode = FPE_FLTINV;     /* Punt for now, invalid operation. */
                        }
                        break;

                case EXC_MCHK:
                        /*
                         * Note that this may not be recoverable for the user
                         * process, depending on the type of machine check,
                         * but it at least prevents the kernel from dying.
                         */
                        sig = SIGBUS;
                        ucode = BUS_OBJERR;
                        break;

                default:
                        trap_fatal(frame);
                }
        } else {
                /* Kernel Mode Traps */

                KASSERT(cold || td->td_ucred != NULL,
                    ("kernel trap doesn't have ucred"));
                switch (type) {
#ifdef KDTRACE_HOOKS
                case EXC_PGM:
                        if (frame->srr1 & EXC_PGM_TRAP) {
                                if (*(uint32_t *)frame->srr0 == EXC_DTRACE) {
                                        if (dtrace_invop_jump_addr != NULL) {
                                                dtrace_invop_jump_addr(frame);
                                                return;
                                        }
                                }
                        }
                        break;
#endif
#ifdef __powerpc64__
                case EXC_DSE:
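                        /*
                         * A kernel fault on the user memory segment (e.g.
                         * during copyin()/copyout()) means the user SLB
                         * entry is not currently installed; put it back
                         * and retry.
                         */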
                        if ((frame->dar & SEGMENT_MASK) == USER_ADDR) {
                                __asm __volatile ("slbmte %0, %1" ::
                                        "r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
                                        "r"(USER_SLB_SLBE));
                                return;
                        }
                        break;
#endif
                case EXC_DSI:
                        if (trap_pfault(frame, 0) == 0)
                                return;
                        break;
                case EXC_MCHK:
                        if (handle_onfault(frame))
                                return;
                        break;
                default:
                        break;
                }
                trap_fatal(frame);
        }

        if (sig != 0) {
                if (p->p_sysent->sv_transtrap != NULL)
                        sig = (p->p_sysent->sv_transtrap)(sig, type);
                ksiginfo_init_trap(&ksi);
                ksi.ksi_signo = sig;
                ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
                /* ksi.ksi_addr = ? */
                ksi.ksi_trapno = type;
                trapsignal(td, &ksi);
        }

        userret(td, frame);
}

static void
trap_fatal(struct trapframe *frame)
{

        printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef KDB
        if ((debugger_on_panic || kdb_active) &&
            kdb_trap(frame->exc, 0, frame))
                return;
#endif
        panic("%s trap", trapname(frame->exc));
}

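/*
 * Print a human-readable summary of a trap: the exception type, any
 * relevant fault address and status registers, and the current thread.
 */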
static void
printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{
        uint16_t ver;

        printf("\n");
        printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
            user ? "user" : "kernel");
        printf("\n");
        printf("   exception       = 0x%x (%s)\n", vector, trapname(vector));
        switch (vector) {
        case EXC_DSE:
        case EXC_DSI:
        case EXC_DTMISS:
                printf("   virtual address = 0x%" PRIxPTR "\n", frame->dar);
#ifdef AIM
                printf("   dsisr           = 0x%lx\n",
                    (u_long)frame->cpu.aim.dsisr);
#endif
                break;
        case EXC_ISE:
        case EXC_ISI:
        case EXC_ITMISS:
                printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
                break;
        case EXC_MCHK:
                ver = mfpvr() >> 16;
#if defined(AIM)
                if (MPC745X_P(ver))
                        printf("    msssr0         = 0x%lx\n",
                            (u_long)mfspr(SPR_MSSSR0));
#elif defined(BOOKE)
                printf("   mcsr           = 0x%lx\n", (u_long)mfspr(SPR_MCSR));
#endif
                break;
        }
#ifdef BOOKE
        printf("   esr             = 0x%" PRIxPTR "\n",
            frame->cpu.booke.esr);
#endif
        printf("   srr0            = 0x%" PRIxPTR "\n", frame->srr0);
        printf("   srr1            = 0x%lx\n", (u_long)frame->srr1);
        printf("   lr              = 0x%" PRIxPTR "\n", frame->lr);
        printf("   curthread       = %p\n", curthread);
        if (curthread != NULL)
                printf("          pid = %d, comm = %s\n",
                    curthread->td_proc->p_pid, curthread->td_name);
        printf("\n");
}

/*
 * Handles a fatal fault when we have onfault state to recover.  Returns
 * non-zero if there was onfault recovery state available.
 */
static int
handle_onfault(struct trapframe *frame)
{
        struct          thread *td;
        faultbuf        *fb;

        td = curthread;
        fb = td->td_pcb->pcb_onfault;
        if (fb != NULL) {
                frame->srr0 = (*fb)[FAULTBUF_LR];
                frame->fixreg[1] = (*fb)[FAULTBUF_R1];
                frame->fixreg[2] = (*fb)[FAULTBUF_R2];
                frame->fixreg[3] = 1;
                frame->cr = (*fb)[FAULTBUF_CR];
                bcopy(&(*fb)[FAULTBUF_R13], &frame->fixreg[13],
                    19 * sizeof(register_t));
                return (1);
        }
        return (0);
}

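/*
 * Fetch the system call number and arguments out of the trap frame.
 * The first NARGREG arguments arrive in the fixed registers starting at
 * FIRSTARG; any remaining arguments are copied in from the user stack.
 * SYS_syscall and SYS___syscall pass the real syscall number as their
 * first argument.
 */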
int
cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
{
        struct proc *p;
        struct trapframe *frame;
        caddr_t params;
        size_t argsz;
        int error, n, i;

        p = td->td_proc;
        frame = td->td_frame;

        sa->code = frame->fixreg[0];
        params = (caddr_t)(frame->fixreg + FIRSTARG);
        n = NARGREG;

        if (sa->code == SYS_syscall) {
                /*
                 * code is first argument,
                 * followed by actual args.
                 */
                sa->code = *(register_t *) params;
                params += sizeof(register_t);
                n -= 1;
        } else if (sa->code == SYS___syscall) {
                /*
                 * Like syscall, but code is a quad,
                 * so as to maintain quad alignment
                 * for the rest of the args.
                 */
                if (SV_PROC_FLAG(p, SV_ILP32)) {
                        params += sizeof(register_t);
                        sa->code = *(register_t *) params;
                        params += sizeof(register_t);
                        n -= 2;
                } else {
                        sa->code = *(register_t *) params;
                        params += sizeof(register_t);
                        n -= 1;
                }
        }

        if (p->p_sysent->sv_mask)
                sa->code &= p->p_sysent->sv_mask;
        if (sa->code >= p->p_sysent->sv_size)
                sa->callp = &p->p_sysent->sv_table[0];
        else
                sa->callp = &p->p_sysent->sv_table[sa->code];

        sa->narg = sa->callp->sy_narg;

        if (SV_PROC_FLAG(p, SV_ILP32)) {
                argsz = sizeof(uint32_t);

                for (i = 0; i < n; i++)
                        sa->args[i] = ((u_register_t *)(params))[i] &
                            0xffffffff;
        } else {
                argsz = sizeof(uint64_t);

                for (i = 0; i < n; i++)
                        sa->args[i] = ((u_register_t *)(params))[i];
        }

        if (sa->narg > n)
                error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
                               (sa->narg - n) * argsz);
        else
                error = 0;

#ifdef __powerpc64__
        if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) {
                /* Expand the size of arguments copied from the stack */

                for (i = sa->narg; i >= n; i--)
                        sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
        }
#endif

        if (error == 0) {
                td->td_retval[0] = 0;
                td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
        }
        return (error);
}

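/* syscallenter() and syscallret() are provided by the MI syscall code. */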
#include "../../kern/subr_syscall.c"

void
syscall(struct trapframe *frame)
{
        struct thread *td;
        struct syscall_args sa;
        int error;

        td = curthread;
        td->td_frame = frame;

#ifdef __powerpc64__
        /*
         * Speculatively restore last user SLB segment, which we know is
         * invalid already, since we are likely to do copyin()/copyout().
         */
        __asm __volatile ("slbmte %0, %1; isync" ::
            "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#endif

        error = syscallenter(td, &sa);
        syscallret(td, error, &sa);
}

#ifdef __powerpc64__
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
        struct slb *slbcache;
        uint64_t slbe, slbv;
        uint64_t esid, addr;
        int i;

        addr = (type == EXC_ISE) ? srr0 : dar;
        slbcache = PCPU_GET(slb);
        esid = (uintptr_t)addr >> ADDR_SR_SHFT;
        slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

        /* See if the hardware flushed this somehow (can happen in LPARs) */
        for (i = 0; i < n_slbs; i++)
                if (slbcache[i].slbe == (slbe | (uint64_t)i))
                        return;

        /* Not in the map, needs to actually be added */
        slbv = kernel_va_to_slbv(addr);
        if (slbcache[USER_SLB_SLOT].slbe == 0) {
                for (i = 0; i < n_slbs; i++) {
                        if (i == USER_SLB_SLOT)
                                continue;
                        if (!(slbcache[i].slbe & SLBE_VALID))
                                goto fillkernslb;
                }

                if (i == n_slbs)
                        slbcache[USER_SLB_SLOT].slbe = 1;
        }

        /* Sacrifice a random SLB entry that is not the user entry */
        i = mftb() % n_slbs;
        if (i == USER_SLB_SLOT)
                i = (i+1) % n_slbs;

fillkernslb:
        /* Write new entry */
        slbcache[i].slbv = slbv;
        slbcache[i].slbe = slbe | (uint64_t)i;

        /* Trap handler will restore from cache on exit */
}

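/*
 * Handle an SLB miss for a user address: allocate a VSID for the segment
 * if it has never been used, or re-insert an entry that already exists
 * in the pmap's SLB cache (e.g. one installed by another CPU).
 */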
static int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
        struct slb *user_entry;
        uint64_t esid;
        int i;

        esid = (uintptr_t)addr >> ADDR_SR_SHFT;

        PMAP_LOCK(pm);
        user_entry = user_va_to_slb_entry(pm, addr);

        if (user_entry == NULL) {
                /* allocate_vsid auto-spills it */
                (void)allocate_user_vsid(pm, esid, 0);
        } else {
                /*
                 * Check that another CPU has not already mapped this.
                 * XXX: Per-thread SLB caches would be better.
                 */
                for (i = 0; i < pm->pm_slb_len; i++)
                        if (pm->pm_slb[i] == user_entry)
                                break;

                if (i == pm->pm_slb_len)
                        slb_insert_user(pm, user_entry);
        }
        PMAP_UNLOCK(pm);

        return (0);
}
#endif

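/*
 * Resolve a page fault via vm_fault().  Returns 0 on success; otherwise
 * returns SIGSEGV, after first giving kernel faults a chance at onfault
 * recovery.
 */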
static int
trap_pfault(struct trapframe *frame, int user)
{
        vm_offset_t     eva, va;
        struct          thread *td;
        struct          proc *p;
        vm_map_t        map;
        vm_prot_t       ftype;
        int             rv;
#ifdef AIM
        register_t      user_sr;
#endif

        td = curthread;
        p = td->td_proc;
        if (frame->exc == EXC_ISI) {
                eva = frame->srr0;
                ftype = VM_PROT_EXECUTE;
                if (frame->srr1 & SRR1_ISI_PFAULT)
                        ftype |= VM_PROT_READ;
        } else {
                eva = frame->dar;
#ifdef BOOKE
                if (frame->cpu.booke.esr & ESR_ST)
#else
                if (frame->cpu.aim.dsisr & DSISR_STORE)
#endif
                        ftype = VM_PROT_WRITE;
                else
                        ftype = VM_PROT_READ;
        }

        if (user) {
                KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL"));
                map = &p->p_vmspace->vm_map;
        } else {
#ifdef BOOKE
                if (eva < VM_MAXUSER_ADDRESS) {
#else
                if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
#endif
                        map = &p->p_vmspace->vm_map;

#ifdef AIM
                        user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
                        eva &= ADDR_PIDX | ADDR_POFF;
                        eva |= user_sr << ADDR_SR_SHFT;
#endif
                } else {
                        map = kernel_map;
                }
        }
        va = trunc_page(eva);

        /* Fault in the page. */
        rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
        /*
         * XXXDTRACE: add dtrace_doubletrap_func here?
         */

        if (rv == KERN_SUCCESS)
                return (0);

        if (!user && handle_onfault(frame))
                return (0);

        return (SIGSEGV);
}

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

static int
fix_unaligned(struct thread *td, struct trapframe *frame)
{
        struct thread   *fputhread;
        int             indicator, reg;
        double          *fpr;

        indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);

        switch (indicator) {
        case EXC_ALI_LFD:
        case EXC_ALI_STFD:
                reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
                fpr = &td->td_pcb->pcb_fpu.fpr[reg].fpr;
                fputhread = PCPU_GET(fputhread);

                /* Juggle the FPU to ensure that we've initialized
                 * the FPRs, and that their current state is in
                 * the PCB.
                 */
                if (fputhread != td) {
                        if (fputhread)
                                save_fpu(fputhread);
                        enable_fpu(td);
                }
                save_fpu(td);

                if (indicator == EXC_ALI_LFD) {
                        if (copyin((void *)frame->dar, fpr,
                            sizeof(double)) != 0)
                                return (-1);
                        enable_fpu(td);
                } else {
                        if (copyout(fpr, (void *)frame->dar,
                            sizeof(double)) != 0)
                                return (-1);
                }
                return (0);
                break;
        }

        return (-1);
}

#ifdef KDB
int db_trap_glue(struct trapframe *);           /* Called from trap_subr.S */

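/*
 * Decide whether a kernel-mode trap should enter the debugger.  DTrace
 * breakpoints are left alone so that trap() can dispatch them.
 */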
int
db_trap_glue(struct trapframe *frame)
{
        if (!(frame->srr1 & PSL_PR)
            && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
#ifdef AIM
                || (frame->exc == EXC_PGM
                    && (frame->srr1 & EXC_PGM_TRAP))
#else
                || (frame->exc == EXC_DEBUG)
#endif
                || frame->exc == EXC_BPT
                || frame->exc == EXC_DSI)) {
                int type = frame->exc;

                /* Ignore DTrace traps. */
                if (*(uint32_t *)frame->srr0 == EXC_DTRACE)
                        return (0);
#ifdef AIM
                if (type == EXC_PGM && (frame->srr1 & EXC_PGM_TRAP)) {
#else
                if (frame->cpu.booke.esr & ESR_PTR) {
#endif
                        type = T_BREAKPOINT;
                }
                return (kdb_trap(type, 0, frame));
        }

        return (0);
}
#endif