/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/_inttypes.h>
#include <machine/altivec.h>
#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/spr.h>
#include <machine/sr.h>

static void     trap_fatal(struct trapframe *frame);
static void     printtrap(u_int vector, struct trapframe *frame, int isfatal,
                    int user);
static int      trap_pfault(struct trapframe *frame, int user);
static int      fix_unaligned(struct thread *td, struct trapframe *frame);
static int      handle_onfault(struct trapframe *frame);
static void     syscall(struct trapframe *frame);

#ifdef __powerpc64__
       void     handle_kernel_slb_spill(int, register_t, register_t);
static int      handle_user_slb_spill(pmap_t pm, vm_offset_t addr);
extern int      n_slbs;
#endif

struct powerpc_exception {
        u_int   vector;
        char    *name;
};

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

static struct powerpc_exception powerpc_exceptions[] = {
        { 0x0100, "system reset" },
        { 0x0200, "machine check" },
        { 0x0300, "data storage interrupt" },
        { 0x0380, "data segment exception" },
        { 0x0400, "instruction storage interrupt" },
        { 0x0480, "instruction segment exception" },
        { 0x0500, "external interrupt" },
        { 0x0600, "alignment" },
        { 0x0700, "program" },
        { 0x0800, "floating-point unavailable" },
        { 0x0900, "decrementer" },
        { 0x0c00, "system call" },
        { 0x0d00, "trace" },
        { 0x0e00, "floating-point assist" },
        { 0x0f00, "performance monitoring" },
        { 0x0f20, "altivec unavailable" },
        { 0x1000, "instruction tlb miss" },
        { 0x1100, "data load tlb miss" },
        { 0x1200, "data store tlb miss" },
        { 0x1300, "instruction breakpoint" },
        { 0x1400, "system management" },
        { 0x1600, "altivec assist" },
        { 0x1700, "thermal management" },
        { 0x2000, "run mode/trace" },
        { 0x3000, NULL }
};

static const char *
trapname(u_int vector)
{
        struct  powerpc_exception *pe;

        for (pe = powerpc_exceptions; pe->vector != 0x3000; pe++) {
                if (pe->vector == vector)
                        return (pe->name);
        }

        return ("unknown");
}

void
trap(struct trapframe *frame)
{
        struct thread   *td;
        struct proc     *p;
#ifdef KDTRACE_HOOKS
        uint32_t inst;
#endif
        int             sig, type, user;
        u_int           ucode;
        ksiginfo_t      ksi;

        PCPU_INC(cnt.v_trap);

        td = curthread;
        p = td->td_proc;

        type = ucode = frame->exc;
        sig = 0;
        user = frame->srr1 & PSL_PR;

        CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
            trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
        /*
         * A trap can occur while DTrace executes a probe. Before
         * executing the probe, DTrace blocks re-scheduling and sets
         * a flag in its per-CPU flags to indicate that it doesn't
         * want to fault. On returning from the probe, the no-fault
         * flag is cleared and finally re-scheduling is enabled.
         *
         * If the DTrace kernel module has registered a trap handler,
         * call it, and if it returns non-zero, assume that it has
         * handled the trap and modified the trap frame so that this
         * function can return normally.
         */
        /*
         * XXXDTRACE: add pid probe handler here (if ever)
         */
        if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type))
                return;
#endif

        if (user) {
                td->td_pticks = 0;
                td->td_frame = frame;
                if (td->td_ucred != p->p_ucred)
                        cred_update_thread(td);

                /* User Mode Traps */
                switch (type) {
                case EXC_RUNMODETRC:
                case EXC_TRC:
                        frame->srr1 &= ~PSL_SE;
                        sig = SIGTRAP;
                        ucode = TRAP_TRACE;
                        break;

#ifdef __powerpc64__
                case EXC_ISE:
                case EXC_DSE:
                        if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
                            (type == EXC_ISE) ? frame->srr0 :
                            frame->cpu.aim.dar) != 0) {
                                sig = SIGSEGV;
                                ucode = SEGV_MAPERR;
                        }
                        break;
#endif
                case EXC_DSI:
                case EXC_ISI:
                        sig = trap_pfault(frame, 1);
                        if (sig == SIGSEGV)
                                ucode = SEGV_MAPERR;
                        break;

                case EXC_SC:
                        syscall(frame);
                        break;

                case EXC_FPU:
                        KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
                            ("FPU already enabled for thread"));
                        enable_fpu(td);
                        break;

                case EXC_VEC:
                        KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
                            ("Altivec already enabled for thread"));
                        enable_vec(td);
                        break;

                case EXC_VECAST_G4:
                case EXC_VECAST_G5:
                        /*
                         * We get a VPU assist exception for IEEE mode
                         * vector operations on denormalized floats.
                         * Emulating this is a giant pain, so for now,
                         * just switch off IEEE mode and treat them as
                         * zero.
                         */

                        save_vec(td);
                        td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
                        enable_vec(td);
                        break;

                case EXC_ALI:
                        if (fix_unaligned(td, frame) != 0) {
                                sig = SIGBUS;
                                ucode = BUS_ADRALN;
                        }
                        else
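                                /* Step past the access we just emulated. */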
                                frame->srr0 += 4;
                        break;

                case EXC_PGM:
                        /* Identify the trap reason */
                        if (frame->srr1 & EXC_PGM_TRAP) {
#ifdef KDTRACE_HOOKS
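                                /*
                                 * The DTrace pid provider (fasttrap) plants
                                 * this trap word in user programs; if a
                                 * probe handler is registered, hand the
                                 * trap to it instead of raising SIGTRAP.
                                 */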
                                inst = fuword32((const void *)frame->srr0);
                                if (inst == 0x0FFFDDDD && dtrace_pid_probe_ptr != NULL) {
                                        struct reg regs;
                                        fill_regs(td, &regs);
                                        (*dtrace_pid_probe_ptr)(&regs);
                                        break;
                                }
#endif
                                sig = SIGTRAP;
                                ucode = TRAP_BRKPT;
                        } else {
                                sig = ppc_instr_emulate(frame, td->td_pcb);
                                if (sig == SIGILL) {
                                        if (frame->srr1 & EXC_PGM_PRIV)
                                                ucode = ILL_PRVOPC;
                                        else if (frame->srr1 & EXC_PGM_ILLEGAL)
                                                ucode = ILL_ILLOPC;
                                } else if (sig == SIGFPE)
                                        ucode = FPE_FLTINV;     /* Punt for now, invalid operation. */
                        }
                        break;

                case EXC_MCHK:
                        /*
                         * Note that this may not be recoverable for the user
                         * process, depending on the type of machine check,
                         * but it at least prevents the kernel from dying.
                         */
                        sig = SIGBUS;
                        ucode = BUS_OBJERR;
                        break;

                default:
                        trap_fatal(frame);
                }
        } else {
                /* Kernel Mode Traps */

                KASSERT(cold || td->td_ucred != NULL,
                    ("kernel trap doesn't have ucred"));
                switch (type) {
#ifdef KDTRACE_HOOKS
                case EXC_PGM:
                        if (frame->srr1 & EXC_PGM_TRAP) {
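                                /*
                                 * 0x7c810808 is "tw 4, r1, r1" (trap if
                                 * r1 == r1, i.e. always), the breakpoint
                                 * DTrace's FBT provider patches into kernel
                                 * functions; route it to the invop handler.
                                 */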
                                if (*(uint32_t *)frame->srr0 == 0x7c810808) {
                                        if (dtrace_invop_jump_addr != NULL) {
                                                dtrace_invop_jump_addr(frame);
                                                return;
                                        }
                                }
                        }
                        break;
#endif
#ifdef __powerpc64__
                case EXC_DSE:
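                        /*
                         * A kernel-mode fault on the segment reserved for
                         * user copies (copyin()/copyout()) just means the
                         * user SLB entry was evicted: re-insert it from the
                         * PCB and retry the access.
                         */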
                        if ((frame->cpu.aim.dar & SEGMENT_MASK) == USER_ADDR) {
                                __asm __volatile ("slbmte %0, %1" ::
                                        "r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
                                        "r"(USER_SLB_SLBE));
                                return;
                        }
                        break;
#endif
                case EXC_DSI:
                        if (trap_pfault(frame, 0) == 0)
                                return;
                        break;
                case EXC_MCHK:
                        if (handle_onfault(frame))
                                return;
                        break;
                default:
                        break;
                }
                trap_fatal(frame);
        }

        if (sig != 0) {
                if (p->p_sysent->sv_transtrap != NULL)
                        sig = (p->p_sysent->sv_transtrap)(sig, type);
                ksiginfo_init_trap(&ksi);
                ksi.ksi_signo = sig;
                ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
                /* ksi.ksi_addr = ? */
                ksi.ksi_trapno = type;
                trapsignal(td, &ksi);
        }

        userret(td, frame);
}

static void
trap_fatal(struct trapframe *frame)
{

        printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef KDB
        if ((debugger_on_panic || kdb_active) &&
            kdb_trap(frame->exc, 0, frame))
                return;
#endif
        panic("%s trap", trapname(frame->exc));
}

static void
printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{

        printf("\n");
        printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
            user ? "user" : "kernel");
        printf("\n");
        printf("   exception       = 0x%x (%s)\n", vector, trapname(vector));
        switch (vector) {
        case EXC_DSE:
        case EXC_DSI:
                printf("   virtual address = 0x%" PRIxPTR "\n",
                    frame->cpu.aim.dar);
                printf("   dsisr           = 0x%" PRIxPTR "\n",
                    frame->cpu.aim.dsisr);
                break;
        case EXC_ISE:
        case EXC_ISI:
                printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
                break;
        }
        printf("   srr0            = 0x%" PRIxPTR "\n", frame->srr0);
        printf("   srr1            = 0x%" PRIxPTR "\n", frame->srr1);
        printf("   lr              = 0x%" PRIxPTR "\n", frame->lr);
        printf("   curthread       = %p\n", curthread);
        if (curthread != NULL)
                printf("          pid = %d, comm = %s\n",
                    curthread->td_proc->p_pid, curthread->td_name);
        printf("\n");
}

/*
 * Handles a fatal fault when we have onfault state to recover.  Returns
 * non-zero if there was onfault recovery state available.
 */
static int
handle_onfault(struct trapframe *frame)
{
        struct          thread *td;
        faultbuf        *fb;

        td = curthread;
        fb = td->td_pcb->pcb_onfault;
        if (fb != NULL) {
                frame->srr0 = (*fb)[0];
                frame->fixreg[1] = (*fb)[1];
                frame->fixreg[2] = (*fb)[2];
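                /*
                 * Return 1 in r3 so that the interrupted routine's
                 * setfault() call appears to return non-zero and it can
                 * fail the copy with EFAULT.
                 */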
                frame->fixreg[3] = 1;
                frame->cr = (*fb)[3];
                bcopy(&(*fb)[4], &frame->fixreg[13],
                    19 * sizeof(register_t));
                return (1);
        }
        return (0);
}

int
cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
{
        struct proc *p;
        struct trapframe *frame;
        caddr_t params;
        size_t argsz;
        int error, n, i;

        p = td->td_proc;
        frame = td->td_frame;

        sa->code = frame->fixreg[0];
        params = (caddr_t)(frame->fixreg + FIRSTARG);
        n = NARGREG;

        if (sa->code == SYS_syscall) {
                /*
                 * code is first argument,
                 * followed by actual args.
                 */
                sa->code = *(register_t *) params;
                params += sizeof(register_t);
                n -= 1;
        } else if (sa->code == SYS___syscall) {
                /*
                 * Like syscall, but code is a quad,
                 * so as to maintain quad alignment
                 * for the rest of the args.
                 */
                if (SV_PROC_FLAG(p, SV_ILP32)) {
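                        /*
                         * A 32-bit process passes the 64-bit code in two
                         * argument registers; on this big-endian ABI the
                         * significant word is the second one, so skip the
                         * first register.
                         */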
                        params += sizeof(register_t);
                        sa->code = *(register_t *) params;
                        params += sizeof(register_t);
                        n -= 2;
                } else {
                        sa->code = *(register_t *) params;
                        params += sizeof(register_t);
                        n -= 1;
                }
        }

        if (p->p_sysent->sv_mask)
                sa->code &= p->p_sysent->sv_mask;
        if (sa->code >= p->p_sysent->sv_size)
                sa->callp = &p->p_sysent->sv_table[0];
        else
                sa->callp = &p->p_sysent->sv_table[sa->code];

        sa->narg = sa->callp->sy_narg;

        if (SV_PROC_FLAG(p, SV_ILP32)) {
                argsz = sizeof(uint32_t);

                for (i = 0; i < n; i++)
                        sa->args[i] = ((u_register_t *)(params))[i] &
                            0xffffffff;
        } else {
                argsz = sizeof(uint64_t);

                for (i = 0; i < n; i++)
                        sa->args[i] = ((u_register_t *)(params))[i];
        }

        if (sa->narg > n)
                error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
                               (sa->narg - n) * argsz);
        else
                error = 0;

#ifdef __powerpc64__
        if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) {
                /* Expand the size of arguments copied from the stack */

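                /*
                 * copyin() packed them as 32-bit words at &sa->args[n];
                 * walk backwards so each word is widened into its own
                 * 64-bit slot without overwriting words not yet expanded.
                 */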
                for (i = sa->narg; i >= n; i--)
                        sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
        }
#endif

        if (error == 0) {
                td->td_retval[0] = 0;
                td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
        }
        return (error);
}

#include "../../kern/subr_syscall.c"

void
syscall(struct trapframe *frame)
{
        struct thread *td;
        struct syscall_args sa;
        int error;

        td = curthread;
        td->td_frame = frame;

#ifdef __powerpc64__
        /*
         * Speculatively restore last user SLB segment, which we know is
         * invalid already, since we are likely to do copyin()/copyout().
         */
        __asm __volatile ("slbmte %0, %1; isync" ::
            "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#endif

        error = syscallenter(td, &sa);
        syscallret(td, error, &sa);
}

#ifdef __powerpc64__
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
        struct slb *slbcache;
        uint64_t slbe, slbv;
        uint64_t esid, addr;
        int i;

        addr = (type == EXC_ISE) ? srr0 : dar;
        slbcache = PCPU_GET(slb);
        esid = (uintptr_t)addr >> ADDR_SR_SHFT;
        slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

        /* See if the hardware flushed this somehow (can happen in LPARs) */
        for (i = 0; i < n_slbs; i++)
                if (slbcache[i].slbe == (slbe | (uint64_t)i))
                        return;

        /* Not in the map, needs to actually be added */
        slbv = kernel_va_to_slbv(addr);
        if (slbcache[USER_SLB_SLOT].slbe == 0) {
                for (i = 0; i < n_slbs; i++) {
                        if (i == USER_SLB_SLOT)
                                continue;
                        if (!(slbcache[i].slbe & SLBE_VALID))
                                goto fillkernslb;
                }

                if (i == n_slbs)
                        slbcache[USER_SLB_SLOT].slbe = 1;
        }

        /* Sacrifice a random SLB entry that is not the user entry */
        i = mftb() % n_slbs;
        if (i == USER_SLB_SLOT)
                i = (i+1) % n_slbs;

fillkernslb:
        /* Write new entry */
        slbcache[i].slbv = slbv;
        slbcache[i].slbe = slbe | (uint64_t)i;

        /* Trap handler will restore from cache on exit */
}

static int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
        struct slb *user_entry;
        uint64_t esid;
        int i;

        esid = (uintptr_t)addr >> ADDR_SR_SHFT;

        PMAP_LOCK(pm);
        user_entry = user_va_to_slb_entry(pm, addr);

        if (user_entry == NULL) {
                /* allocate_vsid auto-spills it */
                (void)allocate_user_vsid(pm, esid, 0);
        } else {
                /*
                 * Check that another CPU has not already mapped this.
                 * XXX: Per-thread SLB caches would be better.
                 */
                for (i = 0; i < pm->pm_slb_len; i++)
                        if (pm->pm_slb[i] == user_entry)
                                break;

                if (i == pm->pm_slb_len)
                        slb_insert_user(pm, user_entry);
        }
        PMAP_UNLOCK(pm);

        return (0);
}
#endif

static int
trap_pfault(struct trapframe *frame, int user)
{
        vm_offset_t     eva, va;
        struct          thread *td;
        struct          proc *p;
        vm_map_t        map;
        vm_prot_t       ftype;
        int             rv;
        register_t      user_sr;

        td = curthread;
        p = td->td_proc;
        if (frame->exc == EXC_ISI) {
                eva = frame->srr0;
                ftype = VM_PROT_EXECUTE;
                if (frame->srr1 & SRR1_ISI_PFAULT)
                        ftype |= VM_PROT_READ;
        } else {
                eva = frame->cpu.aim.dar;
                if (frame->cpu.aim.dsisr & DSISR_STORE)
                        ftype = VM_PROT_WRITE;
                else
                        ftype = VM_PROT_READ;
        }

        if (user) {
                map = &p->p_vmspace->vm_map;
        } else {
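                /*
                 * Faults in the segment used for kernel access to user
                 * memory (copyin()/copyout()) are really faults on user
                 * addresses: rebuild the user VA from the segment saved
                 * in the PCB and fault against the process's map.
                 */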
                if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
                        if (p->p_vmspace == NULL)
                                return (SIGSEGV);

                        map = &p->p_vmspace->vm_map;

                        user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
                        eva &= ADDR_PIDX | ADDR_POFF;
                        eva |= user_sr << ADDR_SR_SHFT;
                } else {
                        map = kernel_map;
                }
        }
        va = trunc_page(eva);

        if (map != kernel_map) {
                /*
                 * Keep swapout from messing with us during this
                 *      critical time.
                 */
                PROC_LOCK(p);
                ++p->p_lock;
                PROC_UNLOCK(p);

                /* Fault in the user page: */
                rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

                PROC_LOCK(p);
                --p->p_lock;
                PROC_UNLOCK(p);
                /*
                 * XXXDTRACE: add dtrace_doubletrap_func here?
                 */
        } else {
                /*
                 * Don't have to worry about process locking or stacks in the
                 * kernel.
                 */
                rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
        }

        if (rv == KERN_SUCCESS)
                return (0);

        if (!user && handle_onfault(frame))
                return (0);

        return (SIGSEGV);
}

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

static int
fix_unaligned(struct thread *td, struct trapframe *frame)
{
        struct thread   *fputhread;
        int             indicator, reg;
        double          *fpr;

        indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);

        switch (indicator) {
        case EXC_ALI_LFD:
        case EXC_ALI_STFD:
                reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
                fpr = &td->td_pcb->pcb_fpu.fpr[reg];
                fputhread = PCPU_GET(fputhread);

                /* Juggle the FPU to ensure that we've initialized
                 * the FPRs, and that their current state is in
                 * the PCB.
                 */
                if (fputhread != td) {
                        if (fputhread)
                                save_fpu(fputhread);
                        enable_fpu(td);
                }
                save_fpu(td);

                if (indicator == EXC_ALI_LFD) {
                        if (copyin((void *)frame->cpu.aim.dar, fpr,
                            sizeof(double)) != 0)
                                return -1;
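                        /*
                         * Reload the FPU from the PCB so the emulated
                         * load's result is visible to the thread.
                         */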
                        enable_fpu(td);
                } else {
                        if (copyout(fpr, (void *)frame->cpu.aim.dar,
                            sizeof(double)) != 0)
                                return -1;
                }
                return 0;
                break;
        }

        return -1;
}