sys/powerpc/aim/trap.c (FreeBSD stable/10, MFC r269701)

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/_inttypes.h>
#include <machine/altivec.h>
#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/spr.h>
#include <machine/sr.h>

static void     trap_fatal(struct trapframe *frame);
static void     printtrap(u_int vector, struct trapframe *frame, int isfatal,
                    int user);
static int      trap_pfault(struct trapframe *frame, int user);
static int      fix_unaligned(struct thread *td, struct trapframe *frame);
static int      handle_onfault(struct trapframe *frame);
static void     syscall(struct trapframe *frame);

#ifdef __powerpc64__
       void     handle_kernel_slb_spill(int, register_t, register_t);
static int      handle_user_slb_spill(pmap_t pm, vm_offset_t addr);
extern int      n_slbs;
#endif

struct powerpc_exception {
        u_int   vector;
        char    *name;
};

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

static struct powerpc_exception powerpc_exceptions[] = {
        { 0x0100, "system reset" },
        { 0x0200, "machine check" },
        { 0x0300, "data storage interrupt" },
        { 0x0380, "data segment exception" },
        { 0x0400, "instruction storage interrupt" },
        { 0x0480, "instruction segment exception" },
        { 0x0500, "external interrupt" },
        { 0x0600, "alignment" },
        { 0x0700, "program" },
        { 0x0800, "floating-point unavailable" },
        { 0x0900, "decrementer" },
        { 0x0c00, "system call" },
        { 0x0d00, "trace" },
        { 0x0e00, "floating-point assist" },
        { 0x0f00, "performance monitoring" },
        { 0x0f20, "altivec unavailable" },
        { 0x1000, "instruction tlb miss" },
        { 0x1100, "data load tlb miss" },
        { 0x1200, "data store tlb miss" },
        { 0x1300, "instruction breakpoint" },
        { 0x1400, "system management" },
        { 0x1600, "altivec assist" },
        { 0x1700, "thermal management" },
        { 0x2000, "run mode/trace" },
        { 0x3000, NULL }
};

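/*
 * Map an exception vector number to a human-readable name for KTR tracing
 * and panic/diagnostic messages; unknown vectors are reported as "unknown".
 */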
static const char *
trapname(u_int vector)
{
        struct  powerpc_exception *pe;

        for (pe = powerpc_exceptions; pe->vector != 0x3000; pe++) {
                if (pe->vector == vector)
                        return (pe->name);
        }

        return ("unknown");
}

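/*
 * Common trap entry point, reached from the low-level exception vectors.
 * User-mode traps are dispatched to page-fault handling, syscall entry,
 * lazy FPU/AltiVec context activation, alignment fixup and so on, and
 * normally finish by posting a signal (if any) and calling userret().
 * Kernel-mode traps are either recovered (page faults, SLB misses,
 * onfault-protected accesses) or escalate to trap_fatal().
 */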
void
trap(struct trapframe *frame)
{
        struct thread   *td;
        struct proc     *p;
#ifdef KDTRACE_HOOKS
        uint32_t inst;
#endif
        int             sig, type, user;
        u_int           ucode;
        ksiginfo_t      ksi;

        PCPU_INC(cnt.v_trap);

        td = curthread;
        p = td->td_proc;

        type = ucode = frame->exc;
        sig = 0;
        user = frame->srr1 & PSL_PR;

        CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
            trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
        /*
         * A trap can occur while DTrace executes a probe. Before
         * executing the probe, DTrace blocks re-scheduling and sets
         * a flag in its per-cpu flags to indicate that it doesn't
         * want to fault. On returning from the probe, the no-fault
         * flag is cleared and finally re-scheduling is enabled.
         *
         * If the DTrace kernel module has registered a trap handler,
         * call it and if it returns non-zero, assume that it has
         * handled the trap and modified the trap frame so that this
         * function can return normally.
         */
        /*
         * XXXDTRACE: add pid probe handler here (if ever)
         */
        if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type))
                return;
#endif

        if (user) {
                td->td_pticks = 0;
                td->td_frame = frame;
                if (td->td_ucred != p->p_ucred)
                        cred_update_thread(td);

                /* User Mode Traps */
                switch (type) {
                case EXC_RUNMODETRC:
                case EXC_TRC:
                        frame->srr1 &= ~PSL_SE;
                        sig = SIGTRAP;
                        ucode = TRAP_TRACE;
                        break;

#ifdef __powerpc64__
                case EXC_ISE:
                case EXC_DSE:
                        if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
                            (type == EXC_ISE) ? frame->srr0 :
                            frame->cpu.aim.dar) != 0) {
                                sig = SIGSEGV;
                                ucode = SEGV_MAPERR;
                        }
                        break;
#endif
                case EXC_DSI:
                case EXC_ISI:
                        sig = trap_pfault(frame, 1);
                        if (sig == SIGSEGV)
                                ucode = SEGV_MAPERR;
                        break;

                case EXC_SC:
                        syscall(frame);
                        break;

                case EXC_FPU:
                        KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
                            ("FPU already enabled for thread"));
                        enable_fpu(td);
                        break;

                case EXC_VEC:
                        KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
                            ("Altivec already enabled for thread"));
                        enable_vec(td);
                        break;

                case EXC_VECAST_G4:
                case EXC_VECAST_G5:
                        /*
                         * We get a VPU assist exception for IEEE mode
                         * vector operations on denormalized floats.
                         * Emulating this is a giant pain, so for now,
                         * just switch off IEEE mode and treat them as
                         * zero.
                         */

                        save_vec(td);
                        td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
                        enable_vec(td);
                        break;

                case EXC_ALI:
                        if (fix_unaligned(td, frame) != 0) {
                                sig = SIGBUS;
                                ucode = BUS_ADRALN;
                        }
                        else
                                frame->srr0 += 4;
                        break;

                case EXC_PGM:
                        /* Identify the trap reason */
                        if (frame->srr1 & EXC_PGM_TRAP) {
#ifdef KDTRACE_HOOKS
                                inst = fuword32((const void *)frame->srr0);
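                                /*
                                 * 0x0FFFDDDD appears to be the breakpoint
                                 * word the DTrace pid (fasttrap) provider
                                 * plants in user code; if so, hand it to
                                 * the registered pid-probe handler.
                                 */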
                                if (inst == 0x0FFFDDDD && dtrace_pid_probe_ptr != NULL) {
                                        struct reg regs;
                                        fill_regs(td, &regs);
                                        (*dtrace_pid_probe_ptr)(&regs);
                                        break;
                                }
#endif
                                sig = SIGTRAP;
                                ucode = TRAP_BRKPT;
                        } else {
                                sig = ppc_instr_emulate(frame, td->td_pcb);
                                if (sig == SIGILL) {
                                        if (frame->srr1 & EXC_PGM_PRIV)
                                                ucode = ILL_PRVOPC;
                                        else if (frame->srr1 & EXC_PGM_ILLEGAL)
                                                ucode = ILL_ILLOPC;
                                } else if (sig == SIGFPE)
                                        ucode = FPE_FLTINV;     /* Punt for now, invalid operation. */
                        }
                        break;

                case EXC_MCHK:
                        /*
                         * Note that this may not be recoverable for the user
                         * process, depending on the type of machine check,
                         * but it at least prevents the kernel from dying.
                         */
                        sig = SIGBUS;
                        ucode = BUS_OBJERR;
                        break;

                default:
                        trap_fatal(frame);
                }
        } else {
                /* Kernel Mode Traps */

                KASSERT(cold || td->td_ucred != NULL,
                    ("kernel trap doesn't have ucred"));
                switch (type) {
#ifdef KDTRACE_HOOKS
                case EXC_PGM:
                        if (frame->srr1 & EXC_PGM_TRAP) {
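                                /*
                                 * 0x7c810808 is a "tw" trap instruction,
                                 * presumably the word the DTrace FBT provider
                                 * patches into kernel probe sites; give the
                                 * registered invop handler first crack at it.
                                 */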
                                if (*(uint32_t *)frame->srr0 == 0x7c810808) {
                                        if (dtrace_invop_jump_addr != NULL) {
                                                dtrace_invop_jump_addr(frame);
                                                return;
                                        }
                                }
                        }
                        break;
#endif
#ifdef __powerpc64__
                case EXC_DSE:
                        if ((frame->cpu.aim.dar & SEGMENT_MASK) == USER_ADDR) {
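                                /*
                                 * The kernel's window into user memory lost
                                 * its SLB entry; re-install the per-thread
                                 * user VSID directly and retry the access.
                                 */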
                                __asm __volatile ("slbmte %0, %1" ::
                                        "r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
                                        "r"(USER_SLB_SLBE));
                                return;
                        }
                        break;
#endif
                case EXC_DSI:
                        if (trap_pfault(frame, 0) == 0)
                                return;
                        break;
                case EXC_MCHK:
                        if (handle_onfault(frame))
                                return;
                        break;
                default:
                        break;
                }
                trap_fatal(frame);
        }

        if (sig != 0) {
                if (p->p_sysent->sv_transtrap != NULL)
                        sig = (p->p_sysent->sv_transtrap)(sig, type);
                ksiginfo_init_trap(&ksi);
                ksi.ksi_signo = sig;
                ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
                /* ksi.ksi_addr = ? */
                ksi.ksi_trapno = type;
                trapsignal(td, &ksi);
        }

        userret(td, frame);
}

static void
trap_fatal(struct trapframe *frame)
{

        printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef KDB
        if ((debugger_on_panic || kdb_active) &&
            kdb_trap(frame->exc, 0, frame))
                return;
#endif
        panic("%s trap", trapname(frame->exc));
}

static void
printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{

        printf("\n");
        printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
            user ? "user" : "kernel");
        printf("\n");
        printf("   exception       = 0x%x (%s)\n", vector, trapname(vector));
        switch (vector) {
        case EXC_DSE:
        case EXC_DSI:
                printf("   virtual address = 0x%" PRIxPTR "\n",
                    frame->cpu.aim.dar);
                printf("   dsisr           = 0x%" PRIxPTR "\n",
                    frame->cpu.aim.dsisr);
                break;
        case EXC_ISE:
        case EXC_ISI:
                printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
                break;
        }
        printf("   srr0            = 0x%" PRIxPTR "\n", frame->srr0);
        printf("   srr1            = 0x%" PRIxPTR "\n", frame->srr1);
        printf("   lr              = 0x%" PRIxPTR "\n", frame->lr);
        printf("   curthread       = %p\n", curthread);
        if (curthread != NULL)
                printf("          pid = %d, comm = %s\n",
                    curthread->td_proc->p_pid, curthread->td_name);
        printf("\n");
}

/*
 * Handles a fatal fault when we have onfault state to recover.  Returns
 * non-zero if there was onfault recovery state available.
 */
static int
handle_onfault(struct trapframe *frame)
{
        struct          thread *td;
        faultbuf        *fb;

        td = curthread;
        fb = td->td_pcb->pcb_onfault;
        if (fb != NULL) {
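                /*
                 * Resume at the saved recovery point: restore the saved
                 * pc, stack pointer, r2, cr and the non-volatile registers
                 * r13-r31, and make the faulting routine return 1 in r3.
                 */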
                frame->srr0 = (*fb)[0];
                frame->fixreg[1] = (*fb)[1];
                frame->fixreg[2] = (*fb)[2];
                frame->fixreg[3] = 1;
                frame->cr = (*fb)[3];
                bcopy(&(*fb)[4], &frame->fixreg[13],
                    19 * sizeof(register_t));
                return (1);
        }
        return (0);
}

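/*
 * Collect system call arguments from the trapframe.  The syscall number
 * arrives in r0 (fixreg[0]) and up to NARGREG arguments in registers
 * starting at FIRSTARG (r3); any remaining arguments are copied in from
 * the caller's stack at MOREARGS(r1).  The SYS_syscall and SYS___syscall
 * indirection shifts the real code out of the first argument slot(s).
 */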
int
cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
{
        struct proc *p;
        struct trapframe *frame;
        caddr_t params;
        size_t argsz;
        int error, n, i;

        p = td->td_proc;
        frame = td->td_frame;

        sa->code = frame->fixreg[0];
        params = (caddr_t)(frame->fixreg + FIRSTARG);
        n = NARGREG;

        if (sa->code == SYS_syscall) {
                /*
                 * code is first argument,
                 * followed by actual args.
                 */
                sa->code = *(register_t *) params;
                params += sizeof(register_t);
                n -= 1;
        } else if (sa->code == SYS___syscall) {
                /*
                 * Like syscall, but code is a quad,
                 * so as to maintain quad alignment
                 * for the rest of the args.
                 */
                if (SV_PROC_FLAG(p, SV_ILP32)) {
                        params += sizeof(register_t);
                        sa->code = *(register_t *) params;
                        params += sizeof(register_t);
                        n -= 2;
                } else {
                        sa->code = *(register_t *) params;
                        params += sizeof(register_t);
                        n -= 1;
                }
        }

        if (p->p_sysent->sv_mask)
                sa->code &= p->p_sysent->sv_mask;
        if (sa->code >= p->p_sysent->sv_size)
                sa->callp = &p->p_sysent->sv_table[0];
        else
                sa->callp = &p->p_sysent->sv_table[sa->code];

        sa->narg = sa->callp->sy_narg;

        if (SV_PROC_FLAG(p, SV_ILP32)) {
                argsz = sizeof(uint32_t);

                for (i = 0; i < n; i++)
                        sa->args[i] = ((u_register_t *)(params))[i] &
                            0xffffffff;
        } else {
                argsz = sizeof(uint64_t);

                for (i = 0; i < n; i++)
                        sa->args[i] = ((u_register_t *)(params))[i];
        }

        if (sa->narg > n)
                error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
                               (sa->narg - n) * argsz);
        else
                error = 0;

#ifdef __powerpc64__
        if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) {
                /* Expand the size of arguments copied from the stack */

                for (i = sa->narg; i >= n; i--)
                        sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
        }
#endif

        if (error == 0) {
                td->td_retval[0] = 0;
                td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
        }
        return (error);
}

#include "../../kern/subr_syscall.c"

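/*
 * System call entry, reached from trap() on EXC_SC; the shared
 * syscallenter()/syscallret() machinery pulled in from subr_syscall.c
 * performs the actual dispatch.
 */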
void
syscall(struct trapframe *frame)
{
        struct thread *td;
        struct syscall_args sa;
        int error;

        td = curthread;
        td->td_frame = frame;

#ifdef __powerpc64__
        /*
         * Speculatively restore last user SLB segment, which we know is
         * invalid already, since we are likely to do copyin()/copyout().
         */
        __asm __volatile ("slbmte %0, %1; isync" ::
            "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#endif

        error = syscallenter(td, &sa);
        syscallret(td, error, &sa);
}

#ifdef __powerpc64__
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
        struct slb *slbcache;
        uint64_t slbe, slbv;
        uint64_t esid, addr;
        int i;

        addr = (type == EXC_ISE) ? srr0 : dar;
        slbcache = PCPU_GET(slb);
        esid = (uintptr_t)addr >> ADDR_SR_SHFT;
        slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

        /* See if the hardware flushed this somehow (can happen in LPARs) */
        for (i = 0; i < n_slbs; i++)
                if (slbcache[i].slbe == (slbe | (uint64_t)i))
                        return;

        /* Not in the map, needs to actually be added */
        slbv = kernel_va_to_slbv(addr);
        if (slbcache[USER_SLB_SLOT].slbe == 0) {
                for (i = 0; i < n_slbs; i++) {
                        if (i == USER_SLB_SLOT)
                                continue;
                        if (!(slbcache[i].slbe & SLBE_VALID))
                                goto fillkernslb;
                }

                if (i == n_slbs)
                        slbcache[USER_SLB_SLOT].slbe = 1;
        }

        /* Sacrifice a random SLB entry that is not the user entry */
        i = mftb() % n_slbs;
        if (i == USER_SLB_SLOT)
                i = (i+1) % n_slbs;

fillkernslb:
        /* Write new entry */
        slbcache[i].slbv = slbv;
        slbcache[i].slbe = slbe | (uint64_t)i;

        /* Trap handler will restore from cache on exit */
}

static int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
        struct slb *user_entry;
        uint64_t esid;
        int i;

        esid = (uintptr_t)addr >> ADDR_SR_SHFT;

        PMAP_LOCK(pm);
        user_entry = user_va_to_slb_entry(pm, addr);

        if (user_entry == NULL) {
                /* allocate_vsid auto-spills it */
                (void)allocate_user_vsid(pm, esid, 0);
        } else {
                /*
                 * Check that another CPU has not already mapped this.
                 * XXX: Per-thread SLB caches would be better.
                 */
                for (i = 0; i < pm->pm_slb_len; i++)
                        if (pm->pm_slb[i] == user_entry)
                                break;

                if (i == pm->pm_slb_len)
                        slb_insert_user(pm, user_entry);
        }
        PMAP_UNLOCK(pm);

        return (0);
}
#endif

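/*
 * Page fault handler shared by user and kernel faults.  The faulting
 * address and access type are decoded from SRR0/SRR1 (instruction faults)
 * or DAR/DSISR (data faults), the appropriate vm_map is chosen, and the
 * fault is handed to vm_fault().  Returns 0 if the fault was resolved and
 * SIGSEGV otherwise; unresolved kernel faults fall back to
 * handle_onfault() before giving up.
 */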
static int
trap_pfault(struct trapframe *frame, int user)
{
        vm_offset_t     eva, va;
        struct          thread *td;
        struct          proc *p;
        vm_map_t        map;
        vm_prot_t       ftype;
        int             rv;
        register_t      user_sr;

        td = curthread;
        p = td->td_proc;
        if (frame->exc == EXC_ISI) {
                eva = frame->srr0;
                ftype = VM_PROT_EXECUTE;
                if (frame->srr1 & SRR1_ISI_PFAULT)
                        ftype |= VM_PROT_READ;
        } else {
                eva = frame->cpu.aim.dar;
                if (frame->cpu.aim.dsisr & DSISR_STORE)
                        ftype = VM_PROT_WRITE;
                else
                        ftype = VM_PROT_READ;
        }

        if (user) {
                map = &p->p_vmspace->vm_map;
        } else {
                if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
                        if (p->p_vmspace == NULL)
                                return (SIGSEGV);

                        map = &p->p_vmspace->vm_map;

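                        /*
                         * Kernel accesses to user memory go through a
                         * dedicated segment (USER_ADDR); rewrite the fault
                         * address back into the user address space using
                         * the segment recorded in the PCB.
                         */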
                        user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
                        eva &= ADDR_PIDX | ADDR_POFF;
                        eva |= user_sr << ADDR_SR_SHFT;
                } else {
                        map = kernel_map;
                }
        }
        va = trunc_page(eva);

        if (map != kernel_map) {
                /*
                 * Keep swapout from messing with us during this
                 *      critical time.
                 */
                PROC_LOCK(p);
                ++p->p_lock;
                PROC_UNLOCK(p);

                /* Fault in the user page: */
                rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

                PROC_LOCK(p);
                --p->p_lock;
                PROC_UNLOCK(p);
                /*
                 * XXXDTRACE: add dtrace_doubletrap_func here?
                 */
        } else {
                /*
                 * Don't have to worry about process locking or stacks in the
                 * kernel.
                 */
                rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
        }

        if (rv == KERN_SUCCESS)
                return (0);

        if (!user && handle_onfault(frame))
                return (0);

        return (SIGSEGV);
}

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

static int
fix_unaligned(struct thread *td, struct trapframe *frame)
{
        struct thread   *fputhread;
        int             indicator, reg;
        double          *fpr;

        indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);

        switch (indicator) {
        case EXC_ALI_LFD:
        case EXC_ALI_STFD:
                reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
                fpr = &td->td_pcb->pcb_fpu.fpr[reg];
                fputhread = PCPU_GET(fputhread);

                /* Juggle the FPU to ensure that we've initialized
                 * the FPRs, and that their current state is in
                 * the PCB.
                 */
                if (fputhread != td) {
                        if (fputhread)
                                save_fpu(fputhread);
                        enable_fpu(td);
                }
                save_fpu(td);

                if (indicator == EXC_ALI_LFD) {
                        if (copyin((void *)frame->cpu.aim.dar, fpr,
                            sizeof(double)) != 0)
                                return -1;
                        enable_fpu(td);
                } else {
                        if (copyout(fpr, (void *)frame->cpu.aim.dar,
                            sizeof(double)) != 0)
                                return -1;
                }
                return 0;
                break;
        }

        return -1;
}