1 /* $OpenBSD: trap.c,v 1.19 1998/09/30 12:40:41 pefo Exp $ */
4 * SPDX-License-Identifier: BSD-3-Clause
6 * Copyright (c) 1988 University of Utah.
7 * Copyright (c) 1992, 1993
8 * The Regents of the University of California. All rights reserved.
10 * This code is derived from software contributed to Berkeley by
11 * the Systems Programming Group of the University of Utah Computer
12 * Science Department and Ralph Campbell.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * from: Utah Hdr: trap.c 1.32 91/04/06
40 * from: @(#)trap.c 8.5 (Berkeley) 1/11/94
41 * JNPR: trap.c,v 1.13.2.2 2007/08/29 10:03:49 girish
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
47 #include "opt_ktrace.h"
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/sysent.h>
53 #include <sys/kernel.h>
55 #include <sys/signalvar.h>
56 #include <sys/syscall.h>
59 #include <vm/vm_extern.h>
60 #include <vm/vm_kern.h>
61 #include <vm/vm_page.h>
62 #include <vm/vm_map.h>
63 #include <vm/vm_param.h>
64 #include <sys/vmmeter.h>
65 #include <sys/ptrace.h>
68 #include <sys/vnode.h>
69 #include <sys/pioctl.h>
70 #include <sys/sysctl.h>
71 #include <sys/syslog.h>
74 #include <sys/ktrace.h>
76 #include <net/netisr.h>
78 #include <machine/trap.h>
79 #include <machine/cpu.h>
80 #include <machine/cpuinfo.h>
81 #include <machine/pte.h>
82 #include <machine/pmap.h>
83 #include <machine/md_var.h>
84 #include <machine/mips_opcode.h>
85 #include <machine/frame.h>
86 #include <machine/regnum.h>
87 #include <machine/tls.h>
90 #include <machine/db_machdep.h>
91 #include <ddb/db_sym.h>
97 #include <sys/dtrace_bsd.h>
/*
 * machdep.trap_debug: when non-zero, trap() prints diagnostics for every
 * exception it handles.  The backing int is declared outside this excerpt.
 */
102 SYSCTL_INT(_machdep, OID_AUTO, trap_debug, CTLFLAG_RW,
103     &trap_debug, 0, "Debug information on all traps");
/*
 * Single-instruction inline-asm helpers used by the software emulation of
 * unaligned loads/stores (see emulate_unaligned_access() /
 * mips_unaligned_load_store()).  Each macro issues exactly one MIPS
 * partial-word load or store against "addr":
 *
 *   lbu/lb            - byte load (unsigned/signed)
 *   lwl/lwr, ldl/ldr  - left/right halves of an unaligned 32/64-bit load
 *   sb                - byte store
 *   swl/swr, sdl/sdr  - left/right halves of an unaligned 32/64-bit store
 *
 * The load macros produce "data" as an output operand; the store macros
 * have no outputs, so their operand list starts with an empty output
 * section (the bare ":" line) followed by both operands as inputs.  That
 * empty output section was dropped from the degraded source and is
 * restored here — without it the extended-asm syntax is malformed.
 */
#define	lbu_macro(data, addr)						\
	__asm __volatile ("lbu %0, 0x0(%1)"				\
	    : "=r" (data)	/* outputs */				\
	    : "r" (addr));	/* inputs */

#define	lb_macro(data, addr)						\
	__asm __volatile ("lb %0, 0x0(%1)"				\
	    : "=r" (data)	/* outputs */				\
	    : "r" (addr));	/* inputs */

#define	lwl_macro(data, addr)						\
	__asm __volatile ("lwl %0, 0x0(%1)"				\
	    : "=r" (data)	/* outputs */				\
	    : "r" (addr));	/* inputs */

#define	lwr_macro(data, addr)						\
	__asm __volatile ("lwr %0, 0x0(%1)"				\
	    : "=r" (data)	/* outputs */				\
	    : "r" (addr));	/* inputs */

#define	ldl_macro(data, addr)						\
	__asm __volatile ("ldl %0, 0x0(%1)"				\
	    : "=r" (data)	/* outputs */				\
	    : "r" (addr));	/* inputs */

#define	ldr_macro(data, addr)						\
	__asm __volatile ("ldr %0, 0x0(%1)"				\
	    : "=r" (data)	/* outputs */				\
	    : "r" (addr));	/* inputs */

#define	sb_macro(data, addr)						\
	__asm __volatile ("sb %0, 0x0(%1)"				\
	    :			/* outputs */				\
	    : "r" (data), "r" (addr));	/* inputs */

#define	swl_macro(data, addr)						\
	__asm __volatile ("swl %0, 0x0(%1)"				\
	    :			/* outputs */				\
	    : "r" (data), "r" (addr));	/* inputs */

#define	swr_macro(data, addr)						\
	__asm __volatile ("swr %0, 0x0(%1)"				\
	    :			/* outputs */				\
	    : "r" (data), "r" (addr));	/* inputs */

#define	sdl_macro(data, addr)						\
	__asm __volatile ("sdl %0, 0x0(%1)"				\
	    :			/* outputs */				\
	    : "r" (data), "r" (addr));	/* inputs */

#define	sdr_macro(data, addr)						\
	__asm __volatile ("sdr %0, 0x0(%1)"				\
	    :			/* outputs */				\
	    : "r" (data), "r" (addr));	/* inputs */
/* Diagnostic helpers defined later in this file. */
161 static void log_illegal_instruction(const char *, struct trapframe *);
162 static void log_bad_page_fault(char *, struct trapframe *, int);
163 static void log_frame_dump(struct trapframe *frame);
164 static void get_mapping_info(vm_offset_t, pd_entry_t **, pt_entry_t **);
/* Hook the DTrace module installs to intercept invalid-opcode traps. */
166 int (*dtrace_invop_jump_addr)(struct trapframe *);
/* NOTE(review): the #ifdef guards around these lines were lost in this excerpt. */
169 static void trap_frame_dump(struct trapframe *frame);
172 void (*machExceptionTable[]) (void)= {
174 * The kernel exception handlers.
176 MipsKernIntr, /* external interrupt */
177 MipsKernGenException, /* TLB modification */
178 MipsTLBInvalidException,/* TLB miss (load or instr. fetch) */
179 MipsTLBInvalidException,/* TLB miss (store) */
180 MipsKernGenException, /* address error (load or I-fetch) */
181 MipsKernGenException, /* address error (store) */
182 MipsKernGenException, /* bus error (I-fetch) */
183 MipsKernGenException, /* bus error (load or store) */
184 MipsKernGenException, /* system call */
185 MipsKernGenException, /* breakpoint */
186 MipsKernGenException, /* reserved instruction */
187 MipsKernGenException, /* coprocessor unusable */
188 MipsKernGenException, /* arithmetic overflow */
189 MipsKernGenException, /* trap exception */
190 MipsKernGenException, /* virtual coherence exception inst */
191 MipsKernGenException, /* floating point exception */
192 MipsKernGenException, /* reserved */
193 MipsKernGenException, /* reserved */
194 MipsKernGenException, /* reserved */
195 MipsKernGenException, /* reserved */
196 MipsKernGenException, /* reserved */
197 MipsKernGenException, /* reserved */
198 MipsKernGenException, /* reserved */
199 MipsKernGenException, /* watch exception */
200 MipsKernGenException, /* reserved */
201 MipsKernGenException, /* reserved */
202 MipsKernGenException, /* reserved */
203 MipsKernGenException, /* reserved */
204 MipsKernGenException, /* reserved */
205 MipsKernGenException, /* reserved */
206 MipsKernGenException, /* reserved */
207 MipsKernGenException, /* virtual coherence exception data */
209 * The user exception handlers.
211 MipsUserIntr, /* 0 */
212 MipsUserGenException, /* 1 */
213 MipsTLBInvalidException,/* 2 */
214 MipsTLBInvalidException,/* 3 */
215 MipsUserGenException, /* 4 */
216 MipsUserGenException, /* 5 */
217 MipsUserGenException, /* 6 */
218 MipsUserGenException, /* 7 */
219 MipsUserGenException, /* 8 */
220 MipsUserGenException, /* 9 */
221 MipsUserGenException, /* 10 */
222 MipsUserGenException, /* 11 */
223 MipsUserGenException, /* 12 */
224 MipsUserGenException, /* 13 */
225 MipsUserGenException, /* 14 */
226 MipsUserGenException, /* 15 */
227 MipsUserGenException, /* 16 */
228 MipsUserGenException, /* 17 */
229 MipsUserGenException, /* 18 */
230 MipsUserGenException, /* 19 */
231 MipsUserGenException, /* 20 */
232 MipsUserGenException, /* 21 */
233 MipsUserGenException, /* 22 */
234 MipsUserGenException, /* 23 */
235 MipsUserGenException, /* 24 */
236 MipsUserGenException, /* 25 */
237 MipsUserGenException, /* 26 */
238 MipsUserGenException, /* 27 */
239 MipsUserGenException, /* 28 */
240 MipsUserGenException, /* 29 */
241 MipsUserGenException, /* 20 */
242 MipsUserGenException, /* 31 */
/*
 * Printable names for the exception codes, used by the diagnostic printfs
 * in trap() and trapDump().  NOTE(review): several array entries and the
 * closing "};" were dropped from this excerpt — the visible strings are
 * NOT contiguous; do not edit by position.
 */
245 char *trap_type[] = {
246 	"external interrupt",
248 	"TLB miss (load or instr. fetch)",
250 	"address error (load or I-fetch)",
251 	"address error (store)",
252 	"bus error (I-fetch)",
253 	"bus error (load or store)",
256 	"reserved instruction",
257 	"coprocessor unusable",
258 	"arithmetic overflow",
260 	"virtual coherency instruction",
277 	"virtual coherency data",
280 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
/* Ring buffer of the most recent traps, dumped by trapDump() from DDB. */
281 struct trapdebug trapdebug[TRAPSIZE], *trp = trapdebug;
/* NOTE(review): the matching #endif was dropped from this excerpt. */
/* True when x falls inside the kernel's mapped virtual address range. */
284 #define KERNLAND(x) ((vm_offset_t)(x) >= VM_MIN_KERNEL_ADDRESS && (vm_offset_t)(x) < VM_MAX_KERNEL_ADDRESS)
/* True when the faulting instruction sat in a branch delay slot (cause BD bit). */
285 #define DELAYBRANCH(x) ((x) & MIPS_CR_BR_DELAY)
/*
 * NOTE(review): the following section is heavily subsampled — the access
 * type enum, most access_name[] entries, the #ifdef around the Octeon
 * include, and the #else/#endif of the emulate_fp conditional are missing.
 */
288 * MIPS load/store access type
301 char *access_name[] = {
302 	"Load Halfword Unsigned",
304 	"Load Word Unsigned",
/* Octeon-specific COP2 (crypto coprocessor) context save/restore support. */
313 #include <machine/octeon_cop2.h>
/* vm.allow_unaligned_acc: enable software fix-up of unaligned user accesses. */
316 static int allow_unaligned_acc = 1;
318 SYSCTL_INT(_vm, OID_AUTO, allow_unaligned_acc, CTLFLAG_RW,
319     &allow_unaligned_acc, 0, "Allow unaligned accesses");
322  * FP emulation is assumed to work on O32, but the code is outdated and crufty
323  * enough that it's a more sensible default to have it disabled when using
324  * other ABIs.  At the very least, it needs a lot of help in using
325  * type-semantic ABI-oblivious macros for everything it does.
327 #if defined(__mips_o32)
328 static int emulate_fp = 1;
330 static int emulate_fp = 0;
332 SYSCTL_INT(_machdep, OID_AUTO, emulate_fp, CTLFLAG_RW,
333     &emulate_fp, 0, "Emulate unimplemented FPU instructions");
/* Forward declaration: unaligned access fixer used from trap(). */
335 static int emulate_unaligned_access(struct trapframe *frame, int mode);
337 extern void fswintrberr(void); /* XXX */
/*
 * Decode the system-call number and arguments out of the trap frame into
 * td->td_sa, advancing the PC past the syscall instruction (honoring the
 * branch-delay-slot case via MipsEmulateBranch).  Handles SYS_syscall /
 * SYS___syscall indirection and the per-ABI register argument counts;
 * arguments beyond the registers are copied in from the user stack.
 * NOTE(review): many interior lines (braces, #else/#endif, some cases)
 * were dropped from this excerpt — the visible lines are NOT contiguous.
 */
340 cpu_fetch_syscall_args(struct thread *td)
342 struct trapframe *locr0;
343 struct sysentvec *se;
344 struct syscall_args *sa;
347 locr0 = td->td_frame;
350 bzero(sa->args, sizeof(sa->args));
352 /* compute next PC after syscall instruction */
353 td->td_pcb->pcb_tpc = sa->trapframe->pc; /* Remember if restart */
354 if (DELAYBRANCH(sa->trapframe->cause)) /* Check BD bit */
355 locr0->pc = MipsEmulateBranch(locr0, sa->trapframe->pc, 0, 0);
357 locr0->pc += sizeof(int);
358 sa->code = locr0->v0;
364 * This is an indirect syscall, in which the code is the first argument.
366 #if (!defined(__mips_n32) && !defined(__mips_n64)) || defined(COMPAT_FREEBSD32)
367 if (sa->code == SYS___syscall && SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
369 * Like syscall, but code is a quad, so as to maintain alignment
370 * for the rest of the arguments.
372 if (_QUAD_LOWWORD == 0)
373 sa->code = locr0->a0;
375 sa->code = locr0->a1;
376 sa->args[0] = locr0->a2;
377 sa->args[1] = locr0->a3;
383 * This is either not a quad syscall, or is a quad syscall with a
384 * new ABI in which quads fit in a single register.
386 sa->code = locr0->a0;
387 sa->args[0] = locr0->a1;
388 sa->args[1] = locr0->a2;
389 sa->args[2] = locr0->a3;
391 #if defined(__mips_n32) || defined(__mips_n64)
392 #ifdef COMPAT_FREEBSD32
393 if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
396 * Non-o32 ABIs support more arguments in registers.
398 sa->args[3] = locr0->a4;
399 sa->args[4] = locr0->a5;
400 sa->args[5] = locr0->a6;
401 sa->args[6] = locr0->a7;
403 #ifdef COMPAT_FREEBSD32
410 * A direct syscall, arguments are just parameters to the syscall.
412 sa->args[0] = locr0->a0;
413 sa->args[1] = locr0->a1;
414 sa->args[2] = locr0->a2;
415 sa->args[3] = locr0->a3;
417 #if defined (__mips_n32) || defined(__mips_n64)
418 #ifdef COMPAT_FREEBSD32
419 if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
422 * Non-o32 ABIs support more arguments in registers.
424 sa->args[4] = locr0->a4;
425 sa->args[5] = locr0->a5;
426 sa->args[6] = locr0->a6;
427 sa->args[7] = locr0->a7;
429 #ifdef COMPAT_FREEBSD32
438 printf("SYSCALL #%d pid:%u\n", sa->code, td->td_proc->p_pid);
/* Look up the handler; out-of-range codes map to entry 0 (nosys). */
441 se = td->td_proc->p_sysent;
444 * Shouldn't this go before switching on the code?
447 if (sa->code >= se->sv_size)
448 sa->callp = &se->sv_table[0];
450 sa->callp = &se->sv_table[sa->code];
452 sa->narg = sa->callp->sy_narg;
/* More args than fit in registers: fetch the remainder from the user stack. */
454 if (sa->narg > nsaved) {
455 #if defined(__mips_n32) || defined(__mips_n64)
458 * Is this right for new ABIs? I think the 4 there
459 * should be 8, size there are 8 registers to skip,
460 * not 4, but I'm not certain.
462 #ifdef COMPAT_FREEBSD32
463 if (!SV_PROC_FLAG(td->td_proc, SV_ILP32))
465 printf("SYSCALL #%u pid:%u, narg (%u) > nsaved (%u).\n",
466 sa->code, td->td_proc->p_pid, sa->narg, nsaved);
468 #if (defined(__mips_n32) || defined(__mips_n64)) && defined(COMPAT_FREEBSD32)
469 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
473 error = 0; /* XXX GCC is awful. */
474 for (i = nsaved; i < sa->narg; i++) {
475 error = copyin((caddr_t)(intptr_t)(locr0->sp +
476 (4 + (i - nsaved)) * sizeof(int32_t)),
477 (caddr_t)&arg, sizeof arg);
484 error = copyin((caddr_t)(intptr_t)(locr0->sp +
485 4 * sizeof(register_t)), (caddr_t)&sa->args[nsaved],
486 (u_int)(sa->narg - nsaved) * sizeof(register_t));
495 td->td_retval[0] = 0;
496 td->td_retval[1] = locr0->v1;
504 #include "../../kern/subr_syscall.c"
507 * Handle an exception.
508 * Called from MipsKernGenException() or MipsUserGenException()
509 * when a processor trap occurs.
510 * In the case of a kernel trap, we return the pc where to resume if
511 * p->p_addr->u_pcb.pcb_onfault is set, otherwise, return old pc.
/*
 * NOTE(review): this function is heavily subsampled — switch/case labels,
 * braces, #else/#endif lines and whole statements are missing from this
 * excerpt.  The visible lines are NOT contiguous; treat the added comments
 * as orientation markers only.
 */
514 trap(struct trapframe *trapframe)
519 struct thread *td = curthread;
520 struct proc *p = curproc;
529 register_t *frame_regs;
531 trapdebug_enter(trapframe, 0);
/* Extract the 5-bit exception code; T_USER is or'ed in for user mode. */
538 type = (trapframe->cause & MIPS_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT;
539 if (TRAPF_USERMODE(trapframe)) {
547 * Enable hardware interrupts if they were on before the trap. If it
548 * was off disable all so we don't accidently enable it when doing a
549 * return to userland.
551 if (trapframe->sr & MIPS_SR_INT_IE) {
552 set_intr_mask(trapframe->sr & MIPS_SR_INT_MASK);
/* trap_debug diagnostics: repeated faults at one address trigger a panic. */
560 static vm_offset_t last_badvaddr = 0;
561 static vm_offset_t this_badvaddr = 0;
562 static int count = 0;
565 printf("trap type %x (%s - ", type,
566 trap_type[type & (~T_USER)]);
569 printf("user mode)\n");
571 printf("kernel mode)\n");
574 printf("cpuid = %d\n", PCPU_GET(cpuid));
576 pid = mips_rd_entryhi() & TLBHI_ASID_MASK;
577 printf("badaddr = %#jx, pc = %#jx, ra = %#jx, sp = %#jx, sr = %jx, pid = %d, ASID = %u\n",
578 (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra,
579 (intmax_t)trapframe->sp, (intmax_t)trapframe->sr,
580 (curproc ? curproc->p_pid : -1), pid);
582 switch (type & ~T_USER) {
588 this_badvaddr = trapframe->badvaddr;
591 this_badvaddr = trapframe->ra;
594 this_badvaddr = trapframe->pc;
597 if ((last_badvaddr == this_badvaddr) &&
598 ((type & ~T_USER) != T_SYSCALL) &&
599 ((type & ~T_USER) != T_COP_UNUSABLE)) {
601 trap_frame_dump(trapframe);
602 panic("too many faults at %p\n", (void *)last_badvaddr);
605 last_badvaddr = this_badvaddr;
613 * A trap can occur while DTrace executes a probe. Before
614 * executing the probe, DTrace blocks re-scheduling and sets
615 * a flag in its per-cpu flags to indicate that it doesn't
616 * want to fault. On returning from the probe, the no-fault
617 * flag is cleared and finally re-scheduling is enabled.
619 * If the DTrace kernel module has registered a trap handler,
620 * call it and if it returns non-zero, assume that it has
621 * handled the trap and modified the trap frame so that this
622 * function can return normally.
625 * XXXDTRACE: add pid probe handler here (if ever)
628 if (dtrace_trap_func != NULL &&
629 (*dtrace_trap_func)(trapframe, type) != 0)
630 return (trapframe->pc);
637 kdb_trap(type, 0, trapframe);
/* T_TLB_MOD: write to a clean page — emulate the dirty-bit update. */
642 /* check for kernel address */
643 if (KERNLAND(trapframe->badvaddr)) {
644 if (pmap_emulate_modified(kernel_pmap,
645 trapframe->badvaddr) != 0) {
646 ftype = VM_PROT_WRITE;
649 return (trapframe->pc);
653 case T_TLB_MOD + T_USER:
654 pmap = &p->p_vmspace->vm_pmap;
655 if (pmap_emulate_modified(pmap, trapframe->badvaddr) != 0) {
656 ftype = VM_PROT_WRITE;
660 return (trapframe->pc);
/* Kernel-mode TLB miss: fault the page in via the kernel map. */
665 ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
666 /* check for kernel address */
667 if (KERNLAND(trapframe->badvaddr)) {
672 va = (vm_offset_t)trapframe->badvaddr;
673 rv = vm_fault_trap(kernel_map, va, ftype,
674 VM_FAULT_NORMAL, NULL, NULL);
675 if (rv == KERN_SUCCESS)
676 return (trapframe->pc);
677 if (td->td_pcb->pcb_onfault != NULL) {
678 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
679 td->td_pcb->pcb_onfault = NULL;
686 * It is an error for the kernel to access user space except
687 * through the copyin/copyout routines.
689 if (td->td_pcb->pcb_onfault == NULL)
694 case T_TLB_LD_MISS + T_USER:
695 ftype = VM_PROT_READ;
698 case T_TLB_ST_MISS + T_USER:
699 ftype = VM_PROT_WRITE;
709 va = (vm_offset_t)trapframe->badvaddr;
710 if (KERNLAND(trapframe->badvaddr)) {
712 * Don't allow user-mode faults in kernel
718 rv = vm_fault_trap(map, va, ftype, VM_FAULT_NORMAL,
721 * XXXDTRACE: add dtrace_doubletrap_func here?
724 printf("vm_fault(%p (pmap %p), %p (%p), %x, %d) -> %x at pc %p\n",
725 map, &vm->vm_pmap, (void *)va, (void *)(intptr_t)trapframe->badvaddr,
726 ftype, VM_FAULT_NORMAL, rv, (void *)(intptr_t)trapframe->pc);
729 if (rv == KERN_SUCCESS) {
731 return (trapframe->pc);
737 if (td->td_pcb->pcb_onfault != NULL) {
738 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
739 td->td_pcb->pcb_onfault = NULL;
744 addr = trapframe->pc;
746 msg = "BAD_PAGE_FAULT";
747 log_bad_page_fault(msg, trapframe, type);
752 case T_ADDR_ERR_LD + T_USER: /* misaligned or kseg access */
753 case T_ADDR_ERR_ST + T_USER: /* misaligned or kseg access */
754 if (trapframe->badvaddr < 0 ||
755 trapframe->badvaddr >= VM_MAXUSER_ADDRESS) {
756 msg = "ADDRESS_SPACE_ERR";
757 } else if (allow_unaligned_acc) {
760 if (type == (T_ADDR_ERR_LD + T_USER))
763 mode = VM_PROT_WRITE;
765 access_type = emulate_unaligned_access(trapframe, mode);
766 if (access_type != 0)
768 msg = "ALIGNMENT_FIX_ERR";
775 case T_BUS_ERR_IFETCH + T_USER: /* BERR asserted to cpu */
776 case T_BUS_ERR_LD_ST + T_USER: /* BERR asserted to cpu */
777 ucode = 0; /* XXX should be VM_PROT_something */
779 addr = trapframe->pc;
782 log_bad_page_fault(msg, trapframe, type);
785 case T_SYSCALL + T_USER:
787 td->td_sa.trapframe = trapframe;
790 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
791 if (trp == trapdebug)
792 trapdebug[TRAPSIZE - 1].code = td->td_sa.code;
794 trp[-1].code = td->td_sa.code;
796 trapdebug_enter(td->td_frame, -td->td_sa.code);
799 * The sync'ing of I & D caches for SYS_ptrace() is
800 * done by procfs_domem() through procfs_rwmem()
801 * instead of being done here under a special check
805 return (trapframe->pc);
808 #if defined(KDTRACE_HOOKS) || defined(DDB)
811 if (!usermode && dtrace_invop_jump_addr != 0) {
812 dtrace_invop_jump_addr(trapframe);
813 return (trapframe->pc);
817 kdb_trap(type, 0, trapframe);
818 return (trapframe->pc);
/* User breakpoint: distinguish ptrace single-step from plain SIGTRAP. */
822 case T_BREAK + T_USER:
829 addr = trapframe->pc;
831 /* compute address of break instruction */
833 if (DELAYBRANCH(trapframe->cause))
836 if (td->td_md.md_ss_addr != va)
839 /* read break instruction */
840 instr = fuword32((caddr_t)va);
842 if (instr != MIPS_BREAK_SSTEP)
846 "trap: tid %d, single step at %#lx: %#08x",
847 td->td_tid, va, instr);
850 error = ptrace_clear_single_step(td);
858 case T_IWATCH + T_USER:
859 case T_DWATCH + T_USER:
863 /* compute address of trapped instruction */
865 if (DELAYBRANCH(trapframe->cause))
867 printf("watch exception @ %p\n", (void *)va);
874 case T_TRAP + T_USER:
877 struct trapframe *locr0 = td->td_frame;
879 /* compute address of trap instruction */
881 if (DELAYBRANCH(trapframe->cause))
884 if (DELAYBRANCH(trapframe->cause)) { /* Check BD bit */
885 locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0,
888 locr0->pc += sizeof(int);
891 i = SIGEMT; /* Stuff it with something for now */
895 case T_RES_INST + T_USER:
/* Emulate the rdhwr-style TLS read (register 29 holds the TLS pointer). */
898 inst = *(InstFmt *)(intptr_t)trapframe->pc;
899 switch (inst.RType.op) {
901 switch (inst.RType.func) {
903 /* Register 29 used for TLS */
904 if (inst.RType.rd == 29) {
905 frame_regs = &(trapframe->zero);
906 frame_regs[inst.RType.rt] = (register_t)(intptr_t)td->td_md.md_tls;
907 frame_regs[inst.RType.rt] += td->td_md.md_tls_tcb_offset;
908 trapframe->pc += sizeof(int);
916 log_illegal_instruction("RES_INST", trapframe);
918 addr = trapframe->pc;
/* Kernel-mode COP2 (Octeon crypto) unusable: swap in the kernel context. */
927 cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT;
928 /* Handle only COP2 exception */
932 addr = trapframe->pc;
933 /* save userland cop2 context if it has been touched */
934 if ((td->td_md.md_flags & MDTD_COP2USED) &&
935 (td->td_md.md_cop2owner == COP2_OWNER_USERLAND)) {
936 if (td->td_md.md_ucop2)
937 octeon_cop2_save(td->td_md.md_ucop2);
939 panic("COP2 was used in user mode but md_ucop2 is NULL");
942 if (td->td_md.md_cop2 == NULL) {
943 td->td_md.md_cop2 = octeon_cop2_alloc_ctx();
944 if (td->td_md.md_cop2 == NULL)
945 panic("Failed to allocate COP2 context");
946 memset(td->td_md.md_cop2, 0, sizeof(*td->td_md.md_cop2));
949 octeon_cop2_restore(td->td_md.md_cop2);
951 /* Make userland re-request its context */
952 td->td_frame->sr &= ~MIPS_SR_COP_2_BIT;
953 td->td_md.md_flags |= MDTD_COP2USED;
954 td->td_md.md_cop2owner = COP2_OWNER_KERNEL;
955 /* Enable COP2, it will be disabled in cpu_switch */
956 mips_wr_status(mips_rd_status() | MIPS_SR_COP_2_BIT);
957 return (trapframe->pc);
963 case T_COP_UNUSABLE + T_USER:
964 cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT;
966 /* FP (COP1) instruction */
967 if (cpuinfo.fpu_id == 0) {
968 log_illegal_instruction("COP1_UNUSABLE",
/* Lazy FPU context switch: hand the FPU to this thread. */
973 addr = trapframe->pc;
974 MipsSwitchFPState(PCPU_GET(fpcurthread), td->td_frame);
975 PCPU_SET(fpcurthread, td);
976 #if defined(__mips_n32) || defined(__mips_n64)
977 td->td_frame->sr |= MIPS_SR_COP_1_BIT | MIPS_SR_FR;
979 td->td_frame->sr |= MIPS_SR_COP_1_BIT;
981 td->td_md.md_flags |= MDTD_FPUSED;
/* User-mode COP2 unusable: mirror of the kernel-mode path above. */
986 addr = trapframe->pc;
987 if ((td->td_md.md_flags & MDTD_COP2USED) &&
988 (td->td_md.md_cop2owner == COP2_OWNER_KERNEL)) {
989 if (td->td_md.md_cop2)
990 octeon_cop2_save(td->td_md.md_cop2);
992 panic("COP2 was used in kernel mode but md_cop2 is NULL");
995 if (td->td_md.md_ucop2 == NULL) {
996 td->td_md.md_ucop2 = octeon_cop2_alloc_ctx();
997 if (td->td_md.md_ucop2 == NULL)
998 panic("Failed to allocate userland COP2 context");
999 memset(td->td_md.md_ucop2, 0, sizeof(*td->td_md.md_ucop2));
1002 octeon_cop2_restore(td->td_md.md_ucop2);
1004 td->td_frame->sr |= MIPS_SR_COP_2_BIT;
1005 td->td_md.md_flags |= MDTD_COP2USED;
1006 td->td_md.md_cop2owner = COP2_OWNER_USERLAND;
1011 log_illegal_instruction("COPn_UNUSABLE", trapframe);
1012 i = SIGILL; /* only FPU instructions allowed */
1017 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
1020 printf("FPU Trap: PC %#jx CR %x SR %x\n",
1021 (intmax_t)trapframe->pc, (unsigned)trapframe->cause, (unsigned)trapframe->sr);
1025 case T_FPE + T_USER:
1028 addr = trapframe->pc;
1031 MipsFPTrap(trapframe->sr, trapframe->cause, trapframe->pc);
1034 case T_OVFLOW + T_USER:
1036 addr = trapframe->pc;
1039 case T_ADDR_ERR_LD: /* misaligned access */
1040 case T_ADDR_ERR_ST: /* misaligned access */
1043 printf("+++ ADDR_ERR: type = %d, badvaddr = %#jx\n", type,
1044 (intmax_t)trapframe->badvaddr);
1047 /* Only allow emulation on a user address */
1048 if (allow_unaligned_acc &&
1049 ((vm_offset_t)trapframe->badvaddr < VM_MAXUSER_ADDRESS)) {
1052 if (type == T_ADDR_ERR_LD)
1053 mode = VM_PROT_READ;
1055 mode = VM_PROT_WRITE;
1057 access_type = emulate_unaligned_access(trapframe, mode);
1058 if (access_type != 0)
1059 return (trapframe->pc);
1063 case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */
1064 if (td->td_pcb->pcb_onfault != NULL) {
1065 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
1066 td->td_pcb->pcb_onfault = NULL;
/* Unhandled trap: dump state, optionally enter the debugger, then panic path. */
1075 #if !defined(SMP) && defined(DEBUG)
1079 printf("cpu:%d-", PCPU_GET(cpuid));
1081 printf("Trap cause = %d (%s - ", type,
1082 trap_type[type & (~T_USER)]);
1085 printf("user mode)\n");
1087 printf("kernel mode)\n");
1091 printf("badvaddr = %#jx, pc = %#jx, ra = %#jx, sr = %#jxx\n",
1092 (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra,
1093 (intmax_t)trapframe->sr);
1097 if (debugger_on_trap) {
1098 kdb_why = KDB_WHY_TRAP;
1099 kdb_trap(type, 0, trapframe);
1100 kdb_why = KDB_WHY_UNSET;
/* Deliver the signal chosen above (SIGSEGV/SIGBUS/SIGILL/...) to the thread. */
1105 td->td_frame->pc = trapframe->pc;
1106 td->td_frame->cause = trapframe->cause;
1107 td->td_frame->badvaddr = trapframe->badvaddr;
1108 ksiginfo_init_trap(&ksi);
1110 ksi.ksi_code = ucode;
1111 ksi.ksi_addr = (void *)addr;
1112 ksi.ksi_trapno = type;
1113 trapsignal(td, &ksi);
1117 * Note: we should only get here if returning to user mode.
1119 userret(td, trapframe);
1120 return (trapframe->pc);
1123 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
/*
 * trapDump(): walk the trapdebug ring buffer backwards and print each
 * recorded trap (address, PC, cause, status, RA, SP, code).  DDB/debug
 * kernels only.  NOTE(review): the function signature, locals, several
 * braces and the closing #endif were dropped from this excerpt.
 */
1131 printf("trapDump(%s)\n", msg);
1132 for (i = 0; i < TRAPSIZE; i++) {
/* Wrap backwards past the start of the ring. */
1133 if (trp == trapdebug) {
1134 trp = &trapdebug[TRAPSIZE - 1];
/* Unused slots have cause == 0. */
1139 if (trp->cause == 0)
1142 printf("%s: ADR %jx PC %jx CR %jx SR %jx\n",
1143 trap_type[(trp->cause & MIPS_CR_EXC_CODE) >>
1144 MIPS_CR_EXC_CODE_SHIFT],
1145 (intmax_t)trp->vadr, (intmax_t)trp->pc,
1146 (intmax_t)trp->cause, (intmax_t)trp->status);
1148 printf(" RA %jx SP %jx code %d\n", (intmax_t)trp->ra,
1149 (intmax_t)trp->sp, (int)trp->code);
1157 * Return the resulting PC as if the branch was executed.
/*
 * Decode the branch/jump instruction at instPC and compute the address the
 * CPU would transfer to: the branch target when taken, instPC+8 (skipping
 * the delay slot) when not taken, instPC+4 for non-branches.  fpcCSR
 * supplies the FP condition bit for bc1t/bc1f.  Used to fix up the PC when
 * an exception hits an instruction in a branch delay slot.
 * NOTE(review): case labels, braces and some alternatives were dropped
 * from this excerpt — the visible lines are NOT contiguous.
 */
1160 MipsEmulateBranch(struct trapframe *framePtr, uintptr_t instPC, int fpcCSR,
1164 register_t *regsPtr = (register_t *) framePtr;
1165 uintptr_t retAddr = 0;
/* Branch target = delay-slot address + sign-extended 16-bit offset << 2. */
1168 #define GetBranchDest(InstPtr, inst) \
1169 (InstPtr + 4 + ((short)inst.IType.imm << 2))
/* Fetch the instruction: user addresses via fuword32, kernel directly. */
1173 if (instptr < MIPS_KSEG0_START)
1174 inst.word = fuword32((void *)instptr);
1176 inst = *(InstFmt *) instptr;
1178 if ((vm_offset_t)instPC < MIPS_KSEG0_START)
1179 inst.word = fuword32((void *)instPC);
1181 inst = *(InstFmt *) instPC;
1184 switch ((int)inst.JType.op) {
1186 switch ((int)inst.RType.func) {
/* jr/jalr: target comes from a register. */
1189 retAddr = regsPtr[inst.RType.rs];
1193 retAddr = instPC + 4;
/* REGIMM (bltz/bgez and -al variants). */
1199 switch ((int)inst.IType.rt) {
1204 if ((int)(regsPtr[inst.RType.rs]) < 0)
1205 retAddr = GetBranchDest(instPC, inst);
1207 retAddr = instPC + 8;
1214 if ((int)(regsPtr[inst.RType.rs]) >= 0)
1215 retAddr = GetBranchDest(instPC, inst);
1217 retAddr = instPC + 8;
1226 retAddr = instPC + 4; /* Like syscall... */
1230 panic("MipsEmulateBranch: Bad branch cond");
/* j/jal: 26-bit target within the current 256MB segment. */
1236 retAddr = (inst.JType.target << 2) |
1237 ((unsigned)(instPC + 4) & 0xF0000000);
1242 if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
1243 retAddr = GetBranchDest(instPC, inst);
1245 retAddr = instPC + 8;
1250 if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
1251 retAddr = GetBranchDest(instPC, inst);
1253 retAddr = instPC + 8;
1258 if ((int)(regsPtr[inst.RType.rs]) <= 0)
1259 retAddr = GetBranchDest(instPC, inst);
1261 retAddr = instPC + 8;
1266 if ((int)(regsPtr[inst.RType.rs]) > 0)
1267 retAddr = GetBranchDest(instPC, inst);
1269 retAddr = instPC + 8;
/* COP1 bc1t/bc1f: branch on the FP condition bit in fpcCSR. */
1273 switch (inst.RType.rs) {
1276 if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
1277 condition = fpcCSR & MIPS_FPU_COND_BIT;
1279 condition = !(fpcCSR & MIPS_FPU_COND_BIT);
1281 retAddr = GetBranchDest(instPC, inst);
1283 retAddr = instPC + 8;
1287 retAddr = instPC + 4;
/* Default: not a branch — next sequential instruction. */
1292 retAddr = instPC + 4;
1298 log_frame_dump(struct trapframe *frame)
1300 log(LOG_ERR, "Trapframe Register Dump:\n");
1301 log(LOG_ERR, "\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n",
1302 (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1);
1304 log(LOG_ERR, "\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n",
1305 (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3);
1307 #if defined(__mips_n32) || defined(__mips_n64)
1308 log(LOG_ERR, "\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta6: %#jx\n",
1309 (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7);
1311 log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1312 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1314 log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1315 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1317 log(LOG_ERR, "\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n",
1318 (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7);
1320 log(LOG_ERR, "\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n",
1321 (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1);
1323 log(LOG_ERR, "\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n",
1324 (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5);
1326 log(LOG_ERR, "\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n",
1327 (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1);
1329 log(LOG_ERR, "\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n",
1330 (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra);
1332 log(LOG_ERR, "\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n",
1333 (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr);
1335 log(LOG_ERR, "\tcause: %#jx\tpc: %#jx\n",
1336 (intmax_t)frame->cause, (intmax_t)frame->pc);
/*
 * Print every register saved in the trap frame to the console with
 * printf(); console-facing twin of log_frame_dump() above.
 * NOTE(review): the #if/#else/#endif around the duplicated t0-t3 lines
 * (the n32/n64 vs. o32 register layouts) and the function's braces were
 * dropped from this excerpt — the visible lines are NOT contiguous.
 */
1341 trap_frame_dump(struct trapframe *frame)
1343 printf("Trapframe Register Dump:\n");
1344 printf("\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n",
1345 (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1);
1347 printf("\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n",
1348 (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3);
1349 #if defined(__mips_n32) || defined(__mips_n64)
1350 printf("\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta7: %#jx\n",
1351 (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7);
1353 printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1354 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1356 printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1357 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1359 printf("\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n",
1360 (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7);
1362 printf("\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n",
1363 (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1);
1365 printf("\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n",
1366 (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5);
1368 printf("\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n",
1369 (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1);
1371 printf("\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n",
1372 (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra);
1374 printf("\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n",
1375 (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr);
1377 printf("\tcause: %#jx\tpc: %#jx\n",
1378 (intmax_t)frame->cause, (intmax_t)frame->pc);
/*
 * Look up the page-directory and page-table entries that map va in the
 * current process's pmap, returning them via *pdepp / *ptepp (NULL pte
 * when the directory entry is invalid).  Used by the fault loggers below.
 * NOTE(review): the function braces and the conditional around the
 * pmap_pte() call were dropped from this excerpt.
 */
1385 get_mapping_info(vm_offset_t va, pd_entry_t **pdepp, pt_entry_t **ptepp)
1389 struct proc *p = curproc;
1391 pdep = (&(p->p_vmspace->vm_pmap.pm_segtab[(va >> SEGSHIFT) & (NPDEPG - 1)]));
1393 ptep = pmap_pte(&p->p_vmspace->vm_pmap, va);
1395 ptep = (pt_entry_t *)0;
/*
 * Log an illegal-instruction event: offending process identity, the
 * faulting PC (adjusted for branch delay slots), a full register dump,
 * the page-table mapping of the PC, and — when the PC is word-aligned
 * and user-readable — the four instruction words starting at it.
 * NOTE(review): locals, braces and some alternative branches were
 * dropped from this excerpt — the visible lines are NOT contiguous.
 */
1402 log_illegal_instruction(const char *msg, struct trapframe *frame)
1415 printf("cpuid = %d\n", PCPU_GET(cpuid));
/* If the fault hit a delay slot, the offending instruction is at pc+4. */
1417 pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1418 log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx ra %#jx\n",
1419 msg, p->p_pid, (long)td->td_tid, p->p_comm,
1420 p->p_ucred ? p->p_ucred->cr_uid : -1,
1422 (intmax_t)frame->ra);
1424 /* log registers in trap frame */
1425 log_frame_dump(frame);
1427 get_mapping_info((vm_offset_t)pc, &pdep, &ptep);
1430 * Dump a few words around faulting instruction, if the addres is
1434 useracc((caddr_t)(intptr_t)pc, sizeof(int) * 4, VM_PROT_READ)) {
1435 /* dump page table entry for faulting instruction */
1436 log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
1437 (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1439 addr = (unsigned int *)(intptr_t)pc;
1440 log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
1442 log(LOG_ERR, "%08x %08x %08x %08x\n",
1443 addr[0], addr[1], addr[2], addr[3]);
1445 log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
1446 (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
/*
 * log_bad_page_fault: log diagnostics for a fatal page fault: classify the
 * access as read/write from the trap type, report pc, badvaddr and process
 * identity, dump the trap frame, and log page-table mapping info for both
 * the faulting pc and the bad address.  If the pc itself is aligned,
 * distinct from badvaddr, not an ifetch bus error, and user-readable, also
 * dump the four instruction words at the pc.
 * NOTE(review): partial extraction; local declarations, some case labels of
 * the switch, and surrounding braces are not visible in this chunk.
 */
1451 log_bad_page_fault(char *msg, struct trapframe *frame, int trap_type)
1458 char *read_or_write;
/* Strip the user-mode flag so the switch sees the base trap type. */
1461 trap_type &= ~T_USER;
1467 printf("cpuid = %d\n", PCPU_GET(cpuid));
/* Classify the faulting access for the log message. */
1469 switch (trap_type) {
1473 read_or_write = "write";
1477 case T_BUS_ERR_IFETCH:
1478 read_or_write = "read";
/* default: trap type not recognized as a plain read or write. */
1481 read_or_write = "unknown";
/* In a branch-delay slot the faulting instruction is at pc + 4. */
1484 pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1485 log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx got a %s fault "
1486 "(type %#x) at %#jx\n",
1487 msg, p->p_pid, (long)td->td_tid, p->p_comm,
/* Credentials may be absent during early process setup; log -1 then. */
1488 p->p_ucred ? p->p_ucred->cr_uid : -1,
1492 (intmax_t)frame->badvaddr);
1494 /* log registers in trap frame */
1495 log_frame_dump(frame);
/* Look up the pde/pte mapping the faulting pc, for the logs below. */
1497 get_mapping_info((vm_offset_t)pc, &pdep, &ptep);
1500 * Dump a few words around faulting instruction, if the address is
/* Guard every condition needed to safely read 4 words at the pc. */
1503 if (!(pc & 3) && (pc != frame->badvaddr) &&
1504 (trap_type != T_BUS_ERR_IFETCH) &&
1505 useracc((caddr_t)(intptr_t)pc, sizeof(int) * 4, VM_PROT_READ)) {
1506 /* dump page table entry for faulting instruction */
1507 log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
1508 (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1510 addr = (unsigned int *)(intptr_t)pc;
1511 log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
1513 log(LOG_ERR, "%08x %08x %08x %08x\n",
1514 addr[0], addr[1], addr[2], addr[3]);
/* else-branch (pc not safely readable): log mapping info only. */
1516 log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
1517 (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
/* Also report the mapping of the bad address itself. */
1520 get_mapping_info((vm_offset_t)frame->badvaddr, &pdep, &ptep);
1521 log(LOG_ERR, "Page table info for bad address %#jx: pde = %p, pte = %#jx\n",
1522 (intmax_t)frame->badvaddr, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1527 * Unaligned load/store emulation
/*
 * mips_unaligned_load_store: decode the instruction at 'pc' and emulate an
 * unaligned halfword/word/doubleword load or store at 'addr', byte-by-byte
 * via the lb/sb/lwl/lwr/ldl/ldr/swl/swr/sdl/sdr macros.  Results are
 * written into (loads) or read from (stores) the trap-frame register array.
 * Returns a MIPS_*_ACCESS code identifying what was emulated.
 * NOTE(review): partial extraction — the case labels of the second switch,
 * the intermediate 'addr += 1' adjustments between byte accesses, and the
 * size assignments in the first switch are not visible in this chunk.
 */
1530 mips_unaligned_load_store(struct trapframe *frame, int mode, register_t addr, register_t pc)
/* The trap frame is addressed as a flat array of saved registers. */
1532 register_t *reg = (register_t *) frame;
/* Fetch the faulting instruction word for decoding. */
1533 u_int32_t inst = *((u_int32_t *)(intptr_t)pc);
1534 register_t value_msb, value;
1538 * ADDR_ERR faults have higher priority than TLB
1539 * Miss faults. Therefore, it is necessary to
1540 * verify that the faulting address is a valid
1541 * virtual address within the process' address space
1542 * before trying to emulate the unaligned access.
/* First switch: derive the access size from the opcode. */
1544 switch (MIPS_INST_OPCODE(inst)) {
1545 case OP_LHU: case OP_LH:
1549 case OP_LWU: case OP_LW:
/* default: opcode we do not emulate — report and bail out. */
1558 printf("%s: unhandled opcode in address error: %#x\n", __func__, MIPS_INST_OPCODE(inst));
/* Verify the full (possibly page-straddling) span is user-accessible. */
1562 if (!useracc((void *)rounddown2((vm_offset_t)addr, size), size * 2, mode))
1567 * Handle LL/SC LLD/SCD.
/* Second switch: perform the actual emulation per opcode. */
1569 switch (MIPS_INST_OPCODE(inst)) {
/* (OP_LHU) unsigned halfword load: two byte loads, MSB first. */
1571 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1572 lbu_macro(value_msb, addr);
1574 lbu_macro(value, addr);
1575 value |= value_msb << 8;
1576 reg[MIPS_INST_RT(inst)] = value;
1577 return (MIPS_LHU_ACCESS);
/* (OP_LH) signed halfword load: sign-extending byte load for the MSB. */
1580 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1581 lb_macro(value_msb, addr);
1583 lbu_macro(value, addr);
1584 value |= value_msb << 8;
1585 reg[MIPS_INST_RT(inst)] = value;
1586 return (MIPS_LH_ACCESS);
/* (OP_LWU) unsigned word load via lwl/lwr pair; mask to 32 bits. */
1589 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1590 lwl_macro(value, addr);
1592 lwr_macro(value, addr);
1593 value &= 0xffffffff;
1594 reg[MIPS_INST_RT(inst)] = value;
1595 return (MIPS_LWU_ACCESS);
/* (OP_LW) word load via lwl/lwr pair. */
1598 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1599 lwl_macro(value, addr);
1601 lwr_macro(value, addr);
1602 reg[MIPS_INST_RT(inst)] = value;
1603 return (MIPS_LW_ACCESS);
/* Doubleword loads exist only on 64-bit-register ABIs. */
1605 #if defined(__mips_n32) || defined(__mips_n64)
/* (OP_LD) doubleword load via ldl/ldr pair. */
1607 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1608 ldl_macro(value, addr);
1610 ldr_macro(value, addr);
1611 reg[MIPS_INST_RT(inst)] = value;
1612 return (MIPS_LD_ACCESS);
/* (OP_SH) halfword store: split the register into two byte stores. */
1616 KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1617 value = reg[MIPS_INST_RT(inst)];
1618 value_msb = value >> 8;
1619 sb_macro(value_msb, addr);
1621 sb_macro(value, addr);
1622 return (MIPS_SH_ACCESS);
/* (OP_SW) word store via swl/swr pair. */
1625 KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1626 value = reg[MIPS_INST_RT(inst)];
1627 swl_macro(value, addr);
1629 swr_macro(value, addr);
1630 return (MIPS_SW_ACCESS);
1632 #if defined(__mips_n32) || defined(__mips_n64)
/* (OP_SD) doubleword store via sdl/sdr pair (64-bit ABIs only). */
1634 KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1635 value = reg[MIPS_INST_RT(inst)];
1636 sdl_macro(value, addr);
1638 sdr_macro(value, addr);
1639 return (MIPS_SD_ACCESS);
/* Every emulated opcode returns above; reaching here is a kernel bug. */
1642 panic("%s: should not be reached.", __func__);
/* Rate-limit state for userland unaligned-access log messages. */
1649 static struct timeval unaligned_lasterr;
/* Count of messages suppressed/emitted in the current interval. */
1650 static int unaligned_curerr;
/* Maximum log messages per second; 0 disables, -1 is unlimited. */
1652 static int unaligned_pps_log_limit = 4;
/* Expose the limit as machdep.unaligned_log_pps_limit (runtime + tunable). */
1654 SYSCTL_INT(_machdep, OID_AUTO, unaligned_log_pps_limit, CTLFLAG_RWTUN,
1655 &unaligned_pps_log_limit, 0,
1656 "limit number of userland unaligned log messages per second");
1659 emulate_unaligned_access(struct trapframe *frame, int mode)
1662 int access_type = 0;
1663 struct thread *td = curthread;
1664 struct proc *p = curproc;
1666 pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1669 * Fall through if it's instruction fetch exception
1671 if (!((pc & 3) || (pc == frame->badvaddr))) {
1674 * Handle unaligned load and store
1678 * Return access type if the instruction was emulated.
1679 * Otherwise restore pc and fall through.
1681 access_type = mips_unaligned_load_store(frame,
1682 mode, frame->badvaddr, pc);
1685 if (DELAYBRANCH(frame->cause))
1686 frame->pc = MipsEmulateBranch(frame, frame->pc,
1691 if (ppsratecheck(&unaligned_lasterr,
1692 &unaligned_curerr, unaligned_pps_log_limit)) {
1693 /* XXX TODO: keep global/tid/pid counters? */
1695 "Unaligned %s: pid=%ld (%s), tid=%ld, "
1696 "pc=%#jx, badvaddr=%#jx\n",
1697 access_name[access_type - 1],
1702 (intmax_t)frame->badvaddr);