1 /* $OpenBSD: trap.c,v 1.19 1998/09/30 12:40:41 pefo Exp $ */
4 * SPDX-License-Identifier: BSD-3-Clause
6 * Copyright (c) 1988 University of Utah.
7 * Copyright (c) 1992, 1993
8 * The Regents of the University of California. All rights reserved.
10 * This code is derived from software contributed to Berkeley by
11 * the Systems Programming Group of the University of Utah Computer
12 * Science Department and Ralph Campbell.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * from: Utah Hdr: trap.c 1.32 91/04/06
40 * from: @(#)trap.c 8.5 (Berkeley) 1/11/94
41 * JNPR: trap.c,v 1.13.2.2 2007/08/29 10:03:49 girish
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
47 #include "opt_ktrace.h"
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/sysent.h>
53 #include <sys/kernel.h>
55 #include <sys/signalvar.h>
56 #include <sys/syscall.h>
59 #include <vm/vm_extern.h>
60 #include <vm/vm_kern.h>
61 #include <vm/vm_page.h>
62 #include <vm/vm_map.h>
63 #include <vm/vm_param.h>
64 #include <sys/vmmeter.h>
65 #include <sys/ptrace.h>
68 #include <sys/vnode.h>
69 #include <sys/sysctl.h>
70 #include <sys/syslog.h>
73 #include <sys/ktrace.h>
75 #include <net/netisr.h>
77 #include <machine/trap.h>
78 #include <machine/cpu.h>
79 #include <machine/cpuinfo.h>
80 #include <machine/pte.h>
81 #include <machine/pmap.h>
82 #include <machine/md_var.h>
83 #include <machine/mips_opcode.h>
84 #include <machine/frame.h>
85 #include <machine/regnum.h>
86 #include <machine/tls.h>
89 #include <machine/db_machdep.h>
90 #include <ddb/db_sym.h>
96 #include <sys/dtrace_bsd.h>
101 SYSCTL_INT(_machdep, OID_AUTO, trap_debug, CTLFLAG_RW,
102 &trap_debug, 0, "Debug information on all traps");
105 #define lbu_macro(data, addr) \
106 __asm __volatile ("lbu %0, 0x0(%1)" \
107 : "=r" (data) /* outputs */ \
108 : "r" (addr)); /* inputs */
110 #define lb_macro(data, addr) \
111 __asm __volatile ("lb %0, 0x0(%1)" \
112 : "=r" (data) /* outputs */ \
113 : "r" (addr)); /* inputs */
115 #define lwl_macro(data, addr) \
116 __asm __volatile ("lwl %0, 0x0(%1)" \
117 : "+r" (data) /* outputs */ \
118 : "r" (addr)); /* inputs */
120 #define lwr_macro(data, addr) \
121 __asm __volatile ("lwr %0, 0x0(%1)" \
122 : "+r" (data) /* outputs */ \
123 : "r" (addr)); /* inputs */
125 #define ldl_macro(data, addr) \
126 __asm __volatile ("ldl %0, 0x0(%1)" \
127 : "+r" (data) /* outputs */ \
128 : "r" (addr)); /* inputs */
130 #define ldr_macro(data, addr) \
131 __asm __volatile ("ldr %0, 0x0(%1)" \
132 : "+r" (data) /* outputs */ \
133 : "r" (addr)); /* inputs */
135 #define sb_macro(data, addr) \
136 __asm __volatile ("sb %0, 0x0(%1)" \
138 : "r" (data), "r" (addr)); /* inputs */
140 #define swl_macro(data, addr) \
141 __asm __volatile ("swl %0, 0x0(%1)" \
143 : "r" (data), "r" (addr)); /* inputs */
145 #define swr_macro(data, addr) \
146 __asm __volatile ("swr %0, 0x0(%1)" \
148 : "r" (data), "r" (addr)); /* inputs */
150 #define sdl_macro(data, addr) \
151 __asm __volatile ("sdl %0, 0x0(%1)" \
153 : "r" (data), "r" (addr)); /* inputs */
155 #define sdr_macro(data, addr) \
156 __asm __volatile ("sdr %0, 0x0(%1)" \
158 : "r" (data), "r" (addr)); /* inputs */
/* Diagnostic helpers defined later in this file. */
static void log_illegal_instruction(const char *, struct trapframe *);
static void log_bad_page_fault(char *, struct trapframe *, int);
static void log_frame_dump(struct trapframe *frame);
static void get_mapping_info(vm_offset_t, pd_entry_t **, pt_entry_t **);

/* Hook installed by the DTrace module to intercept invalid-op traps. */
int (*dtrace_invop_jump_addr)(struct trapframe *);

static void trap_frame_dump(struct trapframe *frame);
/*
 * Low-level exception dispatch table, indexed by CPU exception code:
 * entries 0-31 handle traps taken from kernel mode, 32-63 the same
 * codes taken from user mode.  Most codes funnel into the generic
 * handlers; interrupts and TLB-invalid faults get dedicated entries.
 */
void (*machExceptionTable[]) (void)= {
	/* The kernel exception handlers. */
	MipsKernIntr,		/* external interrupt */
	MipsKernGenException,	/* TLB modification */
	MipsTLBInvalidException,/* TLB miss (load or instr. fetch) */
	MipsTLBInvalidException,/* TLB miss (store) */
	MipsKernGenException,	/* address error (load or I-fetch) */
	MipsKernGenException,	/* address error (store) */
	MipsKernGenException,	/* bus error (I-fetch) */
	MipsKernGenException,	/* bus error (load or store) */
	MipsKernGenException,	/* system call */
	MipsKernGenException,	/* breakpoint */
	MipsKernGenException,	/* reserved instruction */
	MipsKernGenException,	/* coprocessor unusable */
	MipsKernGenException,	/* arithmetic overflow */
	MipsKernGenException,	/* trap exception */
	MipsKernGenException,	/* virtual coherence exception inst */
	MipsKernGenException,	/* floating point exception */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* watch exception */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* reserved */
	MipsKernGenException,	/* virtual coherence exception data */
	/* The user exception handlers. */
	MipsUserIntr,		/* 0 */
	MipsUserGenException,	/* 1 */
	MipsTLBInvalidException,/* 2 */
	MipsTLBInvalidException,/* 3 */
	MipsUserGenException,	/* 4 */
	MipsUserGenException,	/* 5 */
	MipsUserGenException,	/* 6 */
	MipsUserGenException,	/* 7 */
	MipsUserGenException,	/* 8 */
	MipsUserGenException,	/* 9 */
	MipsUserGenException,	/* 10 */
	MipsUserGenException,	/* 11 */
	MipsUserGenException,	/* 12 */
	MipsUserGenException,	/* 13 */
	MipsUserGenException,	/* 14 */
	MipsUserGenException,	/* 15 */
	MipsUserGenException,	/* 16 */
	MipsUserGenException,	/* 17 */
	MipsUserGenException,	/* 18 */
	MipsUserGenException,	/* 19 */
	MipsUserGenException,	/* 20 */
	MipsUserGenException,	/* 21 */
	MipsUserGenException,	/* 22 */
	MipsUserGenException,	/* 23 */
	MipsUserGenException,	/* 24 */
	MipsUserGenException,	/* 25 */
	MipsUserGenException,	/* 26 */
	MipsUserGenException,	/* 27 */
	MipsUserGenException,	/* 28 */
	MipsUserGenException,	/* 29 */
	MipsUserGenException,	/* 30 */
	MipsUserGenException,	/* 31 */
/*
 * Human-readable names for the exception codes, indexed by
 * (type & ~T_USER); used by the debug printfs and trapDump().
 */
char *trap_type[] = {
	"external interrupt",
	"TLB miss (load or instr. fetch)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"virtual coherency instruction",
	"virtual coherency data",
#if !defined(SMP) && (defined(DDB) || defined(DEBUG))
/* Ring buffer of recent traps for debugging; trp is the write cursor. */
struct trapdebug trapdebug[TRAPSIZE], *trp = trapdebug;

/* True for addresses inside the kernel's mapped virtual range. */
#define	KERNLAND(x)	((vm_offset_t)(x) >= VM_MIN_KERNEL_ADDRESS && (vm_offset_t)(x) < VM_MAX_KERNEL_ADDRESS)
/* True when the faulting instruction sat in a branch delay slot. */
#define	DELAYBRANCH(x)	((x) & MIPS_CR_BR_DELAY)

/*
 * MIPS load/store access type
 */
char *access_name[] = {
	"Load Halfword Unsigned",
	"Load Word Unsigned",

#include <machine/octeon_cop2.h>

/* Tunable: emulate unaligned user accesses instead of signalling. */
static int allow_unaligned_acc = 1;

SYSCTL_INT(_vm, OID_AUTO, allow_unaligned_acc, CTLFLAG_RW,
    &allow_unaligned_acc, 0, "Allow unaligned accesses");

/*
 * FP emulation is assumed to work on O32, but the code is outdated and crufty
 * enough that it's a more sensible default to have it disabled when using
 * other ABIs.  At the very least, it needs a lot of help in using
 * type-semantic ABI-oblivious macros for everything it does.
 */
#if defined(__mips_o32)
static int emulate_fp = 1;
static int emulate_fp = 0;
SYSCTL_INT(_machdep, OID_AUTO, emulate_fp, CTLFLAG_RW,
    &emulate_fp, 0, "Emulate unimplemented FPU instructions");

static int emulate_unaligned_access(struct trapframe *frame, int mode);

extern void fswintrberr(void);	/* XXX */
/*
 * Fetch the syscall number and arguments for the current thread from
 * its trap frame: advance the PC past the syscall instruction (via
 * branch emulation when it sat in a delay slot), decode indirect
 * syscalls (SYS_syscall / SYS___syscall), gather register arguments
 * per ABI, and copy any remaining arguments in from the user stack.
 */
cpu_fetch_syscall_args(struct thread *td)
	struct trapframe *locr0;
	struct sysentvec *se;
	struct syscall_args *sa;

	locr0 = td->td_frame;
	bzero(sa->args, sizeof(sa->args));

	/* compute next PC after syscall instruction */
	td->td_pcb->pcb_tpc = sa->trapframe->pc;	/* Remember if restart */
	if (DELAYBRANCH(sa->trapframe->cause))	/* Check BD bit */
		locr0->pc = MipsEmulateBranch(locr0, sa->trapframe->pc, 0, 0);
		locr0->pc += sizeof(int);
	sa->code = locr0->v0;

	/*
	 * This is an indirect syscall, in which the code is the first argument.
	 */
#if (!defined(__mips_n32) && !defined(__mips_n64)) || defined(COMPAT_FREEBSD32)
	if (sa->code == SYS___syscall && SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/*
		 * Like syscall, but code is a quad, so as to maintain alignment
		 * for the rest of the arguments.
		 */
		if (_QUAD_LOWWORD == 0)
			sa->code = locr0->a0;
			sa->code = locr0->a1;
		sa->args[0] = locr0->a2;
		sa->args[1] = locr0->a3;
	/*
	 * This is either not a quad syscall, or is a quad syscall with a
	 * new ABI in which quads fit in a single register.
	 */
	sa->code = locr0->a0;
	sa->args[0] = locr0->a1;
	sa->args[1] = locr0->a2;
	sa->args[2] = locr0->a3;
#if defined(__mips_n32) || defined(__mips_n64)
#ifdef COMPAT_FREEBSD32
	if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/*
		 * Non-o32 ABIs support more arguments in registers.
		 */
		sa->args[3] = locr0->a4;
		sa->args[4] = locr0->a5;
		sa->args[5] = locr0->a6;
		sa->args[6] = locr0->a7;
#ifdef COMPAT_FREEBSD32
	/*
	 * A direct syscall, arguments are just parameters to the syscall.
	 */
	sa->args[0] = locr0->a0;
	sa->args[1] = locr0->a1;
	sa->args[2] = locr0->a2;
	sa->args[3] = locr0->a3;
#if defined (__mips_n32) || defined(__mips_n64)
#ifdef COMPAT_FREEBSD32
	if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/*
		 * Non-o32 ABIs support more arguments in registers.
		 */
		sa->args[4] = locr0->a4;
		sa->args[5] = locr0->a5;
		sa->args[6] = locr0->a6;
		sa->args[7] = locr0->a7;
#ifdef COMPAT_FREEBSD32
	printf("SYSCALL #%d pid:%u\n", sa->code, td->td_proc->p_pid);

	se = td->td_proc->p_sysent;
	/*
	 * Shouldn't this go before switching on the code?
	 */
	/* Out-of-range codes fall back to slot 0 (the nosys entry). */
	if (sa->code >= se->sv_size)
		sa->callp = &se->sv_table[0];
		sa->callp = &se->sv_table[sa->code];
	sa->narg = sa->callp->sy_narg;

	/* Remaining arguments live on the user stack; copy them in. */
	if (sa->narg > nsaved) {
#if defined(__mips_n32) || defined(__mips_n64)
		/*
		 * Is this right for new ABIs? I think the 4 there
		 * should be 8, since there are 8 registers to skip,
		 * not 4, but I'm not certain.
		 */
#ifdef COMPAT_FREEBSD32
		if (!SV_PROC_FLAG(td->td_proc, SV_ILP32))
		printf("SYSCALL #%u pid:%u, narg (%u) > nsaved (%u).\n",
		    sa->code, td->td_proc->p_pid, sa->narg, nsaved);
#if (defined(__mips_n32) || defined(__mips_n64)) && defined(COMPAT_FREEBSD32)
		if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
			/* 32-bit process: fetch one 32-bit word at a time. */
			error = 0;	/* XXX GCC is awful. */
			for (i = nsaved; i < sa->narg; i++) {
				error = copyin((caddr_t)(intptr_t)(locr0->sp +
				    (4 + (i - nsaved)) * sizeof(int32_t)),
				    (caddr_t)&arg, sizeof arg);
			/* Native process: bulk-copy register_t-sized args. */
			error = copyin((caddr_t)(intptr_t)(locr0->sp +
			    4 * sizeof(register_t)), (caddr_t)&sa->args[nsaved],
			    (u_int)(sa->narg - nsaved) * sizeof(register_t));
	td->td_retval[0] = 0;
	td->td_retval[1] = locr0->v1;

/* Shared syscall entry/exit glue (syscallenter()/syscallret()). */
#include "../../kern/subr_syscall.c"
506 * Handle an exception.
507 * Called from MipsKernGenException() or MipsUserGenException()
508 * when a processor trap occurs.
509 * In the case of a kernel trap, we return the pc where to resume if
510 * p->p_addr->u_pcb.pcb_onfault is set, otherwise, return old pc.
513 trap(struct trapframe *trapframe)
518 struct thread *td = curthread;
519 struct proc *p = curproc;
528 register_t *frame_regs;
530 trapdebug_enter(trapframe, 0);
537 type = (trapframe->cause & MIPS_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT;
538 if (TRAPF_USERMODE(trapframe)) {
546 * Enable hardware interrupts if they were on before the trap. If it
547 * was off disable all so we don't accidently enable it when doing a
548 * return to userland.
550 if (trapframe->sr & MIPS_SR_INT_IE) {
551 set_intr_mask(trapframe->sr & MIPS_SR_INT_MASK);
559 static vm_offset_t last_badvaddr = 0;
560 static vm_offset_t this_badvaddr = 0;
561 static int count = 0;
564 printf("trap type %x (%s - ", type,
565 trap_type[type & (~T_USER)]);
568 printf("user mode)\n");
570 printf("kernel mode)\n");
573 printf("cpuid = %d\n", PCPU_GET(cpuid));
575 pid = mips_rd_entryhi() & TLBHI_ASID_MASK;
576 printf("badaddr = %#jx, pc = %#jx, ra = %#jx, sp = %#jx, sr = %jx, pid = %d, ASID = %u\n",
577 (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra,
578 (intmax_t)trapframe->sp, (intmax_t)trapframe->sr,
579 (curproc ? curproc->p_pid : -1), pid);
581 switch (type & ~T_USER) {
587 this_badvaddr = trapframe->badvaddr;
590 this_badvaddr = trapframe->ra;
593 this_badvaddr = trapframe->pc;
596 if ((last_badvaddr == this_badvaddr) &&
597 ((type & ~T_USER) != T_SYSCALL) &&
598 ((type & ~T_USER) != T_COP_UNUSABLE)) {
600 trap_frame_dump(trapframe);
601 panic("too many faults at %p\n", (void *)last_badvaddr);
604 last_badvaddr = this_badvaddr;
612 * A trap can occur while DTrace executes a probe. Before
613 * executing the probe, DTrace blocks re-scheduling and sets
614 * a flag in its per-cpu flags to indicate that it doesn't
615 * want to fault. On returning from the probe, the no-fault
616 * flag is cleared and finally re-scheduling is enabled.
618 * If the DTrace kernel module has registered a trap handler,
619 * call it and if it returns non-zero, assume that it has
620 * handled the trap and modified the trap frame so that this
621 * function can return normally.
624 * XXXDTRACE: add pid probe handler here (if ever)
627 if (dtrace_trap_func != NULL &&
628 (*dtrace_trap_func)(trapframe, type) != 0)
629 return (trapframe->pc);
636 kdb_trap(type, 0, trapframe);
641 /* check for kernel address */
642 if (KERNLAND(trapframe->badvaddr)) {
643 if (pmap_emulate_modified(kernel_pmap,
644 trapframe->badvaddr) != 0) {
645 ftype = VM_PROT_WRITE;
648 return (trapframe->pc);
652 case T_TLB_MOD + T_USER:
653 pmap = &p->p_vmspace->vm_pmap;
654 if (pmap_emulate_modified(pmap, trapframe->badvaddr) != 0) {
655 ftype = VM_PROT_WRITE;
659 return (trapframe->pc);
664 ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
665 /* check for kernel address */
666 if (KERNLAND(trapframe->badvaddr)) {
671 va = (vm_offset_t)trapframe->badvaddr;
672 rv = vm_fault_trap(kernel_map, va, ftype,
673 VM_FAULT_NORMAL, NULL, NULL);
674 if (rv == KERN_SUCCESS)
675 return (trapframe->pc);
676 if (td->td_pcb->pcb_onfault != NULL) {
677 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
678 td->td_pcb->pcb_onfault = NULL;
685 * It is an error for the kernel to access user space except
686 * through the copyin/copyout routines.
688 if (td->td_pcb->pcb_onfault == NULL)
693 case T_TLB_LD_MISS + T_USER:
694 ftype = VM_PROT_READ;
697 case T_TLB_ST_MISS + T_USER:
698 ftype = VM_PROT_WRITE;
708 va = (vm_offset_t)trapframe->badvaddr;
709 if (KERNLAND(trapframe->badvaddr)) {
711 * Don't allow user-mode faults in kernel
717 rv = vm_fault_trap(map, va, ftype, VM_FAULT_NORMAL,
720 * XXXDTRACE: add dtrace_doubletrap_func here?
723 printf("vm_fault(%p (pmap %p), %p (%p), %x, %d) -> %x at pc %p\n",
724 map, &vm->vm_pmap, (void *)va, (void *)(intptr_t)trapframe->badvaddr,
725 ftype, VM_FAULT_NORMAL, rv, (void *)(intptr_t)trapframe->pc);
728 if (rv == KERN_SUCCESS) {
730 return (trapframe->pc);
736 if (td->td_pcb->pcb_onfault != NULL) {
737 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
738 td->td_pcb->pcb_onfault = NULL;
743 addr = trapframe->badvaddr;
745 msg = "BAD_PAGE_FAULT";
746 log_bad_page_fault(msg, trapframe, type);
751 case T_ADDR_ERR_LD + T_USER: /* misaligned or kseg access */
752 case T_ADDR_ERR_ST + T_USER: /* misaligned or kseg access */
753 if (trapframe->badvaddr < 0 ||
754 trapframe->badvaddr >= VM_MAXUSER_ADDRESS) {
755 msg = "ADDRESS_SPACE_ERR";
756 } else if (allow_unaligned_acc) {
759 if (type == (T_ADDR_ERR_LD + T_USER))
762 mode = VM_PROT_WRITE;
764 access_type = emulate_unaligned_access(trapframe, mode);
765 if (access_type != 0)
767 msg = "ALIGNMENT_FIX_ERR";
774 case T_BUS_ERR_IFETCH + T_USER: /* BERR asserted to cpu */
775 case T_BUS_ERR_LD_ST + T_USER: /* BERR asserted to cpu */
776 ucode = 0; /* XXX should be VM_PROT_something */
778 addr = trapframe->pc;
781 log_bad_page_fault(msg, trapframe, type);
784 case T_SYSCALL + T_USER:
786 td->td_sa.trapframe = trapframe;
789 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
790 if (trp == trapdebug)
791 trapdebug[TRAPSIZE - 1].code = td->td_sa.code;
793 trp[-1].code = td->td_sa.code;
795 trapdebug_enter(td->td_frame, -td->td_sa.code);
798 * The sync'ing of I & D caches for SYS_ptrace() is
799 * done by procfs_domem() through procfs_rwmem()
800 * instead of being done here under a special check
804 return (trapframe->pc);
807 #if defined(KDTRACE_HOOKS) || defined(DDB)
810 if (!usermode && dtrace_invop_jump_addr != NULL &&
811 dtrace_invop_jump_addr(trapframe) == 0)
812 return (trapframe->pc);
815 kdb_trap(type, 0, trapframe);
816 return (trapframe->pc);
820 case T_BREAK + T_USER:
828 /* compute address of break instruction */
830 if (DELAYBRANCH(trapframe->cause))
834 if (td->td_md.md_ss_addr != va)
837 /* read break instruction */
838 instr = fuword32((caddr_t)va);
840 if (instr != MIPS_BREAK_SSTEP)
844 "trap: tid %d, single step at %#lx: %#08x",
845 td->td_tid, va, instr);
848 error = ptrace_clear_single_step(td);
856 case T_IWATCH + T_USER:
857 case T_DWATCH + T_USER:
861 /* compute address of trapped instruction */
863 if (DELAYBRANCH(trapframe->cause))
865 printf("watch exception @ %p\n", (void *)va);
872 case T_TRAP + T_USER:
875 struct trapframe *locr0 = td->td_frame;
877 /* compute address of trap instruction */
879 if (DELAYBRANCH(trapframe->cause))
882 if (DELAYBRANCH(trapframe->cause)) { /* Check BD bit */
883 locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0,
886 locr0->pc += sizeof(int);
889 i = SIGEMT; /* Stuff it with something for now */
893 case T_RES_INST + T_USER:
896 inst = *(InstFmt *)(intptr_t)trapframe->pc;
897 switch (inst.RType.op) {
899 switch (inst.RType.func) {
901 /* Register 29 used for TLS */
902 if (inst.RType.rd == 29) {
903 frame_regs = &(trapframe->zero);
904 frame_regs[inst.RType.rt] = (register_t)(intptr_t)td->td_md.md_tls;
905 frame_regs[inst.RType.rt] += td->td_proc->p_md.md_tls_tcb_offset;
906 trapframe->pc += sizeof(int);
914 log_illegal_instruction("RES_INST", trapframe);
916 addr = trapframe->pc;
925 cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT;
926 /* Handle only COP2 exception */
930 addr = trapframe->pc;
931 /* save userland cop2 context if it has been touched */
932 if ((td->td_md.md_flags & MDTD_COP2USED) &&
933 (td->td_md.md_cop2owner == COP2_OWNER_USERLAND)) {
934 if (td->td_md.md_ucop2)
935 octeon_cop2_save(td->td_md.md_ucop2);
937 panic("COP2 was used in user mode but md_ucop2 is NULL");
940 if (td->td_md.md_cop2 == NULL) {
941 td->td_md.md_cop2 = octeon_cop2_alloc_ctx();
942 if (td->td_md.md_cop2 == NULL)
943 panic("Failed to allocate COP2 context");
944 memset(td->td_md.md_cop2, 0, sizeof(*td->td_md.md_cop2));
947 octeon_cop2_restore(td->td_md.md_cop2);
949 /* Make userland re-request its context */
950 td->td_frame->sr &= ~MIPS_SR_COP_2_BIT;
951 td->td_md.md_flags |= MDTD_COP2USED;
952 td->td_md.md_cop2owner = COP2_OWNER_KERNEL;
953 /* Enable COP2, it will be disabled in cpu_switch */
954 mips_wr_status(mips_rd_status() | MIPS_SR_COP_2_BIT);
955 return (trapframe->pc);
961 case T_COP_UNUSABLE + T_USER:
962 cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT;
964 /* FP (COP1) instruction */
965 if (cpuinfo.fpu_id == 0) {
966 log_illegal_instruction("COP1_UNUSABLE",
971 addr = trapframe->pc;
972 MipsSwitchFPState(PCPU_GET(fpcurthread), td->td_frame);
973 PCPU_SET(fpcurthread, td);
974 #if defined(__mips_n32) || defined(__mips_n64)
975 td->td_frame->sr |= MIPS_SR_COP_1_BIT | MIPS_SR_FR;
977 td->td_frame->sr |= MIPS_SR_COP_1_BIT;
979 td->td_md.md_flags |= MDTD_FPUSED;
984 addr = trapframe->pc;
985 if ((td->td_md.md_flags & MDTD_COP2USED) &&
986 (td->td_md.md_cop2owner == COP2_OWNER_KERNEL)) {
987 if (td->td_md.md_cop2)
988 octeon_cop2_save(td->td_md.md_cop2);
990 panic("COP2 was used in kernel mode but md_cop2 is NULL");
993 if (td->td_md.md_ucop2 == NULL) {
994 td->td_md.md_ucop2 = octeon_cop2_alloc_ctx();
995 if (td->td_md.md_ucop2 == NULL)
996 panic("Failed to allocate userland COP2 context");
997 memset(td->td_md.md_ucop2, 0, sizeof(*td->td_md.md_ucop2));
1000 octeon_cop2_restore(td->td_md.md_ucop2);
1002 td->td_frame->sr |= MIPS_SR_COP_2_BIT;
1003 td->td_md.md_flags |= MDTD_COP2USED;
1004 td->td_md.md_cop2owner = COP2_OWNER_USERLAND;
1009 log_illegal_instruction("COPn_UNUSABLE", trapframe);
1010 i = SIGILL; /* only FPU instructions allowed */
1015 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
1018 printf("FPU Trap: PC %#jx CR %x SR %x\n",
1019 (intmax_t)trapframe->pc, (unsigned)trapframe->cause, (unsigned)trapframe->sr);
1023 case T_FPE + T_USER:
1026 addr = trapframe->pc;
1029 MipsFPTrap(trapframe->sr, trapframe->cause, trapframe->pc);
1032 case T_OVFLOW + T_USER:
1034 addr = trapframe->pc;
1037 case T_ADDR_ERR_LD: /* misaligned access */
1038 case T_ADDR_ERR_ST: /* misaligned access */
1041 printf("+++ ADDR_ERR: type = %d, badvaddr = %#jx\n", type,
1042 (intmax_t)trapframe->badvaddr);
1045 /* Only allow emulation on a user address */
1046 if (allow_unaligned_acc &&
1047 ((vm_offset_t)trapframe->badvaddr < VM_MAXUSER_ADDRESS)) {
1050 if (type == T_ADDR_ERR_LD)
1051 mode = VM_PROT_READ;
1053 mode = VM_PROT_WRITE;
1055 access_type = emulate_unaligned_access(trapframe, mode);
1056 if (access_type != 0)
1057 return (trapframe->pc);
1061 case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */
1062 if (td->td_pcb->pcb_onfault != NULL) {
1063 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
1064 td->td_pcb->pcb_onfault = NULL;
1073 #if !defined(SMP) && defined(DEBUG)
1077 printf("cpu:%d-", PCPU_GET(cpuid));
1079 printf("Trap cause = %d (%s - ", type,
1080 trap_type[type & (~T_USER)]);
1083 printf("user mode)\n");
1085 printf("kernel mode)\n");
1089 printf("badvaddr = %#jx, pc = %#jx, ra = %#jx, sr = %#jxx\n",
1090 (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra,
1091 (intmax_t)trapframe->sr);
1095 if (debugger_on_trap) {
1096 kdb_why = KDB_WHY_TRAP;
1097 kdb_trap(type, 0, trapframe);
1098 kdb_why = KDB_WHY_UNSET;
1103 td->td_frame->pc = trapframe->pc;
1104 td->td_frame->cause = trapframe->cause;
1105 td->td_frame->badvaddr = trapframe->badvaddr;
1106 ksiginfo_init_trap(&ksi);
1108 ksi.ksi_code = ucode;
1109 ksi.ksi_addr = (void *)addr;
1110 ksi.ksi_trapno = type & ~T_USER;
1111 trapsignal(td, &ksi);
1115 * Note: we should only get here if returning to user mode.
1117 userret(td, trapframe);
1118 return (trapframe->pc);
#if !defined(SMP) && (defined(DDB) || defined(DEBUG))
	/*
	 * Body of trapDump(): walk the trapdebug ring buffer backwards
	 * from the cursor and print each recorded trap.  The function
	 * header is elided in this extract.
	 */
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* Wrap the cursor around the ring. */
		if (trp == trapdebug) {
			trp = &trapdebug[TRAPSIZE - 1];
		/* An empty slot terminates the dump. */
		if (trp->cause == 0)
		printf("%s: ADR %jx PC %jx CR %jx SR %jx\n",
		    trap_type[(trp->cause & MIPS_CR_EXC_CODE) >>
		    MIPS_CR_EXC_CODE_SHIFT],
		    (intmax_t)trp->vadr, (intmax_t)trp->pc,
		    (intmax_t)trp->cause, (intmax_t)trp->status);
		printf(" RA %jx SP %jx code %d\n", (intmax_t)trp->ra,
		    (intmax_t)trp->sp, (int)trp->code);
/*
 * Return the resulting PC as if the branch was executed.
 * Decodes the branch/jump instruction at instPC (fetched via
 * fuword32() for user addresses, direct dereference for kernel
 * addresses) and computes the successor PC; non-branch instructions
 * fall through to instPC + 4.
 */
MipsEmulateBranch(struct trapframe *framePtr, uintptr_t instPC, int fpcCSR,
	register_t *regsPtr = (register_t *) framePtr;
	uintptr_t retAddr = 0;

/* Branch target: delay-slot PC plus sign-extended immediate << 2. */
#define	GetBranchDest(InstPtr, inst) \
	(InstPtr + 4 + ((short)inst.IType.imm << 2))

	if (instptr < MIPS_KSEG0_START)
		inst.word = fuword32((void *)instptr);
		inst = *(InstFmt *) instptr;
	if ((vm_offset_t)instPC < MIPS_KSEG0_START)
		inst.word = fuword32((void *)instPC);
		inst = *(InstFmt *) instPC;
	switch ((int)inst.JType.op) {
		switch ((int)inst.RType.func) {
			/* jr/jalr: target comes from the rs register. */
			retAddr = regsPtr[inst.RType.rs];
			retAddr = instPC + 4;
		switch ((int)inst.IType.rt) {
			/* bltz family: taken when rs < 0. */
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest(instPC, inst);
				retAddr = instPC + 8;
			/* bgez family: taken when rs >= 0. */
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest(instPC, inst);
				retAddr = instPC + 8;
			retAddr = instPC + 4;	/* Like syscall... */
			panic("MipsEmulateBranch: Bad branch cond");
		/* j/jal: 26-bit target within the current 256MB region. */
		retAddr = (inst.JType.target << 2) |
		    ((unsigned)(instPC + 4) & 0xF0000000);
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest(instPC, inst);
			retAddr = instPC + 8;
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest(instPC, inst);
			retAddr = instPC + 8;
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest(instPC, inst);
			retAddr = instPC + 8;
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest(instPC, inst);
			retAddr = instPC + 8;
		switch (inst.RType.rs) {
			/* bc1t/bc1f: test the FPU condition bit. */
			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
				condition = fpcCSR & MIPS_FPU_COND_BIT;
				condition = !(fpcCSR & MIPS_FPU_COND_BIT);
				retAddr = GetBranchDest(instPC, inst);
				retAddr = instPC + 8;
		retAddr = instPC + 4;
	retAddr = instPC + 4;
1296 log_frame_dump(struct trapframe *frame)
1298 log(LOG_ERR, "Trapframe Register Dump:\n");
1299 log(LOG_ERR, "\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n",
1300 (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1);
1302 log(LOG_ERR, "\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n",
1303 (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3);
1305 #if defined(__mips_n32) || defined(__mips_n64)
1306 log(LOG_ERR, "\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta6: %#jx\n",
1307 (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7);
1309 log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1310 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1312 log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1313 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1315 log(LOG_ERR, "\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n",
1316 (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7);
1318 log(LOG_ERR, "\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n",
1319 (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1);
1321 log(LOG_ERR, "\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n",
1322 (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5);
1324 log(LOG_ERR, "\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n",
1325 (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1);
1327 log(LOG_ERR, "\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n",
1328 (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra);
1330 log(LOG_ERR, "\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n",
1331 (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr);
1333 log(LOG_ERR, "\tcause: %#jx\tpc: %#jx\n",
1334 (intmax_t)frame->cause, (intmax_t)frame->pc);
/*
 * Console (printf) twin of log_frame_dump(): dump every register of a
 * trap frame, used on the panic path for repeated faults.
 */
trap_frame_dump(struct trapframe *frame)
	printf("Trapframe Register Dump:\n");
	printf("\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n",
	    (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1);

	printf("\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n",
	    (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3);
#if defined(__mips_n32) || defined(__mips_n64)
	printf("\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta7: %#jx\n",
	    (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7);

	printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
	    (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);

	/* presumably the o32 #else branch — delimiters elided in extract */
	printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
	    (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);

	printf("\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n",
	    (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7);

	printf("\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n",
	    (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1);

	printf("\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n",
	    (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5);

	printf("\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n",
	    (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1);

	printf("\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n",
	    (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra);

	printf("\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n",
	    (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr);

	printf("\tcause: %#jx\tpc: %#jx\n",
	    (intmax_t)frame->cause, (intmax_t)frame->pc);
/*
 * Look up the page-directory and page-table entries that map va in the
 * current process' pmap.  ptep presumably ends up NULL (via the second
 * assignment) when no PTE exists — TODO confirm; the surrounding
 * conditional lines are elided in this extract.
 */
get_mapping_info(vm_offset_t va, pd_entry_t **pdepp, pt_entry_t **ptepp)
	struct proc *p = curproc;

	pdep = (&(p->p_vmspace->vm_pmap.pm_segtab[(va >> SEGSHIFT) & (NPDEPG - 1)]));
		ptep = pmap_pte(&p->p_vmspace->vm_pmap, va);
		ptep = (pt_entry_t *)0;
/*
 * Log diagnostics for an illegal-instruction trap: the offending
 * process/thread, the full register frame, the page-table mapping of
 * the PC, and (when readable) the four instruction words at the PC.
 */
log_illegal_instruction(const char *msg, struct trapframe *frame)
	unsigned int *addr, instr[4];

	printf("cpuid = %d\n", PCPU_GET(cpuid));
	/* Report the real faulting PC when it sat in a delay slot. */
	pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
	log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx ra %#jx\n",
	    msg, p->p_pid, (long)td->td_tid, p->p_comm,
	    p->p_ucred ? p->p_ucred->cr_uid : -1,
	    (intmax_t)frame->ra);

	/* log registers in trap frame */
	log_frame_dump(frame);

	get_mapping_info((vm_offset_t)pc, &pdep, &ptep);

	/*
	 * Dump a few words around faulting instruction, if the address is
	 * valid.
	 */
	addr = (unsigned int *)(intptr_t)pc;
	if ((pc & 3) == 0 && copyin(addr, instr, sizeof(instr)) == 0) {
		/* dump page table entry for faulting instruction */
		log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
		log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
		log(LOG_ERR, "%08x %08x %08x %08x\n",
		    instr[0], instr[1], instr[2], instr[3]);
		log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
/*
 * Log diagnostics for an unhandled user page fault: classify the access
 * as read/write from the (kernel-space) trap type, report process
 * identity, pc and bad virtual address, dump the trap frame, the
 * mapping of the pc (with a 4-word instruction dump when readable),
 * and finally the mapping of the faulting address itself.
 * NOTE(review): braces, case labels, and some declarations are elided
 * in this view.
 */
1448 log_bad_page_fault(char *msg, struct trapframe *frame, int trap_type)
1452 	unsigned int *addr, instr[4];
1455 	char *read_or_write;
	/* Strip the user-mode bit so the switch sees the base trap type. */
1458 	trap_type &= ~T_USER;
1464 	printf("cpuid = %d\n", PCPU_GET(cpuid));
	/* Map the trap type to a human-readable access direction. */
1466 	switch (trap_type) {
1470 		read_or_write = "write";
1474 	case T_BUS_ERR_IFETCH:
1475 		read_or_write = "read";
1478 		read_or_write = "unknown";
	/* If the fault was in a branch delay slot, the real pc is epc + 4. */
1481 	pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1482 	log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx got a %s fault "
1483 	    "(type %#x) at %#jx\n",
1484 	    msg, p->p_pid, (long)td->td_tid, p->p_comm,
	    /* ucred may be absent; report uid -1 in that case. */
1485 	    p->p_ucred ? p->p_ucred->cr_uid : -1,
1489 	    (intmax_t)frame->badvaddr);
1491 	/* log registers in trap frame */
1492 	log_frame_dump(frame);
	/* Fetch PDE/PTE for the faulting pc so the mapping can be reported. */
1494 	get_mapping_info((vm_offset_t)pc, &pdep, &ptep);
1497 	 * Dump a few words around faulting instruction, if the addres is
	/*
	 * Only dump instructions when the pc is word-aligned, is not itself
	 * the bad address, the fault was not an instruction fetch, and the
	 * words are actually readable from user space.
	 */
1500 	addr = (unsigned int *)(intptr_t)pc;
1501 	if ((pc & 3) == 0 && pc != frame->badvaddr &&
1502 	    trap_type != T_BUS_ERR_IFETCH &&
1503 	    copyin((caddr_t)(intptr_t)pc, instr, sizeof(instr)) == 0) {
1504 		/* dump page table entry for faulting instruction */
1505 		log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
1506 		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1508 		log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
1510 		log(LOG_ERR, "%08x %08x %08x %08x\n",
1511 		    instr[0], instr[1], instr[2], instr[3]);
		/* pc not dumpable: still report the (possibly empty) mapping. */
1513 		log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
1514 		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
	/* Also report how (or whether) the bad address itself is mapped. */
1517 	get_mapping_info((vm_offset_t)frame->badvaddr, &pdep, &ptep);
1518 	log(LOG_ERR, "Page table info for bad address %#jx: pde = %p, pte = %#jx\n",
1519 	    (intmax_t)frame->badvaddr, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1524 * Unaligned load/store emulation
/*
 * Emulate an unaligned load or store that raised an address error.
 * 'mode' is the access direction (VM_PROT_READ for loads,
 * VM_PROT_WRITE for stores, per the KASSERTs below), 'addr' the
 * unaligned target, 'pc' the address of the faulting instruction.
 * Returns a MIPS_*_ACCESS code identifying what was emulated.
 * NOTE(review): braces, 'size' assignments, breaks, and the case
 * labels of both switches are elided in this view; opcodes below are
 * inferred from the KASSERT text and the returned access codes.
 */
1527 mips_unaligned_load_store(struct trapframe *frame, int mode, register_t addr, register_t pc)
	/* The trap frame is addressed as an array of saved registers. */
1529 	register_t *reg = (register_t *) frame;
1530 	u_int32_t inst = *((u_int32_t *)(intptr_t)pc);
1531 	register_t value_msb = 0, value = 0;
1535 	 * ADDR_ERR faults have higher priority than TLB
1536 	 * Miss faults.  Therefore, it is necessary to
1537 	 * verify that the faulting address is a valid
1538 	 * virtual address within the process' address space
1539 	 * before trying to emulate the unaligned access.
	/* First switch: derive the access size from the opcode. */
1541 	switch (MIPS_INST_OPCODE(inst)) {
1542 	case OP_LHU: case OP_LH:
1546 	case OP_LWU: case OP_LW:
	/* Unknown opcode: nothing to emulate. */
1555 		printf("%s: unhandled opcode in address error: %#x\n", __func__, MIPS_INST_OPCODE(inst));
	/*
	 * Verify the span (rounded down to 'size' alignment, 2*size long)
	 * is accessible in the requested mode before touching it.
	 */
1559 	if (!useracc((void *)rounddown2((vm_offset_t)addr, size), size * 2, mode))
1564 	 * Handle LL/SC LLD/SCD.
	/* Second switch: perform the emulation per opcode. */
1566 	switch (MIPS_INST_OPCODE(inst)) {
	/* (LHU, label elided) zero-extending halfword load from two bytes. */
1568 		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1569 		lbu_macro(value_msb, addr);
1571 		lbu_macro(value, addr);
1572 		value |= value_msb << 8;
1573 		reg[MIPS_INST_RT(inst)] = value;
1574 		return (MIPS_LHU_ACCESS);
	/* (LH, label elided) sign-extending halfword load: signed high byte. */
1577 		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1578 		lb_macro(value_msb, addr);
1580 		lbu_macro(value, addr);
1581 		value |= value_msb << 8;
1582 		reg[MIPS_INST_RT(inst)] = value;
1583 		return (MIPS_LH_ACCESS);
	/* (LWU, label elided) word load via lwl/lwr, zero-extended by masking. */
1586 		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1587 		lwl_macro(value, addr);
1589 		lwr_macro(value, addr);
1590 		value &= 0xffffffff;
1591 		reg[MIPS_INST_RT(inst)] = value;
1592 		return (MIPS_LWU_ACCESS);
	/* (LW, label elided) word load via lwl/lwr. */
1595 		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1596 		lwl_macro(value, addr);
1598 		lwr_macro(value, addr);
1599 		reg[MIPS_INST_RT(inst)] = value;
1600 		return (MIPS_LW_ACCESS);
	/* 64-bit loads exist only on the 64-bit-register ABIs. */
1602 #if defined(__mips_n32) || defined(__mips_n64)
	/* (LD, label elided) doubleword load via ldl/ldr. */
1604 		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1605 		ldl_macro(value, addr);
1607 		ldr_macro(value, addr);
1608 		reg[MIPS_INST_RT(inst)] = value;
1609 		return (MIPS_LD_ACCESS);
	/* (SH, label elided) halfword store as two byte stores. */
1613 		KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1614 		value = reg[MIPS_INST_RT(inst)];
1615 		value_msb = value >> 8;
1616 		sb_macro(value_msb, addr);
1618 		sb_macro(value, addr);
1619 		return (MIPS_SH_ACCESS);
	/* (SW, label elided) word store via swl/swr. */
1622 		KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1623 		value = reg[MIPS_INST_RT(inst)];
1624 		swl_macro(value, addr);
1626 		swr_macro(value, addr);
1627 		return (MIPS_SW_ACCESS);
1629 #if defined(__mips_n32) || defined(__mips_n64)
	/* (SD, label elided) doubleword store via sdl/sdr. */
1631 		KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1632 		value = reg[MIPS_INST_RT(inst)];
1633 		sdl_macro(value, addr);
1635 		sdr_macro(value, addr);
1636 		return (MIPS_SD_ACCESS);
	/* All decodable opcodes return above; reaching here is a bug. */
1639 	panic("%s: should not be reached.", __func__);
/* Rate-limit state for userland unaligned-access log messages. */
1646 static struct timeval unaligned_lasterr;
1647 static int unaligned_curerr;
/* Max log messages per second; tunable and runtime-writable via sysctl. */
1649 static int unaligned_pps_log_limit = 4;
1651 SYSCTL_INT(_machdep, OID_AUTO, unaligned_log_pps_limit, CTLFLAG_RWTUN,
1652     &unaligned_pps_log_limit, 0,
1653     "limit number of userland unaligned log messages per second");
1656 emulate_unaligned_access(struct trapframe *frame, int mode)
1659 int access_type = 0;
1660 struct thread *td = curthread;
1661 struct proc *p = curproc;
1663 pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1666 * Fall through if it's instruction fetch exception
1668 if (!((pc & 3) || (pc == frame->badvaddr))) {
1671 * Handle unaligned load and store
1675 * Return access type if the instruction was emulated.
1676 * Otherwise restore pc and fall through.
1678 access_type = mips_unaligned_load_store(frame,
1679 mode, frame->badvaddr, pc);
1682 if (DELAYBRANCH(frame->cause))
1683 frame->pc = MipsEmulateBranch(frame, frame->pc,
1688 if (ppsratecheck(&unaligned_lasterr,
1689 &unaligned_curerr, unaligned_pps_log_limit)) {
1690 /* XXX TODO: keep global/tid/pid counters? */
1692 "Unaligned %s: pid=%ld (%s), tid=%ld, "
1693 "pc=%#jx, badvaddr=%#jx\n",
1694 access_name[access_type - 1],
1699 (intmax_t)frame->badvaddr);