1 /* $OpenBSD: trap.c,v 1.19 1998/09/30 12:40:41 pefo Exp $ */
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1992, 1993
6 * The Regents of the University of California. All rights reserved.
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department and Ralph Campbell.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * from: Utah Hdr: trap.c 1.32 91/04/06
38 * from: @(#)trap.c 8.5 (Berkeley) 1/11/94
39 * JNPR: trap.c,v 1.13.2.2 2007/08/29 10:03:49 girish
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
44 #include "opt_compat.h"
46 #include "opt_global.h"
47 #include "opt_ktrace.h"
48 #include "opt_kdtrace.h"
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/sysent.h>
54 #include <sys/kernel.h>
55 #include <sys/signalvar.h>
56 #include <sys/syscall.h>
59 #include <vm/vm_extern.h>
60 #include <vm/vm_kern.h>
61 #include <vm/vm_page.h>
62 #include <vm/vm_map.h>
63 #include <vm/vm_param.h>
64 #include <sys/vmmeter.h>
65 #include <sys/ptrace.h>
68 #include <sys/vnode.h>
69 #include <sys/pioctl.h>
70 #include <sys/sysctl.h>
71 #include <sys/syslog.h>
74 #include <sys/ktrace.h>
76 #include <net/netisr.h>
78 #include <machine/trap.h>
79 #include <machine/cpu.h>
80 #include <machine/pte.h>
81 #include <machine/pmap.h>
82 #include <machine/md_var.h>
83 #include <machine/mips_opcode.h>
84 #include <machine/frame.h>
85 #include <machine/regnum.h>
86 #include <machine/tls.h>
89 #include <machine/db_machdep.h>
90 #include <ddb/db_sym.h>
96 #include <sys/dtrace_bsd.h>
/*
 * NOTE(review): this listing is elided (original line numbers are embedded
 * and show gaps); the comment-block delimiters around the fragments below
 * were lost.  Hook pointers below are NULL until the corresponding module
 * loads and fills them in.
 */
99 * This is a hook which is initialised by the dtrace module
100 * to handle traps which might occur during DTrace probe
/* Trap/double-trap handlers installed by the DTrace module. */
103 dtrace_trap_func_t dtrace_trap_func;
105 dtrace_doubletrap_func_t dtrace_doubletrap_func;
108 * This is a hook which is initialised by the systrace module
109 * when it is loaded. This keeps the DTrace syscall provider
110 * implementation opaque.
112 systrace_probe_func_t systrace_probe_func;
115 * These hooks are necessary for the pid, usdt and fasttrap providers.
117 dtrace_fasttrap_probe_ptr_t dtrace_fasttrap_probe_ptr;
118 dtrace_pid_probe_ptr_t dtrace_pid_probe_ptr;
119 dtrace_return_probe_ptr_t dtrace_return_probe_ptr;
/*
 * machdep.trap_debug: when non-zero, trap() prints diagnostic output for
 * every trap.  NOTE(review): the "int trap_debug" definition itself is
 * elided from this view -- TODO confirm it precedes this sysctl.
 */
124 SYSCTL_INT(_machdep, OID_AUTO, trap_debug, CTLFLAG_RW,
125 &trap_debug, 0, "Debug information on all traps");
/*
 * Single-instruction inline-asm wrappers used by the unaligned-access
 * emulation code (emulate_unaligned_access).  lwl/lwr and sdl/sdr pairs
 * implement the MIPS partial-word load/store idiom.
 *
 * NOTE(review): in this elided listing the store macros (sb/swl/swr/
 * sdl/sdr) are missing their empty output-operand line (": /* no outputs
 * */" at original lines 160, 165, 170, 175, 180) -- verify against the
 * full source before editing.
 */
128 #define lbu_macro(data, addr) \
129 __asm __volatile ("lbu %0, 0x0(%1)" \
130 : "=r" (data) /* outputs */ \
131 : "r" (addr)); /* inputs */
133 #define lb_macro(data, addr) \
134 __asm __volatile ("lb %0, 0x0(%1)" \
135 : "=r" (data) /* outputs */ \
136 : "r" (addr)); /* inputs */
138 #define lwl_macro(data, addr) \
139 __asm __volatile ("lwl %0, 0x0(%1)" \
140 : "=r" (data) /* outputs */ \
141 : "r" (addr)); /* inputs */
143 #define lwr_macro(data, addr) \
144 __asm __volatile ("lwr %0, 0x0(%1)" \
145 : "=r" (data) /* outputs */ \
146 : "r" (addr)); /* inputs */
148 #define ldl_macro(data, addr) \
149 __asm __volatile ("ldl %0, 0x0(%1)" \
150 : "=r" (data) /* outputs */ \
151 : "r" (addr)); /* inputs */
153 #define ldr_macro(data, addr) \
154 __asm __volatile ("ldr %0, 0x0(%1)" \
155 : "=r" (data) /* outputs */ \
156 : "r" (addr)); /* inputs */
158 #define sb_macro(data, addr) \
159 __asm __volatile ("sb %0, 0x0(%1)" \
161 : "r" (data), "r" (addr)); /* inputs */
163 #define swl_macro(data, addr) \
164 __asm __volatile ("swl %0, 0x0(%1)" \
166 : "r" (data), "r" (addr)); /* inputs */
168 #define swr_macro(data, addr) \
169 __asm __volatile ("swr %0, 0x0(%1)" \
171 : "r" (data), "r" (addr)); /* inputs */
173 #define sdl_macro(data, addr) \
174 __asm __volatile ("sdl %0, 0x0(%1)" \
176 : "r" (data), "r" (addr)); /* inputs */
178 #define sdr_macro(data, addr) \
179 __asm __volatile ("sdr %0, 0x0(%1)" \
181 : "r" (data), "r" (addr)); /* inputs */
/* Diagnostic helpers defined later in this file. */
183 static void log_illegal_instruction(const char *, struct trapframe *);
184 static void log_bad_page_fault(char *, struct trapframe *, int);
185 static void log_frame_dump(struct trapframe *frame);
/* Look up the pde/pte mapping a faulting address in curproc's pmap. */
186 static void get_mapping_info(vm_offset_t, pd_entry_t **, pt_entry_t **);
189 static void trap_frame_dump(struct trapframe *frame);
/*
 * Exception vector dispatch table, indexed by the 5-bit ExcCode from the
 * CP0 cause register: entries 0-31 are the kernel-mode handlers, entries
 * 32-63 (offset by T_USER) the user-mode handlers.
 */
192 void (*machExceptionTable[]) (void)= {
194 * The kernel exception handlers.
196 MipsKernIntr, /* external interrupt */
197 MipsKernGenException, /* TLB modification */
198 MipsTLBInvalidException,/* TLB miss (load or instr. fetch) */
199 MipsTLBInvalidException,/* TLB miss (store) */
200 MipsKernGenException, /* address error (load or I-fetch) */
201 MipsKernGenException, /* address error (store) */
202 MipsKernGenException, /* bus error (I-fetch) */
203 MipsKernGenException, /* bus error (load or store) */
204 MipsKernGenException, /* system call */
205 MipsKernGenException, /* breakpoint */
206 MipsKernGenException, /* reserved instruction */
207 MipsKernGenException, /* coprocessor unusable */
208 MipsKernGenException, /* arithmetic overflow */
209 MipsKernGenException, /* trap exception */
210 MipsKernGenException, /* virtual coherence exception inst */
211 MipsKernGenException, /* floating point exception */
212 MipsKernGenException, /* reserved */
213 MipsKernGenException, /* reserved */
214 MipsKernGenException, /* reserved */
215 MipsKernGenException, /* reserved */
216 MipsKernGenException, /* reserved */
217 MipsKernGenException, /* reserved */
218 MipsKernGenException, /* reserved */
219 MipsKernGenException, /* watch exception */
220 MipsKernGenException, /* reserved */
221 MipsKernGenException, /* reserved */
222 MipsKernGenException, /* reserved */
223 MipsKernGenException, /* reserved */
224 MipsKernGenException, /* reserved */
225 MipsKernGenException, /* reserved */
226 MipsKernGenException, /* reserved */
227 MipsKernGenException, /* virtual coherence exception data */
229 * The user exception handlers.
231 MipsUserIntr, /* 0 */
232 MipsUserGenException, /* 1 */
233 MipsTLBInvalidException,/* 2 */
234 MipsTLBInvalidException,/* 3 */
235 MipsUserGenException, /* 4 */
236 MipsUserGenException, /* 5 */
237 MipsUserGenException, /* 6 */
238 MipsUserGenException, /* 7 */
239 MipsUserGenException, /* 8 */
240 MipsUserGenException, /* 9 */
241 MipsUserGenException, /* 10 */
242 MipsUserGenException, /* 11 */
243 MipsUserGenException, /* 12 */
244 MipsUserGenException, /* 13 */
245 MipsUserGenException, /* 14 */
246 MipsUserGenException, /* 15 */
247 MipsUserGenException, /* 16 */
248 MipsUserGenException, /* 17 */
249 MipsUserGenException, /* 18 */
250 MipsUserGenException, /* 19 */
251 MipsUserGenException, /* 20 */
252 MipsUserGenException, /* 21 */
253 MipsUserGenException, /* 22 */
254 MipsUserGenException, /* 23 */
255 MipsUserGenException, /* 24 */
256 MipsUserGenException, /* 25 */
257 MipsUserGenException, /* 26 */
258 MipsUserGenException, /* 27 */
259 MipsUserGenException, /* 28 */
260 MipsUserGenException, /* 29 */
261 MipsUserGenException, /* 30 */
262 MipsUserGenException, /* 31 */
/*
 * Human-readable names for each exception code, indexed like
 * machExceptionTable (ExcCode with T_USER masked off).
 *
 * NOTE(review): several entries are elided from this listing (embedded
 * line numbers jump 266->268, 273->276, 280->297), so the visible entries
 * are NOT index-aligned here; consult the full source before relying on
 * positions.
 */
265 char *trap_type[] = {
266 "external interrupt",
268 "TLB miss (load or instr. fetch)",
270 "address error (load or I-fetch)",
271 "address error (store)",
272 "bus error (I-fetch)",
273 "bus error (load or store)",
276 "reserved instruction",
277 "coprocessor unusable",
278 "arithmetic overflow",
280 "virtual coherency instruction",
297 "virtual coherency data",
/* Circular trap-history buffer, kept only on non-SMP debug kernels. */
300 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
301 struct trapdebug trapdebug[TRAPSIZE], *trp = trapdebug;
304 #if defined(DDB) || defined(DEBUG)
305 void stacktrace(struct trapframe *);
306 void logstacktrace(struct trapframe *);
/* True iff x lies in the kernel's mapped virtual address range. */
309 #define KERNLAND(x) ((vm_offset_t)(x) >= VM_MIN_KERNEL_ADDRESS && (vm_offset_t)(x) < VM_MAX_KERNEL_ADDRESS)
/*
 * True iff the trap happened in a branch delay slot: the BD bit is the
 * sign bit of the CP0 cause register, so a simple signed test suffices.
 */
310 #define DELAYBRANCH(x) ((int)(x) < 0)
312 * MIPS load/store access type
/* NOTE(review): most access_name[] entries are elided from this view. */
326 char *access_name[] = {
327 "Load Halfword Unsigned",
329 "Load Word Unsigned",
338 #include <machine/octeon_cop2.h>
/* vm.allow_unaligned_acc: emulate (1) or signal (0) unaligned accesses. */
341 static int allow_unaligned_acc = 1;
343 SYSCTL_INT(_vm, OID_AUTO, allow_unaligned_acc, CTLFLAG_RW,
344 &allow_unaligned_acc, 0, "Allow unaligned accesses");
347 * FP emulation is assumed to work on O32, but the code is outdated and crufty
348 * enough that it's a more sensible default to have it disabled when using
349 * other ABIs. At the very least, it needs a lot of help in using
350 * type-semantic ABI-oblivious macros for everything it does.
/* NOTE(review): the #else/#endif around these two defaults are elided. */
352 #if defined(__mips_o32)
353 static int emulate_fp = 1;
355 static int emulate_fp = 0;
357 SYSCTL_INT(_machdep, OID_AUTO, emulate_fp, CTLFLAG_RW,
358 &emulate_fp, 0, "Emulate unimplemented FPU instructions");
360 static int emulate_unaligned_access(struct trapframe *frame, int mode);
/* Assembly fixup target used as the pcb_onfault marker for fuswintr(). */
362 extern void fswintrberr(void); /* XXX */
/*
 * Decode a syscall trap: advance the PC past the syscall instruction
 * (emulating the branch if it sat in a delay slot), extract the syscall
 * code from v0, gather arguments from the a-registers and -- when there
 * are more arguments than registers -- copy the remainder in from the
 * user stack.  Handles the SYS___syscall quad-code indirection for ILP32.
 *
 * NOTE(review): this listing is elided (braces, #else/#endif and several
 * statements missing); tokens below are reproduced verbatim.
 */
365 cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
367 struct trapframe *locr0 = td->td_frame;
368 struct sysentvec *se;
371 bzero(sa->args, sizeof(sa->args));
373 /* compute next PC after syscall instruction */
374 td->td_pcb->pcb_tpc = sa->trapframe->pc; /* Remember if restart */
375 if (DELAYBRANCH(sa->trapframe->cause)) /* Check BD bit */
376 locr0->pc = MipsEmulateBranch(locr0, sa->trapframe->pc, 0, 0);
378 locr0->pc += sizeof(int);
379 sa->code = locr0->v0;
385 * This is an indirect syscall, in which the code is the first argument.
387 #if (!defined(__mips_n32) && !defined(__mips_n64)) || defined(COMPAT_FREEBSD32)
388 if (sa->code == SYS___syscall && SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
390 * Like syscall, but code is a quad, so as to maintain alignment
391 * for the rest of the arguments.
/* Pick the 32-bit half of the quad that holds the code (endian-dependent). */
393 if (_QUAD_LOWWORD == 0)
394 sa->code = locr0->a0;
396 sa->code = locr0->a1;
397 sa->args[0] = locr0->a2;
398 sa->args[1] = locr0->a3;
404 * This is either not a quad syscall, or is a quad syscall with a
405 * new ABI in which quads fit in a single register.
407 sa->code = locr0->a0;
408 sa->args[0] = locr0->a1;
409 sa->args[1] = locr0->a2;
410 sa->args[2] = locr0->a3;
412 #if defined(__mips_n32) || defined(__mips_n64)
413 #ifdef COMPAT_FREEBSD32
414 if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
417 * Non-o32 ABIs support more arguments in registers.
419 sa->args[3] = locr0->a4;
420 sa->args[4] = locr0->a5;
421 sa->args[5] = locr0->a6;
422 sa->args[6] = locr0->a7;
424 #ifdef COMPAT_FREEBSD32
431 * A direct syscall, arguments are just parameters to the syscall.
433 sa->args[0] = locr0->a0;
434 sa->args[1] = locr0->a1;
435 sa->args[2] = locr0->a2;
436 sa->args[3] = locr0->a3;
438 #if defined (__mips_n32) || defined(__mips_n64)
439 #ifdef COMPAT_FREEBSD32
440 if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
443 * Non-o32 ABIs support more arguments in registers.
445 sa->args[4] = locr0->a4;
446 sa->args[5] = locr0->a5;
447 sa->args[6] = locr0->a6;
448 sa->args[7] = locr0->a7;
450 #ifdef COMPAT_FREEBSD32
459 printf("SYSCALL #%d pid:%u\n", sa->code, td->td_proc->p_pid);
462 se = td->td_proc->p_sysent;
465 * Shouldn't this go before switching on the code?
468 sa->code &= se->sv_mask;
/* Out-of-range codes dispatch to entry 0 (the "indirect syscall" slot). */
470 if (sa->code >= se->sv_size)
471 sa->callp = &se->sv_table[0];
473 sa->callp = &se->sv_table[sa->code];
475 sa->narg = sa->callp->sy_narg;
/* Remaining arguments live on the user stack; copy them in. */
477 if (sa->narg > nsaved) {
478 #if defined(__mips_n32) || defined(__mips_n64)
481 * Is this right for new ABIs? I think the 4 there
482 * should be 8, since there are 8 registers to skip,
483 * not 4, but I'm not certain.
485 #ifdef COMPAT_FREEBSD32
486 if (!SV_PROC_FLAG(td->td_proc, SV_ILP32))
488 printf("SYSCALL #%u pid:%u, narg (%u) > nsaved (%u).\n",
489 sa->code, td->td_proc->p_pid, sa->narg, nsaved);
491 #if (defined(__mips_n32) || defined(__mips_n64)) && defined(COMPAT_FREEBSD32)
492 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
496 error = 0; /* XXX GCC is awful. */
497 for (i = nsaved; i < sa->narg; i++) {
498 error = copyin((caddr_t)(intptr_t)(locr0->sp +
499 (4 + (i - nsaved)) * sizeof(int32_t)),
500 (caddr_t)&arg, sizeof arg);
507 error = copyin((caddr_t)(intptr_t)(locr0->sp +
508 4 * sizeof(register_t)), (caddr_t)&sa->args[nsaved],
509 (u_int)(sa->narg - nsaved) * sizeof(register_t));
518 td->td_retval[0] = 0;
519 td->td_retval[1] = locr0->v1;
527 #include "../../kern/subr_syscall.c"
530 * Handle an exception.
531 * Called from MipsKernGenException() or MipsUserGenException()
532 * when a processor trap occurs.
533 * In the case of a kernel trap, we return the pc where to resume if
534 * p->p_addr->u_pcb.pcb_onfault is set, otherwise, return old pc.
/*
 * Central trap handler, reached from MipsKernGenException() /
 * MipsUserGenException().  Dispatches on the exception code: TLB
 * modified/miss faults go to the pmap and vm_fault(), syscalls to
 * syscallenter(), and the rest (breakpoints, unaligned accesses, FPU and
 * COP2 traps, bus errors) are emulated or turned into signals.  Returns
 * the PC at which execution should resume.
 *
 * NOTE(review): this listing is elided (case labels, braces and
 * #else/#endif lines are missing); apart from the %#jxx format-string
 * fix and two comment typo fixes below, tokens are reproduced verbatim.
 */
537 trap(struct trapframe *trapframe)
542 struct thread *td = curthread;
543 struct proc *p = curproc;
552 register_t *frame_regs;
554 trapdebug_enter(trapframe, 0);
556 type = (trapframe->cause & MIPS_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT;
557 if (TRAPF_USERMODE(trapframe)) {
565 * Enable hardware interrupts if they were on before the trap. If it
566 * was off disable all so we don't accidentally enable it when doing a
567 * return to userland.
569 if (trapframe->sr & MIPS_SR_INT_IE) {
570 set_intr_mask(trapframe->sr & MIPS_SR_INT_MASK);
/* trap_debug diagnostics: detect the same fault address repeating. */
578 static vm_offset_t last_badvaddr = 0;
579 static vm_offset_t this_badvaddr = 0;
580 static int count = 0;
583 printf("trap type %x (%s - ", type,
584 trap_type[type & (~T_USER)]);
587 printf("user mode)\n");
589 printf("kernel mode)\n");
592 printf("cpuid = %d\n", PCPU_GET(cpuid));
594 pid = mips_rd_entryhi() & TLBHI_ASID_MASK;
595 printf("badaddr = %#jx, pc = %#jx, ra = %#jx, sp = %#jx, sr = %jx, pid = %d, ASID = %u\n",
596 (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra,
597 (intmax_t)trapframe->sp, (intmax_t)trapframe->sr,
598 (curproc ? curproc->p_pid : -1), pid);
600 switch (type & ~T_USER) {
606 this_badvaddr = trapframe->badvaddr;
609 this_badvaddr = trapframe->ra;
612 this_badvaddr = trapframe->pc;
615 if ((last_badvaddr == this_badvaddr) &&
616 ((type & ~T_USER) != T_SYSCALL)) {
618 trap_frame_dump(trapframe);
619 panic("too many faults at %p\n", (void *)last_badvaddr);
622 last_badvaddr = this_badvaddr;
630 * A trap can occur while DTrace executes a probe. Before
631 * executing the probe, DTrace blocks re-scheduling and sets
632 * a flag in it's per-cpu flags to indicate that it doesn't
633 * want to fault. On returning from the probe, the no-fault
634 * flag is cleared and finally re-scheduling is enabled.
636 * If the DTrace kernel module has registered a trap handler,
637 * call it and if it returns non-zero, assume that it has
638 * handled the trap and modified the trap frame so that this
639 * function can return normally.
642 * XXXDTRACE: add fasttrap and pid probes handlers here (if ever)
645 if (dtrace_trap_func != NULL && (*dtrace_trap_func)(trapframe, type))
646 return (trapframe->pc);
653 kdb_trap(type, 0, trapframe);
/* T_TLB_MOD: write to a clean (non-dirty) TLB entry. */
658 /* check for kernel address */
659 if (KERNLAND(trapframe->badvaddr)) {
660 if (pmap_emulate_modified(kernel_pmap,
661 trapframe->badvaddr) != 0) {
662 ftype = VM_PROT_WRITE;
665 return (trapframe->pc);
669 case T_TLB_MOD + T_USER:
670 pmap = &p->p_vmspace->vm_pmap;
671 if (pmap_emulate_modified(pmap, trapframe->badvaddr) != 0) {
672 ftype = VM_PROT_WRITE;
676 return (trapframe->pc);
/* Kernel-mode TLB miss: resolve through vm_fault() or pcb_onfault. */
681 ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
682 /* check for kernel address */
683 if (KERNLAND(trapframe->badvaddr)) {
688 va = trunc_page((vm_offset_t)trapframe->badvaddr);
689 rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
690 if (rv == KERN_SUCCESS)
691 return (trapframe->pc);
692 if (td->td_pcb->pcb_onfault != NULL) {
693 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
694 td->td_pcb->pcb_onfault = NULL;
701 * It is an error for the kernel to access user space except
702 * through the copyin/copyout routines.
704 if (td->td_pcb->pcb_onfault == NULL)
707 /* check for fuswintr() or suswintr() getting a page fault */
708 /* XXX There must be a nicer way to do this. */
709 if (td->td_pcb->pcb_onfault == fswintrberr) {
710 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
711 td->td_pcb->pcb_onfault = NULL;
717 case T_TLB_LD_MISS + T_USER:
718 ftype = VM_PROT_READ;
721 case T_TLB_ST_MISS + T_USER:
722 ftype = VM_PROT_WRITE;
/* User-mode fault: hand the page fault to the VM system. */
732 va = trunc_page((vm_offset_t)trapframe->badvaddr);
733 if (KERNLAND(trapframe->badvaddr)) {
735 * Don't allow user-mode faults in kernel
742 * Keep swapout from messing with us during this
749 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
755 * XXXDTRACE: add dtrace_doubletrap_func here?
758 printf("vm_fault(%p (pmap %p), %p (%p), %x, %d) -> %x at pc %p\n",
759 map, &vm->vm_pmap, (void *)va, (void *)(intptr_t)trapframe->badvaddr,
760 ftype, VM_FAULT_NORMAL, rv, (void *)(intptr_t)trapframe->pc);
763 if (rv == KERN_SUCCESS) {
765 return (trapframe->pc);
771 if (td->td_pcb->pcb_onfault != NULL) {
772 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
773 td->td_pcb->pcb_onfault = NULL;
/* Unresolvable fault: translate the VM error into SIGBUS/SIGSEGV. */
779 i = ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
780 addr = trapframe->pc;
782 msg = "BAD_PAGE_FAULT";
783 log_bad_page_fault(msg, trapframe, type);
788 case T_ADDR_ERR_LD + T_USER: /* misaligned or kseg access */
789 case T_ADDR_ERR_ST + T_USER: /* misaligned or kseg access */
790 if (trapframe->badvaddr < 0 ||
791 trapframe->badvaddr >= VM_MAXUSER_ADDRESS) {
792 msg = "ADDRESS_SPACE_ERR";
793 } else if (allow_unaligned_acc) {
796 if (type == (T_ADDR_ERR_LD + T_USER))
799 mode = VM_PROT_WRITE;
801 access_type = emulate_unaligned_access(trapframe, mode);
802 if (access_type != 0)
804 msg = "ALIGNMENT_FIX_ERR";
811 case T_BUS_ERR_IFETCH + T_USER: /* BERR asserted to cpu */
812 case T_BUS_ERR_LD_ST + T_USER: /* BERR asserted to cpu */
813 ucode = 0; /* XXX should be VM_PROT_something */
815 addr = trapframe->pc;
818 log_bad_page_fault(msg, trapframe, type);
821 case T_SYSCALL + T_USER:
823 struct syscall_args sa;
826 sa.trapframe = trapframe;
827 error = syscallenter(td, &sa);
829 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
/* Patch the syscall code into the just-recorded trapdebug slot. */
830 if (trp == trapdebug)
831 trapdebug[TRAPSIZE - 1].code = sa.code;
833 trp[-1].code = sa.code;
835 trapdebug_enter(td->td_frame, -sa.code);
838 * The sync'ing of I & D caches for SYS_ptrace() is
839 * done by procfs_domem() through procfs_rwmem()
840 * instead of being done here under a special check
843 syscallret(td, error, &sa);
844 return (trapframe->pc);
849 kdb_trap(type, 0, trapframe);
850 return (trapframe->pc);
853 case T_BREAK + T_USER:
858 /* compute address of break instruction */
860 if (DELAYBRANCH(trapframe->cause))
863 /* read break instruction */
864 instr = fuword32((caddr_t)va);
866 printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
867 p->p_comm, p->p_pid, instr, trapframe->pc,
868 p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
870 if (td->td_md.md_ss_addr != va ||
871 instr != MIPS_BREAK_SSTEP) {
873 addr = trapframe->pc;
877 * The restoration of the original instruction and
878 * the clearing of the breakpoint will be done later
879 * by the call to ptrace_clear_single_step() in
880 * issignal() when SIGTRAP is processed.
882 addr = trapframe->pc;
887 case T_IWATCH + T_USER:
888 case T_DWATCH + T_USER:
892 /* compute address of trapped instruction */
894 if (DELAYBRANCH(trapframe->cause))
896 printf("watch exception @ %p\n", (void *)va);
902 case T_TRAP + T_USER:
906 struct trapframe *locr0 = td->td_frame;
908 /* compute address of trap instruction */
910 if (DELAYBRANCH(trapframe->cause))
912 /* read break instruction */
913 instr = fuword32((caddr_t)va);
915 if (DELAYBRANCH(trapframe->cause)) { /* Check BD bit */
916 locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0,
919 locr0->pc += sizeof(int);
922 i = SIGEMT; /* Stuff it with something for now */
926 case T_RES_INST + T_USER:
/* rdhwr-on-$29 emulation: hand userland its TLS pointer. */
929 inst = *(InstFmt *)(intptr_t)trapframe->pc;
930 switch (inst.RType.op) {
932 switch (inst.RType.func) {
934 /* Register 29 used for TLS */
935 if (inst.RType.rd == 29) {
936 frame_regs = &(trapframe->zero);
937 frame_regs[inst.RType.rt] = (register_t)(intptr_t)td->td_md.md_tls;
938 #if defined(__mips_n64) && defined(COMPAT_FREEBSD32)
939 if (SV_PROC_FLAG(td->td_proc, SV_ILP32))
940 frame_regs[inst.RType.rt] += TLS_TP_OFFSET + TLS_TCB_SIZE32;
943 frame_regs[inst.RType.rt] += TLS_TP_OFFSET + TLS_TCB_SIZE;
944 trapframe->pc += sizeof(int);
952 log_illegal_instruction("RES_INST", trapframe);
954 addr = trapframe->pc;
/* Kernel-mode coprocessor unusable: only COP2 (Octeon) is handled. */
963 cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT;
964 /* Handle only COP2 exception */
968 addr = trapframe->pc;
969 /* save userland cop2 context if it has been touched */
970 if ((td->td_md.md_flags & MDTD_COP2USED) &&
971 (td->td_md.md_cop2owner == COP2_OWNER_USERLAND)) {
972 if (td->td_md.md_ucop2)
973 octeon_cop2_save(td->td_md.md_ucop2);
975 panic("COP2 was used in user mode but md_ucop2 is NULL");
978 if (td->td_md.md_cop2 == NULL) {
979 td->td_md.md_cop2 = octeon_cop2_alloc_ctx();
980 if (td->td_md.md_cop2 == NULL)
981 panic("Failed to allocate COP2 context");
982 memset(td->td_md.md_cop2, 0, sizeof(*td->td_md.md_cop2));
985 octeon_cop2_restore(td->td_md.md_cop2);
987 /* Make userland re-request its context */
988 td->td_frame->sr &= ~MIPS_SR_COP_2_BIT;
989 td->td_md.md_flags |= MDTD_COP2USED;
990 td->td_md.md_cop2owner = COP2_OWNER_KERNEL;
991 /* Enable COP2, it will be disabled in cpu_switch */
992 mips_wr_status(mips_rd_status() | MIPS_SR_COP_2_BIT);
993 return (trapframe->pc);
999 case T_COP_UNUSABLE + T_USER:
1000 cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT;
1002 #if !defined(CPU_HAVEFPU)
1003 /* FP (COP1) instruction */
1004 log_illegal_instruction("COP1_UNUSABLE", trapframe);
/* Lazy FPU context switch: claim the FPU for this thread. */
1008 addr = trapframe->pc;
1009 MipsSwitchFPState(PCPU_GET(fpcurthread), td->td_frame);
1010 PCPU_SET(fpcurthread, td);
1011 td->td_frame->sr |= MIPS_SR_COP_1_BIT;
1012 td->td_md.md_flags |= MDTD_FPUSED;
1017 else if (cop == 2) {
1018 addr = trapframe->pc;
1019 if ((td->td_md.md_flags & MDTD_COP2USED) &&
1020 (td->td_md.md_cop2owner == COP2_OWNER_KERNEL)) {
1021 if (td->td_md.md_cop2)
1022 octeon_cop2_save(td->td_md.md_cop2);
1024 panic("COP2 was used in kernel mode but md_cop2 is NULL");
1027 if (td->td_md.md_ucop2 == NULL) {
1028 td->td_md.md_ucop2 = octeon_cop2_alloc_ctx();
1029 if (td->td_md.md_ucop2 == NULL)
1030 panic("Failed to allocate userland COP2 context");
1031 memset(td->td_md.md_ucop2, 0, sizeof(*td->td_md.md_ucop2));
1034 octeon_cop2_restore(td->td_md.md_ucop2);
1036 td->td_frame->sr |= MIPS_SR_COP_2_BIT;
1037 td->td_md.md_flags |= MDTD_COP2USED;
1038 td->td_md.md_cop2owner = COP2_OWNER_USERLAND;
1043 log_illegal_instruction("COPn_UNUSABLE", trapframe);
1044 i = SIGILL; /* only FPU instructions allowed */
1049 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
1052 printf("FPU Trap: PC %#jx CR %x SR %x\n",
1053 (intmax_t)trapframe->pc, (unsigned)trapframe->cause, (unsigned)trapframe->sr);
1057 case T_FPE + T_USER:
1060 addr = trapframe->pc;
1063 MipsFPTrap(trapframe->sr, trapframe->cause, trapframe->pc);
1066 case T_OVFLOW + T_USER:
1068 addr = trapframe->pc;
1071 case T_ADDR_ERR_LD: /* misaligned access */
1072 case T_ADDR_ERR_ST: /* misaligned access */
1075 printf("+++ ADDR_ERR: type = %d, badvaddr = %#jx\n", type,
1076 (intmax_t)trapframe->badvaddr);
1079 /* Only allow emulation on a user address */
1080 if (allow_unaligned_acc &&
1081 ((vm_offset_t)trapframe->badvaddr < VM_MAXUSER_ADDRESS)) {
1084 if (type == T_ADDR_ERR_LD)
1085 mode = VM_PROT_READ;
1087 mode = VM_PROT_WRITE;
1089 access_type = emulate_unaligned_access(trapframe, mode);
1090 if (access_type != 0)
1091 return (trapframe->pc);
1095 case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */
1096 if (td->td_pcb->pcb_onfault != NULL) {
1097 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
1098 td->td_pcb->pcb_onfault = NULL;
/* Unhandled trap: dump state and drop to the debugger or panic. */
1107 #if !defined(SMP) && defined(DEBUG)
1108 stacktrace(!usermode ? trapframe : td->td_frame);
1112 printf("cpu:%d-", PCPU_GET(cpuid));
1114 printf("Trap cause = %d (%s - ", type,
1115 trap_type[type & (~T_USER)]);
1118 printf("user mode)\n");
1120 printf("kernel mode)\n");
1124 printf("badvaddr = %#jx, pc = %#jx, ra = %#jx, sr = %#jx\n",
1125 (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra,
1126 (intmax_t)trapframe->sr);
1130 if (debugger_on_panic || kdb_active) {
1131 kdb_trap(type, 0, trapframe);
/* Deliver the signal chosen above to the faulting user thread. */
1136 td->td_frame->pc = trapframe->pc;
1137 td->td_frame->cause = trapframe->cause;
1138 td->td_frame->badvaddr = trapframe->badvaddr;
1139 ksiginfo_init_trap(&ksi);
1141 ksi.ksi_code = ucode;
1142 ksi.ksi_addr = (void *)addr;
1143 ksi.ksi_trapno = type;
1144 trapsignal(td, &ksi);
1148 * Note: we should only get here if returning to user mode.
1150 userret(td, trapframe);
1151 return (trapframe->pc);
1154 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
/*
 * Dump the circular trapdebug[] history, newest entry first.
 * NOTE(review): the function signature (void trapDump(char *msg)) and
 * opening brace are elided from this listing.
 */
1162 printf("trapDump(%s)\n", msg);
1163 for (i = 0; i < TRAPSIZE; i++) {
/* Walk backwards through the ring; wrap from the head to the tail. */
1164 if (trp == trapdebug) {
1165 trp = &trapdebug[TRAPSIZE - 1];
/* A zero cause marks an unused slot; stop there. */
1170 if (trp->cause == 0)
1173 printf("%s: ADR %jx PC %jx CR %jx SR %jx\n",
1174 trap_type[(trp->cause & MIPS_CR_EXC_CODE) >>
1175 MIPS_CR_EXC_CODE_SHIFT],
1176 (intmax_t)trp->vadr, (intmax_t)trp->pc,
1177 (intmax_t)trp->cause, (intmax_t)trp->status);
1179 printf(" RA %jx SP %jx code %d\n", (intmax_t)trp->ra,
1180 (intmax_t)trp->sp, (int)trp->code);
1188 * Return the resulting PC as if the branch was executed.
/*
 * Decode the (possibly user-space) instruction at instPC and compute the
 * PC that a branch/jump there would transfer to, using the register values
 * in the trap frame and, for COP1 branches, the given FCSR condition bit.
 * NOTE(review): this listing is elided (case labels, braces and #endif
 * lines missing); tokens below are reproduced verbatim.
 */
1191 MipsEmulateBranch(struct trapframe *framePtr, uintptr_t instPC, int fpcCSR,
/* The trap frame is treated as an array of registers, indexed by regnum. */
1195 register_t *regsPtr = (register_t *) framePtr;
1196 uintptr_t retAddr = 0;
/* Branch target = delay slot address + sign-extended offset * 4. */
1199 #define GetBranchDest(InstPtr, inst) \
1200 (InstPtr + 4 + ((short)inst.IType.imm << 2))
/* Fetch the instruction; user addresses must go through fuword32(). */
1204 if (instptr < MIPS_KSEG0_START)
1205 inst.word = fuword32((void *)instptr);
1207 inst = *(InstFmt *) instptr;
1209 if ((vm_offset_t)instPC < MIPS_KSEG0_START)
1210 inst.word = fuword32((void *)instPC);
1212 inst = *(InstFmt *) instPC;
1215 switch ((int)inst.JType.op) {
1217 switch ((int)inst.RType.func) {
1220 retAddr = regsPtr[inst.RType.rs];
1224 retAddr = instPC + 4;
1230 switch ((int)inst.IType.rt) {
1235 if ((int)(regsPtr[inst.RType.rs]) < 0)
1236 retAddr = GetBranchDest(instPC, inst);
1238 retAddr = instPC + 8;
1245 if ((int)(regsPtr[inst.RType.rs]) >= 0)
1246 retAddr = GetBranchDest(instPC, inst);
1248 retAddr = instPC + 8;
1257 retAddr = instPC + 4; /* Like syscall... */
1261 panic("MipsEmulateBranch: Bad branch cond");
/* J/JAL: target is within the current 256MB segment. */
1267 retAddr = (inst.JType.target << 2) |
1268 ((unsigned)(instPC + 4) & 0xF0000000);
1273 if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
1274 retAddr = GetBranchDest(instPC, inst);
1276 retAddr = instPC + 8;
1281 if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
1282 retAddr = GetBranchDest(instPC, inst);
1284 retAddr = instPC + 8;
1289 if ((int)(regsPtr[inst.RType.rs]) <= 0)
1290 retAddr = GetBranchDest(instPC, inst);
1292 retAddr = instPC + 8;
1297 if ((int)(regsPtr[inst.RType.rs]) > 0)
1298 retAddr = GetBranchDest(instPC, inst);
1300 retAddr = instPC + 8;
/* COP1 branch: BC1T/BC1F test the FPU condition bit in fpcCSR. */
1304 switch (inst.RType.rs) {
1307 if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
1308 condition = fpcCSR & MIPS_FPU_COND_BIT;
1310 condition = !(fpcCSR & MIPS_FPU_COND_BIT);
1312 retAddr = GetBranchDest(instPC, inst);
1314 retAddr = instPC + 8;
1318 retAddr = instPC + 4;
/* Not a branch: fall through to the next sequential instruction. */
1323 retAddr = instPC + 4;
1329 #if defined(DDB) || defined(DEBUG)
1331 * Print a stack backtrace.
/* Thin wrapper: start the unwind at the trapped pc/sp/ra, print via printf. */
1334 stacktrace(struct trapframe *regs)
1336 stacktrace_subr(regs->pc, regs->sp, regs->ra, printf);
/*
 * Write the full trap-frame register set to the kernel log (LOG_ERR),
 * four registers per line.  Mirrors trap_frame_dump(), which prints the
 * same layout via printf().
 *
 * Fix: the n32/n64 argument-register line labelled its fourth column
 * "a6" twice while printing frame->a7; relabelled to "a7" to match the
 * value printed and the parallel line in trap_frame_dump().
 *
 * NOTE(review): this listing is elided (#else/#endif and the closing
 * brace are missing); all other tokens are reproduced verbatim.
 */
1341 log_frame_dump(struct trapframe *frame)
1343 log(LOG_ERR, "Trapframe Register Dump:\n");
1344 log(LOG_ERR, "\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n",
1345 (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1);
1347 log(LOG_ERR, "\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n",
1348 (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3);
1350 #if defined(__mips_n32) || defined(__mips_n64)
1351 log(LOG_ERR, "\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta7: %#jx\n",
1352 (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7);
1354 log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1355 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1357 log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1358 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1360 log(LOG_ERR, "\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n",
1361 (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7);
1363 log(LOG_ERR, "\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n",
1364 (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1);
1366 log(LOG_ERR, "\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n",
1367 (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5);
1369 log(LOG_ERR, "\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n",
1370 (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1);
1372 log(LOG_ERR, "\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n",
1373 (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra);
1375 log(LOG_ERR, "\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n",
1376 (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr);
1379 log(LOG_ERR, "\tcause: %#jx\tpc: %#jx\tic: %#jx\n",
1380 (intmax_t)frame->cause, (intmax_t)frame->pc, (intmax_t)frame->ic);
1382 log(LOG_ERR, "\tcause: %#jx\tpc: %#jx\n",
1383 (intmax_t)frame->cause, (intmax_t)frame->pc);
/*
 * Print the full trap-frame register set to the console, four registers
 * per line -- the printf() twin of log_frame_dump().
 * NOTE(review): this listing is elided (#else/#endif and the closing
 * brace are missing); tokens below are reproduced verbatim.
 */
1389 trap_frame_dump(struct trapframe *frame)
1391 printf("Trapframe Register Dump:\n");
1392 printf("\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n",
1393 (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1);
1395 printf("\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n",
1396 (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3);
1397 #if defined(__mips_n32) || defined(__mips_n64)
1398 printf("\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta7: %#jx\n",
1399 (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7);
1401 printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1402 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1404 printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1405 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1407 printf("\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n",
1408 (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7);
1410 printf("\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n",
1411 (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1);
1413 printf("\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n",
1414 (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5);
1416 printf("\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n",
1417 (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1);
1419 printf("\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n",
1420 (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra);
1422 printf("\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n",
1423 (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr);
1426 printf("\tcause: %#jx\tpc: %#jx\tic: %#jx\n",
1427 (intmax_t)frame->cause, (intmax_t)frame->pc, (intmax_t)frame->ic);
1429 printf("\tcause: %#jx\tpc: %#jx\n",
1430 (intmax_t)frame->cause, (intmax_t)frame->pc);
/*
 * get_mapping_info --
 *	Look up the page-directory entry and page-table entry that map
 *	virtual address 'va' in the current process' pmap, returning
 *	them through *pdepp and *ptepp for diagnostic printing by the
 *	fault-logging routines below.
 */
1438 get_mapping_info(vm_offset_t va, pd_entry_t **pdepp, pt_entry_t **ptepp)
1442 struct proc *p = curproc;
/* Index the segment table by the segment number of va (masked to NPDEPG entries). */
1444 pdep = (&(p->p_vmspace->vm_pmap.pm_segtab[(va >> SEGSHIFT) & (NPDEPG - 1)]));
1446 ptep = pmap_pte(&p->p_vmspace->vm_pmap, va);
/* NULL pte -- presumably taken when the pde lookup above fails; the guarding if is not visible here. */
1448 ptep = (pt_entry_t *)0;
/*
 * log_illegal_instruction --
 *	Log diagnostic state after an illegal-instruction fault: the
 *	offending process identity and pc, the full trapframe, the page
 *	mapping of the pc, and (when the pc is user-accessible) the four
 *	instruction words at the faulting address.  'msg' prefixes the
 *	log line to identify the trap type.
 */
1455 log_illegal_instruction(const char *msg, struct trapframe *frame)
1468 printf("cpuid = %d\n", PCPU_GET(cpuid));
/* If the fault was taken in a branch delay slot, the real faulting pc is pc + 4. */
1470 pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1471 log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx ra %#jx\n",
1472 msg, p->p_pid, (long)td->td_tid, p->p_comm,
/* cr_uid may be unavailable (no credential); log -1 in that case. */
1473 p->p_ucred ? p->p_ucred->cr_uid : -1,
1475 (intmax_t)frame->ra);
1477 /* log registers in trap frame */
1478 log_frame_dump(frame);
1480 get_mapping_info((vm_offset_t)pc, &pdep, &ptep);
1483 * Dump a few words around faulting instruction, if the addres is
/* Only read through pc if it is word-aligned and readable from user space. */
1487 useracc((caddr_t)(intptr_t)pc, sizeof(int) * 4, VM_PROT_READ)) {
1488 /* dump page table entry for faulting instruction */
1489 log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
1490 (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1492 addr = (unsigned int *)(intptr_t)pc;
1493 log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
1495 log(LOG_ERR, "%08x %08x %08x %08x\n",
1496 addr[0], addr[1], addr[2], addr[3]);
/* pc not safely readable: report only the mapping information. */
1498 log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
1499 (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
/*
 * log_bad_page_fault --
 *	Log diagnostic state after an unrecoverable page fault: classify
 *	the access as a read or write from the trap type, log the process
 *	identity, pc and bad virtual address, dump the trapframe, dump
 *	the instruction words around the pc when they are accessible, and
 *	finally dump the page-table mapping of the faulting address.
 */
1504 log_bad_page_fault(char *msg, struct trapframe *frame, int trap_type)
1511 char *read_or_write;
/* Strip the user-mode flag so the switch below sees the base trap code. */
1514 trap_type &= ~T_USER;
1520 printf("cpuid = %d\n", PCPU_GET(cpuid));
/* Classify the fault direction; several case labels are missing from this excerpt -- confirm upstream. */
1522 switch (trap_type) {
1525 read_or_write = "write";
1529 case T_BUS_ERR_IFETCH:
1530 read_or_write = "read";
1533 read_or_write = "unknown";
/* If the fault was taken in a branch delay slot, the real faulting pc is pc + 4. */
1536 pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1537 log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx got a %s fault "
1538 "(type %#x) at %#jx\n",
1539 msg, p->p_pid, (long)td->td_tid, p->p_comm,
/* cr_uid may be unavailable (no credential); log -1 in that case. */
1540 p->p_ucred ? p->p_ucred->cr_uid : -1,
1544 (intmax_t)frame->badvaddr);
1546 /* log registers in trap frame */
1547 log_frame_dump(frame);
1549 get_mapping_info((vm_offset_t)pc, &pdep, &ptep);
1552 * Dump a few words around faulting instruction, if the addres is
/*
 * Only dump code when pc is word-aligned, distinct from the bad
 * address (i.e. not an instruction-fetch fault), not an ifetch bus
 * error, and readable from user space.
 */
1555 if (!(pc & 3) && (pc != frame->badvaddr) &&
1556 (trap_type != T_BUS_ERR_IFETCH) &&
1557 useracc((caddr_t)(intptr_t)pc, sizeof(int) * 4, VM_PROT_READ)) {
1558 /* dump page table entry for faulting instruction */
1559 log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
1560 (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1562 addr = (unsigned int *)(intptr_t)pc;
1563 log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
1565 log(LOG_ERR, "%08x %08x %08x %08x\n",
1566 addr[0], addr[1], addr[2], addr[3]);
/* pc not safely readable: report only its mapping information. */
1568 log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
1569 (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
/* Also dump the mapping of the bad virtual address itself. */
1572 get_mapping_info((vm_offset_t)frame->badvaddr, &pdep, &ptep);
1573 log(LOG_ERR, "Page table info for bad address %#jx: pde = %p, pte = %#jx\n",
1574 (intmax_t)frame->badvaddr, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1579 * Unaligned load/store emulation
/*
 * mips_unaligned_load_store --
 *	Emulate an unaligned load or store that raised an address-error
 *	trap.  'mode' is VM_PROT_READ for loads / VM_PROT_WRITE for
 *	stores, 'addr' the unaligned target address, and 'pc' the address
 *	of the faulting instruction.  Returns a MIPS_*_ACCESS code
 *	identifying the instruction that was emulated.
 */
1582 mips_unaligned_load_store(struct trapframe *frame, int mode, register_t addr, register_t pc)
/*
 * View the trapframe as a flat register_t array so the instruction's
 * rt field can index the saved register directly; assumes the
 * trapframe layout matches the register numbering -- TODO confirm.
 */
1584 register_t *reg = (register_t *) frame;
/* Fetch the faulting instruction itself so its opcode/rt fields can be decoded. */
1585 u_int32_t inst = *((u_int32_t *)(intptr_t)pc);
1586 register_t value_msb, value;
1590 * ADDR_ERR faults have higher priority than TLB
1591 * Miss faults. Therefore, it is necessary to
1592 * verify that the faulting address is a valid
1593 * virtual address within the process' address space
1594 * before trying to emulate the unaligned access.
/* First pass over the opcode: establish the access size for the validity check below. */
1596 switch (MIPS_INST_OPCODE(inst)) {
1597 case OP_LHU: case OP_LH:
1601 case OP_LWU: case OP_LW:
1610 printf("%s: unhandled opcode in address error: %#x\n", __func__, MIPS_INST_OPCODE(inst));
/*
 * Verify accessibility of 2*size bytes from the size-aligned base,
 * which covers both naturally-aligned units the access straddles.
 */
1614 if (!useracc((void *)((vm_offset_t)addr & ~(size - 1)), size * 2, mode))
1619 * Handle LL/SC LLD/SCD.
/* Second pass: actually emulate the access, byte/part-word at a time. */
1621 switch (MIPS_INST_OPCODE(inst)) {
/* LHU: load two bytes (MSB first) and combine zero-extended. */
1623 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1624 lbu_macro(value_msb, addr);
1626 lbu_macro(value, addr);
1627 value |= value_msb << 8;
1628 reg[MIPS_INST_RT(inst)] = value;
1629 return (MIPS_LHU_ACCESS);
/* LH: as above, but the high byte is sign-extended (lb vs. lbu). */
1632 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1633 lb_macro(value_msb, addr);
1635 lbu_macro(value, addr);
1636 value |= value_msb << 8;
1637 reg[MIPS_INST_RT(inst)] = value;
1638 return (MIPS_LH_ACCESS);
/* LWU: assemble the word with lwl/lwr, then mask to 32 bits to zero-extend. */
1641 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1642 lwl_macro(value, addr);
1644 lwr_macro(value, addr);
1645 value &= 0xffffffff;
1646 reg[MIPS_INST_RT(inst)] = value;
1647 return (MIPS_LWU_ACCESS);
/* LW: assemble the word with lwl/lwr; no masking, keeping the native sign extension. */
1650 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1651 lwl_macro(value, addr);
1653 lwr_macro(value, addr);
1654 reg[MIPS_INST_RT(inst)] = value;
1655 return (MIPS_LW_ACCESS);
/* LD exists only on the 64-bit-register ABIs. */
1657 #if defined(__mips_n32) || defined(__mips_n64)
1659 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1660 ldl_macro(value, addr);
1662 ldr_macro(value, addr);
1663 reg[MIPS_INST_RT(inst)] = value;
1664 return (MIPS_LD_ACCESS);
/* SH: store the register's low halfword as two single bytes, MSB first. */
1668 KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1669 value = reg[MIPS_INST_RT(inst)];
1670 value_msb = value >> 8;
1671 sb_macro(value_msb, addr);
1673 sb_macro(value, addr);
1674 return (MIPS_SH_ACCESS);
/* SW: store the word via the swl/swr pair. */
1677 KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1678 value = reg[MIPS_INST_RT(inst)];
1679 swl_macro(value, addr);
1681 swr_macro(value, addr);
1682 return (MIPS_SW_ACCESS);
/* SD exists only on the 64-bit-register ABIs. */
1684 #if defined(__mips_n32) || defined(__mips_n64)
1686 KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1687 value = reg[MIPS_INST_RT(inst)];
1688 sdl_macro(value, addr);
1690 sdr_macro(value, addr);
1691 return (MIPS_SD_ACCESS);
/* Every handled opcode returned above; reaching here means the decode is broken. */
1694 panic("%s: should not be reached.", __func__);
1699 emulate_unaligned_access(struct trapframe *frame, int mode)
1702 int access_type = 0;
1704 pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1707 * Fall through if it's instruction fetch exception
1709 if (!((pc & 3) || (pc == frame->badvaddr))) {
1712 * Handle unaligned load and store
1716 * Return access type if the instruction was emulated.
1717 * Otherwise restore pc and fall through.
1719 access_type = mips_unaligned_load_store(frame,
1720 mode, frame->badvaddr, pc);
1723 if (DELAYBRANCH(frame->cause))
1724 frame->pc = MipsEmulateBranch(frame, frame->pc,
1729 log(LOG_INFO, "Unaligned %s: pc=%#jx, badvaddr=%#jx\n",
1730 access_name[access_type - 1], (intmax_t)pc,
1731 (intmax_t)frame->badvaddr);