1 /* $OpenBSD: trap.c,v 1.19 1998/09/30 12:40:41 pefo Exp $ */
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1992, 1993
6 * The Regents of the University of California. All rights reserved.
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department and Ralph Campbell.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * from: Utah Hdr: trap.c 1.32 91/04/06
38 * from: @(#)trap.c 8.5 (Berkeley) 1/11/94
39 * JNPR: trap.c,v 1.13.2.2 2007/08/29 10:03:49 girish
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
44 #include "opt_compat.h"
46 #include "opt_ktrace.h"
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/sysent.h>
52 #include <sys/kernel.h>
53 #include <sys/signalvar.h>
54 #include <sys/syscall.h>
57 #include <vm/vm_extern.h>
58 #include <vm/vm_kern.h>
59 #include <vm/vm_page.h>
60 #include <vm/vm_map.h>
61 #include <vm/vm_param.h>
62 #include <sys/vmmeter.h>
63 #include <sys/ptrace.h>
66 #include <sys/vnode.h>
67 #include <sys/pioctl.h>
68 #include <sys/sysctl.h>
69 #include <sys/syslog.h>
72 #include <sys/ktrace.h>
74 #include <net/netisr.h>
76 #include <machine/trap.h>
77 #include <machine/cpu.h>
78 #include <machine/pte.h>
79 #include <machine/pmap.h>
80 #include <machine/md_var.h>
81 #include <machine/mips_opcode.h>
82 #include <machine/frame.h>
83 #include <machine/regnum.h>
84 #include <machine/tls.h>
87 #include <machine/db_machdep.h>
88 #include <ddb/db_sym.h>
94 #include <sys/dtrace_bsd.h>
99 SYSCTL_INT(_machdep, OID_AUTO, trap_debug, CTLFLAG_RW,
100 &trap_debug, 0, "Debug information on all traps");
103 #define lbu_macro(data, addr) \
104 __asm __volatile ("lbu %0, 0x0(%1)" \
105 : "=r" (data) /* outputs */ \
106 : "r" (addr)); /* inputs */
108 #define lb_macro(data, addr) \
109 __asm __volatile ("lb %0, 0x0(%1)" \
110 : "=r" (data) /* outputs */ \
111 : "r" (addr)); /* inputs */
113 #define lwl_macro(data, addr) \
114 __asm __volatile ("lwl %0, 0x0(%1)" \
115 : "=r" (data) /* outputs */ \
116 : "r" (addr)); /* inputs */
118 #define lwr_macro(data, addr) \
119 __asm __volatile ("lwr %0, 0x0(%1)" \
120 : "=r" (data) /* outputs */ \
121 : "r" (addr)); /* inputs */
123 #define ldl_macro(data, addr) \
124 __asm __volatile ("ldl %0, 0x0(%1)" \
125 : "=r" (data) /* outputs */ \
126 : "r" (addr)); /* inputs */
128 #define ldr_macro(data, addr) \
129 __asm __volatile ("ldr %0, 0x0(%1)" \
130 : "=r" (data) /* outputs */ \
131 : "r" (addr)); /* inputs */
133 #define sb_macro(data, addr) \
134 __asm __volatile ("sb %0, 0x0(%1)" \
136 : "r" (data), "r" (addr)); /* inputs */
138 #define swl_macro(data, addr) \
139 __asm __volatile ("swl %0, 0x0(%1)" \
141 : "r" (data), "r" (addr)); /* inputs */
143 #define swr_macro(data, addr) \
144 __asm __volatile ("swr %0, 0x0(%1)" \
146 : "r" (data), "r" (addr)); /* inputs */
148 #define sdl_macro(data, addr) \
149 __asm __volatile ("sdl %0, 0x0(%1)" \
151 : "r" (data), "r" (addr)); /* inputs */
153 #define sdr_macro(data, addr) \
154 __asm __volatile ("sdr %0, 0x0(%1)" \
156 : "r" (data), "r" (addr)); /* inputs */
158 static void log_illegal_instruction(const char *, struct trapframe *);
159 static void log_bad_page_fault(char *, struct trapframe *, int);
160 static void log_frame_dump(struct trapframe *frame);
161 static void get_mapping_info(vm_offset_t, pd_entry_t **, pt_entry_t **);
164 static void trap_frame_dump(struct trapframe *frame);
167 void (*machExceptionTable[]) (void)= {
169 * The kernel exception handlers.
171 MipsKernIntr, /* external interrupt */
172 MipsKernGenException, /* TLB modification */
173 MipsTLBInvalidException,/* TLB miss (load or instr. fetch) */
174 MipsTLBInvalidException,/* TLB miss (store) */
175 MipsKernGenException, /* address error (load or I-fetch) */
176 MipsKernGenException, /* address error (store) */
177 MipsKernGenException, /* bus error (I-fetch) */
178 MipsKernGenException, /* bus error (load or store) */
179 MipsKernGenException, /* system call */
180 MipsKernGenException, /* breakpoint */
181 MipsKernGenException, /* reserved instruction */
182 MipsKernGenException, /* coprocessor unusable */
183 MipsKernGenException, /* arithmetic overflow */
184 MipsKernGenException, /* trap exception */
185 MipsKernGenException, /* virtual coherence exception inst */
186 MipsKernGenException, /* floating point exception */
187 MipsKernGenException, /* reserved */
188 MipsKernGenException, /* reserved */
189 MipsKernGenException, /* reserved */
190 MipsKernGenException, /* reserved */
191 MipsKernGenException, /* reserved */
192 MipsKernGenException, /* reserved */
193 MipsKernGenException, /* reserved */
194 MipsKernGenException, /* watch exception */
195 MipsKernGenException, /* reserved */
196 MipsKernGenException, /* reserved */
197 MipsKernGenException, /* reserved */
198 MipsKernGenException, /* reserved */
199 MipsKernGenException, /* reserved */
200 MipsKernGenException, /* reserved */
201 MipsKernGenException, /* reserved */
202 MipsKernGenException, /* virtual coherence exception data */
204 * The user exception handlers.
206 MipsUserIntr, /* 0 */
207 MipsUserGenException, /* 1 */
208 MipsTLBInvalidException,/* 2 */
209 MipsTLBInvalidException,/* 3 */
210 MipsUserGenException, /* 4 */
211 MipsUserGenException, /* 5 */
212 MipsUserGenException, /* 6 */
213 MipsUserGenException, /* 7 */
214 MipsUserGenException, /* 8 */
215 MipsUserGenException, /* 9 */
216 MipsUserGenException, /* 10 */
217 MipsUserGenException, /* 11 */
218 MipsUserGenException, /* 12 */
219 MipsUserGenException, /* 13 */
220 MipsUserGenException, /* 14 */
221 MipsUserGenException, /* 15 */
222 MipsUserGenException, /* 16 */
223 MipsUserGenException, /* 17 */
224 MipsUserGenException, /* 18 */
225 MipsUserGenException, /* 19 */
226 MipsUserGenException, /* 20 */
227 MipsUserGenException, /* 21 */
228 MipsUserGenException, /* 22 */
229 MipsUserGenException, /* 23 */
230 MipsUserGenException, /* 24 */
231 MipsUserGenException, /* 25 */
232 MipsUserGenException, /* 26 */
233 MipsUserGenException, /* 27 */
234 MipsUserGenException, /* 28 */
235 MipsUserGenException, /* 29 */
236 MipsUserGenException, /* 20 */
237 MipsUserGenException, /* 31 */
/*
 * Human-readable names for the 32 CP0 exception codes; indexed by
 * (cause & MIPS_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT.  Must stay in
 * step with machExceptionTable above.  (Restored: the table had been
 * truncated, which would misname every code past the gap.)
 */
char *trap_type[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"trap",
	"virtual coherency instruction",
	"floating point",
	"reserved 16",
	"reserved 17",
	"reserved 18",
	"reserved 19",
	"reserved 20",
	"reserved 21",
	"reserved 22",
	"watch",
	"reserved 24",
	"reserved 25",
	"reserved 26",
	"reserved 27",
	"reserved 28",
	"reserved 29",
	"reserved 30",
	"virtual coherency data",
};
275 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
276 struct trapdebug trapdebug[TRAPSIZE], *trp = trapdebug;
279 #if defined(DDB) || defined(DEBUG)
280 void stacktrace(struct trapframe *);
281 void logstacktrace(struct trapframe *);
284 #define KERNLAND(x) ((vm_offset_t)(x) >= VM_MIN_KERNEL_ADDRESS && (vm_offset_t)(x) < VM_MAX_KERNEL_ADDRESS)
285 #define DELAYBRANCH(x) ((int)(x) < 0)
288 * MIPS load/store access type
301 char *access_name[] = {
302 "Load Halfword Unsigned",
304 "Load Word Unsigned",
313 #include <machine/octeon_cop2.h>
316 static int allow_unaligned_acc = 1;
318 SYSCTL_INT(_vm, OID_AUTO, allow_unaligned_acc, CTLFLAG_RW,
319 &allow_unaligned_acc, 0, "Allow unaligned accesses");
322 * FP emulation is assumed to work on O32, but the code is outdated and crufty
323 * enough that it's a more sensible default to have it disabled when using
324 * other ABIs. At the very least, it needs a lot of help in using
325 * type-semantic ABI-oblivious macros for everything it does.
327 #if defined(__mips_o32)
328 static int emulate_fp = 1;
330 static int emulate_fp = 0;
332 SYSCTL_INT(_machdep, OID_AUTO, emulate_fp, CTLFLAG_RW,
333 &emulate_fp, 0, "Emulate unimplemented FPU instructions");
335 static int emulate_unaligned_access(struct trapframe *frame, int mode);
337 extern void fswintrberr(void); /* XXX */
/*
 * Fetch the current system call's code and arguments from the trapframe
 * registers (and, for calls with more arguments than fit in registers,
 * from the user stack) into *sa, then look up the handler in the
 * process's sysent table.  Also advances the user PC past the syscall
 * instruction (honoring the branch-delay slot).
 * NOTE(review): a number of structural lines (braces, else branches,
 * #endif lines, local declarations) were lost when this file was
 * extracted; the surviving statements are preserved verbatim below.
 */
cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
	struct trapframe *locr0 = td->td_frame;	/* user register state */
	struct sysentvec *se;

	bzero(sa->args, sizeof(sa->args));

	/* compute next PC after syscall instruction */
	td->td_pcb->pcb_tpc = sa->trapframe->pc;	/* Remember if restart */
	if (DELAYBRANCH(sa->trapframe->cause))	/* Check BD bit */
		locr0->pc = MipsEmulateBranch(locr0, sa->trapframe->pc, 0, 0);
	locr0->pc += sizeof(int);
	sa->code = locr0->v0;

	/*
	 * This is an indirect syscall, in which the code is the first argument.
	 */
#if (!defined(__mips_n32) && !defined(__mips_n64)) || defined(COMPAT_FREEBSD32)
	if (sa->code == SYS___syscall && SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/*
		 * Like syscall, but code is a quad, so as to maintain alignment
		 * for the rest of the arguments.
		 */
		if (_QUAD_LOWWORD == 0)
			sa->code = locr0->a0;
		sa->code = locr0->a1;
		sa->args[0] = locr0->a2;
		sa->args[1] = locr0->a3;
	/*
	 * This is either not a quad syscall, or is a quad syscall with a
	 * new ABI in which quads fit in a single register.
	 */
	sa->code = locr0->a0;
	sa->args[0] = locr0->a1;
	sa->args[1] = locr0->a2;
	sa->args[2] = locr0->a3;
#if defined(__mips_n32) || defined(__mips_n64)
#ifdef COMPAT_FREEBSD32
	if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/*
		 * Non-o32 ABIs support more arguments in registers.
		 */
		sa->args[3] = locr0->a4;
		sa->args[4] = locr0->a5;
		sa->args[5] = locr0->a6;
		sa->args[6] = locr0->a7;
#ifdef COMPAT_FREEBSD32
	/*
	 * A direct syscall, arguments are just parameters to the syscall.
	 */
	sa->args[0] = locr0->a0;
	sa->args[1] = locr0->a1;
	sa->args[2] = locr0->a2;
	sa->args[3] = locr0->a3;
#if defined (__mips_n32) || defined(__mips_n64)
#ifdef COMPAT_FREEBSD32
	if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/*
		 * Non-o32 ABIs support more arguments in registers.
		 */
		sa->args[4] = locr0->a4;
		sa->args[5] = locr0->a5;
		sa->args[6] = locr0->a6;
		sa->args[7] = locr0->a7;
#ifdef COMPAT_FREEBSD32
	printf("SYSCALL #%d pid:%u\n", sa->code, td->td_proc->p_pid);

	se = td->td_proc->p_sysent;
	/*
	 * Shouldn't this go before switching on the code?
	 */
	sa->code &= se->sv_mask;
	/* Out-of-range code: fall back to sysent entry 0 (indirect syscall). */
	if (sa->code >= se->sv_size)
		sa->callp = &se->sv_table[0];
		sa->callp = &se->sv_table[sa->code];
	sa->narg = sa->callp->sy_narg;

	/* Any arguments beyond what fit in registers come from the user stack. */
	if (sa->narg > nsaved) {
#if defined(__mips_n32) || defined(__mips_n64)
		/*
		 * XXX
		 * Is this right for new ABIs? I think the 4 there
		 * should be 8, size there are 8 registers to skip,
		 * not 4, but I'm not certain.
		 */
#ifdef COMPAT_FREEBSD32
		if (!SV_PROC_FLAG(td->td_proc, SV_ILP32))
			printf("SYSCALL #%u pid:%u, narg (%u) > nsaved (%u).\n",
			    sa->code, td->td_proc->p_pid, sa->narg, nsaved);
#if (defined(__mips_n32) || defined(__mips_n64)) && defined(COMPAT_FREEBSD32)
		if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
			/* 32-bit process on a 64-bit ABI: copy args one int32 at a time. */
			error = 0;	/* XXX GCC is awful. */
			for (i = nsaved; i < sa->narg; i++) {
				error = copyin((caddr_t)(intptr_t)(locr0->sp +
				    (4 + (i - nsaved)) * sizeof(int32_t)),
				    (caddr_t)&arg, sizeof arg);
		/* Native ABI: copy the remaining register_t arguments in one go. */
		error = copyin((caddr_t)(intptr_t)(locr0->sp +
		    4 * sizeof(register_t)), (caddr_t)&sa->args[nsaved],
		    (u_int)(sa->narg - nsaved) * sizeof(register_t));

	td->td_retval[0] = 0;
	td->td_retval[1] = locr0->v1;

#include "../../kern/subr_syscall.c"
505 * Handle an exception.
506 * Called from MipsKernGenException() or MipsUserGenException()
507 * when a processor trap occurs.
508 * In the case of a kernel trap, we return the pc where to resume if
509 * p->p_addr->u_pcb.pcb_onfault is set, otherwise, return old pc.
512 trap(struct trapframe *trapframe)
517 struct thread *td = curthread;
518 struct proc *p = curproc;
527 register_t *frame_regs;
529 trapdebug_enter(trapframe, 0);
531 type = (trapframe->cause & MIPS_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT;
532 if (TRAPF_USERMODE(trapframe)) {
540 * Enable hardware interrupts if they were on before the trap. If it
541 * was off disable all so we don't accidently enable it when doing a
542 * return to userland.
544 if (trapframe->sr & MIPS_SR_INT_IE) {
545 set_intr_mask(trapframe->sr & MIPS_SR_INT_MASK);
553 static vm_offset_t last_badvaddr = 0;
554 static vm_offset_t this_badvaddr = 0;
555 static int count = 0;
558 printf("trap type %x (%s - ", type,
559 trap_type[type & (~T_USER)]);
562 printf("user mode)\n");
564 printf("kernel mode)\n");
567 printf("cpuid = %d\n", PCPU_GET(cpuid));
569 pid = mips_rd_entryhi() & TLBHI_ASID_MASK;
570 printf("badaddr = %#jx, pc = %#jx, ra = %#jx, sp = %#jx, sr = %jx, pid = %d, ASID = %u\n",
571 (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra,
572 (intmax_t)trapframe->sp, (intmax_t)trapframe->sr,
573 (curproc ? curproc->p_pid : -1), pid);
575 switch (type & ~T_USER) {
581 this_badvaddr = trapframe->badvaddr;
584 this_badvaddr = trapframe->ra;
587 this_badvaddr = trapframe->pc;
590 if ((last_badvaddr == this_badvaddr) &&
591 ((type & ~T_USER) != T_SYSCALL)) {
593 trap_frame_dump(trapframe);
594 panic("too many faults at %p\n", (void *)last_badvaddr);
597 last_badvaddr = this_badvaddr;
605 * A trap can occur while DTrace executes a probe. Before
606 * executing the probe, DTrace blocks re-scheduling and sets
607 * a flag in its per-cpu flags to indicate that it doesn't
608 * want to fault. On returning from the probe, the no-fault
609 * flag is cleared and finally re-scheduling is enabled.
611 * If the DTrace kernel module has registered a trap handler,
612 * call it and if it returns non-zero, assume that it has
613 * handled the trap and modified the trap frame so that this
614 * function can return normally.
617 * XXXDTRACE: add pid probe handler here (if ever)
620 if (dtrace_trap_func != NULL &&
621 (*dtrace_trap_func)(trapframe, type) != 0)
622 return (trapframe->pc);
629 kdb_trap(type, 0, trapframe);
634 /* check for kernel address */
635 if (KERNLAND(trapframe->badvaddr)) {
636 if (pmap_emulate_modified(kernel_pmap,
637 trapframe->badvaddr) != 0) {
638 ftype = VM_PROT_WRITE;
641 return (trapframe->pc);
645 case T_TLB_MOD + T_USER:
646 pmap = &p->p_vmspace->vm_pmap;
647 if (pmap_emulate_modified(pmap, trapframe->badvaddr) != 0) {
648 ftype = VM_PROT_WRITE;
652 return (trapframe->pc);
657 ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
658 /* check for kernel address */
659 if (KERNLAND(trapframe->badvaddr)) {
664 va = trunc_page((vm_offset_t)trapframe->badvaddr);
665 rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
666 if (rv == KERN_SUCCESS)
667 return (trapframe->pc);
668 if (td->td_pcb->pcb_onfault != NULL) {
669 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
670 td->td_pcb->pcb_onfault = NULL;
677 * It is an error for the kernel to access user space except
678 * through the copyin/copyout routines.
680 if (td->td_pcb->pcb_onfault == NULL)
683 /* check for fuswintr() or suswintr() getting a page fault */
684 /* XXX There must be a nicer way to do this. */
685 if (td->td_pcb->pcb_onfault == fswintrberr) {
686 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
687 td->td_pcb->pcb_onfault = NULL;
693 case T_TLB_LD_MISS + T_USER:
694 ftype = VM_PROT_READ;
697 case T_TLB_ST_MISS + T_USER:
698 ftype = VM_PROT_WRITE;
708 va = trunc_page((vm_offset_t)trapframe->badvaddr);
709 if (KERNLAND(trapframe->badvaddr)) {
711 * Don't allow user-mode faults in kernel
717 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
719 * XXXDTRACE: add dtrace_doubletrap_func here?
722 printf("vm_fault(%p (pmap %p), %p (%p), %x, %d) -> %x at pc %p\n",
723 map, &vm->vm_pmap, (void *)va, (void *)(intptr_t)trapframe->badvaddr,
724 ftype, VM_FAULT_NORMAL, rv, (void *)(intptr_t)trapframe->pc);
727 if (rv == KERN_SUCCESS) {
729 return (trapframe->pc);
735 if (td->td_pcb->pcb_onfault != NULL) {
736 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
737 td->td_pcb->pcb_onfault = NULL;
743 i = ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
744 addr = trapframe->pc;
746 msg = "BAD_PAGE_FAULT";
747 log_bad_page_fault(msg, trapframe, type);
752 case T_ADDR_ERR_LD + T_USER: /* misaligned or kseg access */
753 case T_ADDR_ERR_ST + T_USER: /* misaligned or kseg access */
754 if (trapframe->badvaddr < 0 ||
755 trapframe->badvaddr >= VM_MAXUSER_ADDRESS) {
756 msg = "ADDRESS_SPACE_ERR";
757 } else if (allow_unaligned_acc) {
760 if (type == (T_ADDR_ERR_LD + T_USER))
763 mode = VM_PROT_WRITE;
765 access_type = emulate_unaligned_access(trapframe, mode);
766 if (access_type != 0)
768 msg = "ALIGNMENT_FIX_ERR";
775 case T_BUS_ERR_IFETCH + T_USER: /* BERR asserted to cpu */
776 case T_BUS_ERR_LD_ST + T_USER: /* BERR asserted to cpu */
777 ucode = 0; /* XXX should be VM_PROT_something */
779 addr = trapframe->pc;
782 log_bad_page_fault(msg, trapframe, type);
785 case T_SYSCALL + T_USER:
787 struct syscall_args sa;
790 sa.trapframe = trapframe;
791 error = syscallenter(td, &sa);
793 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
794 if (trp == trapdebug)
795 trapdebug[TRAPSIZE - 1].code = sa.code;
797 trp[-1].code = sa.code;
799 trapdebug_enter(td->td_frame, -sa.code);
802 * The sync'ing of I & D caches for SYS_ptrace() is
803 * done by procfs_domem() through procfs_rwmem()
804 * instead of being done here under a special check
807 syscallret(td, error, &sa);
808 return (trapframe->pc);
813 kdb_trap(type, 0, trapframe);
814 return (trapframe->pc);
817 case T_BREAK + T_USER:
822 /* compute address of break instruction */
824 if (DELAYBRANCH(trapframe->cause))
827 /* read break instruction */
828 instr = fuword32((caddr_t)va);
830 printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
831 p->p_comm, p->p_pid, instr, trapframe->pc,
832 p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
834 if (td->td_md.md_ss_addr != va ||
835 instr != MIPS_BREAK_SSTEP) {
837 addr = trapframe->pc;
841 * The restoration of the original instruction and
842 * the clearing of the berakpoint will be done later
843 * by the call to ptrace_clear_single_step() in
844 * issignal() when SIGTRAP is processed.
846 addr = trapframe->pc;
851 case T_IWATCH + T_USER:
852 case T_DWATCH + T_USER:
856 /* compute address of trapped instruction */
858 if (DELAYBRANCH(trapframe->cause))
860 printf("watch exception @ %p\n", (void *)va);
866 case T_TRAP + T_USER:
870 struct trapframe *locr0 = td->td_frame;
872 /* compute address of trap instruction */
874 if (DELAYBRANCH(trapframe->cause))
876 /* read break instruction */
877 instr = fuword32((caddr_t)va);
879 if (DELAYBRANCH(trapframe->cause)) { /* Check BD bit */
880 locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0,
883 locr0->pc += sizeof(int);
886 i = SIGEMT; /* Stuff it with something for now */
890 case T_RES_INST + T_USER:
893 inst = *(InstFmt *)(intptr_t)trapframe->pc;
894 switch (inst.RType.op) {
896 switch (inst.RType.func) {
898 /* Register 29 used for TLS */
899 if (inst.RType.rd == 29) {
900 frame_regs = &(trapframe->zero);
901 frame_regs[inst.RType.rt] = (register_t)(intptr_t)td->td_md.md_tls;
902 #if defined(__mips_n64) && defined(COMPAT_FREEBSD32)
903 if (SV_PROC_FLAG(td->td_proc, SV_ILP32))
904 frame_regs[inst.RType.rt] += TLS_TP_OFFSET + TLS_TCB_SIZE32;
907 frame_regs[inst.RType.rt] += TLS_TP_OFFSET + TLS_TCB_SIZE;
908 trapframe->pc += sizeof(int);
916 log_illegal_instruction("RES_INST", trapframe);
918 addr = trapframe->pc;
927 cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT;
928 /* Handle only COP2 exception */
932 addr = trapframe->pc;
933 /* save userland cop2 context if it has been touched */
934 if ((td->td_md.md_flags & MDTD_COP2USED) &&
935 (td->td_md.md_cop2owner == COP2_OWNER_USERLAND)) {
936 if (td->td_md.md_ucop2)
937 octeon_cop2_save(td->td_md.md_ucop2);
939 panic("COP2 was used in user mode but md_ucop2 is NULL");
942 if (td->td_md.md_cop2 == NULL) {
943 td->td_md.md_cop2 = octeon_cop2_alloc_ctx();
944 if (td->td_md.md_cop2 == NULL)
945 panic("Failed to allocate COP2 context");
946 memset(td->td_md.md_cop2, 0, sizeof(*td->td_md.md_cop2));
949 octeon_cop2_restore(td->td_md.md_cop2);
951 /* Make userland re-request its context */
952 td->td_frame->sr &= ~MIPS_SR_COP_2_BIT;
953 td->td_md.md_flags |= MDTD_COP2USED;
954 td->td_md.md_cop2owner = COP2_OWNER_KERNEL;
955 /* Enable COP2, it will be disabled in cpu_switch */
956 mips_wr_status(mips_rd_status() | MIPS_SR_COP_2_BIT);
957 return (trapframe->pc);
963 case T_COP_UNUSABLE + T_USER:
964 cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT;
966 #if !defined(CPU_HAVEFPU)
967 /* FP (COP1) instruction */
968 log_illegal_instruction("COP1_UNUSABLE", trapframe);
972 addr = trapframe->pc;
973 MipsSwitchFPState(PCPU_GET(fpcurthread), td->td_frame);
974 PCPU_SET(fpcurthread, td);
975 td->td_frame->sr |= MIPS_SR_COP_1_BIT;
976 td->td_md.md_flags |= MDTD_FPUSED;
982 addr = trapframe->pc;
983 if ((td->td_md.md_flags & MDTD_COP2USED) &&
984 (td->td_md.md_cop2owner == COP2_OWNER_KERNEL)) {
985 if (td->td_md.md_cop2)
986 octeon_cop2_save(td->td_md.md_cop2);
988 panic("COP2 was used in kernel mode but md_cop2 is NULL");
991 if (td->td_md.md_ucop2 == NULL) {
992 td->td_md.md_ucop2 = octeon_cop2_alloc_ctx();
993 if (td->td_md.md_ucop2 == NULL)
994 panic("Failed to allocate userland COP2 context");
995 memset(td->td_md.md_ucop2, 0, sizeof(*td->td_md.md_ucop2));
998 octeon_cop2_restore(td->td_md.md_ucop2);
1000 td->td_frame->sr |= MIPS_SR_COP_2_BIT;
1001 td->td_md.md_flags |= MDTD_COP2USED;
1002 td->td_md.md_cop2owner = COP2_OWNER_USERLAND;
1007 log_illegal_instruction("COPn_UNUSABLE", trapframe);
1008 i = SIGILL; /* only FPU instructions allowed */
1013 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
1016 printf("FPU Trap: PC %#jx CR %x SR %x\n",
1017 (intmax_t)trapframe->pc, (unsigned)trapframe->cause, (unsigned)trapframe->sr);
1021 case T_FPE + T_USER:
1024 addr = trapframe->pc;
1027 MipsFPTrap(trapframe->sr, trapframe->cause, trapframe->pc);
1030 case T_OVFLOW + T_USER:
1032 addr = trapframe->pc;
1035 case T_ADDR_ERR_LD: /* misaligned access */
1036 case T_ADDR_ERR_ST: /* misaligned access */
1039 printf("+++ ADDR_ERR: type = %d, badvaddr = %#jx\n", type,
1040 (intmax_t)trapframe->badvaddr);
1043 /* Only allow emulation on a user address */
1044 if (allow_unaligned_acc &&
1045 ((vm_offset_t)trapframe->badvaddr < VM_MAXUSER_ADDRESS)) {
1048 if (type == T_ADDR_ERR_LD)
1049 mode = VM_PROT_READ;
1051 mode = VM_PROT_WRITE;
1053 access_type = emulate_unaligned_access(trapframe, mode);
1054 if (access_type != 0)
1055 return (trapframe->pc);
1059 case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */
1060 if (td->td_pcb->pcb_onfault != NULL) {
1061 pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
1062 td->td_pcb->pcb_onfault = NULL;
1071 #if !defined(SMP) && defined(DEBUG)
1072 stacktrace(!usermode ? trapframe : td->td_frame);
1076 printf("cpu:%d-", PCPU_GET(cpuid));
1078 printf("Trap cause = %d (%s - ", type,
1079 trap_type[type & (~T_USER)]);
1082 printf("user mode)\n");
1084 printf("kernel mode)\n");
1088 printf("badvaddr = %#jx, pc = %#jx, ra = %#jx, sr = %#jxx\n",
1089 (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra,
1090 (intmax_t)trapframe->sr);
1094 if (debugger_on_panic || kdb_active) {
1095 kdb_trap(type, 0, trapframe);
1100 td->td_frame->pc = trapframe->pc;
1101 td->td_frame->cause = trapframe->cause;
1102 td->td_frame->badvaddr = trapframe->badvaddr;
1103 ksiginfo_init_trap(&ksi);
1105 ksi.ksi_code = ucode;
1106 ksi.ksi_addr = (void *)addr;
1107 ksi.ksi_trapno = type;
1108 trapsignal(td, &ksi);
1112 * Note: we should only get here if returning to user mode.
1114 userret(td, trapframe);
1115 return (trapframe->pc);
#if !defined(SMP) && (defined(DDB) || defined(DEBUG))
/*
 * NOTE(review): the following lines are the surviving interior of
 * trapDump(), which walks the trapdebug ring buffer backwards from the
 * current slot and prints each recorded trap; its signature and several
 * control-flow lines were lost in extraction.
 */
	printf("trapDump(%s)\n", msg);
	for (i = 0; i < TRAPSIZE; i++) {
		/* Wrap backwards around the ring buffer. */
		if (trp == trapdebug) {
			trp = &trapdebug[TRAPSIZE - 1];
		/* An all-zero cause marks an unused slot. */
		if (trp->cause == 0)
		printf("%s: ADR %jx PC %jx CR %jx SR %jx\n",
		    trap_type[(trp->cause & MIPS_CR_EXC_CODE) >>
		    MIPS_CR_EXC_CODE_SHIFT],
		    (intmax_t)trp->vadr, (intmax_t)trp->pc,
		    (intmax_t)trp->cause, (intmax_t)trp->status);
		printf(" RA %jx SP %jx code %d\n", (intmax_t)trp->ra,
		    (intmax_t)trp->sp, (int)trp->code);
/*
 * Decode the (branch/jump) instruction at the given PC and compute the
 * PC that execution will reach next, i.e. return the resulting PC as if
 * the branch was executed.  Used to fix up the saved PC when a trap is
 * taken in a branch-delay slot.
 * NOTE(review): the tail of the signature, several case labels and
 * else branches were lost in extraction; surviving lines are verbatim.
 */
MipsEmulateBranch(struct trapframe *framePtr, uintptr_t instPC, int fpcCSR,

	/* The trapframe is addressed here as a flat array of registers. */
	register_t *regsPtr = (register_t *) framePtr;
	uintptr_t retAddr = 0;

	/* Branch target = PC + 4 + (sign-extended 16-bit immediate << 2). */
#define GetBranchDest(InstPtr, inst) \
	(InstPtr + 4 + ((short)inst.IType.imm << 2))

	/* Fetch the instruction word; user addresses go through fuword32(). */
	if (instptr < MIPS_KSEG0_START)
		inst.word = fuword32((void *)instptr);
		inst = *(InstFmt *) instptr;
	if ((vm_offset_t)instPC < MIPS_KSEG0_START)
		inst.word = fuword32((void *)instPC);
		inst = *(InstFmt *) instPC;

	switch ((int)inst.JType.op) {
		switch ((int)inst.RType.func) {
			/* JR/JALR: target is taken from register rs. */
			retAddr = regsPtr[inst.RType.rs];
			retAddr = instPC + 4;
		switch ((int)inst.IType.rt) {
			/* BLTZ(AL): taken when (signed)rs < 0. */
			if ((int)(regsPtr[inst.RType.rs]) < 0)
				retAddr = GetBranchDest(instPC, inst);
				retAddr = instPC + 8;
			/* BGEZ(AL): taken when (signed)rs >= 0. */
			if ((int)(regsPtr[inst.RType.rs]) >= 0)
				retAddr = GetBranchDest(instPC, inst);
				retAddr = instPC + 8;
			retAddr = instPC + 4;	/* Like syscall... */
			panic("MipsEmulateBranch: Bad branch cond");
		/* J/JAL: 26-bit target within the current 256MB region. */
		retAddr = (inst.JType.target << 2) |
		    ((unsigned)(instPC + 4) & 0xF0000000);
		/* BEQ: taken when rs == rt. */
		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
			retAddr = GetBranchDest(instPC, inst);
			retAddr = instPC + 8;
		/* BNE: taken when rs != rt. */
		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
			retAddr = GetBranchDest(instPC, inst);
			retAddr = instPC + 8;
		/* BLEZ: taken when (signed)rs <= 0. */
		if ((int)(regsPtr[inst.RType.rs]) <= 0)
			retAddr = GetBranchDest(instPC, inst);
			retAddr = instPC + 8;
		/* BGTZ: taken when (signed)rs > 0. */
		if ((int)(regsPtr[inst.RType.rs]) > 0)
			retAddr = GetBranchDest(instPC, inst);
			retAddr = instPC + 8;
		/* COP1 branch (BC1T/BC1F): test the FP condition bit. */
		switch (inst.RType.rs) {
			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
				condition = fpcCSR & MIPS_FPU_COND_BIT;
				condition = !(fpcCSR & MIPS_FPU_COND_BIT);
				retAddr = GetBranchDest(instPC, inst);
				retAddr = instPC + 8;
			retAddr = instPC + 4;
		retAddr = instPC + 4;
1293 #if defined(DDB) || defined(DEBUG)
1295 * Print a stack backtrace.
1298 stacktrace(struct trapframe *regs)
1300 stacktrace_subr(regs->pc, regs->sp, regs->ra, printf);
1305 log_frame_dump(struct trapframe *frame)
1307 log(LOG_ERR, "Trapframe Register Dump:\n");
1308 log(LOG_ERR, "\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n",
1309 (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1);
1311 log(LOG_ERR, "\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n",
1312 (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3);
1314 #if defined(__mips_n32) || defined(__mips_n64)
1315 log(LOG_ERR, "\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta6: %#jx\n",
1316 (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7);
1318 log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1319 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1321 log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1322 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1324 log(LOG_ERR, "\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n",
1325 (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7);
1327 log(LOG_ERR, "\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n",
1328 (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1);
1330 log(LOG_ERR, "\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n",
1331 (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5);
1333 log(LOG_ERR, "\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n",
1334 (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1);
1336 log(LOG_ERR, "\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n",
1337 (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra);
1339 log(LOG_ERR, "\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n",
1340 (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr);
1343 log(LOG_ERR, "\tcause: %#jx\tpc: %#jx\tic: %#jx\n",
1344 (intmax_t)frame->cause, (intmax_t)frame->pc, (intmax_t)frame->ic);
1346 log(LOG_ERR, "\tcause: %#jx\tpc: %#jx\n",
1347 (intmax_t)frame->cause, (intmax_t)frame->pc);
1353 trap_frame_dump(struct trapframe *frame)
1355 printf("Trapframe Register Dump:\n");
1356 printf("\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n",
1357 (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1);
1359 printf("\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n",
1360 (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3);
1361 #if defined(__mips_n32) || defined(__mips_n64)
1362 printf("\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta7: %#jx\n",
1363 (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7);
1365 printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1366 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1368 printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1369 (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1371 printf("\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n",
1372 (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7);
1374 printf("\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n",
1375 (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1);
1377 printf("\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n",
1378 (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5);
1380 printf("\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n",
1381 (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1);
1383 printf("\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n",
1384 (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra);
1386 printf("\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n",
1387 (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr);
1390 printf("\tcause: %#jx\tpc: %#jx\tic: %#jx\n",
1391 (intmax_t)frame->cause, (intmax_t)frame->pc, (intmax_t)frame->ic);
1393 printf("\tcause: %#jx\tpc: %#jx\n",
1394 (intmax_t)frame->cause, (intmax_t)frame->pc);
1402 get_mapping_info(vm_offset_t va, pd_entry_t **pdepp, pt_entry_t **ptepp)
1406 struct proc *p = curproc;
1408 pdep = (&(p->p_vmspace->vm_pmap.pm_segtab[(va >> SEGSHIFT) & (NPDEPG - 1)]));
1410 ptep = pmap_pte(&p->p_vmspace->vm_pmap, va);
1412 ptep = (pt_entry_t *)0;
1419 log_illegal_instruction(const char *msg, struct trapframe *frame)
1432 printf("cpuid = %d\n", PCPU_GET(cpuid));
1434 pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1435 log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx ra %#jx\n",
1436 msg, p->p_pid, (long)td->td_tid, p->p_comm,
1437 p->p_ucred ? p->p_ucred->cr_uid : -1,
1439 (intmax_t)frame->ra);
1441 /* log registers in trap frame */
1442 log_frame_dump(frame);
1444 get_mapping_info((vm_offset_t)pc, &pdep, &ptep);
1447 * Dump a few words around faulting instruction, if the addres is
1451 useracc((caddr_t)(intptr_t)pc, sizeof(int) * 4, VM_PROT_READ)) {
1452 /* dump page table entry for faulting instruction */
1453 log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
1454 (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1456 addr = (unsigned int *)(intptr_t)pc;
1457 log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
1459 log(LOG_ERR, "%08x %08x %08x %08x\n",
1460 addr[0], addr[1], addr[2], addr[3]);
1462 log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
1463 (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1468 log_bad_page_fault(char *msg, struct trapframe *frame, int trap_type)
1475 char *read_or_write;
1478 trap_type &= ~T_USER;
1484 printf("cpuid = %d\n", PCPU_GET(cpuid));
1486 switch (trap_type) {
1490 read_or_write = "write";
1494 case T_BUS_ERR_IFETCH:
1495 read_or_write = "read";
1498 read_or_write = "unknown";
1501 pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1502 log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx got a %s fault "
1503 "(type %#x) at %#jx\n",
1504 msg, p->p_pid, (long)td->td_tid, p->p_comm,
1505 p->p_ucred ? p->p_ucred->cr_uid : -1,
1509 (intmax_t)frame->badvaddr);
1511 /* log registers in trap frame */
1512 log_frame_dump(frame);
1514 get_mapping_info((vm_offset_t)pc, &pdep, &ptep);
1517 * Dump a few words around faulting instruction, if the addres is
1520 if (!(pc & 3) && (pc != frame->badvaddr) &&
1521 (trap_type != T_BUS_ERR_IFETCH) &&
1522 useracc((caddr_t)(intptr_t)pc, sizeof(int) * 4, VM_PROT_READ)) {
1523 /* dump page table entry for faulting instruction */
1524 log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
1525 (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1527 addr = (unsigned int *)(intptr_t)pc;
1528 log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
1530 log(LOG_ERR, "%08x %08x %08x %08x\n",
1531 addr[0], addr[1], addr[2], addr[3]);
1533 log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
1534 (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1537 get_mapping_info((vm_offset_t)frame->badvaddr, &pdep, &ptep);
1538 log(LOG_ERR, "Page table info for bad address %#jx: pde = %p, pte = %#jx\n",
1539 (intmax_t)frame->badvaddr, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1544 * Unaligned load/store emulation
1547 mips_unaligned_load_store(struct trapframe *frame, int mode, register_t addr, register_t pc)
1549 register_t *reg = (register_t *) frame;
1550 u_int32_t inst = *((u_int32_t *)(intptr_t)pc);
1551 register_t value_msb, value;
1555 * ADDR_ERR faults have higher priority than TLB
1556 * Miss faults. Therefore, it is necessary to
1557 * verify that the faulting address is a valid
1558 * virtual address within the process' address space
1559 * before trying to emulate the unaligned access.
1561 switch (MIPS_INST_OPCODE(inst)) {
1562 case OP_LHU: case OP_LH:
1566 case OP_LWU: case OP_LW:
1575 printf("%s: unhandled opcode in address error: %#x\n", __func__, MIPS_INST_OPCODE(inst));
1579 if (!useracc((void *)((vm_offset_t)addr & ~(size - 1)), size * 2, mode))
1584 * Handle LL/SC LLD/SCD.
1586 switch (MIPS_INST_OPCODE(inst)) {
1588 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1589 lbu_macro(value_msb, addr);
1591 lbu_macro(value, addr);
1592 value |= value_msb << 8;
1593 reg[MIPS_INST_RT(inst)] = value;
1594 return (MIPS_LHU_ACCESS);
1597 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1598 lb_macro(value_msb, addr);
1600 lbu_macro(value, addr);
1601 value |= value_msb << 8;
1602 reg[MIPS_INST_RT(inst)] = value;
1603 return (MIPS_LH_ACCESS);
1606 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1607 lwl_macro(value, addr);
1609 lwr_macro(value, addr);
1610 value &= 0xffffffff;
1611 reg[MIPS_INST_RT(inst)] = value;
1612 return (MIPS_LWU_ACCESS);
1615 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1616 lwl_macro(value, addr);
1618 lwr_macro(value, addr);
1619 reg[MIPS_INST_RT(inst)] = value;
1620 return (MIPS_LW_ACCESS);
1622 #if defined(__mips_n32) || defined(__mips_n64)
1624 KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1625 ldl_macro(value, addr);
1627 ldr_macro(value, addr);
1628 reg[MIPS_INST_RT(inst)] = value;
1629 return (MIPS_LD_ACCESS);
1633 KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1634 value = reg[MIPS_INST_RT(inst)];
1635 value_msb = value >> 8;
1636 sb_macro(value_msb, addr);
1638 sb_macro(value, addr);
1639 return (MIPS_SH_ACCESS);
1642 KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1643 value = reg[MIPS_INST_RT(inst)];
1644 swl_macro(value, addr);
1646 swr_macro(value, addr);
1647 return (MIPS_SW_ACCESS);
1649 #if defined(__mips_n32) || defined(__mips_n64)
1651 KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1652 value = reg[MIPS_INST_RT(inst)];
1653 sdl_macro(value, addr);
1655 sdr_macro(value, addr);
1656 return (MIPS_SD_ACCESS);
1659 panic("%s: should not be reached.", __func__);
1664 emulate_unaligned_access(struct trapframe *frame, int mode)
1667 int access_type = 0;
1669 pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1672 * Fall through if it's instruction fetch exception
1674 if (!((pc & 3) || (pc == frame->badvaddr))) {
1677 * Handle unaligned load and store
1681 * Return access type if the instruction was emulated.
1682 * Otherwise restore pc and fall through.
1684 access_type = mips_unaligned_load_store(frame,
1685 mode, frame->badvaddr, pc);
1688 if (DELAYBRANCH(frame->cause))
1689 frame->pc = MipsEmulateBranch(frame, frame->pc,
1694 log(LOG_INFO, "Unaligned %s: pc=%#jx, badvaddr=%#jx\n",
1695 access_name[access_type - 1], (intmax_t)pc,
1696 (intmax_t)frame->badvaddr);