2 * Copyright (c) 1982, 1986 The Regents of the University of California.
3 * Copyright (c) 1989, 1990 William Jolitz
4 * Copyright (c) 1994 John Dyson
7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department, and William Jolitz.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
36 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
37 * from: src/sys/i386/i386/vm_machdep.c,v 1.132.2.2 2000/08/26 04:19:26 yokota
38 * JNPR: vm_machdep.c,v 1.8.2.2 2007/08/16 15:59:17 girish
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
44 #include "opt_compat.h"
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/malloc.h>
51 #include <sys/syscall.h>
52 #include <sys/sysent.h>
54 #include <sys/vnode.h>
55 #include <sys/vmmeter.h>
56 #include <sys/kernel.h>
57 #include <sys/sysctl.h>
58 #include <sys/unistd.h>
60 #include <machine/cache.h>
61 #include <machine/clock.h>
62 #include <machine/cpu.h>
63 #include <machine/md_var.h>
64 #include <machine/pcb.h>
67 #include <vm/vm_extern.h>
69 #include <vm/vm_kern.h>
70 #include <vm/vm_map.h>
71 #include <vm/vm_page.h>
72 #include <vm/vm_pageout.h>
73 #include <vm/vm_param.h>
75 #include <vm/uma_int.h>
80 #include <sys/sf_buf.h>
/*
 * Size of a minimal call frame pushed below the initial kernel SP for a
 * new thread.  o32/o64 reserve four argument slots plus two extra register
 * slots; n32/n64 need only the four register slots.
 * NOTE(review): excerpt is fragmented — the #endif lines are not visible here.
 */
83 /* Duplicated from asm.h */
84 #if defined(__mips_o32)
89 #if defined(__mips_o32) || defined(__mips_o64)
90 #define CALLFRAME_SIZ (SZREG * (4 + 2))
91 #elif defined(__mips_n32) || defined(__mips_n64)
92 #define CALLFRAME_SIZ (SZREG * 4)
/*
 * sendfile(2) sf_buf pool state (32-bit kernels only; n64 can direct-map
 * all of physical memory and needs no mapping buffers — see the closing
 * "!__mips_n64" guard below).
 */
98 #define NSFBUFS (512 + maxusers * 16)
/* Peak / current usage counters, exported read-only via sysctl below. */
102 static int nsfbufspeak;
103 static int nsfbufsused;
105 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
106 "Maximum number of sendfile(2) sf_bufs available");
107 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
108 "Number of sendfile(2) sf_bufs at peak usage");
109 SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
110 "Number of sendfile(2) sf_bufs in use");
/* Populate the free list once mbufs are up (SI_SUB_MBUF). */
112 static void sf_buf_init(void *arg);
113 SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);
116 * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, with the
117 * sf_freelist head with the sf_lock mutex.
120 SLIST_HEAD(, sf_buf) sf_head;
/* Count of threads sleeping in sf_buf_alloc() waiting for a free buffer. */
124 static u_int sf_buf_alloc_want;
125 #endif /* !__mips_n64 */
/*
 * cpu_fork: machine-dependent half of fork().  Duplicates td1's pcb (which
 * on MIPS embeds the trapframe) into td2, sets the child's return values,
 * and builds a pcb_context that makes the child start life in
 * fork_trampoline() -> fork_return().
 * NOTE(review): excerpt is fragmented — the pcb2 declaration, braces and
 * several statements are not visible in this view.
 */
128 * Finish a fork operation, with process p2 nearly set up.
129 * Copy and update the pcb, set up the stack so that the child
130 * ready to run and return to user mode.
133 cpu_fork(register struct thread *td1,register struct proc *p2,
134 struct thread *td2,int flags)
136 register struct proc *p1;
/* RFPROC clear: no new process is being created; nothing to set up. */
140 if ((flags & RFPROC) == 0)
142 /* It is assumed that the vm_thread_alloc called
143 * cpu_thread_alloc() before cpu_fork is called.
146 /* Point the pcb to the top of the stack */
149 /* Copy p1's pcb, note that in this case
150 * our pcb also includes the td_frame being copied
151 * too. The older mips2 code did an additional copy
152 * of the td_frame, for us that's not needed any
153 * longer (this copy does them both)
155 bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
157 /* Point mdproc and then copy over td1's contents
158 * md_proc is empty for MIPS
/* Inherit only the FPU-used flag; other md_flags do not carry over. */
160 td2->td_md.md_flags = td1->td_md.md_flags & MDTD_FPUSED;
163 * Set up return-value registers as fork() libc stub expects.
/* Child sees v0=0/v1=1/a3=0: "I am the child, no error". */
165 td2->td_frame->v0 = 0;
166 td2->td_frame->v1 = 1;
167 td2->td_frame->a3 = 0;
/* Flush live FP state to td1's pcb before the copy is consumed. */
169 if (td1 == PCPU_GET(fpcurthread))
170 MipsSaveCurFPState(td1);
/* First cpu_switch() to the child "returns" into fork_trampoline. */
172 pcb2->pcb_context[PCB_REG_RA] = (register_t)(intptr_t)fork_trampoline;
173 /* Make sp 64-bit aligned */
174 pcb2->pcb_context[PCB_REG_SP] = (register_t)(((vm_offset_t)td2->td_pcb &
175 ~(sizeof(__int64_t) - 1)) - CALLFRAME_SIZ);
/* s0/s1/s2 are fork_trampoline's callee/arg/frame arguments. */
176 pcb2->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)fork_return;
177 pcb2->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)td2;
178 pcb2->pcb_context[PCB_REG_S2] = (register_t)(intptr_t)td2->td_frame;
179 pcb2->pcb_context[PCB_REG_SR] = mips_rd_status() &
180 (MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_INT_MASK);
182 * FREEBSD_DEVELOPERS_FIXME:
183 * Setup any other CPU-Specific registers (Not MIPS Standard)
184 * and/or bits in other standard MIPS registers (if CPU-Specific)
188 td2->td_md.md_tls = td1->td_md.md_tls;
/* spinlock_count=1 so fork_exit() can release sched lock cleanly. */
189 td2->td_md.md_saved_intr = MIPS_SR_INT_IE;
190 td2->td_md.md_spinlock_count = 1;
/*
 * Cavium Octeon COP2 (crypto coprocessor) context: save the live state
 * to the owner's context, then deep-copy both kernel and userland
 * contexts into the child.  Presumably guarded by CPU_CNMIPS — the
 * #ifdef lines are not visible in this excerpt; confirm against full file.
 */
192 if (td1->td_md.md_flags & MDTD_COP2USED) {
193 if (td1->td_md.md_cop2owner == COP2_OWNER_USERLAND) {
194 if (td1->td_md.md_ucop2)
195 octeon_cop2_save(td1->td_md.md_ucop2);
197 panic("cpu_fork: ucop2 is NULL but COP2 is enabled");
200 if (td1->td_md.md_cop2)
201 octeon_cop2_save(td1->td_md.md_cop2);
203 panic("cpu_fork: cop2 is NULL but COP2 is enabled");
207 if (td1->td_md.md_cop2) {
208 td2->td_md.md_cop2 = octeon_cop2_alloc_ctx();
209 memcpy(td2->td_md.md_cop2, td1->td_md.md_cop2,
210 sizeof(*td1->td_md.md_cop2));
212 if (td1->td_md.md_ucop2) {
213 td2->td_md.md_ucop2 = octeon_cop2_alloc_ctx();
214 memcpy(td2->td_md.md_ucop2, td1->td_md.md_ucop2,
215 sizeof(*td1->td_md.md_ucop2));
217 td2->td_md.md_cop2owner = td1->td_md.md_cop2owner;
218 pcb2->pcb_context[PCB_REG_SR] |= MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX;
219 /* Clear COP2 bits for userland & kernel */
220 td2->td_frame->sr &= ~MIPS_SR_COP_2_BIT;
221 pcb2->pcb_context[PCB_REG_SR] &= ~MIPS_SR_COP_2_BIT;
226 * Intercept the return address from a freshly forked process that has NOT
227 * been scheduled yet.
229 * This is needed to make kernel threads stay in kernel mode.
/*
 * Redirect the thread's trampoline target (s0) and its argument (s1),
 * which fork_trampoline loads before calling func(arg, frame).
 */
232 cpu_set_fork_handler(struct thread *td, void (*func) __P((void *)), void *arg)
235 * Note that the trap frame follows the args, so the function
236 * is really called like this: func(arg, frame);
238 td->td_pcb->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)func;
239 td->td_pcb->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)arg;
/*
 * MD process-exit hook.  Body not visible in this excerpt; on MIPS this
 * is typically empty — confirm against the full file.
 */
243 cpu_exit(struct thread *td)
/*
 * Per-thread exit teardown: drop FPU ownership if this thread holds it,
 * and scrub (but do not free — see cpu_thread_free) any Octeon COP2
 * contexts so stale crypto state cannot leak to a recycled thread.
 */
248 cpu_thread_exit(struct thread *td)
251 if (PCPU_GET(fpcurthread) == td)
252 PCPU_GET(fpcurthread) = (struct thread *)0;
254 if (td->td_md.md_cop2)
255 memset(td->td_md.md_cop2, 0,
256 sizeof(*td->td_md.md_cop2));
257 if (td->td_md.md_ucop2)
258 memset(td->td_md.md_ucop2, 0,
259 sizeof(*td->td_md.md_ucop2));
/*
 * Release per-thread MD resources: free both Octeon COP2 contexts and
 * NULL the pointers to guard against use-after-free / double-free.
 */
264 cpu_thread_free(struct thread *td)
267 if (td->td_md.md_cop2)
268 octeon_cop2_free_ctx(td->td_md.md_cop2);
269 if (td->td_md.md_ucop2)
270 octeon_cop2_free_ctx(td->td_md.md_ucop2);
271 td->td_md.md_cop2 = NULL;
272 td->td_md.md_ucop2 = NULL;
/*
 * Hook invoked when a thread is cached for reuse.  Body not visible in
 * this excerpt; on MIPS this is typically empty — confirm in full file.
 */
277 cpu_thread_clean(struct thread *td)
/*
 * Re-cache the kernel-stack PTEs after swap-in; cpu_switch() uses
 * md_upte[] to wire the stack into the TLB without a pmap lookup.
 * Software-only PTE bits are masked out before caching.
 */
282 cpu_thread_swapin(struct thread *td)
288 * The kstack may be at a different physical address now.
289 * Cache the PTEs for the Kernel stack in the machine dependent
290 * part of the thread struct so cpu_switch() can quickly map in
291 * the pcb struct and kernel stack.
293 for (i = 0; i < KSTACK_PAGES; i++) {
294 pte = pmap_pte(kernel_pmap, td->td_kstack + i * PAGE_SIZE);
295 td->td_md.md_upte[i] = *pte & ~TLBLO_SWBITS_MASK;
/*
 * Swap-out hook.  Body not visible in this excerpt; nothing to undo on
 * MIPS since md_upte[] is refreshed on swap-in — confirm in full file.
 */
300 cpu_thread_swapout(struct thread *td)
/*
 * Initialize MD thread state for a new kstack: place the pcb at the top
 * of the stack (trapframe is its first member, so td_frame aliases it)
 * and pre-cache the stack PTEs for cpu_switch(), as in cpu_thread_swapin.
 */
305 cpu_thread_alloc(struct thread *td)
310 KASSERT((td->td_kstack & (1 << PAGE_SHIFT)) == 0, ("kernel stack must be aligned."));
311 td->td_pcb = (struct pcb *)(td->td_kstack +
312 td->td_kstack_pages * PAGE_SIZE) - 1;
313 td->td_frame = &td->td_pcb->pcb_regs;
315 for (i = 0; i < KSTACK_PAGES; i++) {
316 pte = pmap_pte(kernel_pmap, td->td_kstack + i * PAGE_SIZE);
317 td->td_md.md_upte[i] = *pte & ~TLBLO_SWBITS_MASK;
/*
 * Store a syscall's return value(s) into the user trapframe.  Handles
 * the indirect SYS___syscall entry, where 64-bit syscall numbers/results
 * are split across register pairs on 32-bit ABIs (o32 word order chosen
 * by _QUAD_LOWWORD).
 * NOTE(review): heavily fragmented — the switch(error) skeleton, the
 * quad_syscall/code setup, and several branches are not visible here.
 */
322 cpu_set_syscall_retval(struct thread *td, int error)
324 struct trapframe *locr0 = td->td_frame;
330 #if defined(__mips_n32) || defined(__mips_n64)
331 #ifdef COMPAT_FREEBSD32
/* 64-bit kernel running a 32-bit binary: indirect entry is quad-style. */
332 if (code == SYS___syscall && SV_PROC_FLAG(td->td_proc, SV_ILP32))
336 if (code == SYS___syscall)
340 if (code == SYS_syscall)
342 else if (code == SYS___syscall) {
/* Real syscall number lives in the low word of the 64-bit argument. */
344 code = _QUAD_LOWWORD ? locr0->a1 : locr0->a0;
351 if (quad_syscall && code != SYS_lseek) {
353 * System call invoked through the
354 * SYS___syscall interface but the
355 * return value is really just 32
358 locr0->v0 = td->td_retval[0];
360 locr0->v1 = td->td_retval[0];
/* Normal case: two return registers straight from td_retval[]. */
363 locr0->v0 = td->td_retval[0];
364 locr0->v1 = td->td_retval[1];
/* ERESTART: rewind the PC to the saved syscall instruction address. */
370 locr0->pc = td->td_pcb->pcb_tpc;
374 break; /* nothing to do */
377 if (quad_syscall && code != SYS_lseek) {
390 * Initialize machine state (pcb and trap frame) for a new thread about to
391 * upcall. Put enough state in the new thread's PCB to get it to go back
392 * userret(), where we can intercept it again to set the return (upcall)
393 * Address and stack, along with those from upcalls that are from other sources
394 * such as those generated in thread_userret() itself.
/*
 * Mirrors the context setup in cpu_fork() but clones from td0 into td,
 * for thr_new()/kse-style thread creation rather than process fork.
 * NOTE(review): fragmented — pcb2 declaration/assignment lines are not
 * visible in this excerpt.
 */
397 cpu_set_upcall(struct thread *td, struct thread *td0)
401 /* Point the pcb to the top of the stack. */
405 * Copy the upcall pcb. This loads kernel regs.
406 * Those not loaded individually below get their default
409 * XXXKSE It might be a good idea to simply skip this as
410 * the values of the other registers may be unimportant.
411 * This would remove any requirement for knowing the KSE
412 * at this time (see the matching comment below for
413 * more analysis) (need a good safe default).
414 * In MIPS, the trapframe is the first element of the PCB
415 * and gets copied when we copy the PCB. No separate copy
418 bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
421 * Set registers for trampoline to user mode.
/* Same fork_trampoline/fork_return chain as cpu_fork(), targeting td. */
424 pcb2->pcb_context[PCB_REG_RA] = (register_t)(intptr_t)fork_trampoline;
425 /* Make sp 64-bit aligned */
426 pcb2->pcb_context[PCB_REG_SP] = (register_t)(((vm_offset_t)td->td_pcb &
427 ~(sizeof(__int64_t) - 1)) - CALLFRAME_SIZ);
428 pcb2->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)fork_return;
429 pcb2->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)td;
430 pcb2->pcb_context[PCB_REG_S2] = (register_t)(intptr_t)td->td_frame;
431 /* Dont set IE bit in SR. sched lock release will take care of it */
432 pcb2->pcb_context[PCB_REG_SR] = mips_rd_status() &
433 (MIPS_SR_PX | MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_INT_MASK);
436 * FREEBSD_DEVELOPERS_FIXME:
437 * Setup any other CPU-Specific registers (Not MIPS Standard)
441 /* SMP Setup to release sched_lock in fork_exit(). */
442 td->td_md.md_spinlock_count = 1;
443 td->td_md.md_saved_intr = MIPS_SR_INT_IE;
445 /* Maybe we need to fix this? */
446 td->td_md.md_saved_sr = ( (MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT) |
447 (MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX) |
448 (MIPS_SR_INT_IE | MIPS_HARD_INT_MASK));
453 * Set that machine state for performing an upcall that has to
454 * be done in thread_userret() so that those upcalls generated
455 * in thread_userret() itself can be done as well.
/*
 * Build a fresh user trapframe so the thread enters userland at
 * entry(arg) on the given stack.  The MIPS ABI requires t9 == pc at
 * function entry (PIC code computes gp from t9).
 */
458 cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
461 struct trapframe *tf;
465 * At the point where a function is called, sp must be 8
466 * byte aligned[for compatibility with 64-bit CPUs]
467 * in ``See MIPS Run'' by D. Sweetman, p. 269
470 sp = ((register_t)(intptr_t)(stack->ss_sp + stack->ss_size) & ~0x7) -
474 * Set the trap frame to point at the beginning of the uts
/* Start from a zeroed frame; only the registers below are meaningful. */
478 bzero(tf, sizeof(struct trapframe));
480 tf->pc = (register_t)(intptr_t)entry;
482 * MIPS ABI requires T9 to be the same as PC
483 * in subroutine entry point
485 tf->t9 = (register_t)(intptr_t)entry;
486 tf->a0 = (register_t)(intptr_t)arg;
489 * Keep interrupt mask
/* User mode, exception level set, interrupts enabled, current int mask. */
491 td->td_frame->sr = MIPS_SR_KSU_USER | MIPS_SR_EXL | MIPS_SR_INT_IE |
492 (mips_rd_status() & MIPS_SR_INT_MASK);
493 #if defined(__mips_n32)
494 td->td_frame->sr |= MIPS_SR_PX;
495 #elif defined(__mips_n64)
496 td->td_frame->sr |= MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX;
498 /* tf->sr |= (ALL_INT_MASK & idle_mask) | SR_INT_ENAB; */
499 /**XXX the above may now be wrong -- mips2 implements this as panic */
501 * FREEBSD_DEVELOPERS_FIXME:
502 * Setup any other CPU-Specific registers (Not MIPS Standard)
508 * Implement the pre-zeroed page mechanism.
509 * This routine is called from the idle loop.
/* Hysteresis thresholds: start zeroing below 2/3 of target, stop at 4/5. */
512 #define ZIDLE_LO(v) ((v) * 2 / 3)
513 #define ZIDLE_HI(v) ((v) * 4 / 5)
516 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
/*
 * SYSINIT hook: honor the kern.ipc.nsfbufs tunable, reserve one page of
 * KVA per buffer, and thread every sf_buf onto the free list.
 */
520 sf_buf_init(void *arg)
522 struct sf_buf *sf_bufs;
527 TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);
529 mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF);
530 SLIST_INIT(&sf_freelist.sf_head);
531 sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
532 sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
534 for (i = 0; i < nsfbufs; i++) {
535 sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
536 SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
538 sf_buf_alloc_want = 0;
542 * Get an sf_buf from the freelist. Will block if none are available.
/*
 * Map page m into a free sf_buf's KVA and return the buffer.  Sleeps on
 * &sf_freelist until sf_buf_free() wakes us, unless SFB_NOWAIT is set;
 * SFB_CATCH makes the sleep signal-interruptible (PCATCH).
 */
545 sf_buf_alloc(struct vm_page *m, int flags)
550 mtx_lock(&sf_freelist.sf_lock);
551 while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
552 if (flags & SFB_NOWAIT)
555 SFSTAT_INC(sf_allocwait);
556 error = msleep(&sf_freelist, &sf_freelist.sf_lock,
557 (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
561 * If we got a signal, don't risk going back to sleep.
567 SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
/* Track high-water mark for the kern.ipc.nsfbufspeak sysctl. */
570 nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
571 pmap_qenter(sf->kva, &sf->m, 1);
573 mtx_unlock(&sf_freelist.sf_lock);
578 * Release resources back to the system.
/*
 * Unmap the page, return the sf_buf to the free list, and wake one
 * waiter blocked in sf_buf_alloc() if any are recorded.
 */
581 sf_buf_free(struct sf_buf *sf)
583 pmap_qremove(sf->kva, 1);
584 mtx_lock(&sf_freelist.sf_lock);
585 SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
587 if (sf_buf_alloc_want > 0)
588 wakeup(&sf_freelist);
589 mtx_unlock(&sf_freelist.sf_lock);
591 #endif /* !__mips_n64 */
594 * Software interrupt handler for queued VM system processing.
/*
 * Presumably swi_vm(): runs busdma_swi() when bounce-buffer work is
 * pending — function signature is not visible in this excerpt; confirm.
 */
600 if (busdma_swi_pending)
/*
 * Record the userland TLS base for this thread; consulted when userland
 * reads the TLS pointer (e.g. via rdhwr emulation elsewhere in the port).
 */
605 cpu_set_user_tls(struct thread *td, void *tls_base)
608 td->td_md.md_tls = (char*)tls_base;
/*
 * DDB helpers: print one named register field, either directly from a
 * struct (DB_PRINT_REG) or from an indexed context array
 * (DB_PRINT_REG_ARRAY), left-aligning the name for tabular output.
 */
616 #define DB_PRINT_REG(ptr, regname) \
617 db_printf(" %-12s %p\n", #regname, (void *)(intptr_t)((ptr)->regname))
619 #define DB_PRINT_REG_ARRAY(ptr, arrname, regname) \
620 db_printf(" %-12s %p\n", #regname, (void *)(intptr_t)((ptr)->arrname[regname]))
/*
 * Dump every register saved in a trapframe to the DDB console.
 * The n32/n64 ABIs rename t4-t7 to a4-a7 (eight argument registers),
 * hence the two printing orders below.
 */
623 dump_trapframe(struct trapframe *trapframe)
626 db_printf("Trapframe at %p\n", trapframe);
628 DB_PRINT_REG(trapframe, zero);
629 DB_PRINT_REG(trapframe, ast);
630 DB_PRINT_REG(trapframe, v0);
631 DB_PRINT_REG(trapframe, v1);
632 DB_PRINT_REG(trapframe, a0);
633 DB_PRINT_REG(trapframe, a1);
634 DB_PRINT_REG(trapframe, a2);
635 DB_PRINT_REG(trapframe, a3);
636 #if defined(__mips_n32) || defined(__mips_n64)
637 DB_PRINT_REG(trapframe, a4);
638 DB_PRINT_REG(trapframe, a5);
639 DB_PRINT_REG(trapframe, a6);
640 DB_PRINT_REG(trapframe, a7);
641 DB_PRINT_REG(trapframe, t0);
642 DB_PRINT_REG(trapframe, t1);
643 DB_PRINT_REG(trapframe, t2);
644 DB_PRINT_REG(trapframe, t3);
/* o32 path (the matching #else is not visible in this excerpt). */
646 DB_PRINT_REG(trapframe, t0);
647 DB_PRINT_REG(trapframe, t1);
648 DB_PRINT_REG(trapframe, t2);
649 DB_PRINT_REG(trapframe, t3);
650 DB_PRINT_REG(trapframe, t4);
651 DB_PRINT_REG(trapframe, t5);
652 DB_PRINT_REG(trapframe, t6);
653 DB_PRINT_REG(trapframe, t7);
655 DB_PRINT_REG(trapframe, s0);
656 DB_PRINT_REG(trapframe, s1);
657 DB_PRINT_REG(trapframe, s2);
658 DB_PRINT_REG(trapframe, s3);
659 DB_PRINT_REG(trapframe, s4);
660 DB_PRINT_REG(trapframe, s5);
661 DB_PRINT_REG(trapframe, s6);
662 DB_PRINT_REG(trapframe, s7);
663 DB_PRINT_REG(trapframe, t8);
664 DB_PRINT_REG(trapframe, t9);
665 DB_PRINT_REG(trapframe, k0);
666 DB_PRINT_REG(trapframe, k1);
667 DB_PRINT_REG(trapframe, gp);
668 DB_PRINT_REG(trapframe, sp);
669 DB_PRINT_REG(trapframe, s8);
670 DB_PRINT_REG(trapframe, ra);
/* CP0 / special state captured at trap time. */
671 DB_PRINT_REG(trapframe, sr);
672 DB_PRINT_REG(trapframe, mullo);
673 DB_PRINT_REG(trapframe, mulhi);
674 DB_PRINT_REG(trapframe, badvaddr);
675 DB_PRINT_REG(trapframe, cause);
676 DB_PRINT_REG(trapframe, pc);
/*
 * DDB "show pcb [addr]" command: resolve the thread (argument or current),
 * dump its saved trapframe, its switch context (pcb_context[]), and a few
 * MD fields; warn if td_frame diverges from the pcb-embedded frame.
 */
679 DB_SHOW_COMMAND(pcb, ddb_dump_pcb)
683 struct trapframe *trapframe;
685 /* Determine which thread to examine. */
687 td = db_lookup_thread(addr, TRUE);
693 db_printf("Thread %d at %p\n", td->td_tid, td);
695 db_printf("PCB at %p\n", pcb);
/* The trapframe is embedded as the pcb's first member (pcb_regs). */
697 trapframe = &pcb->pcb_regs;
698 dump_trapframe(trapframe);
700 db_printf("PCB Context:\n");
701 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S0);
702 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S1);
703 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S2);
704 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S3);
705 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S4);
706 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S5);
707 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S6);
708 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S7);
709 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_SP);
710 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S8);
711 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_RA);
712 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_SR);
713 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_GP);
714 DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_PC);
716 db_printf("PCB onfault = %p\n", pcb->pcb_onfault);
717 db_printf("md_saved_intr = 0x%0lx\n", (long)td->td_md.md_saved_intr);
718 db_printf("md_spinlock_count = %d\n", td->td_md.md_spinlock_count);
/* A mismatch here usually means the thread was caught mid-switch. */
720 if (td->td_frame != trapframe) {
721 db_printf("td->td_frame %p is not the same as pcb_regs %p\n",
722 td->td_frame, trapframe);
727 * Dump the trapframe beginning at address specified by first argument.
/* DDB "show trapframe <addr>": treat addr as a trapframe and dump it. */
729 DB_SHOW_COMMAND(trapframe, ddb_dump_trapframe)
735 dump_trapframe((struct trapframe *)addr);