/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/stack.h>
#include <sys/pcpu.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/stack.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "regset.h"
47 extern uintptr_t kernbase;
48 uintptr_t kernelbase = (uintptr_t) &kernbase;
50 #define INKERNEL(va) (((vm_offset_t)(va)) >= USRSTACK && \
51 ((vm_offset_t)(va)) < VM_MAX_KERNEL_ADDRESS)
53 uint8_t dtrace_fuword8_nocheck(void *);
54 uint16_t dtrace_fuword16_nocheck(void *);
55 uint32_t dtrace_fuword32_nocheck(void *);
56 uint64_t dtrace_fuword64_nocheck(void *);
58 int dtrace_ustackdepth_max = 2048;
61 dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
66 struct i386_frame *frame;
68 pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
71 pcstack[depth++] = (pc_t) intrpc;
75 __asm __volatile("movl %%ebp,%0" : "=r" (ebp));
77 frame = (struct i386_frame *)ebp;
78 while (depth < pcstack_limit) {
82 callpc = frame->f_retaddr;
84 if (!INKERNEL(callpc))
89 if ((aframes == 0) && (caller != 0)) {
90 pcstack[depth++] = caller;
94 pcstack[depth++] = callpc;
97 if (frame->f_frame <= frame ||
98 (vm_offset_t)frame->f_frame >=
99 (vm_offset_t)ebp + KSTACK_PAGES * PAGE_SIZE)
101 frame = frame->f_frame;
104 for (; depth < pcstack_limit; depth++) {
110 dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
115 uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack. */
119 volatile uint16_t *flags =
120 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
123 ASSERT(pcstack == NULL || pcstack_limit > 0);
124 ASSERT(dtrace_ustackdepth_max > 0);
126 #ifdef notyet /* XXX signal stack. */
127 if (p->p_model == DATAMODEL_NATIVE) {
128 s1 = sizeof (struct frame) + 2 * sizeof (long);
129 s2 = s1 + sizeof (siginfo_t);
131 s1 = sizeof (struct frame32) + 3 * sizeof (int);
132 s2 = s1 + sizeof (siginfo32_t);
138 * We limit the number of times we can go around this
139 * loop to account for a circular stack.
141 if (ret++ >= dtrace_ustackdepth_max) {
142 *flags |= CPU_DTRACE_BADSTACK;
143 cpu_core[curcpu].cpuc_dtrace_illval = sp;
147 if (pcstack != NULL) {
148 *pcstack++ = (uint64_t)pc;
150 if (pcstack_limit <= 0)
159 #ifdef notyet /* XXX signal stack. */
160 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
161 if (p->p_model == DATAMODEL_NATIVE) {
162 ucontext_t *ucp = (ucontext_t *)oldcontext;
163 greg_t *gregs = ucp->uc_mcontext.gregs;
165 sp = dtrace_fulword(&gregs[REG_FP]);
166 pc = dtrace_fulword(&gregs[REG_PC]);
168 oldcontext = dtrace_fulword(&ucp->uc_link);
170 ucontext32_t *ucp = (ucontext32_t *)oldcontext;
171 greg32_t *gregs = ucp->uc_mcontext.gregs;
173 sp = dtrace_fuword32(&gregs[EBP]);
174 pc = dtrace_fuword32(&gregs[EIP]);
176 oldcontext = dtrace_fuword32(&ucp->uc_link);
179 if (p->p_model == DATAMODEL_NATIVE) {
180 struct frame *fr = (struct frame *)sp;
182 pc = dtrace_fulword(&fr->fr_savpc);
183 sp = dtrace_fulword(&fr->fr_savfp);
185 struct frame32 *fr = (struct frame32 *)sp;
187 pc = dtrace_fuword32(&fr->fr_savpc);
188 sp = dtrace_fuword32(&fr->fr_savfp);
192 pc = dtrace_fuword32((void *)(sp +
193 offsetof(struct i386_frame, f_retaddr)));
194 sp = dtrace_fuword32((void *)sp);
195 #endif /* ! notyet */
198 *flags |= CPU_DTRACE_BADSTACK;
199 cpu_core[curcpu].cpuc_dtrace_illval = sp;
204 * This is totally bogus: if we faulted, we're going to clear
205 * the fault and break. This is to deal with the apparently
206 * broken Java stacks on x86.
208 if (*flags & CPU_DTRACE_FAULT) {
209 *flags &= ~CPU_DTRACE_FAULT;
218 dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
221 struct trapframe *tf;
222 uintptr_t pc, sp, fp;
223 volatile uint16_t *flags =
224 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
227 if (*flags & CPU_DTRACE_FAULT)
230 if (pcstack_limit <= 0)
234 * If there's no user context we still need to zero the stack.
236 if (p == NULL || (tf = curthread->td_frame) == NULL)
239 *pcstack++ = (uint64_t)p->p_pid;
242 if (pcstack_limit <= 0)
249 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
251 * In an entry probe. The frame pointer has not yet been
252 * pushed (that happens in the function prologue). The
253 * best approach is to add the current pc as a missing top
254 * of stack and back the pc up to the caller, which is stored
255 * at the current stack pointer address since the call
256 * instruction puts it there right before the branch.
259 *pcstack++ = (uint64_t)pc;
261 if (pcstack_limit <= 0)
264 pc = dtrace_fuword32((void *) sp);
267 n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
269 ASSERT(n <= pcstack_limit);
275 while (pcstack_limit-- > 0)
280 dtrace_getustackdepth(void)
283 struct trapframe *tf;
284 uintptr_t pc, fp, sp;
287 if (p == NULL || (tf = curthread->td_frame) == NULL)
290 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
297 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
299 * In an entry probe. The frame pointer has not yet been
300 * pushed (that happens in the function prologue). The
301 * best approach is to add the current pc as a missing top
302 * of stack and back the pc up to the caller, which is stored
303 * at the current stack pointer address since the call
304 * instruction puts it there right before the branch.
307 pc = dtrace_fuword32((void *) sp);
311 n += dtrace_getustack_common(NULL, 0, pc, fp);
317 dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
320 struct trapframe *tf;
321 uintptr_t pc, sp, fp;
322 volatile uint16_t *flags =
323 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
324 #ifdef notyet /* XXX signal stack */
325 uintptr_t oldcontext;
329 if (*flags & CPU_DTRACE_FAULT)
332 if (pcstack_limit <= 0)
336 * If there's no user context we still need to zero the stack.
338 if (p == NULL || (tf = curthread->td_frame) == NULL)
341 *pcstack++ = (uint64_t)p->p_pid;
344 if (pcstack_limit <= 0)
351 #ifdef notyet /* XXX signal stack */
352 oldcontext = lwp->lwp_oldcontext;
354 if (p->p_model == DATAMODEL_NATIVE) {
355 s1 = sizeof (struct frame) + 2 * sizeof (long);
356 s2 = s1 + sizeof (siginfo_t);
358 s1 = sizeof (struct frame32) + 3 * sizeof (int);
359 s2 = s1 + sizeof (siginfo32_t);
363 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
364 *pcstack++ = (uint64_t)pc;
367 if (pcstack_limit <= 0)
370 pc = dtrace_fuword32((void *)sp);
374 *pcstack++ = (uint64_t)pc;
377 if (pcstack_limit <= 0)
383 #ifdef notyet /* XXX signal stack */
384 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
385 if (p->p_model == DATAMODEL_NATIVE) {
386 ucontext_t *ucp = (ucontext_t *)oldcontext;
387 greg_t *gregs = ucp->uc_mcontext.gregs;
389 sp = dtrace_fulword(&gregs[REG_FP]);
390 pc = dtrace_fulword(&gregs[REG_PC]);
392 oldcontext = dtrace_fulword(&ucp->uc_link);
394 ucontext_t *ucp = (ucontext_t *)oldcontext;
395 greg_t *gregs = ucp->uc_mcontext.gregs;
397 sp = dtrace_fuword32(&gregs[EBP]);
398 pc = dtrace_fuword32(&gregs[EIP]);
400 oldcontext = dtrace_fuword32(&ucp->uc_link);
405 pc = dtrace_fuword32((void *)(fp +
406 offsetof(struct i386_frame, f_retaddr)));
407 fp = dtrace_fuword32((void *)fp);
411 * This is totally bogus: if we faulted, we're going to clear
412 * the fault and break. This is to deal with the apparently
413 * broken Java stacks on x86.
415 if (*flags & CPU_DTRACE_FAULT) {
416 *flags &= ~CPU_DTRACE_FAULT;
422 while (pcstack_limit-- > 0)
427 dtrace_getarg(int arg, int aframes)
430 struct i386_frame *fp = (struct i386_frame *)dtrace_getfp();
434 for (i = 1; i <= aframes; i++) {
437 if (P2ROUNDUP(fp->f_retaddr, 4) ==
438 (long)dtrace_invop_callsite) {
440 * If we pass through the invalid op handler, we will
441 * use the pointer that it passed to the stack as the
442 * second argument to dtrace_invop() as the pointer to
443 * the stack. When using this stack, we must step
444 * beyond the EIP/RIP that was pushed when the trap was
445 * taken -- hence the "+ 1" below.
447 stack = ((uintptr_t **)&fp[1])[0] + 1;
454 * We know that we did not come through a trap to get into
455 * dtrace_probe() -- the provider simply called dtrace_probe()
456 * directly. As this is the case, we need to shift the argument
457 * that we're looking for: the probe ID is the first argument to
458 * dtrace_probe(), so the argument n will actually be found where
459 * one would expect to find argument (n + 1).
463 stack = (uintptr_t *)fp + 2;
466 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
468 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
474 dtrace_getstackdepth(int aframes)
477 struct i386_frame *frame;
481 ebp = dtrace_getfp();
482 frame = (struct i386_frame *)ebp;
485 if (!INKERNEL((long) frame))
487 if (!INKERNEL((long) frame->f_frame))
490 if (frame->f_frame <= frame ||
491 (vm_offset_t)frame->f_frame >=
492 (vm_offset_t)ebp + KSTACK_PAGES * PAGE_SIZE)
494 frame = frame->f_frame;
499 return depth - aframes;
503 dtrace_getreg(struct trapframe *rp, uint_t reg)
506 int regmap[] = { /* Order is dependent on reg.d */
513 REG_RBP, /* 6 EBP, REG_FP */
516 REG_RDX, /* 9 EDX, REG_R1 */
517 REG_RCX, /* 10 ECX */
518 REG_RAX, /* 11 EAX, REG_R0 */
519 REG_TRAPNO, /* 12 TRAPNO */
520 REG_ERR, /* 13 ERR */
521 REG_RIP, /* 14 EIP, REG_PC */
523 REG_RFL, /* 16 EFL, REG_PS */
524 REG_RSP, /* 17 UESP, REG_SP */
529 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
533 if (reg >= sizeof (regmap) / sizeof (int)) {
534 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
542 if ((pcb = curthread->td_pcb) == NULL) {
543 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
546 return (pcb->pcb_gs);
568 return (rp->tf_trapno);
576 return (rp->tf_eflags);
584 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
590 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
592 ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);
594 if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
595 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
596 cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
604 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
605 volatile uint16_t *flags)
607 if (dtrace_copycheck(uaddr, kaddr, size))
608 dtrace_copy(uaddr, kaddr, size);
612 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
613 volatile uint16_t *flags)
615 if (dtrace_copycheck(uaddr, kaddr, size))
616 dtrace_copy(kaddr, uaddr, size);
620 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
621 volatile uint16_t *flags)
623 if (dtrace_copycheck(uaddr, kaddr, size))
624 dtrace_copystr(uaddr, kaddr, size, flags);
628 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
629 volatile uint16_t *flags)
631 if (dtrace_copycheck(uaddr, kaddr, size))
632 dtrace_copystr(kaddr, uaddr, size, flags);
636 dtrace_fuword8(void *uaddr)
638 if ((uintptr_t)uaddr >= kernelbase) {
639 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
640 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
643 return (dtrace_fuword8_nocheck(uaddr));
647 dtrace_fuword16(void *uaddr)
649 if ((uintptr_t)uaddr >= kernelbase) {
650 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
651 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
654 return (dtrace_fuword16_nocheck(uaddr));
658 dtrace_fuword32(void *uaddr)
660 if ((uintptr_t)uaddr >= kernelbase) {
661 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
662 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
665 return (dtrace_fuword32_nocheck(uaddr));
669 dtrace_fuword64(void *uaddr)
671 if ((uintptr_t)uaddr >= kernelbase) {
672 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
673 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
676 return (dtrace_fuword64_nocheck(uaddr));