4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
25 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
26 * Use is subject to license terms.
28 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/stack.h>
36 #include <machine/frame.h>
37 #include <machine/md_var.h>
38 #include <machine/pcb.h>
39 #include <machine/stack.h>
42 #include <vm/vm_param.h>
/*
 * kernelbase marks the user/kernel address boundary: the checked
 * dtrace_fuwordN() routines below reject any uaddr >= kernelbase.
 * It is derived from the address of the linker-provided `kernbase'
 * symbol.
 */
47 extern uintptr_t kernbase;
48 uintptr_t kernelbase = (uintptr_t) &kernbase;
/*
 * Unchecked user-memory fetch primitives; the dtrace_fuwordN()
 * wrappers at the bottom of this file perform the kernelbase range
 * check before delegating to these (defined elsewhere — presumably in
 * assembly, confirm against the build).
 */
50 uint8_t dtrace_fuword8_nocheck(void *);
51 uint16_t dtrace_fuword16_nocheck(void *);
52 uint32_t dtrace_fuword32_nocheck(void *);
53 uint64_t dtrace_fuword64_nocheck(void *);
/*
 * Upper bound on iterations of the user stack walk in
 * dtrace_getustack_common(), guarding against circular frame chains.
 */
55 int dtrace_ustackdepth_max = 2048;
/*
 * dtrace_getpcstack: capture a kernel-mode stack trace into pcstack[]
 * (at most pcstack_limit entries) by walking the chain of saved %ebp
 * frame pointers, skipping `aframes' artificial DTrace frames.
 * NOTE(review): several original lines are elided in this excerpt;
 * comments below describe only the visible code.
 */
58 dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
63 struct i386_frame *frame;
/* PC of the probe site recorded by the invop handler, if any. */
65 pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
/* Seed the trace with intrpc (its guard condition is elided here). */
68 pcstack[depth++] = (pc_t) intrpc;
/* Start the walk from the current frame pointer. */
72 __asm __volatile("movl %%ebp,%0" : "=r" (ebp));
74 frame = (struct i386_frame *)ebp;
75 while (depth < pcstack_limit) {
/* Return address saved in the current frame. */
79 callpc = frame->f_retaddr;
/* Stop once the return address leaves kernel address space. */
81 if (!INKERNEL(callpc))
/*
 * Once the artificial frames are consumed, substitute the
 * recorded dtrace caller (when set) for this slot.
 */
86 if ((aframes == 0) && (caller != 0)) {
87 pcstack[depth++] = caller;
91 pcstack[depth++] = callpc;
/*
 * Sanity-check the next frame pointer: it must advance
 * monotonically and stay within this thread's kernel stack.
 */
94 if (frame->f_frame <= frame ||
95 (vm_offset_t)frame->f_frame >= curthread->td_kstack +
96 curthread->td_kstack_pages * PAGE_SIZE)
98 frame = frame->f_frame;
/* Pad the remaining slots (loop body elided; presumably zero-fill). */
101 for (; depth < pcstack_limit; depth++) {
/*
 * dtrace_getustack_common: shared user-stack walker used by
 * dtrace_getupcstack()/dtrace_getustackdepth()/dtrace_getufpstack().
 * Follows saved frame pointers through user memory via the fuword
 * fetch primitives, optionally recording PCs into pcstack[].  When
 * pcstack is NULL only the depth is counted (see the NULL guard
 * below).  NOTE(review): many original lines are elided in this
 * excerpt; comments cover only the visible code.
 */
107 dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
112 uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack. */
/* Per-CPU DTrace fault/error flags, checked after each user fetch. */
116 volatile uint16_t *flags =
117 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
120 ASSERT(pcstack == NULL || pcstack_limit > 0);
121 ASSERT(dtrace_ustackdepth_max > 0);
/*
 * Solaris signal-context sizing, disabled pending a FreeBSD port of
 * the signal-stack handling (sigframe offsets s1/s2).
 */
123 #ifdef notyet /* XXX signal stack. */
124 if (p->p_model == DATAMODEL_NATIVE) {
125 s1 = sizeof (struct frame) + 2 * sizeof (long);
126 s2 = s1 + sizeof (siginfo_t);
128 s1 = sizeof (struct frame32) + 3 * sizeof (int);
129 s2 = s1 + sizeof (siginfo32_t);
135 * We limit the number of times we can go around this
136 * loop to account for a circular stack.
/* Bail out with BADSTACK once dtrace_ustackdepth_max is exceeded. */
138 if (ret++ >= dtrace_ustackdepth_max) {
139 *flags |= CPU_DTRACE_BADSTACK;
140 cpu_core[curcpu].cpuc_dtrace_illval = sp;
/* Record the current PC only when a caller-supplied buffer exists. */
144 if (pcstack != NULL) {
145 *pcstack++ = (uint64_t)pc;
147 if (pcstack_limit <= 0)
/*
 * Disabled Solaris path: when the frame sits atop a signal
 * context, resume the walk from the register state saved in the
 * ucontext chain instead of the ordinary frame link.
 */
156 #ifdef notyet /* XXX signal stack. */
157 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
158 if (p->p_model == DATAMODEL_NATIVE) {
159 ucontext_t *ucp = (ucontext_t *)oldcontext;
160 greg_t *gregs = ucp->uc_mcontext.gregs;
162 sp = dtrace_fulword(&gregs[REG_FP]);
163 pc = dtrace_fulword(&gregs[REG_PC]);
165 oldcontext = dtrace_fulword(&ucp->uc_link);
167 ucontext32_t *ucp = (ucontext32_t *)oldcontext;
168 greg32_t *gregs = ucp->uc_mcontext.gregs;
170 sp = dtrace_fuword32(&gregs[EBP]);
171 pc = dtrace_fuword32(&gregs[EIP]);
173 oldcontext = dtrace_fuword32(&ucp->uc_link);
176 if (p->p_model == DATAMODEL_NATIVE) {
177 struct frame *fr = (struct frame *)sp;
179 pc = dtrace_fulword(&fr->fr_savpc);
180 sp = dtrace_fulword(&fr->fr_savfp);
182 struct frame32 *fr = (struct frame32 *)sp;
184 pc = dtrace_fuword32(&fr->fr_savpc);
185 sp = dtrace_fuword32(&fr->fr_savfp);
/*
 * Live FreeBSD/i386 path: fetch the saved return address and the
 * next frame pointer directly from the user frame at `sp'.
 */
189 pc = dtrace_fuword32((void *)(sp +
190 offsetof(struct i386_frame, f_retaddr)));
191 sp = dtrace_fuword32((void *)sp);
192 #endif /* ! notyet */
/* Malformed frame chain detected (condition elided in this view). */
195 *flags |= CPU_DTRACE_BADSTACK;
196 cpu_core[curcpu].cpuc_dtrace_illval = sp;
201 * This is totally bogus: if we faulted, we're going to clear
202 * the fault and break. This is to deal with the apparently
203 * broken Java stacks on x86.
205 if (*flags & CPU_DTRACE_FAULT) {
206 *flags &= ~CPU_DTRACE_FAULT;
/*
 * dtrace_getupcstack: capture the current thread's user-mode stack
 * trace into pcstack[] (at most pcstack_limit entries).  The first
 * slot is the pid; remaining slots are user PCs gathered by
 * dtrace_getustack_common(), with unused slots zeroed at the end.
 * NOTE(review): several original lines are elided in this excerpt.
 */
215 dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
218 struct trapframe *tf;
219 uintptr_t pc, sp, fp;
220 volatile uint16_t *flags =
221 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
/* Do nothing further if a DTrace fault is already pending. */
224 if (*flags & CPU_DTRACE_FAULT)
227 if (pcstack_limit <= 0)
231 * If there's no user context we still need to zero the stack.
233 if (p == NULL || (tf = curthread->td_frame) == NULL)
/* Slot 0 carries the pid so consumers can attribute the stack. */
236 *pcstack++ = (uint64_t)p->p_pid;
239 if (pcstack_limit <= 0)
246 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
248 * In an entry probe. The frame pointer has not yet been
249 * pushed (that happens in the function prologue). The
250 * best approach is to add the current pc as a missing top
251 * of stack and back the pc up to the caller, which is stored
252 * at the current stack pointer address since the call
253 * instruction puts it there right before the branch.
256 *pcstack++ = (uint64_t)pc;
258 if (pcstack_limit <= 0)
/* Caller's return address sits at *sp (see comment above). */
261 pc = dtrace_fuword32((void *) sp);
264 n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
266 ASSERT(n <= pcstack_limit);
/* Zero-fill any remaining slots (loop body elided; presumably 0). */
272 while (pcstack_limit-- > 0)
/*
 * dtrace_getustackdepth: count (without recording) the frames on the
 * current thread's user stack by invoking dtrace_getustack_common()
 * with a NULL buffer.  NOTE(review): several original lines are
 * elided in this excerpt.
 */
277 dtrace_getustackdepth(void)
280 struct trapframe *tf;
281 uintptr_t pc, fp, sp;
/* No user context or pending fault: nothing to walk. */
284 if (p == NULL || (tf = curthread->td_frame) == NULL)
287 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
294 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
296 * In an entry probe. The frame pointer has not yet been
297 * pushed (that happens in the function prologue). The
298 * best approach is to add the current pc as a missing top
299 * of stack and back the pc up to the caller, which is stored
300 * at the current stack pointer address since the call
301 * instruction puts it there right before the branch.
/* Caller's return address sits at *sp (see comment above). */
304 pc = dtrace_fuword32((void *) sp);
/* NULL pcstack / limit 0 => common walker only counts depth. */
308 n += dtrace_getustack_common(NULL, 0, pc, fp);
/*
 * dtrace_getufpstack: like dtrace_getupcstack() but records frame
 * pointers into fpstack[] alongside the PCs in pcstack[].  Walks the
 * user frame chain inline (rather than via dtrace_getustack_common())
 * so each frame pointer can be captured.  NOTE(review): several
 * original lines are elided in this excerpt; comments cover only the
 * visible code.
 */
314 dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
317 struct trapframe *tf;
318 uintptr_t pc, sp, fp;
319 volatile uint16_t *flags =
320 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
321 #ifdef notyet /* XXX signal stack */
322 uintptr_t oldcontext;
/* Do nothing further if a DTrace fault is already pending. */
326 if (*flags & CPU_DTRACE_FAULT)
329 if (pcstack_limit <= 0)
333 * If there's no user context we still need to zero the stack.
335 if (p == NULL || (tf = curthread->td_frame) == NULL)
/* Slot 0 carries the pid, matching dtrace_getupcstack(). */
338 *pcstack++ = (uint64_t)p->p_pid;
341 if (pcstack_limit <= 0)
/*
 * Disabled Solaris signal-context sizing (sigframe offsets s1/s2),
 * pending a FreeBSD port of the signal-stack handling.
 */
348 #ifdef notyet /* XXX signal stack */
349 oldcontext = lwp->lwp_oldcontext;
351 if (p->p_model == DATAMODEL_NATIVE) {
352 s1 = sizeof (struct frame) + 2 * sizeof (long);
353 s2 = s1 + sizeof (siginfo_t);
355 s1 = sizeof (struct frame32) + 3 * sizeof (int);
356 s2 = s1 + sizeof (siginfo32_t);
/*
 * Entry probe: the prologue has not pushed the frame pointer yet,
 * so record the current pc and back up to the caller's return
 * address stored at *sp.
 */
360 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
361 *pcstack++ = (uint64_t)pc;
364 if (pcstack_limit <= 0)
367 pc = dtrace_fuword32((void *)sp);
371 *pcstack++ = (uint64_t)pc;
374 if (pcstack_limit <= 0)
/*
 * Disabled Solaris path: resume the walk from register state saved
 * in the signal ucontext chain when the frame sits atop one.
 */
380 #ifdef notyet /* XXX signal stack */
381 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
382 if (p->p_model == DATAMODEL_NATIVE) {
383 ucontext_t *ucp = (ucontext_t *)oldcontext;
384 greg_t *gregs = ucp->uc_mcontext.gregs;
386 sp = dtrace_fulword(&gregs[REG_FP]);
387 pc = dtrace_fulword(&gregs[REG_PC]);
389 oldcontext = dtrace_fulword(&ucp->uc_link);
391 ucontext_t *ucp = (ucontext_t *)oldcontext;
392 greg_t *gregs = ucp->uc_mcontext.gregs;
394 sp = dtrace_fuword32(&gregs[EBP]);
395 pc = dtrace_fuword32(&gregs[EIP]);
397 oldcontext = dtrace_fuword32(&ucp->uc_link);
/*
 * Live FreeBSD/i386 path: fetch the saved return address and next
 * frame pointer from the user frame at `fp'.
 */
402 pc = dtrace_fuword32((void *)(fp +
403 offsetof(struct i386_frame, f_retaddr)));
404 fp = dtrace_fuword32((void *)fp);
408 * This is totally bogus: if we faulted, we're going to clear
409 * the fault and break. This is to deal with the apparently
410 * broken Java stacks on x86.
412 if (*flags & CPU_DTRACE_FAULT) {
413 *flags &= ~CPU_DTRACE_FAULT;
/* Zero-fill any remaining slots (loop body elided; presumably 0). */
419 while (pcstack_limit-- > 0)
/*
 * dtrace_getarg: fetch probe argument `arg' from the stack of the
 * probe site, skipping `aframes' artificial DTrace frames.  Two
 * layouts are handled: arrival via the invalid-opcode (invop) trap
 * handler, and a direct call into dtrace_probe().  NOTE(review):
 * several original lines are elided in this excerpt.
 */
424 dtrace_getarg(int arg, int aframes)
426 struct trapframe *frame;
427 struct i386_frame *fp = (struct i386_frame *)dtrace_getfp();
428 uintptr_t *stack, val;
/* Walk off the artificial frames (loop body elided in this view). */
431 for (i = 1; i <= aframes; i++) {
/*
 * The return address (rounded to word size) matching
 * dtrace_invop_callsite identifies arrival via the invop handler.
 */
434 if (P2ROUNDUP(fp->f_retaddr, 4) ==
435 (long)dtrace_invop_callsite) {
437 * If we pass through the invalid op handler, we will
438 * use the trap frame pointer that it pushed on the
439 * stack as the second argument to dtrace_invop() as
440 * the pointer to the stack. When using this stack, we
441 * must skip the third argument to dtrace_invop(),
442 * which is included in the i386_frame.
444 frame = (struct trapframe *)(((uintptr_t **)&fp[1])[0]);
446 * Skip the three hardware-saved registers and the
/* Arguments start 4 words above the trap-time stack pointer. */
449 stack = (uintptr_t *)frame->tf_isp + 4;
456 * We know that we did not come through a trap to get into
457 * dtrace_probe() -- the provider simply called dtrace_probe()
458 * directly. As this is the case, we need to shift the argument
459 * that we're looking for: the probe ID is the first argument to
460 * dtrace_probe(), so the argument n will actually be found where
461 * one would expect to find argument (n + 1).
/* Skip saved %ebp and return address to reach the argument area. */
465 stack = (uintptr_t *)fp + 2;
/* Tolerate faults while dereferencing the computed stack slot. */
468 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
470 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
/*
 * dtrace_getstackdepth: count kernel stack frames by walking the
 * saved %ebp chain, then subtract `aframes' artificial DTrace frames.
 * Uses the same frame-chain sanity checks as dtrace_getpcstack().
 * NOTE(review): several original lines are elided in this excerpt.
 */
476 dtrace_getstackdepth(int aframes)
479 struct i386_frame *frame;
483 ebp = dtrace_getfp();
484 frame = (struct i386_frame *)ebp;
/* Stop as soon as the frame or its link leaves kernel space. */
487 if (!INKERNEL((long) frame))
489 if (!INKERNEL((long) frame->f_frame))
/*
 * Next frame must advance monotonically and stay within this
 * thread's kernel stack; otherwise terminate the walk.
 */
492 if (frame->f_frame <= frame ||
493 (vm_offset_t)frame->f_frame >= curthread->td_kstack +
494 curthread->td_kstack_pages * PAGE_SIZE)
496 frame = frame->f_frame;
/* Exclude the artificial DTrace frames from the reported depth. */
501 return depth - aframes;
/*
 * dtrace_getreg: return the value of register `reg' (a D-language
 * register index) from trapframe `rp'.  regmap[] translates the
 * reg.d ordering to machine register identifiers; out-of-range or
 * unsupported indices raise CPU_DTRACE_ILLOP.  NOTE(review): most of
 * the switch body is elided in this excerpt; only a few cases are
 * visible below.
 */
505 dtrace_getreg(struct trapframe *rp, uint_t reg)
508 int regmap[] = { /* Order is dependent on reg.d */
515 REG_RBP, /* 6 EBP, REG_FP */
518 REG_RDX, /* 9 EDX, REG_R1 */
519 REG_RCX, /* 10 ECX */
520 REG_RAX, /* 11 EAX, REG_R0 */
521 REG_TRAPNO, /* 12 TRAPNO */
522 REG_ERR, /* 13 ERR */
523 REG_RIP, /* 14 EIP, REG_PC */
525 REG_RFL, /* 16 EFL, REG_PS */
526 REG_RSP, /* 17 UESP, REG_SP */
/* Reject before indexing (guard condition elided in this view). */
531 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
/* Bounds-check the D register index against the map size. */
535 if (reg >= sizeof (regmap) / sizeof (int)) {
536 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
/* %gs is not in the trapframe; read it from the thread's PCB. */
544 if ((pcb = curthread->td_pcb) == NULL) {
545 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
548 return (pcb->pcb_gs);
570 return (rp->tf_trapno);
578 return (rp->tf_eflags);
/* Unhandled register index: flag an illegal operation. */
586 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
/*
 * dtrace_copycheck: validate a user<->kernel copy.  The kernel side
 * must lie at or above kernelbase without wrapping (ASSERT); the user
 * range must stay strictly below kernelbase and must not wrap, else
 * CPU_DTRACE_BADADDR is raised with the offending address.
 * NOTE(review): the success-path return is elided in this excerpt.
 */
592 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
594 ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);
/* Reject ranges that reach kernel space or overflow uintptr_t. */
596 if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
597 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
598 cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
/*
 * dtrace_copyin: copy `size' bytes from user address `uaddr' to
 * kernel address `kaddr', but only after dtrace_copycheck() approves
 * the ranges.  On rejection the check routine has already raised the
 * appropriate CPU_DTRACE_* flag, so failure here is silent.
 */
606 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
607 volatile uint16_t *flags)
609 if (dtrace_copycheck(uaddr, kaddr, size))
610 dtrace_copy(uaddr, kaddr, size);
/*
 * dtrace_copyout: copy `size' bytes from kernel address `kaddr' to
 * user address `uaddr' after dtrace_copycheck() approves the ranges
 * (note the argument order to dtrace_copy() is reversed relative to
 * dtrace_copyin()).
 */
614 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
615 volatile uint16_t *flags)
617 if (dtrace_copycheck(uaddr, kaddr, size))
618 dtrace_copy(kaddr, uaddr, size);
/*
 * dtrace_copyinstr: copy a NUL-terminated string of at most `size'
 * bytes from user `uaddr' to kernel `kaddr', gated by
 * dtrace_copycheck(); dtrace_copystr() reports faults via `flags'.
 */
622 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
623 volatile uint16_t *flags)
625 if (dtrace_copycheck(uaddr, kaddr, size))
626 dtrace_copystr(uaddr, kaddr, size, flags);
/*
 * dtrace_copyoutstr: copy a NUL-terminated string of at most `size'
 * bytes from kernel `kaddr' to user `uaddr', gated by
 * dtrace_copycheck(); dtrace_copystr() reports faults via `flags'.
 */
630 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
631 volatile uint16_t *flags)
633 if (dtrace_copycheck(uaddr, kaddr, size))
634 dtrace_copystr(kaddr, uaddr, size, flags);
/*
 * dtrace_fuword8: fetch one byte from user address `uaddr'.  Kernel
 * addresses are rejected with CPU_DTRACE_BADADDR (error-path return
 * elided in this excerpt); valid addresses are read via the
 * unchecked primitive.
 */
638 dtrace_fuword8(void *uaddr)
640 if ((uintptr_t)uaddr >= kernelbase) {
641 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
642 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
645 return (dtrace_fuword8_nocheck(uaddr));
/*
 * dtrace_fuword16: fetch a 16-bit word from user address `uaddr'.
 * Same kernelbase guard and BADADDR reporting as dtrace_fuword8().
 */
649 dtrace_fuword16(void *uaddr)
651 if ((uintptr_t)uaddr >= kernelbase) {
652 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
653 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
656 return (dtrace_fuword16_nocheck(uaddr));
/*
 * dtrace_fuword32: fetch a 32-bit word from user address `uaddr'.
 * This is the workhorse used by the user stack walkers above; same
 * kernelbase guard and BADADDR reporting as dtrace_fuword8().
 */
660 dtrace_fuword32(void *uaddr)
662 if ((uintptr_t)uaddr >= kernelbase) {
663 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
664 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
667 return (dtrace_fuword32_nocheck(uaddr));
/*
 * dtrace_fuword64: fetch a 64-bit word from user address `uaddr'.
 * Same kernelbase guard and BADADDR reporting as dtrace_fuword8().
 */
671 dtrace_fuword64(void *uaddr)
673 if ((uintptr_t)uaddr >= kernelbase) {
674 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
675 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
678 return (dtrace_fuword64_nocheck(uaddr));