/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/stack.h>
#include <sys/pcpu.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/stack.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
47 uint8_t dtrace_fuword8_nocheck(void *);
48 uint16_t dtrace_fuword16_nocheck(void *);
49 uint32_t dtrace_fuword32_nocheck(void *);
50 uint64_t dtrace_fuword64_nocheck(void *);
53 dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
58 struct amd64_frame *frame;
60 pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
63 pcstack[depth++] = (pc_t) intrpc;
67 __asm __volatile("movq %%rbp,%0" : "=r" (rbp));
69 frame = (struct amd64_frame *)rbp;
70 while (depth < pcstack_limit) {
71 if (!INKERNEL((long) frame))
74 callpc = frame->f_retaddr;
76 if (!INKERNEL(callpc))
81 if ((aframes == 0) && (caller != 0)) {
82 pcstack[depth++] = caller;
86 pcstack[depth++] = callpc;
89 if (frame->f_frame <= frame ||
90 (vm_offset_t)frame->f_frame >=
91 (vm_offset_t)rbp + KSTACK_PAGES * PAGE_SIZE)
93 frame = frame->f_frame;
96 for (; depth < pcstack_limit; depth++) {
102 dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
105 volatile uint16_t *flags =
106 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
109 ASSERT(pcstack == NULL || pcstack_limit > 0);
113 if (pcstack != NULL) {
114 *pcstack++ = (uint64_t)pc;
116 if (pcstack_limit <= 0)
123 pc = dtrace_fuword64((void *)(sp +
124 offsetof(struct amd64_frame, f_retaddr)));
125 sp = dtrace_fuword64((void *)sp);
128 * This is totally bogus: if we faulted, we're going to clear
129 * the fault and break. This is to deal with the apparently
130 * broken Java stacks on x86.
132 if (*flags & CPU_DTRACE_FAULT) {
133 *flags &= ~CPU_DTRACE_FAULT;
142 dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
145 struct trapframe *tf;
146 uintptr_t pc, sp, fp;
147 volatile uint16_t *flags =
148 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
151 if (*flags & CPU_DTRACE_FAULT)
154 if (pcstack_limit <= 0)
158 * If there's no user context we still need to zero the stack.
160 if (p == NULL || (tf = curthread->td_frame) == NULL)
163 *pcstack++ = (uint64_t)p->p_pid;
166 if (pcstack_limit <= 0)
173 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
175 * In an entry probe. The frame pointer has not yet been
176 * pushed (that happens in the function prologue). The
177 * best approach is to add the current pc as a missing top
178 * of stack and back the pc up to the caller, which is stored
179 * at the current stack pointer address since the call
180 * instruction puts it there right before the branch.
183 *pcstack++ = (uint64_t)pc;
185 if (pcstack_limit <= 0)
188 pc = dtrace_fuword64((void *) sp);
191 n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
193 ASSERT(n <= pcstack_limit);
199 while (pcstack_limit-- > 0)
204 dtrace_getustackdepth(void)
207 struct trapframe *tf;
208 uintptr_t pc, fp, sp;
211 if (p == NULL || (tf = curthread->td_frame) == NULL)
214 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
221 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
223 * In an entry probe. The frame pointer has not yet been
224 * pushed (that happens in the function prologue). The
225 * best approach is to add the current pc as a missing top
226 * of stack and back the pc up to the caller, which is stored
227 * at the current stack pointer address since the call
228 * instruction puts it there right before the branch.
231 pc = dtrace_fuword64((void *) sp);
235 n += dtrace_getustack_common(NULL, 0, pc, fp);
241 dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
244 struct trapframe *tf;
245 uintptr_t pc, sp, fp;
246 volatile uint16_t *flags =
247 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
248 #ifdef notyet /* XXX signal stack */
249 uintptr_t oldcontext;
253 if (*flags & CPU_DTRACE_FAULT)
256 if (pcstack_limit <= 0)
260 * If there's no user context we still need to zero the stack.
262 if (p == NULL || (tf = curthread->td_frame) == NULL)
265 *pcstack++ = (uint64_t)p->p_pid;
268 if (pcstack_limit <= 0)
275 #ifdef notyet /* XXX signal stack */
276 oldcontext = lwp->lwp_oldcontext;
277 s1 = sizeof (struct xframe) + 2 * sizeof (long);
278 s2 = s1 + sizeof (siginfo_t);
281 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
282 *pcstack++ = (uint64_t)pc;
285 if (pcstack_limit <= 0)
288 pc = dtrace_fuword64((void *)sp);
292 *pcstack++ = (uint64_t)pc;
295 if (pcstack_limit <= 0)
301 #ifdef notyet /* XXX signal stack */
302 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
303 ucontext_t *ucp = (ucontext_t *)oldcontext;
304 greg_t *gregs = ucp->uc_mcontext.gregs;
306 sp = dtrace_fulword(&gregs[REG_FP]);
307 pc = dtrace_fulword(&gregs[REG_PC]);
309 oldcontext = dtrace_fulword(&ucp->uc_link);
313 pc = dtrace_fuword64((void *)(fp +
314 offsetof(struct amd64_frame, f_retaddr)));
315 fp = dtrace_fuword64((void *)fp);
319 * This is totally bogus: if we faulted, we're going to clear
320 * the fault and break. This is to deal with the apparently
321 * broken Java stacks on x86.
323 if (*flags & CPU_DTRACE_FAULT) {
324 *flags &= ~CPU_DTRACE_FAULT;
330 while (pcstack_limit-- > 0)
336 dtrace_getarg(int arg, int aframes)
339 struct amd64_frame *fp = (struct amd64_frame *)dtrace_getfp();
344 * A total of 6 arguments are passed via registers; any argument with
345 * index of 5 or lower is therefore in a register.
349 for (i = 1; i <= aframes; i++) {
352 if (P2ROUNDUP(fp->f_retaddr, 16) ==
353 (long)dtrace_invop_callsite) {
355 * In the case of amd64, we will use the pointer to the
356 * regs structure that was pushed when we took the
357 * trap. To get this structure, we must increment
358 * beyond the frame structure, and then again beyond
359 * the calling RIP stored in dtrace_invop(). If the
360 * argument that we're seeking is passed on the stack,
361 * we'll pull the true stack pointer out of the saved
362 * registers and decrement our argument by the number
363 * of arguments passed in registers; if the argument
364 * we're seeking is passed in regsiters, we can just
367 struct trapframe *tf =
368 (struct trapframe *)((uintptr_t)&fp[1]);
373 stack = (uintptr_t *)&tf->tf_rdi;
376 stack = (uintptr_t *)&tf->tf_rsi;
379 stack = (uintptr_t *)&tf->tf_rdx;
382 stack = (uintptr_t *)&tf->tf_rcx;
385 stack = (uintptr_t *)&tf->tf_r8;
388 stack = (uintptr_t *)&tf->tf_r9;
393 stack = (uintptr_t *)(tf->tf_rsp);
402 * We know that we did not come through a trap to get into
403 * dtrace_probe() -- the provider simply called dtrace_probe()
404 * directly. As this is the case, we need to shift the argument
405 * that we're looking for: the probe ID is the first argument to
406 * dtrace_probe(), so the argument n will actually be found where
407 * one would expect to find argument (n + 1).
413 * This shouldn't happen. If the argument is passed in a
414 * register then it should have been, well, passed in a
417 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
422 stack = (uintptr_t *)fp + 2;
425 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
427 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
434 dtrace_getstackdepth(int aframes)
437 struct amd64_frame *frame;
441 rbp = dtrace_getfp();
442 frame = (struct amd64_frame *)rbp;
445 if (!INKERNEL((long) frame))
447 if (!INKERNEL((long) frame->f_frame))
450 if (frame->f_frame <= frame ||
451 (vm_offset_t)frame->f_frame >=
452 (vm_offset_t)rbp + KSTACK_PAGES * PAGE_SIZE)
454 frame = frame->f_frame;
459 return depth - aframes;
463 dtrace_getreg(struct trapframe *rp, uint_t reg)
465 /* This table is dependent on reg.d. */
473 REG_RBP, /* 6 EBP, REG_FP */
475 REG_RBX, /* 8 EBX, REG_R1 */
477 REG_RCX, /* 10 ECX */
478 REG_RAX, /* 11 EAX, REG_R0 */
479 REG_TRAPNO, /* 12 TRAPNO */
480 REG_ERR, /* 13 ERR */
481 REG_RIP, /* 14 EIP, REG_PC */
483 REG_RFL, /* 16 EFL, REG_PS */
484 REG_RSP, /* 17 UESP, REG_SP */
489 if (reg >= sizeof (regmap) / sizeof (int)) {
490 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
496 /* This is dependent on reg.d. */
540 return (rp->tf_trapno);
550 return (rp->tf_rflags);
554 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
560 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
562 ASSERT(INKERNEL(kaddr) && kaddr + size >= kaddr);
564 if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
565 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
566 cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
574 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
575 volatile uint16_t *flags)
577 if (dtrace_copycheck(uaddr, kaddr, size))
578 dtrace_copy(uaddr, kaddr, size);
582 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
583 volatile uint16_t *flags)
585 if (dtrace_copycheck(uaddr, kaddr, size))
586 dtrace_copy(kaddr, uaddr, size);
590 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
591 volatile uint16_t *flags)
593 if (dtrace_copycheck(uaddr, kaddr, size))
594 dtrace_copystr(uaddr, kaddr, size, flags);
598 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
599 volatile uint16_t *flags)
601 if (dtrace_copycheck(uaddr, kaddr, size))
602 dtrace_copystr(kaddr, uaddr, size, flags);
606 dtrace_fuword8(void *uaddr)
608 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
609 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
610 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
613 return (dtrace_fuword8_nocheck(uaddr));
617 dtrace_fuword16(void *uaddr)
619 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
620 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
621 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
624 return (dtrace_fuword16_nocheck(uaddr));
628 dtrace_fuword32(void *uaddr)
630 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
631 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
632 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
635 return (dtrace_fuword32_nocheck(uaddr));
639 dtrace_fuword64(void *uaddr)
641 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
642 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
643 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
646 return (dtrace_fuword64_nocheck(uaddr));