4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Portions Copyright 2012,2013 Justin Hibbits <jhibbits@freebsd.org>
27 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
28 * Use is subject to license terms.
30 #include <sys/cdefs.h>
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/stack.h>
36 #include <sys/sysent.h>
39 #include <machine/frame.h>
40 #include <machine/md_var.h>
41 #include <machine/reg.h>
42 #include <machine/stack.h>
45 #include <vm/vm_param.h>
/* Offset to the LR Save word (ppc32) */
#define	RETURN_OFFSET	4
/* Offset to LR Save word (ppc64).  CR Save area sits between back chain and LR */
#define	RETURN_OFFSET64	16

#ifdef __powerpc64__
#define	OFFSET	4	/* Account for the TOC reload slot */
#else
#define	OFFSET	0	/* ppc32 has no TOC reload slot */
#endif

#define	INKERNEL(x)	((x) <= VM_MAX_KERNEL_ADDRESS && \
		(x) >= VM_MIN_KERNEL_ADDRESS)
65 dtrace_sp_inkernel(uintptr_t sp, int aframes)
70 callpc = *(vm_offset_t *)(sp + RETURN_OFFSET64);
72 callpc = *(vm_offset_t *)(sp + RETURN_OFFSET);
74 if ((callpc & 3) || (callpc < 0x100))
78 * trapexit() and asttrapexit() are sentinels
79 * for kernel stack tracing.
81 * Special-case this for 'aframes == 0', because fbt sets aframes to the
82 * trap callchain depth, so we want to break out of it.
84 if ((callpc + OFFSET == (vm_offset_t) &trapexit ||
85 callpc + OFFSET == (vm_offset_t) &asttrapexit) &&
92 static __inline uintptr_t
93 dtrace_next_sp(uintptr_t sp)
98 callpc = *(vm_offset_t *)(sp + RETURN_OFFSET64);
100 callpc = *(vm_offset_t *)(sp + RETURN_OFFSET);
104 * trapexit() and asttrapexit() are sentinels
105 * for kernel stack tracing.
107 * Special-case this for 'aframes == 0', because fbt sets aframes to the
108 * trap callchain depth, so we want to break out of it.
110 if ((callpc + OFFSET == (vm_offset_t) &trapexit ||
111 callpc + OFFSET == (vm_offset_t) &asttrapexit))
112 /* Access the trap frame */
114 return (*(uintptr_t *)sp + 48 + sizeof(register_t));
116 return (*(uintptr_t *)sp + 8 + sizeof(register_t));
119 return (*(uintptr_t*)sp);
122 static __inline uintptr_t
123 dtrace_get_pc(uintptr_t sp)
128 callpc = *(vm_offset_t *)(sp + RETURN_OFFSET64);
130 callpc = *(vm_offset_t *)(sp + RETURN_OFFSET);
134 * trapexit() and asttrapexit() are sentinels
135 * for kernel stack tracing.
137 * Special-case this for 'aframes == 0', because fbt sets aframes to the
138 * trap callchain depth, so we want to break out of it.
140 if ((callpc + OFFSET == (vm_offset_t) &trapexit ||
141 callpc + OFFSET == (vm_offset_t) &asttrapexit))
142 /* Access the trap frame */
144 return (*(uintptr_t *)sp + 48 + offsetof(struct trapframe, lr));
146 return (*(uintptr_t *)sp + 8 + offsetof(struct trapframe, lr));
155 return (greg_t)__builtin_frame_address(0);
159 dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
165 pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
169 pcstack[depth++] = (pc_t) intrpc;
175 while (depth < pcstack_limit) {
179 if (!dtrace_sp_inkernel(sp, aframes))
181 callpc = dtrace_get_pc(sp);
185 if ((aframes == 0) && (caller != 0)) {
186 pcstack[depth++] = caller;
190 pcstack[depth++] = callpc;
194 sp = dtrace_next_sp(sp);
197 for (; depth < pcstack_limit; depth++) {
203 dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
209 ASSERT(pcstack == NULL || pcstack_limit > 0);
213 if (pcstack != NULL) {
214 *pcstack++ = (uint64_t)pc;
216 if (pcstack_limit <= 0)
223 if (SV_PROC_FLAG(p, SV_ILP32)) {
224 pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET));
225 sp = dtrace_fuword32((void *)sp);
228 pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64));
229 sp = dtrace_fuword64((void *)sp);
237 dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
240 struct trapframe *tf;
242 volatile uint16_t *flags =
243 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
246 if (*flags & CPU_DTRACE_FAULT)
249 if (pcstack_limit <= 0)
253 * If there's no user context we still need to zero the stack.
255 if (p == NULL || (tf = curthread->td_frame) == NULL)
258 *pcstack++ = (uint64_t)p->p_pid;
261 if (pcstack_limit <= 0)
267 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
269 * In an entry probe. The frame pointer has not yet been
270 * pushed (that happens in the function prologue). The
271 * best approach is to add the current pc as a missing top
272 * of stack and back the pc up to the caller, which is stored
273 * at the current stack pointer address since the call
274 * instruction puts it there right before the branch.
277 *pcstack++ = (uint64_t)pc;
279 if (pcstack_limit <= 0)
285 n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
287 ASSERT(n <= pcstack_limit);
293 while (pcstack_limit-- > 0)
298 dtrace_getustackdepth(void)
301 struct trapframe *tf;
305 if (p == NULL || (tf = curthread->td_frame) == NULL)
308 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
314 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
316 * In an entry probe. The frame pointer has not yet been
317 * pushed (that happens in the function prologue). The
318 * best approach is to add the current pc as a missing top
319 * of stack and back the pc up to the caller, which is stored
320 * at the current stack pointer address since the call
321 * instruction puts it there right before the branch.
324 if (SV_PROC_FLAG(p, SV_ILP32)) {
325 pc = dtrace_fuword32((void *) sp);
328 pc = dtrace_fuword64((void *) sp);
332 n += dtrace_getustack_common(NULL, 0, pc, sp);
338 dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
341 struct trapframe *tf;
343 volatile uint16_t *flags =
344 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
345 #ifdef notyet /* XXX signal stack */
346 uintptr_t oldcontext;
350 if (*flags & CPU_DTRACE_FAULT)
353 if (pcstack_limit <= 0)
357 * If there's no user context we still need to zero the stack.
359 if (p == NULL || (tf = curthread->td_frame) == NULL)
362 *pcstack++ = (uint64_t)p->p_pid;
365 if (pcstack_limit <= 0)
371 #ifdef notyet /* XXX signal stack */
372 oldcontext = lwp->lwp_oldcontext;
373 s1 = sizeof (struct xframe) + 2 * sizeof (long);
374 s2 = s1 + sizeof (siginfo_t);
377 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
378 *pcstack++ = (uint64_t)pc;
381 if (pcstack_limit <= 0)
384 if (SV_PROC_FLAG(p, SV_ILP32)) {
385 pc = dtrace_fuword32((void *)sp);
388 pc = dtrace_fuword64((void *)sp);
393 *pcstack++ = (uint64_t)pc;
396 if (pcstack_limit <= 0)
402 #ifdef notyet /* XXX signal stack */
403 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
404 ucontext_t *ucp = (ucontext_t *)oldcontext;
405 greg_t *gregs = ucp->uc_mcontext.gregs;
407 sp = dtrace_fulword(&gregs[REG_FP]);
408 pc = dtrace_fulword(&gregs[REG_PC]);
410 oldcontext = dtrace_fulword(&ucp->uc_link);
414 if (SV_PROC_FLAG(p, SV_ILP32)) {
415 pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET));
416 sp = dtrace_fuword32((void *)sp);
419 pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64));
420 sp = dtrace_fuword64((void *)sp);
425 * This is totally bogus: if we faulted, we're going to clear
426 * the fault and break. This is to deal with the apparently
427 * broken Java stacks on x86.
429 if (*flags & CPU_DTRACE_FAULT) {
430 *flags &= ~CPU_DTRACE_FAULT;
436 while (pcstack_limit-- > 0)
442 dtrace_getarg(int arg, int aframes)
445 uintptr_t *fp = (uintptr_t *)dtrace_getfp();
450 * A total of 8 arguments are passed via registers; any argument with
451 * index of 7 or lower is therefore in a register.
455 for (i = 1; i <= aframes; i++) {
456 fp = (uintptr_t *)*fp;
459 * On ppc32 AIM, and booke, trapexit() is the immediately following
460 * label. On ppc64 AIM trapexit() follows a nop.
463 if ((long)(fp[2]) + 4 == (long)trapexit) {
465 if ((long)(fp[1]) == (long)trapexit) {
468 * In the case of powerpc, we will use the pointer to the regs
469 * structure that was pushed when we took the trap. To get this
470 * structure, we must increment beyond the frame structure. If the
471 * argument that we're seeking is passed on the stack, we'll pull
472 * the true stack pointer out of the saved registers and decrement
473 * our argument by the number of arguments passed in registers; if
474 * the argument we're seeking is passed in regsiters, we can just
478 struct reg *rp = (struct reg *)((uintptr_t)fp[0] + 48);
480 struct reg *rp = (struct reg *)((uintptr_t)fp[0] + 8);
484 stack = &rp->fixreg[3];
486 stack = (uintptr_t *)(rp->fixreg[1]);
495 * We know that we did not come through a trap to get into
496 * dtrace_probe() -- the provider simply called dtrace_probe()
497 * directly. As this is the case, we need to shift the argument
498 * that we're looking for: the probe ID is the first argument to
499 * dtrace_probe(), so the argument n will actually be found where
500 * one would expect to find argument (n + 1).
506 * This shouldn't happen. If the argument is passed in a
507 * register then it should have been, well, passed in a
510 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
518 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
520 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
/*
 * Return the kernel stack depth below 'aframes' artificial frames, or
 * 0 if the walk terminated before consuming them.
 *
 * NOTE(review): reconstructed from a truncated original — only the
 * sp_inkernel check and back-chain step were visible; confirm against
 * the upstream file.
 */
int
dtrace_getstackdepth(int aframes)
{
	int depth = 0;
	uintptr_t sp;

	sp = dtrace_getfp();
	depth++;
	for (;;) {
		if (!dtrace_sp_inkernel(sp, aframes))
			break;

		depth++;
		/* Follow the back-chain word to the caller's frame. */
		sp = *(uintptr_t *)sp;
	}
	if (depth < aframes)
		return (0);
	return (depth - aframes);
}
558 dtrace_getreg(struct trapframe *rp, uint_t reg)
561 return (rp->fixreg[reg]);
579 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
585 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
587 ASSERT(INKERNEL(kaddr) && kaddr + size >= kaddr);
589 if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
590 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
591 cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
599 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
600 volatile uint16_t *flags)
602 if (dtrace_copycheck(uaddr, kaddr, size))
603 if (copyin((const void *)uaddr, (void *)kaddr, size)) {
604 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
605 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
610 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
611 volatile uint16_t *flags)
613 if (dtrace_copycheck(uaddr, kaddr, size)) {
614 if (copyout((const void *)kaddr, (void *)uaddr, size)) {
615 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
616 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
622 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
623 volatile uint16_t *flags)
628 if (dtrace_copycheck(uaddr, kaddr, size)) {
629 error = copyinstr((const void *)uaddr, (void *)kaddr,
632 /* ENAMETOOLONG is not a fault condition. */
633 if (error && error != ENAMETOOLONG) {
634 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
635 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
641 * The bulk of this function could be replaced to match dtrace_copyinstr()
642 * if we ever implement a copyoutstr().
645 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
646 volatile uint16_t *flags)
650 if (dtrace_copycheck(uaddr, kaddr, size)) {
651 len = strlen((const char *)kaddr);
655 if (copyout((const void *)kaddr, (void *)uaddr, len)) {
656 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
657 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
663 dtrace_fuword8(void *uaddr)
665 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
666 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
667 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
670 return (fubyte(uaddr));
674 dtrace_fuword16(void *uaddr)
678 if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) {
679 if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) {
680 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
681 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
688 dtrace_fuword32(void *uaddr)
690 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
691 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
692 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
695 return (fuword32(uaddr));
699 dtrace_fuword64(void *uaddr)
703 if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) {
704 if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) {
705 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
706 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
713 dtrace_fulword(void *uaddr)
717 if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) {
718 if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) {
719 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
720 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;