4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Portions Copyright 2012,2013 Justin Hibbits <jhibbits@freebsd.org>
27 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
28 * Use is subject to license terms.
30 #include <sys/cdefs.h>
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/stack.h>
36 #include <sys/sysent.h>
39 #include <machine/frame.h>
40 #include <machine/md_var.h>
41 #include <machine/reg.h>
42 #include <machine/stack.h>
45 #include <vm/vm_param.h>
50 /* Offset to the LR Save word (ppc32) */
51 #define RETURN_OFFSET 4
/* Offset to the LR save doubleword in a 64-bit stack frame. */
52 #define RETURN_OFFSET64 8
/*
 * INKERNEL(x): true when x lies within the kernel virtual address range.
 * Used by the stack walkers below to validate frame pointers before
 * dereferencing them.
 */
54 #define INKERNEL(x) ((x) <= VM_MAX_KERNEL_ADDRESS && \
55 (x) >= VM_MIN_KERNEL_ADDRESS)
/*
 * Body of dtrace_getfp() (the function header is elided from this chunk):
 * return the current frame pointer via the compiler builtin.
 */
60 return (greg_t)__builtin_frame_address(0);
/*
 * dtrace_getpcstack: record up to pcstack_limit kernel return addresses
 * into pcstack[], skipping 'aframes' artificial (DTrace-internal) frames.
 * If intrpc is non-NULL it is recorded first as the interrupted PC.
 * NOTE(review): lines appear to be elided from this chunk (the embedded
 * original line numbers jump), so the control flow shown is incomplete.
 */
64 dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
/* Caller recorded by the provider (e.g. fbt), if any. */
70 pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
73 pcstack[depth++] = (pc_t) intrpc;
79 while (depth < pcstack_limit) {
/* Stop once the frame pointer leaves the kernel address range. */
80 if (!INKERNEL((long) sp))
/* Saved LR lives at sp+8 for 64-bit frames, sp+4 for 32-bit ones. */
84 callpc = *(uintptr_t *)(sp + RETURN_OFFSET64);
86 callpc = *(uintptr_t *)(sp + RETURN_OFFSET);
89 if (!INKERNEL(callpc))
/* Artificial frames consumed: substitute the recorded caller. */
94 if ((aframes == 0) && (caller != 0)) {
95 pcstack[depth++] = caller;
99 pcstack[depth++] = callpc;
/* Follow the frame back-chain word to the caller's frame. */
102 sp = *(uintptr_t*)sp;
/* Presumably zero-fills the remaining slots -- loop body elided here. */
105 for (; depth < pcstack_limit; depth++) {
/*
 * dtrace_getustack_common: shared user-stack walker.  Records PCs into
 * pcstack[] (or only counts frames when pcstack is NULL) by chasing the
 * user back chain with dtrace_fuword32/64, sized per the process ABI.
 * NOTE(review): lines appear elided from this chunk; the loop framing
 * and the returned frame count are not visible here.
 */
111 dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
117 ASSERT(pcstack == NULL || pcstack_limit > 0);
121 if (pcstack != NULL) {
122 *pcstack++ = (uint64_t)pc;
124 if (pcstack_limit <= 0)
/* 32-bit process: 4-byte back chain, LR save word at sp+4. */
131 if (SV_PROC_FLAG(p, SV_ILP32)) {
132 pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET));
133 sp = dtrace_fuword32((void *)sp);
/* 64-bit process: 8-byte back chain, LR save at sp+8. */
136 pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64));
137 sp = dtrace_fuword64((void *)sp);
/*
 * dtrace_getupcstack: capture the current user-mode call stack into
 * pcstack[].  The first slot holds the pid; subsequent slots are filled
 * by dtrace_getustack_common(), and any leftover slots are zeroed.
 * NOTE(review): lines appear elided from this chunk (embedded original
 * line numbers jump); some declarations and branches are missing.
 */
145 dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
148 struct trapframe *tf;
150 volatile uint16_t *flags =
151 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
/* Bail out early if a fault is already pending on this CPU. */
154 if (*flags & CPU_DTRACE_FAULT)
157 if (pcstack_limit <= 0)
161 * If there's no user context we still need to zero the stack.
163 if (p == NULL || (tf = curthread->td_frame) == NULL)
166 *pcstack++ = (uint64_t)p->p_pid;
169 if (pcstack_limit <= 0)
175 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
177 * In an entry probe. The frame pointer has not yet been
178 * pushed (that happens in the function prologue). The
179 * best approach is to add the current pc as a missing top
180 * of stack and back the pc up to the caller, which is stored
181 * at the current stack pointer address since the call
182 * instruction puts it there right before the branch.
185 *pcstack++ = (uint64_t)pc;
187 if (pcstack_limit <= 0)
193 n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
195 ASSERT(n <= pcstack_limit);
/* Zero any slots the walker did not fill. */
201 while (pcstack_limit-- > 0)
/*
 * dtrace_getustackdepth: return the depth of the current user stack by
 * running the common walker in counting mode (NULL pcstack).
 * NOTE(review): lines appear elided from this chunk; pc/sp setup and
 * the final return are not visible here.
 */
206 dtrace_getustackdepth(void)
209 struct trapframe *tf;
/* No user context, or a pending fault, means no stack to measure. */
213 if (p == NULL || (tf = curthread->td_frame) == NULL)
216 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
222 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
224 * In an entry probe. The frame pointer has not yet been
225 * pushed (that happens in the function prologue). The
226 * best approach is to add the current pc as a missing top
227 * of stack and back the pc up to the caller, which is stored
228 * at the current stack pointer address since the call
229 * instruction puts it there right before the branch.
232 if (SV_PROC_FLAG(p, SV_ILP32)) {
233 pc = dtrace_fuword32((void *) sp);
236 pc = dtrace_fuword64((void *) sp);
/* Count frames only: NULL pcstack, zero limit. */
240 n += dtrace_getustack_common(NULL, 0, pc, sp);
/*
 * dtrace_getufpstack: capture user PCs and their frame pointers into the
 * parallel pcstack[]/fpstack[] arrays.  The first pcstack slot is the
 * pid.  Signal-context unwinding is stubbed out under 'notyet'.
 * NOTE(review): lines appear elided from this chunk; the walk-loop
 * framing and the fpstack[] stores are not fully visible.
 */
246 dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
249 struct trapframe *tf;
251 volatile uint16_t *flags =
252 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
253 #ifdef notyet /* XXX signal stack */
254 uintptr_t oldcontext;
/* Bail out early if a fault is already pending on this CPU. */
258 if (*flags & CPU_DTRACE_FAULT)
261 if (pcstack_limit <= 0)
265 * If there's no user context we still need to zero the stack.
267 if (p == NULL || (tf = curthread->td_frame) == NULL)
270 *pcstack++ = (uint64_t)p->p_pid;
273 if (pcstack_limit <= 0)
279 #ifdef notyet /* XXX signal stack */
280 oldcontext = lwp->lwp_oldcontext;
281 s1 = sizeof (struct xframe) + 2 * sizeof (long);
282 s2 = s1 + sizeof (siginfo_t);
/* Entry probe: synthesize the missing top frame (cf. getupcstack). */
285 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
286 *pcstack++ = (uint64_t)pc;
289 if (pcstack_limit <= 0)
292 if (SV_PROC_FLAG(p, SV_ILP32)) {
293 pc = dtrace_fuword32((void *)sp);
296 pc = dtrace_fuword64((void *)sp);
301 *pcstack++ = (uint64_t)pc;
304 if (pcstack_limit <= 0)
310 #ifdef notyet /* XXX signal stack */
311 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
312 ucontext_t *ucp = (ucontext_t *)oldcontext;
313 greg_t *gregs = ucp->uc_mcontext.gregs;
315 sp = dtrace_fulword(&gregs[REG_FP]);
316 pc = dtrace_fulword(&gregs[REG_PC]);
318 oldcontext = dtrace_fulword(&ucp->uc_link);
/* Normal frame: ABI-sized loads of the saved LR and back chain. */
322 if (SV_PROC_FLAG(p, SV_ILP32)) {
323 pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET));
324 sp = dtrace_fuword32((void *)sp);
327 pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64));
328 sp = dtrace_fuword64((void *)sp);
333 * This is totally bogus: if we faulted, we're going to clear
334 * the fault and break. This is to deal with the apparently
335 * broken Java stacks on x86.
337 if (*flags & CPU_DTRACE_FAULT) {
338 *flags &= ~CPU_DTRACE_FAULT;
/* Zero any slots left unfilled. */
344 while (pcstack_limit-- > 0)
/*
 * dtrace_getarg: fetch probe argument 'arg' for the frame 'aframes'
 * levels up the kernel stack.  Per the visible comment, the first 8
 * arguments are in registers (r3 onward in the saved register file);
 * later ones are read from the trapped thread's stack.
 * NOTE(review): lines appear elided from this chunk; the register/stack
 * selection and the final guarded load are not fully visible.
 */
350 dtrace_getarg(int arg, int aframes)
353 uintptr_t *fp = (uintptr_t *)dtrace_getfp();
358 * A total of 8 arguments are passed via registers; any argument with
359 * index of 7 or lower is therefore in a register.
/* Skip 'aframes' artificial frames via the back chain. */
363 for (i = 1; i <= aframes; i++) {
364 fp = (uintptr_t *)*fp;
367 * On ppc32 AIM, and booke, trapexit() is the immediately following
368 * label. On ppc64 AIM trapexit() follows a nop.
370 if (((long)(fp[1]) == (long)trapexit) ||
371 (((long)(fp[1]) + 4 == (long)trapexit))) {
373 * In the case of powerpc, we will use the pointer to the regs
374 * structure that was pushed when we took the trap. To get this
375 * structure, we must increment beyond the frame structure. If the
376 * argument that we're seeking is passed on the stack, we'll pull
377 * the true stack pointer out of the saved registers and decrement
378 * our argument by the number of arguments passed in registers; if
379 * the argument we're seeking is passed in registers, we can just
/* Saved register file sits past the frame header (offset differs per ABI). */
383 struct reg *rp = (struct reg *)((uintptr_t)fp[0] + 48);
385 struct reg *rp = (struct reg *)((uintptr_t)fp[0] + 8);
/* Register arguments start at r3 in the saved register file. */
389 stack = &rp->fixreg[3];
/* Stack arguments: recover the trapped thread's stack pointer (r1). */
391 stack = (uintptr_t *)(rp->fixreg[1]);
400 * We know that we did not come through a trap to get into
401 * dtrace_probe() -- the provider simply called dtrace_probe()
402 * directly. As this is the case, we need to shift the argument
403 * that we're looking for: the probe ID is the first argument to
404 * dtrace_probe(), so the argument n will actually be found where
405 * one would expect to find argument (n + 1).
411 * This shouldn't happen. If the argument is passed in a
412 * register then it should have been, well, passed in a
415 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
/* Guard the final load so a bad address faults softly. */
423 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
425 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
/*
 * dtrace_getstackdepth: count kernel stack frames by walking the back
 * chain, then subtract the artificial (DTrace-internal) frames.
 * NOTE(review): lines are elided here; sp/depth initialization and the
 * loop framing are not visible.
 */
431 dtrace_getstackdepth(int aframes)
440 if (!INKERNEL((long) sp))
/* Stop when the next back-chain pointer leaves kernel space. */
442 if (!INKERNEL((long) *(void **)sp))
445 sp = *(uintptr_t *)sp;
450 return depth - aframes;
/*
 * dtrace_getreg: return general-purpose register 'reg' from the given
 * trapframe.
 * NOTE(review): the bounds check on 'reg' is elided in this chunk; the
 * ILLOP line below presumably belongs to an out-of-range (or elided
 * sibling-function) error path -- confirm against the full file.
 */
454 dtrace_getreg(struct trapframe *rp, uint_t reg)
457 return (rp->fixreg[reg]);
475 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
/*
 * dtrace_copycheck: validate a user<->kernel copy request.  Rejects
 * ranges extending past VM_MAXUSER_ADDRESS or wrapping around, setting
 * CPU_DTRACE_BADADDR and recording the offending user address.
 * NOTE(review): the success/failure return statements are elided here.
 */
481 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
483 ASSERT(INKERNEL(kaddr) && kaddr + size >= kaddr);
485 if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
486 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
487 cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
/*
 * dtrace_copyin: copy 'size' bytes from user uaddr to kernel kaddr.
 * A failed copyin() is reported as a soft BADADDR fault (flag + illval)
 * rather than a hard failure.
 */
495 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
496 volatile uint16_t *flags)
498 if (dtrace_copycheck(uaddr, kaddr, size))
499 if (copyin((const void *)uaddr, (void *)kaddr, size)) {
500 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
501 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
/*
 * dtrace_copyout: copy 'size' bytes from kernel kaddr to user uaddr.
 * A failed copyout() is reported as a soft BADADDR fault (flag + illval)
 * rather than a hard failure.
 */
506 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
507 volatile uint16_t *flags)
509 if (dtrace_copycheck(uaddr, kaddr, size)) {
510 if (copyout((const void *)kaddr, (void *)uaddr, size)) {
511 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
512 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
/*
 * dtrace_copyinstr: copy a NUL-terminated string from user space into
 * kaddr, at most 'size' bytes.  ENAMETOOLONG (truncation) is tolerated;
 * any other copyinstr() error becomes a soft BADADDR fault.
 */
518 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
519 volatile uint16_t *flags)
524 if (dtrace_copycheck(uaddr, kaddr, size)) {
525 error = copyinstr((const void *)uaddr, (void *)kaddr,
528 /* ENAMETOOLONG is not a fault condition. */
529 if (error && error != ENAMETOOLONG) {
530 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
531 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
/*
 * dtrace_copyoutstr: copy the kernel string at kaddr out to user uaddr,
 * reporting copyout() failure as a soft BADADDR fault.
 * NOTE(review): the clamp of 'len' against 'size' appears elided from
 * this chunk -- confirm against the full file before assuming strlen()
 * alone bounds the copy.
 */
537 * The bulk of this function could be replaced to match dtrace_copyinstr()
538 * if we ever implement a copyoutstr().
541 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
542 volatile uint16_t *flags)
546 if (dtrace_copycheck(uaddr, kaddr, size)) {
547 len = strlen((const char *)kaddr);
551 if (copyout((const void *)kaddr, (void *)uaddr, len)) {
552 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
553 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
/*
 * dtrace_fuword8: fetch one byte from user space via fubyte(), flagging
 * a soft BADADDR fault for addresses above the user range.
 */
559 dtrace_fuword8(void *uaddr)
561 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
562 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
563 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
566 return (fubyte(uaddr));
/*
 * dtrace_fuword16: fetch a 16-bit value from user space via
 * dtrace_copycheck() + copyin(); a failed copy raises a soft BADADDR
 * fault.  (Return of the fetched value is elided from this chunk.)
 */
570 dtrace_fuword16(void *uaddr)
574 if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) {
575 if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) {
576 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
577 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
/*
 * dtrace_fuword32: fetch a 32-bit value from user space via fuword32(),
 * flagging a soft BADADDR fault for addresses above the user range.
 */
584 dtrace_fuword32(void *uaddr)
586 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
587 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
588 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
591 return (fuword32(uaddr));
/*
 * dtrace_fuword64: fetch a 64-bit value from user space via
 * dtrace_copycheck() + copyin(); a failed copy raises a soft BADADDR
 * fault.  (Return of the fetched value is elided from this chunk.)
 */
595 dtrace_fuword64(void *uaddr)
599 if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) {
600 if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) {
601 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
602 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
/*
 * dtrace_fulword: fetch a native-word-sized value from user space via
 * dtrace_copycheck() + copyin(); a failed copy raises a soft BADADDR
 * fault.  (Function tail is elided from this chunk.)
 */
609 dtrace_fulword(void *uaddr)
613 if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) {
614 if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) {
615 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
616 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;