4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
25 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
26 * Use is subject to license terms.
28 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/stack.h>
36 #include <machine/frame.h>
37 #include <machine/md_var.h>
38 #include <machine/reg.h>
39 #include <machine/stack.h>
42 #include <vm/vm_param.h>
/*
 * Unchecked user-word fetch primitives (defined elsewhere, presumably in
 * assembly — TODO confirm).  Callers must validate the user address
 * against VM_MAXUSER_ADDRESS first; see dtrace_fuword8() and friends below.
 */
46 uint8_t dtrace_fuword8_nocheck(void *);
47 uint16_t dtrace_fuword16_nocheck(void *);
48 uint32_t dtrace_fuword32_nocheck(void *);
49 uint64_t dtrace_fuword64_nocheck(void *);
/*
 * Capture a kernel stack trace into pcstack[] (at most pcstack_limit
 * entries) by walking the chain of saved frame pointers from the current
 * %rbp.  aframes artificial DTrace frames are skipped; once consumed, the
 * saved cpu_dtrace_caller (if any) is substituted as the caller PC.
 * NOTE(review): this extract elides lines — the declarations of depth,
 * callpc, rbp and the intrpc parameter are not visible here.
 */
52 dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
57 struct amd64_frame *frame;
59 pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
/* If we entered via an interrupt, record the interrupted PC first. */
62 pcstack[depth++] = (pc_t) intrpc;
/* Read the current frame pointer straight out of %rbp. */
66 __asm __volatile("movq %%rbp,%0" : "=r" (rbp));
68 frame = (struct amd64_frame *)rbp;
69 while (depth < pcstack_limit) {
/* Stop walking as soon as the frame pointer leaves kernel space. */
70 if (!INKERNEL((long) frame))
73 callpc = frame->f_retaddr;
/* A non-kernel return address also terminates the walk. */
75 if (!INKERNEL(callpc))
/* Artificial frames consumed: substitute the recorded probe caller. */
80 if ((aframes == 0) && (caller != 0)) {
81 pcstack[depth++] = caller;
85 pcstack[depth++] = callpc;
/*
 * Sanity check: the next frame must be strictly higher on this
 * kernel stack (stacks grow down), and within the kernel stack
 * pages above the starting %rbp.
 */
88 if (frame->f_frame <= frame ||
89 (vm_offset_t)frame->f_frame >=
90 (vm_offset_t)rbp + KSTACK_PAGES * PAGE_SIZE)
92 frame = frame->f_frame;
/* Zero-fill any remaining output slots. */
95 for (; depth < pcstack_limit; depth++) {
/*
 * Common user-stack walker: follow the (pc, sp) frame chain, storing each
 * PC into pcstack[] when pcstack is non-NULL (pcstack may be NULL to merely
 * count frames, as dtrace_getustackdepth() does).  Returns the frame count
 * — TODO confirm; the return statement is elided from this extract.
 */
101 dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
104 volatile uint16_t *flags =
105 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
106 struct amd64_frame *frame;
109 ASSERT(pcstack == NULL || pcstack_limit > 0);
/* Walk until the chain terminates (NULL pc or sp) or the limit hits. */
111 while (pc != 0 && sp != 0) {
113 if (pcstack != NULL) {
114 *pcstack++ = (uint64_t)pc;
116 if (pcstack_limit <= 0)
/* Fetch the next return address and saved frame pointer from user memory. */
120 frame = (struct amd64_frame *) sp;
122 pc = dtrace_fulword(&frame->f_retaddr);
123 sp = dtrace_fulword(&frame->f_frame);
126 * This is totally bogus: if we faulted, we're going to clear
127 * the fault and break. This is to deal with the apparently
128 * broken Java stacks on x86.
/* Swallow user-memory faults and stop walking (see comment above). */
130 if (*flags & CPU_DTRACE_FAULT) {
131 *flags &= ~CPU_DTRACE_FAULT;
/*
 * Capture the current thread's user-mode call stack into pcstack[].  The
 * first slot is the pid; remaining frames come from the user trapframe via
 * dtrace_getustack_common().  Unused trailing slots are zeroed.
 * NOTE(review): declarations of p, pc, sp and n are elided in this extract.
 */
140 dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
143 struct trapframe *tf;
145 volatile uint16_t *flags =
146 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
/* Bail out early if a fault is already pending on this CPU. */
149 if (*flags & CPU_DTRACE_FAULT)
152 if (pcstack_limit <= 0)
156 * If there's no user context we still need to zero the stack.
158 if (p == NULL || (tf = curthread->td_frame) == NULL)
/* Slot 0 carries the pid so consumers can attribute the stack. */
161 *pcstack++ = (uint64_t)p->p_pid;
164 if (pcstack_limit <= 0)
/*
 * In an entry probe the return address hasn't been pushed into a frame
 * yet; record the PC and pull the caller from the top of the stack.
 */
170 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
171 *pcstack++ = (uint64_t)pc;
173 if (pcstack_limit <= 0)
176 pc = dtrace_fulword((void *) sp);
179 n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
181 ASSERT(n <= pcstack_limit);
/* Zero-fill whatever the walker didn't use. */
187 while (pcstack_limit-- > 0)
/*
 * Return the depth of the current thread's user stack without recording
 * PCs: the common walker is invoked with a NULL pcstack purely to count.
 * NOTE(review): declarations of p, pc, sp, n and the return path are
 * elided in this extract.
 */
192 dtrace_getustackdepth(void)
195 struct trapframe *tf;
/* No user context (or pending fault) means nothing to count. */
199 if (p == NULL || (tf = curthread->td_frame) == NULL)
202 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
/* Entry probe: count the current PC, then fetch the caller from *sp. */
208 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
211 pc = dtrace_fulword((void *) sp);
214 n += dtrace_getustack_common(NULL, 0, pc, sp);
/*
 * Capture the user stack recording both PCs (pcstack) and frame pointers
 * (fpstack).  This Solaris-derived variant also follows signal-handler
 * ucontext links (lwp_oldcontext) so frames interrupted by a signal are
 * traversed correctly.  NOTE(review): declarations of p, rp, n, s1, s2 and
 * several control-flow lines are elided in this extract.
 */
221 dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
223 klwp_t *lwp = ttolwp(curthread);
226 uintptr_t pc, sp, oldcontext;
227 volatile uint16_t *flags =
228 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
/* Bail out if a fault is already pending on this CPU. */
231 if (*flags & CPU_DTRACE_FAULT)
234 if (pcstack_limit <= 0)
238 * If there's no user context we still need to zero the stack.
240 if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
/* Slot 0 carries the pid. */
243 *pcstack++ = (uint64_t)p->p_pid;
246 if (pcstack_limit <= 0)
251 oldcontext = lwp->lwp_oldcontext;
/*
 * s1/s2: candidate offsets from a frame's sp to a signal-delivery
 * ucontext (without and with a trailing siginfo_t), used below to
 * detect signal frames.
 */
253 s1 = sizeof (struct xframe) + 2 * sizeof (long);
254 s2 = s1 + sizeof (siginfo_t);
/* Entry probe: record the PC, then pull the caller off the stack. */
256 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
257 *pcstack++ = (uint64_t)pc;
260 if (pcstack_limit <= 0)
/* Fetch width depends on the process data model (64- vs 32-bit). */
263 if (p->p_model == DATAMODEL_NATIVE)
264 pc = dtrace_fulword((void *)rp->r_sp);
266 pc = dtrace_fuword32((void *)rp->r_sp);
269 while (pc != 0 && sp != 0) {
270 *pcstack++ = (uint64_t)pc;
273 if (pcstack_limit <= 0)
/*
 * If sp sits just below a saved ucontext, this is a signal frame:
 * resume the walk from the registers saved in that context and
 * follow its uc_link to the next context.
 */
276 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
277 ucontext_t *ucp = (ucontext_t *)oldcontext;
278 greg_t *gregs = ucp->uc_mcontext.gregs;
280 sp = dtrace_fulword(&gregs[REG_FP]);
281 pc = dtrace_fulword(&gregs[REG_PC]);
283 oldcontext = dtrace_fulword(&ucp->uc_link);
/* Ordinary frame: follow the saved pc/fp pair. */
285 struct xframe *fr = (struct xframe *)sp;
287 pc = dtrace_fulword(&fr->fr_savpc);
288 sp = dtrace_fulword(&fr->fr_savfp);
292 * This is totally bogus: if we faulted, we're going to clear
293 * the fault and break. This is to deal with the apparently
294 * broken Java stacks on x86.
296 if (*flags & CPU_DTRACE_FAULT) {
297 *flags &= ~CPU_DTRACE_FAULT;
/* Zero-fill any remaining slots. */
303 while (pcstack_limit-- > 0)
/*
 * Fetch probe argument number 'arg' as seen aframes frames up the stack.
 * On amd64 the first six integer arguments live in registers; if the
 * probe fired via the invop trap, they are recovered from the saved
 * struct reg, otherwise only stack-passed arguments can be read.
 * NOTE(review): several control-flow lines (loop body, returns,
 * NOFAULT-guarded load) are elided in this extract.
 */
310 dtrace_getarg(int arg, int aframes)
313 struct amd64_frame *fp = (struct amd64_frame *)dtrace_getfp();
318 * A total of 6 arguments are passed via registers; any argument with
319 * index of 5 or lower is therefore in a register.
/* Walk up over the artificial DTrace frames. */
323 for (i = 1; i <= aframes; i++) {
/* Did we arrive here through the invop (breakpoint) trap handler? */
326 if (fp->f_retaddr == (long)dtrace_invop_callsite) {
328 * In the case of amd64, we will use the pointer to the
329 * regs structure that was pushed when we took the
330 * trap. To get this structure, we must increment
331 * beyond the frame structure, and then again beyond
332 * the calling RIP stored in dtrace_invop(). If the
333 * argument that we're seeking is passed on the stack,
334 * we'll pull the true stack pointer out of the saved
335 * registers and decrement our argument by the number
336 * of arguments passed in registers; if the argument
337 * we're seeking is passed in registers, we can just
340 struct reg *rp = (struct reg *)((uintptr_t)&fp[1] +
/* Register args start at %rdi in the saved register block. */
344 stack = (uintptr_t *)&rp->r_rdi;
346 stack = (uintptr_t *)(rp->r_rsp);
355 * We know that we did not come through a trap to get into
356 * dtrace_probe() -- the provider simply called dtrace_probe()
357 * directly. As this is the case, we need to shift the argument
358 * that we're looking for: the probe ID is the first argument to
359 * dtrace_probe(), so the argument n will actually be found where
360 * one would expect to find argument (n + 1).
366 * This shouldn't happen. If the argument is passed in a
367 * register then it should have been, well, passed in a
/* Register-passed argument requested but unavailable: flag ILLOP. */
370 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
/* Stack-passed arguments start just above the frame record. */
375 stack = (uintptr_t *)&fp[1];
/* Guard the load so a bad pointer faults safely. */
378 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
380 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
/*
 * Count kernel stack frames from the current frame pointer, using the
 * same INKERNEL / same-stack sanity checks as dtrace_getpcstack(), then
 * subtract the aframes artificial DTrace frames.
 * NOTE(review): the depth increment and loop braces are elided here.
 */
387 dtrace_getstackdepth(int aframes)
390 struct amd64_frame *frame;
394 rbp = dtrace_getfp();
395 frame = (struct amd64_frame *)rbp;
/* Stop when the chain leaves kernel address space. */
398 if (!INKERNEL((long) frame))
400 if (!INKERNEL((long) frame->f_frame))
/* Next frame must be strictly higher and within this kernel stack. */
403 if (frame->f_frame <= frame ||
404 (vm_offset_t)frame->f_frame >=
405 (vm_offset_t)rbp + KSTACK_PAGES * PAGE_SIZE)
407 frame = frame->f_frame;
412 return depth - aframes;
/*
 * Return the value of register 'reg' from the saved register set rp,
 * translating the Solaris register index through a regmap[] table.
 * Out-of-range or unsupported indices set CPU_DTRACE_ILLOP.
 * NOTE(review): the regmap table and most of the switch/translation
 * logic are elided in this extract.
 */
417 dtrace_getreg(struct regs *rp, uint_t reg)
433 REG_TRAPNO, /* TRAPNO */
/* Reject indices beyond the translation table. */
443 if (reg >= sizeof (regmap) / sizeof (int)) {
444 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
493 return (rp->r_trapno);
507 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
513 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
/* Index into the saved registers relative to r_gs. */
517 return ((&rp->r_gs)[reg]);
/*
 * Validate a user/kernel copy: the kernel range must be sane (asserted)
 * and the user range must lie below VM_MAXUSER_ADDRESS without wrapping.
 * On a bad user range, record BADADDR and the offending address.
 * NOTE(review): the success/failure return lines are elided here.
 */
523 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
525 ASSERT(INKERNEL(kaddr) && kaddr + size >= kaddr);
/* The second clause catches uaddr + size overflow (wraparound). */
527 if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
528 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
529 cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
/* Copy size bytes from user uaddr to kernel kaddr after range-checking. */
537 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
538 volatile uint16_t *flags)
540 if (dtrace_copycheck(uaddr, kaddr, size))
541 dtrace_copy(uaddr, kaddr, size);
/* Copy size bytes from kernel kaddr to user uaddr after range-checking. */
545 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
546 volatile uint16_t *flags)
548 if (dtrace_copycheck(uaddr, kaddr, size))
549 dtrace_copy(kaddr, uaddr, size);
/*
 * Copy a NUL-terminated string (at most size bytes) from user uaddr to
 * kernel kaddr after range-checking; faults are reported via *flags.
 */
553 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
554 volatile uint16_t *flags)
556 if (dtrace_copycheck(uaddr, kaddr, size))
557 dtrace_copystr(uaddr, kaddr, size, flags);
/*
 * Copy a NUL-terminated string (at most size bytes) from kernel kaddr to
 * user uaddr after range-checking; faults are reported via *flags.
 */
561 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
562 volatile uint16_t *flags)
564 if (dtrace_copycheck(uaddr, kaddr, size))
565 dtrace_copystr(kaddr, uaddr, size, flags);
/*
 * Safely fetch one byte from user address uaddr.  A kernel-space address
 * is rejected with BADADDR (the elided branch presumably returns 0 —
 * TODO confirm); otherwise defer to the unchecked fetch primitive.
 */
569 dtrace_fuword8(void *uaddr)
571 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
572 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
573 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
576 return (dtrace_fuword8_nocheck(uaddr));
/*
 * Safely fetch a 16-bit word from user address uaddr; same validation
 * pattern as dtrace_fuword8().
 */
580 dtrace_fuword16(void *uaddr)
582 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
583 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
584 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
587 return (dtrace_fuword16_nocheck(uaddr));
/*
 * Safely fetch a 32-bit word from user address uaddr; same validation
 * pattern as dtrace_fuword8().
 */
591 dtrace_fuword32(void *uaddr)
593 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
594 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
595 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
598 return (dtrace_fuword32_nocheck(uaddr));
/*
 * Safely fetch a 64-bit word from user address uaddr; same validation
 * pattern as dtrace_fuword8().
 */
602 dtrace_fuword64(void *uaddr)
604 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
605 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
606 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
609 return (dtrace_fuword64_nocheck(uaddr));