4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
25 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
26 * Use is subject to license terms.
28 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/stack.h>
36 #include <machine/frame.h>
37 #include <machine/md_var.h>
38 #include <machine/reg.h>
39 #include <machine/stack.h>
42 #include <vm/vm_param.h>
/*
 * Boundary between user and kernel address space, taken from the
 * linker-provided 'kernbase' symbol.  Used by dtrace_copycheck() and
 * the dtrace_fuwordN() wrappers below to reject kernel addresses
 * handed in as "user" addresses.
 */
45 extern uintptr_t kernbase;
46 uintptr_t kernelbase = (uintptr_t) &kernbase;
/*
 * Unchecked user-memory fetch primitives (implemented elsewhere, likely
 * in assembly).  The range-checked dtrace_fuwordN() functions at the
 * bottom of this file wrap these after validating the address against
 * 'kernelbase'.
 */
48 uint8_t dtrace_fuword8_nocheck(void *);
49 uint16_t dtrace_fuword16_nocheck(void *);
50 uint32_t dtrace_fuword32_nocheck(void *);
51 uint64_t dtrace_fuword64_nocheck(void *);
/*
 * Capture a kernel-mode stack trace into pcstack[] by walking the %rbp
 * frame-pointer chain.  'aframes' artificial DTrace frames are skipped,
 * and the saved cpu_dtrace_caller is reported in place of the frame in
 * which the probe fired.
 * NOTE(review): this extract elides some original lines (braces,
 * break/continue statements); do not modify control flow from this view.
 */
54 dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
59 struct amd64_frame *frame;
/* Probe-site caller recorded by the invop machinery for this CPU. */
61 pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
/* intrpc (interrupted PC) is recorded first — its guard is elided here. */
64 pcstack[depth++] = (pc_t) intrpc;
/* Start the walk from our own frame pointer. */
68 __asm __volatile("movq %%rbp,%0" : "=r" (rbp));
70 frame = (struct amd64_frame *)rbp;
71 while (depth < pcstack_limit) {
/* Stop as soon as the chain leaves the kernel address range. */
72 if (!INKERNEL((long) frame))
75 callpc = frame->f_retaddr;
77 if (!INKERNEL(callpc))
/* Once the artificial frames are consumed, report the real caller. */
82 if ((aframes == 0) && (caller != 0)) {
83 pcstack[depth++] = caller;
87 pcstack[depth++] = callpc;
/*
 * Sanity check: the next frame must be strictly above this one and
 * must stay within the kernel stack (KSTACK_PAGES * PAGE_SIZE of %rbp).
 */
90 if (frame->f_frame <= frame ||
91 (vm_offset_t)frame->f_frame >=
92 (vm_offset_t)rbp + KSTACK_PAGES * PAGE_SIZE)
94 frame = frame->f_frame;
/* Fill the remainder of the caller's buffer — loop body elided here,
 * presumably zeroing each slot; confirm against the full source. */
97 for (; depth < pcstack_limit; depth++) {
/*
 * Common user-mode frame walker shared by dtrace_getupcstack() and
 * dtrace_getustackdepth().  Follows the user frame-pointer chain via
 * dtrace_fulword(), storing each PC when pcstack is non-NULL; with a
 * NULL pcstack it only counts frames (the ASSERT below allows that
 * combination).  The return statement is elided from this extract —
 * presumably it returns the number of frames visited.
 */
103 dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
106 volatile uint16_t *flags =
107 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
108 struct amd64_frame *frame;
111 ASSERT(pcstack == NULL || pcstack_limit > 0);
113 while (pc != 0 && sp != 0) {
115 if (pcstack != NULL) {
116 *pcstack++ = (uint64_t)pc;
118 if (pcstack_limit <= 0)
/* Fetch the next return address and frame pointer from user memory. */
122 frame = (struct amd64_frame *) sp;
124 pc = dtrace_fulword(&frame->f_retaddr);
125 sp = dtrace_fulword(&frame->f_frame);
128 * This is totally bogus: if we faulted, we're going to clear
129 * the fault and break. This is to deal with the apparently
130 * broken Java stacks on x86.
132 if (*flags & CPU_DTRACE_FAULT) {
133 *flags &= ~CPU_DTRACE_FAULT;
/*
 * Capture the current thread's user-mode stack into pcstack[].  The
 * first slot holds the PID; the remaining frames come from
 * dtrace_getustack_common().  Bails out early if a DTrace fault is
 * already pending on this CPU.
 * NOTE(review): several original lines (braces, decrements of
 * pcstack_limit, the zero-fill body) are elided from this extract.
 */
142 dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
145 struct trapframe *tf;
147 volatile uint16_t *flags =
148 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
/* Don't attempt the walk if a fault is already flagged. */
151 if (*flags & CPU_DTRACE_FAULT)
154 if (pcstack_limit <= 0)
158 * If there's no user context we still need to zero the stack.
160 if (p == NULL || (tf = curthread->td_frame) == NULL)
/* First entry is the process ID. */
163 *pcstack++ = (uint64_t)p->p_pid;
166 if (pcstack_limit <= 0)
/* In an entry probe the return address hasn't been pushed into a frame
 * yet; fetch the caller's PC directly from the stack pointer. */
172 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
173 *pcstack++ = (uint64_t)pc;
175 if (pcstack_limit <= 0)
178 pc = dtrace_fulword((void *) sp);
181 n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
183 ASSERT(n <= pcstack_limit);
/* Pad out the rest of the buffer — body elided in this extract. */
189 while (pcstack_limit-- > 0)
/*
 * Return the depth of the current thread's user-mode stack, computed by
 * running the common walker with a NULL pcstack so it only counts
 * frames.  The entry-probe adjustment mirrors dtrace_getupcstack().
 * NOTE(review): guards and the final return are elided from this
 * extract.
 */
194 dtrace_getustackdepth(void)
197 struct trapframe *tf;
201 if (p == NULL || (tf = curthread->td_frame) == NULL)
204 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
210 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
213 pc = dtrace_fulword((void *) sp);
/* NULL pcstack / zero limit => count-only mode of the common walker. */
216 n += dtrace_getustack_common(NULL, 0, pc, sp);
/*
 * Capture the user stack with frame pointers: PCs go into pcstack[]
 * and (per the elided lines, presumably) frame pointers into fpstack[].
 * Unlike dtrace_getupcstack(), this walker also recognizes signal
 * ucontext boundaries (lwp_oldcontext) and hops across them by reading
 * the saved FP/PC out of the ucontext's gregs.
 * NOTE(review): many original lines are elided; the fpstack[] stores
 * and limit decrements are not visible in this extract.
 */
223 dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
225 klwp_t *lwp = ttolwp(curthread);
228 uintptr_t pc, sp, oldcontext;
229 volatile uint16_t *flags =
230 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
233 if (*flags & CPU_DTRACE_FAULT)
236 if (pcstack_limit <= 0)
240 * If there's no user context we still need to zero the stack.
242 if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
/* First entry is the process ID. */
245 *pcstack++ = (uint64_t)p->p_pid;
248 if (pcstack_limit <= 0)
253 oldcontext = lwp->lwp_oldcontext;
/* s1/s2: expected distances from a signal frame's sp to its ucontext,
 * without (s1) and with (s2) a trailing siginfo_t. */
255 s1 = sizeof (struct xframe) + 2 * sizeof (long);
256 s2 = s1 + sizeof (siginfo_t);
258 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
259 *pcstack++ = (uint64_t)pc;
262 if (pcstack_limit <= 0)
/* 64-bit vs 32-bit process: fetch the caller's PC at native or
 * 32-bit width from the saved stack pointer. */
265 if (p->p_model == DATAMODEL_NATIVE)
266 pc = dtrace_fulword((void *)rp->r_sp);
268 pc = dtrace_fuword32((void *)rp->r_sp);
271 while (pc != 0 && sp != 0) {
272 *pcstack++ = (uint64_t)pc;
275 if (pcstack_limit <= 0)
/* If sp sits exactly where a signal frame would put its ucontext,
 * cross the signal boundary via the saved registers. */
278 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
279 ucontext_t *ucp = (ucontext_t *)oldcontext;
280 greg_t *gregs = ucp->uc_mcontext.gregs;
282 sp = dtrace_fulword(&gregs[REG_FP]);
283 pc = dtrace_fulword(&gregs[REG_PC]);
285 oldcontext = dtrace_fulword(&ucp->uc_link);
/* Ordinary frame: follow the saved PC/FP pair. */
287 struct xframe *fr = (struct xframe *)sp;
289 pc = dtrace_fulword(&fr->fr_savpc);
290 sp = dtrace_fulword(&fr->fr_savfp);
294 * This is totally bogus: if we faulted, we're going to clear
295 * the fault and break. This is to deal with the apparently
296 * broken Java stacks on x86.
298 if (*flags & CPU_DTRACE_FAULT) {
299 *flags &= ~CPU_DTRACE_FAULT;
/* Pad out the rest of the buffer — body elided in this extract. */
305 while (pcstack_limit-- > 0)
/*
 * Fetch probe argument 'arg' for a probe that is 'aframes' frames below
 * us.  On amd64 the first 6 integer arguments live in registers; if we
 * entered via the invop trap (fp->f_retaddr == dtrace_invop_callsite)
 * the saved struct reg gives us both the register file and the true
 * stack pointer, otherwise the argument is read relative to the probe
 * site's frame.  Reads are bracketed with CPU_DTRACE_NOFAULT.
 * NOTE(review): multiple original lines (loop body, goto/load
 * statements, the final return) are elided from this extract.
 */
312 dtrace_getarg(int arg, int aframes)
315 struct amd64_frame *fp = (struct amd64_frame *)dtrace_getfp();
320 * A total of 6 arguments are passed via registers; any argument with
321 * index of 5 or lower is therefore in a register.
/* Walk down 'aframes' artificial frames to the probe site. */
325 for (i = 1; i <= aframes; i++) {
328 if (fp->f_retaddr == (long)dtrace_invop_callsite) {
330 * In the case of amd64, we will use the pointer to the
331 * regs structure that was pushed when we took the
332 * trap. To get this structure, we must increment
333 * beyond the frame structure, and then again beyond
334 * the calling RIP stored in dtrace_invop(). If the
335 * argument that we're seeking is passed on the stack,
336 * we'll pull the true stack pointer out of the saved
337 * registers and decrement our argument by the number
338 * of arguments passed in registers; if the argument
339 * we're seeking is passed in registers, we can just
342 struct reg *rp = (struct reg *)((uintptr_t)&fp[1] +
/* Register args start at %rdi (System V AMD64 order). */
346 stack = (uintptr_t *)&rp->r_rdi;
348 stack = (uintptr_t *)(rp->r_rsp);
357 * We know that we did not come through a trap to get into
358 * dtrace_probe() -- the provider simply called dtrace_probe()
359 * directly. As this is the case, we need to shift the argument
360 * that we're looking for: the probe ID is the first argument to
361 * dtrace_probe(), so the argument n will actually be found where
362 * one would expect to find argument (n + 1).
368 * This shouldn't happen. If the argument is passed in a
369 * register then it should have been, well, passed in a
372 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
/* Stack-passed arguments sit just above the saved frame. */
377 stack = (uintptr_t *)&fp[1];
/* Tolerate faults while dereferencing the (possibly bad) stack slot. */
380 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
382 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
/*
 * Return the kernel stack depth, excluding 'aframes' artificial DTrace
 * frames.  Uses the same %rbp-chain walk and same frame sanity checks
 * as dtrace_getpcstack(), but only counts frames.
 * NOTE(review): the depth increments and loop braces are elided from
 * this extract.
 */
389 dtrace_getstackdepth(int aframes)
392 struct amd64_frame *frame;
396 rbp = dtrace_getfp();
397 frame = (struct amd64_frame *)rbp;
400 if (!INKERNEL((long) frame))
402 if (!INKERNEL((long) frame->f_frame))
/* Same bounds check as dtrace_getpcstack(): next frame strictly above,
 * and within KSTACK_PAGES * PAGE_SIZE of the starting %rbp. */
405 if (frame->f_frame <= frame ||
406 (vm_offset_t)frame->f_frame >=
407 (vm_offset_t)rbp + KSTACK_PAGES * PAGE_SIZE)
409 frame = frame->f_frame;
414 return depth - aframes;
/*
 * Return the value of register 'reg' from the saved register set 'rp'.
 * A regmap[] table (mostly elided here) translates the DTrace register
 * index into a member of struct regs; out-of-range or otherwise
 * unmappable indices set CPU_DTRACE_ILLOP instead.
 * NOTE(review): the bulk of the regmap table and the switch over
 * register indices are elided from this extract.
 */
419 dtrace_getreg(struct regs *rp, uint_t reg)
435 REG_TRAPNO, /* TRAPNO */
/* Reject indices beyond the translation table. */
445 if (reg >= sizeof (regmap) / sizeof (int)) {
446 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
495 return (rp->r_trapno);
509 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
515 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
/* Index directly into the contiguous run of registers from r_gs. */
519 return ((&rp->r_gs)[reg]);
/*
 * Validate a user<->kernel copy: the kernel buffer must be above
 * kernelbase and must not wrap (asserted), and [uaddr, uaddr + size)
 * must lie entirely below kernelbase without wrapping.  On a bad user
 * range, flag CPU_DTRACE_BADADDR and record the offending address in
 * cpuc_dtrace_illval.  Return statements are elided from this extract.
 */
525 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
527 ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);
/* Catch both kernel-range overlap and uaddr + size overflow. */
529 if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
530 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
531 cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
/*
 * Copy 'size' bytes from user address 'uaddr' into kernel address
 * 'kaddr', but only after dtrace_copycheck() validates the range.
 * 'flags' is unused in the visible body.
 */
539 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
540 volatile uint16_t *flags)
542 if (dtrace_copycheck(uaddr, kaddr, size))
543 dtrace_copy(uaddr, kaddr, size);
/*
 * Copy 'size' bytes from kernel address 'kaddr' out to user address
 * 'uaddr' after range validation.  Note the source/destination order
 * passed to dtrace_copy() is reversed relative to dtrace_copyin().
 */
547 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
548 volatile uint16_t *flags)
550 if (dtrace_copycheck(uaddr, kaddr, size))
551 dtrace_copy(kaddr, uaddr, size);
/*
 * Copy a string (at most 'size' bytes) from user space into the kernel
 * buffer after range validation; dtrace_copystr() receives 'flags' so
 * it can report faults encountered mid-copy.
 */
555 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
556 volatile uint16_t *flags)
558 if (dtrace_copycheck(uaddr, kaddr, size))
559 dtrace_copystr(uaddr, kaddr, size, flags);
/*
 * Copy a string (at most 'size' bytes) from the kernel buffer out to
 * user space after range validation; mirror of dtrace_copyinstr() with
 * source and destination swapped.
 */
563 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
564 volatile uint16_t *flags)
566 if (dtrace_copycheck(uaddr, kaddr, size))
567 dtrace_copystr(kaddr, uaddr, size, flags);
/*
 * Fetch an 8-bit value from user address 'uaddr'.  Kernel-range
 * addresses set CPU_DTRACE_BADADDR and record the address in
 * cpuc_dtrace_illval (the early return for that path is elided from
 * this extract); valid addresses go through the unchecked fetch.
 */
571 dtrace_fuword8(void *uaddr)
573 if ((uintptr_t)uaddr >= kernelbase) {
574 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
575 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
578 return (dtrace_fuword8_nocheck(uaddr));
/*
 * Fetch a 16-bit value from user address 'uaddr'; same BADADDR
 * range-check pattern as dtrace_fuword8() (bad-address return elided
 * from this extract).
 */
582 dtrace_fuword16(void *uaddr)
584 if ((uintptr_t)uaddr >= kernelbase) {
585 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
586 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
589 return (dtrace_fuword16_nocheck(uaddr));
/*
 * Fetch a 32-bit value from user address 'uaddr'; same BADADDR
 * range-check pattern as dtrace_fuword8() (bad-address return elided
 * from this extract).
 */
593 dtrace_fuword32(void *uaddr)
595 if ((uintptr_t)uaddr >= kernelbase) {
596 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
597 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
600 return (dtrace_fuword32_nocheck(uaddr));
/*
 * Fetch a 64-bit value from user address 'uaddr'; same BADADDR
 * range-check pattern as dtrace_fuword8() (bad-address return elided
 * from this extract).
 */
604 dtrace_fuword64(void *uaddr)
606 if ((uintptr_t)uaddr >= kernelbase) {
607 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
608 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
611 return (dtrace_fuword64_nocheck(uaddr));