4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
25 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
26 * Use is subject to license terms.
28 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/stack.h>
36 #include <machine/md_var.h>
37 #include <machine/stack.h>
40 #include <vm/vm_param.h>
/* Kernel base address: anything at or above this is kernel memory. */
43 extern uintptr_t kernbase;
44 uintptr_t kernelbase = (uintptr_t) &kernbase;
/*
 * A kernel virtual address on i386 lies between the top of the user
 * stack (USRSTACK) and the end of the kernel map.
 */
46 #define INKERNEL(va) (((vm_offset_t)(va)) >= USRSTACK && \
47 ((vm_offset_t)(va)) < VM_MAX_KERNEL_ADDRESS)
/* Unchecked user-memory fetch primitives (fault handling done by caller). */
49 uint8_t dtrace_fuword8_nocheck(void *);
50 uint16_t dtrace_fuword16_nocheck(void *);
51 uint32_t dtrace_fuword32_nocheck(void *);
52 uint64_t dtrace_fuword64_nocheck(void *);
/*
 * dtrace_getpcstack: record up to pcstack_limit kernel return addresses
 * into pcstack[], walking the %ebp frame chain and skipping `aframes`
 * artificial (dtrace-internal) frames.
 * NOTE(review): interior lines are missing from this extract (opening
 * brace, aframes bookkeeping, loop exits); comments cover visible code only.
 */
55 dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
/* Frame pointer used to walk the saved-%ebp chain. */
60 struct i386_frame *frame;
/* Caller recorded per-CPU by the probe machinery, if any. */
62 pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
/* When entered from an interrupt, the interrupted PC goes first. */
65 pcstack[depth++] = (pc_t) intrpc;
/* Start the walk from our own frame pointer. */
69 __asm __volatile("movl %%ebp,%0" : "=r" (ebp));
71 frame = (struct i386_frame *)ebp;
72 while (depth < pcstack_limit) {
76 callpc = frame->f_retaddr;
/* Stop as soon as a return address leaves kernel space. */
78 if (!INKERNEL(callpc))
/*
 * Once all artificial frames have been skipped, substitute the
 * recorded dtrace caller (if any) for this slot.
 */
83 if ((aframes == 0) && (caller != 0)) {
84 pcstack[depth++] = caller;
88 pcstack[depth++] = callpc;
/*
 * Sanity-check the next frame pointer: it must move up the stack
 * and stay within the kernel stack's page range.
 */
91 if (frame->f_frame <= frame ||
92 (vm_offset_t)frame->f_frame >=
93 (vm_offset_t)ebp + KSTACK_PAGES * PAGE_SIZE)
95 frame = frame->f_frame;
/* Zero-fill any remaining slots. */
98 for (; depth < pcstack_limit; depth++) {
/*
 * dtrace_getustack_common: shared user-stack walker.  Follows the saved
 * frame-pointer chain starting at (pc, sp), storing PCs into pcstack
 * (or only counting frames when pcstack is NULL), and unwinds through
 * signal handler ucontexts via lwp_oldcontext.
 * NOTE(review): return statements and some loop exits are not visible
 * in this extract.
 */
105 dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
108 klwp_t *lwp = ttolwp(curthread);
110 uintptr_t oldcontext = lwp->lwp_oldcontext;
/* Per-CPU fault flags, checked after the unsafe user fetches below. */
111 volatile uint16_t *flags =
112 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
116 ASSERT(pcstack == NULL || pcstack_limit > 0);
/*
 * s1/s2: candidate offsets from a signal frame's sp at which the saved
 * ucontext may sit (without and with a trailing siginfo), computed for
 * the native and 32-bit data models respectively.
 */
118 if (p->p_model == DATAMODEL_NATIVE) {
119 s1 = sizeof (struct frame) + 2 * sizeof (long);
120 s2 = s1 + sizeof (siginfo_t);
122 s1 = sizeof (struct frame32) + 3 * sizeof (int);
123 s2 = s1 + sizeof (siginfo32_t);
126 while (pc != 0 && sp != 0) {
128 if (pcstack != NULL) {
129 *pcstack++ = (uint64_t)pc;
131 if (pcstack_limit <= 0)
/*
 * If sp matches a signal-frame layout, pull the next pc/sp out
 * of the saved ucontext rather than the ordinary frame chain.
 */
135 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
136 if (p->p_model == DATAMODEL_NATIVE) {
137 ucontext_t *ucp = (ucontext_t *)oldcontext;
138 greg_t *gregs = ucp->uc_mcontext.gregs;
140 sp = dtrace_fulword(&gregs[REG_FP]);
141 pc = dtrace_fulword(&gregs[REG_PC]);
143 oldcontext = dtrace_fulword(&ucp->uc_link);
145 ucontext32_t *ucp = (ucontext32_t *)oldcontext;
146 greg32_t *gregs = ucp->uc_mcontext.gregs;
148 sp = dtrace_fuword32(&gregs[EBP]);
149 pc = dtrace_fuword32(&gregs[EIP]);
151 oldcontext = dtrace_fuword32(&ucp->uc_link);
/* Ordinary frame: follow the saved pc / saved fp. */
154 if (p->p_model == DATAMODEL_NATIVE) {
155 struct frame *fr = (struct frame *)sp;
157 pc = dtrace_fulword(&fr->fr_savpc);
158 sp = dtrace_fulword(&fr->fr_savfp);
160 struct frame32 *fr = (struct frame32 *)sp;
162 pc = dtrace_fuword32(&fr->fr_savpc);
163 sp = dtrace_fuword32(&fr->fr_savfp);
168 * This is totally bogus: if we faulted, we're going to clear
169 * the fault and break. This is to deal with the apparently
170 * broken Java stacks on x86.
172 if (*flags & CPU_DTRACE_FAULT) {
173 *flags &= ~CPU_DTRACE_FAULT;
/*
 * dtrace_getupcstack: public entry point for user-mode stack traces.
 * Emits the pid as the first slot, then delegates the frame walk to
 * dtrace_getustack_common() and zero-fills the remainder.
 */
182 dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
184 klwp_t *lwp = ttolwp(curthread);
188 volatile uint16_t *flags =
189 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
/* Bail out early if a fault is already pending on this CPU. */
192 if (*flags & CPU_DTRACE_FAULT)
195 if (pcstack_limit <= 0)
199 * If there's no user context we still need to zero the stack.
201 if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
/* First slot carries the process id. */
204 *pcstack++ = (uint64_t)p->p_pid;
207 if (pcstack_limit <= 0)
/*
 * On a pid-provider entry probe the return address has not been
 * linked into a frame yet: record the trap pc, then fetch the real
 * return address from the top of the user stack (*sp).
 */
213 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
214 *pcstack++ = (uint64_t)pc;
216 if (pcstack_limit <= 0)
219 if (p->p_model == DATAMODEL_NATIVE)
220 pc = dtrace_fulword((void *)rp->r_sp);
222 pc = dtrace_fuword32((void *)rp->r_sp);
225 n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
227 ASSERT(n <= pcstack_limit);
/* Zero any slots the walk did not fill. */
233 while (pcstack_limit-- > 0)
/*
 * dtrace_getustackdepth: report the depth of the current user stack.
 * NOTE(review): the body is not visible in this extract; presumably it
 * counts frames via dtrace_getustack_common(NULL, ...) — confirm.
 */
238 dtrace_getustackdepth(void)
/*
 * dtrace_getufpstack: like dtrace_getupcstack(), but records frame
 * pointers alongside PCs (fpstack/pcstack pairs).  Performs the frame
 * walk inline rather than via dtrace_getustack_common().
 */
243 dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
245 klwp_t *lwp = ttolwp(curthread);
248 uintptr_t pc, sp, oldcontext;
/* Per-CPU fault flags, checked after the unsafe user fetches below. */
249 volatile uint16_t *flags =
250 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
/* Bail out early if a fault is already pending on this CPU. */
253 if (*flags & CPU_DTRACE_FAULT)
256 if (pcstack_limit <= 0)
260 * If there's no user context we still need to zero the stack.
262 if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
/* First slot carries the process id. */
265 *pcstack++ = (uint64_t)p->p_pid;
268 if (pcstack_limit <= 0)
273 oldcontext = lwp->lwp_oldcontext;
/* Candidate signal-frame ucontext offsets; see dtrace_getustack_common(). */
275 if (p->p_model == DATAMODEL_NATIVE) {
276 s1 = sizeof (struct frame) + 2 * sizeof (long);
277 s2 = s1 + sizeof (siginfo_t);
279 s1 = sizeof (struct frame32) + 3 * sizeof (int);
280 s2 = s1 + sizeof (siginfo32_t);
/* Entry probe: record trap pc, then fetch the real return address. */
283 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
284 *pcstack++ = (uint64_t)pc;
287 if (pcstack_limit <= 0)
290 if (p->p_model == DATAMODEL_NATIVE)
291 pc = dtrace_fulword((void *)rp->r_sp);
293 pc = dtrace_fuword32((void *)rp->r_sp);
296 while (pc != 0 && sp != 0) {
297 *pcstack++ = (uint64_t)pc;
300 if (pcstack_limit <= 0)
/* Signal frame: unwind through the saved ucontext. */
303 if (oldcontext == sp + s1 || oldcontext == sp + s2) {
304 if (p->p_model == DATAMODEL_NATIVE) {
305 ucontext_t *ucp = (ucontext_t *)oldcontext;
306 greg_t *gregs = ucp->uc_mcontext.gregs;
308 sp = dtrace_fulword(&gregs[REG_FP]);
309 pc = dtrace_fulword(&gregs[REG_PC]);
311 oldcontext = dtrace_fulword(&ucp->uc_link);
/*
 * NOTE(review): unlike dtrace_getustack_common(), this
 * non-native branch uses ucontext_t/greg_t rather than
 * ucontext32_t/greg32_t — looks like a copy/paste
 * inconsistency; verify against the 32-bit register layout.
 */
313 ucontext_t *ucp = (ucontext_t *)oldcontext;
314 greg_t *gregs = ucp->uc_mcontext.gregs;
316 sp = dtrace_fuword32(&gregs[EBP]);
317 pc = dtrace_fuword32(&gregs[EIP]);
319 oldcontext = dtrace_fuword32(&ucp->uc_link);
/* Ordinary frame: follow the saved pc / saved fp. */
322 if (p->p_model == DATAMODEL_NATIVE) {
323 struct frame *fr = (struct frame *)sp;
325 pc = dtrace_fulword(&fr->fr_savpc);
326 sp = dtrace_fulword(&fr->fr_savfp);
328 struct frame32 *fr = (struct frame32 *)sp;
330 pc = dtrace_fuword32(&fr->fr_savpc);
331 sp = dtrace_fuword32(&fr->fr_savfp);
336 * This is totally bogus: if we faulted, we're going to clear
337 * the fault and break. This is to deal with the apparently
338 * broken Java stacks on x86.
340 if (*flags & CPU_DTRACE_FAULT) {
341 *flags &= ~CPU_DTRACE_FAULT;
/* Zero any slots the walk did not fill. */
347 while (pcstack_limit-- > 0)
/*
 * dtrace_getarg: fetch probe argument `arg` from the stack, skipping
 * `aframes` artificial frames.  Handles both the trap path (entered
 * through dtrace_invop_callsite) and the direct dtrace_probe() call
 * path, with the user-controlled load guarded by NOFAULT.
 */
353 dtrace_getarg(int arg, int aframes)
356 struct i386_frame *fp = (struct i386_frame *)dtrace_getfp();
/* Walk up past the artificial frames. */
360 for (i = 1; i <= aframes; i++) {
363 if (fp->f_retaddr == (long)dtrace_invop_callsite) {
365 * If we pass through the invalid op handler, we will
366 * use the pointer that it passed to the stack as the
367 * second argument to dtrace_invop() as the pointer to
368 * the stack. When using this stack, we must step
369 * beyond the EIP/RIP that was pushed when the trap was
370 * taken -- hence the "+ 1" below.
372 stack = ((uintptr_t **)&fp[1])[1] + 1;
379 * We know that we did not come through a trap to get into
380 * dtrace_probe() -- the provider simply called dtrace_probe()
381 * directly. As this is the case, we need to shift the argument
382 * that we're looking for: the probe ID is the first argument to
383 * dtrace_probe(), so the argument n will actually be found where
384 * one would expect to find argument (n + 1).
388 stack = (uintptr_t *)&fp[1];
/* Guard the load from the (possibly bad) stack pointer against faults. */
391 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
393 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
/*
 * dtrace_getstackdepth: count kernel stack frames from the current
 * frame pointer, then discount the `aframes` artificial ones.
 */
399 dtrace_getstackdepth(int aframes)
402 struct i386_frame *frame;
406 ebp = dtrace_getfp();
407 frame = (struct i386_frame *)ebp;
/* Stop when the walk leaves kernel address space. */
410 if (!INKERNEL((long) frame))
412 if (!INKERNEL((long) frame->f_frame))
/* Same frame-chain sanity check as dtrace_getpcstack(). */
415 if (frame->f_frame <= frame ||
416 (vm_offset_t)frame->f_frame >=
417 (vm_offset_t)ebp + KSTACK_PAGES * PAGE_SIZE)
419 frame = frame->f_frame;
424 return depth - aframes;
/*
 * dtrace_getreg: translate a DTrace register number via regmap and
 * return the corresponding value from the saved register set; flags
 * ILLOP on out-of-range or unsupported registers.
 * NOTE(review): most of the regmap table and the dispatch body are
 * missing from this extract; comments cover visible lines only.
 */
429 dtrace_getreg(struct regs *rp, uint_t reg)
445 REG_TRAPNO, /* TRAPNO */
/* Reject register indices beyond the translation table. */
455 if (reg >= sizeof (regmap) / sizeof (int)) {
456 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
505 return (rp->r_trapno);
519 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
525 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
/* Index off r_gs, the first member of the saved register block. */
529 return ((&rp->r_gs)[reg]);
/*
 * dtrace_copycheck: validate a user<->kernel copy.  kaddr must be a
 * non-wrapping kernel range; the user range must stay below kernelbase
 * and must not wrap.  On failure, flags BADADDR and records the
 * offending user address in cpuc_dtrace_illval.
 */
535 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
537 ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);
/* Reject user ranges that reach into the kernel or overflow. */
539 if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
540 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
541 cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
/*
 * dtrace_copyin: copy `size` bytes from user uaddr to kernel kaddr,
 * but only after dtrace_copycheck() approves the ranges.
 */
549 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
550 volatile uint16_t *flags)
552 if (dtrace_copycheck(uaddr, kaddr, size))
553 dtrace_copy(uaddr, kaddr, size);
/*
 * dtrace_copyout: copy `size` bytes from kernel kaddr to user uaddr,
 * but only after dtrace_copycheck() approves the ranges.
 */
557 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
558 volatile uint16_t *flags)
560 if (dtrace_copycheck(uaddr, kaddr, size))
561 dtrace_copy(kaddr, uaddr, size);
/*
 * dtrace_copyinstr: copy a NUL-terminated string (at most `size` bytes)
 * from user uaddr to kernel kaddr after range validation; faults are
 * reported through *flags by dtrace_copystr().
 */
565 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
566 volatile uint16_t *flags)
568 if (dtrace_copycheck(uaddr, kaddr, size))
569 dtrace_copystr(uaddr, kaddr, size, flags);
/*
 * dtrace_copyoutstr: copy a NUL-terminated string (at most `size`
 * bytes) from kernel kaddr to user uaddr after range validation.
 */
573 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
574 volatile uint16_t *flags)
576 if (dtrace_copycheck(uaddr, kaddr, size))
577 dtrace_copystr(kaddr, uaddr, size, flags);
/*
 * dtrace_fuword8: fetch an 8-bit value from user memory.  Kernel
 * addresses are rejected with BADADDR (and the address recorded)
 * before the unchecked fetch is attempted.
 */
581 dtrace_fuword8(void *uaddr)
583 if ((uintptr_t)uaddr >= kernelbase) {
584 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
585 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
588 return (dtrace_fuword8_nocheck(uaddr));
/*
 * dtrace_fuword16: fetch a 16-bit value from user memory; same
 * kernel-address guard as dtrace_fuword8().
 */
592 dtrace_fuword16(void *uaddr)
594 if ((uintptr_t)uaddr >= kernelbase) {
595 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
596 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
599 return (dtrace_fuword16_nocheck(uaddr));
/*
 * dtrace_fuword32: fetch a 32-bit value from user memory; same
 * kernel-address guard as dtrace_fuword8().
 */
603 dtrace_fuword32(void *uaddr)
605 if ((uintptr_t)uaddr >= kernelbase) {
606 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
607 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
610 return (dtrace_fuword32_nocheck(uaddr));
/*
 * dtrace_fuword64: fetch a 64-bit value from user memory; same
 * kernel-address guard as dtrace_fuword8().
 */
614 dtrace_fuword64(void *uaddr)
616 if ((uintptr_t)uaddr >= kernelbase) {
617 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
618 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
621 return (dtrace_fuword64_nocheck(uaddr));