4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
25 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
26 * Use is subject to license terms.
28 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/stack.h>
36 #include <machine/frame.h>
37 #include <machine/md_var.h>
38 #include <machine/reg.h>
41 #include <vm/vm_param.h>
44 #include <machine/atomic.h>
45 #include <machine/db_machdep.h>
46 #include <machine/md_var.h>
47 #include <machine/stack.h>
48 #include <ddb/db_sym.h>
 * We need some reasonable default to prevent backtrace code
 * from wandering too far.
/* NOTE(review): MAX_FUNCTION_SIZE/MAX_PROLOGUE_SIZE are not referenced in this
 * chunk — presumably bounds for symbol/prologue scanning; confirm at use sites. */
#define MAX_FUNCTION_SIZE 0x10000
#define MAX_PROLOGUE_SIZE 0x100
/* Upper bound on frames walked per user backtrace (guards circular stacks). */
#define MAX_USTACK_DEPTH 2048
/*
 * Raw user-memory fetch primitives, defined outside this file.  They perform
 * no address validation; the dtrace_fuwordN() wrappers below range-check
 * against VM_MAXUSER_ADDRESS before calling them.
 */
uint8_t dtrace_fuword8_nocheck(void *);
uint16_t dtrace_fuword16_nocheck(void *);
uint32_t dtrace_fuword32_nocheck(void *);
uint64_t dtrace_fuword64_nocheck(void *);
68 dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
71 struct unwind_state state;
79 pcstack[depth++] = (pc_t) intrpc;
84 __asm __volatile("mov %0, sp" : "=&r" (sp));
86 state.fp = (uint64_t)__builtin_frame_address(0);
88 state.pc = (uint64_t)dtrace_getpcstack;
90 while (depth < pcstack_limit) {
91 if (!INKERNEL(state.pc) || !INKERNEL(state.fp))
96 /* FP to previous frame (X29) */
97 state.fp = *(register_t *)(fp);
99 state.pc = *(register_t *)(fp + 8) - 4;
102 * NB: Unlike some other architectures, we don't need to
103 * explicitly insert cpu_dtrace_caller as it appears in the
104 * normal kernel stack trace rather than a special trap frame.
109 pcstack[depth++] = state.pc;
114 for (; depth < pcstack_limit; depth++) {
120 dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
123 volatile uint16_t *flags =
124 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
126 uintptr_t oldfp = fp;
128 ASSERT(pcstack == NULL || pcstack_limit > 0);
132 * We limit the number of times we can go around this
133 * loop to account for a circular stack.
135 if (ret++ >= MAX_USTACK_DEPTH) {
136 *flags |= CPU_DTRACE_BADSTACK;
137 cpu_core[curcpu].cpuc_dtrace_illval = fp;
141 if (pcstack != NULL) {
142 *pcstack++ = (uint64_t)pc;
144 if (pcstack_limit <= 0)
151 pc = dtrace_fuword64((void *)(fp +
152 offsetof(struct arm64_frame, f_retaddr)));
153 fp = dtrace_fuword64((void *)fp);
156 *flags |= CPU_DTRACE_BADSTACK;
157 cpu_core[curcpu].cpuc_dtrace_illval = fp;
163 * This workaround might not be necessary. It needs to be
164 * revised and removed from all architectures if found
165 * unwanted. Leaving the original x86 comment for reference.
167 * This is totally bogus: if we faulted, we're going to clear
168 * the fault and break. This is to deal with the apparently
169 * broken Java stacks on x86.
171 if (*flags & CPU_DTRACE_FAULT) {
172 *flags &= ~CPU_DTRACE_FAULT;
183 dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
186 struct trapframe *tf;
187 uintptr_t pc, sp, fp;
188 volatile uint16_t *flags =
189 (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
192 if (*flags & CPU_DTRACE_FAULT)
195 if (pcstack_limit <= 0)
199 * If there's no user context we still need to zero the stack.
201 if (p == NULL || (tf = curthread->td_frame) == NULL)
204 *pcstack++ = (uint64_t)p->p_pid;
207 if (pcstack_limit <= 0)
214 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
216 * In an entry probe. The frame pointer has not yet been
217 * pushed (that happens in the function prologue). The
218 * best approach is to add the current pc as a missing top
219 * of stack and back the pc up to the caller, which is stored
220 * at the current stack pointer address since the call
221 * instruction puts it there right before the branch.
224 *pcstack++ = (uint64_t)pc;
226 if (pcstack_limit <= 0)
232 n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
234 ASSERT(n <= pcstack_limit);
240 while (pcstack_limit-- > 0)
/*
 * Return the depth of the current user stack — not yet implemented on
 * this architecture.
 */
int
dtrace_getustackdepth(void)
{

	printf("IMPLEMENT ME: %s\n", __func__);

	return (0);
}
254 dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
257 printf("IMPLEMENT ME: %s\n", __func__);
262 dtrace_getarg(int arg, int aframes)
265 printf("IMPLEMENT ME: %s\n", __func__);
271 dtrace_getstackdepth(int aframes)
273 struct unwind_state state;
282 __asm __volatile("mov %0, sp" : "=&r" (sp));
284 state.fp = (uint64_t)__builtin_frame_address(0);
286 state.pc = (uint64_t)dtrace_getstackdepth;
289 done = unwind_frame(&state);
290 if (!INKERNEL(state.pc) || !INKERNEL(state.fp))
298 return (depth - aframes);
302 dtrace_getreg(struct trapframe *rp, uint_t reg)
305 printf("IMPLEMENT ME: %s\n", __func__);
311 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
314 if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
315 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
316 cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
324 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
325 volatile uint16_t *flags)
328 if (dtrace_copycheck(uaddr, kaddr, size))
329 dtrace_copy(uaddr, kaddr, size);
333 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
334 volatile uint16_t *flags)
337 if (dtrace_copycheck(uaddr, kaddr, size))
338 dtrace_copy(kaddr, uaddr, size);
342 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
343 volatile uint16_t *flags)
346 if (dtrace_copycheck(uaddr, kaddr, size))
347 dtrace_copystr(uaddr, kaddr, size, flags);
351 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
352 volatile uint16_t *flags)
355 if (dtrace_copycheck(uaddr, kaddr, size))
356 dtrace_copystr(kaddr, uaddr, size, flags);
360 dtrace_fuword8(void *uaddr)
363 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
364 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
365 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
369 return (dtrace_fuword8_nocheck(uaddr));
373 dtrace_fuword16(void *uaddr)
376 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
377 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
378 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
382 return (dtrace_fuword16_nocheck(uaddr));
386 dtrace_fuword32(void *uaddr)
389 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
390 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
391 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
395 return (dtrace_fuword32_nocheck(uaddr));
399 dtrace_fuword64(void *uaddr)
402 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
403 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
404 cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
408 return (dtrace_fuword64_nocheck(uaddr));