/*-
 * Copyright (c) 2005,2008 Joseph Koshy
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#if (__FreeBSD_version >= 1100000)
#include <x86/apicvar.h>
#else
#include <machine/apicvar.h>
#endif
#include <machine/pmc_mdep.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "hwpmc_soft.h"
/*
 * Attempt to walk a user call stack using a too-simple algorithm.
 * In the general case we need unwind information associated with
 * the executable to be able to walk the user stack.
 *
 * We are handed a trap frame laid down at the time the PMC interrupt
 * was taken.  If the application is using frame pointers, the saved
 * PC could be:
 * a. at the beginning of a function before the stack frame is laid
 *    down,
 * b. just before a 'ret', after the stack frame has been taken off,
 * c. somewhere else in the function with a valid stack frame being
 *    present,
 *
 * If the application is not using frame pointers, this algorithm will
 * fail to yield an interesting call chain.
 *
 * TODO: figure out a way to use unwind information.
 */
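/*
 * For reference, a minimal sketch of what the instruction tests used
 * below might look like on amd64.  The real PMC_AT_FUNCTION_* macros
 * come from <machine/pmc_mdep.h> and may differ; the EX_* names are
 * purely illustrative and are kept out of the build.
 */
#if 0
#define	EX_AT_FUNCTION_PROLOGUE_PUSH_BP(I)			\
	(((I) & 0xff) == 0x55)		/* 'push %rbp' */
#define	EX_AT_FUNCTION_PROLOGUE_MOV_SP_BP(I)			\
	(((I) & 0xffffff) == 0xe58948)	/* 'mov %rsp,%rbp' (48 89 e5) */
#define	EX_AT_FUNCTION_EPILOGUE_RET(I)				\
	(((I) & 0xff) == 0xc3)		/* 'ret' */
#endif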
int
pmc_save_user_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
	int n;
	uint32_t instr;
	uintptr_t fp, oldfp, pc, r, sp;

	KASSERT(TRAPF_USERMODE(tf), ("[x86,%d] Not a user trap frame tf=%p",
	    __LINE__, (void *) tf));

	pc = PMC_TRAPFRAME_TO_PC(tf);
	oldfp = fp = PMC_TRAPFRAME_TO_FP(tf);
	sp = PMC_TRAPFRAME_TO_USER_SP(tf);

	*cc++ = pc; n = 1;	/* the interrupted PC is the first entry */

	r = fp + sizeof(uintptr_t); /* points to return address */

	if (!PMC_IN_USERSPACE(pc))
		return (n);

	if (copyin((void *) pc, &instr, sizeof(instr)) != 0)
		return (n);

	if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
	    PMC_AT_FUNCTION_EPILOGUE_RET(instr)) { /* ret */
		if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
			return (n);
	} else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
		sp += sizeof(uintptr_t);
		if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
			return (n);
	} else if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
	    copyin((void *) fp, &fp, sizeof(fp)) != 0)
		return (n);

	for (; n < nframes;) {
		if (pc == 0 || !PMC_IN_USERSPACE(pc))
			break;

		*cc++ = pc; n++;

		if (fp < oldfp)	/* frame addresses must increase as we unwind */
			break;

		r = fp + sizeof(uintptr_t); /* address of return address */
		oldfp = fp;

		if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
		    copyin((void *) fp, &fp, sizeof(fp)) != 0)
			break;
	}

	return (n);
}
/*
 * Walking the kernel call stack.
 *
 * We are handed the trap frame laid down at the time the PMC
 * interrupt was taken.  The saved PC could be:
 * a. in the lowlevel trap handler, meaning that there isn't a C stack
 *    to traverse,
 * b. at the beginning of a function before the stack frame is laid
 *    down,
 * c. just before a 'ret', after the stack frame has been taken off,
 * d. somewhere else in a function with a valid stack frame being
 *    present.
 *
 * In case (d), the previous frame pointer is at [%ebp]/[%rbp] and
 * the return address is at [%ebp+4]/[%rbp+8].
 *
 * For cases (b) and (c), the return address is at [%esp]/[%rsp] and
 * the frame pointer doesn't need to be changed when going up one
 * level in the stack.
 *
 * For case (a), we check if the PC lies in low-level trap handling
 * code, and if so we terminate our trace.
 */
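/*
 * For case (d) a single unwind step amounts to the sketch below
 * (amd64 pointer size; illustrative only, the function that follows
 * uses the PMC_* helpers and checks every address before it is
 * dereferenced).
 */
#if 0
static __inline void
example_unwind_one_frame(uintptr_t *fp, uintptr_t *pc)
{
	*pc = *(uintptr_t *)(*fp + sizeof(uintptr_t));	/* [%rbp+8]: return address */
	*fp = *(uintptr_t *)*fp;			/* [%rbp]: caller's frame pointer */
}
#endif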
int
pmc_save_kernel_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
	int n;
	uint32_t instr;
	uintptr_t fp, pc, r, sp, stackstart, stackend;
	struct thread *td;

	KASSERT(TRAPF_USERMODE(tf) == 0,("[x86,%d] not a kernel backtrace",
	    __LINE__));

	td = curthread;
	pc = PMC_TRAPFRAME_TO_PC(tf);
	fp = PMC_TRAPFRAME_TO_FP(tf);
	sp = PMC_TRAPFRAME_TO_KERNEL_SP(tf);

	*cc++ = pc;
	r = fp + sizeof(uintptr_t); /* points to return address */

	if (nframes <= 1)
		return (1);

	stackstart = (uintptr_t) td->td_kstack;
	stackend = (uintptr_t) td->td_kstack + td->td_kstack_pages * PAGE_SIZE;

	if (PMC_IN_TRAP_HANDLER(pc) ||
	    !PMC_IN_KERNEL(pc) ||
	    !PMC_IN_KERNEL_STACK(r, stackstart, stackend) ||
	    !PMC_IN_KERNEL_STACK(sp, stackstart, stackend) ||
	    !PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
		return (1);

	instr = *(uint32_t *) pc;

	/*
	 * Determine whether the interrupted function was in the
	 * processing of either laying down its stack frame or taking
	 * it off.
	 *
	 * If we haven't started laying down a stack frame, or are
	 * just about to return, then our caller's address is at
	 * *sp, and we don't have a frame to unwind.
	 */
	if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
	    PMC_AT_FUNCTION_EPILOGUE_RET(instr))
		pc = *(uintptr_t *) sp;
	else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
		/*
		 * The code was midway through laying down a frame.
		 * At this point sp[0] has a frame back pointer,
		 * and the caller's address is therefore at sp[1].
		 */
		sp += sizeof(uintptr_t);
		if (!PMC_IN_KERNEL_STACK(sp, stackstart, stackend))
			return (1);
		pc = *(uintptr_t *) sp;
	} else {
		/*
		 * Not in the function prologue or epilogue.
		 */
		pc = *(uintptr_t *) r;
		fp = *(uintptr_t *) fp;
	}

	for (n = 1; n < nframes; n++) {
		*cc++ = pc;

		if (PMC_IN_TRAP_HANDLER(pc))
			break;

		r = fp + sizeof(uintptr_t);
		if (!PMC_IN_KERNEL_STACK(fp, stackstart, stackend) ||
		    !PMC_IN_KERNEL_STACK(r, stackstart, stackend))
			break;
		pc = *(uintptr_t *) r;
		fp = *(uintptr_t *) fp;
	}

	return (n);
}
/*
 * Machine dependent initialization for x86 class platforms.
 */

struct pmc_mdep *
pmc_md_initialize(void)
{
	int i;
	struct pmc_mdep *md;

	/* determine the CPU kind */
	if (cpu_vendor_id == CPU_VENDOR_AMD)
		md = pmc_amd_initialize();
	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
		md = pmc_intel_initialize();
	else
		md = NULL;

	/* disallow sampling if we do not have an LAPIC */
	if (md != NULL && !lapic_enable_pmc())
		for (i = 0; i < md->pmd_nclass; i++) {
			if (i == PMC_CLASS_INDEX_SOFT)
				continue;
			md->pmd_classdep[i].pcd_caps &= ~PMC_CAP_INTERRUPT;
		}

	return (md);
}
void
pmc_md_finalize(struct pmc_mdep *md)
{

	if (cpu_vendor_id == CPU_VENDOR_AMD)
		pmc_amd_finalize(md);
	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
		pmc_intel_finalize(md);
	else
		KASSERT(0, ("[x86,%d] Unknown vendor", __LINE__));
}