/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#if !defined(_KERNEL) && defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)mcount.c	8.1 (Berkeley) 6/4/93";
#endif
#include <sys/param.h>
#include <sys/gmon.h>
#ifdef _KERNEL
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
void	bintr(void);
void	btrap(void);
void	eintr(void);
void	user(void);
#endif
#include <machine/atomic.h>
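
/*
 * Editorial note (not part of the original file): the struct gmonparam and
 * the global _gmonparam referenced throughout this file come from
 * <sys/gmon.h>; they hold the profiling buffers (kcount[], froms[], tos[])
 * and parameters (lowpc, textsize, hashfraction, tolimit, state) used below.
 */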
/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represents the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
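
/*
 * Editorial note (not part of the original file): each call graph arc
 * recorded below lives in a struct tostruct from <sys/gmon.h>, roughly
 *
 *	struct tostruct {
 *		u_long	selfpc;		-- callee address
 *		long	count;		-- traversal count for this arc
 *		u_short	link;		-- index of the next arc in the chain
 *	};
 *
 * and p->froms[] maps a (hashed) caller pc to the head of such a chain.
 */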
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
{
#ifdef GUPROF
	int delta;
#endif
	uintfptr_t frompci;
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
	if (!atomic_cmpset_acq_int(&p->state, GMON_PROF_ON, GMON_PROF_BUSY))
		return;

	frompci = frompc - p->lowpc;
#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif
#ifdef GUPROF
	if (p->state != GMON_PROF_HIRES)
		goto skip_guprof_stuff;
	/*
	 * Look at the clock and add the count of clock cycles since the
	 * clock was last looked at to a counter for frompc.  This
	 * solidifies the count for the function containing frompc and
	 * effectively starts another clock for the current function.
	 * The count for the new clock will be solidified when another
	 * function call is made or the function returns.
	 *
	 * We use the usual sampling counters since they can be located
	 * efficiently.  4-byte counters are usually necessary.
	 *
	 * There are many complications for subtracting the profiling
	 * overheads from the counts for normal functions and adding
	 * them to the counts for mcount(), mexitcount() and cputime().
	 * We attempt to handle fractional cycles, but the overheads
	 * are usually underestimated because they are calibrated for
	 * a simpler than usual setup.
	 */
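	/*
	 * Editorial note (not part of the original file): the *_overhead_frac
	 * and *_overhead_resid fields implement fixed-point arithmetic in
	 * units of 1/CALIB_SCALE of a cycle; whenever a residual accumulates
	 * to a whole CALIB_SCALE, one extra cycle is credited to the
	 * corresponding overhead counter and debited from delta below.
	 */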
	delta = cputime() - p->mcount_overhead;
	p->cputime_overhead_resid += p->cputime_overhead_frac;
	p->mcount_overhead_resid += p->mcount_overhead_frac;
	if (delta < 0)
		*p->mcount_count += delta + p->mcount_overhead
		    - p->cputime_overhead;
	else if (delta != 0) {
		if (p->cputime_overhead_resid >= CALIB_SCALE) {
			p->cputime_overhead_resid -= CALIB_SCALE;
			++*p->cputime_count;
			--delta;
		}
		if (delta != 0) {
			if (p->mcount_overhead_resid >= CALIB_SCALE) {
				p->mcount_overhead_resid -= CALIB_SCALE;
				++*p->mcount_count;
				--delta;
			}
			KCOUNT(p, frompci) += delta;
		}
		*p->mcount_count += p->mcount_overhead_sub;
	}
	*p->cputime_count += p->cputime_overhead;
skip_guprof_stuff:
#endif /* GUPROF */
#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif
	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;
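	/*
	 * Editorial note (not part of the original file): p->froms[] is a
	 * hash table indexed by caller pc: every
	 * p->hashfraction * sizeof(*p->froms) bytes of text share one slot,
	 * and each slot holds the index of the head of a chain of tostruct
	 * arcs in p->tos[].  Index 0 means "empty"; p->tos[0].link doubles
	 * as the count of arcs allocated so far.
	 */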
	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
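	/*
	 * Editorial note (not part of the original file): the loop below keeps
	 * each chain in move-to-front order, relinking a found arc at the head
	 * so that the common case above stays cheap for hot arcs.
	 */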
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
	}
done:
	atomic_store_rel_int(&p->state, GMON_PROF_ON);
	return;
overflow:
	atomic_store_rel_int(&p->state, GMON_PROF_ERROR);
	return;
}
/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT
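
/*
 * Editorial note (not part of the original file): mexitcount() is the
 * function-exit counterpart of mcount() used by high-resolution (GUPROF)
 * profiling; it charges the cycles accumulated since the last clock reading
 * to the function that is returning.
 */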
#ifdef GUPROF
void
mexitcount(uintfptr_t selfpc)
{
	struct gmonparam *p;
	uintfptr_t selfpcdiff;
	int delta;

	p = &_gmonparam;
	selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		/*
		 * Solidify the count for the current function.
		 */
		delta = cputime() - p->mexitcount_overhead;
		p->cputime_overhead_resid += p->cputime_overhead_frac;
		p->mexitcount_overhead_resid += p->mexitcount_overhead_frac;
		if (delta < 0)
			*p->mexitcount_count += delta + p->mexitcount_overhead
			    - p->cputime_overhead;
		else if (delta != 0) {
			if (p->cputime_overhead_resid >= CALIB_SCALE) {
				p->cputime_overhead_resid -= CALIB_SCALE;
				++*p->cputime_count;
				--delta;
			}
			if (delta != 0) {
				if (p->mexitcount_overhead_resid
				    >= CALIB_SCALE) {
					p->mexitcount_overhead_resid
					    -= CALIB_SCALE;
					++*p->mexitcount_count;
					--delta;
				}
				KCOUNT(p, selfpcdiff) += delta;
			}
			*p->mexitcount_count += p->mexitcount_overhead_sub;
		}
		*p->cputime_count += p->cputime_overhead;
	}
}
#endif /* GUPROF */