/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)subr_prof.c	8.3 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/malloc.h>
#include <sys/gmon.h>
#undef MCOUNT

static MALLOC_DEFINE(M_GPROF, "gprof", "kernel profiling buffer");

static void kmstartup(void *);
SYSINIT(kmem, SI_SUB_KPROF, SI_ORDER_FIRST, kmstartup, NULL);

struct gmonparam _gmonparam = { GMON_PROF_OFF };

#ifdef GUPROF
void
nullfunc_loop_profiled()
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		nullfunc_profiled();
}

/* XXX assumes nullfunc_profiled() is linked immediately after the loop. */
#define	nullfunc_loop_profiled_end	nullfunc_profiled	/* XXX */

void
nullfunc_profiled()
{
}
#endif /* GUPROF */

/*
 * Update the histograms to support extending the text region arbitrarily.
 * This is done slightly naively (no sparse regions), so will waste slight
 * amounts of memory, but will overall work nicely enough to allow profiling
 * of KLDs.
 */
void
kmupetext(uintfptr_t nhighpc)
{
	struct gmonparam np;	/* slightly large */
	struct gmonparam *p = &_gmonparam;
	char *cp;

	GIANT_REQUIRED;
	bcopy(p, &np, sizeof(*p));
	np.highpc = ROUNDUP(nhighpc, HISTFRACTION * sizeof(HISTCOUNTER));
	if (np.highpc <= p->highpc)
		return;
	np.textsize = np.highpc - p->lowpc;
	np.kcountsize = np.textsize / HISTFRACTION;
	np.hashfraction = HASHFRACTION;
	np.fromssize = np.textsize / HASHFRACTION;
	np.tolimit = np.textsize * ARCDENSITY / 100;
	if (np.tolimit < MINARCS)
		np.tolimit = MINARCS;
	else if (np.tolimit > MAXARCS)
		np.tolimit = MAXARCS;
	np.tossize = np.tolimit * sizeof(struct tostruct);
	cp = malloc(np.kcountsize + np.fromssize + np.tossize,
	    M_GPROF, M_WAITOK | M_ZERO);
	/*
	 * Check for something else extending highpc while we slept.
	 */
	if (np.highpc <= p->highpc) {
		free(cp, M_GPROF);
		return;
	}
	np.tos = (struct tostruct *)cp;
	cp += np.tossize;
	np.kcount = (HISTCOUNTER *)cp;
	cp += np.kcountsize;
	np.froms = (u_short *)cp;
#ifdef GUPROF
	/* Reinitialize pointers to overhead counters. */
	np.cputime_count = &KCOUNT(&np, PC_TO_I(&np, cputime));
	np.mcount_count = &KCOUNT(&np, PC_TO_I(&np, mcount));
	np.mexitcount_count = &KCOUNT(&np, PC_TO_I(&np, mexitcount));
#endif
	critical_enter();
	bcopy(p->tos, np.tos, p->tossize);
	bzero((char *)np.tos + p->tossize, np.tossize - p->tossize);
	bcopy(p->kcount, np.kcount, p->kcountsize);
	bzero((char *)np.kcount + p->kcountsize, np.kcountsize -
	    p->kcountsize);
	bcopy(p->froms, np.froms, p->fromssize);
	bzero((char *)np.froms + p->fromssize, np.fromssize - p->fromssize);
	cp = (char *)p->tos;
	bcopy(&np, p, sizeof(*p));
	critical_exit();
	free(cp, M_GPROF);
}

static void
kmstartup(void *dummy)
{
	char *cp;
	struct gmonparam *p = &_gmonparam;
#ifdef GUPROF
	int cputime_overhead;
	int empty_loop_time;
	int i;
	int mcount_overhead;
	int mexitcount_overhead;
	int nullfunc_loop_overhead;
	int nullfunc_loop_profiled_time;
	uintfptr_t tmp_addr;
#endif

	/*
	 * Round lowpc and highpc to multiples of the density we're using
	 * so the rest of the scaling (here and in gprof) stays in ints.
	 */
	p->lowpc = ROUNDDOWN((u_long)btext, HISTFRACTION * sizeof(HISTCOUNTER));
	p->highpc = ROUNDUP((u_long)etext, HISTFRACTION * sizeof(HISTCOUNTER));
	p->textsize = p->highpc - p->lowpc;
	printf("Profiling kernel, textsize=%lu [%jx..%jx]\n",
	    p->textsize, (uintmax_t)p->lowpc, (uintmax_t)p->highpc);
	p->kcountsize = p->textsize / HISTFRACTION;
	p->hashfraction = HASHFRACTION;
	p->fromssize = p->textsize / HASHFRACTION;
	p->tolimit = p->textsize * ARCDENSITY / 100;
	if (p->tolimit < MINARCS)
		p->tolimit = MINARCS;
	else if (p->tolimit > MAXARCS)
		p->tolimit = MAXARCS;
	p->tossize = p->tolimit * sizeof(struct tostruct);
	cp = (char *)malloc(p->kcountsize + p->fromssize + p->tossize,
	    M_GPROF, M_WAITOK | M_ZERO);
	p->tos = (struct tostruct *)cp;
	cp += p->tossize;
	p->kcount = (HISTCOUNTER *)cp;
	cp += p->kcountsize;
	p->froms = (u_short *)cp;
	p->histcounter_type = FUNCTION_ALIGNMENT / HISTFRACTION * NBBY;

#ifdef GUPROF
	/* Signed counters. */
	p->histcounter_type = -p->histcounter_type;

	/* Initialize pointers to overhead counters. */
	p->cputime_count = &KCOUNT(p, PC_TO_I(p, cputime));
	p->mcount_count = &KCOUNT(p, PC_TO_I(p, mcount));
	p->mexitcount_count = &KCOUNT(p, PC_TO_I(p, mexitcount));

	/*
	 * Disable interrupts to avoid interference while we calibrate
	 * things.
	 */
	critical_enter();

	/*
	 * Determine overheads.
	 * XXX this needs to be repeated for each useful timer/counter.
	 */
	cputime_overhead = 0;
	startguprof(p);
	for (i = 0; i < CALIB_SCALE; i++)
		cputime_overhead += cputime();

	empty_loop();
	startguprof(p);
	empty_loop();
	empty_loop_time = cputime();

	nullfunc_loop_profiled();

	/*
	 * Start profiling.  There won't be any normal function calls since
	 * interrupts are disabled, but we will call the profiling routines
	 * directly to determine their overheads.
	 */
	p->state = GMON_PROF_HIRES;

	startguprof(p);
	nullfunc_loop_profiled();

	startguprof(p);
	for (i = 0; i < CALIB_SCALE; i++)
		MCOUNT_OVERHEAD(sys_profil);
	mcount_overhead = KCOUNT(p, PC_TO_I(p, sys_profil));

	startguprof(p);
	for (i = 0; i < CALIB_SCALE; i++)
		MEXITCOUNT_OVERHEAD();
	MEXITCOUNT_OVERHEAD_GETLABEL(tmp_addr);
	mexitcount_overhead = KCOUNT(p, PC_TO_I(p, tmp_addr));

	p->state = GMON_PROF_OFF;
	stopguprof(p);

	critical_exit();

	nullfunc_loop_profiled_time = 0;
	for (tmp_addr = (uintfptr_t)nullfunc_loop_profiled;
	     tmp_addr < (uintfptr_t)nullfunc_loop_profiled_end;
	     tmp_addr += HISTFRACTION * sizeof(HISTCOUNTER))
		nullfunc_loop_profiled_time += KCOUNT(p, PC_TO_I(p, tmp_addr));
#define	CALIB_DOSCALE(count)	(((count) + CALIB_SCALE / 3) / CALIB_SCALE)
#define	c2n(count, freq)	((int)((count) * 1000000000LL / freq))
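/*
 * For reference: c2n() converts a raw count of profiling-timer ticks into
 * nanoseconds using the given frequency (p->profrate, ticks per second),
 * and CALIB_DOSCALE() reduces a total accumulated over CALIB_SCALE
 * iterations to a per-call value, adding CALIB_SCALE / 3 before the
 * integer division so truncation is less pessimistic.
 */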
	printf("cputime %d, empty_loop %d, nullfunc_loop_profiled %d, mcount %d, mexitcount %d\n",
	       CALIB_DOSCALE(c2n(cputime_overhead, p->profrate)),
	       CALIB_DOSCALE(c2n(empty_loop_time, p->profrate)),
	       CALIB_DOSCALE(c2n(nullfunc_loop_profiled_time, p->profrate)),
	       CALIB_DOSCALE(c2n(mcount_overhead, p->profrate)),
	       CALIB_DOSCALE(c2n(mexitcount_overhead, p->profrate)));
	cputime_overhead -= empty_loop_time;
	mcount_overhead -= empty_loop_time;
	mexitcount_overhead -= empty_loop_time;

	/*
	 * Profiling overheads are determined by the times between the
	 * following events:
	 *	MC1: mcount() is called
	 *	MC2: cputime() (called from mcount()) latches the timer
	 *	MC3: mcount() completes
	 *	ME1: mexitcount() is called
	 *	ME2: cputime() (called from mexitcount()) latches the timer
	 *	ME3: mexitcount() completes.
	 * The times between the events vary slightly depending on instruction
	 * combination and cache misses, etc.  Attempt to determine the
	 * minimum times.  These can be subtracted from the profiling times
	 * without much risk of reducing the profiling times below what they
	 * would be when profiling is not configured.  Abbreviate:
	 *	ab = minimum time between MC1 and MC3
	 *	a  = minimum time between MC1 and MC2
	 *	b  = minimum time between MC2 and MC3
	 *	cd = minimum time between ME1 and ME3
	 *	c  = minimum time between ME1 and ME2
	 *	d  = minimum time between ME2 and ME3.
	 * These satisfy the relations:
	 *	ab            <= mcount_overhead		(just measured)
	 *	a + b         =  ab
	 *	cd            <= mexitcount_overhead		(just measured)
	 *	c + d         =  cd
	 *	a         + d <= nullfunc_loop_profiled_time	(just measured)
	 *	a >= 0, b >= 0, c >= 0, d >= 0.
	 * Assume that ab and cd are equal to the minimums.
	 */
	p->cputime_overhead = CALIB_DOSCALE(cputime_overhead);
	p->mcount_overhead = CALIB_DOSCALE(mcount_overhead - cputime_overhead);
	p->mexitcount_overhead = CALIB_DOSCALE(mexitcount_overhead
					       - cputime_overhead);
	nullfunc_loop_overhead = nullfunc_loop_profiled_time - empty_loop_time;
	p->mexitcount_post_overhead = CALIB_DOSCALE((mcount_overhead
						     - nullfunc_loop_overhead)
						    / 4);
	p->mexitcount_pre_overhead = p->mexitcount_overhead
				     + p->cputime_overhead
				     - p->mexitcount_post_overhead;
	p->mcount_pre_overhead = CALIB_DOSCALE(nullfunc_loop_overhead)
				 - p->mexitcount_post_overhead;
	p->mcount_post_overhead = p->mcount_overhead
				  + p->cputime_overhead
				  - p->mcount_pre_overhead;
	printf(
"Profiling overheads: mcount: %d+%d, %d+%d; mexitcount: %d+%d, %d+%d nsec\n",
	       c2n(p->cputime_overhead, p->profrate),
	       c2n(p->mcount_overhead, p->profrate),
	       c2n(p->mcount_pre_overhead, p->profrate),
	       c2n(p->mcount_post_overhead, p->profrate),
	       c2n(p->cputime_overhead, p->profrate),
	       c2n(p->mexitcount_overhead, p->profrate),
	       c2n(p->mexitcount_pre_overhead, p->profrate),
	       c2n(p->mexitcount_post_overhead, p->profrate));
	printf(
"Profiling overheads: mcount: %d+%d, %d+%d; mexitcount: %d+%d, %d+%d cycles\n",
	       p->cputime_overhead, p->mcount_overhead,
	       p->mcount_pre_overhead, p->mcount_post_overhead,
	       p->cputime_overhead, p->mexitcount_overhead,
	       p->mexitcount_pre_overhead, p->mexitcount_post_overhead);
#endif /* GUPROF */
}

/*
 * Return kernel profiling information.
 */
static int
sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *) arg1;
	u_int namelen = arg2;
	struct gmonparam *gp = &_gmonparam;
	int error;
	int state;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case GPROF_STATE:
		state = gp->state;
		error = sysctl_handle_int(oidp, &state, 0, req);
		if (error)
			return (error);
		if (!req->newptr)
			return (0);
		if (state == GMON_PROF_OFF) {
			gp->state = state;
			PROC_LOCK(&proc0);
			stopprofclock(&proc0);
			PROC_UNLOCK(&proc0);
			stopguprof(gp);
		} else if (state == GMON_PROF_ON) {
			gp->state = GMON_PROF_OFF;
			stopguprof(gp);
			gp->profrate = profhz;
			PROC_LOCK(&proc0);
			startprofclock(&proc0);
			PROC_UNLOCK(&proc0);
			gp->state = state;
#ifdef GUPROF
		} else if (state == GMON_PROF_HIRES) {
			gp->state = GMON_PROF_OFF;
			PROC_LOCK(&proc0);
			stopprofclock(&proc0);
			PROC_UNLOCK(&proc0);
			startguprof(gp);
			gp->state = state;
#endif
		} else if (state != gp->state)
			return (EINVAL);
		return (0);
	case GPROF_COUNT:
		return (sysctl_handle_opaque(oidp,
			gp->kcount, gp->kcountsize, req));
	case GPROF_FROMS:
		return (sysctl_handle_opaque(oidp,
			gp->froms, gp->fromssize, req));
	case GPROF_TOS:
		return (sysctl_handle_opaque(oidp,
			gp->tos, gp->tossize, req));
	case GPROF_GMONPARAM:
		return (sysctl_handle_opaque(oidp, gp, sizeof *gp, req));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
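
/*
 * Userland reaches the handler above through the kern.prof sysctl tree
 * declared below; kgmon(8) is the usual consumer, toggling GPROF_STATE
 * and reading GPROF_COUNT, GPROF_FROMS and GPROF_TOS to build a gmon.out
 * for gprof(1).
 */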
static SYSCTL_NODE(_kern, KERN_PROF, prof,
    CTLFLAG_RW | CTLFLAG_MPSAFE, sysctl_kern_prof,
    "");
#endif /* GPROF */

/*
 * Profiling system call.
 *
 * The scale factor is a fixed point number with 16 bits of fraction, so that
 * 1.0 is represented as 0x10000.  A scale factor of 0 turns off profiling.
 */
#ifndef _SYS_SYSPROTO_H_
struct profil_args {
	caddr_t	samples;
	size_t	size;
	size_t	offset;
	u_int	scale;
};
#endif
/* ARGSUSED */
int
sys_profil(struct thread *td, struct profil_args *uap)
{
	struct uprof *upp;
	struct proc *p;

	if (uap->scale > (1 << 16))
		return (EINVAL);

	p = td->td_proc;
	if (uap->scale == 0) {
		PROC_LOCK(p);
		stopprofclock(p);
		PROC_UNLOCK(p);
		return (0);
	}
	PROC_LOCK(p);
	upp = &td->td_proc->p_stats->p_prof;
	PROC_PROFLOCK(p);
	upp->pr_off = uap->offset;
	upp->pr_scale = uap->scale;
	upp->pr_base = uap->samples;
	upp->pr_size = uap->size;
	PROC_PROFUNLOCK(p);
	startprofclock(p);
	PROC_UNLOCK(p);

	return (0);
}

/*
 * Scale is a fixed-point number with the binary point 16 bits
 * into the value, and is <= 1.0.  pc is at most 32 bits, so the
 * intermediate result is at most 48 bits.
 */
#define	PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
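
/*
 * Worked example (illustrative values): with pr_scale = 0x8000 (i.e. 0.5)
 * and pr_off = 0, a pc of 0x10 yields (0x10 * 0x8000) >> 16 = 8; the
 * trailing "& ~1" keeps the index aligned to the u_short counters in the
 * sample buffer, so pcs 0x10 through 0x13 all map to byte offset 8.
 */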

/*
 * Collect user-level profiling statistics; called on a profiling tick,
 * when a process is running in user-mode.  This routine may be called
 * from an interrupt context.  We perform the update with an AST
 * that will vector us to trap() with a context in which copyin and
 * copyout will work.  Trap will then call addupc_task().
 *
 * Note that we may (rarely) not get around to the AST soon enough, and
 * lose profile ticks when the next tick overwrites this one, but in this
 * case the system is overloaded and the profile is probably already
 * inaccurate.
 */
void
addupc_intr(struct thread *td, uintfptr_t pc, u_int ticks)
{
	struct uprof *prof;

	if (ticks == 0)
		return;
	prof = &td->td_proc->p_stats->p_prof;
	PROC_PROFLOCK(td->td_proc);
	if (pc < prof->pr_off || PC_TO_INDEX(pc, prof) >= prof->pr_size) {
		PROC_PROFUNLOCK(td->td_proc);
		return;			/* out of range; ignore */
	}

	PROC_PROFUNLOCK(td->td_proc);
	td->td_profil_addr = pc;
	td->td_profil_ticks = ticks;
	td->td_pflags |= TDP_OWEUPC;
	thread_lock(td);
	td->td_flags |= TDF_ASTPENDING;
	thread_unlock(td);
}

/*
 * Actually update the profiling statistics.  If the update fails, we
 * simply turn off profiling.
 */
void
addupc_task(struct thread *td, uintfptr_t pc, u_int ticks)
{
	struct proc *p = td->td_proc;
	struct uprof *prof;
	caddr_t addr;
	u_int i;
	u_short v;
	int stop = 0;

	if (ticks == 0)
		return;

	PROC_LOCK(p);
	if (!(p->p_flag & P_PROFIL)) {
		PROC_UNLOCK(p);
		return;
	}
	p->p_profthreads++;
	prof = &p->p_stats->p_prof;
	PROC_PROFLOCK(p);
	if (pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
		PROC_PROFUNLOCK(p);
		goto out;
	}

	addr = prof->pr_base + i;
	PROC_PROFUNLOCK(p);
	PROC_UNLOCK(p);
	if (copyin(addr, &v, sizeof(v)) == 0) {
		v += ticks;
		if (copyout(&v, addr, sizeof(v)) == 0) {
			PROC_LOCK(p);
			goto out;
		}
	}
	stop = 1;
	PROC_LOCK(p);

out:
	if (--p->p_profthreads == 0) {
		if (p->p_flag & P_STOPPROF) {
			wakeup(&p->p_profthreads);
			p->p_flag &= ~P_STOPPROF;